diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml
index 86057d5410650e400eac24cee01dfdbaf91fdb57..12a7df0e38b2f1f35b1b8de234b5d67ada5e0761 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml
@@ -84,36 +84,23 @@ properties:
     const: dma-mem
 
   ports:
-    type: object
-    description: |
-      A ports node with endpoint definitions as defined in
-      Documentation/devicetree/bindings/media/video-interfaces.txt.
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
-
       port@0:
-        type: object
-        description: |
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
           Input endpoints of the controller.
 
       port@1:
-        type: object
-        description: |
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
           Output endpoints of the controller.
 
     required:
-      - "#address-cells"
-      - "#size-cells"
       - port@0
       - port@1
 
-    additionalProperties: false
-
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml
index 3eb1c2bbf4e7f976c195f13dba51a507ae8ec641..055157fbf3bf935fdceb1cebf90191f86d1595c7 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml
@@ -57,35 +57,22 @@ properties:
     maxItems: 1
 
   ports:
-    type: object
-    description: |
-      A ports node with endpoint definitions as defined in
-      Documentation/devicetree/bindings/media/video-interfaces.txt.
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
-
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Input endpoints of the controller.
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Output endpoints of the controller.
 
     required:
-      - "#address-cells"
-      - "#size-cells"
       - port@1
 
-    additionalProperties: false
-
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml
index 75e6479397a5023d781379be1fe760271bde7e35..7f11452539f413e17627d5c098cef77ecc4388df 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml
@@ -76,37 +76,24 @@ properties:
       - const: audio-tx
 
   ports:
-    type: object
-    description: |
-      A ports node with endpoint definitions as defined in
-      Documentation/devicetree/bindings/media/video-interfaces.txt.
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
-
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Input endpoints of the controller.
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Output endpoints of the controller. Usually an HDMI
           connector.
 
     required:
-      - "#address-cells"
-      - "#size-cells"
       - port@0
       - port@1
 
-    additionalProperties: false
-
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
index 4c15a2644a7cfc216e9f4be9e6a542d5de3c7c94..c13faf3e6581ef101a7a2b26fa0db14769edb764 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
@@ -115,31 +115,24 @@ properties:
           - const: lvds
 
   ports:
-    type: object
-    description: |
-      A ports node with endpoint definitions as defined in
-      Documentation/devicetree/bindings/media/video-interfaces.txt.
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
-
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Input endpoints of the controller.
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/$defs/port-base
+        unevaluatedProperties: false
         description: |
           Output endpoints of the controller.
 
         patternProperties:
           "^endpoint(@[0-9])$":
-            type: object
+            $ref: /schemas/graph.yaml#/$defs/endpoint-base
+            unevaluatedProperties: false
 
             properties:
               allwinner,tcon-channel:
@@ -156,16 +149,10 @@ properties:
                   property is not present, the endpoint number will be
                   used as the channel number.
 
-            unevaluatedProperties: true
-
     required:
-      - "#address-cells"
-      - "#size-cells"
       - port@0
       - port@1
 
-    additionalProperties: false
-
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
index 6009324be967ef517a64b99c0efed92d7862f932..afc0ed799e0ed20200c87735769a27c827c27f3c 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
@@ -24,11 +24,9 @@ properties:
     maxItems: 1
 
   port:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/port
     description:
-      A port node with endpoint definitions as defined in
-      Documentation/devicetree/bindings/media/video-interfaces.txt. The
-      first port should be the input endpoint, usually coming from the
+      This port should be the input endpoint, usually coming from the
       associated TCON.
 
 required:
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml
index 0c1ce55940e1641cfb34530823bbcef307b2376f..71cce5687580612c9cc8a79470b64754a125533d 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml
@@ -46,36 +46,23 @@ properties:
     maxItems: 1
 
   ports:
-    type: object
-    description: |
-      A ports node with endpoint definitions as defined in
-      Documentation/devicetree/bindings/media/video-interfaces.txt.
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
-
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Input endpoints of the controller.
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Output endpoints of the controller.
 
     required:
-      - "#address-cells"
-      - "#size-cells"
       - port@0
       - port@1
 
-    additionalProperties: false
-
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
index 7aa330dabc446e8823bd81ffeef7f3dd5fb1cb21..a738d7c12a97a481d1cbd97b4ef266413baad0ce 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
@@ -47,11 +47,9 @@ properties:
     const: dphy
 
   port:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/port
     description:
-      A port node with endpoint definitions as defined in
-      Documentation/devicetree/bindings/media/video-interfaces.txt. That
-      port should be the input endpoint, usually coming from the
+      The port should be the input endpoint, usually coming from the
       associated TCON.
 
 required:
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml
index c040eef5651820d1aef3579382b2cc297740ab96..4f91eec26de9dcf95ed613f12c85994d4a4adae1 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml
@@ -43,35 +43,22 @@ properties:
     maxItems: 1
 
   ports:
-    type: object
-    description: |
-      A ports node with endpoint definitions as defined in
-      Documentation/devicetree/bindings/media/video-interfaces.txt.
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
-
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Input endpoints of the controller.
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Output endpoints of the controller.
 
     required:
-      - "#address-cells"
-      - "#size-cells"
       - port@1
 
-    additionalProperties: false
-
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
index fa4769a0b26ec9f838fff75ddc82c0662ecbf2ca..b3e9992525c2ae4f5a0c2ac306074857faa34997 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
@@ -93,38 +93,25 @@ properties:
       The VCC power supply of the controller
 
   ports:
-    type: object
-    description: |
-      A ports node with endpoint definitions as defined in
-      Documentation/devicetree/bindings/media/video-interfaces.txt.
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
-
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Input endpoints of the controller. Usually the associated
           TCON.
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Output endpoints of the controller. Usually an HDMI
           connector.
 
     required:
-      - "#address-cells"
-      - "#size-cells"
       - port@0
       - port@1
 
-    additionalProperties: false
-
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml
index b98ca609824b92a2e3bfef82251be8b1aadd4221..ec21e8bf2767316ece8d222404a9a6dc61602df2 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml
@@ -80,141 +80,45 @@ properties:
     maxItems: 1
 
   ports:
-    type: object
-    description: |
-      A ports node with endpoint definitions as defined in
-      Documentation/devicetree/bindings/media/video-interfaces.txt.
-      All ports should have only one endpoint connected to
-      remote endpoint.
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
-
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Input endpoint for Mixer 0 mux.
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Output endpoint for Mixer 0 mux
 
-        properties:
-          "#address-cells":
-            const: 1
-
-          "#size-cells":
-            const: 0
-
-          reg: true
-
-        patternProperties:
-          "^endpoint@[0-9]$":
-            type: object
-
-            properties:
-              reg:
-                description: |
-                  ID of the target TCON
-
-            required:
-              - reg
-
-        required:
-          - "#address-cells"
-          - "#size-cells"
-
-        additionalProperties: false
-
       port@2:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Input endpoint for Mixer 1 mux.
 
       port@3:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Output endpoint for Mixer 1 mux
 
-        properties:
-          "#address-cells":
-            const: 1
-
-          "#size-cells":
-            const: 0
-
-          reg: true
-
-        patternProperties:
-          "^endpoint@[0-9]$":
-            type: object
-
-            properties:
-              reg:
-                description: |
-                  ID of the target TCON
-
-            required:
-              - reg
-
-        required:
-          - "#address-cells"
-          - "#size-cells"
-
-        additionalProperties: false
-
       port@4:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Input endpoint for HDMI mux.
 
-        properties:
-          "#address-cells":
-            const: 1
-
-          "#size-cells":
-            const: 0
-
-          reg: true
-
-        patternProperties:
-          "^endpoint@[0-9]$":
-            type: object
-
-            properties:
-              reg:
-                description: |
-                  ID of the target TCON
-
-            required:
-              - reg
-
-        required:
-          - "#address-cells"
-          - "#size-cells"
-
-        additionalProperties: false
-
       port@5:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Output endpoint for HDMI mux
 
     required:
-      - "#address-cells"
-      - "#size-cells"
       - port@0
       - port@1
       - port@4
       - port@5
 
-    additionalProperties: false
-
 required:
   - "#clock-cells"
   - compatible
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml b/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml
index 96de41d32b3eb741cbbcb5267f34f1ad052d76c9..637372ec4614d42a1211532c628f35d8754f5a88 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml
@@ -40,36 +40,23 @@ properties:
     maxItems: 1
 
   ports:
-    type: object
-    description: |
-      A ports node with endpoint definitions as defined in
-      Documentation/devicetree/bindings/media/video-interfaces.txt.
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
-
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Input endpoints of the controller.
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Output endpoints of the controller.
 
     required:
-      - "#address-cells"
-      - "#size-cells"
       - port@0
       - port@1
 
-    additionalProperties: false
-
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
index 0da42ab8fd3a52b2f9fef7b1ff47919a273c3003..cf5a208f2f1056215e8dfdd57a8eff65138855ef 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
@@ -81,12 +81,12 @@ properties:
     description: phandle to an external 5V regulator to power the HDMI logic
 
   port@0:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/port
     description:
       A port node pointing to the VENC Input port node.
 
   port@1:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/port
     description:
       A port node pointing to the TMDS Output port node.
 
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
index a8d202c9d004c67f7009e17d8928cbb415e45355..851cb078121731aff6c86a21dfabd93177727988 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
@@ -83,12 +83,12 @@ properties:
     description: phandle to the associated power domain
 
   port@0:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/port
     description:
       A port node pointing to the CVBS VDAC port node.
 
   port@1:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/port
     description:
       A port node pointing to the HDMI-TX port node.
 
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
index 7ce06f9f9f8ee70d8a3d2d817af4724539eada01..6e8ac910bdd8dad8a64a702a6db6ca7877b405b6 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
@@ -53,6 +53,24 @@ properties:
       - const: audio
       - const: cec
 
+  interrupts:
+    items:
+      - description: CEC TX interrupt
+      - description: CEC RX interrupt
+      - description: CEC stuck at low interrupt
+      - description: Wake-up interrupt
+      - description: Hotplug connected interrupt
+      - description: Hotplug removed interrupt
+
+  interrupt-names:
+    items:
+      - const: cec-tx
+      - const: cec-rx
+      - const: cec-low
+      - const: wakeup
+      - const: hpd-connected
+      - const: hpd-removed
+
   ddc:
     allOf:
       - $ref: /schemas/types.yaml#/definitions/phandle
@@ -90,7 +108,7 @@ required:
   - resets
   - ddc
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
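
A hedged sketch of how the interrupt lines documented above could be wired up; the node name, unit address, register range, interrupt parent, and cell values are placeholders, not taken from this patch:

    hdmi0: hdmi@7ef00700 {
            compatible = "brcm,bcm2711-hdmi0";
            reg = <0x7ef00700 0x300>;   /* other reg ranges elided */
            interrupt-parent = <&aon_intr>;
            /* One entry per line of the 'interrupts' description above,
             * matched positionally by 'interrupt-names'. */
            interrupts = <0>, <1>, <2>, <3>, <4>, <5>;
            interrupt-names = "cec-tx", "cec-rx", "cec-low",
                              "wakeup", "hpd-connected", "hpd-removed";
            ddc = <&ddc0>;
            /* clocks, resets and other required properties elided */
    };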
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2835-dpi.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2835-dpi.yaml
index 5c1024bbc1b30c61d8f0b58e85e69187e7a1cc5f..c9ad0ecc9b6d73acca6cbf5aec65f2b1281a5673 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm2835-dpi.yaml
+++ b/Documentation/devicetree/bindings/display/brcm,bcm2835-dpi.yaml
@@ -27,10 +27,9 @@ properties:
       - const: pixel
 
   port:
-    type: object
-    description: >
-      Port node with a single endpoint connecting to the panel, as
-      defined in Documentation/devicetree/bindings/media/video-interfaces.txt.
+    $ref: /schemas/graph.yaml#/properties/port
+    description:
+      Port node with a single endpoint connecting to the panel.
 
 required:
   - compatible
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2835-dsi0.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2835-dsi0.yaml
index eb44e072b6e59d24a8f25b3b880363c88423d3da..55c60919991f7d171fd26af3ce0fa4a61a572f7b 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm2835-dsi0.yaml
+++ b/Documentation/devicetree/bindings/display/brcm,bcm2835-dsi0.yaml
@@ -18,6 +18,7 @@ properties:
 
   compatible:
     enum:
+      - brcm,bcm2711-dsi1
       - brcm,bcm2835-dsi0
       - brcm,bcm2835-dsi1
 
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml b/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml
index 9392b5502a3293cce3601be417d4d9aee6796792..c789784efe3062fcee91adcdd7b80d64b6418f10 100644
--- a/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml
@@ -35,16 +35,16 @@ properties:
     maxItems: 1
 
   ports:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           Video port for MIPI DSI input.
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           Video port for panel or connector.
 
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml b/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
index 3ba477aefdd79466869fade87d51b70f61568e12..8e13f27b28edc0c64684c883869ba4885ee479fd 100644
--- a/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
@@ -42,31 +42,18 @@ properties:
     description: Regulator for 1.0V digital core power.
 
   ports:
-    type: object
-    description:
-      A node containing input and output port nodes with endpoint
-      definitions as documented in
-      Documentation/devicetree/bindings/media/video-interfaces.txt
-      Documentation/devicetree/bindings/graph.txt
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: Video port for HDMI input.
 
-        properties:
-          reg:
-            const: 0
-
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           Video port for SlimPort, DisplayPort, eDP or MyDP output.
 
-        properties:
-          reg:
-            const: 1
-
     required:
       - port@0
       - port@1
diff --git a/Documentation/devicetree/bindings/display/bridge/anx6345.yaml b/Documentation/devicetree/bindings/display/bridge/anx6345.yaml
index fccd63521a8ca3fa68ed45d42c39184accabbe3b..1c0406c38fe549ac5fbc1d98950363262496dec3 100644
--- a/Documentation/devicetree/bindings/display/bridge/anx6345.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/anx6345.yaml
@@ -32,31 +32,23 @@ properties:
     description: Regulator for 2.5V digital core power.
 
   ports:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      '#address-cells':
-        const: 1
-
-      '#size-cells':
-        const: 0
-
       port@0:
-        type: object
-        description: |
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
           Video port for LVTTL input
 
       port@1:
-        type: object
-        description: |
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
           Video port for eDP output (panel or connector).
           May be omitted if EDID works reliably.
 
     required:
       - port@0
 
-    additionalProperties: false
-
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml b/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml
index 74d675fc6e7ba65b56f2572e834019a18871f4cb..63427878715e2ba56246e309178a0047b85396a4 100644
--- a/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml
@@ -57,47 +57,37 @@ properties:
     maxItems: 1
 
   ports:
-    type: object
-    description:
-      Ports as described in Documentation/devicetree/bindings/graph.txt.
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      '#address-cells':
-        const: 1
-
-      '#size-cells':
-        const: 0
-
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           First input port representing the DP bridge input.
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           Second input port representing the DP bridge input.
 
       port@2:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           Third input port representing the DP bridge input.
 
       port@3:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           Fourth input port representing the DP bridge input.
 
       port@4:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           Output port representing the DP bridge output.
 
     required:
       - port@0
       - port@4
-      - '#address-cells'
-      - '#size-cells'
 
 allOf:
   - if:
diff --git a/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml b/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml
index 9f38f55fc990421ba118383773da7bb324060ab1..bb6289c7d375e79928edcc1f8d649f9849754919 100644
--- a/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml
@@ -19,16 +19,16 @@ properties:
     description: I2C address of the device
 
   ports:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Video port for RGB input.
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           DVI port, should be connected to a node compatible with the
           dvi-connector binding.
diff --git a/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml
index 35c9dfd866501a04ae317ddee260d7c78ea02203..dcb1336ee2a56e398ce1df321294693db3dc2a3f 100644
--- a/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml
@@ -35,29 +35,21 @@ properties:
       - const: clk_mipi_cfg
 
   ports:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      '#address-cells':
-        const: 1
-
-      '#size-cells':
-        const: 0
-
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: MIPI DSI input port.
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: DSI output port.
 
     required:
       - port@0
       - port@1
 
-    additionalProperties: false
-
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
index 02cfc0a3b550289b157a546946a87e85e73bdf3f..833d11b2303a7d11a4f831522c16feb6991e7070 100644
--- a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
@@ -53,7 +53,7 @@ properties:
     description: extcon specifier for the Power Delivery
 
   port:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/port
     description: A port node pointing to DPI host port node
 
 required:
diff --git a/Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml b/Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml
index 7a1c89b995e215bb095451ab5daaf54fa4825a36..5b9d36f7af304848ff93168d1b7764ee85339fd6 100644
--- a/Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml
@@ -38,82 +38,26 @@ properties:
     description: Regulator for 3.3V IO power.
 
   ports:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
-
       port@0:
-        type: object
-        description: |
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
           Primary MIPI port-1 for MIPI input
 
-        properties:
-          reg:
-            const: 0
-
-        patternProperties:
-          "^endpoint(@[0-9])$":
-            type: object
-            additionalProperties: false
-
-            properties:
-              remote-endpoint:
-                $ref: /schemas/types.yaml#/definitions/phandle
-
-        required:
-          - reg
-
       port@1:
-        type: object
-        description: |
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
           Additional MIPI port-2 for MIPI input, used in combination
           with primary MIPI port-1 to drive higher resolution displays
 
-        properties:
-          reg:
-            const: 1
-
-        patternProperties:
-          "^endpoint(@[0-9])$":
-            type: object
-            additionalProperties: false
-
-            properties:
-              remote-endpoint:
-                $ref: /schemas/types.yaml#/definitions/phandle
-
-        required:
-          - reg
-
       port@2:
-        type: object
-        description: |
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
           HDMI port for HDMI output
 
-        properties:
-          reg:
-            const: 2
-
-        patternProperties:
-          "^endpoint(@[0-9])$":
-            type: object
-            additionalProperties: false
-
-            properties:
-              remote-endpoint:
-                $ref: /schemas/types.yaml#/definitions/phandle
-
-        required:
-          - reg
-
     required:
-      - "#address-cells"
-      - "#size-cells"
       - port@0
       - port@2
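
A rough sketch of the dual-MIPI arrangement described for port@0 and port@1; labels, remote endpoints, and the elided supplies are hypothetical:

    hdmi-bridge@3b {
            compatible = "lontium,lt9611";
            reg = <0x3b>;
            /* vdd-supply, vcc-supply, interrupts, etc. elided */

            ports {
                    #address-cells = <1>;
                    #size-cells = <0>;

                    port@0 {        /* primary MIPI input */
                            reg = <0>;
                            lt9611_a: endpoint {
                                    remote-endpoint = <&dsi0_out>;
                            };
                    };

                    port@1 {        /* second MIPI input, higher resolutions */
                            reg = <1>;
                            lt9611_b: endpoint {
                                    remote-endpoint = <&dsi1_out>;
                            };
                    };

                    port@2 {        /* HDMI output */
                            reg = <2>;
                            lt9611_out: endpoint {
                                    remote-endpoint = <&hdmi_con_in>;
                            };
                    };
            };
    };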
 
diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
index 66a14d60ce1d54b2aafd4594aa140980b93918f9..304a1367faaa728b29b3f121700d5024af17182c 100644
--- a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
@@ -45,25 +45,17 @@ properties:
           - thine,thc63lvdm83d # For the THC63LVDM83D LVDS serializer
 
   ports:
-    type: object
-    description: |
-      This device has two video ports. Their connections are modeled using the
-      OF graph bindings specified in Documentation/devicetree/bindings/graph.txt
-    properties:
-      '#address-cells':
-        const: 1
-
-      '#size-cells':
-        const: 0
+    $ref: /schemas/graph.yaml#/properties/ports
 
+    properties:
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           For LVDS encoders, port 0 is the parallel input
           For LVDS decoders, port 0 is the LVDS input
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           For LVDS encoders, port 1 is the LVDS output
           For LVDS decoders, port 1 is the parallel output
@@ -72,8 +64,6 @@ properties:
       - port@0
       - port@1
 
-    additionalProperties: false
-
   powerdown-gpios:
     description:
       The GPIO used to control the power down line of this device.
diff --git a/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
index a125b2dd3a2f11bb7e80e4ccd5ca225489fcf263..350fb8f400f02265b0cc1c89ff7484db5c36c95c 100644
--- a/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
@@ -84,40 +84,23 @@ properties:
       - const: pclk
 
   ports:
-    type: object
-    description:
-      A node containing DSI input & output port nodes with endpoint
-      definitions as documented in
-      Documentation/devicetree/bindings/graph.txt.
+    $ref: /schemas/graph.yaml#/properties/ports
+
     properties:
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/$defs/port-base
         description:
           Input port node to receive pixel data from the
           display controller. Exactly one endpoint must be
           specified.
         properties:
-          '#address-cells':
-            const: 1
-
-          '#size-cells':
-            const: 0
-
           endpoint@0:
+            $ref: /schemas/graph.yaml#/properties/endpoint
             description: sub-node describing the input from LCDIF
-            type: object
 
           endpoint@1:
+            $ref: /schemas/graph.yaml#/properties/endpoint
             description: sub-node describing the input from DCSS
-            type: object
-
-          reg:
-            const: 0
-
-        required:
-          - '#address-cells'
-          - '#size-cells'
-          - reg
 
         oneOf:
           - required:
@@ -125,28 +108,18 @@ properties:
           - required:
               - endpoint@1
 
-        additionalProperties: false
+        unevaluatedProperties: false
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           DSI output port node to the panel or the next bridge
           in the chain
 
-      '#address-cells':
-        const: 1
-
-      '#size-cells':
-        const: 0
-
     required:
-      - '#address-cells'
-      - '#size-cells'
       - port@0
       - port@1
 
-    additionalProperties: false
-
 required:
   - '#address-cells'
   - '#size-cells'
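
A minimal sketch of the input-port rule the oneOf above enforces: exactly one of endpoint@0 (LCDIF) or endpoint@1 (DCSS) is populated. Labels and remote endpoints are hypothetical:

    ports {
            #address-cells = <1>;
            #size-cells = <0>;

            port@0 {
                    #address-cells = <1>;
                    #size-cells = <0>;
                    reg = <0>;

                    /* input from LCDIF; a DCSS pipeline would populate
                     * endpoint@1 instead */
                    dsi_in_lcdif: endpoint@0 {
                            reg = <0>;
                            remote-endpoint = <&lcdif_dsi_out>;
                    };
            };

            port@1 {
                    reg = <1>;
                    dsi_out: endpoint {
                            remote-endpoint = <&panel_in>;
                    };
            };
    };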
diff --git a/Documentation/devicetree/bindings/display/bridge/ps8640.yaml b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
index 763c7909473efe7bf3aa62ed6818eda598380fca..fce82b605c8bf4e959c4e56e3390945f433e0dbe 100644
--- a/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
@@ -41,34 +41,22 @@ properties:
     description: Regulator for 3.3V digital core power.
 
   ports:
-    type: object
-    description:
-      A node containing DSI input & output port nodes with endpoint
-      definitions as documented in
-      Documentation/devicetree/bindings/media/video-interfaces.txt
-      Documentation/devicetree/bindings/graph.txt
-    properties:
-      '#address-cells':
-        const: 1
-
-      '#size-cells':
-        const: 0
+    $ref: /schemas/graph.yaml#/properties/ports
 
+    properties:
       port@0:
-        type: object
-        description: |
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
           Video port for DSI input
 
       port@1:
-        type: object
-        description: |
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
           Video port for eDP output (panel or connector).
 
     required:
       - port@0
 
-    additionalProperties: false
-
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
index e5b163951b919fed5dd54aa579909cabb4546551..acfc327f70a709ef82a3c6368aa87c913b29bdde 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
@@ -49,33 +49,21 @@ properties:
     maxItems: 1
 
   ports:
-    type: object
-    description: |
-      This device has two video ports. Their connections are modelled using the
-      OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-      Each port shall have a single endpoint.
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      '#address-cells':
-        const: 1
-
-      '#size-cells':
-        const: 0
-
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: Parallel RGB input port
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: LVDS output port
 
     required:
       - port@0
       - port@1
 
-    additionalProperties: false
-
   power-domains:
     maxItems: 1
 
@@ -83,9 +71,9 @@ properties:
     $ref: /schemas/types.yaml#/definitions/phandle
     description:
       phandle to the companion LVDS encoder. This property is mandatory
-      for the first LVDS encoder on D3 and E3 SoCs, and shall point to
-      the second encoder to be used as a companion in dual-link mode. It
-      shall not be set for any other LVDS encoder.
+      for the first LVDS encoder on R-Car D3 and E3, and RZ/G2E SoCs, and shall
+      point to the second encoder to be used as a companion in dual-link mode.
+      It shall not be set for any other LVDS encoder.
 
 required:
   - compatible
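
A hedged sketch of the companion arrangement for dual-link mode (labels hypothetical): the first encoder carries renesas,companion and points at the second, which itself carries no such property:

    &lvds0 {
            status = "okay";
            /* first encoder: owns the link and names its companion */
            renesas,companion = <&lvds1>;
    };

    &lvds1 {
            /* companion encoder: no renesas,companion property here */
            status = "okay";
    };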
diff --git a/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml b/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml
index 64e8a1c24b402102ab03b028efc5ece510e27cf5..6c7b577fd471111d15621cbcb0ce29b8b20a8fbe 100644
--- a/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml
@@ -30,31 +30,21 @@ properties:
           - ti,ths8135
 
   ports:
-    type: object
-    description: |
-      This device has two video ports. Their connections are modeled using the
-      OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-    properties:
-      '#address-cells':
-        const: 1
-
-      '#size-cells':
-        const: 0
+    $ref: /schemas/graph.yaml#/properties/ports
 
+    properties:
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: The bridge input
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: The bridge output
 
     required:
       - port@0
       - port@1
 
-    additionalProperties: false
-
   enable-gpios:
     maxItems: 1
     description: GPIO controlling bridge enable
diff --git a/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml
index e42cb610f545c112f068766a7a96d8a32c6f9dce..3c3e51af154bc5a0eea9caa93203997a1f8150cf 100644
--- a/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml
@@ -47,14 +47,15 @@ properties:
     const: apb
 
   ports:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: Input node to receive pixel data.
+
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: DSI output node to panel.
 
     required:
diff --git a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml
index 3d5ce08a5792726e71dea33154939237726d0023..8ae382429d2bff1efe1acc24e33fcd6c22b9aefe 100644
--- a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml
@@ -25,46 +25,41 @@ properties:
     const: thine,thc63lvd1024
 
   ports:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/ports
     description: |
-      This device has four video ports. Their connections are modeled using the
-      OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+      The device can operate in single or dual input and output modes.
 
-      The device can operate in single-link mode or dual-link mode. In
-      single-link mode, all pixels are received on port@0, and port@1 shall not
-      contain any endpoint. In dual-link mode, even-numbered pixels are
-      received on port@0 and odd-numbered pixels on port@1, and both port@0 and
-      port@1 shall contain endpoints.
+      When operating in single input mode, all pixels are received on port@0,
+      and port@1 shall not contain any endpoint. In dual input mode,
+      even-numbered pixels are received on port@0 and odd-numbered pixels on
+      port@1, and both port@0 and port@1 shall contain endpoints.
 
-    properties:
-      '#address-cells':
-        const: 1
-
-      '#size-cells':
-        const: 0
+      When operating in single output mode, all pixels are output from the
+      first CMOS/TTL port and port@3 shall not contain any endpoint. In dual
+      output mode, pixels are output from both CMOS/TTL ports and both port@2
+      and port@3 shall contain endpoints.
 
+    properties:
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: First LVDS input port
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: Second LVDS input port
 
       port@2:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: First digital CMOS/TTL parallel output
 
       port@3:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: Second digital CMOS/TTL parallel output
 
     required:
       - port@0
       - port@2
 
-    additionalProperties: false
-
   oe-gpios:
     maxItems: 1
     description: Output enable GPIO signal, pin name "OE", active high.
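
To make the single/dual rules concrete, a sketch of a dual-input, single-output configuration (remote endpoints are hypothetical): both LVDS input ports carry endpoints, while port@3 is left without one:

    lvds-decoder {
            compatible = "thine,thc63lvd1024";
            vcc-supply = <&reg_lvds_vcc>;

            ports {
                    #address-cells = <1>;
                    #size-cells = <0>;

                    port@0 {
                            reg = <0>;
                            thc63_in0: endpoint {
                                    remote-endpoint = <&lvds0_out>; /* even pixels */
                            };
                    };

                    port@1 {
                            reg = <1>;
                            thc63_in1: endpoint {
                                    remote-endpoint = <&lvds1_out>; /* odd pixels */
                            };
                    };

                    port@2 {
                            reg = <2>;
                            thc63_out: endpoint {
                                    remote-endpoint = <&panel_in>;
                            };
                    };

                    /* port@3 omitted: single output mode */
            };
    };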
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
index f8622bd0f61ee62d232af4e5417f30e8fb564d61..26932d2e86aba5b5b791a4183a042c446fdce3a6 100644
--- a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
@@ -71,54 +71,26 @@ properties:
     description: See ../../pwm/pwm.yaml for description of the cell formats.
 
   ports:
-    type: object
-    additionalProperties: false
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
-
       port@0:
-        type: object
-        additionalProperties: false
-
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           Video port for MIPI DSI input
 
-        properties:
-          reg:
-            const: 0
-
-          endpoint:
-            type: object
-            additionalProperties: false
-            properties:
-              remote-endpoint: true
-
-        required:
-          - reg
-
       port@1:
-        type: object
-        additionalProperties: false
-
+        $ref: /schemas/graph.yaml#/$defs/port-base
+        unevaluatedProperties: false
         description:
           Video port for eDP output (panel or connector).
 
         properties:
-          reg:
-            const: 1
-
           endpoint:
-            type: object
-            additionalProperties: false
+            $ref: /schemas/graph.yaml#/$defs/endpoint-base
+            unevaluatedProperties: false
 
             properties:
-              remote-endpoint: true
-
               data-lanes:
                 oneOf:
                   - minItems: 1
@@ -171,12 +143,7 @@ properties:
             dependencies:
               lane-polarities: [data-lanes]
 
-        required:
-          - reg
-
     required:
-      - "#address-cells"
-      - "#size-cells"
       - port@0
       - port@1
 
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,tfp410.yaml b/Documentation/devicetree/bindings/display/bridge/ti,tfp410.yaml
index 605831c1e8366eb045b04460cfeaaa4ecec04b87..4c5dd8ec2951b1836c01a399ca82f81fb145ad69 100644
--- a/Documentation/devicetree/bindings/display/bridge/ti,tfp410.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ti,tfp410.yaml
@@ -31,23 +31,18 @@ properties:
     maximum: 7
 
   ports:
-    description:
-      A node containing input and output port nodes with endpoint
-      definitions as documented in
-      Documentation/devicetree/bindings/media/video-interfaces.txt
-    type: object
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
       port@0:
+        $ref: /schemas/graph.yaml#/$defs/port-base
+        unevaluatedProperties: false
         description: DPI input port.
-        type: object
 
         properties:
-          reg:
-            const: 0
-
           endpoint:
-            type: object
+            $ref: /schemas/graph.yaml#/$defs/endpoint-base
+            unevaluatedProperties: false
 
             properties:
               pclk-sample:
@@ -67,15 +62,8 @@ properties:
                 default: 24
 
       port@1:
+        $ref: /schemas/graph.yaml#/properties/port
         description: DVI output port.
-        type: object
-
-        properties:
-          reg:
-            const: 1
-
-          endpoint:
-            type: object
 
     required:
       - port@0
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml
index 195025e6803cdd4804d7962a59f8247f29f0eba4..5216c27fc0ada58881235761037162c5873ed0c6 100644
--- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml
@@ -25,62 +25,20 @@ properties:
     description: Regulator for 1.2V internal core power.
 
   ports:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
-
       port@0:
-        type: object
-        additionalProperties: false
-
-        description: |
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
           Video port for MIPI DSI input
 
-        properties:
-          reg:
-            const: 0
-
-        patternProperties:
-          endpoint:
-            type: object
-            additionalProperties: false
-
-            properties:
-              remote-endpoint: true
-
-        required:
-          - reg
-
       port@1:
-        type: object
-        additionalProperties: false
-
-        description: |
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
           Video port for MIPI DPI output (panel or connector).
 
-        properties:
-          reg:
-            const: 1
-
-        patternProperties:
-          endpoint:
-            type: object
-            additionalProperties: false
-
-            properties:
-              remote-endpoint: true
-
-        required:
-          - reg
-
     required:
-      - "#address-cells"
-      - "#size-cells"
       - port@0
       - port@1
 
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml
index c036a75db8f7ce29391ee5ae464c1262bd608c5b..eacfe716508327fd2b791b2c9aa0d75a3e175192 100644
--- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml
@@ -42,65 +42,30 @@ properties:
     const: refclk
 
   ports:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
-
       port@0:
-        type: object
-        additionalProperties: false
-
+        $ref: /schemas/graph.yaml#/$defs/port-base
+        unevaluatedProperties: false
         description: |
           Video port for RGB input
 
         properties:
-          reg:
-            const: 0
-
-        patternProperties:
           endpoint:
-            type: object
-            additionalProperties: false
+            $ref: /schemas/graph.yaml#/$defs/endpoint-base
+            unevaluatedProperties: false
 
             properties:
               data-lines:
                 enum: [ 16, 18, 24 ]
 
-              remote-endpoint: true
-
-        required:
-          - reg
-
       port@1:
-        type: object
-        additionalProperties: false
-
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Video port for DSI output (panel or connector).
 
-        properties:
-          reg:
-            const: 1
-
-        patternProperties:
-          endpoint:
-            type: object
-            additionalProperties: false
-
-            properties:
-              remote-endpoint: true
-
-        required:
-          - reg
-
     required:
-      - "#address-cells"
-      - "#size-cells"
       - port@0
       - port@1
 
@@ -156,4 +121,3 @@ examples:
         };
       };
     };
-    
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml
index b5959cc78b8d34e1ce51772b8dbc29532750d653..10471c6c1ff91b2681cec910da55ab1bdf2bdf9a 100644
--- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml
@@ -42,31 +42,22 @@ properties:
     description: Hardware reset, Low active
 
   ports:
-    type: object
-    description:
-      A node containing input and output port nodes with endpoint definitions
-      as documented in
-      Documentation/devicetree/bindings/media/video-interfaces.txt
-    properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
+    $ref: /schemas/graph.yaml#/properties/ports
 
+    properties:
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           DSI Input. The remote endpoint phandle should be a
           reference to a valid mipi_dsi_host device node.
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Video port for LVDS output (panel or connector).
 
       port@2:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: |
           Video port for Dual link LVDS output (panel or connector).
 
diff --git a/Documentation/devicetree/bindings/display/connector/analog-tv-connector.yaml b/Documentation/devicetree/bindings/display/connector/analog-tv-connector.yaml
index eebe88fed999e9f269a9a1a732d1e5c61f3c5940..a31ca2d52b868f16e04d06e5e3ca7e2f38571ad4 100644
--- a/Documentation/devicetree/bindings/display/connector/analog-tv-connector.yaml
+++ b/Documentation/devicetree/bindings/display/connector/analog-tv-connector.yaml
@@ -25,6 +25,7 @@ properties:
     $ref: /schemas/types.yaml#/definitions/uint32
 
   port:
+    $ref: /schemas/graph.yaml#/properties/port
     description: Connection to controller providing analog TV signals
 
 required:
diff --git a/Documentation/devicetree/bindings/display/connector/dp-connector.yaml b/Documentation/devicetree/bindings/display/connector/dp-connector.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1c17d60e7760068efc04299e66bf2853010012a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/connector/dp-connector.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/connector/dp-connector.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: DisplayPort Connector
+
+maintainers:
+  - Tomi Valkeinen <tomi.valkeinen@ti.com>
+
+properties:
+  compatible:
+    const: dp-connector
+
+  label: true
+
+  type:
+    enum:
+      - full-size
+      - mini
+
+  hpd-gpios:
+    description: A GPIO line connected to HPD
+    maxItems: 1
+
+  dp-pwr-supply:
+    description: Power supply for the DP_PWR pin
+    maxItems: 1
+
+  port:
+    $ref: /schemas/graph.yaml#/properties/port
+    description: Connection to controller providing DP signals
+
+required:
+  - compatible
+  - type
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    connector {
+        compatible = "dp-connector";
+        label = "dp0";
+        type = "full-size";
+
+        port {
+            dp_connector_in: endpoint {
+                remote-endpoint = <&dp_out>;
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/connector/dvi-connector.yaml b/Documentation/devicetree/bindings/display/connector/dvi-connector.yaml
index 71cb9220fa593e500df65b09c67e50d4611b91ac..93eb14294e6888d8cf22043a80b18146deb8b5c5 100644
--- a/Documentation/devicetree/bindings/display/connector/dvi-connector.yaml
+++ b/Documentation/devicetree/bindings/display/connector/dvi-connector.yaml
@@ -36,6 +36,7 @@ properties:
     description: the connector has pins for DVI dual-link
 
   port:
+    $ref: /schemas/graph.yaml#/properties/port
     description: Connection to controller providing DVI signals
 
 required:
diff --git a/Documentation/devicetree/bindings/display/connector/hdmi-connector.yaml b/Documentation/devicetree/bindings/display/connector/hdmi-connector.yaml
index 14d7128af592a98fadd532d2266dec3f81a0641c..83c0d008265b349f2e6ba529bbfe9b418c2d79f4 100644
--- a/Documentation/devicetree/bindings/display/connector/hdmi-connector.yaml
+++ b/Documentation/devicetree/bindings/display/connector/hdmi-connector.yaml
@@ -37,6 +37,7 @@ properties:
     maxItems: 1
 
   port:
+    $ref: /schemas/graph.yaml#/properties/port
     description: Connection to controller providing HDMI signals
 
 required:
diff --git a/Documentation/devicetree/bindings/display/connector/vga-connector.yaml b/Documentation/devicetree/bindings/display/connector/vga-connector.yaml
index 5782c4bb3252ba43bde59e1b514d8b83210b3b4f..25f86800200052ad78db03c4258142361876540c 100644
--- a/Documentation/devicetree/bindings/display/connector/vga-connector.yaml
+++ b/Documentation/devicetree/bindings/display/connector/vga-connector.yaml
@@ -20,6 +20,7 @@ properties:
     $ref: /schemas/types.yaml#/definitions/phandle
 
   port:
+    $ref: /schemas/graph.yaml#/properties/port
     description: Connection to controller providing VGA signals
 
 required:
diff --git a/Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml b/Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml
index f1f25aa794d935bb6dbbe8bdd4ebb80d589a9fb6..0091df9dd73b72974b003bb825b78161da79f077 100644
--- a/Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml
+++ b/Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml
@@ -74,7 +74,7 @@ properties:
       - description: Must be 400 MHz
 
   port:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/port
     description:
       A port node pointing to the input port of a HDMI/DP or MIPI display bridge.
 
diff --git a/Documentation/devicetree/bindings/display/ingenic,ipu.yaml b/Documentation/devicetree/bindings/display/ingenic,ipu.yaml
index 12064a8e7a92a09de004d31a8abd1abd2794bc3b..e679f48a38862f27c811a5687464092a5abbfae8 100644
--- a/Documentation/devicetree/bindings/display/ingenic,ipu.yaml
+++ b/Documentation/devicetree/bindings/display/ingenic,ipu.yaml
@@ -31,9 +31,8 @@ properties:
   clock-names:
     const: ipu
 
-patternProperties:
-  "^ports?$":
-    description: OF graph bindings (specified in bindings/graph.txt).
+  port:
+    $ref: /schemas/graph.yaml#/properties/port
 
 required:
   - compatible
diff --git a/Documentation/devicetree/bindings/display/ingenic,lcd.yaml b/Documentation/devicetree/bindings/display/ingenic,lcd.yaml
index 768050f30dbaf1287bc2fd8ae32574670cfd8e4b..50d2b0a50e8ac7b400a8228f6432c619c8574b83 100644
--- a/Documentation/devicetree/bindings/display/ingenic,lcd.yaml
+++ b/Documentation/devicetree/bindings/display/ingenic,lcd.yaml
@@ -39,18 +39,18 @@ properties:
     minItems: 1
 
   port:
-    description: OF graph bindings (specified in bindings/graph.txt).
+    $ref: /schemas/graph.yaml#/properties/port
 
   ports:
-    description: OF graph bindings (specified in bindings/graph.txt).
-    type: object
+    $ref: /schemas/graph.yaml#/properties/ports
+
     properties:
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: DPI output, to interface with TFT panels.
 
       port@8:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description: Link to the Image Processing Unit (IPU).
           (See ingenic,ipu.yaml).
 
diff --git a/Documentation/devicetree/bindings/display/intel,keembay-display.yaml b/Documentation/devicetree/bindings/display/intel,keembay-display.yaml
index 0a697d45c2ad9c9864883ce3b7c0ee854dc8e05a..bc6622b010cabf282b8c58ca140a3bd4945ea8aa 100644
--- a/Documentation/devicetree/bindings/display/intel,keembay-display.yaml
+++ b/Documentation/devicetree/bindings/display/intel,keembay-display.yaml
@@ -36,7 +36,7 @@ properties:
     maxItems: 1
 
   port:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/port
     description: Display output node to DSI.
 
 required:
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
index ed76332ec01e830dd0607a000afb6c4c34e5dedd..93b160df3eec4b684c15602cd86a34c2fe047aef 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
@@ -37,13 +37,14 @@ Required properties (all function blocks):
 	"mediatek,<chip>-disp-aal"   		- adaptive ambient light controller
 	"mediatek,<chip>-disp-gamma" 		- gamma correction
 	"mediatek,<chip>-disp-merge" 		- merge streams from two RDMA sources
+	"mediatek,<chip>-disp-postmask" 	- control rounded corners of the display frame
 	"mediatek,<chip>-disp-split" 		- split stream to two encoders
 	"mediatek,<chip>-disp-ufoe"  		- data compression engine
 	"mediatek,<chip>-dsi"        		- DSI controller, see mediatek,dsi.txt
 	"mediatek,<chip>-dpi"        		- DPI controller, see mediatek,dpi.txt
 	"mediatek,<chip>-disp-mutex" 		- display mutex
 	"mediatek,<chip>-disp-od"    		- overdrive
-  the supported chips are mt2701, mt7623, mt2712, mt8167 and mt8173.
+  the supported chips are mt2701, mt7623, mt2712, mt8167, mt8173, mt8183 and mt8192.
 - reg: Physical base address and length of the function block register space
 - interrupts: The interrupt signal from the function block (required, except for
   merge and split function blocks).
@@ -66,6 +67,14 @@ Required properties (DMA function blocks):
   argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
   for details.
 
+Optional properties (RDMA function blocks):
+- mediatek,rdma-fifo-size: the RDMA FIFO size may differ between RDMA instances
+  even within the same SoC, so add this property to the corresponding RDMA node.
+  The value is the maximum FIFO size defined in the hardware data sheet, e.g.:
+  mediatek,rdma-fifo-size of mt8173-rdma0 is 8K
+  mediatek,rdma-fifo-size of mt8183-rdma0 is 5K
+  mediatek,rdma-fifo-size of mt8183-rdma1 is 2K
+
 Examples:
 
 mmsys: clock-controller@14000000 {
@@ -103,6 +112,7 @@ rdma0: rdma@1400e000 {
 	clocks = <&mmsys CLK_MM_DISP_RDMA0>;
 	iommus = <&iommu M4U_PORT_DISP_RDMA0>;
 	mediatek,larb = <&larb0>;
+	mediatek,rdma-fifo-size = <8192>;
 };
 
 rdma1: rdma@1400f000 {
diff --git a/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml b/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
index 6b7fddc80c412702c706c367db005ddcdb1fe71f..67682fe77f1014216144d935c51db2424ba5bcb7 100644
--- a/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
+++ b/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
@@ -37,34 +37,33 @@ properties:
   panel-timing: true
 
   ports:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/ports
+
     properties:
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/$defs/port-base
+        unevaluatedProperties: false
         description: The sink for odd pixels.
         properties:
-          reg:
-            const: 0
-
           dual-lvds-odd-pixels: true
 
         required:
-          - reg
           - dual-lvds-odd-pixels
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/$defs/port-base
+        unevaluatedProperties: false
         description: The sink for even pixels.
         properties:
-          reg:
-            const: 1
-
           dual-lvds-even-pixels: true
 
         required:
-          - reg
           - dual-lvds-even-pixels
 
+    required:
+      - port@0
+      - port@1
+
 additionalProperties: false
 
 required:
diff --git a/Documentation/devicetree/bindings/display/panel/mantix,mlaf057we51-x.yaml b/Documentation/devicetree/bindings/display/panel/mantix,mlaf057we51-x.yaml
index 51f423297ec8447b46575016d8b428644ba4822d..9e78f2e60f990a6b58f5ff635e9daa006e3ca880 100644
--- a/Documentation/devicetree/bindings/display/panel/mantix,mlaf057we51-x.yaml
+++ b/Documentation/devicetree/bindings/display/panel/mantix,mlaf057we51-x.yaml
@@ -20,6 +20,7 @@ properties:
   compatible:
     enum:
       - mantix,mlaf057we51-x
+      - ys,ys57pss36bh5gq
 
   port: true
   reg:
diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.yaml b/Documentation/devicetree/bindings/display/panel/panel-common.yaml
index cd6dc5461721d7504a0e4722f3550f7f36f798a8..5b38dc89cb21bc9e90df321609cf81b6395ac460 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-common.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-common.yaml
@@ -68,16 +68,7 @@ properties:
 
   # Connectivity
   port:
-    type: object
-
-  ports:
-    type: object
-    description:
-      Panels receive video data through one or multiple connections. While
-      the nature of those connections is specific to the panel type, the
-      connectivity is expressed in a standard fashion using ports as specified
-      in the device graph bindings defined in
-      Documentation/devicetree/bindings/graph.txt.
+    $ref: /schemas/graph.yaml#/properties/port
 
   ddc-i2c-bus:
     $ref: /schemas/types.yaml#/definitions/phandle
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
index 72e4b6d4d5e1e517153575b9f15ee40c7fbf30af..fbd71669248f71c4b6b49d0b802405e019b812dd 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
@@ -35,6 +35,8 @@ properties:
       - boe,tv080wum-nl0
         # Innolux P079ZCA 7.85" 768x1024 TFT LCD panel
       - innolux,p079zca
+        # Khadas TS050 5" 1080x1920 LCD panel
+      - khadas,ts050
         # Kingdisplay KD097D04 9.7" 1536x2048 TFT LCD panel
       - kingdisplay,kd097d04
         # LG ACX467AKM-7 4.95" 1080×1920 LCD Panel
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 3207608d1178315f3379254263b87f2d153ebb3a..62b0d54d87b7f0e2c6c32c3cd7320da1640a9bad 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -76,6 +76,8 @@ properties:
         # BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel
       - boe,nv101wxmn51
+        # BOE NV110WTM-N61 11.0" 2160x1440 TFT LCD Panel
+      - boe,nv110wtm-n61
         # BOE NV133FHM-N61 13.3" FHD (1920x1080) TFT LCD Panel
       - boe,nv133fhm-n61
         # BOE NV133FHM-N62 13.3" FHD (1920x1080) TFT LCD Panel
       - boe,nv133fhm-n62
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
index 1dab80ae1d0a12e9499315706f33af2ccac84ccb..ea58df49263ac3bd08bc55a42a2fd1c374ad9b70 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
@@ -11,6 +11,7 @@ maintainers:
 
 allOf:
   - $ref: panel-common.yaml#
+  - $ref: /schemas/leds/backlight/common.yaml#
 
 properties:
   compatible:
@@ -19,6 +20,8 @@ properties:
   reg: true
   reset-gpios: true
   port: true
+  default-brightness: true
+  max-brightness: true
 
   vdd3-supply:
     description: VDD regulator
@@ -31,7 +34,6 @@ required:
   - reset-gpios
   - vdd3-supply
   - vci-supply
-  - port
 
 unevaluatedProperties: false
 
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
index 4110d003ce1f594748f049ec6beb38f0d6087582..008c144257cbb0cc2e2e3933f987eff7f81b390c 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
@@ -43,34 +43,24 @@ properties:
       This soc uses GRF regs to switch the HDMI TX input between vop0 and vop1.
 
   ports:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/ports
 
     properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
-
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           Port node with two endpoints, numbered 0 and 1,
           connected respectively to vop0 and vop1.
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           Port node with one endpoint connected to a hdmi-connector node.
 
     required:
-      - "#address-cells"
-      - "#size-cells"
       - port@0
       - port@1
 
-    additionalProperties: false
-
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
index ed8148e26e24df8b0c4405ead1edd050087591e1..6f43d885c9b30892ccdd7d3414fc7224f577962f 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
@@ -70,10 +70,7 @@ properties:
       - const: dclk
 
   port:
-    type: object
-    description:
-      A port node with endpoint definitions as defined in
-      Documentation/devicetree/bindings/media/video-interfaces.txt.
+    $ref: /schemas/graph.yaml#/properties/port
 
   assigned-clocks:
     maxItems: 2
diff --git a/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml b/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
index 327a14d85df85e6fba3c9bcdd1405e68434ba3b6..679daed4124ebc1ccc10a2cacc7744153e49cbd8 100644
--- a/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
@@ -51,20 +51,16 @@ properties:
       Phandle of the regulator that provides the supply voltage.
 
   ports:
-    type: object
-    description:
-      A node containing DSI input & output port nodes with endpoint
-      definitions as documented in
-      Documentation/devicetree/bindings/media/video-interfaces.txt
-      Documentation/devicetree/bindings/graph.txt
+    $ref: /schemas/graph.yaml#/properties/ports
+
     properties:
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           DSI input port node, connected to the ltdc rgb output port.
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           DSI output port node, connected to a panel or a bridge input port"
 
diff --git a/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml b/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
index bf8ad916e9b0a76c495b11ea787c52f7de56d1d1..d54f9ca207afaa9b4d851b93421fc5c9f5ffd343 100644
--- a/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
+++ b/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
@@ -35,15 +35,13 @@ properties:
     maxItems: 1
 
   port:
-    type: object
-    description:
-      "Video port for DPI RGB output.
+    $ref: /schemas/graph.yaml#/properties/port
+    description: |
+      Video port for DPI RGB output.
       ltdc has one video port with up to 2 endpoints:
       - for external dpi rgb panel or bridge, using gpios.
       - for internal dpi input of the MIPI DSI host controller.
       Note: These 2 endpoints cannot be activated simultaneously.
-      Please refer to the bindings defined in
-      Documentation/devicetree/bindings/media/video-interfaces.txt."
 
 required:
   - compatible
diff --git a/Documentation/devicetree/bindings/display/ste,mcde.txt b/Documentation/devicetree/bindings/display/ste,mcde.txt
deleted file mode 100644
index 4c33c692bd5f58b6dd01b826b03c29d2cb4e3cfb..0000000000000000000000000000000000000000
--- a/Documentation/devicetree/bindings/display/ste,mcde.txt
+++ /dev/null
@@ -1,104 +0,0 @@
-ST-Ericsson Multi Channel Display Engine MCDE
-
-The ST-Ericsson MCDE is a display controller with support for compositing
-and displaying several channels memory resident graphics data on DSI or
-LCD displays or bridges. It is used in the ST-Ericsson U8500 platform.
-
-Required properties:
-
-- compatible: must be:
-  "ste,mcde"
-- reg: register base for the main MCDE control registers, should be
-  0x1000 in size
-- interrupts: the interrupt line for the MCDE
-- epod-supply: a phandle to the EPOD regulator
-- vana-supply: a phandle to the analog voltage regulator
-- clocks: an array of the MCDE clocks in this strict order:
-  MCDECLK (main MCDE clock), LCDCLK (LCD clock), PLLDSI
-  (HDMI clock), DSI0ESCLK (DSI0 energy save clock),
-  DSI1ESCLK (DSI1 energy save clock), DSI2ESCLK (DSI2 energy
-  save clock)
-- clock-names: must be the following array:
-  "mcde", "lcd", "hdmi"
-  to match the required clock inputs above.
-- #address-cells: should be <1> (for the DSI hosts that will be children)
-- #size-cells: should be <1> (for the DSI hosts that will be children)
-- ranges: this should always be stated
-
-Required subnodes:
-
-The devicetree must specify subnodes for the DSI host adapters.
-These must have the following characteristics:
-
-- compatible: must be:
-  "ste,mcde-dsi"
-- reg: must specify the register range for the DSI host
-- vana-supply: phandle to the VANA voltage regulator
-- clocks: phandles to the high speed and low power (energy save) clocks
-  the high speed clock is not present on the third (dsi2) block, so it
-  should only have the "lp" clock
-- clock-names: "hs" for the high speed clock and "lp" for the low power
-  (energy save) clock
-- #address-cells: should be <1>
-- #size-cells: should be <0>
-
-Display panels and bridges will appear as children on the DSI hosts, and
-the displays are connected to the DSI hosts using the common binding
-for video transmitter interfaces; see
-Documentation/devicetree/bindings/media/video-interfaces.txt
-
-If a DSI host is unused (not connected) it will have no children defined.
-
-Example:
-
-mcde@a0350000 {
-	compatible = "ste,mcde";
-	reg = <0xa0350000 0x1000>;
-	interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
-	epod-supply = <&db8500_b2r2_mcde_reg>;
-	vana-supply = <&ab8500_ldo_ana_reg>;
-	clocks = <&prcmu_clk PRCMU_MCDECLK>, /* Main MCDE clock */
-		 <&prcmu_clk PRCMU_LCDCLK>, /* LCD clock */
-		 <&prcmu_clk PRCMU_PLLDSI>; /* HDMI clock */
-	clock-names = "mcde", "lcd", "hdmi";
-	#address-cells = <1>;
-	#size-cells = <1>;
-	ranges;
-
-	dsi0: dsi@a0351000 {
-		compatible = "ste,mcde-dsi";
-		reg = <0xa0351000 0x1000>;
-		vana-supply = <&ab8500_ldo_ana_reg>;
-		clocks = <&prcmu_clk PRCMU_DSI0CLK>, <&prcmu_clk PRCMU_DSI0ESCCLK>;
-		clock-names = "hs", "lp";
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		panel {
-			compatible = "samsung,s6d16d0";
-			reg = <0>;
-			vdd1-supply = <&ab8500_ldo_aux1_reg>;
-			reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
-		};
-
-	};
-	dsi1: dsi@a0352000 {
-		compatible = "ste,mcde-dsi";
-		reg = <0xa0352000 0x1000>;
-		vana-supply = <&ab8500_ldo_ana_reg>;
-		clocks = <&prcmu_clk PRCMU_DSI1CLK>, <&prcmu_clk PRCMU_DSI1ESCCLK>;
-		clock-names = "hs", "lp";
-		#address-cells = <1>;
-		#size-cells = <0>;
-	};
-	dsi2: dsi@a0353000 {
-		compatible = "ste,mcde-dsi";
-		reg = <0xa0353000 0x1000>;
-		vana-supply = <&ab8500_ldo_ana_reg>;
-		/* This DSI port only has the Low Power / Energy Save clock */
-		clocks = <&prcmu_clk PRCMU_DSI2ESCCLK>;
-		clock-names = "lp";
-		#address-cells = <1>;
-		#size-cells = <0>;
-	};
-};
diff --git a/Documentation/devicetree/bindings/display/ste,mcde.yaml b/Documentation/devicetree/bindings/display/ste,mcde.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..de0c678b3c29406e365ca5382c39dcfe06d9db44
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ste,mcde.yaml
@@ -0,0 +1,168 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/ste,mcde.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ST-Ericsson Multi Channel Display Engine MCDE
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+properties:
+  compatible:
+    const: ste,mcde
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    description: an array of the MCDE clocks
+    items:
+      - description: MCDECLK (main MCDE clock)
+      - description: LCDCLK (LCD clock)
+      - description: PLLDSI (HDMI clock)
+
+  clock-names:
+    items:
+      - const: mcde
+      - const: lcd
+      - const: hdmi
+
+  resets:
+    maxItems: 1
+
+  epod-supply:
+    description: a phandle to the EPOD regulator
+
+  vana-supply:
+    description: a phandle to the analog voltage regulator
+
+  port:
+    $ref: /schemas/graph.yaml#/properties/port
+    description:
+      A DPI port node
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 1
+
+  ranges: true
+
+patternProperties:
+  "^dsi@[0-9a-f]+$":
+    description: subnodes for the three DSI host adapters
+    type: object
+    allOf:
+      - $ref: dsi-controller.yaml#
+    properties:
+      compatible:
+        const: ste,mcde-dsi
+
+      reg:
+        maxItems: 1
+
+      vana-supply:
+        description: a phandle to the analog voltage regulator
+
+      clocks:
+        description: Phandles to the high speed and low power (energy save)
+          clocks. The high speed clock is not present on the third (dsi2)
+          block, so that block should only have the "lp" clock.
+        minItems: 1
+        maxItems: 2
+
+      clock-names:
+        oneOf:
+          - items:
+              - const: hs
+              - const: lp
+          - items:
+              - const: lp
+
+    required:
+      - compatible
+      - reg
+      - vana-supply
+      - clocks
+      - clock-names
+
+    unevaluatedProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - epod-supply
+  - vana-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/mfd/dbx500-prcmu.h>
+    #include <dt-bindings/gpio/gpio.h>
+
+    mcde@a0350000 {
+      compatible = "ste,mcde";
+      reg = <0xa0350000 0x1000>;
+      interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+      epod-supply = <&db8500_b2r2_mcde_reg>;
+      vana-supply = <&ab8500_ldo_ana_reg>;
+      clocks = <&prcmu_clk PRCMU_MCDECLK>,
+               <&prcmu_clk PRCMU_LCDCLK>,
+               <&prcmu_clk PRCMU_PLLDSI>;
+      clock-names = "mcde", "lcd", "hdmi";
+      #address-cells = <1>;
+      #size-cells = <1>;
+      ranges;
+
+      dsi0: dsi@a0351000 {
+        compatible = "ste,mcde-dsi";
+        reg = <0xa0351000 0x1000>;
+        vana-supply = <&ab8500_ldo_ana_reg>;
+        clocks = <&prcmu_clk PRCMU_DSI0CLK>, <&prcmu_clk PRCMU_DSI0ESCCLK>;
+        clock-names = "hs", "lp";
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+          compatible = "samsung,s6d16d0";
+          reg = <0>;
+          vdd1-supply = <&ab8500_ldo_aux1_reg>;
+          reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+        };
+      };
+
+      dsi1: dsi@a0352000 {
+        compatible = "ste,mcde-dsi";
+        reg = <0xa0352000 0x1000>;
+        vana-supply = <&ab8500_ldo_ana_reg>;
+        clocks = <&prcmu_clk PRCMU_DSI1CLK>, <&prcmu_clk PRCMU_DSI1ESCCLK>;
+        clock-names = "hs", "lp";
+        #address-cells = <1>;
+        #size-cells = <0>;
+      };
+
+      dsi2: dsi@a0353000 {
+        compatible = "ste,mcde-dsi";
+        reg = <0xa0353000 0x1000>;
+        vana-supply = <&ab8500_ldo_ana_reg>;
+        /* This DSI port only has the Low Power / Energy Save clock */
+        clocks = <&prcmu_clk PRCMU_DSI2ESCCLK>;
+        clock-names = "lp";
+        #address-cells = <1>;
+        #size-cells = <0>;
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
index 4dc30738ee576cc579f14294e72723aa0cba9886..781c1868b0b8f629851d7f3eabf54282e88ab918 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
+++ b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
@@ -74,30 +74,19 @@ properties:
     type: boolean
 
   ports:
-    type: object
-    description:
-      Ports as described in Documentation/devicetree/bindings/graph.txt
-    properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
+    $ref: /schemas/graph.yaml#/properties/ports
 
+    properties:
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           The DSS OLDI output port node form video port 1
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           The DSS DPI output port node from video port 2
 
-    required:
-      - "#address-cells"
-      - "#size-cells"
-
   ti,am65x-oldi-io-ctrl:
     $ref: "/schemas/types.yaml#/definitions/phandle-array"
     maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
index c9a947d55fa4a5999c45c5430dbbb5ba237ad085..2986f9acc9f00615fe0460fd1c16239ba78b22f1 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
+++ b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
@@ -107,40 +107,29 @@ properties:
     type: boolean
 
   ports:
-    type: object
-    description:
-      Ports as described in Documentation/devicetree/bindings/graph.txt
-    properties:
-      "#address-cells":
-        const: 1
-
-      "#size-cells":
-        const: 0
+    $ref: /schemas/graph.yaml#/properties/ports
 
+    properties:
       port@0:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           The output port node form video port 1
 
       port@1:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           The output port node from video port 2
 
       port@2:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           The output port node from video port 3
 
       port@3:
-        type: object
+        $ref: /schemas/graph.yaml#/properties/port
         description:
           The output port node from video port 4
 
-    required:
-      - "#address-cells"
-      - "#size-cells"
-
   max-memory-bandwidth:
     $ref: /schemas/types.yaml#/definitions/uint32
     description:
diff --git a/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
index 8f87b82c66951e1216aa549fc461db36d22115b8..7ce7bbad57803ca70a66fab61e8636ae6849a1cd 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
+++ b/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
@@ -54,9 +54,8 @@ properties:
     description: phandle to the associated power domain
 
   port:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/port
     description:
-      Port as described in Documentation/devicetree/bindings/graph.txt.
       The DSS DPI output port node
 
   max-memory-bandwidth:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index b4e026f2d6c52d892bd3f1ad3ec556ce53630607..9d5680c7c8530499898867ab22255044d376a843 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -1272,6 +1272,8 @@ patternProperties:
     description: YSH & ATIL
   "^yones-toptech,.*":
     description: Yones Toptech Co., Ltd.
+  "^ys,.*":
+    description: Shenzhen Yashi Changhua Intelligent Technology Co., Ltd.
   "^ysoft,.*":
     description: Y Soft Corporation a.s.
   "^zealz,.*":
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 3c5ae4f6dfd2340f1de3b6fc68be2aac0b39f284..87e5023e3f5508e047cc1025df05009acdf2b5b7 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -319,6 +319,15 @@ CRTC Functions Reference
 .. kernel-doc:: drivers/gpu/drm/drm_crtc.c
    :export:
 
+Color Management Functions Reference
+------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_color_mgmt.h
+   :internal:
+
 Frame Buffer Abstraction
 ========================
 
@@ -370,6 +379,21 @@ Plane Functions Reference
 .. kernel-doc:: drivers/gpu/drm/drm_plane.c
    :export:
 
+Plane Composition Functions Reference
+-------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_blend.c
+   :export:
+
+Plane Damage Tracking Functions Reference
+-----------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_damage_helper.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_damage_helper.h
+   :internal:
+
 Display Modes Function Reference
 ================================
 
@@ -436,6 +460,9 @@ KMS Locking
 KMS Properties
 ==============
 
+This section of the documentation is primarily aimed at user-space developers.
+For the driver APIs, see the other sections.
+
 Property Types and Blob Property Support
 ----------------------------------------
 
@@ -466,39 +493,30 @@ Standard CRTC Properties
 .. kernel-doc:: drivers/gpu/drm/drm_crtc.c
    :doc: standard CRTC properties
 
+Standard Plane Properties
+-------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_plane.c
+   :doc: standard plane properties
+
 Plane Composition Properties
 ----------------------------
 
 .. kernel-doc:: drivers/gpu/drm/drm_blend.c
    :doc: overview
 
-.. kernel-doc:: drivers/gpu/drm/drm_blend.c
-   :export:
-
-FB_DAMAGE_CLIPS
-~~~~~~~~~~~~~~~
+Damage Tracking Properties
+--------------------------
 
 .. kernel-doc:: drivers/gpu/drm/drm_damage_helper.c
    :doc: overview
 
-.. kernel-doc:: drivers/gpu/drm/drm_damage_helper.c
-   :export:
-
-.. kernel-doc:: include/drm/drm_damage_helper.h
-   :internal:
-
 Color Management Properties
 ---------------------------
 
 .. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
    :doc: overview
 
-.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
-   :export:
-
-.. kernel-doc:: include/drm/drm_color_mgmt.h
-   :internal:
-
 Tile Group Property
 -------------------
 
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index 7dce175f6d75bc13745d52f3ebea992d8c212698..04bdc7a91d5371fe7df901361d454d9be48c4777 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -457,5 +457,8 @@ Userspace API Structures
 .. kernel-doc:: include/uapi/drm/drm_mode.h
    :doc: overview
 
+.. kernel-doc:: include/uapi/drm/drm.h
+   :internal:
+
 .. kernel-doc:: include/uapi/drm/drm_mode.h
    :internal:
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 20868f3d0123606291b3fc01cb593e9acf6c6ad1..486c720f38907f8629a3abd53325ff3042a1c9c1 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -428,7 +428,7 @@ User Batchbuffer Execution
 Logical Rings, Logical Ring Contexts and Execlists
 --------------------------------------------------
 
-.. kernel-doc:: drivers/gpu/drm/i915/gt/intel_lrc.c
+.. kernel-doc:: drivers/gpu/drm/i915/gt/intel_execlists_submission.c
    :doc: Logical Rings, Logical Ring Contexts and Execlists
 
 Global GTT views
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 009d8e6c7e3c3746b61985333c2cf50f5575be04..77fbfe93df56fc25b8b903764d86d3fb19fd0e69 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -23,6 +23,9 @@ Advanced: Tricky tasks that need fairly good understanding of the DRM subsystem
 and graphics topics. Generally need the relevant hardware for development and
 testing.
 
+Expert: Only attempt these if you've successfully completed some tricky
+refactorings already and are an expert in the specific area.
+
 Subsystem-wide refactorings
 ===========================
 
@@ -168,6 +171,22 @@ Contact: Daniel Vetter, respective driver maintainers
 
 Level: Advanced
 
+Move Buffer Object Locking to dma_resv_lock()
+---------------------------------------------
+
+Many drivers have their own per-object locking scheme, usually using
+mutex_lock(). This causes all kinds of trouble for buffer sharing, since
+depending on which driver is the exporter and importer, the locking hierarchy is
+reversed.
+
+To solve this we need one standard per-object locking mechanism, which is
+dma_resv_lock(). This lock needs to be called as the outermost lock, with all
+other driver specific per-object locks removed. The problem is tha rolling out
+the actual change to the locking contract is a flag day, due to struct dma_buf
+buffer sharing.
+
+Level: Expert
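+A hedged sketch of the target contract (illustrative only, not taken from any
+driver)::
+
+    dma_resv_lock(obj->resv, NULL);
+    /* touch the object's backing storage; no driver lock nested inside */
+    dma_resv_unlock(obj->resv);
+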
+
 Convert logging to drm_* functions with drm_device paramater
 ------------------------------------------------------------
 
diff --git a/Documentation/gpu/vkms.rst b/Documentation/gpu/vkms.rst
index 13bab1d93bb3bb20a9ccb9ae7535071ebd82dc0e..2c9b376da5ca877d27a2672dd9e29d4f55d564dd 100644
--- a/Documentation/gpu/vkms.rst
+++ b/Documentation/gpu/vkms.rst
@@ -7,6 +7,88 @@
 .. kernel-doc:: drivers/gpu/drm/vkms/vkms_drv.c
    :doc: vkms (Virtual Kernel Modesetting)
 
+Setup
+=====
+
+The VKMS driver can be set up with the following steps:
+
+To check if VKMS is loaded, run::
+
+  lsmod | grep vkms
+
+This should list the VKMS driver. If there is no output, you need to
+enable and/or load the VKMS driver.
+Ensure that the VKMS driver has been set as a loadable module in your
+kernel config file. Do::
+
+  make nconfig
+
+  Go to `Device Drivers > Graphics support`
+
+  Enable `Virtual KMS (EXPERIMENTAL)`
+
+Rebuild the kernel for the changes to take effect.
+Now, to load the driver, use::
+
+  sudo modprobe vkms
+
+Running the lsmod command again should now list the VKMS driver.
+You can also see the driver being loaded in the dmesg logs.
+
+The VKMS driver has optional features to simulate different kinds of hardware,
+which are exposed as module options. You can use the `modinfo` command
+to see the module options for vkms::
+
+  modinfo vkms
+
+Module options are helpful when testing, and they can be set while loading
+vkms. For example, to load vkms with cursor enabled,
+use::
+
+  sudo modprobe vkms enable_cursor=1
+
+To unload the driver, use::
+
+  sudo modprobe -r vkms
+
+Testing With IGT
+================
+
+IGT GPU Tools is a test suite used specifically for the debugging and
+development of DRM drivers.
+The IGT tools can be installed from the
+`IGT repository <https://gitlab.freedesktop.org/drm/igt-gpu-tools>`_.
+
+The tests need to be run without a compositor, so you need to switch to
+text-only mode. You can do this by running::
+
+  sudo systemctl isolate multi-user.target
+
+To return to graphical mode, do::
+
+  sudo systemctl isolate graphical.target
+
+Once you are in text-only mode, you can run tests using the --device switch
+or the IGT_DEVICE variable to specify the device filter for the driver you
+want to test. IGT_DEVICE can also be used with the run-tests.sh script to
+run the tests for a specific driver::
+
+  sudo ./build/tests/<name of test> --device "sys:/sys/devices/platform/vkms"
+  sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./build/tests/<name of test>
+  sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./scripts/run-tests.sh -t <name of test>
+
+For example, to test the functionality of the writeback library,
+you can run the kms_writeback test::
+
+  sudo ./build/tests/kms_writeback --device "sys:/sys/devices/platform/vkms"
+  sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./build/tests/kms_writeback
+  sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./scripts/run-tests.sh -t kms_writeback
+
+You can also run subtests if you do not want to run the entire test::
+
+  sudo ./build/tests/kms_flip --run-subtest basic-plain-flip --device "sys:/sys/devices/platform/vkms"
+  sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./build/tests/kms_flip --run-subtest basic-plain-flip
+
 TODO
 ====
 
diff --git a/MAINTAINERS b/MAINTAINERS
index b9d55c84fc12612da4a119539b22afbe20d6b816..fffcb42ed4d740ae17a40f669a2d6991ebb86210 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5760,6 +5760,7 @@ F:	drivers/gpu/drm/vboxvideo/
 DRM DRIVER FOR VMWARE VIRTUAL GPU
 M:	"VMware Graphics" <linux-graphics-maintainer@vmware.com>
 M:	Roland Scheidegger <sroland@vmware.com>
+M:	Zack Rusin <zackr@vmware.com>
 L:	dri-devel@lists.freedesktop.org
 S:	Supported
 T:	git git://people.freedesktop.org/~sroland/linux
@@ -5961,8 +5962,8 @@ F:	Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
 F:	drivers/gpu/drm/stm
 
 DRM DRIVERS FOR TI KEYSTONE
-M:	Jyri Sarha <jsarha@ti.com>
-M:	Tomi Valkeinen <tomi.valkeinen@ti.com>
+M:	Jyri Sarha <jyri.sarha@iki.fi>
+M:	Tomi Valkeinen <tomba@kernel.org>
 L:	dri-devel@lists.freedesktop.org
 S:	Maintained
 T:	git git://anongit.freedesktop.org/drm/drm-misc
@@ -5972,15 +5973,15 @@ F:	Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
 F:	drivers/gpu/drm/tidss/
 
 DRM DRIVERS FOR TI LCDC
-M:	Jyri Sarha <jsarha@ti.com>
-R:	Tomi Valkeinen <tomi.valkeinen@ti.com>
+M:	Jyri Sarha <jyri.sarha@iki.fi>
+R:	Tomi Valkeinen <tomba@kernel.org>
 L:	dri-devel@lists.freedesktop.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/display/tilcdc/
 F:	drivers/gpu/drm/tilcdc/
 
 DRM DRIVERS FOR TI OMAP
-M:	Tomi Valkeinen <tomi.valkeinen@ti.com>
+M:	Tomi Valkeinen <tomba@kernel.org>
 L:	dri-devel@lists.freedesktop.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/display/ti/
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 5f1a8bd1388049edd7413cec78d62703786dcdbc..e025b7c9a3572e45c8bb6187edf4816357f72021 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -518,6 +518,9 @@
 						clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
 							 <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
 						clock-names = "fck", "sys_clk";
+
+						#address-cells = <1>;
+						#size-cells = <0>;
 					};
 				};
 
@@ -550,6 +553,9 @@
 						clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
 							 <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
 						clock-names = "fck", "sys_clk";
+
+						#address-cells = <1>;
+						#size-cells = <0>;
 					};
 				};
 
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index b26ef4866a35e63ad1ebb9e9d15282b3d40703f7..f250bf1cc022c90d5b5875b47c6dd8721be409aa 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -356,8 +356,8 @@ CONFIG_DRM_OMAP=m
 CONFIG_OMAP5_DSS_HDMI=y
 CONFIG_OMAP2_DSS_SDI=y
 CONFIG_OMAP2_DSS_DSI=y
-CONFIG_DRM_OMAP_PANEL_DSI_CM=m
 CONFIG_DRM_TILCDC=m
+CONFIG_DRM_PANEL_DSI_CM=m
 CONFIG_DRM_PANEL_SIMPLE=m
 CONFIG_DRM_PANEL_LG_LB035Q02=m
 CONFIG_DRM_PANEL_NEC_NL8048HL11=m
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 4f8224a6ac9562c0ad10b781c9c55b851c5b7453..4e16c71c24b71d27e5df9cc3368d5209af2607c5 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -50,6 +50,14 @@ config DMABUF_MOVE_NOTIFY
 	  This is marked experimental because we don't yet have a consistent
 	  execution context and memory management between drivers.
 
+config DMABUF_DEBUG
+	bool "DMA-BUF debug checks"
+	default y if DMA_API_DEBUG
+	help
+	  This option enables additional checks for DMA-BUF importers and
+	  exporters. Specifically it validates that importers do not peek at the
+	  underlying struct page when they import a buffer.
+
 config DMABUF_SELFTESTS
 	tristate "Selftests for the dma-buf interfaces"
 	default n
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 9ad6397aaa97e10b580a23b54836162e765ae7ff..f264b70c383eb4ec5db083bc455296929668dc4d 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -493,7 +493,7 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
  *
  * 4. Once a driver is done with a shared buffer it needs to call
  *    dma_buf_detach() (after cleaning up any mappings) and then release the
- *    reference acquired with dma_buf_get by calling dma_buf_put().
+ *    reference acquired with dma_buf_get() by calling dma_buf_put().
  *
  * For the detailed semantics exporters are expected to implement see
  * &dma_buf_ops.
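+ *
+ * A hedged sketch of this import sequence (illustrative only, with error
+ * handling trimmed; ``fd``, ``dev`` and the DMA direction are stand-ins)::
+ *
+ *     dmabuf = dma_buf_get(fd);
+ *     attach = dma_buf_attach(dmabuf, dev);
+ *     sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ *
+ *     ... device DMA through sgt ...
+ *
+ *     dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+ *     dma_buf_detach(dmabuf, attach);
+ *     dma_buf_put(dmabuf);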
@@ -509,9 +509,10 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
  *			by the exporter. see &struct dma_buf_export_info
  *			for further details.
  *
- * Returns, on success, a newly created dma_buf object, which wraps the
- * supplied private data and operations for dma_buf_ops. On either missing
- * ops, or error in allocating struct dma_buf, will return negative error.
+ * Returns, on success, a newly created struct dma_buf object, which wraps the
+ * supplied private data and operations for struct dma_buf_ops. On either
+ * missing ops or an error allocating the struct dma_buf, this returns a
+ * negative error.
  *
  * For most cases the easiest way to create @exp_info is through the
  * %DEFINE_DMA_BUF_EXPORT_INFO macro.
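+ *
+ * A minimal exporter-side sketch (illustrative only; my_dmabuf_ops, obj and
+ * size are assumed to be driver-defined, not part of this file)::
+ *
+ *     DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ *
+ *     exp_info.ops = &my_dmabuf_ops;
+ *     exp_info.size = size;
+ *     exp_info.flags = O_RDWR;
+ *     exp_info.priv = obj;
+ *     dmabuf = dma_buf_export(&exp_info);
+ *     if (IS_ERR(dmabuf))
+ *             return PTR_ERR(dmabuf);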
@@ -597,7 +598,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 EXPORT_SYMBOL_GPL(dma_buf_export);
 
 /**
- * dma_buf_fd - returns a file descriptor for the given dma_buf
+ * dma_buf_fd - returns a file descriptor for the given struct dma_buf
  * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
  * @flags:      [in]    flags to give to fd
  *
@@ -621,10 +622,10 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags)
 EXPORT_SYMBOL_GPL(dma_buf_fd);
 
 /**
- * dma_buf_get - returns the dma_buf structure related to an fd
- * @fd:	[in]	fd associated with the dma_buf to be returned
+ * dma_buf_get - returns the struct dma_buf related to an fd
+ * @fd:	[in]	fd associated with the struct dma_buf to be returned
  *
- * On success, returns the dma_buf structure associated with an fd; uses
+ * On success, returns the struct dma_buf associated with an fd; uses
  * file's refcounting done by fget to increase refcount. returns ERR_PTR
  * otherwise.
  */
@@ -665,9 +666,36 @@ void dma_buf_put(struct dma_buf *dmabuf)
 }
 EXPORT_SYMBOL_GPL(dma_buf_put);
 
+static void mangle_sg_table(struct sg_table *sg_table)
+{
+#ifdef CONFIG_DMABUF_DEBUG
+	int i;
+	struct scatterlist *sg;
+
+	/* To catch abuse of the underlying struct page by importers, mix
+	 * up the bits, but take care to preserve the low SG_ bits to
+	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
+	 * before passing the sgt back to the exporter. */
+	for_each_sgtable_sg(sg_table, sg, i)
+		sg->page_link ^= ~0xffUL;
+#endif
+}
+
+static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
+				      enum dma_data_direction direction)
+{
+	struct sg_table *sg_table;
+
+	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+
+	if (!IS_ERR_OR_NULL(sg_table))
+		mangle_sg_table(sg_table);
+
+	return sg_table;
+}
+
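+/*
+ * Note: with CONFIG_DMABUF_DEBUG enabled, an importer that wrongly digs the
+ * backing pages out of a mapped table, e.g. via sg_page(sgt->sgl), sees a
+ * scrambled pointer and fails loudly instead of silently relying on
+ * exporter-private state.
+ */
+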
 /**
- * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
- * calls attach() of dma_buf_ops to allow device-specific attach functionality
+ * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
  * @dmabuf:		[in]	buffer to attach device to.
  * @dev:		[in]	device to be attached.
  * @importer_ops:	[in]	importer operations for the attachment
@@ -676,6 +704,9 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
  * must be cleaned up by calling dma_buf_detach().
  *
+ * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
+ * functionality.
+ *
  * Returns:
  *
  * A pointer to newly created &dma_buf_attachment on success, or a negative
@@ -734,7 +765,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 				goto err_unlock;
 		}
 
-		sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
+		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
 		if (!sgt)
 			sgt = ERR_PTR(-ENOMEM);
 		if (IS_ERR(sgt)) {
@@ -781,13 +812,24 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 }
 EXPORT_SYMBOL_GPL(dma_buf_attach);
 
+static void __unmap_dma_buf(struct dma_buf_attachment *attach,
+			    struct sg_table *sg_table,
+			    enum dma_data_direction direction)
+{
+	/* uses XOR, hence this unmangles */
+	mangle_sg_table(sg_table);
+
+	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+}
+
 /**
- * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
- * optionally calls detach() of dma_buf_ops for device-specific detach
+ * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
  * @dmabuf:	[in]	buffer to detach from.
  * @attach:	[in]	attachment to be detached; is free'd after this call.
  *
  * Clean up a device attachment obtained by calling dma_buf_attach().
+ *
+ * Optionally this calls &dma_buf_ops.detach for device-specific detach.
  */
 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 {
@@ -798,7 +840,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 		if (dma_buf_is_dynamic(attach->dmabuf))
 			dma_resv_lock(attach->dmabuf->resv, NULL);
 
-		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
+		__unmap_dma_buf(attach, attach->sgt, attach->dir);
 
 		if (dma_buf_is_dynamic(attach->dmabuf)) {
 			dma_buf_unpin(attach);
@@ -818,9 +860,15 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
 
 /**
  * dma_buf_pin - Lock down the DMA-buf
- *
  * @attach:	[in]	attachment which should be pinned
  *
+ * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
+ * call this, and only for limited use cases like scanout and not for temporary
+ * pin operations. It is not permitted to allow userspace to pin arbitrary
+ * amounts of buffers through this interface.
+ *
+ * Buffers must be unpinned by calling dma_buf_unpin().
+ *
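+ * A hedged sketch of a dynamic importer pinning for scanout (importer_ops
+ * and importer_priv are assumed driver-defined, not part of this file)::
+ *
+ *     attach = dma_buf_dynamic_attach(dmabuf, dev, &importer_ops,
+ *                                     importer_priv);
+ *     dma_resv_lock(dmabuf->resv, NULL);
+ *     ret = dma_buf_pin(attach);
+ *     dma_resv_unlock(dmabuf->resv);
+ *
+ *     ... scanout from the pinned buffer ...
+ *
+ *     dma_resv_lock(dmabuf->resv, NULL);
+ *     dma_buf_unpin(attach);
+ *     dma_resv_unlock(dmabuf->resv);
+ *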
  * Returns:
  * 0 on success, negative error code on failure.
  */
@@ -829,6 +877,8 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
 	struct dma_buf *dmabuf = attach->dmabuf;
 	int ret = 0;
 
+	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+
 	dma_resv_assert_held(dmabuf->resv);
 
 	if (dmabuf->ops->pin)
@@ -839,14 +889,19 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
 EXPORT_SYMBOL_GPL(dma_buf_pin);
 
 /**
- * dma_buf_unpin - Remove lock from DMA-buf
- *
+ * dma_buf_unpin - Unpin a DMA-buf
  * @attach:	[in]	attachment which should be unpinned
+ *
+ * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
+ * any mapping of @attach again and inform the importer through
+ * &dma_buf_attach_ops.move_notify.
  */
 void dma_buf_unpin(struct dma_buf_attachment *attach)
 {
 	struct dma_buf *dmabuf = attach->dmabuf;
 
+	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+
 	dma_resv_assert_held(dmabuf->resv);
 
 	if (dmabuf->ops->unpin)
@@ -907,7 +962,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 		}
 	}
 
-	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+	sg_table = __map_dma_buf(attach, direction);
 	if (!sg_table)
 		sg_table = ERR_PTR(-ENOMEM);
 
@@ -970,7 +1025,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 	if (dma_buf_is_dynamic(attach->dmabuf))
 		dma_resv_assert_held(attach->dmabuf->resv);
 
-	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+	__unmap_dma_buf(attach, sg_table, direction);
 
 	if (dma_buf_is_dynamic(attach->dmabuf) &&
 	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
@@ -1014,15 +1069,15 @@ EXPORT_SYMBOL_GPL(dma_buf_move_notify);
  *   vmalloc space might be limited and result in vmap calls failing.
  *
  *   Interfaces::
+ *
  *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
  *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
  *
  *   The vmap call can fail if there is no vmap support in the exporter, or if
- *   it runs out of vmalloc space. Fallback to kmap should be implemented. Note
- *   that the dma-buf layer keeps a reference count for all vmap access and
- *   calls down into the exporter's vmap function only when no vmapping exists,
- *   and only unmaps it once. Protection against concurrent vmap/vunmap calls is
- *   provided by taking the dma_buf->lock mutex.
+ *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
+ *   count for all vmap access and calls down into the exporter's vmap function
+ *   only when no vmapping exists, and only unmaps it once. Protection against
+ *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
  *
  * - For full compatibility on the importer side with existing userspace
  *   interfaces, which might already support mmap'ing buffers. This is needed in
@@ -1074,11 +1129,12 @@ EXPORT_SYMBOL_GPL(dma_buf_move_notify);
  *   shootdowns would increase the complexity quite a bit.
  *
  *   Interface::
+ *
  *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
  *		       unsigned long);
  *
  *   If the importing subsystem simply provides a special-purpose mmap call to
- *   set up a mapping in userspace, calling do_mmap with dma_buf->file will
+ *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
  *   equally achieve that for a dma-buf object.
  */
 
@@ -1111,6 +1167,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
  * dma_buf_end_cpu_access(). Only when cpu access is braketed by both calls is
  * it guaranteed to be coherent with other DMA access.
  *
+ * This function will also wait for any DMA transactions tracked through
+ * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
+ * synchronization this function will only ensure cache coherency, callers must
+ * ensure synchronization with such DMA transactions on their own.
+ *
  * Can return negative error values, returns 0 on success.
  */
 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
@@ -1121,6 +1182,8 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	if (WARN_ON(!dmabuf))
 		return -EINVAL;
 
+	might_lock(&dmabuf->resv->lock.base);
+
 	if (dmabuf->ops->begin_cpu_access)
 		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
 
@@ -1154,6 +1217,8 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 
 	WARN_ON(!dmabuf);
 
+	might_lock(&dmabuf->resv->lock.base);
+
 	if (dmabuf->ops->end_cpu_access)
 		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
 
@@ -1212,7 +1277,10 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
  * This call may fail due to lack of virtual mapping address space.
  * These calls are optional in drivers. The intended use for them
  * is for mapping objects linear in kernel space for high use objects.
- * Please attempt to use kmap/kunmap before thinking about these interfaces.
+ *
+ * To ensure coherency users must call dma_buf_begin_cpu_access() and
+ * dma_buf_end_cpu_access() around any cpu access performed through this
+ * mapping.
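+ *
+ * A hedged sketch of that bracketing (illustrative only; assumes the
+ * struct dma_buf_map based dma_buf_vmap() of this kernel, error paths
+ * trimmed)::
+ *
+ *     struct dma_buf_map map;
+ *
+ *     dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
+ *     if (!dma_buf_vmap(dmabuf, &map)) {
+ *             ... CPU reads through map.vaddr ...
+ *             dma_buf_vunmap(dmabuf, &map);
+ *     }
+ *     dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);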
  *
  * Returns 0 on success, or a negative errno code otherwise.
  */
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index e593064341c8c2a973546eeab98acc8f16ef0f71..c8a12d7ad71ab6baef2298b60619b0d25edc2d03 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -471,8 +471,11 @@ static int thread_signal_callback(void *arg)
 			dma_fence_signal(f1);
 
 		smp_store_mb(cb.seen, false);
-		if (!f2 || dma_fence_add_callback(f2, &cb.cb, simple_callback))
-			miss++, cb.seen = true;
+		if (!f2 ||
+		    dma_fence_add_callback(f2, &cb.cb, simple_callback)) {
+			miss++;
+			cb.seen = true;
+		}
 
 		if (!t->before)
 			dma_fence_signal(f1);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 0973f408d75fe388bc34e3400c8684ce75fd7bfc..8bf103de1594c813a10448bae994d314e0374050 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -214,10 +214,6 @@ config DRM_GEM_SHMEM_HELPER
 	help
 	  Choose this if you need the GEM shmem helper functions
 
-config DRM_VM
-	bool
-	depends on DRM && MMU
-
 config DRM_SCHED
 	tristate
 	depends on DRM
@@ -391,7 +387,6 @@ source "drivers/gpu/drm/xlnx/Kconfig"
 menuconfig DRM_LEGACY
 	bool "Enable legacy drivers (DANGEROUS)"
 	depends on DRM && MMU
-	select DRM_VM
 	help
 	  Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
 	  APIs to user-space, which can be used to circumvent access
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index fefaff4c832d12e97e36e0fd0b881f4115c44a7a..926adef289db96d7c2d8c7c77f97acf81e6f799b 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -5,7 +5,7 @@
 
 drm-y       :=	drm_auth.o drm_cache.o \
 		drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \
-		drm_memory.o drm_drv.o \
+		drm_drv.o \
 		drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
 		drm_encoder_slave.o \
@@ -20,9 +20,9 @@ drm-y       :=	drm_auth.o drm_cache.o \
 		drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \
 		drm_managed.o drm_vblank_work.o
 
-drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
+drm-$(CONFIG_DRM_LEGACY) += drm_bufs.o drm_context.o drm_dma.o drm_legacy_misc.o drm_lock.o \
+		drm_memory.o drm_scatter.o drm_vm.o
 drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
-drm-$(CONFIG_DRM_VM) += drm_vm.o
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
 drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 6bf6cfaea3f19f88b25b0668c3af722b6e14ca70..13ebb1f71e49753b3ee0579502f6c32498e04e90 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -56,7 +56,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
 	amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
 	amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
-	amdgpu_fw_attestation.o
+	amdgpu_fw_attestation.o amdgpu_securedisplay.o
 
 amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
 
@@ -71,7 +71,7 @@ amdgpu-y += \
 	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
 	vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
 	arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
-	nbio_v7_2.o dimgrey_cavefish_reg_init.o
+	nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o
 
 # add DF block
 amdgpu-y += \
@@ -97,6 +97,7 @@ amdgpu-y += \
 	tonga_ih.o \
 	cz_ih.o \
 	vega10_ih.o \
+	vega20_ih.o \
 	navi10_ih.o
 
 # add PSP block
@@ -170,7 +171,8 @@ amdgpu-y += \
 # add SMUIO block
 amdgpu-y += \
 	smuio_v9_0.o \
-	smuio_v11_0.o
+	smuio_v11_0.o \
+	smuio_v11_0_6.o
 
 # add amdkfd interfaces
 amdgpu-y += amdgpu_amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5993dd0fdd8e7d559fa5e6f730984ad7f3af7a4e..86452f4f3c82e2d9e7e8291ccf52398882ba7fa5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -55,7 +55,6 @@
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 
 #include <drm/amdgpu_drm.h>
@@ -89,6 +88,7 @@
 #include "amdgpu_gfx.h"
 #include "amdgpu_sdma.h"
 #include "amdgpu_nbio.h"
+#include "amdgpu_hdp.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_virt.h"
 #include "amdgpu_csa.h"
 
@@ -286,7 +287,7 @@ enum amdgpu_kiq_irq {
 
 #define MAX_KIQ_REG_WAIT       5000 /* in usecs, 5ms */
 #define MAX_KIQ_REG_BAILOUT_INTERVAL   5 /* in msecs, 5ms */
-#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
+#define MAX_KIQ_REG_TRY 1000
 
 int amdgpu_device_ip_set_clockgating_state(void *dev,
 					   enum amd_ip_block_type block_type,
@@ -578,7 +579,8 @@ enum amd_reset_method {
 	AMD_RESET_METHOD_MODE0,
 	AMD_RESET_METHOD_MODE1,
 	AMD_RESET_METHOD_MODE2,
-	AMD_RESET_METHOD_BACO
+	AMD_RESET_METHOD_BACO,
+	AMD_RESET_METHOD_PCI,
 };
 
 /*
@@ -608,7 +610,6 @@ struct amdgpu_asic_funcs {
 	/* invalidate hdp read cache */
 	void (*invalidate_hdp)(struct amdgpu_device *adev,
 			       struct amdgpu_ring *ring);
-	void (*reset_hdp_ras_error_count)(struct amdgpu_device *adev);
 	/* check if the asic needs a full reset of if soft reset will work */
 	bool (*need_full_reset)(struct amdgpu_device *adev);
 	/* initialize doorbell layout for specific asic*/
@@ -891,6 +892,7 @@ struct amdgpu_device {
 	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
 	struct work_struct		hotplug_work;
 	struct amdgpu_irq_src		crtc_irq;
+	struct amdgpu_irq_src		vline0_irq;
 	struct amdgpu_irq_src		vupdate_irq;
 	struct amdgpu_irq_src		pageflip_irq;
 	struct amdgpu_irq_src		hpd_irq;
@@ -921,6 +923,9 @@ struct amdgpu_device {
 	/* nbio */
 	struct amdgpu_nbio		nbio;
 
+	/* hdp */
+	struct amdgpu_hdp		hdp;
+
 	/* smuio */
 	struct amdgpu_smuio		smuio;
 
@@ -1202,8 +1207,10 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
-#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
+#define amdgpu_asic_flush_hdp(adev, r) \
+	((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
+#define amdgpu_asic_invalidate_hdp(adev, r) \
+	((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : (adev)->hdp.funcs->invalidate_hdp((adev), (r)))
 #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
 #define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
 #define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
@@ -1222,6 +1229,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			      struct amdgpu_job* job);
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
 
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index db96d69eb45e19e730a67a21af202bccc6c522cb..c5343a5eecbeb6ac11ca5dd31b766abec0080f63 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -47,12 +47,8 @@ int amdgpu_amdkfd_init(void)
 	amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
 	amdgpu_amdkfd_total_mem_size *= si.mem_unit;
 
-#ifdef CONFIG_HSA_AMD
 	ret = kgd2kfd_init();
 	amdgpu_amdkfd_gpuvm_init_mem_limits();
-#else
-	ret = -ENOENT;
-#endif
 	kfd_initialized = !ret;
 
 	return ret;
@@ -696,86 +692,3 @@ bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
 
 	return adev->have_atomics_support;
 }
-
-#ifndef CONFIG_HSA_AMD
-bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
-{
-	return false;
-}
-
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
-{
-}
-
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
-{
-	return 0;
-}
-
-void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
-					struct amdgpu_vm *vm)
-{
-}
-
-struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
-{
-	return NULL;
-}
-
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
-{
-	return 0;
-}
-
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
-			      unsigned int asic_type, bool vf)
-{
-	return NULL;
-}
-
-bool kgd2kfd_device_init(struct kfd_dev *kfd,
-			 struct drm_device *ddev,
-			 const struct kgd2kfd_shared_resources *gpu_resources)
-{
-	return false;
-}
-
-void kgd2kfd_device_exit(struct kfd_dev *kfd)
-{
-}
-
-void kgd2kfd_exit(void)
-{
-}
-
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
-{
-}
-
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
-{
-	return 0;
-}
-
-int kgd2kfd_pre_reset(struct kfd_dev *kfd)
-{
-	return 0;
-}
-
-int kgd2kfd_post_reset(struct kfd_dev *kfd)
-{
-	return 0;
-}
-
-void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
-{
-}
-
-void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
-{
-}
-
-void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
-{
-}
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index ea391ca7f2f1cfb5ded5015b8694b7b6edbc9a5a..a81d9cacf9b8fc31f324972904d1fa7ab49b0b80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -94,11 +94,6 @@ enum kgd_engine_type {
 	KGD_ENGINE_MAX
 };
 
-struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
-						       struct mm_struct *mm);
-bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
-struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
 
 struct amdkfd_process_info {
 	/* List head of all VMs that belong to a KFD process */
@@ -132,8 +127,6 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
 void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
 void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev);
-
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
 int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
 				uint32_t vmid, uint64_t gpu_addr,
 				uint32_t *ib_cmd, uint32_t ib_len);
@@ -153,6 +146,38 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
 					int queue_bit);
 
+struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
+								struct mm_struct *mm);
+#if IS_ENABLED(CONFIG_HSA_AMD)
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
+int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
+#else
+static inline
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
+{
+	return false;
+}
+
+static inline
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
+{
+	return NULL;
+}
+
+static inline
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+	return 0;
+}
+
+static inline
+int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
+{
+	return 0;
+}
+#endif
 /* Shared API */
 int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 				void **mem_obj, uint64_t *gpu_addr,
@@ -215,8 +240,6 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
 					struct file *filp, u32 pasid,
 					void **vm, void **process_info,
 					struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
-				struct amdgpu_vm *vm);
 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
@@ -236,23 +259,43 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
 		struct kgd_mem *mem, void **kptr, uint64_t *size);
 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
 					    struct dma_fence **ef);
-
 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
 					      struct kfd_vm_fault_info *info);
-
 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
 				      struct dma_buf *dmabuf,
 				      uint64_t va, void *vm,
 				      struct kgd_mem **mem, uint64_t *size,
 				      uint64_t *mmap_offset);
-
-void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
-
 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
 				struct tile_config *config);
+#if IS_ENABLED(CONFIG_HSA_AMD)
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+				struct amdgpu_vm *vm);
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
+#else
+static inline
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
+{
+}
 
+static inline
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+					struct amdgpu_vm *vm)
+{
+}
+
+static inline
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
+{
+}
+#endif
 /* KGD2KFD callbacks */
+int kgd2kfd_quiesce_mm(struct mm_struct *mm);
+int kgd2kfd_resume_mm(struct mm_struct *mm);
+int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
+						struct dma_fence *fence);
+#if IS_ENABLED(CONFIG_HSA_AMD)
 int kgd2kfd_init(void);
 void kgd2kfd_exit(void);
 struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
@@ -266,11 +309,68 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
-int kgd2kfd_quiesce_mm(struct mm_struct *mm);
-int kgd2kfd_resume_mm(struct mm_struct *mm);
-int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
-					       struct dma_fence *fence);
 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask);
+#else
+static inline int kgd2kfd_init(void)
+{
+	return -ENOENT;
+}
 
+static inline void kgd2kfd_exit(void)
+{
+}
+
+static inline
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
+					unsigned int asic_type, bool vf)
+{
+	return NULL;
+}
+
+static inline
+bool kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev,
+				const struct kgd2kfd_shared_resources *gpu_resources)
+{
+	return false;
+}
+
+static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
+{
+}
+
+static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+{
+}
+
+static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+{
+	return 0;
+}
+
+static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
+static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
+static inline
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+{
+}
+
+static inline
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
+{
+}
+
+static inline
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
+{
+}
+#endif
 #endif /* AMDGPU_AMDKFD_H_INCLUDED */
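The reshuffle above replaces the out-of-line !CONFIG_HSA_AMD stubs that used
to live in amdgpu_amdkfd.c with static inline stubs in the header, guarded by
IS_ENABLED(CONFIG_HSA_AMD), so call sites need no #ifdef and the stubs fold
away at compile time. A self-contained sketch of the same pattern, with
CONFIG_FOO standing in for the real Kconfig symbol:

#include <stdio.h>

#define CONFIG_FOO 0 /* stand-in for a Kconfig symbol; set to 1 when foo.c is built in */

#if CONFIG_FOO
int foo_init(void); /* real implementation provided elsewhere */
#else
static inline int foo_init(void)
{
	return -1; /* like the kgd2kfd_init() stub above returning -ENOENT */
}
#endif

int main(void)
{
	/* identical call site either way - no #ifdef needed here */
	printf("foo_init() = %d\n", foo_init());
	return 0;
}
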
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 4763bab7a4d0dfe309752f1a4e3dafc9922efc58..62aa1a6f64edd65b563a2663282c10b39e72e2b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -23,7 +23,6 @@
 #include "amdgpu_amdkfd.h"
 #include "gc/gc_10_1_0_offset.h"
 #include "gc/gc_10_1_0_sh_mask.h"
-#include "navi10_enum.h"
 #include "athub/athub_2_0_0_offset.h"
 #include "athub/athub_2_0_0_sh_mask.h"
 #include "oss/osssys_5_0_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 50016bf9c4279b5d06999ac18c16eb5b0ad66461..fad3b91f74f54972a4aee067bc921c5ec4b5befb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -24,7 +24,6 @@
 #include "amdgpu_amdkfd.h"
 #include "gc/gc_10_3_0_offset.h"
 #include "gc/gc_10_3_0_sh_mask.h"
-#include "navi10_enum.h"
 #include "oss/osssys_5_0_0_offset.h"
 #include "oss/osssys_5_0_0_sh_mask.h"
 #include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index d1ed4f8df2b76d7dd41286b52426dfbde175f33f..ac0a432a9bf79fc6878e616c1f93f7c05d542705 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -454,7 +454,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
 	struct amdgpu_bo *bo = mem->bo;
 	uint64_t va = mem->va;
 	struct list_head *list_bo_va = &mem->bo_va_list;
-	unsigned long bo_size = bo->tbo.mem.size;
+	unsigned long bo_size = bo->tbo.base.size;
 
 	if (!va) {
 		pr_err("Invalid VA when adding BO to VM\n");
@@ -1277,7 +1277,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 		struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
 {
 	struct amdkfd_process_info *process_info = mem->process_info;
-	unsigned long bo_size = mem->bo->tbo.mem.size;
+	unsigned long bo_size = mem->bo->tbo.base.size;
 	struct kfd_bo_va_list *entry, *tmp;
 	struct bo_vm_reservation_context ctx;
 	struct ttm_validate_buffer *bo_list_entry;
@@ -1398,7 +1398,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	mutex_lock(&mem->lock);
 
 	domain = mem->domain;
-	bo_size = bo->tbo.mem.size;
+	bo_size = bo->tbo.base.size;
 
 	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
 			mem->va,
@@ -1502,7 +1502,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct amdkfd_process_info *process_info =
 		((struct amdgpu_vm *)vm)->process_info;
-	unsigned long bo_size = mem->bo->tbo.mem.size;
+	unsigned long bo_size = mem->bo->tbo.base.size;
 	struct kfd_bo_va_list *entry;
 	struct bo_vm_reservation_context ctx;
 	int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 055f600eeed87f015893e7ac2976b5747f6eb0f4..cfb1a9a044772c828e80b8b80916330033ed61a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -155,7 +155,7 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
 	u8 header[AMD_VBIOS_SIGNATURE_END+1] = {0};
 	int len;
 
-	if (!adev->asic_funcs->read_bios_from_rom)
+	if (!adev->asic_funcs || !adev->asic_funcs->read_bios_from_rom)
 		return false;
 
 	/* validate VBIOS signature */
@@ -348,7 +348,8 @@ static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
 	if (adev->flags & AMD_IS_APU)
 		return igp_read_bios_from_vram(adev);
 	else
-		return amdgpu_asic_read_disabled_bios(adev);
+		return (!adev->asic_funcs || !adev->asic_funcs->read_disabled_bios) ?
+			false : amdgpu_asic_read_disabled_bios(adev);
 }
 
 #ifdef CONFIG_ACPI
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 594a0108e90fa4e5278220eda33bdcd83311b1ef..3e240b952e7912a36365bdf7d2191e981b08598d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -98,8 +98,7 @@ static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
 	return 0;
 
 error_free:
-	if (info)
-		kvfree(info);
+	kvfree(info);
 
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a6667a2ca0db3997f9a37de68d3a6fac19048824..0a25fecf488aba6763798bb50292ae334b994080 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -35,6 +35,7 @@
 #include "amdgpu_dm_debugfs.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_rap.h"
+#include "amdgpu_securedisplay.h"
 #include "amdgpu_fw_attestation.h"
 
 /**
@@ -1427,7 +1428,7 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
 	struct dma_fence *fence;
 
 	spin_lock(&sched->job_list_lock);
-	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+	list_for_each_entry(s_job, &sched->pending_list, list) {
 		fence = sched->ops->run_job(s_job);
 		dma_fence_put(fence);
 	}
@@ -1459,10 +1460,10 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
 
 no_preempt:
 	spin_lock(&sched->job_list_lock);
-	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
 		if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
 			/* remove job from ring_mirror_list */
-			list_del_init(&s_job->node);
+			list_del_init(&s_job->list);
 			sched->ops->free_job(s_job);
 			continue;
 		}
@@ -1669,6 +1670,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 
 	amdgpu_rap_debugfs_init(adev);
 
+	amdgpu_securedisplay_debugfs_init(adev);
+
 	amdgpu_fw_attestation_debugfs_init(adev);
 
 	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cab1ebaf6d6299a365c272b90055cd8e847104fb..7052dc35d2780154c8be806be4d9e8ab113eb0cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -929,6 +929,18 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
 }
 
+/**
+ * amdgpu_device_pci_reset - reset the GPU using generic PCI means
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Resets the GPU using generic PCI reset interfaces (FLR, SBR, etc.).
+ */
+int amdgpu_device_pci_reset(struct amdgpu_device *adev)
+{
+	return pci_reset_function(adev->pdev);
+}
+
 /*
  * GPU doorbell aperture helpers function.
  */
@@ -1105,8 +1117,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
  */
 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 {
-	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
-	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
+	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
 	struct pci_bus *root;
 	struct resource *res;
 	unsigned i;
@@ -1137,6 +1148,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 	if (!res)
 		return 0;
 
+	/* Limit the BAR size to what is available */
+	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
+			rbar_size);
+
 	/* Disable memory decoding while we change the BAR addresses and size */
 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
 	pci_write_config_word(adev->pdev, PCI_COMMAND,
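Assuming the pci_rebar_bytes_to_size() semantics in this kernel (the
resizable-BAR encoding is log2 of the BAR size in MB, with a 1 MB minimum),
an 8 GiB VRAM pool maps to encoding 13, and fls(possible_sizes) - 1 caps that
at the largest encoding the device advertises. A userspace re-derivation of
that arithmetic:

#include <stdio.h>
#include <stdint.h>

static int rebar_bytes_to_size(uint64_t bytes)
{
	int lg = 0;

	while ((1ULL << (lg + 1)) <= bytes)
		lg++;
	if ((1ULL << lg) < bytes)
		lg++;				/* round up to a power of two */
	return lg < 20 ? 0 : lg - 20;		/* 1 MB minimum */
}

int main(void)
{
	uint64_t vram = 8ULL << 30;		/* 8 GiB of VRAM */
	unsigned int possible = 0x3fff;		/* device supports encodings 0..13 */
	int size = rebar_bytes_to_size(vram);	/* 13, i.e. 2^13 MB = 8 GiB */
	int largest = 31 - __builtin_clz(possible);	/* fls(possible) - 1 */

	printf("wanted encoding %d, limited to %d\n",
	       size, size < largest ? size : largest);
	return 0;
}
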
@@ -1422,24 +1437,22 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
 		/* don't suspend or resume card normally */
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
-		pci_set_power_state(dev->pdev, PCI_D0);
-		amdgpu_device_load_pci_state(dev->pdev);
-		r = pci_enable_device(dev->pdev);
+		pci_set_power_state(pdev, PCI_D0);
+		amdgpu_device_load_pci_state(pdev);
+		r = pci_enable_device(pdev);
 		if (r)
 			DRM_WARN("pci_enable_device failed (%d)\n", r);
 		amdgpu_device_resume(dev, true);
 
 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
-		drm_kms_helper_poll_enable(dev);
 	} else {
 		pr_info("switched off\n");
-		drm_kms_helper_poll_disable(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		amdgpu_device_suspend(dev, true);
-		amdgpu_device_cache_pci_state(dev->pdev);
+		amdgpu_device_cache_pci_state(pdev);
 		/* Shut down the device */
-		pci_disable_device(dev->pdev);
-		pci_set_power_state(dev->pdev, PCI_D3cold);
+		pci_disable_device(pdev);
+		pci_set_power_state(pdev, PCI_D3cold);
 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
@@ -1702,8 +1715,7 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
 	adev->enable_virtual_display = false;
 
 	if (amdgpu_virtual_display) {
-		struct drm_device *ddev = adev_to_drm(adev);
-		const char *pci_address_name = pci_name(ddev->pdev);
+		const char *pci_address_name = pci_name(adev->pdev);
 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
 
 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
@@ -3116,7 +3128,10 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
 	 */
 	adev->gfx_timeout = msecs_to_jiffies(10000);
 	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
-	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+	if (amdgpu_sriov_vf(adev))
+		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
+					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
+	else if (amdgpu_passthrough(adev))
 		adev->compute_timeout =  msecs_to_jiffies(60000);
 	else
 		adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
@@ -3396,7 +3411,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		}
 	}
 
-	pci_enable_pcie_error_reporting(adev->ddev.pdev);
+	pci_enable_pcie_error_reporting(adev->pdev);
 
 	/* Post card if necessary */
 	if (amdgpu_device_need_post(adev)) {
@@ -3719,7 +3734,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 
 	r = amdgpu_device_ip_suspend_phase1(adev);
 
-	amdgpu_amdkfd_suspend(adev, !fbcon);
+	amdgpu_amdkfd_suspend(adev, adev->in_runpm);
 
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
@@ -3803,7 +3818,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 			}
 		}
 	}
-	r = amdgpu_amdkfd_resume(adev, !fbcon);
+	r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
 	if (r)
 		return r;
 
@@ -4154,8 +4169,8 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
 			continue;
 
 		spin_lock(&ring->sched.job_list_lock);
-		job = list_first_entry_or_null(&ring->sched.ring_mirror_list,
-				struct drm_sched_job, node);
+		job = list_first_entry_or_null(&ring->sched.pending_list,
+					       struct drm_sched_job, list);
 		spin_unlock(&ring->sched.job_list_lock);
 		if (job)
 			return true;
@@ -4205,6 +4220,8 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 		case CHIP_NAVI14:
 		case CHIP_NAVI12:
 		case CHIP_SIENNA_CICHLID:
+		case CHIP_NAVY_FLOUNDER:
+		case CHIP_DIMGREY_CAVEFISH:
 			break;
 		default:
 			goto disabled;
@@ -4454,6 +4471,46 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
 	up_write(&adev->reset_sem);
 }
 
+/*
+ * Lock a list of amdgpu devices in a hive safely. If the device is not
+ * part of a hive with multiple nodes, this behaves the same as
+ * amdgpu_device_lock_adev.
+ *
+ * Unlock won't require a roll back.
+ */
+static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
+{
+	struct amdgpu_device *tmp_adev = NULL;
+
+	if (adev->gmc.xgmi.num_physical_nodes > 1) {
+		if (!hive) {
+			dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
+			return -ENODEV;
+		}
+		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+			if (!amdgpu_device_lock_adev(tmp_adev, hive))
+				goto roll_back;
+		}
+	} else if (!amdgpu_device_lock_adev(adev, hive))
+		return -EAGAIN;
+
+	return 0;
+roll_back:
+	if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
+		/*
+		 * If the locking iteration breaks in the middle of a hive,
+		 * it may mean there is a race issue, or that a hive device
+		 * locked up independently. We may or may not be in trouble,
+		 * so roll back the locks taken so far and give out a
+		 * warning.
+		 */
+		dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
+		list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+			amdgpu_device_unlock_adev(tmp_adev);
+		}
+	}
+	return -EAGAIN;
+}
+
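The helper above takes every per-device lock in hive order and, on failure,
unwinds the ones already held in reverse via
list_for_each_entry_continue_reverse(). A minimal userspace sketch of the
same all-or-nothing locking pattern, using an array of mutexes in place of
the hive list (names illustrative):

#include <pthread.h>
#include <stdio.h>

#define NODES 4

static pthread_mutex_t node_lock[NODES];

/* Take all locks in order; on failure unwind in reverse and bail out. */
static int lock_hive(void)
{
	int i;

	for (i = 0; i < NODES; i++) {
		if (pthread_mutex_trylock(&node_lock[i]) != 0) {
			while (--i >= 0)	/* roll back what we hold */
				pthread_mutex_unlock(&node_lock[i]);
			return -1;
		}
	}
	return 0;
}

static void unlock_hive(void)
{
	for (int i = 0; i < NODES; i++)
		pthread_mutex_unlock(&node_lock[i]);
}

int main(void)
{
	for (int i = 0; i < NODES; i++)
		pthread_mutex_init(&node_lock[i], NULL);

	if (lock_hive() == 0) {
		puts("hive locked, safe to touch the shared device list");
		unlock_hive();
	} else {
		puts("another reset in flight, bailing");
	}
	return 0;
}
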
 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
 {
 	struct pci_dev *p = NULL;
@@ -4567,11 +4624,29 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
 				job ? job->base.id : -1, hive->hive_id);
 			amdgpu_put_xgmi_hive(hive);
+			if (job)
+				drm_sched_increase_karma(&job->base);
 			return 0;
 		}
 		mutex_lock(&hive->hive_lock);
 	}
 
+	/*
+	 * Lock the device before we try to operate on the linked list.
+	 * If we didn't get the device lock, don't touch the linked list
+	 * since others may be iterating over it.
+	 */
+	r = amdgpu_device_lock_hive_adev(adev, hive);
+	if (r) {
+		dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
+					job ? job->base.id : -1);
+
+		/* even though we skipped this reset, we still need to mark the job as guilty */
+		if (job)
+			drm_sched_increase_karma(&job->base);
+		goto skip_recovery;
+	}
+
 	/*
 	 * Build list of devices to reset.
 	 * In case we are in XGMI hive mode, resort the device list
@@ -4579,8 +4654,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	 */
 	INIT_LIST_HEAD(&device_list);
 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
-		if (!hive)
-			return -ENODEV;
 		if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
 			list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
 		device_list_handle = &hive->device_list;
@@ -4591,13 +4664,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
 	/* block all schedulers and reset given job's ring */
 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-		if (!amdgpu_device_lock_adev(tmp_adev, hive)) {
-			dev_info(tmp_adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
-				  job ? job->base.id : -1);
-			r = 0;
-			goto skip_recovery;
-		}
-
 		/*
 		 * Try to put the audio codec into suspend state
 		 * before gpu reset started.
@@ -4735,7 +4801,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		amdgpu_put_xgmi_hive(hive);
 	}
 
-	if (r)
+	if (r && r != -EAGAIN)
 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
 	return r;
 }
@@ -4785,7 +4851,13 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
 		} else {
-			if (speed_cap == PCIE_SPEED_16_0GT)
+			if (speed_cap == PCIE_SPEED_32_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
+							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
+			else if (speed_cap == PCIE_SPEED_16_0GT)
 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
@@ -4805,7 +4877,13 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
 		} else {
-			if (platform_speed_cap == PCIE_SPEED_16_0GT)
+			if (platform_speed_cap == PCIE_SPEED_32_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
+			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
@@ -4949,8 +5027,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
 	case pci_channel_io_normal:
 		return PCI_ERS_RESULT_CAN_RECOVER;
 	/* Fatal error, prepare for slot reset */
-	case pci_channel_io_frozen:		
-		/*		
+	case pci_channel_io_frozen:
+		/*
 		 * Cancel and wait for all TDRs in progress if failing to
 		 * set  adev->in_gpu_reset in amdgpu_device_lock_adev
 		 *
@@ -5041,7 +5119,7 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
 		goto out;
 	}
 
-	adev->in_pci_err_recovery = true;	
+	adev->in_pci_err_recovery = true;
 	r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
 	adev->in_pci_err_recovery = false;
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e42175e1acf18b758c7ab64077c6f2496f0e4550..47e0b48dc26fd72f0a5e4869a3b0ba677e6db2da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -40,6 +40,7 @@
 #include <linux/dma-buf.h>
 #include <linux/dma-fence-array.h>
 #include <linux/pci-p2pdma.h>
+#include <linux/pm_runtime.h>
 
 /**
  * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
@@ -151,9 +152,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
 	if (attach->dev->driver == adev->dev->driver)
 		return 0;
 
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+	if (r < 0)
+		goto out;
+
 	r = amdgpu_bo_reserve(bo, false);
 	if (unlikely(r != 0))
-		return r;
+		goto out;
 
 	/*
 	 * We only create shared fences for internal use, but importers
@@ -165,11 +170,15 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
 	 */
 	r = __dma_resv_make_exclusive(bo->tbo.base.resv);
 	if (r)
-		return r;
+		goto out;
 
 	bo->prime_shared_count++;
 	amdgpu_bo_unreserve(bo);
 	return 0;
+
+out:
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+	return r;
 }
 
 /**
@@ -189,6 +198,9 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
 
 	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
 		bo->prime_shared_count--;
+
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 }
 
 /**
@@ -269,7 +281,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 	case TTM_PL_TT:
 		sgt = drm_prime_pages_to_sg(obj->dev,
 					    bo->tbo.ttm->pages,
-					    bo->tbo.num_pages);
+					    bo->tbo.ttm->num_pages);
 		if (IS_ERR(sgt))
 			return sgt;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7169fb5e3d9c47f0c8fd575b11544c31cd83cdb9..03c529630c21f3601ce022f40b8bb9575f65230f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -132,8 +132,12 @@ uint amdgpu_pg_mask = 0xffffffff;
 uint amdgpu_sdma_phase_quantum = 32;
 char *amdgpu_disable_cu = NULL;
 char *amdgpu_virtual_display = NULL;
-/* OverDrive(bit 14) disabled by default*/
-uint amdgpu_pp_feature_mask = 0xffffbfff;
+
+/*
+ * OverDrive(bit 14) disabled by default
+ * GFX DCS(bit 19) disabled by default
+ */
+uint amdgpu_pp_feature_mask = 0xfff7bfff;
 uint amdgpu_force_long_training;
 int amdgpu_job_hang_limit;
 int amdgpu_lbpw = -1;
@@ -789,9 +793,9 @@ module_param_named(tmz, amdgpu_tmz, int, 0444);
 
 /**
  * DOC: reset_method (int)
- * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
+ * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco, 5 = pci)
  */
-MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
+MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco, 5 = pci)");
 module_param_named(reset_method, amdgpu_reset_method, int, 0444);
 
 /**
@@ -1094,6 +1098,7 @@ static const struct pci_device_id pciidlist[] = {
 
 	/* Sienna_Cichlid */
 	{0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+	{0x1002, 0x73A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 	{0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 	{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 	{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
@@ -1206,7 +1211,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 	if (ret)
 		return ret;
 
-	ddev->pdev = pdev;
 	pci_set_drvdata(pdev, ddev);
 
 	ret = amdgpu_driver_load_kms(adev, ent->driver_data);
@@ -1344,11 +1348,12 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
 	adev->in_runpm = true;
 	if (amdgpu_device_supports_atpx(drm_dev))
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-	drm_kms_helper_poll_disable(drm_dev);
 
 	ret = amdgpu_device_suspend(drm_dev, false);
-	if (ret)
+	if (ret) {
+		adev->in_runpm = false;
 		return ret;
+	}
 
 	if (amdgpu_device_supports_atpx(drm_dev)) {
 		/* Only need to handle PCI state in the driver for ATPX
@@ -1401,7 +1406,6 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
 		amdgpu_device_baco_exit(drm_dev);
 	}
 	ret = amdgpu_device_resume(drm_dev, false);
-	drm_kms_helper_poll_enable(drm_dev);
 	if (amdgpu_device_supports_atpx(drm_dev))
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	adev->in_runpm = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 0bf7d36c6686d4543609b1af3bb3e3480c33239f..51cd49c6f38fd06f58369e3265cafa5367573a3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -271,7 +271,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	DRM_INFO("fb depth is %d\n", fb->format->depth);
 	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
 
-	vga_switcheroo_client_fb_set(adev_to_drm(adev)->pdev, info);
+	vga_switcheroo_client_fb_set(adev->pdev, info);
 	return 0;
 
 out:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
index 7c6e02e35573c4c0c907178052d6e406654b5d7e..8d1ad294cb026db75c56e7929b486b699cb229bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
@@ -47,10 +47,9 @@ typedef struct FW_ATT_RECORD
 	uint16_t AttFwIdV2;              /* V2 FW ID field */
 	uint32_t AttFWVersion;           /* FW Version */
 	uint16_t AttFWActiveFunctionID;  /* The VF ID (only in VF Attestation Table) */
-	uint16_t AttSource;              /* FW source indicator */
-	uint16_t RecordValid;            /* Indicates whether the record is a valid entry */
-	uint8_t  AttFwTaId;              /* Ta ID (only in TA Attestation Table) */
-	uint8_t  Reserved;
+	uint8_t  AttSource;              /* FW source indicator */
+	uint8_t  RecordValid;            /* Indicates whether the record is a valid entry */
+	uint32_t AttFwTaId;              /* Ta ID (only in TA Attestation Table) */
 } FW_ATT_RECORD;
 
 static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 174a73eb23f08117528f8a458a68e57d50f89a72..b443907afceadfebab03bec153292bff145d5a0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -619,7 +619,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	int r = 0;
 
 	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
-		dev_dbg(&dev->pdev->dev,
+		dev_dbg(dev->dev,
 			"va_address 0x%LX is in reserved area 0x%LX\n",
 			args->va_address, AMDGPU_VA_RESERVED_SIZE);
 		return -EINVAL;
@@ -627,7 +627,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
 	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
 	    args->va_address < AMDGPU_GMC_HOLE_END) {
-		dev_dbg(&dev->pdev->dev,
+		dev_dbg(dev->dev,
 			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
 			args->va_address, AMDGPU_GMC_HOLE_START,
 			AMDGPU_GMC_HOLE_END);
@@ -639,14 +639,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
 	vm_size -= AMDGPU_VA_RESERVED_SIZE;
 	if (args->va_address + args->map_size > vm_size) {
-		dev_dbg(&dev->pdev->dev,
+		dev_dbg(dev->dev,
 			"va_address 0x%llx is in top reserved area 0x%llx\n",
 			args->va_address + args->map_size, vm_size);
 		return -EINVAL;
 	}
 
 	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
-		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
+		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
 			args->flags);
 		return -EINVAL;
 	}
@@ -658,7 +658,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	case AMDGPU_VA_OP_REPLACE:
 		break;
 	default:
-		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
+		dev_dbg(dev->dev, "unsupported operation %d\n",
 			args->operation);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index cd2c676a2797cc084950f90a0727064c6e076181..8e0a6c62322ec9d62d569d717982580abd58901f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -193,15 +193,16 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
 }
 
 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
-					       int pipe, int queue)
+					       struct amdgpu_ring *ring)
 {
-	bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
-	int cond;
-	/* Policy: alternate between normal and high priority */
-	cond = multipipe_policy ? pipe : queue;
-
-	return ((cond % 2) != 0);
+	/* Policy: use 1st queue as high priority compute queue if we
+	 * have more than one compute queue.
+	 */
+	if (adev->gfx.num_compute_rings > 1 &&
+	    ring == &adev->gfx.compute_ring[0])
+		return true;
 
+	return false;
 }
 
 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 6b5a8f4642cc9abe4d717016bca2962b357fcd46..72dbcd2bc6a630e5ff69d6a335e99fad1f95e5bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -380,7 +380,7 @@ void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
 				     int pipe, int queue);
 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
-					       int pipe, int queue);
+					       struct amdgpu_ring *ring);
 int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
 			       int pipe, int queue);
 void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 6e679db5e46f0e8ceb1b5b26d6d88c3fb48f673d..fe1a39ffda724f5490791da76dff6c90e8646d57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -120,7 +120,7 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 
-	if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
+	if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
 		return AMDGPU_BO_INVALID_OFFSET;
 
 	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
new file mode 100644
index 0000000000000000000000000000000000000000..43caf9f8cc110a8a561ba6e5a8cbdaf287be7778
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_HDP_H__
+#define __AMDGPU_HDP_H__
+
+struct amdgpu_hdp_funcs {
+	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+	void (*invalidate_hdp)(struct amdgpu_device *adev,
+			       struct amdgpu_ring *ring);
+	void (*reset_ras_error_count)(struct amdgpu_device *adev);
+	void (*update_clock_gating)(struct amdgpu_device *adev, bool enable);
+	void (*get_clock_gating_state)(struct amdgpu_device *adev, u32 *flags);
+	void (*init_registers)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_hdp {
+	const struct amdgpu_hdp_funcs		*funcs;
+};
+
+#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 47cad23a6b9e2b6b81223a55df741e17da7cfbca..bca4dddd5a15b46dabf7946236892d143781d413 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -176,7 +176,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
 	i2c->rec = *rec;
 	i2c->adapter.owner = THIS_MODULE;
 	i2c->adapter.class = I2C_CLASS_DDC;
-	i2c->adapter.dev.parent = &dev->pdev->dev;
+	i2c->adapter.dev.parent = dev->dev;
 	i2c->dev = dev;
 	i2c_set_adapdata(&i2c->adapter, i2c);
 	mutex_init(&i2c->mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 024d0a563a652be59bc054b9572e55af2e7c76c1..7645223ea0ef2173a96f8750aea0a57aaac90bd8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -195,6 +195,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
 		ring->funcs->emit_mem_sync(ring);
 
+	if (ring->funcs->emit_wave_limit &&
+	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
+		ring->funcs->emit_wave_limit(ring, true);
+
 	if (ring->funcs->insert_start)
 		ring->funcs->insert_start(ring);
 
@@ -295,6 +299,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	ring->current_ctx = fence_ctx;
 	if (vm && ring->funcs->emit_switch_buffer)
 		amdgpu_ring_emit_switch_buffer(ring);
+
+	if (ring->funcs->emit_wave_limit &&
+	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
+		ring->funcs->emit_wave_limit(ring, false);
+
 	amdgpu_ring_commit(ring);
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index dcd9b4a8e20bef0932376e0e9acaa4cbba3bbdab..dc852af4f3b76f6a33bfa0784addd7f80f7c443d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -205,3 +205,48 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
 	return IRQ_HANDLED;
 }
 
+/**
+ * amdgpu_ih_decode_iv_helper - decode an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: ih ring to process
+ * @entry: IV entry
+ *
+ * Decodes the interrupt vector at the current rptr
+ * position and also advances the position, for Vega10
+ * and later GPUs.
+ */
+void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
+				struct amdgpu_ih_ring *ih,
+				struct amdgpu_iv_entry *entry)
+{
+	/* wptr/rptr are in bytes! */
+	u32 ring_index = ih->rptr >> 2;
+	uint32_t dw[8];
+
+	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
+	dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
+	dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
+	dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
+	dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
+
+	entry->client_id = dw[0] & 0xff;
+	entry->src_id = (dw[0] >> 8) & 0xff;
+	entry->ring_id = (dw[0] >> 16) & 0xff;
+	entry->vmid = (dw[0] >> 24) & 0xf;
+	entry->vmid_src = (dw[0] >> 31);
+	entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
+	entry->timestamp_src = dw[2] >> 31;
+	entry->pasid = dw[3] & 0xffff;
+	entry->pasid_src = dw[3] >> 31;
+	entry->src_data[0] = dw[4];
+	entry->src_data[1] = dw[5];
+	entry->src_data[2] = dw[6];
+	entry->src_data[3] = dw[7];
+
+	/* wptr/rptr are in bytes! */
+	ih->rptr += 32;
+}
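The shifts and masks above define the Vega10+ IV layout only implicitly; a
standalone sketch that decodes just dw[0] the same way, handy for
sanity-checking a captured IV word (the packed value here is made up for
illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* dw[0] layout per the decode above:
	 * [7:0] client_id, [15:8] src_id, [23:16] ring_id,
	 * [27:24] vmid, [31] vmid_src
	 */
	uint32_t dw0 = (1u << 31) | (3u << 24) | (0u << 16) | (42u << 8) | 0x12;

	printf("client_id=0x%02x src_id=%u ring_id=%u vmid=%u vmid_src=%u\n",
	       dw0 & 0xff, (dw0 >> 8) & 0xff, (dw0 >> 16) & 0xff,
	       (dw0 >> 24) & 0xf, dw0 >> 31);
	return 0;
}
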
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 3c9cfe7eecff4f2dd22b53b51886ddf9b1b2a0bc..6ed4a85fc7c31e7916472257dc243a6d2c09afc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -30,6 +30,18 @@
 struct amdgpu_device;
 struct amdgpu_iv_entry;
 
+struct amdgpu_ih_regs {
+	uint32_t ih_rb_base;
+	uint32_t ih_rb_base_hi;
+	uint32_t ih_rb_cntl;
+	uint32_t ih_rb_wptr;
+	uint32_t ih_rb_rptr;
+	uint32_t ih_doorbell_rptr;
+	uint32_t ih_rb_wptr_addr_lo;
+	uint32_t ih_rb_wptr_addr_hi;
+	uint32_t psp_reg_id;
+};
+
 /*
  * R6xx+ IH ring
  */
@@ -53,6 +65,7 @@ struct amdgpu_ih_ring {
 	bool                    enabled;
 	unsigned		rptr;
 	atomic_t		lock;
+	struct amdgpu_ih_regs	ih_regs;
 };
 
 /* provided by the ih block */
@@ -75,5 +88,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
 void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
 			  unsigned int num_dw);
 int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
-
+void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
+				struct amdgpu_ih_ring *ih,
+				struct amdgpu_iv_entry *entry);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index bea57e8e793f704e7da2cdd22fcf0c0be94cb85d..afbbec82a289c8300cd823b0e41ed1dc221fb166 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -444,7 +444,8 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
 	} else	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
 		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
 
-	} else if (adev->irq.virq[src_id]) {
+	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
+		   adev->irq.virq[src_id]) {
 		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
 
 	} else if (!adev->irq.client[client_id].sources) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index dcfe8a3b03ffb9efdbeeba5a560bbc37cd777320..ff48101bab551373f7ef54682af1cac4ad728491 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -271,7 +271,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 	}
 
 	/* Signal all jobs already scheduled to HW */
-	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+	list_for_each_entry(s_job, &sched->pending_list, list) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 
 		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index b16b32797624af9a38bb57094c19e41845a18765..3c37cf1ae8b7f13891e442b9a1fb8d4e6f2d1004 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -142,7 +142,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 	    (amdgpu_is_atpx_hybrid() ||
 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
 	    ((flags & AMD_IS_APU) == 0) &&
-	    !pci_is_thunderbolt_attached(dev->pdev))
+	    !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
 		flags |= AMD_IS_PX;
 
 	parent = pci_upstream_bridge(adev->pdev);
@@ -156,7 +156,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 	 */
 	r = amdgpu_device_init(adev, flags);
 	if (r) {
-		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+		dev_err(dev->dev, "Fatal error during GPU init\n");
 		goto out;
 	}
 
@@ -199,7 +199,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 
 	acpi_status = amdgpu_acpi_init(adev);
 	if (acpi_status)
-		dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
+		dev_dbg(dev->dev, "Error during ACPI methods call\n");
 
 	if (adev->runpm) {
 		/* only need to skip on ATPX */
@@ -735,10 +735,10 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		if (!dev_info)
 			return -ENOMEM;
 
-		dev_info->device_id = dev->pdev->device;
+		dev_info->device_id = adev->pdev->device;
 		dev_info->chip_rev = adev->rev_id;
 		dev_info->external_rev = adev->external_rev_id;
-		dev_info->pci_rev = dev->pdev->revision;
+		dev_info->pci_rev = adev->pdev->revision;
 		dev_info->family = adev->family;
 		dev_info->num_shader_engines = adev->gfx.config.max_shader_engines;
 		dev_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index e62cc0e1a5ad072f5079db6611af8af3b2128d93..7c11bce4514bd0c7f47d255f151603848a7f2832 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -57,7 +57,6 @@ struct amdgpu_nbio_funcs {
 	u32 (*get_pcie_port_data_offset)(struct amdgpu_device *adev);
 	u32 (*get_rev_id)(struct amdgpu_device *adev);
 	void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
-	void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
 	u32 (*get_memsize)(struct amdgpu_device *adev);
 	void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
 			bool use_doorbell, int doorbell_index, int doorbell_size);
@@ -89,6 +88,7 @@ struct amdgpu_nbio_funcs {
 	int (*ras_late_init)(struct amdgpu_device *adev);
 	void (*enable_aspm)(struct amdgpu_device *adev,
 			    bool enable);
+	void (*program_aspm)(struct amdgpu_device *adev);
 };
 
 struct amdgpu_nbio {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index b4c8e5d5c763cd5416b712e60c3a7ec2a10ed44a..4b29b820544281cc3f1ab3c717866e6c0ea66b00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -787,7 +787,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 	if (r < 0)
 		return r;
 
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
 	if (r)
 		return r;
 
@@ -911,10 +911,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 
 	if (bo->tbo.pin_count) {
 		uint32_t mem_type = bo->tbo.mem.mem_type;
+		uint32_t mem_flags = bo->tbo.mem.placement;
 
 		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
 			return -EINVAL;
 
+		if ((mem_type == TTM_PL_VRAM) &&
+		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
+		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
+			return -EINVAL;
+
 		ttm_bo_pin(&bo->tbo);
 
 		if (max_offset != 0) {
@@ -930,7 +936,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	if (bo->tbo.base.import_attach)
 		dma_buf_pin(bo->tbo.base.import_attach);
 
-	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 	/* force to pin into visible video ram */
 	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
 		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
@@ -983,6 +988,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
  */
 int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
 {
+	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 79120ec413967132dc7addb8642f4e21ee791ff4..9ac37569823ff4c7bc9ef22093c4189b6d969bcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -174,12 +174,12 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
 
 static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
 {
-	return bo->tbo.num_pages << PAGE_SHIFT;
+	return bo->tbo.base.size;
 }
 
 static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
 {
-	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+	return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
 }
 
 static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 347fec66942485414a400a4ac1ae2cb795278943..839917eb7bc3c56995f2a5d9a2b80841f461dbb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -36,6 +36,7 @@
 #include "psp_v12_0.h"
 
 #include "amdgpu_ras.h"
+#include "amdgpu_securedisplay.h"
 
 static int psp_sysfs_init(struct amdgpu_device *adev);
 static void psp_sysfs_fini(struct amdgpu_device *adev);
@@ -249,7 +250,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
 {
 	int ret;
 	int index;
-	int timeout = 2000;
+	int timeout = 20000;
 	bool ras_intr = false;
 	bool skip_unsupport = false;
 
@@ -282,7 +283,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
 		ras_intr = amdgpu_ras_intr_triggered();
 		if (ras_intr)
 			break;
-		msleep(1);
+		usleep_range(10, 100);
 		amdgpu_asic_invalidate_hdp(psp->adev, NULL);
 	}
 
@@ -1652,6 +1653,175 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 }
 // RAP end
 
+/* securedisplay start */
+static int psp_securedisplay_init_shared_buf(struct psp_context *psp)
+{
+	int ret;
+
+	/*
+	 * Allocate 16k memory aligned to 4k from frame buffer (local
+	 * physical) for the securedisplay TA <-> driver exchange.
+	 */
+	ret = amdgpu_bo_create_kernel(psp->adev, PSP_SECUREDISPLAY_SHARED_MEM_SIZE,
+				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+				      &psp->securedisplay_context.securedisplay_shared_bo,
+				      &psp->securedisplay_context.securedisplay_shared_mc_addr,
+				      &psp->securedisplay_context.securedisplay_shared_buf);
+
+	return ret;
+}
+
+static int psp_securedisplay_load(struct psp_context *psp)
+{
+	int ret;
+	struct psp_gfx_cmd_resp *cmd;
+
+	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+	memcpy(psp->fw_pri_buf, psp->ta_securedisplay_start_addr, psp->ta_securedisplay_ucode_size);
+
+	psp_prep_ta_load_cmd_buf(cmd,
+				 psp->fw_pri_mc_addr,
+				 psp->ta_securedisplay_ucode_size,
+				 psp->securedisplay_context.securedisplay_shared_mc_addr,
+				 PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+	if (ret)
+		goto failed;
+
+	psp->securedisplay_context.securedisplay_initialized = true;
+	psp->securedisplay_context.session_id = cmd->resp.session_id;
+	mutex_init(&psp->securedisplay_context.mutex);
+
+failed:
+	kfree(cmd);
+	return ret;
+}
+
+static int psp_securedisplay_unload(struct psp_context *psp)
+{
+	int ret;
+	struct psp_gfx_cmd_resp *cmd;
+
+	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.session_id);
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+	kfree(cmd);
+
+	return ret;
+}
+
+static int psp_securedisplay_initialize(struct psp_context *psp)
+{
+	int ret;
+	struct securedisplay_cmd *securedisplay_cmd;
+
+	/*
+	 * TODO: bypass TA initialization under SR-IOV for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
+	if (!psp->adev->psp.ta_securedisplay_ucode_size ||
+	    !psp->adev->psp.ta_securedisplay_start_addr) {
+		dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
+		return 0;
+	}
+
+	if (!psp->securedisplay_context.securedisplay_initialized) {
+		ret = psp_securedisplay_init_shared_buf(psp);
+		if (ret)
+			return ret;
+	}
+
+	ret = psp_securedisplay_load(psp);
+	if (ret)
+		return ret;
+
+	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+			TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+
+	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+	if (ret) {
+		psp_securedisplay_unload(psp);
+
+		amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
+			      &psp->securedisplay_context.securedisplay_shared_mc_addr,
+			      &psp->securedisplay_context.securedisplay_shared_buf);
+
+		psp->securedisplay_context.securedisplay_initialized = false;
+
+		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialization failed\n");
+		return -EINVAL;
+	}
+
+	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
+		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
+			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
+	}
+
+	return 0;
+}
+
+static int psp_securedisplay_terminate(struct psp_context *psp)
+{
+	int ret;
+
+	/*
+	 * TODO: bypass TA termination under SR-IOV for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
+	if (!psp->securedisplay_context.securedisplay_initialized)
+		return 0;
+
+	ret = psp_securedisplay_unload(psp);
+	if (ret)
+		return ret;
+
+	psp->securedisplay_context.securedisplay_initialized = false;
+
+	/* free securedisplay shared memory */
+	amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
+			      &psp->securedisplay_context.securedisplay_shared_mc_addr,
+			      &psp->securedisplay_context.securedisplay_shared_buf);
+
+	return ret;
+}
+
+int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+	int ret;
+
+	if (!psp->securedisplay_context.securedisplay_initialized)
+		return -EINVAL;
+
+	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
+	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
+		return -EINVAL;
+
+	mutex_lock(&psp->securedisplay_context.mutex);
+
+	ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.session_id);
+
+	mutex_unlock(&psp->securedisplay_context.mutex);
+
+	return ret;
+}
+/* SECUREDISPLAY end */
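A hypothetical caller of this new interface would follow the same
prepare/invoke/check flow as the initialize path above. The sketch below is
an illustration against the helpers this patch introduces
(psp_prep_securedisplay_cmd_buf() and friends from amdgpu_securedisplay.h),
not code from the patch itself:

/* Hypothetical caller sketch for the securedisplay TA interface above. */
static int example_securedisplay_query(struct psp_context *psp)
{
	struct securedisplay_cmd *cmd;
	int ret;

	/* fill the shared buffer with the command to run */
	psp_prep_securedisplay_cmd_buf(psp, &cmd,
				       TA_SECUREDISPLAY_COMMAND__QUERY_TA);

	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
	if (ret)
		return ret;

	/* the TA writes its status back into the shared buffer */
	if (cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
		psp_securedisplay_parse_resp_status(psp, cmd->status);
		return -EINVAL;
	}

	return 0;
}
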
+
 static int psp_hw_start(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
@@ -2126,6 +2296,11 @@ static int psp_load_fw(struct amdgpu_device *adev)
 		if (ret)
 			dev_err(psp->adev->dev,
 				"RAP: Failed to initialize RAP\n");
+
+		ret = psp_securedisplay_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
 	}
 
 	return 0;
@@ -2176,6 +2351,7 @@ static int psp_hw_fini(void *handle)
 
 	if (psp->adev->psp.ta_fw) {
 		psp_ras_terminate(psp);
+		psp_securedisplay_terminate(psp);
 		psp_rap_terminate(psp);
 		psp_dtm_terminate(psp);
 		psp_hdcp_terminate(psp);
@@ -2240,6 +2416,11 @@ static int psp_suspend(void *handle)
 			DRM_ERROR("Failed to terminate rap ta\n");
 			return ret;
 		}
+		ret = psp_securedisplay_terminate(psp);
+		if (ret) {
+			DRM_ERROR("Failed to terminate securedisplay ta\n");
+			return ret;
+		}
 	}
 
 	ret = psp_asd_unload(psp);
@@ -2323,6 +2504,11 @@ static int psp_resume(void *handle)
 		if (ret)
 			dev_err(psp->adev->dev,
 				"RAP: Failed to initialize RAP\n");
+
+		ret = psp_securedisplay_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
 	}
 
 	mutex_unlock(&adev->firmware.mutex);
@@ -2629,6 +2815,11 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
 		psp->ta_rap_ucode_size     = le32_to_cpu(desc->size_bytes);
 		psp->ta_rap_start_addr     = ucode_start_addr;
 		break;
+	case TA_FW_TYPE_PSP_SECUREDISPLAY:
+		psp->ta_securedisplay_ucode_version  = le32_to_cpu(desc->fw_version);
+		psp->ta_securedisplay_ucode_size     = le32_to_cpu(desc->size_bytes);
+		psp->ta_securedisplay_start_addr     = ucode_start_addr;
+		break;
 	default:
 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index da250bc1ac578b9db61abe3caa8f0c6d21c60fa9..cb50ba445f8c86e46c720259f5457ffc5f32e272 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -30,6 +30,7 @@
 #include "ta_xgmi_if.h"
 #include "ta_ras_if.h"
 #include "ta_rap_if.h"
+#include "ta_secureDisplay_if.h"
 
 #define PSP_FENCE_BUFFER_SIZE	0x1000
 #define PSP_CMD_BUFFER_SIZE	0x1000
@@ -40,6 +41,7 @@
 #define PSP_HDCP_SHARED_MEM_SIZE	0x4000
 #define PSP_DTM_SHARED_MEM_SIZE	0x4000
 #define PSP_RAP_SHARED_MEM_SIZE	0x4000
+#define PSP_SECUREDISPLAY_SHARED_MEM_SIZE	0x4000
 #define PSP_SHARED_MEM_SIZE		0x4000
 #define PSP_FW_NAME_LEN		0x24
 
@@ -171,6 +173,15 @@ struct psp_rap_context {
 	struct mutex		mutex;
 };
 
+struct psp_securedisplay_context {
+	bool			securedisplay_initialized;
+	uint32_t		session_id;
+	struct amdgpu_bo	*securedisplay_shared_bo;
+	uint64_t		securedisplay_shared_mc_addr;
+	void			*securedisplay_shared_buf;
+	struct mutex		mutex;
+};
+
 #define MEM_TRAIN_SYSTEM_SIGNATURE		0x54534942
 #define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES	0x1000
 #define GDDR6_MEM_TRAINING_OFFSET		0x8000
@@ -298,12 +309,17 @@ struct psp_context
 	uint32_t			ta_rap_ucode_size;
 	uint8_t				*ta_rap_start_addr;
 
+	uint32_t			ta_securedisplay_ucode_version;
+	uint32_t			ta_securedisplay_ucode_size;
+	uint8_t				*ta_securedisplay_start_addr;
+
 	struct psp_asd_context		asd_context;
 	struct psp_xgmi_context		xgmi_context;
 	struct psp_ras_context		ras;
 	struct psp_hdcp_context 	hdcp_context;
 	struct psp_dtm_context		dtm_context;
 	struct psp_rap_context		rap_context;
+	struct psp_securedisplay_context	securedisplay_context;
 	struct mutex			mutex;
 	struct psp_memory_training_context mem_train_ctx;
 };
@@ -380,6 +396,7 @@ int psp_ras_trigger_error(struct psp_context *psp,
 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 
 int psp_rlc_autoload_start(struct psp_context *psp);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 82e952696d24f4730936948f526d2211205cc003..1fb2a91ad30ad797e06d4ae1b26b91b289f30bd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -846,7 +846,7 @@ static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
 	if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
 		dev_warn(adev->dev, "Failed to allow XGMI power down");
 
-	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
 		dev_warn(adev->dev, "Failed to allow df cstate");
 
 	return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 1a612f51ecd9e58c078411b08a0f3d1e11e599ae..b644c78475fd8de1e53677bccf3333d4db104fa0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -166,7 +166,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
 		     unsigned int irq_type, unsigned int hw_prio)
 {
-	int r, i;
+	int r;
 	int sched_hw_submission = amdgpu_sched_hw_submission;
 	u32 *num_sched;
 	u32 hw_ip;
@@ -258,8 +258,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 	}
 
 	ring->max_dw = max_dw;
-	ring->priority = DRM_SCHED_PRIORITY_NORMAL;
-	mutex_init(&ring->priority_mutex);
+	ring->hw_prio = hw_prio;
 
 	if (!ring->no_scheduler) {
 		hw_ip = ring->funcs->type;
@@ -268,9 +267,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 			&ring->sched;
 	}
 
-	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i)
-		atomic_set(&ring->num_jobs[i], 0);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 7112137689db002b7b73ad600b08578f39b27a57..56acec1075acd7defabce37d0bf6862af174d7f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -197,6 +197,7 @@ struct amdgpu_ring_funcs {
 	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
 	int (*preempt_ib)(struct amdgpu_ring *ring);
 	void (*emit_mem_sync)(struct amdgpu_ring *ring);
+	void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
 };
 
 struct amdgpu_ring {
@@ -242,11 +243,7 @@ struct amdgpu_ring {
 	struct dma_fence	*vmid_wait;
 	bool			has_compute_vm_bug;
 	bool			no_scheduler;
-
-	atomic_t		num_jobs[DRM_SCHED_PRIORITY_COUNT];
-	struct mutex		priority_mutex;
-	/* protected by priority_mutex */
-	int			priority;
+	int			hw_prio;
 
 #if defined(CONFIG_DEBUG_FS)
 	struct dentry *ent;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
new file mode 100644
index 0000000000000000000000000000000000000000..834440ab9ff74740820f11d35a04074947b1273a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_securedisplay.h"
+
+/**
+ * DOC: AMDGPU SECUREDISPLAY debugfs test interface
+ *
+ * Usage:
+ * echo <opcode> [value] > <debugfs_dir>/dri/xxx/securedisplay_test
+ * e.g. echo 1 > <debugfs_dir>/dri/xxx/securedisplay_test
+ * e.g. echo 2 phy_id > <debugfs_dir>/dri/xxx/securedisplay_test
+ *
+ * opcode:
+ * 1: Query whether the TA is responding; used only for validation purposes.
+ * 2: Send the Region of Interest and CRC value to I2C. The (uint32) phy_id
+ * is sent to determine which DIO scratch register should be used to get
+ * the ROI, and i2c_buf is received as the output.
+ *
+ * See the header file ta_securedisplay_if.h for more detail.
+ *
+ */
+
+void psp_securedisplay_parse_resp_status(struct psp_context *psp,
+	enum ta_securedisplay_status status)
+{
+	switch (status) {
+	case TA_SECUREDISPLAY_STATUS__SUCCESS:
+		break;
+	case TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE:
+		dev_err(psp->adev->dev, "Secure display: Generic Failure.");
+		break;
+	case TA_SECUREDISPLAY_STATUS__INVALID_PARAMETER:
+		dev_err(psp->adev->dev, "Secure display: Invalid Parameter.");
+		break;
+	case TA_SECUREDISPLAY_STATUS__NULL_POINTER:
+		dev_err(psp->adev->dev, "Secure display: Null Pointer.");
+		break;
+	case TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR:
+		dev_err(psp->adev->dev, "Secure display: Failed to write to I2C.");
+		break;
+	case TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR:
+		dev_err(psp->adev->dev, "Secure display: Failed to Read DIO Scratch Register.");
+		break;
+	case TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR:
+		dev_err(psp->adev->dev, "Secure display: Failed to Read CRC");
+		break;
+	default:
+		dev_err(psp->adev->dev, "Secure display: Failed to parse status: %d\n", status);
+	}
+}
+
+void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
+	enum ta_securedisplay_command command_id)
+{
+	*cmd = (struct securedisplay_cmd *)psp->securedisplay_context.securedisplay_shared_buf;
+	memset(*cmd, 0, sizeof(struct securedisplay_cmd));
+	(*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE;
+	(*cmd)->cmd_id = command_id;
+}
+
+static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __user *buf,
+		size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+	struct psp_context *psp = &adev->psp;
+	struct securedisplay_cmd *securedisplay_cmd;
+	struct drm_device *dev = adev_to_drm(adev);
+	uint32_t phy_id = 0;
+	uint32_t op = 0;
+	int i;
+	char str[64];
+	char i2c_output[256];
+	int ret;
+
+	if (*pos || size > sizeof(str) - 1)
+		return -EINVAL;
+
+	memset(str, 0, sizeof(str));
+	ret = copy_from_user(str, buf, size);
+	if (ret)
+		return -EFAULT;
+
+	ret = pm_runtime_get_sync(dev->dev);
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(dev->dev);
+		return ret;
+	}
+
+	if (size < 3)
+		sscanf(str, "%u ", &op);
+	else
+		sscanf(str, "%u %u", &op, &phy_id);
+
+	switch (op) {
+	case 1:
+		psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+			TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+		ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+		if (!ret) {
+			if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS)
+				dev_info(adev->dev, "SECUREDISPLAY: query securedisplay TA ret is 0x%X\n",
+					securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
+			else
+				psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+		}
+		break;
+	case 2:
+		psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+			TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+		securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_id;
+		ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+		if (!ret) {
+			if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS) {
+				memset(i2c_output, 0, sizeof(i2c_output));
+				for (i = 0; i < TA_SECUREDISPLAY_I2C_BUFFER_SIZE; i++)
+					sprintf(i2c_output + strlen(i2c_output), " 0x%X",
+						securedisplay_cmd->securedisplay_out_message.send_roi_crc.i2c_buf[i]);
+				dev_info(adev->dev, "SECUREDISPLAY: I2C buffer output is:%s\n", i2c_output);
+			} else {
+				psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+			}
+		}
+		break;
+	default:
+		dev_err(adev->dev, "Invalid input: %s\n", str);
+	}
+
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+
+	return size;
+}
+
+static const struct file_operations amdgpu_securedisplay_debugfs_ops = {
+	.owner = THIS_MODULE,
+	.read = NULL,
+	.write = amdgpu_securedisplay_debugfs_write,
+	.llseek = default_llseek
+};
+
+void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+
+	if (!adev->psp.securedisplay_context.securedisplay_initialized)
+		return;
+
+	debugfs_create_file("securedisplay_test", S_IWUSR, adev_to_drm(adev)->primary->debugfs_root,
+				adev, &amdgpu_securedisplay_debugfs_ops);
+#endif
+}
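For reference, the debugfs file added above can be driven from user space with a plain write(); a minimal sketch follows. The /sys/kernel/debug/dri/0 path is an assumption (the card index varies per system), and debugfs access requires root:

```c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; adjust the dri/<n> index for your system. */
	const char *path = "/sys/kernel/debug/dri/0/securedisplay_test";
	const char *cmd = "2 0";	/* opcode 2 (SEND_ROI_CRC) with phy_id 0 */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The handler parses "opcode [phy_id]"; results land in dmesg. */
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}
```

Note that the interface is write-only (its .read is NULL), so the TA's response is read back from the kernel log rather than from the file.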
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
new file mode 100644
index 0000000000000000000000000000000000000000..fe98574748f440238eab697c8b9a9190ef02a1f0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _AMDGPU_SECUREDISPLAY_H
+#define _AMDGPU_SECUREDISPLAY_H
+
+#include "amdgpu.h"
+#include "ta_secureDisplay_if.h"
+
+void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev);
+void psp_securedisplay_parse_resp_status(struct psp_context *psp,
+		enum ta_securedisplay_status status);
+void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
+		enum ta_securedisplay_command command_id);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 6752d8b131188471d683efde37263283001100e9..792d20261846514a5bab28d0c701c216bd86e1d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -21,7 +21,7 @@
  *
  */
 
-#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
 #define _AMDGPU_TRACE_H_
 
 #include <linux/stringify.h>
@@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_bo_create,
 
 	    TP_fast_assign(
 			   __entry->bo = bo;
-			   __entry->pages = bo->tbo.num_pages;
+			   __entry->pages = bo->tbo.mem.num_pages;
 			   __entry->type = bo->tbo.mem.mem_type;
 			   __entry->prefer = bo->preferred_domains;
 			   __entry->allow = bo->allowed_domains;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 4d8f19ab10144ae66e2764c449b58662c7075e2d..9fd2157b133accc9e71894fcdc9515c764270948 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -46,7 +46,6 @@
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_module.h>
 
 #include <drm/drm_debugfs.h>
 #include <drm/amdgpu_drm.h>
@@ -637,7 +636,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 
 out:
 	/* update statistics */
-	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
+	atomic64_add(bo->base.size, &adev->num_bytes_moved);
 	amdgpu_bo_move_notify(bo, evict, new_mem);
 	return 0;
 }
@@ -918,8 +917,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
 		goto release_sg;
 
 	/* convert SG to linear array of pages and dma addresses */
-	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
-					 gtt->ttm.dma_address, ttm->num_pages);
+	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+				       ttm->num_pages);
 
 	return 0;
 
@@ -1265,9 +1264,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
 			ttm->sg = sgt;
 		}
 
-		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
-						 gtt->ttm.dma_address,
-						 ttm->num_pages);
+		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+					       ttm->num_pages);
 		return 0;
 	}
 
@@ -2124,7 +2122,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 			return r;
 	}
 
-	num_pages = bo->tbo.num_pages;
+	num_pages = bo->tbo.mem.num_pages;
 	mm_node = bo->tbo.mem.mm_node;
 	num_loops = 0;
 	while (num_pages) {
@@ -2154,7 +2152,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		}
 	}
 
-	num_pages = bo->tbo.num_pages;
+	num_pages = bo->tbo.mem.num_pages;
 	mm_node = bo->tbo.mem.mm_node;
 
 	while (num_pages) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 0e43b46d3ab50d50db20dfa224a5a43b3550feb9..46449e70348bea565a144fdb0d152b2bd6b8dda5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -122,6 +122,9 @@ struct ta_firmware_header_v1_0 {
 	uint32_t ta_dtm_ucode_version;
 	uint32_t ta_dtm_offset_bytes;
 	uint32_t ta_dtm_size_bytes;
+	uint32_t ta_securedisplay_ucode_version;
+	uint32_t ta_securedisplay_offset_bytes;
+	uint32_t ta_securedisplay_size_bytes;
 };
 
 enum ta_fw_type {
@@ -132,6 +135,7 @@ enum ta_fw_type {
 	TA_FW_TYPE_PSP_HDCP,
 	TA_FW_TYPE_PSP_DTM,
 	TA_FW_TYPE_PSP_RAP,
+	TA_FW_TYPE_PSP_SECUREDISPLAY,
 };
 
 struct ta_fw_bin_desc {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 8b989670ed663032b6af9abd57acd24829cf38d5..e2ed4689118a9980baef80b677efaf22614a7787 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1170,7 +1170,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	int r, i;
 
 	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_VRAM,
+				      AMDGPU_GEM_DOMAIN_GTT,
 				      &bo, NULL, (void **)&msg);
 	if (r)
 		return r;
@@ -1202,7 +1202,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	int r, i;
 
 	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_VRAM,
+				      AMDGPU_GEM_DOMAIN_GTT,
 				      &bo, NULL, (void **)&msg);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0d5284b936e487818c85224264b4632edd7902ad..ea6a62f67e3809cc5041bbaa9bb8f889ec3e7bbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1160,6 +1160,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 error:
 	dma_fence_put(fence);
 	amdgpu_bo_unreserve(bo);
-	amdgpu_bo_unref(&bo);
+	amdgpu_bo_free_kernel(&bo, NULL, NULL);
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 4a77c7424dfc7d986e35989bd25bc5d6e02c1b24..99b82f3c26177c7d5fecc7ee93ed875d3f9f0be6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -496,6 +496,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	uint64_t addr;
+	void *msg = NULL;
 	int i, r;
 
 	r = amdgpu_job_alloc_with_ib(adev, 64,
@@ -505,6 +506,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 
 	ib = &job->ibs[0];
 	addr = amdgpu_bo_gpu_offset(bo);
+	msg = amdgpu_bo_kptr(bo);
 	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
 	ib->ptr[1] = addr;
 	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
@@ -523,7 +525,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 
 	amdgpu_bo_fence(bo, f, false);
 	amdgpu_bo_unreserve(bo);
-	amdgpu_bo_unref(&bo);
+	amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
 
 	if (fence)
 		*fence = dma_fence_get(f);
@@ -536,7 +538,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 
 err:
 	amdgpu_bo_unreserve(bo);
-	amdgpu_bo_unref(&bo);
+	amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
 	return r;
 }
 
@@ -890,6 +892,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 error:
 	dma_fence_put(fence);
 	amdgpu_bo_unreserve(bo);
-	amdgpu_bo_unref(&bo);
+	amdgpu_bo_free_kernel(&bo, NULL, NULL);
+
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 2d51b7694d1fdf5806e0479efa39aaf34e74de88..5da04d45b63795a0305178719dcdbe2025a95cef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -560,10 +560,14 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
 static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
 {
 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
+	int ret;
 
-	amdgpu_virt_read_pf2vf_data(adev);
+	ret = amdgpu_virt_read_pf2vf_data(adev);
+	if (ret)
+		goto out;
 	amdgpu_virt_write_vf2pf_data(adev);
 
+out:
 	schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
 }
 
@@ -571,8 +575,8 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
 {
 	if (adev->virt.vf2pf_update_interval_ms != 0) {
 		DRM_INFO("clean up the vf2pf work item\n");
-		flush_delayed_work(&adev->virt.vf2pf_work);
 		cancel_delayed_work_sync(&adev->virt.vf2pf_work);
+		adev->virt.vf2pf_update_interval_ms = 0;
 	}
 }
 
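The teardown above relies on a common delayed-work idiom: the non-zero interval doubles as an "exchange armed" flag, and cancel_delayed_work_sync() alone suffices even though the handler re-queues itself. A stripped-down sketch with simplified, hypothetical types (not the driver's real layout):

```c
#include <linux/workqueue.h>

/* Simplified stand-in for amdgpu's virt state. */
struct virt_ctx {
	struct delayed_work vf2pf_work;
	unsigned long vf2pf_update_interval_ms;	/* 0 == exchange disabled */
};

static void virt_fini_data_exchange(struct virt_ctx *v)
{
	if (v->vf2pf_update_interval_ms != 0) {
		/* cancel_delayed_work_sync() waits for a running instance and
		 * also catches a self-requeue, so no prior flush is needed. */
		cancel_delayed_work_sync(&v->vf2pf_work);
		v->vf2pf_update_interval_ms = 0;
	}
}
```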
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0768c868698365785a3ebc36fdcf92c773d2a4f3..ad91c0c3c42357cd3499df51286a77e86c861cf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -653,9 +653,11 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 		if (!bo->parent)
 			continue;
 
-		ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
+		ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
+					&vm->lru_bulk_move);
 		if (bo->shadow)
 			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
+						&bo->shadow->tbo.mem,
 						&vm->lru_bulk_move);
 	}
 	spin_unlock(&ttm_bo_glob.lru_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index d2de2a720a3d86fcdca9feaed5d562fc8141238e..c89b66bb70e2e7c03de3723c304121907029e8d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -473,6 +473,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	for (i = 0; pages_left >= pages_per_node; ++i) {
 		unsigned long pages = rounddown_pow_of_two(pages_left);
 
+		/* Limit maximum size to 2GB due to SG table limitations */
+		pages = min(pages, (2UL << (30 - PAGE_SHIFT)));
+
 		r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
 						pages_per_node, 0,
 						place->fpfn, lpfn,
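The clamp above is easier to read with the arithmetic spelled out: 2UL << (30 - PAGE_SHIFT) is exactly the page count corresponding to 2 GiB. A quick standalone check, assuming 4 KiB pages:

```c
#include <stdio.h>

#define PAGE_SHIFT 12UL	/* assumption: 4 KiB pages */

int main(void)
{
	/* 2UL << (30 - 12) == 524288 pages; 524288 << 12 == 2 GiB */
	unsigned long max_pages = 2UL << (30 - PAGE_SHIFT);

	printf("max pages per node: %lu\n", max_pages);
	printf("max bytes per node: %lu\n", max_pages << PAGE_SHIFT);
	return 0;
}
```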
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 541ef6be390f09d0bfe55872acb9ed39b8ef6284..659b385b27b5a04ca02b9e2a1f7b450ab1a41168 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -324,7 +324,7 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
 
 struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
 {
-	struct amdgpu_hive_info *hive = NULL, *tmp = NULL;
+	struct amdgpu_hive_info *hive = NULL;
 	int ret;
 
 	if (!adev->gmc.xgmi.hive_id)
@@ -337,11 +337,9 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
 
 	mutex_lock(&xgmi_mutex);
 
-	if (!list_empty(&xgmi_hive_list)) {
-		list_for_each_entry_safe(hive, tmp, &xgmi_hive_list, node)  {
-			if (hive->hive_id == adev->gmc.xgmi.hive_id)
-				goto pro_end;
-		}
+	list_for_each_entry(hive, &xgmi_hive_list, node)  {
+		if (hive->hive_id == adev->gmc.xgmi.hive_id)
+			goto pro_end;
 	}
 
 	hive = kzalloc(sizeof(*hive), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
index 921a69abda55526aab2c40036a9e945547d82f4f..5b90efd6f6d04721fdf828801cabe5b56792a278 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
@@ -27,7 +27,6 @@
 #include "athub/athub_2_0_0_offset.h"
 #include "athub/athub_2_0_0_sh_mask.h"
 #include "athub/athub_2_0_0_default.h"
-#include "navi10_enum.h"
 
 #include "soc15_common.h"
 
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
index 66c183ddd43e58ea136377a6f2731aeb7087bf7f..7b1b18350bf92aa7b9baf4330747f8ff072c5413 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
@@ -26,7 +26,6 @@
 
 #include "athub/athub_2_1_0_offset.h"
 #include "athub/athub_2_1_0_sh_mask.h"
-#include "navi10_enum.h"
 
 #include "soc15_common.h"
 
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 13737b317f7c22e5784c9d09d96d200ef06dd200..4d6832cc7fb066511a21d4cd37ad7225cd268beb 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1251,13 +1251,22 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
 	WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
 }
 
-static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
+/**
+ * cik_asic_pci_config_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use PCI Config method to reset the GPU.
+ *
+ * Returns 0 for success.
+ */
+static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
 {
 	struct kv_reset_save_regs kv_save = { 0 };
 	u32 i;
 	int r = -EINVAL;
 
-	dev_info(adev->dev, "GPU pci config reset\n");
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
 
 	if (adev->flags & AMD_IS_APU)
 		kv_save_regs_for_reset(adev, &kv_save);
@@ -1285,26 +1294,6 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
 	if (adev->flags & AMD_IS_APU)
 		kv_restore_regs_for_reset(adev, &kv_save);
 
-	return r;
-}
-
-/**
- * cik_asic_pci_config_reset - soft reset GPU
- *
- * @adev: amdgpu_device pointer
- *
- * Use PCI Config method to reset the GPU.
- *
- * Returns 0 for success.
- */
-static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
-{
-	int r;
-
-	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
-	r = cik_gpu_pci_config_reset(adev);
-
 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
 
 	return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index da37f8a900afb737ee1d55b1e62fd76445483542..307c01301c87a7f600e98bf0c927376028abb863 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -194,19 +194,29 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
 
 	wptr = le32_to_cpu(*ih->wptr_cpu);
 
-	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
-		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
-		/* When a ring buffer overflow happen start parsing interrupt
-		 * from the last not overwritten vector (wptr + 16). Hopefully
-		 * this should allow us to catchup.
-		 */
-		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
-			wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
-		ih->rptr = (wptr + 16) & ih->ptr_mask;
-		tmp = RREG32(mmIH_RB_CNTL);
-		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
-		WREG32(mmIH_RB_CNTL, tmp);
-	}
+	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+		goto out;
+
+	/* Double check that the overflow wasn't already cleared. */
+	wptr = RREG32(mmIH_RB_WPTR);
+
+	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+		goto out;
+
+	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+	/* When a ring buffer overflow happens, start parsing interrupts
+	 * from the last vector that was not overwritten (wptr + 16).
+	 * Hopefully this allows us to catch up.
+	 */
+	dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+		wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+	ih->rptr = (wptr + 16) & ih->ptr_mask;
+	tmp = RREG32(mmIH_RB_CNTL);
+	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+	WREG32(mmIH_RB_CNTL, tmp);
+
+out:
 	return (wptr & ih->ptr_mask);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index ffcc64ec64733f4f99147f6667fb8e55af0eea34..9810af712cc0d1d01164b1255a0db093fa33e6f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -294,7 +294,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
 	static const struct mode_size {
 		int w;
 		int h;
-	} common_modes[21] = {
+	} common_modes[] = {
 		{ 640,  480},
 		{ 720,  480},
 		{ 800,  600},
@@ -312,13 +312,14 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
 		{1600, 1200},
 		{1920, 1080},
 		{1920, 1200},
+		{2560, 1440},
 		{4096, 3112},
 		{3656, 2664},
 		{3840, 2160},
 		{4096, 2160},
 	};
 
-	for (i = 0; i < 21; i++) {
+	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
 		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
 		drm_mode_probed_add(connector, mode);
 	}
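The hunk above swaps the hard-coded 21 for ARRAY_SIZE() so adding {2560, 1440} (or any future mode) cannot desynchronize the loop bound from the table. The idiom in plain C (the kernel's ARRAY_SIZE additionally type-checks its argument):

```c
#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	static const struct { int w, h; } modes[] = {
		{ 640, 480 }, { 1920, 1080 }, { 2560, 1440 }, { 3840, 2160 },
	};
	size_t i;

	/* The bound tracks the initializer automatically. */
	for (i = 0; i < ARRAY_SIZE(modes); i++)
		printf("%dx%d\n", modes[i].w, modes[i].h);
	return 0;
}
```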
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index d86b42a36560183b4d1ef0bde1a199800bec4285..45d1172b7bff93e8ef38e87859467c5944e93e0c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -38,7 +38,6 @@
 #include "smuio/smuio_11_0_0_offset.h"
 #include "smuio/smuio_11_0_0_sh_mask.h"
 #include "navi10_enum.h"
-#include "hdp/hdp_5_0_0_offset.h"
 #include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
 
 #include "soc15.h"
@@ -71,6 +70,11 @@
 #define GB_ADDR_CONFIG__NUM_PKRS__SHIFT                                                                       0x8
 #define GB_ADDR_CONFIG__NUM_PKRS_MASK                                                                         0x00000700L
 
+#define mmCGTS_TCC_DISABLE_gc_10_3                 0x5006
+#define mmCGTS_TCC_DISABLE_gc_10_3_BASE_IDX        1
+#define mmCGTS_USER_TCC_DISABLE_gc_10_3            0x5007
+#define mmCGTS_USER_TCC_DISABLE_gc_10_3_BASE_IDX   1
+
 #define mmCP_MEC_CNTL_Sienna_Cichlid                      0x0f55
 #define mmCP_MEC_CNTL_Sienna_Cichlid_BASE_IDX             0
 #define mmRLC_SAFE_MODE_Sienna_Cichlid			0x4ca0
@@ -99,10 +103,6 @@
 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid			0x1580
 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX	0
 
-#define mmCGTS_TCC_DISABLE_Vangogh                0x5006
-#define mmCGTS_TCC_DISABLE_Vangogh_BASE_IDX       1
-#define mmCGTS_USER_TCC_DISABLE_Vangogh                0x5007
-#define mmCGTS_USER_TCC_DISABLE_Vangogh_BASE_IDX       1
 #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh                0x0025
 #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX       1
 #define mmGOLDEN_TSC_COUNT_LOWER_Vangogh                0x0026
@@ -125,6 +125,7 @@
 #define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX       1
 #define mmGCR_GENERAL_CNTL_Vangogh               0x1580
 #define mmGCR_GENERAL_CNTL_Vangogh_BASE_IDX      0
+#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh   0x0000FFFFL
 
 #define mmCP_HYP_PFP_UCODE_ADDR			0x5814
 #define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX	1
@@ -3782,9 +3783,6 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
 		if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
 			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 		break;
-	case CHIP_VANGOGH:
-		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
-		break;
 	default:
 		break;
 	}
@@ -4494,8 +4492,7 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
 		+ ring->pipe;
-	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
-							    ring->queue) ?
+	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
 	/* type-2 packets are deprecated on MEC, use type-3 instead */
 	r = amdgpu_ring_init(adev, ring, 1024,
@@ -4942,15 +4939,12 @@ static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
 	/* TCCs are global (not instanced). */
 	uint32_t tcc_disable;
 
-	switch (adev->asic_type) {
-	case CHIP_VANGOGH:
-		tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_Vangogh) |
-				RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_Vangogh);
-		break;
-	default:
+	if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+		tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_gc_10_3) |
+			      RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_gc_10_3);
+	} else {
 		tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
-				RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
-		break;
+			      RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
 	}
 
 	adev->gfx.config.tcc_disabled_mask =
@@ -5715,7 +5709,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
 	}
 
 	if (amdgpu_emu_mode == 1)
-		adev->nbio.funcs->hdp_flush(adev, NULL);
+		adev->hdp.funcs->flush_hdp(adev, NULL);
 
 	tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -5793,7 +5787,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
 	}
 
 	if (amdgpu_emu_mode == 1)
-		adev->nbio.funcs->hdp_flush(adev, NULL);
+		adev->hdp.funcs->flush_hdp(adev, NULL);
 
 	tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
 	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -5870,7 +5864,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
 	}
 
 	if (amdgpu_emu_mode == 1)
-		adev->nbio.funcs->hdp_flush(adev, NULL);
+		adev->hdp.funcs->flush_hdp(adev, NULL);
 
 	tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -6239,7 +6233,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
 	}
 
 	if (amdgpu_emu_mode == 1)
-		adev->nbio.funcs->hdp_flush(adev, NULL);
+		adev->hdp.funcs->flush_hdp(adev, NULL);
 
 	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -6547,8 +6541,7 @@ static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct
 	struct amdgpu_device *adev = ring->adev;
 
 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
-							      ring->queue)) {
+		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
 			mqd->cp_hqd_queue_priority =
 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
@@ -7847,6 +7840,20 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable)
 		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
 
 	WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, data);
+
+	/*
+	 * CGPG enablement requires programming the hysteresis register
+	 * RLC_PG_DELAY_3.CGCG_ACTIVE_BEFORE_CGPG with the desired CGPG
+	 * hysteresis value in refclk counts. Note that the RLC FW has been
+	 * modified to take 16 bits from RLC_PG_DELAY_3[15:0] as the
+	 * hysteresis instead of just 8 bits.
+	 *
+	 * The RLC team recommends 200us (0x4E20) as the CGPG enablement starting point.
+	 */
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && adev->asic_type == CHIP_VANGOGH) {
+		data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
+		WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
+	}
 }
 
 static void gfx_v10_cntl_pg(struct amdgpu_device *adev, bool enable)
@@ -7908,6 +7915,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
 		break;
 	case CHIP_VANGOGH:
 		gfx_v10_cntl_pg(adev, enable);
+		amdgpu_gfx_off_ctrl(adev, enable);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 37639214cbbbd5edeca3e97a93360c4e4ab32020..84d2eaa381013781ec62d71a2fa63c48a5a63830 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -29,6 +29,7 @@
 
 #include "amdgpu.h"
 #include "amdgpu_gfx.h"
+#include "amdgpu_ring.h"
 #include "vi.h"
 #include "vi_structs.h"
 #include "vid.h"
@@ -1923,8 +1924,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
 		+ ring->pipe;
 
-	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
-							    ring->queue) ?
+	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
 	/* type-2 packets are deprecated on MEC, use type-3 instead */
 	r = amdgpu_ring_init(adev, ring, 1024,
@@ -4442,8 +4442,7 @@ static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *m
 	struct amdgpu_device *adev = ring->adev;
 
 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
-							      ring->queue)) {
+		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
 			mqd->cp_hqd_queue_priority =
 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
@@ -6847,6 +6846,66 @@ static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 0x0000000A);	/* poll interval */
 }
 
+
+/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
+#define mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT	0x0000007f
+static void gfx_v8_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
+					uint32_t pipe, bool enable)
+{
+	uint32_t val;
+	uint32_t wcl_cs_reg;
+
+	val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT;
+
+	switch (pipe) {
+	case 0:
+		wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS0;
+		break;
+	case 1:
+		wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS1;
+		break;
+	case 2:
+		wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS2;
+		break;
+	case 3:
+		wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS3;
+		break;
+	default:
+		DRM_DEBUG("invalid pipe %d\n", pipe);
+		return;
+	}
+
+	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
+
+}
+
+#define mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT	0x07ffffff
+static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t val;
+	int i;
+
+	/* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to
+	 * limit the number of gfx waves. Setting 5 bits makes sure gfx only
+	 * gets around 25% of the gpu resources.
+	 */
+	val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
+	amdgpu_ring_emit_wreg(ring, mmSPI_WCL_PIPE_PERCENT_GFX, val);
+
+	/* Restrict waves for normal/low priority compute queues as well
+	 * to get the best QoS for high priority compute jobs.
+	 *
+	 * amdgpu controls only the 1st ME (CS pipes 0-3).
+	 */
+	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
+		if (i != ring->pipe)
+			gfx_v8_0_emit_wave_limit_cs(ring, i, enable);
+
+	}
+
+}
+
 static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
 	.name = "gfx_v8_0",
 	.early_init = gfx_v8_0_early_init,
@@ -6930,7 +6989,9 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
 		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
 		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
 		7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
-		7, /* gfx_v8_0_emit_mem_sync_compute */
+		7 + /* gfx_v8_0_emit_mem_sync_compute */
+		5 + /* gfx_v8_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
+		15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
 	.emit_ib_size =	7, /* gfx_v8_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
@@ -6944,6 +7005,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_wreg = gfx_v8_0_ring_emit_wreg,
 	.emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
+	.emit_wave_limit = gfx_v8_0_emit_wave_limit,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
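The ~25% figure in the wave-limit comments follows from treating the field as a 7-bit multiplier, where 0x7f means 100%. A quick check of that assumption:

```c
#include <stdio.h>

int main(void)
{
	unsigned int limited = 0x1f;	/* value written while limiting */
	unsigned int full = 0x7f;	/* 7-bit field fully set */

	/* 31/127 is roughly 24.4, i.e. "around 25%" as the comment says */
	printf("gfx share when limited: %.1f%%\n", 100.0 * limited / full);
	return 0;
}
```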
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 5f4805e4d04ac840ededc9ca72d248cc514e95d6..65db88bb6cbcd6a3d77c64524cbeddba933e7662 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -38,7 +38,6 @@
 #include "gc/gc_9_0_sh_mask.h"
 
 #include "vega10_enum.h"
-#include "hdp/hdp_4_0_offset.h"
 
 #include "soc15_common.h"
 #include "clearstate_gfx9.h"
@@ -53,6 +52,7 @@
 
 #include "asic_reg/pwr/pwr_10_0_offset.h"
 #include "asic_reg/pwr/pwr_10_0_sh_mask.h"
+#include "asic_reg/gc/gc_9_0_default.h"
 
 #define GFX9_NUM_GFX_RINGS     1
 #define GFX9_MEC_HPD_SIZE 4096
@@ -2228,8 +2228,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
 		+ ring->pipe;
-	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
-							    ring->queue) ?
+	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
 	/* type-2 packets are deprecated on MEC, use type-3 instead */
 	return amdgpu_ring_init(adev, ring, 1024,
@@ -3391,9 +3390,7 @@ static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *m
 	struct amdgpu_device *adev = ring->adev;
 
 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-		if (amdgpu_gfx_is_high_priority_compute_queue(adev,
-							      ring->pipe,
-							      ring->queue)) {
+		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
 			mqd->cp_hqd_queue_priority =
 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
@@ -6671,6 +6668,65 @@ static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
 }
 
+static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
+					uint32_t pipe, bool enable)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t val;
+	uint32_t wcl_cs_reg;
+
+	/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
+	val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT;
+
+	switch (pipe) {
+	case 0:
+		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0);
+		break;
+	case 1:
+		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1);
+		break;
+	case 2:
+		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2);
+		break;
+	case 3:
+		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3);
+		break;
+	default:
+		DRM_DEBUG("invalid pipe %d\n", pipe);
+		return;
+	}
+
+	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
+}
+
+static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t val;
+	int i;
+
+
+	/* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to
+	 * limit the number of gfx waves. Setting 5 bits makes sure gfx only
+	 * gets around 25% of the gpu resources.
+	 */
+	val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
+	amdgpu_ring_emit_wreg(ring,
+			      SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX),
+			      val);
+
+	/* Restrict waves for normal/low priority compute queues as well
+	 * to get the best QoS for high priority compute jobs.
+	 *
+	 * amdgpu controls only the 1st ME (CS pipes 0-3).
+	 */
+	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
+		if (i != ring->pipe)
+			gfx_v9_0_emit_wave_limit_cs(ring, i, enable);
+
+	}
+}
+
 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
 	.name = "gfx_v9_0",
 	.early_init = gfx_v9_0_early_init,
@@ -6760,7 +6816,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
 		2 + /* gfx_v9_0_ring_emit_vm_flush */
 		8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
-		7, /* gfx_v9_0_emit_mem_sync */
+		7 + /* gfx_v9_0_emit_mem_sync */
+		5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
+		15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
 	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6776,6 +6834,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
 	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
+	.emit_wave_limit = gfx_v9_0_emit_wave_limit,
 };
 
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 5648c48be77f0481cf1f78afd85285d963e0dfec..3b7c6c31fce1f17590a11b4b03b0e59db224e37c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -27,8 +27,6 @@
 #include "gmc_v10_0.h"
 #include "umc_v8_7.h"
 
-#include "hdp/hdp_5_0_0_offset.h"
-#include "hdp/hdp_5_0_0_sh_mask.h"
 #include "athub/athub_2_0_0_sh_mask.h"
 #include "athub/athub_2_0_0_offset.h"
 #include "dcn/dcn_2_0_0_offset.h"
@@ -312,7 +310,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	int r;
 
 	/* flush hdp cache */
-	adev->nbio.funcs->hdp_flush(adev, NULL);
+	adev->hdp.funcs->flush_hdp(adev, NULL);
 
 	/* For SRIOV run time, driver shouldn't access the register through MMIO
 	 * Directly use kiq to do the vm invalidation instead
@@ -995,7 +993,6 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
 {
 	int r;
 	bool value;
-	u32 tmp;
 
 	if (adev->gart.bo == NULL) {
 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -1014,15 +1011,10 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
 	if (r)
 		return r;
 
-	tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
-	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
-	WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
-
-	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
-	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
+	adev->hdp.funcs->init_registers(adev);
 
 	/* Flush HDP after it is initialized */
-	adev->nbio.funcs->hdp_flush(adev, NULL);
+	adev->hdp.funcs->flush_hdp(adev, NULL);
 
 	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
 		false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index e22268f9dba7315a64e0abe52d68f06806e3ef7b..3686e777c76cb232cfbfbafc66680bb1a073db34 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -31,8 +31,6 @@
 #include "amdgpu_atomfirmware.h"
 #include "amdgpu_gem.h"
 
-#include "hdp/hdp_4_0_offset.h"
-#include "hdp/hdp_4_0_sh_mask.h"
 #include "gc/gc_9_0_sh_mask.h"
 #include "dce/dce_12_0_offset.h"
 #include "dce/dce_12_0_sh_mask.h"
@@ -241,60 +239,44 @@ static const char *mmhub_client_ids_vega20[][2] = {
 };
 
 static const char *mmhub_client_ids_arcturus[][2] = {
+	[0][0] = "DBGU1",
+	[1][0] = "XDP",
 	[2][0] = "MP1",
-	[3][0] = "MP0",
-	[10][0] = "UTCL2",
-	[13][0] = "OSS",
 	[14][0] = "HDP",
-	[15][0] = "SDMA0",
-	[32+15][0] = "SDMA1",
-	[64+15][0] = "SDMA2",
-	[96+15][0] = "SDMA3",
-	[128+15][0] = "SDMA4",
-	[160+11][0] = "JPEG",
-	[160+12][0] = "VCN",
-	[160+13][0] = "VCNU",
-	[160+15][0] = "SDMA5",
-	[192+10][0] = "UTCL2",
-	[192+11][0] = "JPEG1",
-	[192+12][0] = "VCN1",
-	[192+13][0] = "VCN1U",
-	[192+15][0] = "SDMA6",
-	[224+15][0] = "SDMA7",
+	[171][0] = "JPEG",
+	[172][0] = "VCN",
+	[173][0] = "VCNU",
+	[203][0] = "JPEG1",
+	[204][0] = "VCN1",
+	[205][0] = "VCN1U",
+	[256][0] = "SDMA0",
+	[257][0] = "SDMA1",
+	[258][0] = "SDMA2",
+	[259][0] = "SDMA3",
+	[260][0] = "SDMA4",
+	[261][0] = "SDMA5",
+	[262][0] = "SDMA6",
+	[263][0] = "SDMA7",
+	[384][0] = "OSS",
 	[0][1] = "DBGU1",
 	[1][1] = "XDP",
 	[2][1] = "MP1",
-	[3][1] = "MP0",
-	[13][1] = "OSS",
 	[14][1] = "HDP",
-	[15][1] = "SDMA0",
-	[32+15][1] = "SDMA1",
-	[64+15][1] = "SDMA2",
-	[96+15][1] = "SDMA3",
-	[128+15][1] = "SDMA4",
-	[160+11][1] = "JPEG",
-	[160+12][1] = "VCN",
-	[160+13][1] = "VCNU",
-	[160+15][1] = "SDMA5",
-	[192+11][1] = "JPEG1",
-	[192+12][1] = "VCN1",
-	[192+13][1] = "VCN1U",
-	[192+15][1] = "SDMA6",
-	[224+15][1] = "SDMA7",
-};
-
-static const u32 golden_settings_vega10_hdp[] =
-{
-	0xf64, 0x0fffffff, 0x00000000,
-	0xf65, 0x0fffffff, 0x00000000,
-	0xf66, 0x0fffffff, 0x00000000,
-	0xf67, 0x0fffffff, 0x00000000,
-	0xf68, 0x0fffffff, 0x00000000,
-	0xf6a, 0x0fffffff, 0x00000000,
-	0xf6b, 0x0fffffff, 0x00000000,
-	0xf6c, 0x0fffffff, 0x00000000,
-	0xf6d, 0x0fffffff, 0x00000000,
-	0xf6e, 0x0fffffff, 0x00000000,
+	[171][1] = "JPEG",
+	[172][1] = "VCN",
+	[173][1] = "VCNU",
+	[203][1] = "JPEG1",
+	[204][1] = "VCN1",
+	[205][1] = "VCN1U",
+	[256][1] = "SDMA0",
+	[257][1] = "SDMA1",
+	[258][1] = "SDMA2",
+	[259][1] = "SDMA3",
+	[260][1] = "SDMA4",
+	[261][1] = "SDMA5",
+	[262][1] = "SDMA6",
+	[263][1] = "SDMA7",
+	[384][1] = "OSS",
 };
 
 static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
@@ -1571,7 +1553,6 @@ static int gmc_v9_0_hw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	bool value;
 	int r, i;
-	u32 tmp;
 
 	/* The sequence of these two function calls matters.*/
 	gmc_v9_0_init_golden_registers(adev);
@@ -1583,31 +1564,13 @@ static int gmc_v9_0_hw_init(void *handle)
 		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
 	}
 
-	amdgpu_device_program_register_sequence(adev,
-						golden_settings_vega10_hdp,
-						ARRAY_SIZE(golden_settings_vega10_hdp));
-
 	if (adev->mmhub.funcs->update_power_gating)
 		adev->mmhub.funcs->update_power_gating(adev, true);
 
-	switch (adev->asic_type) {
-	case CHIP_ARCTURUS:
-		WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
-		break;
-	default:
-		break;
-	}
-
-	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
-
-	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
-	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
-
-	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
-	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
+	adev->hdp.funcs->init_registers(adev);
 
 	/* After HDP is initialized, flush HDP.*/
-	adev->nbio.funcs->hdp_flush(adev, NULL);
+	adev->hdp.funcs->flush_hdp(adev, NULL);
 
 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 		value = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
new file mode 100644
index 0000000000000000000000000000000000000000..e46621fed5b953952e3ccddbae27fd55aac67fff
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "hdp_v4_0.h"
+#include "amdgpu_ras.h"
+
+#include "hdp/hdp_4_0_offset.h"
+#include "hdp/hdp_4_0_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+/* for Vega20 register name change */
+#define mmHDP_MEM_POWER_CTRL    0x00d4
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK  0x00000001L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK    0x00000002L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK   0x00010000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK     0x00020000L
+#define mmHDP_MEM_POWER_CTRL_BASE_IDX   0
+
+static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
+				struct amdgpu_ring *ring)
+{
+	if (!ring || !ring->funcs->emit_wreg)
+		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+	else
+		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+}
+
+static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
+				    struct amdgpu_ring *ring)
+{
+	if (!ring || !ring->funcs->emit_wreg)
+		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+	else
+		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
+}
+
+static void hdp_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
+		return;
+	/* read back hdp ras counter to reset it to 0 */
+	RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
+}
+
+static void hdp_v4_0_update_clock_gating(struct amdgpu_device *adev,
+					 bool enable)
+{
+	uint32_t def, data;
+
+	if (adev->asic_type == CHIP_VEGA10 ||
+	    adev->asic_type == CHIP_VEGA12 ||
+	    adev->asic_type == CHIP_RAVEN) {
+		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+
+		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+			data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+		else
+			data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+
+		if (def != data)
+			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
+	} else {
+		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
+
+		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
+		else
+			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+				  HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
+
+		if (def != data)
+			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
+	}
+}
+
+static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev,
+					    u32 *flags)
+{
+	int data;
+
+	/* AMD_CG_SUPPORT_HDP_LS */
+	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
+		*flags |= AMD_CG_SUPPORT_HDP_LS;
+}
+
+static void hdp_v4_0_init_registers(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_ARCTURUS:
+		WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
+		break;
+	default:
+		break;
+	}
+
+	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
+
+	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
+	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
+}
+
+const struct amdgpu_hdp_funcs hdp_v4_0_funcs = {
+	.flush_hdp = hdp_v4_0_flush_hdp,
+	.invalidate_hdp = hdp_v4_0_invalidate_hdp,
+	.reset_ras_error_count = hdp_v4_0_reset_ras_error_count,
+	.update_clock_gating = hdp_v4_0_update_clock_gating,
+	.get_clock_gating_state = hdp_v4_0_get_clockgating_state,
+	.init_registers = hdp_v4_0_init_registers,
+};
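The new HDP block follows amdgpu's usual IP abstraction: a per-version function table that common code calls through, replacing the asic_type switches and direct nbio hdp_flush calls removed above. A schematic sketch with simplified, hypothetical types (the real amdgpu_hdp_funcs also carries RAS and clock-gating callbacks):

```c
#include <stddef.h>

struct hdp_funcs {
	void (*flush_hdp)(void *adev, void *ring);
	void (*init_registers)(void *adev);
};

struct hdp {
	const struct hdp_funcs *funcs;	/* set once per IP version */
};

/* Common code stays version-agnostic, like the gmc_v9_0/gmc_v10_0 hunks
 * above that now call adev->hdp.funcs->init_registers()/flush_hdp(). */
static void gart_enable_common(struct hdp *hdp, void *adev)
{
	hdp->funcs->init_registers(adev);
	hdp->funcs->flush_hdp(adev, NULL);	/* NULL ring: MMIO path */
}
```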
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h
new file mode 100644
index 0000000000000000000000000000000000000000..d1e6399e8c46ef9e86cf9e521f11bbdf73474a26
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HDP_V4_0_H__
+#define __HDP_V4_0_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_hdp_funcs hdp_v4_0_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
new file mode 100644
index 0000000000000000000000000000000000000000..7a15e669b68db0517fc6f19c6aad62cbdb183457
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "hdp_v5_0.h"
+
+#include "hdp/hdp_5_0_0_offset.h"
+#include "hdp/hdp_5_0_0_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
+
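+/* Flush the HDP write cache by writing 0 to the remapped
+ * HDP_MEM_FLUSH_CNTL register, either directly over MMIO or, when a
+ * capable ring is given, through a ring write-register packet.
+ */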
+static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
+				struct amdgpu_ring *ring)
+{
+	if (!ring || !ring->funcs->emit_wreg)
+		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+	else
+		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+}
+
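+/* Invalidate the HDP read cache by writing 1 to
+ * HDP_READ_CACHE_INVALIDATE, via MMIO or a ring write-register packet.
+ */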
+static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
+				    struct amdgpu_ring *ring)
+{
+	if (!ring || !ring->funcs->emit_wreg) {
+		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+	} else {
+		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
+	}
+}
+
+static void hdp_v5_0_update_mem_power_gating(struct amdgpu_device *adev,
+					  bool enable)
+{
+	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
+	uint32_t hdp_mem_pwr_cntl;
+
+	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
+				AMD_CG_SUPPORT_HDP_DS |
+				AMD_CG_SUPPORT_HDP_SD)))
+		return;
+
+	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
+	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
+
+	/* Before doing the clock/power mode switch,
+	 * force on the IPH & RC clocks */
+	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
+	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
+	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
+
+	/* HDP 5.0 doesn't support a dynamic power mode switch;
+	 * disable clock and power gating before making any changes */
+	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+					 IPH_MEM_POWER_CTRL_EN, 0);
+	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+					 IPH_MEM_POWER_LS_EN, 0);
+	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+					 IPH_MEM_POWER_DS_EN, 0);
+	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+					 IPH_MEM_POWER_SD_EN, 0);
+	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+					 RC_MEM_POWER_CTRL_EN, 0);
+	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+					 RC_MEM_POWER_LS_EN, 0);
+	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+					 RC_MEM_POWER_DS_EN, 0);
+	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+					 RC_MEM_POWER_SD_EN, 0);
+	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+	/* only one clock gating mode (LS/DS/SD) can be enabled */
+	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+						 HDP_MEM_POWER_CTRL,
+						 IPH_MEM_POWER_LS_EN, enable);
+		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+						 HDP_MEM_POWER_CTRL,
+						 RC_MEM_POWER_LS_EN, enable);
+	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
+		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+						 HDP_MEM_POWER_CTRL,
+						 IPH_MEM_POWER_DS_EN, enable);
+		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+						 HDP_MEM_POWER_CTRL,
+						 RC_MEM_POWER_DS_EN, enable);
+	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
+		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+						 HDP_MEM_POWER_CTRL,
+						 IPH_MEM_POWER_SD_EN, enable);
+		/* RC should not use shutdown mode, fall back to DS */
+		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+						 HDP_MEM_POWER_CTRL,
+						 RC_MEM_POWER_DS_EN, enable);
+	}
+
+	/* it is confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN
+	 * have to be set for SRAM LS/DS/SD */
+	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
+			      AMD_CG_SUPPORT_HDP_SD)) {
+		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+						 IPH_MEM_POWER_CTRL_EN, 1);
+		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+						 RC_MEM_POWER_CTRL_EN, 1);
+	}
+
+	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+	/* restore the IPH & RC clock overrides after the clock/power mode change */
+	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
+}
+
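+/* MGCG is toggled through the per-client clock soft overrides: clearing
+ * them enables clock gating, setting them forces the clocks on.
+ */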
+static void hdp_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+						      bool enable)
+{
+	uint32_t hdp_clk_cntl;
+
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
+		return;
+
+	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
+
+	if (enable) {
+		hdp_clk_cntl &=
+			~(uint32_t)
+			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
+			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
+	} else {
+		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
+			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
+	}
+
+	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_0_update_clock_gating(struct amdgpu_device *adev,
+					      bool enable)
+{
+	hdp_v5_0_update_mem_power_gating(adev, enable);
+	hdp_v5_0_update_medium_grain_clock_gating(adev, enable);
+}
+
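+/* MGCG counts as enabled when none of the clock soft overrides are set;
+ * LS/DS/SD are reported from the IPH power-gating enable bits.
+ */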
+static void hdp_v5_0_get_clockgating_state(struct amdgpu_device *adev,
+					    u32 *flags)
+{
+	uint32_t tmp;
+
+	/* AMD_CG_SUPPORT_HDP_MGCG */
+	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
+	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
+		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
+		*flags |= AMD_CG_SUPPORT_HDP_MGCG;
+
+	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
+	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
+	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
+		*flags |= AMD_CG_SUPPORT_HDP_LS;
+	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
+		*flags |= AMD_CG_SUPPORT_HDP_DS;
+	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
+		*flags |= AMD_CG_SUPPORT_HDP_SD;
+}
+
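+/* One-time HDP setup: make flushes also invalidate the read cache. */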
+static void hdp_v5_0_init_registers(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
+	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
+	WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
+}
+
+const struct amdgpu_hdp_funcs hdp_v5_0_funcs = {
+	.flush_hdp = hdp_v5_0_flush_hdp,
+	.invalidate_hdp = hdp_v5_0_invalidate_hdp,
+	.update_clock_gating = hdp_v5_0_update_clock_gating,
+	.get_clock_gating_state = hdp_v5_0_get_clockgating_state,
+	.init_registers = hdp_v5_0_init_registers,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h
new file mode 100644
index 0000000000000000000000000000000000000000..2d5ec2b419f384a402a69913b2f5252d42ee5b2e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HDP_V5_0_H__
+#define __HDP_V5_0_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_hdp_funcs hdp_v5_0_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 37d8b6ca4dab8a76261d6e4cce1cf641bcdd4ed0..cc957471f31ea04b05fa4e12f4181b9680da6208 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -194,19 +194,29 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
 
 	wptr = le32_to_cpu(*ih->wptr_cpu);
 
-	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
-		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
-		/* When a ring buffer overflow happen start parsing interrupt
-		 * from the last not overwritten vector (wptr + 16). Hopefully
-		 * this should allow us to catchup.
-		 */
-		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
-			 wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
-		ih->rptr = (wptr + 16) & ih->ptr_mask;
-		tmp = RREG32(mmIH_RB_CNTL);
-		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
-		WREG32(mmIH_RB_CNTL, tmp);
-	}
+	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+		goto out;
+
+	/* Double check that the overflow wasn't already cleared. */
+	wptr = RREG32(mmIH_RB_WPTR);
+
+	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+		goto out;
+
+	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+	/* When a ring buffer overflow happens, start parsing interrupts
+	 * from the last vector not yet overwritten (wptr + 16). Hopefully
+	 * this allows us to catch up.
+	 */
+	dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+		wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+	ih->rptr = (wptr + 16) & ih->ptr_mask;
+	tmp = RREG32(mmIH_RB_CNTL);
+	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+	WREG32(mmIH_RB_CNTL, tmp);
+
+out:
 	return (wptr & ih->ptr_mask);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 985e454463e1e2cc81d7f132c495269e5dcdfc30..7f30629f21a21dd3b90f35089eb5f27ab9f3172f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -554,7 +554,7 @@ static int mes_v10_1_allocate_eop_buf(struct amdgpu_device *adev)
 		return r;
 	}
 
-	memset(eop, 0, adev->mes.eop_gpu_obj->tbo.mem.size);
+	memset(eop, 0, adev->mes.eop_gpu_obj->tbo.base.size);
 
 	amdgpu_bo_kunmap(adev->mes.eop_gpu_obj);
 	amdgpu_bo_unreserve(adev->mes.eop_gpu_obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index 1961745e89c73df00c0fcc5198b96039d0f481e7..ab9be5ad5a5fd102f20eaf7581007e2b8827f275 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -531,12 +531,12 @@ mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
 
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
 		data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
-		data1 &= !(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+		data1 &= ~(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
 			DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
 			DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
 			DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
 			DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
-		data2 &= !(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+		data2 &= ~(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
 			DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
 			DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
 			DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 7767ccca526bbbe489d9313233c46f1a59801bc3..3ee481557fc9741b480284c8330e48723e0b0d5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -255,6 +255,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 	if (!down_read_trylock(&adev->reset_sem))
 		return;
 
+	amdgpu_virt_fini_data_exchange(adev);
 	atomic_set(&adev->in_gpu_reset, 1);
 
 	do {
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index dd5c1e6ce0098716024897b57b2c3d828a32f818..48e588d3c4098c84dcd5900426c235463eb962f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -276,6 +276,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
 	if (!down_read_trylock(&adev->reset_sem))
 		return;
 
+	amdgpu_virt_fini_data_exchange(adev);
 	atomic_set(&adev->in_gpu_reset, 1);
 
 	do {
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 7ba229e43799f214ade4bb778a08961f635d37cf..f4e4040bbd25505e5df76bf0687bae008b57bfef 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -39,6 +39,53 @@
 
 static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
 
+/**
+ * navi10_ih_init_register_offset - Initialize register offsets for ih rings
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the register offsets for the ih rings (NAVI10).
+ */
+static void navi10_ih_init_register_offset(struct amdgpu_device *adev)
+{
+	struct amdgpu_ih_regs *ih_regs;
+
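+	/* only rings that were actually allocated get their offsets set */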
+	if (adev->irq.ih.ring_size) {
+		ih_regs = &adev->irq.ih.ih_regs;
+		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
+		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
+		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
+		ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
+		ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
+		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
+	}
+
+	if (adev->irq.ih1.ring_size) {
+		ih_regs = &adev->irq.ih1.ih_regs;
+		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
+		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
+		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
+		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
+	}
+
+	if (adev->irq.ih2.ring_size) {
+		ih_regs = &adev->irq.ih2.ih_regs;
+		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
+		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
+		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
+		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
+	}
+}
+
 /**
  * force_update_wptr_for_self_int - Force update the wptr for self interrupt
  *
@@ -82,133 +129,66 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev,
 }
 
 /**
- * navi10_ih_enable_interrupts - Enable the interrupt ring buffer
+ * navi10_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
  *
  * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
  *
- * Enable the interrupt ring buffer (NAVI10).
+ * Toggle the interrupt ring buffer (NAVI10).
  */
-static void navi10_ih_enable_interrupts(struct amdgpu_device *adev)
+static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
+					    struct amdgpu_ih_ring *ih,
+					    bool enable)
 {
-	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-
-	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
-	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
-	if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
-		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
-			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
-			return;
-		}
-	} else {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
-	}
+	struct amdgpu_ih_regs *ih_regs;
+	uint32_t tmp;
 
-	adev->irq.ih.enabled = true;
+	ih_regs = &ih->ih_regs;
 
-	if (adev->irq.ih1.ring_size) {
-		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
-		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
-					   RB_ENABLE, 1);
-		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
-			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
-						ih_rb_cntl)) {
-				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
-				return;
-			}
-		} else {
-			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
-		}
-		adev->irq.ih1.enabled = true;
-	}
+	tmp = RREG32(ih_regs->ih_rb_cntl);
+	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+	/* enable_intr field is only valid in ring0 */
+	if (ih == &adev->irq.ih)
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
+	WREG32(ih_regs->ih_rb_cntl, tmp);
 
-	if (adev->irq.ih2.ring_size) {
-		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
-		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
-					   RB_ENABLE, 1);
-		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
-			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
-						ih_rb_cntl)) {
-				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
-				return;
-			}
-		} else {
-			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
-		}
-		adev->irq.ih2.enabled = true;
+	if (enable) {
+		ih->enabled = true;
+	} else {
+		/* set rptr, wptr to 0 */
+		WREG32(ih_regs->ih_rb_rptr, 0);
+		WREG32(ih_regs->ih_rb_wptr, 0);
+		ih->enabled = false;
+		ih->rptr = 0;
 	}
 
-	if (adev->irq.ih_soft.ring_size)
-		adev->irq.ih_soft.enabled = true;
+	return 0;
 }
 
 /**
- * navi10_ih_disable_interrupts - Disable the interrupt ring buffer
+ * navi10_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
  *
  * @adev: amdgpu_device pointer
+ * @enable: enable or disable interrupt ring buffers
  *
- * Disable the interrupt ring buffer (NAVI10).
+ * Toggle all the available interrupt ring buffers (NAVI10).
  */
-static void navi10_ih_disable_interrupts(struct amdgpu_device *adev)
+static int navi10_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable)
 {
-	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-
-	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
-	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
-	if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
-		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
-			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
-			return;
-		}
-	} else {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
-	}
-
-	/* set rptr, wptr to 0 */
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
-	adev->irq.ih.enabled = false;
-	adev->irq.ih.rptr = 0;
-
-	if (adev->irq.ih1.ring_size) {
-		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
-		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
-					   RB_ENABLE, 0);
-		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
-			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
-						ih_rb_cntl)) {
-				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
-				return;
-			}
-		} else {
-			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
-		}
-		/* set rptr, wptr to 0 */
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
-		adev->irq.ih1.enabled = false;
-		adev->irq.ih1.rptr = 0;
-	}
+	struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+	int i;
+	int r;
 
-	if (adev->irq.ih2.ring_size) {
-		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
-		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
-					   RB_ENABLE, 0);
-		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
-			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
-						ih_rb_cntl)) {
-				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
-				return;
-			}
-		} else {
-			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+	for (i = 0; i < ARRAY_SIZE(ih); i++) {
+		if (ih[i]->ring_size) {
+			r = navi10_ih_toggle_ring_interrupts(adev, ih[i], enable);
+			if (r)
+				return r;
 		}
-		/* set rptr, wptr to 0 */
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
-		adev->irq.ih2.enabled = false;
-		adev->irq.ih2.rptr = 0;
 	}
 
+	return 0;
 }
 
 static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
@@ -253,22 +233,49 @@ static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
 	return ih_doorbell_rtpr;
 }
 
-static void navi10_ih_reroute_ih(struct amdgpu_device *adev)
+/**
+ * navi10_ih_enable_ring - enable an ih ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Enable an ih ring buffer (NAVI10).
+ */
+static int navi10_ih_enable_ring(struct amdgpu_device *adev,
+				 struct amdgpu_ih_ring *ih)
 {
+	struct amdgpu_ih_regs *ih_regs;
 	uint32_t tmp;
 
-	/* Reroute to IH ring 1 for VMC */
-	WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
-	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
-	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
-	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-	WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
-
-	/* Reroute IH ring 1 for UMC */
-	WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
-	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
-	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-	WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+	ih_regs = &ih->ih_regs;
+
+	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
+	WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
+	WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
+
+	tmp = RREG32(ih_regs->ih_rb_cntl);
+	tmp = navi10_ih_rb_cntl(ih, tmp);
+	if (ih == &adev->irq.ih)
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+	if (ih == &adev->irq.ih1) {
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+	}
+	WREG32(ih_regs->ih_rb_cntl, tmp);
+
+	if (ih == &adev->irq.ih) {
+		/* set the ih ring 0 writeback address whether it's enabled or not */
+		WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
+		WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
+	}
+
+	/* set rptr, wptr to 0 */
+	WREG32(ih_regs->ih_rb_wptr, 0);
+	WREG32(ih_regs->ih_rb_rptr, 0);
+
+	WREG32(ih_regs->ih_doorbell_rptr, navi10_ih_doorbell_rptr(ih));
+
+	return 0;
 }
 
 /**
@@ -284,36 +291,21 @@ static void navi10_ih_reroute_ih(struct amdgpu_device *adev)
  */
 static int navi10_ih_irq_init(struct amdgpu_device *adev)
 {
-	struct amdgpu_ih_ring *ih = &adev->irq.ih;
-	u32 ih_rb_cntl, ih_chicken;
+	struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+	u32 ih_chicken;
 	u32 tmp;
+	int ret;
+	int i;
 
 	/* disable irqs */
-	navi10_ih_disable_interrupts(adev);
+	ret = navi10_ih_toggle_interrupts(adev, false);
+	if (ret)
+		return ret;
 
 	adev->nbio.funcs->ih_control(adev);
 
-	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);
-
-	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-	ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
-	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
-				   !!adev->irq.msi_enabled);
-	if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
-		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
-			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
-			return -ETIMEDOUT;
-		}
-	} else {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
-	}
-	if (adev->irq.ih1.ring_size)
-		navi10_ih_reroute_ih(adev);
-
 	if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
-		if (ih->use_bus_addr) {
+		if (ih[0]->use_bus_addr) {
 			switch (adev->asic_type) {
 			case CHIP_SIENNA_CICHLID:
 			case CHIP_NAVY_FLOUNDER:
@@ -334,77 +326,17 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
 		}
 	}
 
-	/* set the writeback address whether it's enabled or not */
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
-		     lower_32_bits(ih->wptr_addr));
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
-		     upper_32_bits(ih->wptr_addr) & 0xFFFF);
-
-	/* set rptr, wptr to 0 */
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
-
-	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
-			navi10_ih_doorbell_rptr(ih));
-
-	adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
-					    ih->doorbell_index);
-
-	ih = &adev->irq.ih1;
-	if (ih->ring_size) {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
-			     (ih->gpu_addr >> 40) & 0xff);
-
-		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
-		ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
-		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
-					   WPTR_OVERFLOW_ENABLE, 0);
-		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
-					   RB_FULL_DRAIN_ENABLE, 1);
-		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
-			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
-						ih_rb_cntl)) {
-				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
-				return -ETIMEDOUT;
-			}
-		} else {
-			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+	for (i = 0; i < ARRAY_SIZE(ih); i++) {
+		if (ih[i]->ring_size) {
+			ret = navi10_ih_enable_ring(adev, ih[i]);
+			if (ret)
+				return ret;
 		}
-		/* set rptr, wptr to 0 */
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
-
-		WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
-				navi10_ih_doorbell_rptr(ih));
-	}
-
-	ih = &adev->irq.ih2;
-	if (ih->ring_size) {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
-			     (ih->gpu_addr >> 40) & 0xff);
-
-		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
-		ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
-
-		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
-			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
-						ih_rb_cntl)) {
-				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
-				return -ETIMEDOUT;
-			}
-		} else {
-			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
-		}
-		/* set rptr, wptr to 0 */
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
-
-		WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
-			     navi10_ih_doorbell_rptr(ih));
 	}
 
+	/* update doorbell range for ih ring 0 */
+	adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
+					    ih[0]->doorbell_index);
 
 	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
 	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
@@ -418,10 +350,15 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
 	pci_set_master(adev->pdev);
 
 	/* enable interrupts */
-	navi10_ih_enable_interrupts(adev);
+	ret = navi10_ih_toggle_interrupts(adev, true);
+	if (ret)
+		return ret;
 	/* enable wptr force update for self int */
 	force_update_wptr_for_self_int(adev, 0, 8, true);
 
+	if (adev->irq.ih_soft.ring_size)
+		adev->irq.ih_soft.enabled = true;
+
 	return 0;
 }
 
@@ -435,7 +372,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
 static void navi10_ih_irq_disable(struct amdgpu_device *adev)
 {
 	force_update_wptr_for_self_int(adev, 0, 8, false);
-	navi10_ih_disable_interrupts(adev);
+	navi10_ih_toggle_interrupts(adev, false);
 
 	/* Wait and acknowledge irq */
 	mdelay(1);
@@ -455,23 +392,16 @@ static void navi10_ih_irq_disable(struct amdgpu_device *adev)
 static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
 			      struct amdgpu_ih_ring *ih)
 {
-	u32 wptr, reg, tmp;
+	u32 wptr, tmp;
+	struct amdgpu_ih_regs *ih_regs;
 
 	wptr = le32_to_cpu(*ih->wptr_cpu);
+	ih_regs = &ih->ih_regs;
 
 	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
 		goto out;
 
-	if (ih == &adev->irq.ih)
-		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
-	else if (ih == &adev->irq.ih1)
-		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
-	else if (ih == &adev->irq.ih2)
-		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
-	else
-		BUG();
-
-	wptr = RREG32_NO_KIQ(reg);
+	wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
 	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
 		goto out;
 	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
@@ -486,67 +416,13 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
 		 wptr, ih->rptr, tmp);
 	ih->rptr = tmp;
 
-	if (ih == &adev->irq.ih)
-		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
-	else if (ih == &adev->irq.ih1)
-		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
-	else if (ih == &adev->irq.ih2)
-		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
-	else
-		BUG();
-
-	tmp = RREG32_NO_KIQ(reg);
+	tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
-	WREG32_NO_KIQ(reg, tmp);
+	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
 out:
 	return (wptr & ih->ptr_mask);
 }
 
-/**
- * navi10_ih_decode_iv - decode an interrupt vector
- *
- * @adev: amdgpu_device pointer
- * @ih: IH ring buffer to decode
- * @entry: IV entry to place decoded information into
- *
- * Decodes the interrupt vector at the current rptr
- * position and also advance the position.
- */
-static void navi10_ih_decode_iv(struct amdgpu_device *adev,
-				struct amdgpu_ih_ring *ih,
-				struct amdgpu_iv_entry *entry)
-{
-	/* wptr/rptr are in bytes! */
-	u32 ring_index = ih->rptr >> 2;
-	uint32_t dw[8];
-
-	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
-	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
-	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
-	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
-	dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
-	dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
-	dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
-	dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
-
-	entry->client_id = dw[0] & 0xff;
-	entry->src_id = (dw[0] >> 8) & 0xff;
-	entry->ring_id = (dw[0] >> 16) & 0xff;
-	entry->vmid = (dw[0] >> 24) & 0xf;
-	entry->vmid_src = (dw[0] >> 31);
-	entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
-	entry->timestamp_src = dw[2] >> 31;
-	entry->pasid = dw[3] & 0xffff;
-	entry->pasid_src = dw[3] >> 31;
-	entry->src_data[0] = dw[4];
-	entry->src_data[1] = dw[5];
-	entry->src_data[2] = dw[6];
-	entry->src_data[3] = dw[7];
-
-	/* wptr/rptr are in bytes! */
-	ih->rptr += 32;
-}
-
 /**
  * navi10_ih_irq_rearm - rearm IRQ if lost
  *
@@ -557,22 +433,15 @@ static void navi10_ih_decode_iv(struct amdgpu_device *adev,
 static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
 			       struct amdgpu_ih_ring *ih)
 {
-	uint32_t reg_rptr = 0;
 	uint32_t v = 0;
 	uint32_t i = 0;
+	struct amdgpu_ih_regs *ih_regs;
 
-	if (ih == &adev->irq.ih)
-		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
-	else if (ih == &adev->irq.ih1)
-		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
-	else if (ih == &adev->irq.ih2)
-		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
-	else
-		return;
+	ih_regs = &ih->ih_regs;
 
 	/* Rearm IRQ / re-write doorbell if doorbell write is lost */
 	for (i = 0; i < MAX_REARM_RETRY; i++) {
-		v = RREG32_NO_KIQ(reg_rptr);
+		v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
 		if ((v < ih->ring_size) && (v != ih->rptr))
 			WDOORBELL32(ih->doorbell_index, ih->rptr);
 		else
@@ -591,6 +460,8 @@ static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
 static void navi10_ih_set_rptr(struct amdgpu_device *adev,
 			       struct amdgpu_ih_ring *ih)
 {
+	struct amdgpu_ih_regs *ih_regs;
+
 	if (ih->use_doorbell) {
 		/* XXX check if swapping is necessary on BE */
 		*ih->rptr_cpu = ih->rptr;
@@ -598,12 +469,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
 
 		if (amdgpu_sriov_vf(adev))
 			navi10_ih_irq_rearm(adev, ih);
-	} else if (ih == &adev->irq.ih) {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
-	} else if (ih == &adev->irq.ih1) {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
-	} else if (ih == &adev->irq.ih2) {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
+	} else {
+		ih_regs = &ih->ih_regs;
+		WREG32(ih_regs->ih_rb_rptr, ih->rptr);
 	}
 }
 
@@ -685,23 +553,8 @@ static int navi10_ih_sw_init(void *handle)
 	adev->irq.ih1.ring_size = 0;
 	adev->irq.ih2.ring_size = 0;
 
-	if (adev->asic_type < CHIP_NAVI10) {
-		r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
-		if (r)
-			return r;
-
-		adev->irq.ih1.use_doorbell = true;
-		adev->irq.ih1.doorbell_index =
-					(adev->doorbell_index.ih + 1) << 1;
-
-		r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
-		if (r)
-			return r;
-
-		adev->irq.ih2.use_doorbell = true;
-		adev->irq.ih2.doorbell_index =
-					(adev->doorbell_index.ih + 2) << 1;
-	}
+	/* initialize ih control registers offset */
+	navi10_ih_init_register_offset(adev);
 
 	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
 	if (r)
@@ -717,6 +570,7 @@ static int navi10_ih_sw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	amdgpu_irq_fini(adev);
+	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
 	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
 	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
 	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
@@ -848,7 +702,7 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs navi10_ih_funcs = {
 	.get_wptr = navi10_ih_get_wptr,
-	.decode_iv = navi10_ih_decode_iv,
+	.decode_iv = amdgpu_ih_decode_iv_helper,
 	.set_rptr = navi10_ih_set_rptr
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index b5c3db16c2b01a74b25bfdccc557d2612fb01ca0..05ddec7ba7e2e3d7702be889616ff5bcfeea5c6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -34,6 +34,14 @@
 #define smnCPM_CONTROL		0x11180460
 #define smnPCIE_CNTL2		0x11180070
 #define smnPCIE_LC_CNTL		0x11140280
+#define smnPCIE_LC_CNTL3	0x111402d4
+#define smnPCIE_LC_CNTL6	0x111402ec
+#define smnPCIE_LC_CNTL7	0x111402f0
+#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2	0x1014008c
+#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL	0x10123538
+#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP	0x10140324
+#define smnPSWUSP0_PCIE_LC_CNTL2		0x111402c4
+#define smnNBIF_MGCG_CTRL_LCLK			0x1013a21c
 
 #define mmBIF_SDMA2_DOORBELL_RANGE		0x01d6
 #define mmBIF_SDMA2_DOORBELL_RANGE_BASE_IDX	2
@@ -80,15 +88,6 @@ static void nbio_v2_3_mc_access_enable(struct amdgpu_device *adev, bool enable)
 		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
 }
 
-static void nbio_v2_3_hdp_flush(struct amdgpu_device *adev,
-				struct amdgpu_ring *ring)
-{
-	if (!ring || !ring->funcs->emit_wreg)
-		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-	else
-		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-}
-
 static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev)
 {
 	return RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE);
@@ -359,6 +358,111 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
 		WREG32_PCIE(smnPCIE_LC_CNTL, data);
 }
 
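+/* Program PCIe Latency Tolerance Reporting: set the LTR transmit
+ * control, allow LTR messages in ASPM L1 and enable LTR on the
+ * endpoint function.
+ */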
+static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
+{
+	uint32_t def, data;
+
+	WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB);
+
+	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2);
+	data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
+	if (def != data)
+		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2, data);
+
+	def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
+	data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
+	if (def != data)
+		WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);
+
+	def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+	data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+	if (def != data)
+		WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+}
+
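+/* Program the full ASPM configuration: L0s/L1 inactivity timers, clock
+ * power-down behaviour in L1/L23 and the LTR setup above.
+ */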
+static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
+{
+	uint32_t def, data;
+
+	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+	data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
+	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+	data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+	if (def != data)
+		WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+	def = data = RREG32_PCIE(smnPCIE_LC_CNTL7);
+	data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
+	if (def != data)
+		WREG32_PCIE(smnPCIE_LC_CNTL7, data);
+
+	def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
+	data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK;
+	if (def != data)
+		WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);
+
+	def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+	data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+	if (def != data)
+		WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+
+	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
+	data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
+	data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
+	if (def != data)
+		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data);
+
+	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5);
+	data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
+	if (def != data)
+		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);
+
+	def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+	data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+	if (def != data)
+		WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+
+	WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);
+
+	def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2);
+	data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
+		PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
+	data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
+	if (def != data)
+		WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data);
+
+	def = data = RREG32_PCIE(smnPCIE_LC_CNTL6);
+	data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK |
+		PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK;
+	if (def != data)
+		WREG32_PCIE(smnPCIE_LC_CNTL6, data);
+
+	nbio_v2_3_program_ltr(adev);
+
+	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
+	data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
+	data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
+	if (def != data)
+		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data);
+
+	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5);
+	data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
+	if (def != data)
+		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);
+
+	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+	data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+	data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
+	if (def != data)
+		WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+	def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+	data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+	if (def != data)
+		WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+}
+
 const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
 	.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
 	.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
@@ -366,7 +470,6 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
 	.get_pcie_data_offset = nbio_v2_3_get_pcie_data_offset,
 	.get_rev_id = nbio_v2_3_get_rev_id,
 	.mc_access_enable = nbio_v2_3_mc_access_enable,
-	.hdp_flush = nbio_v2_3_hdp_flush,
 	.get_memsize = nbio_v2_3_get_memsize,
 	.sdma_doorbell_range = nbio_v2_3_sdma_doorbell_range,
 	.vcn_doorbell_range = nbio_v2_3_vcn_doorbell_range,
@@ -380,4 +483,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
 	.init_registers = nbio_v2_3_init_registers,
 	.remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
 	.enable_aspm = nbio_v2_3_enable_aspm,
+	.program_aspm = nbio_v2_3_program_aspm,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index d2f1fe55d3888038f1e6567bb62bd38a639da801..83ea063388fddda6ab3d3090fc040d95d429d1e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -29,6 +29,15 @@
 #include "nbio/nbio_6_1_sh_mask.h"
 #include "nbio/nbio_6_1_smn.h"
 #include "vega10_enum.h"
+#include <uapi/linux/kfd_ioctl.h>
+
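+/* Remap the HDP memory/register flush controls into the dedicated
+ * MMIO aperture shared with KFD.
+ */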
+static void nbio_v6_1_remap_hdp_registers(struct amdgpu_device *adev)
+{
+	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
+		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
+		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
 
 static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
 {
@@ -50,18 +59,6 @@ static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
 		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
 }
 
-static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev,
-				struct amdgpu_ring *ring)
-{
-	if (!ring || !ring->funcs->emit_wreg)
-		WREG32_SOC15_NO_KIQ(NBIO, 0,
-				    mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL,
-				    0);
-	else
-		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
-			NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
-}
-
 static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
 {
 	return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE);
@@ -266,7 +263,6 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
 	.get_pcie_data_offset = nbio_v6_1_get_pcie_data_offset,
 	.get_rev_id = nbio_v6_1_get_rev_id,
 	.mc_access_enable = nbio_v6_1_mc_access_enable,
-	.hdp_flush = nbio_v6_1_hdp_flush,
 	.get_memsize = nbio_v6_1_get_memsize,
 	.sdma_doorbell_range = nbio_v6_1_sdma_doorbell_range,
 	.enable_doorbell_aperture = nbio_v6_1_enable_doorbell_aperture,
@@ -277,4 +273,5 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
 	.get_clockgating_state = nbio_v6_1_get_clockgating_state,
 	.ih_control = nbio_v6_1_ih_control,
 	.init_registers = nbio_v6_1_init_registers,
+	.remap_hdp_registers = nbio_v6_1_remap_hdp_registers,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index ae685813c419db8ad0e4abf60f265a59feb07296..3c00666a13e16b45213a98da1aa8151628640d5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -60,15 +60,6 @@ static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
 		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
 }
 
-static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev,
-				struct amdgpu_ring *ring)
-{
-	if (!ring || !ring->funcs->emit_wreg)
-		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-	else
-		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-}
-
 static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
 {
 	return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
@@ -292,7 +283,6 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
 	.get_pcie_data_offset = nbio_v7_0_get_pcie_data_offset,
 	.get_rev_id = nbio_v7_0_get_rev_id,
 	.mc_access_enable = nbio_v7_0_mc_access_enable,
-	.hdp_flush = nbio_v7_0_hdp_flush,
 	.get_memsize = nbio_v7_0_get_memsize,
 	.sdma_doorbell_range = nbio_v7_0_sdma_doorbell_range,
 	.vcn_doorbell_range = nbio_v7_0_vcn_doorbell_range,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
index aa36022670f9dd2cb4bc3b6f75c22b02fc983e1a..598ce0e9362772a7eb0f26057be9feaadf424215 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
@@ -56,15 +56,6 @@ static void nbio_v7_2_mc_access_enable(struct amdgpu_device *adev, bool enable)
 		WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
 }
 
-static void nbio_v7_2_hdp_flush(struct amdgpu_device *adev,
-				struct amdgpu_ring *ring)
-{
-	if (!ring || !ring->funcs->emit_wreg)
-		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-	else
-		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-}
-
 static u32 nbio_v7_2_get_memsize(struct amdgpu_device *adev)
 {
 	return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_CONFIG_MEMSIZE);
@@ -325,7 +316,6 @@ const struct amdgpu_nbio_funcs nbio_v7_2_funcs = {
 	.get_pcie_port_data_offset = nbio_v7_2_get_pcie_port_data_offset,
 	.get_rev_id = nbio_v7_2_get_rev_id,
 	.mc_access_enable = nbio_v7_2_mc_access_enable,
-	.hdp_flush = nbio_v7_2_hdp_flush,
 	.get_memsize = nbio_v7_2_get_memsize,
 	.sdma_doorbell_range = nbio_v7_2_sdma_doorbell_range,
 	.vcn_doorbell_range = nbio_v7_2_vcn_doorbell_range,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index eadc9526d33fe6418c2c0606a6e90e9a194c9f5b..4bc1d1434065f097db3523f6a47767e120206cc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -82,15 +82,6 @@ static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable)
 		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
 }
 
-static void nbio_v7_4_hdp_flush(struct amdgpu_device *adev,
-				struct amdgpu_ring *ring)
-{
-	if (!ring || !ring->funcs->emit_wreg)
-		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-	else
-		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-}
-
 static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
 {
 	return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
@@ -541,7 +532,6 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
 	.get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset,
 	.get_rev_id = nbio_v7_4_get_rev_id,
 	.mc_access_enable = nbio_v7_4_mc_access_enable,
-	.hdp_flush = nbio_v7_4_hdp_flush,
 	.get_memsize = nbio_v7_4_get_memsize,
 	.sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
 	.vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range,
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 6bee3677394ac011131369778f10a4dcd56668c1..160fa5f598053b28f7d5afafd0c3e0d97e27cb1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -38,9 +38,6 @@
 
 #include "gc/gc_10_1_0_offset.h"
 #include "gc/gc_10_1_0_sh_mask.h"
-#include "hdp/hdp_5_0_0_offset.h"
-#include "hdp/hdp_5_0_0_sh_mask.h"
-#include "smuio/smuio_11_0_0_offset.h"
 #include "mp/mp_11_0_offset.h"
 
 #include "soc15.h"
@@ -50,6 +47,7 @@
 #include "mmhub_v2_0.h"
 #include "nbio_v2_3.h"
 #include "nbio_v7_2.h"
+#include "hdp_v5_0.h"
 #include "nv.h"
 #include "navi10_ih.h"
 #include "gfx_v10_0.h"
@@ -62,6 +60,8 @@
 #include "dce_virtual.h"
 #include "mes_v10_1.h"
 #include "mxgpu_nv.h"
+#include "smuio_v11_0.h"
+#include "smuio_v11_0_6.h"
 
 static const struct amd_ip_funcs nv_common_ip_funcs;
 
@@ -203,6 +203,7 @@ static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
 {
 	u32 *dw_ptr;
 	u32 i, length_dw;
+	u32 rom_index_offset, rom_data_offset;
 
 	if (bios == NULL)
 		return false;
@@ -215,11 +216,16 @@ static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
 	dw_ptr = (u32 *)bios;
 	length_dw = ALIGN(length_bytes, 4) / 4;
 
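+	/* the ROM index/data register offsets come from the SMUIO block,
+	 * since they differ between SMUIO versions */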
+	rom_index_offset =
+		adev->smuio.funcs->get_rom_index_offset(adev);
+	rom_data_offset =
+		adev->smuio.funcs->get_rom_data_offset(adev);
+
 	/* set rom index to 0 */
-	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
+	WREG32(rom_index_offset, 0);
 	/* read out the rom data */
 	for (i = 0; i < length_dw; i++)
-		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
+		dw_ptr[i] = RREG32(rom_data_offset);
 
 	return true;
 }
@@ -336,6 +342,38 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
 	return ret;
 }
 
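+/* Mode2 reset: disable bus mastering, cache the PCI config space, ask
+ * the SMU to perform the reset, then restore config space and wait for
+ * the memory controller to come back.
+ */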
+static int nv_asic_mode2_reset(struct amdgpu_device *adev)
+{
+	u32 i;
+	int ret = 0;
+
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+	/* disable BM */
+	pci_clear_master(adev->pdev);
+
+	amdgpu_device_cache_pci_state(adev->pdev);
+
+	ret = amdgpu_dpm_mode2_reset(adev);
+	if (ret)
+		dev_err(adev->dev, "GPU mode2 reset failed\n");
+
+	amdgpu_device_load_pci_state(adev->pdev);
+
+	/* wait for asic to come out of reset */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		u32 memsize = adev->nbio.funcs->get_memsize(adev);
+
+		if (memsize != 0xffffffff)
+			break;
+		udelay(1);
+	}
+
+	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+
+	return ret;
+}
+
 static bool nv_asic_supports_baco(struct amdgpu_device *adev)
 {
 	struct smu_context *smu = &adev->smu;
@@ -352,7 +390,9 @@ nv_asic_reset_method(struct amdgpu_device *adev)
 	struct smu_context *smu = &adev->smu;
 
 	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
-	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
+	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
+	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
+	    amdgpu_reset_method == AMD_RESET_METHOD_PCI)
 		return amdgpu_reset_method;
 
 	if (amdgpu_reset_method != -1)
@@ -360,6 +400,8 @@ nv_asic_reset_method(struct amdgpu_device *adev)
 				  amdgpu_reset_method);
 
 	switch (adev->asic_type) {
+	case CHIP_VANGOGH:
+		return AMD_RESET_METHOD_MODE2;
 	case CHIP_SIENNA_CICHLID:
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_DIMGREY_CAVEFISH:
@@ -377,7 +419,16 @@ static int nv_asic_reset(struct amdgpu_device *adev)
 	int ret = 0;
 	struct smu_context *smu = &adev->smu;
 
-	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+	/* skip reset on vangogh for now */
+	if (adev->asic_type == CHIP_VANGOGH)
+		return 0;
+
+	switch (nv_asic_reset_method(adev)) {
+	case AMD_RESET_METHOD_PCI:
+		dev_info(adev->dev, "PCI reset\n");
+		ret = amdgpu_device_pci_reset(adev);
+		break;
+	case AMD_RESET_METHOD_BACO:
 		dev_info(adev->dev, "BACO reset\n");
 
 		ret = smu_baco_enter(smu);
@@ -386,9 +437,15 @@ static int nv_asic_reset(struct amdgpu_device *adev)
 		ret = smu_baco_exit(smu);
 		if (ret)
 			return ret;
-	} else {
+		break;
+	case AMD_RESET_METHOD_MODE2:
+		dev_info(adev->dev, "MODE2 reset\n");
+		ret = nv_asic_mode2_reset(adev);
+		break;
+	default:
 		dev_info(adev->dev, "MODE1 reset\n");
 		ret = nv_asic_mode1_reset(adev);
+		break;
 	}
 
 	return ret;
@@ -423,11 +480,14 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
 
 static void nv_program_aspm(struct amdgpu_device *adev)
 {
-
-	if (amdgpu_aspm == 0)
+	if (amdgpu_aspm != 1)
 		return;
 
-	/* todo */
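+	/* only dGPUs from Sienna Cichlid onwards provide the ASPM
+	 * programming callback */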
+	if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
+	    !(adev->flags & AMD_IS_APU) &&
+	    (adev->nbio.funcs->program_aspm))
+		adev->nbio.funcs->program_aspm(adev);
 }
 
 static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
@@ -514,6 +574,12 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 		adev->nbio.funcs = &nbio_v2_3_funcs;
 		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
 	}
+	adev->hdp.funcs = &hdp_v5_0_funcs;
+
+	if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+		adev->smuio.funcs = &smuio_v11_0_6_funcs;
+	else
+		adev->smuio.funcs = &smuio_v11_0_funcs;
 
 	if (adev->asic_type == CHIP_SIENNA_CICHLID)
 		adev->gmc.xgmi.supported = true;
@@ -669,22 +735,6 @@ static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
 	return adev->nbio.funcs->get_rev_id(adev);
 }
 
-static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
-{
-	adev->nbio.funcs->hdp_flush(adev, ring);
-}
-
-static void nv_invalidate_hdp(struct amdgpu_device *adev,
-				struct amdgpu_ring *ring)
-{
-	if (!ring || !ring->funcs->emit_wreg) {
-		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
-	} else {
-		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
-					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
-	}
-}
-
 static bool nv_need_full_reset(struct amdgpu_device *adev)
 {
 	return true;
@@ -768,10 +818,10 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
 	 * The ASPM function is not fully enabled and verified on
 	 * Navi yet. Temporarily skip this until ASPM enabled.
 	 */
-#if 0
-	if (adev->nbio.funcs->enable_aspm)
+	if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
+	    !(adev->flags & AMD_IS_APU) &&
+	    (adev->nbio.funcs->enable_aspm))
 		adev->nbio.funcs->enable_aspm(adev, !enter);
-#endif
 
 	return 0;
 }
@@ -788,8 +838,6 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
 	.set_uvd_clocks = &nv_set_uvd_clocks,
 	.set_vce_clocks = &nv_set_vce_clocks,
 	.get_config_memsize = &nv_get_config_memsize,
-	.flush_hdp = &nv_flush_hdp,
-	.invalidate_hdp = &nv_invalidate_hdp,
 	.init_doorbell_index = &nv_init_doorbell_index,
 	.need_full_reset = &nv_need_full_reset,
 	.need_reset_on_init = &nv_need_reset_on_init,
@@ -1080,120 +1128,6 @@ static int nv_common_soft_reset(void *handle)
 	return 0;
 }
 
-static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
-					   bool enable)
-{
-	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
-	uint32_t hdp_mem_pwr_cntl;
-
-	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
-				AMD_CG_SUPPORT_HDP_DS |
-				AMD_CG_SUPPORT_HDP_SD)))
-		return;
-
-	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
-	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
-
-	/* Before doing clock/power mode switch,
-	 * forced on IPH & RC clock */
-	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
-				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
-	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
-				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
-	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
-
-	/* HDP 5.0 doesn't support dynamic power mode switch,
-	 * disable clock and power gating before any changing */
-	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
-					 IPH_MEM_POWER_CTRL_EN, 0);
-	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
-					 IPH_MEM_POWER_LS_EN, 0);
-	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
-					 IPH_MEM_POWER_DS_EN, 0);
-	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
-					 IPH_MEM_POWER_SD_EN, 0);
-	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
-					 RC_MEM_POWER_CTRL_EN, 0);
-	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
-					 RC_MEM_POWER_LS_EN, 0);
-	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
-					 RC_MEM_POWER_DS_EN, 0);
-	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
-					 RC_MEM_POWER_SD_EN, 0);
-	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
-
-	/* only one clock gating mode (LS/DS/SD) can be enabled */
-	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
-		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
-						 HDP_MEM_POWER_CTRL,
-						 IPH_MEM_POWER_LS_EN, enable);
-		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
-						 HDP_MEM_POWER_CTRL,
-						 RC_MEM_POWER_LS_EN, enable);
-	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
-		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
-						 HDP_MEM_POWER_CTRL,
-						 IPH_MEM_POWER_DS_EN, enable);
-		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
-						 HDP_MEM_POWER_CTRL,
-						 RC_MEM_POWER_DS_EN, enable);
-	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
-		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
-						 HDP_MEM_POWER_CTRL,
-						 IPH_MEM_POWER_SD_EN, enable);
-		/* RC should not use shut down mode, fallback to ds */
-		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
-						 HDP_MEM_POWER_CTRL,
-						 RC_MEM_POWER_DS_EN, enable);
-	}
-
-	/* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
-	 * be set for SRAM LS/DS/SD */
-	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
-							AMD_CG_SUPPORT_HDP_SD)) {
-		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
-						IPH_MEM_POWER_CTRL_EN, 1);
-		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
-						RC_MEM_POWER_CTRL_EN, 1);
-	}
-
-	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
-
-	/* restore IPH & RC clock override after clock/power mode changing */
-	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
-}
-
-static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
-				       bool enable)
-{
-	uint32_t hdp_clk_cntl;
-
-	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
-		return;
-
-	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
-
-	if (enable) {
-		hdp_clk_cntl &=
-			~(uint32_t)
-			  (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
-			   HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
-			   HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
-			   HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
-			   HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
-			   HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
-	} else {
-		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
-			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
-			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
-			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
-			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
-			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
-	}
-
-	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
-}
-
 static int nv_common_set_clockgating_state(void *handle,
 					   enum amd_clockgating_state state)
 {
@@ -1213,9 +1147,9 @@ static int nv_common_set_clockgating_state(void *handle,
 				state == AMD_CG_STATE_GATE);
 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
 				state == AMD_CG_STATE_GATE);
-		nv_update_hdp_mem_power_gating(adev,
-				   state == AMD_CG_STATE_GATE);
-		nv_update_hdp_clock_gating(adev,
+		adev->hdp.funcs->update_clock_gating(adev,
+				state == AMD_CG_STATE_GATE);
+		adev->smuio.funcs->update_rom_clock_gating(adev,
 				state == AMD_CG_STATE_GATE);
 		break;
 	default:
@@ -1234,31 +1168,15 @@ static int nv_common_set_powergating_state(void *handle,
 static void nv_common_get_clockgating_state(void *handle, u32 *flags)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	uint32_t tmp;
 
 	if (amdgpu_sriov_vf(adev))
 		*flags = 0;
 
 	adev->nbio.funcs->get_clockgating_state(adev, flags);
 
-	/* AMD_CG_SUPPORT_HDP_MGCG */
-	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
-	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
-		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
-		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
-		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
-		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
-		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
-		*flags |= AMD_CG_SUPPORT_HDP_MGCG;
-
-	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
-	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
-	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
-		*flags |= AMD_CG_SUPPORT_HDP_LS;
-	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
-		*flags |= AMD_CG_SUPPORT_HDP_DS;
-	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
-		*flags |= AMD_CG_SUPPORT_HDP_SD;
+	adev->hdp.funcs->get_clock_gating_state(adev, flags);
+
+	adev->smuio.funcs->get_clock_gating_state(adev, flags);
 
 	return;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index d7f92634eba271e40e5c1e219dfbb28a563a0bbb..4b1cc5e9ee926472d52ca02956312562309abff1 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -92,8 +92,6 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
 			(uint8_t *)ta_hdr +
 			le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
 
-		adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
-
 		adev->psp.ta_dtm_ucode_version =
 			le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
 		adev->psp.ta_dtm_ucode_size =
@@ -101,6 +99,16 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
 		adev->psp.ta_dtm_start_addr =
 			(uint8_t *)adev->psp.ta_hdcp_start_addr +
 			le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+
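+		/* secure display TA; its image offset is relative to the HDCP TA start */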
+		adev->psp.ta_securedisplay_ucode_version =
+			le32_to_cpu(ta_hdr->ta_securedisplay_ucode_version);
+		adev->psp.ta_securedisplay_ucode_size =
+			le32_to_cpu(ta_hdr->ta_securedisplay_size_bytes);
+		adev->psp.ta_securedisplay_start_addr =
+			(uint8_t *)adev->psp.ta_hdcp_start_addr +
+			le32_to_cpu(ta_hdr->ta_securedisplay_offset_bytes);
+
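+		/* record the shared TA firmware version once all TA images are parsed */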
+		adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index bd4248c93c49ff5a59d2f255a1a543524805bd66..c325d6f53a71eed8932bb40c8d42878e4a262982 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -392,37 +392,6 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
 	return ret;
 }
 
-static void psp_v11_0_reroute_ih(struct psp_context *psp)
-{
-	struct amdgpu_device *adev = psp->adev;
-	uint32_t tmp;
-
-	/* Change IH ring for VMC */
-	tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
-	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
-	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
-	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
-	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
-	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
-	mdelay(20);
-	psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
-		     0x80000000, 0x8000FFFF, false);
-
-	/* Change IH ring for UMC */
-	tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
-	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
-	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
-	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
-	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
-	mdelay(20);
-	psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
-		     0x80000000, 0x8000FFFF, false);
-}
-
 static int psp_v11_0_ring_init(struct psp_context *psp,
 			      enum psp_ring_type ring_type)
 {
@@ -430,11 +399,6 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
 	struct psp_ring *ring;
 	struct amdgpu_device *adev = psp->adev;
 
-	if ((!amdgpu_sriov_vf(adev)) &&
-	    !(adev->asic_type >= CHIP_SIENNA_CICHLID &&
-	    adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
-		psp_v11_0_reroute_ih(psp);
-
 	ring = &psp->km_ring;
 
 	ring->ring_type = ring_type;
@@ -726,7 +690,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
 		}
 
 		memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
-		adev->nbio.funcs->hdp_flush(adev, NULL);
+		adev->hdp.funcs->flush_hdp(adev, NULL);
 		vfree(buf);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index ce56e93c68867afddc126f14e40963a67bb6b921..c8c22c1d1e65546613a433a8f0853d0da6c6c455 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -46,7 +46,6 @@
 #include "sdma6/sdma6_4_2_2_sh_mask.h"
 #include "sdma7/sdma7_4_2_2_offset.h"
 #include "sdma7/sdma7_4_2_2_sh_mask.h"
-#include "hdp/hdp_4_0_offset.h"
 #include "sdma0/sdma0_4_1_default.h"
 
 #include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index b208b81005bb72aa1c251afa2c5160d94af42c84..d345e324837ddf2152757e7119cf563882770ed6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -32,7 +32,6 @@
 
 #include "gc/gc_10_1_0_offset.h"
 #include "gc/gc_10_1_0_sh_mask.h"
-#include "hdp/hdp_5_0_0_offset.h"
 #include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
 #include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index f1ba36a094daf0964d1d3d8ea5d1780710713c93..690a5090475a57d93e57eca61f181899dd9a1966 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -119,15 +119,7 @@ static int sdma_v5_2_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
 
 static void sdma_v5_2_destroy_inst_ctx(struct amdgpu_device *adev)
 {
-	int i;
-
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		release_firmware(adev->sdma.instance[i].fw);
-		adev->sdma.instance[i].fw = NULL;
-
-		if (adev->asic_type == CHIP_SIENNA_CICHLID)
-			break;
-	}
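+	/* all instances share instance 0's firmware, so release it only once */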
+	release_firmware(adev->sdma.instance[0].fw);
 
 	memset((void *)adev->sdma.instance, 0,
 	       sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
@@ -185,23 +177,10 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
 	if (err)
 		goto out;
 
-	for (i = 1; i < adev->sdma.num_instances; i++) {
-		if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
-		    adev->asic_type <= CHIP_DIMGREY_CAVEFISH) {
-			memcpy((void *)&adev->sdma.instance[i],
-			       (void *)&adev->sdma.instance[0],
-			       sizeof(struct amdgpu_sdma_instance));
-		} else {
-			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
-			err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
-			if (err)
-				goto out;
-
-			err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]);
-			if (err)
-				goto out;
-		}
-	}
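+	/* instances 1..n reuse the firmware image loaded for instance 0 */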
+	for (i = 1; i < adev->sdma.num_instances; i++)
+		memcpy((void *)&adev->sdma.instance[i],
+		       (void *)&adev->sdma.instance[0],
+		       sizeof(struct amdgpu_sdma_instance));
 
 	DRM_DEBUG("psp_load == '%s'\n",
 		  adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 3cf0589bfea5e4ecf5bbf44b2701d0773a908ceb..6b5cf7882a12d03aa133ec30a5c1b0177162c6e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1270,7 +1270,7 @@ static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
 	u32 i;
 	int r = -EINVAL;
 
-	dev_info(adev->dev, "GPU pci config reset\n");
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
 
 	/* set mclk/sclk to bypass */
 	si_set_clk_bypass_mode(adev);
@@ -1294,20 +1294,6 @@ static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
 		}
 		udelay(1);
 	}
-
-	return r;
-}
-
-static int si_asic_reset(struct amdgpu_device *adev)
-{
-	int r;
-
-	dev_info(adev->dev, "PCI CONFIG reset\n");
-
-	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
-	r = si_gpu_pci_config_reset(adev);
-
 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
 
 	return r;
@@ -1321,14 +1307,34 @@ static bool si_asic_supports_baco(struct amdgpu_device *adev)
 static enum amd_reset_method
 si_asic_reset_method(struct amdgpu_device *adev)
 {
-	if (amdgpu_reset_method != AMD_RESET_METHOD_LEGACY &&
-	    amdgpu_reset_method != -1)
+	if (amdgpu_reset_method == AMD_RESET_METHOD_PCI)
+		return amdgpu_reset_method;
+	else if (amdgpu_reset_method != AMD_RESET_METHOD_LEGACY &&
+		 amdgpu_reset_method != -1)
 		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
-				  amdgpu_reset_method);
+			 amdgpu_reset_method);
 
 	return AMD_RESET_METHOD_LEGACY;
 }
 
+static int si_asic_reset(struct amdgpu_device *adev)
+{
+	int r;
+
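+	/* honor an explicit PCI reset request, otherwise use the legacy PCI CONFIG reset */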
+	switch (si_asic_reset_method(adev)) {
+	case AMD_RESET_METHOD_PCI:
+		dev_info(adev->dev, "PCI reset\n");
+		r = amdgpu_device_pci_reset(adev);
+		break;
+	default:
+		dev_info(adev->dev, "PCI CONFIG reset\n");
+		r = si_gpu_pci_config_reset(adev);
+		break;
+	}
+
+	return r;
+}
+
 static u32 si_get_config_memsize(struct amdgpu_device *adev)
 {
 	return RREG32(mmCONFIG_MEMSIZE);
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c
new file mode 100644
index 0000000000000000000000000000000000000000..3a18dbb55c32815b4ace96a3417f87fa15e7529b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smuio_v11_0_6.h"
+#include "smuio/smuio_11_0_6_offset.h"
+#include "smuio/smuio_11_0_6_sh_mask.h"
+
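+/* ROM_INDEX/ROM_DATA form the index/data pair used to read out the video BIOS */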
+static u32 smuio_v11_0_6_get_rom_index_offset(struct amdgpu_device *adev)
+{
+	return SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
+}
+
+static u32 smuio_v11_0_6_get_rom_data_offset(struct amdgpu_device *adev)
+{
+	return SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
+}
+
+static void smuio_v11_0_6_update_rom_clock_gating(struct amdgpu_device *adev, bool enable)
+{
+	u32 def, data;
+
+	/* enabling/disabling ROM CG is not supported on APUs */
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	def = data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0);
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
+		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
+			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
+	else
+		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
+			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
+
+	if (def != data)
+		WREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0, data);
+}
+
+static void smuio_v11_0_6_get_clock_gating_state(struct amdgpu_device *adev, u32 *flags)
+{
+	u32 data;
+
+	/* CGTT_ROM_CLK_CTRL0 is not available on APUs */
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0);
+	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
+		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
+}
+
+const struct amdgpu_smuio_funcs smuio_v11_0_6_funcs = {
+	.get_rom_index_offset = smuio_v11_0_6_get_rom_index_offset,
+	.get_rom_data_offset = smuio_v11_0_6_get_rom_data_offset,
+	.update_rom_clock_gating = smuio_v11_0_6_update_rom_clock_gating,
+	.get_clock_gating_state = smuio_v11_0_6_get_clock_gating_state,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h
new file mode 100644
index 0000000000000000000000000000000000000000..3c3f4ab0bc9b8e9a1969b403e43d37ce669a7ab5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMUIO_V11_0_6_H__
+#define __SMUIO_V11_0_6_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_smuio_funcs smuio_v11_0_6_funcs;
+
+#endif /* __SMUIO_V11_0_6_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 0b3516c4eefb33881c235ba44d8338c0cef2515e..1221aa6b40a9f53d8b7c18d88727ad9815cebfd8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -40,8 +40,6 @@
 #include "gc/gc_9_0_sh_mask.h"
 #include "sdma0/sdma0_4_0_offset.h"
 #include "sdma1/sdma1_4_0_offset.h"
-#include "hdp/hdp_4_0_offset.h"
-#include "hdp/hdp_4_0_sh_mask.h"
 #include "nbio/nbio_7_0_default.h"
 #include "nbio/nbio_7_0_offset.h"
 #include "nbio/nbio_7_0_sh_mask.h"
@@ -59,7 +57,9 @@
 #include "nbio_v6_1.h"
 #include "nbio_v7_0.h"
 #include "nbio_v7_4.h"
+#include "hdp_v4_0.h"
 #include "vega10_ih.h"
+#include "vega20_ih.h"
 #include "navi10_ih.h"
 #include "sdma_v4_0.h"
 #include "uvd_v7_0.h"
@@ -83,14 +83,6 @@
 #define mmMP0_MISC_LIGHT_SLEEP_CTRL                                                             0x01ba
 #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX                                                    0
 
-/* for Vega20 register name change */
-#define mmHDP_MEM_POWER_CTRL	0x00d4
-#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK	0x00000001L
-#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK	0x00000002L
-#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
-#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK		0x00020000L
-#define mmHDP_MEM_POWER_CTRL_BASE_IDX	0
-
 /*
  * Indirect registers accessor
  */
@@ -241,6 +233,8 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
 {
 	u32 reference_clock = adev->clock.spll.reference_freq;
 
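+	/* Renoir reports a fixed xclk of 100 MHz (returned in 10 kHz units) */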
+	if (adev->asic_type == CHIP_RENOIR)
+		return 10000;
 	if (adev->asic_type == CHIP_RAVEN)
 		return reference_clock / 4;
 
@@ -487,7 +481,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
 
 	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
 	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
-		amdgpu_reset_method == AMD_RESET_METHOD_BACO)
+	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
+	    amdgpu_reset_method == AMD_RESET_METHOD_PCI)
 		return amdgpu_reset_method;
 
 	if (amdgpu_reset_method != -1)
@@ -532,15 +527,18 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
 		return 0;
 
 	switch (soc15_asic_reset_method(adev)) {
-		case AMD_RESET_METHOD_BACO:
-			dev_info(adev->dev, "BACO reset\n");
-			return soc15_asic_baco_reset(adev);
-		case AMD_RESET_METHOD_MODE2:
-			dev_info(adev->dev, "MODE2 reset\n");
-			return amdgpu_dpm_mode2_reset(adev);
-		default:
-			dev_info(adev->dev, "MODE1 reset\n");
-			return soc15_asic_mode1_reset(adev);
+	case AMD_RESET_METHOD_PCI:
+		dev_info(adev->dev, "PCI reset\n");
+		return amdgpu_device_pci_reset(adev);
+	case AMD_RESET_METHOD_BACO:
+		dev_info(adev->dev, "BACO reset\n");
+		return soc15_asic_baco_reset(adev);
+	case AMD_RESET_METHOD_MODE2:
+		dev_info(adev->dev, "MODE2 reset\n");
+		return amdgpu_dpm_mode2_reset(adev);
+	default:
+		dev_info(adev->dev, "MODE1 reset\n");
+		return soc15_asic_mode1_reset(adev);
 	}
 }
 
@@ -699,6 +697,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 		adev->nbio.funcs = &nbio_v6_1_funcs;
 		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
 	}
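+	/* all SOC15 ASICs use the HDP 4.0 helpers */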
+	adev->hdp.funcs = &hdp_v4_0_funcs;
 
 	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
 		adev->df.funcs = &df_v3_6_funcs;
@@ -729,12 +728,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
 			}
 			if (adev->asic_type == CHIP_VEGA20)
-				amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+				amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
 			else
 				amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
 		} else {
 			if (adev->asic_type == CHIP_VEGA20)
-				amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+				amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
 			else
 				amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
@@ -787,9 +786,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 		if (amdgpu_sriov_vf(adev)) {
 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
 				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
-			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+			amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
 		} else {
-			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+			amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
 				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
 		}
@@ -834,35 +833,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 	return 0;
 }
 
-static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
-{
-	adev->nbio.funcs->hdp_flush(adev, ring);
-}
-
-static void soc15_invalidate_hdp(struct amdgpu_device *adev,
-				 struct amdgpu_ring *ring)
-{
-	if (!ring || !ring->funcs->emit_wreg)
-		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
-	else
-		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
-			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
-}
-
 static bool soc15_need_full_reset(struct amdgpu_device *adev)
 {
 	/* change this when we implement soft reset */
 	return true;
 }
 
-static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
-{
-	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
-		return;
-	/*read back hdp ras counter to reset it to 0 */
-	RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
-}
-
 static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
 				 uint64_t *count1)
 {
@@ -1011,8 +987,6 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
 	.set_uvd_clocks = &soc15_set_uvd_clocks,
 	.set_vce_clocks = &soc15_set_vce_clocks,
 	.get_config_memsize = &soc15_get_config_memsize,
-	.flush_hdp = &soc15_flush_hdp,
-	.invalidate_hdp = &soc15_invalidate_hdp,
 	.need_full_reset = &soc15_need_full_reset,
 	.init_doorbell_index = &vega10_doorbell_index_init,
 	.get_pcie_usage = &soc15_get_pcie_usage,
@@ -1034,9 +1008,6 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
 	.set_uvd_clocks = &soc15_set_uvd_clocks,
 	.set_vce_clocks = &soc15_set_vce_clocks,
 	.get_config_memsize = &soc15_get_config_memsize,
-	.flush_hdp = &soc15_flush_hdp,
-	.invalidate_hdp = &soc15_invalidate_hdp,
-	.reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
 	.need_full_reset = &soc15_need_full_reset,
 	.init_doorbell_index = &vega20_doorbell_index_init,
 	.get_pcie_usage = &vega20_get_pcie_usage,
@@ -1294,9 +1265,8 @@ static int soc15_common_late_init(void *handle)
 	if (amdgpu_sriov_vf(adev))
 		xgpu_ai_mailbox_get_irq(adev);
 
-	if (adev->asic_funcs &&
-	    adev->asic_funcs->reset_hdp_ras_error_count)
-		adev->asic_funcs->reset_hdp_ras_error_count(adev);
+	if (adev->hdp.funcs->reset_ras_error_count)
+		adev->hdp.funcs->reset_ras_error_count(adev);
 
 	if (adev->nbio.funcs->ras_late_init)
 		r = adev->nbio.funcs->ras_late_init(adev);
@@ -1422,41 +1392,6 @@ static int soc15_common_soft_reset(void *handle)
 	return 0;
 }
 
-static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
-{
-	uint32_t def, data;
-
-	if (adev->asic_type == CHIP_VEGA20 ||
-		adev->asic_type == CHIP_ARCTURUS ||
-		adev->asic_type == CHIP_RENOIR) {
-		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
-
-		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
-			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
-				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
-				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
-				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
-		else
-			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
-				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
-				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
-				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
-
-		if (def != data)
-			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
-	} else {
-		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
-
-		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
-			data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
-		else
-			data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
-
-		if (def != data)
-			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
-	}
-}
-
 static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
 {
 	uint32_t def, data;
@@ -1517,7 +1452,7 @@ static int soc15_common_set_clockgating_state(void *handle,
 				state == AMD_CG_STATE_GATE);
 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
 				state == AMD_CG_STATE_GATE);
-		soc15_update_hdp_light_sleep(adev,
+		adev->hdp.funcs->update_clock_gating(adev,
 				state == AMD_CG_STATE_GATE);
 		soc15_update_drm_clock_gating(adev,
 				state == AMD_CG_STATE_GATE);
@@ -1534,7 +1469,7 @@ static int soc15_common_set_clockgating_state(void *handle,
 				state == AMD_CG_STATE_GATE);
 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
 				state == AMD_CG_STATE_GATE);
-		soc15_update_hdp_light_sleep(adev,
+		adev->hdp.funcs->update_clock_gating(adev,
 				state == AMD_CG_STATE_GATE);
 		soc15_update_drm_clock_gating(adev,
 				state == AMD_CG_STATE_GATE);
@@ -1542,7 +1477,7 @@ static int soc15_common_set_clockgating_state(void *handle,
 				state == AMD_CG_STATE_GATE);
 		break;
 	case CHIP_ARCTURUS:
-		soc15_update_hdp_light_sleep(adev,
+		adev->hdp.funcs->update_clock_gating(adev,
 				state == AMD_CG_STATE_GATE);
 		break;
 	default:
@@ -1561,10 +1496,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
 
 	adev->nbio.funcs->get_clockgating_state(adev, flags);
 
-	/* AMD_CG_SUPPORT_HDP_LS */
-	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
-	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
-		*flags |= AMD_CG_SUPPORT_HDP_LS;
+	adev->hdp.funcs->get_clock_gating_state(adev, flags);
 
 	/* AMD_CG_SUPPORT_DRM_MGCG */
 	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
new file mode 100644
index 0000000000000000000000000000000000000000..5039375bb1d42b8dc27d261d93652df85ca86287
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _TA_SECUREDISPLAY_IF_H
+#define _TA_SECUREDISPLAY_IF_H
+
+/** Secure Display related enumerations */
+/**********************************************************/
+
+/** @enum ta_securedisplay_command
+ *    Secure Display Command ID
+ */
+enum ta_securedisplay_command {
+	/* Query whether the TA is responding; used only for validation purposes */
+	TA_SECUREDISPLAY_COMMAND__QUERY_TA              = 1,
+	/* Send the Region of Interest and CRC value to I2C */
+	TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC          = 2,
+	/* Maximum Command ID */
+	TA_SECUREDISPLAY_COMMAND__MAX_ID                = 0x7FFFFFFF,
+};
+
+/** @enum ta_securedisplay_status
+ *    Secure Display status returns in shared buffer status
+ */
+enum ta_securedisplay_status {
+	TA_SECUREDISPLAY_STATUS__SUCCESS                 = 0x00,         /* Success */
+	TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE         = 0x01,         /* Generic failure */
+	TA_SECUREDISPLAY_STATUS__INVALID_PARAMETER       = 0x02,         /* Invalid parameter */
+	TA_SECUREDISPLAY_STATUS__NULL_POINTER            = 0x03,         /* Null pointer */
+	TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR         = 0x04,         /* Failed to write to I2C */
+	TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR  = 0x05,         /* Failed to read DIO scratch register */
+	TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR          = 0x06,         /* Failed to read CRC */
+
+	TA_SECUREDISPLAY_STATUS__MAX                     = 0x7FFFFFFF,   /* Maximum status value */
+};
+
+/** @enum ta_securedisplay_max_phy
+ *    Physical ID number used to read the corresponding DIO scratch register for the ROI
+ */
+enum  ta_securedisplay_max_phy {
+	TA_SECUREDISPLAY_PHY0                           = 0,
+	TA_SECUREDISPLAY_PHY1                           = 1,
+	TA_SECUREDISPLAY_PHY2                           = 2,
+	TA_SECUREDISPLAY_PHY3                           = 3,
+	TA_SECUREDISPLAY_MAX_PHY                        = 4,
+};
+
+/** @enum ta_securedisplay_ta_query_cmd_ret
+ *    A predefined return value (0xAB) used only to validate that
+ *    communication with the Secure Display TA is functional,
+ *    i.e. that the TA is responding successfully
+ */
+enum ta_securedisplay_ta_query_cmd_ret {
+	/* This is a value to validate if TA is loaded successfully */
+	TA_SECUREDISPLAY_QUERY_CMD_RET                 = 0xAB,
+};
+
+/** @enum ta_securedisplay_buffer_size
+ *    I2C buffer size, which holds 8 bytes of ROI (X start, X end, Y start, Y end),
+ *    6 bytes of CRC (R, G, B) and 1 byte for the physical ID
+ */
+enum ta_securedisplay_buffer_size {
+	/* 15 bytes = 8 bytes (ROI) + 6 bytes (CRC) + 1 byte (phy_id) */
+	TA_SECUREDISPLAY_I2C_BUFFER_SIZE                = 15,
+};
+
+/** Input/output structures for Secure Display commands */
+/**********************************************************/
+/**
+ * Input structures
+ */
+
+/** @struct ta_securedisplay_send_roi_crc_input
+ *    Physical ID to determine which DIO scratch register should be used to get ROI
+ */
+struct ta_securedisplay_send_roi_crc_input {
+	uint32_t  phy_id;  /* Physical ID */
+};
+
+/** @union ta_securedisplay_cmd_input
+ *    Input buffer
+ */
+union ta_securedisplay_cmd_input {
+	/* send ROI and CRC input buffer format */
+	struct ta_securedisplay_send_roi_crc_input        send_roi_crc;
+	uint32_t                                          reserved[4];
+};
+
+/**
+ * Output structures
+ */
+
+/** @struct ta_securedisplay_query_ta_output
+ *  Output buffer format for the query TA command; used only to validate that the TA is responding
+ */
+struct ta_securedisplay_query_ta_output {
+	/* return value from TA when it is queried for validation purpose only */
+	uint32_t  query_cmd_ret;
+};
+
+/** @struct ta_securedisplay_send_roi_crc_output
+ *  Output buffer format for the send ROI CRC command, which passes back the I2C
+ *  buffer created inside the TA and written to I2C; used only for validation purposes
+ */
+struct ta_securedisplay_send_roi_crc_output {
+	uint8_t  i2c_buf[TA_SECUREDISPLAY_I2C_BUFFER_SIZE];  /* I2C buffer */
+	uint8_t  reserved;
+};
+
+/** @union ta_securedisplay_cmd_output
+ *    Output buffer
+ */
+union ta_securedisplay_cmd_output {
+	/* Query TA output buffer format; used only for validation purposes */
+	struct ta_securedisplay_query_ta_output            query_ta;
+	/* Send ROI CRC output buffer format; used only for validation purposes */
+	struct ta_securedisplay_send_roi_crc_output        send_roi_crc;
+	uint32_t                                           reserved[4];
+};
+
+/** @struct securedisplay_cmd
+ *    Secure Display Command which is shared buffer memory
+ */
+struct securedisplay_cmd {
+	uint32_t                             cmd_id;                    /* +0  Bytes Command ID */
+	enum ta_securedisplay_status         status;     /* +4  Bytes Status of Secure Display TA */
+	uint32_t                             reserved[2];               /* +8  Bytes Reserved */
+	union ta_securedisplay_cmd_input     securedisplay_in_message;  /* +16 Bytes Input Buffer */
+	union ta_securedisplay_cmd_output    securedisplay_out_message; /* +32 Bytes Output Buffer */
+	/** @note Total 48 Bytes */
+};
+
+#endif	/* _TA_SECUREDISPLAY_IF_H */
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index ce3319993b4bd4553025a34a92cb41e7280b483c..249fcbee7871cc6525858a8b8284fde598bfb719 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -196,19 +196,30 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
 
 	wptr = le32_to_cpu(*ih->wptr_cpu);
 
-	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
-		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
-		/* When a ring buffer overflow happen start parsing interrupt
-		 * from the last not overwritten vector (wptr + 16). Hopefully
-		 * this should allow us to catchup.
-		 */
-		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
-			 wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
-		ih->rptr = (wptr + 16) & ih->ptr_mask;
-		tmp = RREG32(mmIH_RB_CNTL);
-		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
-		WREG32(mmIH_RB_CNTL, tmp);
-	}
+	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+		goto out;
+
+	/* Double check that the overflow wasn't already cleared. */
+	wptr = RREG32(mmIH_RB_WPTR);
+
+	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+		goto out;
+
+	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+	/* When a ring buffer overflow happens, start parsing interrupts
+	 * from the last vector that was not overwritten (wptr + 16).
+	 * Hopefully this allows us to catch up.
+	 */
+
+	dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+		 wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+	ih->rptr = (wptr + 16) & ih->ptr_mask;
+	tmp = RREG32(mmIH_RB_CNTL);
+	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+	WREG32(mmIH_RB_CNTL, tmp);
+
+out:
 	return (wptr & ih->ptr_mask);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 312ecf6d24a042d4bbd05d1e0962e158f9d6e5f2..7cd67cb2ac5f114e945b16041e79fbef6c5ab465 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -36,7 +36,6 @@
 #include "vce/vce_4_0_default.h"
 #include "vce/vce_4_0_sh_mask.h"
 #include "nbif/nbif_6_1_offset.h"
-#include "hdp/hdp_4_0_offset.h"
 #include "mmhub/mmhub_1_0_offset.h"
 #include "mmhub/mmhub_1_0_sh_mask.h"
 #include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index c734e31a9e653cbdfbf05ba3e6903ef6e1ff2ab6..6117931fa8d798275219aceea41eee0ed15db652 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -32,7 +32,6 @@
 
 #include "vcn/vcn_1_0_offset.h"
 #include "vcn/vcn_1_0_sh_mask.h"
-#include "hdp/hdp_4_0_offset.h"
 #include "mmhub/mmhub_9_1_offset.h"
 #include "mmhub/mmhub_9_1_sh_mask.h"
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index e5ae31eb744ed95a155a9ac5d30abb0ad9e6cbbf..88626d83e07bebd5eeec19fba53e4b3d0dfdb6ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -38,132 +38,120 @@
 static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
 
 /**
- * vega10_ih_enable_interrupts - Enable the interrupt ring buffer
+ * vega10_ih_init_register_offset - Initialize register offsets for ih rings
  *
  * @adev: amdgpu_device pointer
  *
- * Enable the interrupt ring buffer (VEGA10).
+ * Initialize the register offsets for the ih rings (VEGA10).
  */
-static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
+static void vega10_ih_init_register_offset(struct amdgpu_device *adev)
 {
-	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-
-	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
-	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
-	if (amdgpu_sriov_vf(adev)) {
-		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
-			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
-			return;
-		}
-	} else {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+	struct amdgpu_ih_regs *ih_regs;
+
+	if (adev->irq.ih.ring_size) {
+		ih_regs = &adev->irq.ih.ih_regs;
+		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
+		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
+		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
+		ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
+		ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
+		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
 	}
-	adev->irq.ih.enabled = true;
 
 	if (adev->irq.ih1.ring_size) {
-		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
-		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
-					   RB_ENABLE, 1);
-		if (amdgpu_sriov_vf(adev)) {
-			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
-						ih_rb_cntl)) {
-				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
-				return;
-			}
-		} else {
-			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
-		}
-		adev->irq.ih1.enabled = true;
+		ih_regs = &adev->irq.ih1.ih_regs;
+		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
+		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
+		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
+		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
 	}
 
 	if (adev->irq.ih2.ring_size) {
-		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
-		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
-					   RB_ENABLE, 1);
-		if (amdgpu_sriov_vf(adev)) {
-			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
-						ih_rb_cntl)) {
-				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
-				return;
-			}
-		} else {
-			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
-		}
-		adev->irq.ih2.enabled = true;
+		ih_regs = &adev->irq.ih2.ih_regs;
+		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
+		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
+		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
+		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
 	}
-
-	if (adev->irq.ih_soft.ring_size)
-		adev->irq.ih_soft.enabled = true;
 }
 
 /**
- * vega10_ih_disable_interrupts - Disable the interrupt ring buffer
+ * vega10_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
  *
  * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
  *
- * Disable the interrupt ring buffer (VEGA10).
+ * Toggle the interrupt ring buffer (VEGA10).
  */
-static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
+static int vega10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
+					    struct amdgpu_ih_ring *ih,
+					    bool enable)
 {
-	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
+	struct amdgpu_ih_regs *ih_regs;
+	uint32_t tmp;
 
-	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
-	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
+	ih_regs = &ih->ih_regs;
+
+	tmp = RREG32(ih_regs->ih_rb_cntl);
+	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+	/* enable_intr field is only valid in ring0 */
+	if (ih == &adev->irq.ih)
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
 	if (amdgpu_sriov_vf(adev)) {
-		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
-			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
-			return;
+		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+			dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+			return -ETIMEDOUT;
 		}
 	} else {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+		WREG32(ih_regs->ih_rb_cntl, tmp);
 	}
 
-	/* set rptr, wptr to 0 */
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
-	adev->irq.ih.enabled = false;
-	adev->irq.ih.rptr = 0;
-
-	if (adev->irq.ih1.ring_size) {
-		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
-		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
-					   RB_ENABLE, 0);
-		if (amdgpu_sriov_vf(adev)) {
-			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
-						ih_rb_cntl)) {
-				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
-				return;
-			}
-		} else {
-			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
-		}
+	if (enable) {
+		ih->enabled = true;
+	} else {
 		/* set rptr, wptr to 0 */
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
-		adev->irq.ih1.enabled = false;
-		adev->irq.ih1.rptr = 0;
+		WREG32(ih_regs->ih_rb_rptr, 0);
+		WREG32(ih_regs->ih_rb_wptr, 0);
+		ih->enabled = false;
+		ih->rptr = 0;
 	}
 
-	if (adev->irq.ih2.ring_size) {
-		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
-		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
-					   RB_ENABLE, 0);
-		if (amdgpu_sriov_vf(adev)) {
-			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
-						ih_rb_cntl)) {
-				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
-				return;
-			}
-		} else {
-			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
-		}
+	return 0;
+}
 
-		/* set rptr, wptr to 0 */
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
-		adev->irq.ih2.enabled = false;
-		adev->irq.ih2.rptr = 0;
+/**
+ * vega10_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable or disable interrupt ring buffers
+ *
+ * Toggle all the available interrupt ring buffers (VEGA10).
+ */
+static int vega10_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable)
+{
+	struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+	int i;
+	int r;
+
+	for (i = 0; i < ARRAY_SIZE(ih); i++) {
+		if (ih[i]->ring_size) {
+			r = vega10_ih_toggle_ring_interrupts(adev, ih[i], enable);
+			if (r)
+				return r;
+		}
 	}
+
+	return 0;
 }
 
 static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
@@ -208,6 +196,58 @@ static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
 	return ih_doorbell_rtpr;
 }
 
+/**
+ * vega10_ih_enable_ring - enable an ih ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Enable an ih ring buffer (VEGA10).
+ */
+static int vega10_ih_enable_ring(struct amdgpu_device *adev,
+				 struct amdgpu_ih_ring *ih)
+{
+	struct amdgpu_ih_regs *ih_regs;
+	uint32_t tmp;
+
+	ih_regs = &ih->ih_regs;
+
+	/* Ring buffer base. [39:8] of the 40-bit address of the beginning of the ring buffer */
+	WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
+	WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
+
+	tmp = RREG32(ih_regs->ih_rb_cntl);
+	tmp = vega10_ih_rb_cntl(ih, tmp);
+	if (ih == &adev->irq.ih)
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
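+	/* ring1 is drained when full rather than relying on wptr overflow */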
+	if (ih == &adev->irq.ih1) {
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+	}
+	if (amdgpu_sriov_vf(adev)) {
+		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+			dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+			return -ETIMEDOUT;
+		}
+	} else {
+		WREG32(ih_regs->ih_rb_cntl, tmp);
+	}
+
+	if (ih == &adev->irq.ih) {
+		/* set the ih ring 0 writeback address whether it's enabled or not */
+		WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
+		WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
+	}
+
+	/* set rptr, wptr to 0 */
+	WREG32(ih_regs->ih_rb_wptr, 0);
+	WREG32(ih_regs->ih_rb_rptr, 0);
+
+	WREG32(ih_regs->ih_doorbell_rptr, vega10_ih_doorbell_rptr(ih));
+
+	return 0;
+}
+
 /**
  * vega10_ih_irq_init - init and enable the interrupt ring
  *
@@ -221,116 +261,34 @@ static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
  */
 static int vega10_ih_irq_init(struct amdgpu_device *adev)
 {
-	struct amdgpu_ih_ring *ih;
-	u32 ih_rb_cntl, ih_chicken;
-	int ret = 0;
+	struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+	u32 ih_chicken;
+	int ret;
+	int i;
 	u32 tmp;
 
 	/* disable irqs */
-	vega10_ih_disable_interrupts(adev);
+	ret = vega10_ih_toggle_interrupts(adev, false);
+	if (ret)
+		return ret;
 
 	adev->nbio.funcs->ih_control(adev);
 
-	ih = &adev->irq.ih;
-	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);
-
-	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-	ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
-	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
-				   !!adev->irq.msi_enabled);
-	if (amdgpu_sriov_vf(adev)) {
-		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
-			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
-			return -ETIMEDOUT;
-		}
-	} else {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
-	}
-
-	if ((adev->asic_type == CHIP_ARCTURUS &&
-	     adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
-	    adev->asic_type == CHIP_RENOIR) {
+	if (adev->asic_type == CHIP_RENOIR) {
 		ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
 		if (adev->irq.ih.use_bus_addr) {
 			ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
 						   MC_SPACE_GPA_ENABLE, 1);
-		} else {
-			ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
-						   MC_SPACE_FBPA_ENABLE, 1);
 		}
 		WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
 	}
 
-	/* set the writeback address whether it's enabled or not */
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
-		     lower_32_bits(ih->wptr_addr));
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
-		     upper_32_bits(ih->wptr_addr) & 0xFFFF);
-
-	/* set rptr, wptr to 0 */
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
-
-	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
-		     vega10_ih_doorbell_rptr(ih));
-
-	ih = &adev->irq.ih1;
-	if (ih->ring_size) {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
-			     (ih->gpu_addr >> 40) & 0xff);
-
-		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
-		ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
-		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
-					   WPTR_OVERFLOW_ENABLE, 0);
-		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
-					   RB_FULL_DRAIN_ENABLE, 1);
-		if (amdgpu_sriov_vf(adev)) {
-			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
-						ih_rb_cntl)) {
-				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
-				return -ETIMEDOUT;
-			}
-		} else {
-			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+	for (i = 0; i < ARRAY_SIZE(ih); i++) {
+		if (ih[i]->ring_size) {
+			ret = vega10_ih_enable_ring(adev, ih[i]);
+			if (ret)
+				return ret;
 		}
-
-		/* set rptr, wptr to 0 */
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
-
-		WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
-			     vega10_ih_doorbell_rptr(ih));
-	}
-
-	ih = &adev->irq.ih2;
-	if (ih->ring_size) {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
-			     (ih->gpu_addr >> 40) & 0xff);
-
-		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
-		ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
-
-		if (amdgpu_sriov_vf(adev)) {
-			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
-						ih_rb_cntl)) {
-				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
-				return -ETIMEDOUT;
-			}
-		} else {
-			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
-		}
-
-		/* set rptr, wptr to 0 */
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
-
-		WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
-			     vega10_ih_doorbell_rptr(ih));
 	}
 
 	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
@@ -345,9 +303,14 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 	pci_set_master(adev->pdev);
 
 	/* enable interrupts */
-	vega10_ih_enable_interrupts(adev);
+	ret = vega10_ih_toggle_interrupts(adev, true);
+	if (ret)
+		return ret;
 
-	return ret;
+	if (adev->irq.ih_soft.ring_size)
+		adev->irq.ih_soft.enabled = true;
+
+	return 0;
 }
 
 /**
@@ -359,7 +322,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
  */
 static void vega10_ih_irq_disable(struct amdgpu_device *adev)
 {
-	vega10_ih_disable_interrupts(adev);
+	vega10_ih_toggle_interrupts(adev, false);
 
 	/* Wait and acknowledge irq */
 	mdelay(1);
@@ -379,25 +342,17 @@ static void vega10_ih_irq_disable(struct amdgpu_device *adev)
 static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
 			      struct amdgpu_ih_ring *ih)
 {
-	u32 wptr, reg, tmp;
+	u32 wptr, tmp;
+	struct amdgpu_ih_regs *ih_regs;
 
 	wptr = le32_to_cpu(*ih->wptr_cpu);
+	ih_regs = &ih->ih_regs;
 
 	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
 		goto out;
 
 	/* Double check that the overflow wasn't already cleared. */
-
-	if (ih == &adev->irq.ih)
-		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
-	else if (ih == &adev->irq.ih1)
-		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
-	else if (ih == &adev->irq.ih2)
-		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
-	else
-		BUG();
-
-	wptr = RREG32_NO_KIQ(reg);
+	wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
 	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
 		goto out;
 
@@ -413,68 +368,14 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
 		 wptr, ih->rptr, tmp);
 	ih->rptr = tmp;
 
-	if (ih == &adev->irq.ih)
-		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
-	else if (ih == &adev->irq.ih1)
-		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
-	else if (ih == &adev->irq.ih2)
-		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
-	else
-		BUG();
-
-	tmp = RREG32_NO_KIQ(reg);
+	tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
-	WREG32_NO_KIQ(reg, tmp);
+	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
 
 out:
 	return (wptr & ih->ptr_mask);
 }
 
-/**
- * vega10_ih_decode_iv - decode an interrupt vector
- *
- * @adev: amdgpu_device pointer
- * @ih: IH ring buffer to decode
- * @entry: IV entry to place decoded information into
- *
- * Decodes the interrupt vector at the current rptr
- * position and also advance the position.
- */
-static void vega10_ih_decode_iv(struct amdgpu_device *adev,
-				struct amdgpu_ih_ring *ih,
-				struct amdgpu_iv_entry *entry)
-{
-	/* wptr/rptr are in bytes! */
-	u32 ring_index = ih->rptr >> 2;
-	uint32_t dw[8];
-
-	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
-	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
-	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
-	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
-	dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
-	dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
-	dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
-	dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
-
-	entry->client_id = dw[0] & 0xff;
-	entry->src_id = (dw[0] >> 8) & 0xff;
-	entry->ring_id = (dw[0] >> 16) & 0xff;
-	entry->vmid = (dw[0] >> 24) & 0xf;
-	entry->vmid_src = (dw[0] >> 31);
-	entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
-	entry->timestamp_src = dw[2] >> 31;
-	entry->pasid = dw[3] & 0xffff;
-	entry->pasid_src = dw[3] >> 31;
-	entry->src_data[0] = dw[4];
-	entry->src_data[1] = dw[5];
-	entry->src_data[2] = dw[6];
-	entry->src_data[3] = dw[7];
-
-	/* wptr/rptr are in bytes! */
-	ih->rptr += 32;
-}
-
 /**
  * vega10_ih_irq_rearm - rearm IRQ if lost
  *
@@ -485,22 +386,14 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
 static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
 			       struct amdgpu_ih_ring *ih)
 {
-	uint32_t reg_rptr = 0;
 	uint32_t v = 0;
 	uint32_t i = 0;
+	struct amdgpu_ih_regs *ih_regs;
 
-	if (ih == &adev->irq.ih)
-		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
-	else if (ih == &adev->irq.ih1)
-		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
-	else if (ih == &adev->irq.ih2)
-		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
-	else
-		return;
-
+	ih_regs = &ih->ih_regs;
 	/* Rearm IRQ / re-write doorbell if doorbell write is lost */
 	for (i = 0; i < MAX_REARM_RETRY; i++) {
-		v = RREG32_NO_KIQ(reg_rptr);
+		v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
 		if ((v < ih->ring_size) && (v != ih->rptr))
 			WDOORBELL32(ih->doorbell_index, ih->rptr);
 		else
@@ -519,6 +412,8 @@ static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
 static void vega10_ih_set_rptr(struct amdgpu_device *adev,
 			       struct amdgpu_ih_ring *ih)
 {
+	struct amdgpu_ih_regs *ih_regs;
+
 	if (ih->use_doorbell) {
 		/* XXX check if swapping is necessary on BE */
 		*ih->rptr_cpu = ih->rptr;
@@ -526,12 +421,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
 
 		if (amdgpu_sriov_vf(adev))
 			vega10_ih_irq_rearm(adev, ih);
-	} else if (ih == &adev->irq.ih) {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
-	} else if (ih == &adev->irq.ih1) {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
-	} else if (ih == &adev->irq.ih2) {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
+	} else {
+		ih_regs = &ih->ih_regs;
+		WREG32(ih_regs->ih_rb_rptr, ih->rptr);
 	}
 }
 
@@ -600,19 +492,23 @@ static int vega10_ih_sw_init(void *handle)
 	adev->irq.ih.use_doorbell = true;
 	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
 
-	r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
-	if (r)
-		return r;
+	if (!(adev->flags & AMD_IS_APU)) {
+		r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+		if (r)
+			return r;
 
-	adev->irq.ih1.use_doorbell = true;
-	adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+		adev->irq.ih1.use_doorbell = true;
+		adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
 
-	r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
-	if (r)
-		return r;
+		r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+		if (r)
+			return r;
 
-	adev->irq.ih2.use_doorbell = true;
-	adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+		adev->irq.ih2.use_doorbell = true;
+		adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+	}
+	/* initialize ih control register offsets */
+	vega10_ih_init_register_offset(adev);
 
 	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
 	if (r)
@@ -628,6 +524,7 @@ static int vega10_ih_sw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	amdgpu_irq_fini(adev);
+	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
 	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
 	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
 	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
@@ -698,15 +595,11 @@ static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev,
 		def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
 		field_val = enable ? 0 : 1;
 		/**
-		 * Vega10 does not have IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE
-		 * and IH_BUFFER_MEM_CLK_SOFT_OVERRIDE field.
+		 * Vega10/12 and RAVEN don't have the IH_BUFFER_MEM_CLK_SOFT_OVERRIDE field.
 		 */
-		if (adev->asic_type > CHIP_VEGA10) {
-			data = REG_SET_FIELD(data, IH_CLK_CTRL,
-				     IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val);
+		if (adev->asic_type == CHIP_RENOIR)
 			data = REG_SET_FIELD(data, IH_CLK_CTRL,
 				     IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val);
-		}
 
 		data = REG_SET_FIELD(data, IH_CLK_CTRL,
 				     DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
@@ -759,7 +652,7 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs vega10_ih_funcs = {
 	.get_wptr = vega10_ih_get_wptr,
-	.decode_iv = vega10_ih_decode_iv,
+	.decode_iv = amdgpu_ih_decode_iv_helper,
 	.set_rptr = vega10_ih_set_rptr
 };
 
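The decode_iv hook now points at the shared amdgpu_ih_decode_iv_helper(), which unpacks the same 8-dword IV layout the removed vega10_ih_decode_iv() handled. For reference, here is a minimal standalone sketch of that unpacking, with the field positions copied from the code removed above (the struct and main() are illustrative, not the kernel's types):

#include <stdint.h>
#include <stdio.h>

/* Illustrative subset of the fields amdgpu_iv_entry carries. */
struct iv_sketch {
	unsigned int client_id, src_id, ring_id, vmid, vmid_src;
	uint64_t timestamp;
	unsigned int pasid;
};

static void decode_iv_sketch(const uint32_t dw[8], struct iv_sketch *e)
{
	e->client_id = dw[0] & 0xff;
	e->src_id    = (dw[0] >> 8) & 0xff;
	e->ring_id   = (dw[0] >> 16) & 0xff;
	e->vmid      = (dw[0] >> 24) & 0xf;
	e->vmid_src  = dw[0] >> 31;
	e->timestamp = dw[1] | ((uint64_t)(dw[2] & 0xffff) << 32);
	e->pasid     = dw[3] & 0xffff;
	/* dw[4]..dw[7] are src_data[0..3]; rptr then advances 32 bytes */
}

int main(void)
{
	uint32_t dw[8] = { 0x1102001a, 0xdeadbeef, 0x00000012, 0x002a, 0, 0, 0, 0 };
	struct iv_sketch e;

	decode_iv_sketch(dw, &e);
	printf("client 0x%02x src %u ring %u vmid %u pasid 0x%x ts 0x%llx\n",
	       e.client_id, e.src_id, e.ring_id, e.vmid, e.pasid,
	       (unsigned long long)e.timestamp);
	return 0;
}
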
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
new file mode 100644
index 0000000000000000000000000000000000000000..5a3c867d58811437f0a06ceaa398d2e552ca12fd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -0,0 +1,703 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "soc15.h"
+
+#include "oss/osssys_4_2_0_offset.h"
+#include "oss/osssys_4_2_0_sh_mask.h"
+
+#include "soc15_common.h"
+#include "vega20_ih.h"
+
+#define MAX_REARM_RETRY 10
+
+static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev);
+
+/**
+ * vega20_ih_init_register_offset - Initialize register offset for ih rings
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the register offsets for the ih rings (VEGA20).
+ */
+static void vega20_ih_init_register_offset(struct amdgpu_device *adev)
+{
+	struct amdgpu_ih_regs *ih_regs;
+
+	if (adev->irq.ih.ring_size) {
+		ih_regs = &adev->irq.ih.ih_regs;
+		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
+		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
+		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
+		ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
+		ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
+		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
+	}
+
+	if (adev->irq.ih1.ring_size) {
+		ih_regs = &adev->irq.ih1.ih_regs;
+		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
+		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
+		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
+		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
+	}
+
+	if (adev->irq.ih2.ring_size) {
+		ih_regs = &adev->irq.ih2.ih_regs;
+		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
+		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
+		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
+		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
+	}
+}
+
+/**
+ * vega20_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
+ *
+ * Toggle the interrupt ring buffer (VEGA20)
+ */
+static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
+					    struct amdgpu_ih_ring *ih,
+					    bool enable)
+{
+	struct amdgpu_ih_regs *ih_regs;
+	uint32_t tmp;
+
+	ih_regs = &ih->ih_regs;
+
+	tmp = RREG32(ih_regs->ih_rb_cntl);
+	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+	/* enable_intr field is only valid in ring0 */
+	if (ih == &adev->irq.ih)
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
+	if (amdgpu_sriov_vf(adev)) {
+		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+			dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+			return -ETIMEDOUT;
+		}
+	} else {
+		WREG32(ih_regs->ih_rb_cntl, tmp);
+	}
+
+	if (enable) {
+		ih->enabled = true;
+	} else {
+		/* set rptr, wptr to 0 */
+		WREG32(ih_regs->ih_rb_rptr, 0);
+		WREG32(ih_regs->ih_rb_wptr, 0);
+		ih->enabled = false;
+		ih->rptr = 0;
+	}
+
+	return 0;
+}
+
+/**
+ * vega20_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable or disable interrupt ring buffers
+ *
+ * Toggle all the available interrupt ring buffers (VEGA20).
+ */
+static int vega20_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable)
+{
+	struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+	int i;
+	int r;
+
+	for (i = 0; i < ARRAY_SIZE(ih); i++) {
+		if (ih[i]->ring_size) {
+			r = vega20_ih_toggle_ring_interrupts(adev, ih[i], enable);
+			if (r)
+				return r;
+		}
+	}
+
+	return 0;
+}
+
+static uint32_t vega20_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
+{
+	int rb_bufsz = order_base_2(ih->ring_size / 4);
+
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+				   MC_SPACE, ih->use_bus_addr ? 1 : 4);
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+				   WPTR_OVERFLOW_CLEAR, 1);
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+				   WPTR_OVERFLOW_ENABLE, 1);
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
+	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
+	 * value is written to memory
+	 */
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+				   WPTR_WRITEBACK_ENABLE, 1);
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
+
+	return ih_rb_cntl;
+}
+
+static uint32_t vega20_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
+{
+	u32 ih_doorbell_rtpr = 0;
+
+	if (ih->use_doorbell) {
+		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+						 IH_DOORBELL_RPTR, OFFSET,
+						 ih->doorbell_index);
+		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+						 IH_DOORBELL_RPTR,
+						 ENABLE, 1);
+	} else {
+		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+						 IH_DOORBELL_RPTR,
+						 ENABLE, 0);
+	}
+	return ih_doorbell_rtpr;
+}
+
+/**
+ * vega20_ih_enable_ring - enable an ih ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Enable an ih ring buffer (VEGA20)
+ */
+static int vega20_ih_enable_ring(struct amdgpu_device *adev,
+				 struct amdgpu_ih_ring *ih)
+{
+	struct amdgpu_ih_regs *ih_regs;
+	uint32_t tmp;
+
+	ih_regs = &ih->ih_regs;
+
+	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
+	WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
+	WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
+
+	tmp = RREG32(ih_regs->ih_rb_cntl);
+	tmp = vega20_ih_rb_cntl(ih, tmp);
+	if (ih == &adev->irq.ih)
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+	if (ih == &adev->irq.ih1) {
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+	}
+	if (amdgpu_sriov_vf(adev)) {
+		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+			dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+			return -ETIMEDOUT;
+		}
+	} else {
+		WREG32(ih_regs->ih_rb_cntl, tmp);
+	}
+
+	if (ih == &adev->irq.ih) {
+		/* set the ih ring 0 writeback address whether it's enabled or not */
+		WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
+		WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
+	}
+
+	/* set rptr, wptr to 0 */
+	WREG32(ih_regs->ih_rb_wptr, 0);
+	WREG32(ih_regs->ih_rb_rptr, 0);
+
+	WREG32(ih_regs->ih_doorbell_rptr, vega20_ih_doorbell_rptr(ih));
+
+	return 0;
+}
+
+/**
+ * vega20_ih_reroute_ih - reroute VMC/UTCL2 ih to an ih ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Reroute VMC and UTCL2 interrupts on the primary ih ring to
+ * ih ring 1 so they won't be lost when bunches of page fault
+ * interrupts overwhelm the interrupt handler (VEGA20)
+ */
+static void vega20_ih_reroute_ih(struct amdgpu_device *adev)
+{
+	uint32_t tmp;
+
+	/* on vega20 the ih reroute goes through psp,
+	 * so this function only needs to program it for arcturus
+	 */
+	if (adev->asic_type == CHIP_ARCTURUS) {
+		/* Reroute to IH ring 1 for VMC */
+		WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
+		tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+		tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+		tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+		WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+
+		/* Reroute IH ring 1 for UTCL2 */
+		WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
+		tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+		tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+		WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+	}
+}
+
+/**
+ * vega20_ih_irq_init - init and enable the interrupt ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate a ring buffer for the interrupt controller,
+ * enable the RLC, disable interrupts, configure the IH
+ * ring buffer and enable it (VEGA20).
+ * Called at device load and resume.
+ * Returns 0 for success, errors for failure.
+ */
+static int vega20_ih_irq_init(struct amdgpu_device *adev)
+{
+	struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+	u32 ih_chicken;
+	int ret;
+	int i;
+	u32 tmp;
+
+	/* disable irqs */
+	ret = vega20_ih_toggle_interrupts(adev, false);
+	if (ret)
+		return ret;
+
+	adev->nbio.funcs->ih_control(adev);
+
+	if (adev->asic_type == CHIP_ARCTURUS &&
+	    adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+		ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
+		if (adev->irq.ih.use_bus_addr) {
+			ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+						   MC_SPACE_GPA_ENABLE, 1);
+		}
+		WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ih); i++) {
+		if (ih[i]->ring_size) {
+			if (i == 1)
+				vega20_ih_reroute_ih(adev);
+			ret = vega20_ih_enable_ring(adev, ih[i]);
+			if (ret)
+				return ret;
+		}
+	}
+
+	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
+	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
+			    CLIENT18_IS_STORM_CLIENT, 1);
+	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
+
+	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
+	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
+	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
+
+	pci_set_master(adev->pdev);
+
+	/* enable interrupts */
+	ret = vega20_ih_toggle_interrupts(adev, true);
+	if (ret)
+		return ret;
+
+	if (adev->irq.ih_soft.ring_size)
+		adev->irq.ih_soft.enabled = true;
+
+	return 0;
+}
+
+/**
+ * vega20_ih_irq_disable - disable interrupts
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts on the hw (VEGA20).
+ */
+static void vega20_ih_irq_disable(struct amdgpu_device *adev)
+{
+	vega20_ih_toggle_interrupts(adev, false);
+
+	/* Wait and acknowledge irq */
+	mdelay(1);
+}
+
+/**
+ * vega20_ih_get_wptr - get the IH ring buffer wptr
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Get the IH ring buffer wptr from either the register
+ * or the writeback memory buffer (VEGA20).  Also check for
+ * ring buffer overflow and deal with it.
+ * Returns the value of the wptr.
+ */
+static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
+			      struct amdgpu_ih_ring *ih)
+{
+	u32 wptr, tmp;
+	struct amdgpu_ih_regs *ih_regs;
+
+	wptr = le32_to_cpu(*ih->wptr_cpu);
+	ih_regs = &ih->ih_regs;
+
+	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+		goto out;
+
+	/* Double check that the overflow wasn't already cleared. */
+	wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
+	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+		goto out;
+
+	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+	/* When a ring buffer overflow happens, start parsing interrupts
+	 * from the last not-overwritten vector (wptr + 32). Hopefully
+	 * this should allow us to catch up.
+	 */
+	tmp = (wptr + 32) & ih->ptr_mask;
+	dev_warn(adev->dev, "IH ring buffer overflow "
+		 "(0x%08X, 0x%08X, 0x%08X)\n",
+		 wptr, ih->rptr, tmp);
+	ih->rptr = tmp;
+
+	tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
+	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+out:
+	return (wptr & ih->ptr_mask);
+}
+
+/**
+ * vega20_ih_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ */
+static void vega20_ih_irq_rearm(struct amdgpu_device *adev,
+			       struct amdgpu_ih_ring *ih)
+{
+	uint32_t v = 0;
+	uint32_t i = 0;
+	struct amdgpu_ih_regs *ih_regs;
+
+	ih_regs = &ih->ih_regs;
+
+	/* Rearm IRQ / re-write doorbell if doorbell write is lost */
+	for (i = 0; i < MAX_REARM_RETRY; i++) {
+		v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
+		if ((v < ih->ring_size) && (v != ih->rptr))
+			WDOORBELL32(ih->doorbell_index, ih->rptr);
+		else
+			break;
+	}
+}
+
+/**
+ * vega20_ih_set_rptr - set the IH ring buffer rptr
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Set the IH ring buffer rptr.
+ */
+static void vega20_ih_set_rptr(struct amdgpu_device *adev,
+			       struct amdgpu_ih_ring *ih)
+{
+	struct amdgpu_ih_regs *ih_regs;
+
+	if (ih->use_doorbell) {
+		/* XXX check if swapping is necessary on BE */
+		*ih->rptr_cpu = ih->rptr;
+		WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+		if (amdgpu_sriov_vf(adev))
+			vega20_ih_irq_rearm(adev, ih);
+	} else {
+		ih_regs = &ih->ih_regs;
+		WREG32(ih_regs->ih_rb_rptr, ih->rptr);
+	}
+}
+
+/**
+ * vega20_ih_self_irq - dispatch work for ring 1 and 2
+ *
+ * @adev: amdgpu_device pointer
+ * @source: irq source
+ * @entry: IV with WPTR update
+ *
+ * Update the WPTR from the IV and schedule work to handle the entries.
+ */
+static int vega20_ih_self_irq(struct amdgpu_device *adev,
+			      struct amdgpu_irq_src *source,
+			      struct amdgpu_iv_entry *entry)
+{
+	uint32_t wptr = cpu_to_le32(entry->src_data[0]);
+
+	switch (entry->ring_id) {
+	case 1:
+		*adev->irq.ih1.wptr_cpu = wptr;
+		schedule_work(&adev->irq.ih1_work);
+		break;
+	case 2:
+		*adev->irq.ih2.wptr_cpu = wptr;
+		schedule_work(&adev->irq.ih2_work);
+		break;
+	default: break;
+	}
+	return 0;
+}
+
+static const struct amdgpu_irq_src_funcs vega20_ih_self_irq_funcs = {
+	.process = vega20_ih_self_irq,
+};
+
+static void vega20_ih_set_self_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->irq.self_irq.num_types = 0;
+	adev->irq.self_irq.funcs = &vega20_ih_self_irq_funcs;
+}
+
+static int vega20_ih_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	vega20_ih_set_interrupt_funcs(adev);
+	vega20_ih_set_self_irq_funcs(adev);
+	return 0;
+}
+
+static int vega20_ih_sw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
+			      &adev->irq.self_irq);
+	if (r)
+		return r;
+
+	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
+	if (r)
+		return r;
+
+	adev->irq.ih.use_doorbell = true;
+	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
+
+	r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+	if (r)
+		return r;
+
+	adev->irq.ih1.use_doorbell = true;
+	adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+
+	r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+	if (r)
+		return r;
+
+	adev->irq.ih2.use_doorbell = true;
+	adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+
+	/* initialize ih control register offsets */
+	vega20_ih_init_register_offset(adev);
+
+	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+	if (r)
+		return r;
+
+	r = amdgpu_irq_init(adev);
+
+	return r;
+}
+
+static int vega20_ih_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_irq_fini(adev);
+	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
+	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
+	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
+	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+
+	return 0;
+}
+
+static int vega20_ih_hw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = vega20_ih_irq_init(adev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+static int vega20_ih_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	vega20_ih_irq_disable(adev);
+
+	return 0;
+}
+
+static int vega20_ih_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return vega20_ih_hw_fini(adev);
+}
+
+static int vega20_ih_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return vega20_ih_hw_init(adev);
+}
+
+static bool vega20_ih_is_idle(void *handle)
+{
+	/* todo */
+	return true;
+}
+
+static int vega20_ih_wait_for_idle(void *handle)
+{
+	/* todo */
+	return -ETIMEDOUT;
+}
+
+static int vega20_ih_soft_reset(void *handle)
+{
+	/* todo */
+
+	return 0;
+}
+
+static void vega20_ih_update_clockgating_state(struct amdgpu_device *adev,
+					       bool enable)
+{
+	uint32_t data, def, field_val;
+
+	if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
+		def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
+		field_val = enable ? 0 : 1;
+		data = REG_SET_FIELD(data, IH_CLK_CTRL,
+				     IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val);
+		data = REG_SET_FIELD(data, IH_CLK_CTRL,
+				     IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val);
+		data = REG_SET_FIELD(data, IH_CLK_CTRL,
+				     DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
+		data = REG_SET_FIELD(data, IH_CLK_CTRL,
+				     OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
+		data = REG_SET_FIELD(data, IH_CLK_CTRL,
+				     LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
+		data = REG_SET_FIELD(data, IH_CLK_CTRL,
+				     DYN_CLK_SOFT_OVERRIDE, field_val);
+		data = REG_SET_FIELD(data, IH_CLK_CTRL,
+				     REG_CLK_SOFT_OVERRIDE, field_val);
+		if (def != data)
+			WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
+	}
+}
+
+static int vega20_ih_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	vega20_ih_update_clockgating_state(adev,
+				state == AMD_CG_STATE_GATE);
+	return 0;
+
+}
+
+static int vega20_ih_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs vega20_ih_ip_funcs = {
+	.name = "vega20_ih",
+	.early_init = vega20_ih_early_init,
+	.late_init = NULL,
+	.sw_init = vega20_ih_sw_init,
+	.sw_fini = vega20_ih_sw_fini,
+	.hw_init = vega20_ih_hw_init,
+	.hw_fini = vega20_ih_hw_fini,
+	.suspend = vega20_ih_suspend,
+	.resume = vega20_ih_resume,
+	.is_idle = vega20_ih_is_idle,
+	.wait_for_idle = vega20_ih_wait_for_idle,
+	.soft_reset = vega20_ih_soft_reset,
+	.set_clockgating_state = vega20_ih_set_clockgating_state,
+	.set_powergating_state = vega20_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs vega20_ih_funcs = {
+	.get_wptr = vega20_ih_get_wptr,
+	.decode_iv = amdgpu_ih_decode_iv_helper,
+	.set_rptr = vega20_ih_set_rptr
+};
+
+static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+	adev->irq.ih_funcs = &vega20_ih_funcs;
+}
+
+const struct amdgpu_ip_block_version vega20_ih_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_IH,
+	.major = 4,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &vega20_ih_ip_funcs,
+};
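vega20_ih.c is built around the same idea as the vega10 rework earlier in this series: each ring owns a struct amdgpu_ih_regs filled in once by *_init_register_offset(), so every helper can index the right registers without if/else chains on the ring pointer. A self-contained userspace sketch of the pattern (names and offsets are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Per-ring register offsets, filled in once at init time. */
struct ih_regs_sketch {
	uint32_t ih_rb_cntl;
	uint32_t ih_rb_rptr;
	uint32_t ih_rb_wptr;
};

struct ih_ring_sketch {
	struct ih_regs_sketch ih_regs;
	uint32_t rptr;
};

/* Stub register write so the sketch compiles on its own. */
static void wreg32(uint32_t offset, uint32_t val)
{
	printf("WREG32(0x%03x) = 0x%08x\n", offset, val);
}

/* One helper serves every ring; no branching on which ring it is. */
static void set_rptr_sketch(struct ih_ring_sketch *ih)
{
	wreg32(ih->ih_regs.ih_rb_rptr, ih->rptr);
}

int main(void)
{
	struct ih_ring_sketch ring0 = { { 0x080, 0x084, 0x088 }, 32 };
	struct ih_ring_sketch ring1 = { { 0x090, 0x094, 0x098 }, 64 };

	set_rptr_sketch(&ring0);
	set_rptr_sketch(&ring1);
	return 0;
}
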
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.h b/drivers/gpu/drm/amd/amdgpu/vega20_ih.h
new file mode 100644
index 0000000000000000000000000000000000000000..7af6d8758ee38cb3a6e1a1a0a8db62ab60df8f22
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VEGA20_IH_H__
+#define __VEGA20_IH_H__
+
+extern const struct amd_ip_funcs vega20_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version vega20_ih_ip_block;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index d56b474b3a219f26767a55c6866a0dbd6cc76599..eafb76aebd004e3b1717d2499a271d5377152001 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -642,11 +642,21 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
 	return -EINVAL;
 }
 
-static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
+/**
+ * vi_asic_pci_config_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use PCI Config method to reset the GPU.
+ *
+ * Returns 0 for success.
+ */
+static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
 {
 	u32 i;
+	int r = -EINVAL;
 
-	dev_info(adev->dev, "GPU pci config reset\n");
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
 
 	/* disable BM */
 	pci_clear_master(adev->pdev);
@@ -661,29 +671,11 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
 			/* enable BM */
 			pci_set_master(adev->pdev);
 			adev->has_hw_reset = true;
-			return 0;
+			r = 0;
+			break;
 		}
 		udelay(1);
 	}
-	return -EINVAL;
-}
-
-/**
- * vi_asic_pci_config_reset - soft reset GPU
- *
- * @adev: amdgpu_device pointer
- *
- * Use PCI Config method to reset the GPU.
- *
- * Returns 0 for success.
- */
-static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
-{
-	int r;
-
-	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
-	r = vi_gpu_pci_config_reset(adev);
 
 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
 
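The vi.c change folds the old helper into vi_asic_pci_config_reset() and turns the early return inside the poll loop into r = 0; break, so both the success and timeout paths fall through to the shared amdgpu_atombios_scratch_regs_engine_hung(adev, false) cleanup. The resulting control flow, as a self-contained sketch (device access stubbed, names illustrative):

#include <stdbool.h>
#include <stdio.h>

#define TIMEOUT_USEC 100000

/* Stub for "did the device come back after the reset?" */
static bool device_ready(unsigned int waited_usec)
{
	return waited_usec >= 10;
}

static int pci_config_reset_sketch(void)
{
	int r = -1;		/* -EINVAL in the kernel */
	unsigned int i;

	/* scratch regs: mark engine hung; disable bus mastering ... */
	for (i = 0; i < TIMEOUT_USEC; i++) {
		if (device_ready(i)) {
			/* re-enable bus mastering ... */
			r = 0;
			break;	/* fall through to the common cleanup */
		}
		/* udelay(1) in the kernel */
	}
	/* scratch regs: clear engine-hung -- runs on success and timeout */
	return r;
}

int main(void)
{
	printf("reset returned %d\n", pci_config_reset_sketch());
	return 0;
}
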
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index e8fb10c41f16710e0383411e03e2b9539d526888..f02c938f75dad1b712a1f8a8818ef889bf2fc7c7 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -7,6 +7,8 @@ config HSA_AMD
 	bool "HSA kernel driver for AMD GPU devices"
 	depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64)
 	imply AMD_IOMMU_V2 if X86_64
+	select HMM_MIRROR
 	select MMU_NOTIFIER
+	select DRM_AMDGPU_USERPTR
 	help
 	  Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 16262e5d93f5c33a0af6041cd1f7a471e9d361bf..7351dd195274e9b15d18be51e0f85378f63b2c3e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -243,11 +243,11 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
 static inline void dqm_lock(struct device_queue_manager *dqm)
 {
 	mutex_lock(&dqm->lock_hidden);
-	dqm->saved_flags = memalloc_nofs_save();
+	dqm->saved_flags = memalloc_noreclaim_save();
 }
 static inline void dqm_unlock(struct device_queue_manager *dqm)
 {
-	memalloc_nofs_restore(dqm->saved_flags);
+	memalloc_noreclaim_restore(dqm->saved_flags);
 	mutex_unlock(&dqm->lock_hidden);
 }
 
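memalloc_noreclaim_save() sets PF_MEMALLOC on the task, so allocations made while dqm->lock_hidden is held skip direct reclaim entirely; the previous memalloc_nofs_save() scope only kept reclaim out of filesystem paths. The save/restore pair must bracket the critical section exactly like the lock does. A userspace sketch of that scoped-flag pattern (the stubs stand in for the kernel helpers):

#include <stdio.h>

/* One bit standing in for PF_MEMALLOC in current->flags. */
static unsigned int task_flags;

static unsigned int noreclaim_save(void)
{
	unsigned int old = task_flags;

	task_flags |= 0x1;
	return old;
}

static void noreclaim_restore(unsigned int old)
{
	task_flags = old;	/* restoring (not clearing) allows nesting */
}

int main(void)
{
	unsigned int saved;

	saved = noreclaim_save();		/* dqm_lock()   */
	printf("inside section: flags=%#x\n", task_flags);
	noreclaim_restore(saved);		/* dqm_unlock() */
	printf("after section:  flags=%#x\n", task_flags);
	return 0;
}
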
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 241bd6ff79f4add030995f015c895d25237412ce..74a460be077bf8c138e9748d10ff7022f44c7564 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -44,6 +44,25 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
 	client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
 	pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
 
+	/* Only handle clients we care about */
+	if (client_id != SOC15_IH_CLIENTID_GRBM_CP &&
+	    client_id != SOC15_IH_CLIENTID_SDMA0 &&
+	    client_id != SOC15_IH_CLIENTID_SDMA1 &&
+	    client_id != SOC15_IH_CLIENTID_SDMA2 &&
+	    client_id != SOC15_IH_CLIENTID_SDMA3 &&
+	    client_id != SOC15_IH_CLIENTID_SDMA4 &&
+	    client_id != SOC15_IH_CLIENTID_SDMA5 &&
+	    client_id != SOC15_IH_CLIENTID_SDMA6 &&
+	    client_id != SOC15_IH_CLIENTID_SDMA7 &&
+	    client_id != SOC15_IH_CLIENTID_VMC &&
+	    client_id != SOC15_IH_CLIENTID_VMC1 &&
+	    client_id != SOC15_IH_CLIENTID_UTCL2 &&
+	    client_id != SOC15_IH_CLIENTID_SE0SH &&
+	    client_id != SOC15_IH_CLIENTID_SE1SH &&
+	    client_id != SOC15_IH_CLIENTID_SE2SH &&
+	    client_id != SOC15_IH_CLIENTID_SE3SH)
+		return false;
+
 	/* This is a known issue for gfx9. Under non HWS, pasid is not set
 	 * in the interrupt payload, so we need to find out the pasid on our
 	 * own.
@@ -96,17 +115,30 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
 	vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
 	context_id = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
 
-	if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
-		kfd_signal_event_interrupt(pasid, context_id, 32);
-	else if (source_id == SOC15_INTSRC_SDMA_TRAP)
-		kfd_signal_event_interrupt(pasid, context_id & 0xfffffff, 28);
-	else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
-		kfd_signal_event_interrupt(pasid, context_id & 0xffffff, 24);
-	else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
-		kfd_signal_hw_exception_event(pasid);
-	else if (client_id == SOC15_IH_CLIENTID_VMC ||
-		client_id == SOC15_IH_CLIENTID_VMC1 ||
-		 client_id == SOC15_IH_CLIENTID_UTCL2) {
+	if (client_id == SOC15_IH_CLIENTID_GRBM_CP ||
+	    client_id == SOC15_IH_CLIENTID_SE0SH ||
+	    client_id == SOC15_IH_CLIENTID_SE1SH ||
+	    client_id == SOC15_IH_CLIENTID_SE2SH ||
+	    client_id == SOC15_IH_CLIENTID_SE3SH) {
+		if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
+			kfd_signal_event_interrupt(pasid, context_id, 32);
+		else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
+			kfd_signal_event_interrupt(pasid, context_id & 0xffffff, 24);
+		else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
+			kfd_signal_hw_exception_event(pasid);
+	} else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
+		   client_id == SOC15_IH_CLIENTID_SDMA1 ||
+		   client_id == SOC15_IH_CLIENTID_SDMA2 ||
+		   client_id == SOC15_IH_CLIENTID_SDMA3 ||
+		   client_id == SOC15_IH_CLIENTID_SDMA4 ||
+		   client_id == SOC15_IH_CLIENTID_SDMA5 ||
+		   client_id == SOC15_IH_CLIENTID_SDMA6 ||
+		   client_id == SOC15_IH_CLIENTID_SDMA7) {
+		if (source_id == SOC15_INTSRC_SDMA_TRAP)
+			kfd_signal_event_interrupt(pasid, context_id & 0xfffffff, 28);
+	} else if (client_id == SOC15_IH_CLIENTID_VMC ||
+		   client_id == SOC15_IH_CLIENTID_VMC1 ||
+		   client_id == SOC15_IH_CLIENTID_UTCL2) {
 		struct kfd_vm_fault_info info = {0};
 		uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
 
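The new early-out in event_interrupt_isr_v9() keeps uninteresting clients off the slow path before any payload parsing. The long chain of != comparisons could equivalently be written as a lookup over a small table, which is easier to audit as the list grows; a hypothetical alternative formulation (not what the patch does, and the client IDs below are placeholders, not the SOC15_IH_CLIENTID_* values):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder values standing in for SOC15_IH_CLIENTID_*. */
enum {
	CL_GRBM_CP = 0x14,
	CL_SDMA0   = 0x08,
	CL_VMC     = 0x12,
	CL_UTCL2   = 0x1b,
};

static const uint8_t handled_clients[] = {
	CL_GRBM_CP, CL_SDMA0, CL_VMC, CL_UTCL2,
};

static bool client_is_handled(uint8_t client_id)
{
	unsigned int i;

	for (i = 0; i < sizeof(handled_clients); i++)
		if (handled_clients[i] == client_id)
			return true;
	return false;
}

int main(void)
{
	printf("VMC handled: %d, client 0x01 handled: %d\n",
	       client_is_handled(CL_VMC), client_is_handled(0x01));
	return 0;
}
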
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index a3fc2387381956169043c19c2de341833c80a757..0be72789ccbc066a49f1b0589b21024628a73930 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -497,8 +497,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
 			      dev->node_props.num_sdma_queues_per_engine);
 	sysfs_show_32bit_prop(buffer, offs, "num_cp_queues",
 			      dev->node_props.num_cp_queues);
-	sysfs_show_64bit_prop(buffer, offs, "unique_id",
-			      dev->node_props.unique_id);
 
 	if (dev->gpu) {
 		log_max_watch_addr =
@@ -529,6 +527,9 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
 				      dev->node_props.capability);
 		sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
 				      dev->gpu->sdma_fw_version);
+		sysfs_show_64bit_prop(buffer, offs, "unique_id",
+				      amdgpu_amdkfd_get_unique_id(dev->gpu->kgd));
+
 	}
 
 	return sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_ccompute",
@@ -1340,7 +1341,6 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 		dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
 		amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
 	dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
-	dev->node_props.unique_id = amdgpu_amdkfd_get_unique_id(dev->gpu->kgd);
 
 	kfd_fill_mem_clk_max_info(dev);
 	kfd_fill_iolink_non_crat_info(dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 326d9b26b7aa7fbbab553671ee1a0d348739d8bf..416fd910e12e1b055d579d366f3e7f31f500d1dd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -57,7 +57,6 @@
 
 struct kfd_node_properties {
 	uint64_t hive_id;
-	uint64_t unique_id;
 	uint32_t cpu_cores_count;
 	uint32_t simd_count;
 	uint32_t mem_banks_count;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 961abf1cf040cc0372d0d9c74d456684f81cb07d..94cd5ddd67ef83bf2fe78059cf675babbdf49531 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -60,7 +60,6 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/version.h>
 #include <linux/types.h>
 #include <linux/pm_runtime.h>
 #include <linux/pci.h>
@@ -1016,8 +1015,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 	init_data.flags.power_down_display_on_boot = true;
 
-	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
-
 	/* Display Core create. */
 	adev->dm.dc = dc_create(&init_data);
 
@@ -1131,7 +1128,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 	if (adev->dm.hdcp_workqueue) {
-		hdcp_destroy(adev->dm.hdcp_workqueue);
+		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
 		adev->dm.hdcp_workqueue = NULL;
 	}
 
@@ -1778,6 +1775,11 @@ static int dm_suspend(void *handle)
 
 	if (amdgpu_in_reset(adev)) {
 		mutex_lock(&dm->dc_lock);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		dc_allow_idle_optimizations(adev->dm.dc, false);
+#endif
+
 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
 
 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
@@ -3719,10 +3721,53 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
 };
 
 
+static void get_min_max_dc_plane_scaling(struct drm_device *dev,
+					 struct drm_framebuffer *fb,
+					 int *min_downscale, int *max_upscale)
+{
+	struct amdgpu_device *adev = drm_to_adev(dev);
+	struct dc *dc = adev->dm.dc;
+	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
+	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
+
+	switch (fb->format->format) {
+	case DRM_FORMAT_P010:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+		*max_upscale = plane_cap->max_upscale_factor.nv12;
+		*min_downscale = plane_cap->max_downscale_factor.nv12;
+		break;
+
+	case DRM_FORMAT_XRGB16161616F:
+	case DRM_FORMAT_ARGB16161616F:
+	case DRM_FORMAT_XBGR16161616F:
+	case DRM_FORMAT_ABGR16161616F:
+		*max_upscale = plane_cap->max_upscale_factor.fp16;
+		*min_downscale = plane_cap->max_downscale_factor.fp16;
+		break;
+
+	default:
+		*max_upscale = plane_cap->max_upscale_factor.argb8888;
+		*min_downscale = plane_cap->max_downscale_factor.argb8888;
+		break;
+	}
+
+	/*
+	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
+	 * scaling factor of 1.0 == 1000 units.
+	 */
+	if (*max_upscale == 1)
+		*max_upscale = 1000;
+
+	if (*min_downscale == 1)
+		*min_downscale = 1000;
+}
+
+
 static int fill_dc_scaling_info(const struct drm_plane_state *state,
 				struct dc_scaling_info *scaling_info)
 {
-	int scale_w, scale_h;
+	int scale_w, scale_h, min_downscale, max_upscale;
 
 	memset(scaling_info, 0, sizeof(*scaling_info));
 
@@ -3754,17 +3799,25 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
 	/* DRM doesn't specify clipping on destination output. */
 	scaling_info->clip_rect = scaling_info->dst_rect;
 
-	/* TODO: Validate scaling per-format with DC plane caps */
+	/* Validate scaling per-format with DC plane caps */
+	if (state->plane && state->plane->dev && state->fb) {
+		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
+					     &min_downscale, &max_upscale);
+	} else {
+		min_downscale = 250;
+		max_upscale = 16000;
+	}
+
 	scale_w = scaling_info->dst_rect.width * 1000 /
 		  scaling_info->src_rect.width;
 
-	if (scale_w < 250 || scale_w > 16000)
+	if (scale_w < min_downscale || scale_w > max_upscale)
 		return -EINVAL;
 
 	scale_h = scaling_info->dst_rect.height * 1000 /
 		  scaling_info->src_rect.height;
 
-	if (scale_h < 250 || scale_h > 16000)
+	if (scale_h < min_downscale || scale_h > max_upscale)
 		return -EINVAL;
 
 	/*
@@ -5321,6 +5374,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+	struct amdgpu_display_manager *dm = &adev->dm;
 	int rc = 0;
 
 	if (enable) {
@@ -5336,7 +5390,30 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 		return rc;
 
 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
-	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+
+	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+		return -EBUSY;
+
+	if (amdgpu_in_reset(adev))
+		return 0;
+
+	mutex_lock(&dm->dc_lock);
+
+	if (enable)
+		dm->active_vblank_irq_count++;
+	else
+		dm->active_vblank_irq_count--;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	dc_allow_idle_optimizations(
+		adev->dm.dc, dm->active_vblank_irq_count == 0);
+
+	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
+#endif
+
+	mutex_unlock(&dm->dc_lock);
+
+	return 0;
 }
 
 static int dm_enable_vblank(struct drm_crtc *crtc)
@@ -5353,7 +5430,6 @@ static void dm_disable_vblank(struct drm_crtc *crtc)
 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
 	.reset = dm_crtc_reset_state,
 	.destroy = amdgpu_dm_crtc_destroy,
-	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 	.set_config = drm_atomic_helper_set_config,
 	.page_flip = drm_atomic_helper_page_flip,
 	.atomic_duplicate_state = dm_crtc_duplicate_state,
@@ -6328,12 +6404,51 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
 static int dm_plane_helper_check_state(struct drm_plane_state *state,
 				       struct drm_crtc_state *new_crtc_state)
 {
-	int max_downscale = 0;
-	int max_upscale = INT_MAX;
+	struct drm_framebuffer *fb = state->fb;
+	int min_downscale, max_upscale;
+	int min_scale = 0;
+	int max_scale = INT_MAX;
+
+	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
+	if (fb && state->crtc) {
+		/* Validate viewport to cover the case when only the position changes */
+		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
+			int viewport_width = state->crtc_w;
+			int viewport_height = state->crtc_h;
+
+			if (state->crtc_x < 0)
+				viewport_width += state->crtc_x;
+			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
+				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
+
+			if (state->crtc_y < 0)
+				viewport_height += state->crtc_y;
+			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
+				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
+
+			/* If the plane is completely outside of the screen, viewport_width
+			 * and/or viewport_height will be negative, which still satisfies the
+			 * condition below, so those cases are covered as well.
+			 * The x2 on width accounts for pipe-split.
+			 */
+			if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
+				return -EINVAL;
+		}
+
+		/* Get min/max allowed scaling factors from plane caps. */
+		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
+					     &min_downscale, &max_upscale);
+		/*
+		 * Convert to drm convention: 16.16 fixed point, instead of dc's
+		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
+		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
+		 */
+		min_scale = (1000 << 16) / max_upscale;
+		max_scale = (1000 << 16) / min_downscale;
+	}
 
-	/* TODO: These should be checked against DC plane caps */
 	return drm_atomic_helper_check_plane_state(
-		state, new_crtc_state, max_downscale, max_upscale, true, true);
+		state, new_crtc_state, min_scale, max_scale, true, true);
 }
 
 static int dm_plane_atomic_check(struct drm_plane *plane,
@@ -9588,6 +9703,10 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
 			amdgpu_dm_connector->pixel_clock_mhz =
 				range->pixel_clock_mhz * 10;
+
+			connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
+			connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
+
 			break;
 		}
 
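The 16.16 conversion in dm_plane_helper_check_state() deserves a worked example: DC expresses scaling caps in units where 1000 == 1.0 with the ratio as dst/src, while DRM wants 16.16 fixed point with the ratio as src/dst, so both the units and the direction get inverted. With a hypothetical 4x upscale cap (max_upscale_factor = 4000) and a 4x downscale cap (max_downscale_factor = 250):

#include <stdio.h>

int main(void)
{
	/* DC caps: units of 1/1000 (1000 == 1.0), ratio expressed as dst/src. */
	int max_upscale   = 4000;	/* up to 4x larger  */
	int min_downscale = 250;	/* down to 1/4 size */

	/* DRM wants 16.16 fixed point with the ratio as src/dst, so invert. */
	int min_scale = (1000 << 16) / max_upscale;
	int max_scale = (1000 << 16) / min_downscale;

	printf("min_scale = %d (= %.2f)\n", min_scale, min_scale / 65536.0);
	printf("max_scale = %d (= %.2f)\n", max_scale, max_scale / 65536.0);
	/* prints: min_scale = 16384 (= 0.25), max_scale = 262144 (= 4.00) */
	return 0;
}
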
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 1182dafcef0228fd8818cfd82e597066a7abc3c0..f72930c36c224057df29cd6791d5937d3823d915 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -58,10 +58,10 @@
 /* Forward declarations */
 struct amdgpu_device;
 struct drm_device;
-struct amdgpu_dm_irq_handler_data;
 struct dc;
 struct amdgpu_bo;
 struct dmub_srv;
+struct dc_plane_state;
 
 struct common_irq_params {
 	struct amdgpu_device *adev;
@@ -336,6 +336,13 @@ struct amdgpu_display_manager {
 	 */
 	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
 
+	/**
+	 * @active_vblank_irq_count:
+	 *
+	 * Number of currently active vblank IRQs.
+	 */
+	uint32_t active_vblank_irq_count;
+
 	/**
 	 * @mst_encoders:
 	 *
@@ -412,11 +419,6 @@ struct amdgpu_dm_connector {
 
 extern const struct amdgpu_ip_block_version dm_ip_block;
 
-struct amdgpu_framebuffer;
-struct amdgpu_display_manager;
-struct dc_validation_set;
-struct dc_plane_state;
-
 struct dm_plane_state {
 	struct drm_plane_state base;
 	struct dc_plane_state *dc_state;
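The active_vblank_irq_count added above behaves like a refcount gating MALL idle optimizations: dm_set_vblank() bumps it under dm->dc_lock and only allows the optimizations once it drops back to zero, i.e. when no CRTC has vblank interrupts enabled. A minimal sketch of the gating (lock elided, and the stub stands in for dc_allow_idle_optimizations()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t active_vblank_irq_count;

/* Stands in for dc_allow_idle_optimizations(). */
static void allow_idle_optimizations(bool allow)
{
	printf("MALL idle optimizations: %s\n", allow ? "allowed" : "blocked");
}

static void set_vblank_sketch(bool enable)
{
	/* dm->dc_lock is held around this in the driver */
	if (enable)
		active_vblank_irq_count++;
	else
		active_vblank_irq_count--;

	allow_idle_optimizations(active_vblank_irq_count == 0);
}

int main(void)
{
	set_vblank_sketch(true);	/* blocked */
	set_vblank_sketch(true);	/* blocked */
	set_vblank_sketch(false);	/* blocked */
	set_vblank_sketch(false);	/* allowed */
	return 0;
}
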
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 11459fb09a372a5b165aa62084344480c1638998..360952129b6d5a2b6ffe6bbc3a3004a4c1e20946 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -691,7 +691,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
 	return size;
 }
 
-/**
+/*
  * Returns the DMCUB tracebuffer contents.
  * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer
  */
@@ -735,7 +735,7 @@ static int dmub_tracebuffer_show(struct seq_file *m, void *data)
 	return 0;
 }
 
-/**
+/*
  * Returns the DMCUB firmware state contents.
  * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_fw_state
  */
@@ -1063,7 +1063,7 @@ static int dp_dsc_fec_support_show(struct seq_file *m, void *data)
  *	echo 0 > /sys/kernel/debug/dri/0/DP-X/trigger_hotplug
  *
  */
-static ssize_t dp_trigger_hotplug(struct file *f, const char __user *buf,
+static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
 							size_t size, loff_t *pos)
 {
 	struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
@@ -2214,9 +2214,9 @@ static const struct file_operations dp_dsc_slice_bpg_offset_debugfs_fops = {
 	.llseek = default_llseek
 };
 
-static const struct file_operations dp_trigger_hotplug_debugfs_fops = {
+static const struct file_operations trigger_hotplug_debugfs_fops = {
 	.owner = THIS_MODULE,
-	.write = dp_trigger_hotplug,
+	.write = trigger_hotplug,
 	.llseek = default_llseek
 };
 
@@ -2270,7 +2270,6 @@ static const struct {
 	const struct file_operations *fops;
 } dp_debugfs_entries[] = {
 		{"link_settings", &dp_link_settings_debugfs_fops},
-		{"trigger_hotplug", &dp_trigger_hotplug_debugfs_fops},
 		{"phy_settings", &dp_phy_settings_debugfs_fop},
 		{"test_pattern", &dp_phy_test_pattern_fops},
 #ifdef CONFIG_DRM_AMD_DC_HDCP
@@ -2367,6 +2366,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
 	debugfs_create_file("output_bpc", 0644, dir, connector,
 			    &output_bpc_fops);
 
+	debugfs_create_file("trigger_hotplug", 0644, dir, connector,
+			    &trigger_hotplug_debugfs_fops);
+
 	connector->debugfs_dpcd_address = 0;
 	connector->debugfs_dpcd_size = 0;
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index c2cd184f0bbd4b9f3baacda407a64e0bd22a87e7..0cdbfcd475ec90bf3c4d036ccdf512422fb2c116 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -376,7 +376,7 @@ static void event_cpirq(struct work_struct *work)
 }
 
 
-void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
+void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
 {
 	int i = 0;
 
@@ -385,6 +385,7 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
 		cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
 	}
 
+	sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
 	kfree(hdcp_work->srm);
 	kfree(hdcp_work->srm_temp);
 	kfree(hdcp_work);
@@ -449,11 +450,12 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
 		link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal);
 
 	display->controller = CONTROLLER_ID_D0 + config->otg_inst;
-	display->dig_fe = config->stream_enc_inst;
-	link->dig_be = config->link_enc_inst;
+	display->dig_fe = config->dig_fe;
+	link->dig_be = config->dig_be;
 	link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
 	link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
-	link->dp.mst_supported = config->mst_supported;
+	link->dp.assr_enabled = config->assr_enabled;
+	link->dp.mst_enabled = config->mst_enabled;
 	display->adjust.disable = 1;
 	link->adjust.auth_delay = 3;
 	link->adjust.hdcp1.disable = 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 5159b3a5e5b03b3e2c688ffab93735eac9cf2f10..09294ff122fead72611bf7cf909ee5504f311e3c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -69,7 +69,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
 
 void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
 void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
-void hdcp_destroy(struct hdcp_workqueue *work);
+void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *work);
 
 struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc);
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index f6f487e9fe2d3ffbbeb9ceaaad53516bc76170c5..5159399f82391cebe695579d5f20bfd00c448c98 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -25,7 +25,6 @@
 
 #include <linux/string.h>
 #include <linux/acpi.h>
-#include <linux/version.h>
 #include <linux/i2c.h>
 
 #include <drm/drm_probe_helper.h>
@@ -527,11 +526,11 @@ bool dm_helpers_submit_i2c(
 bool dm_helpers_dp_write_dsc_enable(
 		struct dc_context *ctx,
 		const struct dc_stream_state *stream,
-		bool enable
-)
+		bool enable)
 {
 	uint8_t enable_dsc = enable ? 1 : 0;
 	struct amdgpu_dm_connector *aconnector;
+	ssize_t ret = 0;
 
 	if (!stream)
 		return false;
@@ -542,13 +541,13 @@ bool dm_helpers_dp_write_dsc_enable(
 		if (!aconnector->dsc_aux)
 			return false;
 
-		return (drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1) >= 0);
+		ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1);
 	}
 
 	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
 		return dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
 
-	return false;
+	return (ret > 0);
 }
 
 bool dm_helpers_is_dp_sink_present(struct dc_link *link)
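drm_dp_dpcd_write() returns an ssize_t: the number of bytes written on success or a negative errno on failure, so its result must be held in a signed type wide enough for the errno (hence the widened declaration above); truncating it into a u8 would turn -EIO into 251 and read as success. A self-contained sketch of the check (the stub only mimics the return-value semantics):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

/* Stub with drm_dp_dpcd_write()-like semantics: bytes written on
 * success, negative errno on failure. */
static ssize_t dpcd_write_stub(bool fail)
{
	return fail ? -EIO : 1;
}

static bool write_dsc_enable_sketch(bool fail)
{
	ssize_t ret = dpcd_write_stub(fail);

	/* Signed check: -EIO stored in a u8 would become 251 and be
	 * mistaken for success. */
	return ret > 0;
}

int main(void)
{
	printf("ok path: %d, error path: %d\n",
	       write_dsc_enable_sketch(false), write_dsc_enable_sketch(true));
	return 0;
}
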
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 26ed70e5538ae164a533c9afa1315e4c4cf93807..e0000c180ed1f035b37263c8e8d8e14e059fefa9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -662,6 +662,20 @@ static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
 		__func__);
 }
 
+static int amdgpu_dm_set_vline0_irq_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned int crtc_id,
+					enum amdgpu_interrupt_state state)
+{
+	return dm_irq_state(
+		adev,
+		source,
+		crtc_id,
+		state,
+		IRQ_TYPE_VLINE0,
+		__func__);
+}
+
 static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
 					   struct amdgpu_irq_src *source,
 					   unsigned int crtc_id,
@@ -681,6 +695,11 @@ static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
 	.process = amdgpu_dm_irq_handler,
 };
 
+static const struct amdgpu_irq_src_funcs dm_vline0_irq_funcs = {
+	.set = amdgpu_dm_set_vline0_irq_state,
+	.process = amdgpu_dm_irq_handler,
+};
+
 static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
 	.set = amdgpu_dm_set_vupdate_irq_state,
 	.process = amdgpu_dm_irq_handler,
@@ -702,6 +721,9 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
 	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
 	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
 
+	adev->vline0_irq.num_types = adev->mode_info.num_crtc;
+	adev->vline0_irq.funcs = &dm_vline0_irq_funcs;
+
 	adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
 	adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index f2d8cf34be46e92bbd2c83cf832835389a8936b6..41b09ab22233a438674807a746a31d1fa7570647 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -23,7 +23,6 @@
  *
  */
 
-#include <linux/version.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_dp_mst_helper.h>
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index bf8fe0471b8fc0cfca5db88adbe375a6efd502dd..5bf2f2375b40df208e3f41572bffc84134e1ac23 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -69,5 +69,7 @@ AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
 AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
 
 DC_DMUB += dc_dmub_srv.o
+DC_EDID += dc_edid_parser.o
 AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
-AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB)
+AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
+AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 24ed03d8cda7484d69659386d818024105800344..6767fab55c260d4869095c3baaf9517c5b831dd3 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -73,12 +73,9 @@ uint16_t fixed_point_to_int_frac(
 
 	return result;
 }
-/**
-* convert_float_matrix
-* This converts a double into HW register spec defined format S2D13.
-* @param :
-* @return None
-*/
+/*
+ * convert_float_matrix - convert a double into the HW register spec-defined format S2D13.
+ */
 void convert_float_matrix(
 	uint16_t *matrix,
 	struct fixed31_32 *flt,
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
index b2fc4f8e64825086855b28a11099312f2d06e5f8..ad04ef98e652b1a214d73207245f40adc57fafde 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
@@ -49,20 +49,24 @@ bool is_rgb_cspace(enum dc_color_space output_color_space)
 	}
 }
 
-bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_child_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
 {
 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
 		return true;
-	if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+	if (pipe_ctx->bottom_pipe && is_child_pipe_tree_visible(pipe_ctx->bottom_pipe))
+		return true;
+	if (pipe_ctx->next_odm_pipe && is_child_pipe_tree_visible(pipe_ctx->next_odm_pipe))
 		return true;
 	return false;
 }
 
-bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_parent_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
 {
 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
 		return true;
-	if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+	if (pipe_ctx->top_pipe && is_parent_pipe_tree_visible(pipe_ctx->top_pipe))
+		return true;
+	if (pipe_ctx->prev_odm_pipe && is_parent_pipe_tree_visible(pipe_ctx->prev_odm_pipe))
 		return true;
 	return false;
 }
@@ -71,9 +75,13 @@ bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
 {
 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
 		return true;
-	if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+	if (pipe_ctx->top_pipe && is_parent_pipe_tree_visible(pipe_ctx->top_pipe))
+		return true;
+	if (pipe_ctx->bottom_pipe && is_child_pipe_tree_visible(pipe_ctx->bottom_pipe))
+		return true;
+	if (pipe_ctx->prev_odm_pipe && is_parent_pipe_tree_visible(pipe_ctx->prev_odm_pipe))
 		return true;
-	if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+	if (pipe_ctx->next_odm_pipe && is_child_pipe_tree_visible(pipe_ctx->next_odm_pipe))
 		return true;
 	return false;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.h b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
index 7c0cbf47e8cebabb05244a00f7f8b121fd78dc1a..b061497480b8d1ec151d3159571e8c8b905945c0 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
@@ -30,9 +30,9 @@
 
 bool is_rgb_cspace(enum dc_color_space output_color_space);
 
-bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+bool is_child_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
 
-bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+bool is_parent_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
 
 bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
 
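The renamed helpers now walk both kinds of pipe links: vertical top/bottom neighbours (MPC blending) and horizontal prev/next ODM neighbours, treating a pipe tree as visible if any reachable plane is visible. A self-contained sketch of the downward walk mirroring is_child_pipe_tree_visible() (simplified node type, not the kernel's pipe_ctx):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pipe_sketch {
	bool visible;
	struct pipe_sketch *bottom_pipe;	/* MPC child */
	struct pipe_sketch *next_odm_pipe;	/* ODM child */
};

/* Mirrors is_child_pipe_tree_visible(): true if this pipe or any
 * descendant along either link has a visible plane. */
static bool child_tree_visible(const struct pipe_sketch *p)
{
	if (!p)
		return false;
	if (p->visible)
		return true;
	return child_tree_visible(p->bottom_pipe) ||
	       child_tree_visible(p->next_odm_pipe);
}

int main(void)
{
	struct pipe_sketch odm = { .visible = true };
	struct pipe_sketch top = { .visible = false, .next_odm_pipe = &odm };

	printf("tree visible: %d\n", child_tree_visible(&top));
	return 0;
}
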
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 23a373ca94b5ca7c387add3f984c4ba77eb2612d..c67d21a5ee52ff0e2b92da67ce0bbe2d10be850b 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -911,11 +911,11 @@ static enum bp_result get_ss_info_from_tbl(
  * ver 2.1 can co-exist with the SS_Info table. Except for ASIC_InternalSS_Info ver 3.1,
  * there is only one entry for each signal/ss id.  However, there is
  * no plan to support multiple spread spectrum entries for EverGreen
- * @param [in] this
- * @param [in] signal, ASSignalType to be converted to info index
- * @param [in] index, number of entries that match the converted info index
- * @param [out] ss_info, sprectrum information structure,
- * @return Bios parser result code
+ * @dcb:     pointer to the DC BIOS
+ * @signal:  ASSignalType to be converted to info index
+ * @index:   number of entries that match the converted info index
+ * @ss_info: spread spectrum information structure
+ * return:   Bios parser result code
  */
 static enum bp_result bios_parser_get_spread_spectrum_info(
 	struct dc_bios *dcb,
@@ -985,10 +985,10 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
  * There can not be more than 1 entry for  ASIC_InternalSS_Info Ver 2.1 or
  * SS_Info.
  *
- * @param this
- * @param id, spread sprectrum info index
- * @param pSSinfo, sprectrum information structure,
- * @return Bios parser result code
+ * @bp:      pointer to the BIOS parser
+ * @id:      spread spectrum info index
+ * @ss_info: spread spectrum information structure
+ * return:   BIOS parser result code
  */
 static enum bp_result get_ss_info_from_tbl(
 	struct bios_parser *bp,
@@ -1011,9 +1011,10 @@ static enum bp_result get_ss_info_from_tbl(
  * from the VBIOS
  * There will not be multiple entry for Ver 2.1
  *
- * @param id, spread sprectrum info index
- * @param pSSinfo, sprectrum information structure,
- * @return Bios parser result code
+ * @bp:    pointer to the BIOS parser
+ * @id:    spread spectrum info index
+ * @info:  spectrum information structure
+ * return: BIOS parser result code
  */
 static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
 	struct bios_parser *bp,
@@ -1076,9 +1077,10 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
  * of entries that matches the id
  * for, the SS_Info table, there should not be more than 1 entry match.
  *
- * @param [in] id, spread sprectrum id
- * @param [out] pSSinfo, sprectrum information structure,
- * @return Bios parser result code
+ * @bp:      pointer to the BIOS parser
+ * @id:      spread spectrum id
+ * @ss_info: spectrum information structure
+ * return:   BIOS parser result code
  */
 static enum bp_result get_ss_info_from_ss_info_table(
 	struct bios_parser *bp,
@@ -1451,16 +1453,14 @@ static enum bp_result get_embedded_panel_info_v1_3(
 }
 
 /**
- * bios_parser_get_encoder_cap_info
+ * bios_parser_get_encoder_cap_info - get encoder capability
+ *                                    information of input object id
  *
- * @brief
- *  Get encoder capability information of input object id
- *
- * @param object_id, Object id
- * @param object_id, encoder cap information structure
- *
- * @return Bios parser result code
+ * @dcb:       pointer to the DC BIOS
+ * @object_id: object id
+ * @info:      encoder cap information structure
  *
+ * return: BIOS parser result code
  */
 static enum bp_result bios_parser_get_encoder_cap_info(
 	struct dc_bios *dcb,
@@ -1490,17 +1490,12 @@ static enum bp_result bios_parser_get_encoder_cap_info(
 }
 
 /**
- * get_encoder_cap_record
- *
- * @brief
- *  Get encoder cap record for the object
- *
- * @param object, ATOM object
+ * get_encoder_cap_record - Get encoder cap record for the object
  *
- * @return atom encoder cap record
- *
- * @note
- *  search all records to find the ATOM_ENCODER_CAP_RECORD_V2 record
+ * @bp:      pointer to the BIOS parser
+ * @object:  ATOM object
+ * return:   atom encoder cap record
+ * note:     search all records to find the ATOM_ENCODER_CAP_RECORD_V2 record
  */
 static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
 	struct bios_parser *bp,
@@ -1557,8 +1552,9 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl(
  * Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table from
  * the VBIOS that match the SSid (to be converted from signal)
  *
- * @param[in] signal, ASSignalType to be converted to SSid
- * @return number of SS Entry that match the signal
+ * @dcb:    pointer to the DC BIOS
+ * @signal: ASSignalType to be converted to SSid
+ * return: number of SS entries that match the signal
  */
 static uint32_t bios_parser_get_ss_entry_number(
 	struct dc_bios *dcb,
@@ -1608,10 +1604,10 @@ static uint32_t bios_parser_get_ss_entry_number(
  * get_ss_entry_number_from_ss_info_tbl
  * Get Number of spread spectrum entry from the SS_Info table from the VBIOS.
  *
- * @note There can only be one entry for each id for SS_Info Table
- *
- * @param [in] id, spread spectrum id
- * @return number of SS Entry that match the id
+ * @bp:  pointer to the BIOS parser
+ * @id:  spread spectrum id
+ * return: number of SS entries that match the id
+ * note: There can only be one entry for each id for SS_Info Table
  */
 static uint32_t get_ss_entry_number_from_ss_info_tbl(
 	struct bios_parser *bp,
@@ -1679,8 +1675,9 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl(
  * There can not be more than 1 entry for  ASIC_InternalSS_Info Ver 2.1 or
  * SS_Info.
  *
- * @param id, spread sprectrum info index
- * @return Bios parser result code
+ * @bp:    pointer to the BIOS parser
+ * @id:    spread spectrum info index
+ * return: BIOS parser result code
  */
 static uint32_t get_ss_entry_number(struct bios_parser *bp, uint32_t id)
 {
@@ -1696,8 +1693,9 @@ static uint32_t get_ss_entry_number(struct bios_parser *bp, uint32_t id)
  * Ver 2.1 from the VBIOS
  * There will not be multiple entry for Ver 2.1
  *
- * @param id, spread sprectrum info index
- * @return number of SS Entry that match the id
+ * @bp:    pointer to the BIOS parser
+ * @id:    spread spectrum info index
+ * return: number of SS entries that match the id
  */
 static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
 	struct bios_parser *bp,
@@ -1731,8 +1729,9 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
  * Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table of
  * the VBIOS that matches id
  *
- * @param[in]  id, spread sprectrum id
- * @return number of SS Entry that match the id
+ * @bp:    pointer to the BIOS parser
+ * @id:    spread spectrum id
+ * return: number of SS entries that match the id
  */
 static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
 	struct bios_parser *bp,
@@ -1767,10 +1766,11 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
  * bios_parser_get_gpio_pin_info
  * Get GpioPin information of input gpio id
  *
- * @param gpio_id, GPIO ID
- * @param info, GpioPin information structure
- * @return Bios parser result code
- * @note
+ * @dcb:     pointer to the DC BIOS
+ * @gpio_id: GPIO ID
+ * @info:    GpioPin information structure
+ * return:   BIOS parser result code
+ * note:
  *  to get the GPIO PIN INFO, we need:
  *  1. get the GPIO_ID from other object table, see GetHPDInfo()
  *  2. in DATA_TABLE.GPIO_Pin_LUT, search all records, to get the registerA
@@ -2197,13 +2197,10 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id)
 }
 
 /**
- * bios_parser_set_scratch_critical_state
- *
- * @brief
- *  update critical state bit in VBIOS scratch register
- *
- * @param
- *  bool - to set or reset state
+ * bios_parser_set_scratch_critical_state - update critical state
+ *                                          bit in VBIOS scratch register
+ * @dcb:    pointer to the DC BIOS
+ * @state:  set or reset state
  */
 static void bios_parser_set_scratch_critical_state(
 	struct dc_bios *dcb,
@@ -2222,7 +2219,7 @@ static void bios_parser_set_scratch_critical_state(
  * bios_parser *bp - [in]BIOS parser handler to get master data table
  * integrated_info *info - [out] store and output integrated info
  *
- * @return
+ * return:
  * enum bp_result - BP_RESULT_OK if information is available,
  *                  BP_RESULT_BADBIOSTABLE otherwise.
  */
@@ -2372,7 +2369,7 @@ static enum bp_result get_integrated_info_v8(
  * bios_parser *bp - [in]BIOS parser handler to get master data table
  * integrated_info *info - [out] store and output integrated info
  *
- * @return
+ * return:
  * enum bp_result - BP_RESULT_OK if information is available,
  *                  BP_RESULT_BADBIOSTABLE otherwise.
  */
@@ -2509,7 +2506,7 @@ static enum bp_result get_integrated_info_v9(
  * bios_parser *bp - [in]BIOS parser handler to get master data table
  * integrated_info *info - [out] store and output integrated info
  *
- * @return
+ * return:
  * enum bp_result - BP_RESULT_OK if information is available,
  *                  BP_RESULT_BADBIOSTABLE otherwise.
  */
@@ -2585,7 +2582,7 @@ static struct integrated_info *bios_parser_create_integrated_info(
 	return NULL;
 }
 
-enum bp_result update_slot_layout_info(
+static enum bp_result update_slot_layout_info(
 	struct dc_bios *dcb,
 	unsigned int i,
 	struct slot_layout_info *slot_layout_info,
@@ -2689,7 +2686,7 @@ enum bp_result update_slot_layout_info(
 }
 
 
-enum bp_result get_bracket_layout_record(
+static enum bp_result get_bracket_layout_record(
 	struct dc_bios *dcb,
 	unsigned int bracket_layout_id,
 	struct slot_layout_info *slot_layout_info)
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 670c26583817817eb5b24d3a80e0806fbe65deee..9f9fda3118d1fbc730983f6c1c0f7cea839af408 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -485,10 +485,11 @@ static struct atom_hpd_int_record *get_hpd_record(
  * bios_parser_get_gpio_pin_info
  * Get GpioPin information of input gpio id
  *
- * @param gpio_id, GPIO ID
- * @param info, GpioPin information structure
- * @return Bios parser result code
- * @note
+ * @dcb:     pointer to the DC BIOS
+ * @gpio_id: GPIO ID
+ * @info:    GpioPin information structure
+ * return: BIOS parser result code
+ * note:
  *  to get the GPIO PIN INFO, we need:
  *  1. get the GPIO_ID from other object table, see GetHPDInfo()
  *  2. in DATA_TABLE.GPIO_Pin_LUT, search all records,
@@ -801,11 +802,11 @@ static enum bp_result get_ss_info_v4_2(
  * ver 3.1,
  * there is only one entry for each signal /ss id.  However, there is
  * no planning of supporting multiple spread Sprectum entry for EverGreen
- * @param [in] this
- * @param [in] signal, ASSignalType to be converted to info index
- * @param [in] index, number of entries that match the converted info index
- * @param [out] ss_info, sprectrum information structure,
- * @return Bios parser result code
+ * @dcb:     pointer to the DC BIOS
+ * @signal:  ASSignalType to be converted to info index
+ * @index:   number of entries that match the converted info index
+ * @ss_info: spectrum information structure
+ * return: BIOS parser result code
  */
 static enum bp_result bios_parser_get_spread_spectrum_info(
 	struct dc_bios *dcb,
@@ -1196,13 +1197,11 @@ static bool bios_parser_is_accelerated_mode(
 }
 
 /**
- * bios_parser_set_scratch_critical_state
+ * bios_parser_set_scratch_critical_state - update critical state bit
+ *                                          in VBIOS scratch register
  *
- * @brief
- *  update critical state bit in VBIOS scratch register
- *
- * @param
- *  bool - to set or reset state
+ * @dcb:   pointer to the DC BIOS
+ * @state: set or reset state
  */
 static void bios_parser_set_scratch_critical_state(
 	struct dc_bios *dcb,
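The comment rewrites in both parsers above move from doxygen-style @param/@return blocks toward kernel-doc. For reference, a canonical kernel-doc block looks like the sketch below; note that strict kernel-doc capitalizes the Return: section, which the hunks above approximate with a lowercase return:.

/**
 * function_name() - Short one-line description
 * @arg1: description of the first argument
 * @arg2: description of the second argument
 *
 * Optional longer description spanning
 * several lines.
 *
 * Return: description of the return value
 */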
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 070459e3e4070dcb31fa5b678673f1e07e386231..afc10b954ffa7b67cb52131e6ac32e3bb0036e1f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -245,6 +245,23 @@ static enum bp_result encoder_control_digx_v3(
 					cntl->enable_dp_audio);
 	params.ucLaneNum = (uint8_t)(cntl->lanes_number);
 
+	switch (cntl->color_depth) {
+	case COLOR_DEPTH_888:
+		params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+		break;
+	case COLOR_DEPTH_101010:
+		params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+		break;
+	case COLOR_DEPTH_121212:
+		params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+		break;
+	case COLOR_DEPTH_161616:
+		params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+		break;
+	default:
+		break;
+	}
+
 	if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
 		result = BP_RESULT_OK;
 
@@ -274,6 +291,23 @@ static enum bp_result encoder_control_digx_v4(
 					cntl->enable_dp_audio));
 	params.ucLaneNum = (uint8_t)(cntl->lanes_number);
 
+	switch (cntl->color_depth) {
+	case COLOR_DEPTH_888:
+		params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+		break;
+	case COLOR_DEPTH_101010:
+		params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+		break;
+	case COLOR_DEPTH_121212:
+		params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+		break;
+	case COLOR_DEPTH_161616:
+		params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+		break;
+	default:
+		break;
+	}
+
 	if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
 		result = BP_RESULT_OK;
 
@@ -1057,6 +1091,19 @@ static enum bp_result set_pixel_clock_v5(
 		 * driver choose program it itself, i.e. here we program it
 		 * to 888 by default.
 		 */
+		if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
+			switch (bp_params->color_depth) {
+			case TRANSMITTER_COLOR_DEPTH_30:
+				/* yes this is correct, the atom define is wrong */
+				clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
+				break;
+			case TRANSMITTER_COLOR_DEPTH_36:
+				/* yes this is correct, the atom define is wrong */
+				clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+				break;
+			default:
+				break;
+			}
 
 		if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
 			result = BP_RESULT_OK;
@@ -1135,6 +1182,20 @@ static enum bp_result set_pixel_clock_v6(
 		 * driver choose program it itself, i.e. here we pass required
 		 * target rate that includes deep color.
 		 */
+		if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
+			switch (bp_params->color_depth) {
+			case TRANSMITTER_COLOR_DEPTH_30:
+				clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
+				break;
+			case TRANSMITTER_COLOR_DEPTH_36:
+				clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
+				break;
+			case TRANSMITTER_COLOR_DEPTH_48:
+				clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+				break;
+			default:
+				break;
+			}
 
 		if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
 			result = BP_RESULT_OK;
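The comments in set_pixel_clock_v5/v6 above note that the requested rate must account for deep color. As a worked example of the standard HDMI scaling (an illustrative helper, not part of this patch), the pixel clock grows by bpp/24 relative to the 24 bpp baseline:

/* Hypothetical helper showing the usual HDMI deep-color scaling. */
static uint32_t deep_color_pix_clk_khz(uint32_t base_khz, uint32_t bpp)
{
	/* 30 bpp -> x1.25, 36 bpp -> x1.5, 48 bpp -> x2.0 vs 24 bpp */
	return base_khz * bpp / 24;
}
/* e.g. 1080p60: deep_color_pix_clk_khz(148500, 30) == 185625 kHz */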
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
index 48b4ef03fc8f83f7720c1abe7e19ab3344224801..5b77251e0590924215b5a034964103728f19f459 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -114,18 +114,14 @@ bool dal_cmd_table_helper_controller_id_to_atom(
 }
 
 /**
-* translate_transmitter_bp_to_atom
-*
-* @brief
-*  Translate the Transmitter to the corresponding ATOM BIOS value
-*
-* @param
-*   input transmitter
-*   output digitalTransmitter
-*    // =00: Digital Transmitter1 ( UNIPHY linkAB )
-*    // =01: Digital Transmitter2 ( UNIPHY linkCD )
-*    // =02: Digital Transmitter3 ( UNIPHY linkEF )
-*/
+ * translate_transmitter_bp_to_atom - Translate the Transmitter to the
+ *                                    corresponding ATOM BIOS value
+ * @t: transmitter
+ * returns: output digitalTransmitter
+ *    // =00: Digital Transmitter1 ( UNIPHY linkAB )
+ *    // =01: Digital Transmitter2 ( UNIPHY linkCD )
+ *    // =02: Digital Transmitter3 ( UNIPHY linkEF )
+ */
 uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
 	enum transmitter t)
 {
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 7736c92d55c402881251c41f5f29164c211afe86..455ee2be15a36f57a2d7dff8b22591b3db1a567d 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -128,18 +128,14 @@ bool dal_cmd_table_helper_controller_id_to_atom2(
 }
 
 /**
-* translate_transmitter_bp_to_atom
-*
-* @brief
-*  Translate the Transmitter to the corresponding ATOM BIOS value
-*
-* @param
-*   input transmitter
-*   output digitalTransmitter
-*    // =00: Digital Transmitter1 ( UNIPHY linkAB )
-*    // =01: Digital Transmitter2 ( UNIPHY linkCD )
-*    // =02: Digital Transmitter3 ( UNIPHY linkEF )
-*/
+ * translate_transmitter_bp_to_atom2 - Translate the Transmitter to the
+ *                                     corresponding ATOM BIOS value
+ * @t: transmitter
+ * returns: digitalTransmitter
+ *    // =00: Digital Transmitter1 ( UNIPHY linkAB )
+ *    // =01: Digital Transmitter2 ( UNIPHY linkCD )
+ *    // =02: Digital Transmitter3 ( UNIPHY linkEF )
+ */
 uint8_t dal_cmd_table_helper_transmitter_bp_to_atom2(
 	enum transmitter t)
 {
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index ef41b287cbe2355c313ecf8cc84efa055690b39d..e633f8a51edb6b810cdd549d48d5982634b85bc6 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -106,7 +106,6 @@ static void calculate_bandwidth(
 	bool lpt_enabled;
 	enum bw_defines sclk_message;
 	enum bw_defines yclk_message;
-	enum bw_defines v_filter_init_mode[maximum_number_of_surfaces];
 	enum bw_defines tiling_mode[maximum_number_of_surfaces];
 	enum bw_defines surface_type[maximum_number_of_surfaces];
 	enum bw_defines voltage;
@@ -792,12 +791,8 @@ static void calculate_bandwidth(
 				data->v_filter_init[i] = bw_add(data->v_filter_init[i], bw_int_to_fixed(1));
 			}
 			if (data->stereo_mode[i] == bw_def_top_bottom) {
-				v_filter_init_mode[i] = bw_def_manual;
 				data->v_filter_init[i] = bw_min2(data->v_filter_init[i], bw_int_to_fixed(4));
 			}
-			else {
-				v_filter_init_mode[i] = bw_def_auto;
-			}
 			if (data->stereo_mode[i] == bw_def_top_bottom) {
 				data->num_lines_at_frame_start = bw_int_to_fixed(1);
 			}
@@ -2730,7 +2725,7 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
 
 }
 
-/**
+/*
  * Compare calculated (required) clocks against the clocks available at
  * maximum voltage (max Performance Level).
  */
@@ -3001,13 +2996,12 @@ static bool all_displays_in_sync(const struct pipe_ctx pipe[],
 	return true;
 }
 
-/**
+/*
  * Return:
  *	true -	Display(s) configuration supported.
  *		In this case 'calcs_output' contains data for HW programming
  *	false - Display(s) configuration not supported (not enough bandwidth).
  */
-
 bool bw_calcs(struct dc_context *ctx,
 	const struct bw_calcs_dceip *dceip,
 	const struct bw_calcs_vbios *vbios,
@@ -3028,7 +3022,7 @@ bool bw_calcs(struct dc_context *ctx,
 		calcs_output->all_displays_in_sync = false;
 
 	if (data->number_of_displays != 0) {
-		uint8_t yclk_lvl, sclk_lvl;
+		uint8_t yclk_lvl;
 		struct bw_fixed high_sclk = vbios->high_sclk;
 		struct bw_fixed mid1_sclk = vbios->mid1_sclk;
 		struct bw_fixed mid2_sclk = vbios->mid2_sclk;
@@ -3049,7 +3043,6 @@ bool bw_calcs(struct dc_context *ctx,
 		calculate_bandwidth(dceip, vbios, data);
 
 		yclk_lvl = data->y_clk_level;
-		sclk_lvl = data->sclk_level;
 
 		calcs_output->nbp_state_change_enable =
 			data->nbp_state_change_enable;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 75b8240ed0595b073af4622a39817e812b81c0c4..e133edc587d318098e5576acbff9f30584f8fe68 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -187,17 +187,6 @@ static void ramp_up_dispclk_with_dpp(
 	clk_mgr->base.clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
 }
 
-static bool is_mpo_enabled(struct dc_state *context)
-{
-	int i;
-
-	for (i = 0; i < context->stream_count; i++) {
-		if (context->stream_status[i].plane_count > 1)
-			return true;
-	}
-	return false;
-}
-
 static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
 			struct dc_state *context,
 			bool safe_to_lower)
@@ -295,22 +284,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
 		if (pp_smu->set_hard_min_fclk_by_freq &&
 				pp_smu->set_hard_min_dcfclk_by_freq &&
 				pp_smu->set_min_deep_sleep_dcfclk) {
-			// Only increase clocks when display is active and MPO is enabled
-			if (display_count && is_mpo_enabled(context)) {
-				pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu,
-						((new_clocks->fclk_khz / 1000) *  101) / 100);
-				pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu,
-						((new_clocks->dcfclk_khz / 1000) * 101) / 100);
-				pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu,
-						(new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
-			} else {
-				pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu,
-						new_clocks->fclk_khz / 1000);
-				pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu,
-						new_clocks->dcfclk_khz / 1000);
-				pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu,
-						(new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
-			}
+			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
+			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
+			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index f2114bc910bf0077e7453c9131c936b555ca0237..ec9dc265cde0c4d2def10eeb8f353074e219bd35 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -257,8 +257,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 			if (update_dppclk || update_dispclk)
 				dcn20_update_clocks_update_dentist(clk_mgr);
 			// always update dtos unless clock is lowered and not safe to lower
-			if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
-				dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
 		}
 	}
 
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index ab98c259ef69562a4b9b6c096fee5535a6bab022..c7e5a64e06afb0a5735c5b33ce0a90ac2e938bb0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -146,15 +146,15 @@ static noinline void dcn3_build_wm_range_table(struct clk_mgr_internal *clk_mgr)
 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
 
 	/* Set D - MALL - SR enter and exit times adjusted for MALL */
-//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].valid = true;
-//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = pstate_latency_us;
-//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = 2;
-//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = 4;
-//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
-//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = 0;
-//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
-//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
-//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
+	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].valid = true;
+	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = pstate_latency_us;
+	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = 2;
+	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = 4;
+	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
+	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = 0;
+	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
+	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
+	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
 }
 
 void dcn3_init_clocks(struct clk_mgr *clk_mgr_base)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
index cfa8e02cf103c7c751aff3ab728881dfb3085840..68942bbc7472847b7f13fddef0d3d1399441c93a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
@@ -103,7 +103,7 @@ int dcn301_smu_send_msg_with_param(
 	/* Trigger the message transaction by writing the message ID */
 	REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
 
-	result = dcn301_smu_wait_for_response(clk_mgr, 10, 1000);
+	result = dcn301_smu_wait_for_response(clk_mgr, 10, 200000);
 
 	ASSERT(result == VBIOSSMC_Result_OK);
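Given the call signature dcn301_smu_wait_for_response(clk_mgr, delay_us, max_retries), the hunk above stretches the worst-case wait from 10 us * 1000 = 10 ms to 10 us * 200000 = 2 s. A sketch of that polling shape, assuming a hypothetical read_response_register() accessor:

static uint32_t sketch_wait_for_response(uint32_t delay_us, uint32_t max_retries)
{
	uint32_t res, i;

	for (i = 0; i < max_retries; i++) {
		res = read_response_register(); /* hypothetical accessor */
		if (res != 0)                   /* 0 means still pending */
			return res;
		udelay(delay_us);
	}
	return 0; /* timed out */
}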
 
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 991b9c5beaa301ba9311c2ed6c23a62100c6bcd8..aadb801447a7cb0c3cc1eef7d56ececa1546a214 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -75,7 +75,8 @@ int vg_get_active_display_cnt_wa(
 		const struct dc_link *link = dc->links[i];
 
 		/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
-		if (link->link_enc->funcs->is_dig_enabled(link->link_enc))
+		if (link->link_enc->funcs->is_dig_enabled &&
+				link->link_enc->funcs->is_dig_enabled(link->link_enc))
 			display_count++;
 	}
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 6cf1a5a2a5ecc0f5dda72f60930325b1df4de9cb..c9aede2f783d3304a8084732fba38150afe5ef04 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -175,6 +175,8 @@ static bool create_links(
 
 	connectors_num = bios->funcs->get_connectors_number(bios);
 
+	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);
+
 	if (connectors_num > ENUM_ID_COUNT) {
 		dm_error(
 			"DC: Number of connectors %d exceeds maximum of %d!\n",
@@ -193,6 +195,8 @@ static bool create_links(
 		struct link_init_data link_init_params = {0};
 		struct dc_link *link;
 
+		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
+
 		link_init_params.ctx = dc->ctx;
 		/* next BIOS object table connector */
 		link_init_params.connector_index = i;
@@ -201,30 +205,14 @@ static bool create_links(
 		link = link_create(&link_init_params);
 
 		if (link) {
-			bool should_destory_link = false;
-
-			if (link->connector_signal == SIGNAL_TYPE_EDP) {
-				if (dc->config.edp_not_connected) {
-					if (!IS_DIAG_DC(dc->ctx->dce_environment))
-						should_destory_link = true;
-				} else {
-					enum dc_connection_type type;
-					dc_link_detect_sink(link, &type);
-					if (type == dc_connection_none)
-						should_destory_link = true;
-				}
-			}
-
-			if (dc->config.force_enum_edp || !should_destory_link) {
 				dc->links[dc->link_count] = link;
 				link->dc = dc;
 				++dc->link_count;
-			} else {
-				link_destroy(&link);
-			}
 		}
 	}
 
+	DC_LOG_DC("BIOS object table - end");
+
 	for (i = 0; i < num_virtual_links; i++) {
 		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
 		struct encoder_init_data enc_init = {0};
@@ -284,20 +272,16 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
 }
 
 /**
- *****************************************************************************
- *  Function: dc_stream_adjust_vmin_vmax
+ *  dc_stream_adjust_vmin_vmax:
  *
- *  @brief
- *     Looks up the pipe context of dc_stream_state and updates the
- *     vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
- *     Rate, which is a power-saving feature that targets reducing panel
- *     refresh rate while the screen is static
+ *  Looks up the pipe context of dc_stream_state and updates the
+ *  vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
+ *  Rate, which is a power-saving feature that targets reducing panel
+ *  refresh rate while the screen is static
  *
- *  @param [in] dc: dc reference
- *  @param [in] stream: Initial dc stream state
- *  @param [in] adjust: Updated parameters for vertical_total_min and
- *  vertical_total_max
- *****************************************************************************
+ *  @dc:     dc reference
+ *  @stream: Initial dc stream state
+ *  @adjust: Updated parameters for vertical_total_min and vertical_total_max
  */
 bool dc_stream_adjust_vmin_vmax(struct dc *dc,
 		struct dc_stream_state *stream,
@@ -355,6 +339,7 @@ bool dc_stream_get_crtc_position(struct dc *dc,
  * @dc: DC Object
  * @stream: The stream to configure CRC on.
  * @enable: Enable CRC if true, disable otherwise.
+ * @crc_window: CRC window (x/y start/end) information
  * @continuous: Capture CRC on every frame if true. Otherwise, only capture
  *              once.
  *
@@ -420,7 +405,9 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
  * dc_stream_get_crc() - Get CRC values for the given stream.
  * @dc: DC object
  * @stream: The DC stream state of the stream to get CRCs from.
- * @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
+ * @r_cr: CRC value for the first of the 3 channels stored here.
+ * @g_y:  CRC value for the second of the 3 channels stored here.
+ * @b_cb: CRC value for the third of the 3 channels stored here.
  *
  * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
  * Return false if stream is not found, or if CRCs are not enabled.
@@ -707,7 +694,6 @@ static bool dc_construct(struct dc *dc,
 	}
 
 	dc->dcn_ip = dcn_ip;
-	dc->soc_bounding_box = init_params->soc_bounding_box;
 #endif
 
 	if (!dc_construct_ctx(dc, init_params)) {
@@ -757,6 +743,10 @@ static bool dc_construct(struct dc *dc,
 	if (!dc->res_pool)
 		goto fail;
 
+	/* set i2c speed if not done by the respective dcnxxx__resource.c */
+	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
+		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
+
 	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
 	if (!dc->clk_mgr)
 		goto fail;
@@ -764,8 +754,6 @@ static bool dc_construct(struct dc *dc,
 	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
 #endif
 
-	dc->debug.force_ignore_link_settings = init_params->force_ignore_link_settings;
-
 	if (dc->res_pool->funcs->update_bw_bounding_box)
 		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
 
@@ -803,7 +791,8 @@ static void disable_all_writeback_pipes_for_stream(
 		stream->writeback_info[i].wb_enabled = false;
 }
 
-void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock)
+static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
+					  struct dc_stream_state *stream, bool lock)
 {
 	int i = 0;
 
@@ -964,19 +953,15 @@ struct dc *dc_create(const struct dc_init_data *init_params)
 	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
 	unsigned int full_pipe_count;
 
-	if (NULL == dc)
-		goto alloc_fail;
+	if (!dc)
+		return NULL;
 
 	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
-		if (false == dc_construct_ctx(dc, init_params)) {
-			dc_destruct(dc);
-			goto construct_fail;
-		}
+		if (!dc_construct_ctx(dc, init_params))
+			goto destruct_dc;
 	} else {
-		if (false == dc_construct(dc, init_params)) {
-			dc_destruct(dc);
-			goto construct_fail;
-		}
+		if (!dc_construct(dc, init_params))
+			goto destruct_dc;
 
 		full_pipe_count = dc->res_pool->pipe_count;
 		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
@@ -1007,15 +992,36 @@ struct dc *dc_create(const struct dc_init_data *init_params)
 
 	return dc;
 
-construct_fail:
+destruct_dc:
+	dc_destruct(dc);
 	kfree(dc);
-
-alloc_fail:
 	return NULL;
 }
 
+static void detect_edp_presence(struct dc *dc)
+{
+	struct dc_link *edp_link = get_edp_link(dc);
+	bool edp_sink_present = true;
+
+	if (!edp_link)
+		return;
+
+	if (dc->config.edp_not_connected) {
+		edp_sink_present = false;
+	} else {
+		enum dc_connection_type type;
+		dc_link_detect_sink(edp_link, &type);
+		if (type == dc_connection_none)
+			edp_sink_present = false;
+	}
+
+	edp_link->edp_sink_present = edp_sink_present;
+}
+
 void dc_hardware_init(struct dc *dc)
 {
+
+	detect_edp_presence(dc);
 	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
 		dc->hwss.init_hw(dc);
 }
@@ -1493,7 +1499,7 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
 	enum dc_status result = DC_ERROR_UNEXPECTED;
 	int i;
 
-	if (false == context_changed(dc, context))
+	if (!context_changed(dc, context))
 		return DC_OK;
 
 	DC_LOG_DC("%s: %d streams\n",
@@ -1540,7 +1546,7 @@ bool dc_acquire_release_mpc_3dlut(
 		if (found_pipe_idx) {
 			if (acquire && pool->funcs->acquire_post_bldn_3dlut)
 				ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
-			else if (acquire == false && pool->funcs->release_post_bldn_3dlut)
+			else if (!acquire && pool->funcs->release_post_bldn_3dlut)
 				ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
 		}
 	}
@@ -2016,7 +2022,7 @@ static enum surface_update_type check_update_surfaces_for_stream(
 	return overall_type;
 }
 
-/**
+/*
  * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
  *
  * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
@@ -2270,6 +2276,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
 
 	if (update->dither_option)
 		stream->dither_option = *update->dither_option;
+
+	if (update->pending_test_pattern)
+		stream->test_pattern = *update->pending_test_pattern;
 	/* update current stream with writeback info */
 	if (update->wb_update) {
 		int i;
@@ -2366,6 +2375,15 @@ static void commit_planes_do_stream_update(struct dc *dc,
 				}
 			}
 
+			if (stream_update->pending_test_pattern) {
+				dc_link_dp_set_test_pattern(stream->link,
+					stream->test_pattern.type,
+					stream->test_pattern.color_space,
+					stream->test_pattern.p_link_settings,
+					stream->test_pattern.p_custom_pattern,
+					stream->test_pattern.cust_pattern_size);
+			}
+
 			/* Full fe update*/
 			if (update_type == UPDATE_TYPE_FAST)
 				continue;
@@ -2830,7 +2848,7 @@ enum dc_irq_source dc_interrupt_to_irq_source(
 	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
 }
 
-/**
+/*
  * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
  */
 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
@@ -2964,7 +2982,7 @@ static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink
 	return true;
 }
 
-/**
+/*
  * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
  *
  * EDID length is in bytes
@@ -3027,7 +3045,7 @@ struct dc_sink *dc_link_add_remote_sink(
 	return NULL;
 }
 
-/**
+/*
  * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
  *
  * Note that this just removes the struct dc_sink - it doesn't
@@ -3154,9 +3172,11 @@ void dc_lock_memory_clock_frequency(struct dc *dc)
 			core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
 }
 
-bool dc_is_plane_eligible_for_idle_optimizaitons(struct dc *dc,
-						 struct dc_plane_state *plane)
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
+		struct dc_cursor_attributes *cursor_attr)
 {
+	if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
+		return true;
 	return false;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index f4a2088ab17928ee2a7c14150f7b4ddab2e0a843..fa5059f71727a16463d990e1ebd54c7fcc480634 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -203,9 +203,21 @@ static bool program_hpd_filter(const struct dc_link *link)
 	return result;
 }
 
+bool dc_link_wait_for_t12(struct dc_link *link)
+{
+	if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) {
+		link->dc->hwss.edp_wait_for_T12(link);
+
+		return true;
+	}
+
+	return false;
+}
+
 /**
  * dc_link_detect_sink() - Determine if there is a sink connected
  *
+ * @link: pointer to the dc link
  * @type: Returned connection type
  * Does not detect downstream devices, such as MST sinks
  * or display connected through active dongles
@@ -342,7 +354,7 @@ static enum signal_type get_basic_signal_type(struct graphics_object_id encoder,
 	return SIGNAL_TYPE_NONE;
 }
 
-/**
+/*
  * dc_link_is_dp_sink_present() - Check if there is a native DP
  * or passive DP-HDMI dongle connected
  */
@@ -596,8 +608,6 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
 	dc_process_hdcp_msg(signal, link, &msg22);
 
 	if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
-		enum hdcp_message_status status = HDCP_MESSAGE_UNSUPPORTED;
-
 		msg14.data = &link->hdcp_caps.bcaps.raw;
 		msg14.length = sizeof(link->hdcp_caps.bcaps.raw);
 		msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS;
@@ -605,7 +615,7 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
 		msg14.link = HDCP_LINK_PRIMARY;
 		msg14.max_retries = 5;
 
-		status = dc_process_hdcp_msg(signal, link, &msg14);
+		dc_process_hdcp_msg(signal, link, &msg14);
 	}
 
 }
@@ -830,7 +840,7 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
 	return false;
 }
 
-/**
+/*
  * dc_link_detect() - Detect if a sink is attached to a given link
  *
  * link->local_sink is created or destroyed as needed.
@@ -1065,9 +1075,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
 			break;
 		}
 
-		if (link->local_sink->edid_caps.panel_patch.disable_fec)
-			link->ctx->dc->debug.disable_fec = true;
-
 		// Check if edid is the same
 		if ((prev_sink) &&
 		    (edid_status == EDID_THE_SAME || edid_status == EDID_OK))
@@ -1366,13 +1373,17 @@ static bool dc_link_construct(struct dc_link *link,
 	struct dc_context *dc_ctx = init_params->ctx;
 	struct encoder_init_data enc_init_data = { 0 };
 	struct panel_cntl_init_data panel_cntl_init_data = { 0 };
-	struct integrated_info info = {{{ 0 }}};
+	struct integrated_info *info;
 	struct dc_bios *bios = init_params->dc->ctx->dc_bios;
 	const struct dc_vbios_funcs *bp_funcs = bios->funcs;
 	struct bp_disp_connector_caps_info disp_connect_caps_info = { 0 };
 
 	DC_LOGGER_INIT(dc_ctx->logger);
 
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		goto create_fail;
+
 	link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
 	link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
 
@@ -1390,10 +1401,12 @@ static bool dc_link_construct(struct dc_link *link,
 	link->link_id =
 		bios->funcs->get_connector_id(bios, init_params->connector_index);
 
+	DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id);
 
 	if (bios->funcs->get_disp_connector_caps_info) {
 		bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info);
 		link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY;
+		DC_LOG_DC("BIOS object table - is_internal_display: %d", link->is_internal_display);
 	}
 
 	if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
@@ -1408,10 +1421,14 @@ static bool dc_link_construct(struct dc_link *link,
 
 	link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
 				      link->ctx->gpio_service);
+
 	if (link->hpd_gpio) {
 		dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT);
 		dal_gpio_unlock_pin(link->hpd_gpio);
 		link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio);
+
+		DC_LOG_DC("BIOS object table - hpd_gpio id: %d", link->hpd_gpio->id);
+		DC_LOG_DC("BIOS object table - hpd_gpio en: %d", link->hpd_gpio->en);
 	}
 
 	switch (link->link_id.id) {
@@ -1470,6 +1487,11 @@ static bool dc_link_construct(struct dc_link *link,
 		goto ddc_create_fail;
 	}
 
+	if (!link->ddc->ddc_pin) {
+		DC_ERROR("Failed to get I2C info for connector!\n");
+		goto ddc_create_fail;
+	}
+
 	link->ddc_hw_inst =
 		dal_ddc_get_line(dal_ddc_service_get_ddc_pin(link->ddc));
 
@@ -1508,6 +1530,8 @@ static bool dc_link_construct(struct dc_link *link,
 		goto link_enc_create_fail;
 	}
 
+	DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
+
 	link->link_enc_hw_inst = link->link_enc->transmitter;
 
 	for (i = 0; i < 4; i++) {
@@ -1530,16 +1554,20 @@ static bool dc_link_construct(struct dc_link *link,
 		if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD &&
 		    link->connector_signal == SIGNAL_TYPE_RGB)
 			continue;
+
+		DC_LOG_DC("BIOS object table - device_tag.acpi_device: %d", link->device_tag.acpi_device);
+		DC_LOG_DC("BIOS object table - device_tag.dev_id.device_type: %d", link->device_tag.dev_id.device_type);
+		DC_LOG_DC("BIOS object table - device_tag.dev_id.enum_id: %d", link->device_tag.dev_id.enum_id);
 		break;
 	}
 
 	if (bios->integrated_info)
-		info = *bios->integrated_info;
+		memcpy(info, bios->integrated_info, sizeof(*info));
 
 	/* Look for channel mapping corresponding to connector and device tag */
 	for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
 		struct external_display_path *path =
-			&info.ext_disp_conn_info.path[i];
+			&info->ext_disp_conn_info.path[i];
 
 		if (path->device_connector_id.enum_id == link->link_id.enum_id &&
 		    path->device_connector_id.id == link->link_id.id &&
@@ -1548,10 +1576,14 @@ static bool dc_link_construct(struct dc_link *link,
 			    path->device_acpi_enum == link->device_tag.acpi_device) {
 				link->ddi_channel_mapping = path->channel_mapping;
 				link->chip_caps = path->caps;
+				DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
+				DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
 			} else if (path->device_tag ==
 				   link->device_tag.dev_id.raw_device_tag) {
 				link->ddi_channel_mapping = path->channel_mapping;
 				link->chip_caps = path->caps;
+				DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
+				DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
 			}
 			break;
 		}
@@ -1570,6 +1602,7 @@ static bool dc_link_construct(struct dc_link *link,
 
 	link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
 
+	DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
 	return true;
 device_tag_fail:
 	link->link_enc->funcs->destroy(&link->link_enc);
@@ -1586,6 +1619,9 @@ static bool dc_link_construct(struct dc_link *link,
 		link->hpd_gpio = NULL;
 	}
 
+	DC_LOG_DC("BIOS object table - %s failed.\n", __func__);
+	kfree(info);
+
 	return false;
 }
 
@@ -3133,17 +3169,17 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
 {
 	struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
 	if (cp_psp && cp_psp->funcs.update_stream_config) {
-		struct cp_psp_stream_config config;
-
-		memset(&config, 0, sizeof(config));
+		struct cp_psp_stream_config config = {0};
+		enum dp_panel_mode panel_mode =
+				dp_get_panel_mode(pipe_ctx->stream->link);
 
 		config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
-		/*stream_enc_inst*/
-		config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
-		config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst;
+		config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
+		config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
 		config.dpms_off = dpms_off;
 		config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
-		config.mst_supported = (pipe_ctx->stream->signal ==
+		config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP);
+		config.mst_enabled = (pipe_ctx->stream->signal ==
 				SIGNAL_TYPE_DISPLAY_PORT_MST);
 		cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
 	}
@@ -3396,10 +3432,7 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
 }
 
 /**
- *****************************************************************************
- *  Function: dc_link_enable_hpd_filter
- *
- *  @brief
+ *  dc_link_enable_hpd_filter:
  *     If enable is true, programs HPD filter on associated HPD line using
  *     delay_on_disconnect/delay_on_connect values dependent on
  *     link->connector_signal
@@ -3407,9 +3440,8 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
  *     If enable is false, programs HPD filter on associated HPD line with no
  *     delays on connect or disconnect
  *
- *  @param [in] link: pointer to the dc link
- *  @param [in] enable: boolean specifying whether to enable hbd
- *****************************************************************************
+ *  @link:   pointer to the dc link
+ *  @enable: boolean specifying whether to enable hpd
  */
 void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
 {
@@ -3635,7 +3667,7 @@ uint32_t dc_link_bandwidth_kbps(
 	link_bw_kbps *= 8;   /* 8 bits per byte*/
 	link_bw_kbps *= link_setting->lane_count;
 
-	if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec) {
+	if (dc_link_should_enable_fec(link)) {
 		/* Account for FEC overhead.
 		 * We have to do it based on caps,
 		 * and not based on FEC being set ready,
@@ -3656,8 +3688,8 @@ uint32_t dc_link_bandwidth_kbps(
 		 * but the difference is minimal and is in a safe direction,
 		 * which all works well around potential ambiguity of DP 1.4a spec.
 		 */
-		link_bw_kbps = mul_u64_u32_shr(BIT_ULL(32) * 970LL / 1000,
-					       link_bw_kbps, 32);
+		long long fec_link_bw_kbps = link_bw_kbps * 970LL;
+		link_bw_kbps = (uint32_t)(div64_s64(fec_link_bw_kbps, 1000LL));
 	}
 
 	return link_bw_kbps;
@@ -3687,3 +3719,19 @@ bool dc_link_is_fec_supported(const struct dc_link *link)
 			!IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
 }
 
+bool dc_link_should_enable_fec(const struct dc_link *link)
+{
+	bool is_fec_disable = false;
+	bool ret = false;
+
+	if ((link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
+			link->local_sink &&
+			link->local_sink->edid_caps.panel_patch.disable_fec) ||
+			link->connector_signal == SIGNAL_TYPE_EDP) // Disable FEC for eDP
+		is_fec_disable = true;
+
+	if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && !is_fec_disable)
+		ret = true;
+
+	return ret;
+}
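The bandwidth hunk above replaces the BIT_ULL(32) shift trick with an explicit 970/1000 scale. Worked numbers for HBR2 x 4 lanes, assuming the usual 27 MHz link-rate reference (so link_rate 0x14 yields a 540,000 kHz symbol clock per lane):

/* link_bw_kbps = 540000 kHz * 8 bits/symbol = 4320000 kbps per lane
 *              * 4 lanes                    = 17280000 kbps
 * with FEC:      17280000 * 970 / 1000      = 16761600 kbps
 * i.e. a flat 3% deduction, slightly larger than the real FEC
 * overhead, per the "safe direction" comment above.
 */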
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index c5936e0643606dc29c9d1cf57c1cc3acac22a356..ae6484ab567b7a2a2d2ea348fa835d0ab8a6b818 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -37,12 +37,16 @@
 #include "dc_link_ddc.h"
 #include "dce/dce_aux.h"
 
-/*DP to Dual link DVI converter*/
+#define DC_LOGGER_INIT(logger)
+
+static const uint8_t DP_VGA_DONGLE_BRANCH_DEV_NAME[] = "DpVga";
+/* DP to Dual link DVI converter */
 static const uint8_t DP_DVI_CONVERTER_ID_4[] = "m2DVIa";
 static const uint8_t DP_DVI_CONVERTER_ID_5[] = "3393N2";
 
 #define AUX_POWER_UP_WA_DELAY 500
 #define I2C_OVER_AUX_DEFER_WA_DELAY 70
+#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40
 #define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1
 
 /* CV smart dongle slave address for retrieving supported HDTV modes*/
@@ -194,6 +198,10 @@ static void ddc_service_construct(
 	if (BP_RESULT_OK != dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info)) {
 		ddc_service->ddc_pin = NULL;
 	} else {
+		DC_LOGGER_INIT(ddc_service->ctx->logger);
+		DC_LOG_DC("BIOS object table - i2c_line: %d", i2c_info.i2c_line);
+		DC_LOG_DC("BIOS object table - i2c_engine_id: %d", i2c_info.i2c_engine_id);
+
 		hw_info.ddc_channel = i2c_info.i2c_line;
 		if (ddc_service->link != NULL)
 			hw_info.hw_supported = i2c_info.i2c_hw_assist;
@@ -286,6 +294,15 @@ static uint32_t defer_delay_converter_wa(
 {
 	struct dc_link *link = ddc->link;
 
+	if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER &&
+		link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_0080E1 &&
+		!memcmp(link->dpcd_caps.branch_dev_name,
+		    DP_VGA_DONGLE_BRANCH_DEV_NAME,
+			sizeof(link->dpcd_caps.branch_dev_name)))
+
+		return defer_delay > DPVGA_DONGLE_AUX_DEFER_WA_DELAY ?
+			defer_delay : DPVGA_DONGLE_AUX_DEFER_WA_DELAY;
+
 	if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_0080E1 &&
 	    !memcmp(link->dpcd_caps.branch_dev_name,
 		    DP_DVI_CONVERTER_ID_4,
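The DP-VGA dongle workaround above is simply a floor on the defer delay; the ternary is equivalent to the sketch below:

/* return max(defer_delay, DPVGA_DONGLE_AUX_DEFER_WA_DELAY);
 * i.e. never use a defer delay below the 40 workaround value when
 * this particular branch device is detected.
 */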
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 1e4794e2825caf2b23b330255b899ec6c5614a05..c1391bfb7a9bc58de9460bc778e2f4fac0120318 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -126,9 +126,7 @@ static void dpcd_set_training_pattern(
 static enum dc_dp_training_pattern decide_cr_training_pattern(
 		const struct dc_link_settings *link_settings)
 {
-	enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_1;
-
-	return pattern;
+	return DP_TRAINING_PATTERN_SEQUENCE_1;
 }
 
 static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
@@ -3710,7 +3708,7 @@ bool detect_dp_sink_caps(struct dc_link *link)
 	/* TODO save sink caps in link->sink */
 }
 
-enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
+static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
 {
 	enum dc_link_rate link_rate;
 	// LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation.
@@ -4348,7 +4346,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready)
 	struct link_encoder *link_enc = link->link_enc;
 	uint8_t fec_config = 0;
 
-	if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
+	if (!dc_link_should_enable_fec(link))
 		return;
 
 	if (link_enc->funcs->fec_set_ready &&
@@ -4383,7 +4381,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
 {
 	struct link_encoder *link_enc = link->link_enc;
 
-	if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
+	if (!dc_link_should_enable_fec(link))
 		return;
 
 	if (link_enc->funcs->fec_set_enable &&
@@ -4409,24 +4407,39 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
 void dpcd_set_source_specific_data(struct dc_link *link)
 {
 	if (!link->dc->vendor_signature.is_valid) {
-		enum dc_status result_write_min_hblank = DC_NOT_SUPPORTED;
-		struct dpcd_amd_signature amd_signature;
-		amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
-		amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
-		amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
-		amd_signature.device_id_byte1 =
+		enum dc_status __maybe_unused result_write_min_hblank = DC_NOT_SUPPORTED;
+		struct dpcd_amd_signature amd_signature = {0};
+		struct dpcd_amd_device_id amd_device_id = {0};
+
+		amd_device_id.device_id_byte1 =
 				(uint8_t)(link->ctx->asic_id.chip_id);
-		amd_signature.device_id_byte2 =
+		amd_device_id.device_id_byte2 =
 				(uint8_t)(link->ctx->asic_id.chip_id >> 8);
-		memset(&amd_signature.zero, 0, 4);
-		amd_signature.dce_version =
+		amd_device_id.dce_version =
 				(uint8_t)(link->ctx->dce_version);
-		amd_signature.dal_version_byte1 = 0x0; // needed? where to get?
-		amd_signature.dal_version_byte2 = 0x0; // needed? where to get?
+		amd_device_id.dal_version_byte1 = 0x0; // needed? where to get?
+		amd_device_id.dal_version_byte2 = 0x0; // needed? where to get?
 
-		core_link_write_dpcd(link, DP_SOURCE_OUI,
+		core_link_read_dpcd(link, DP_SOURCE_OUI,
+				(uint8_t *)(&amd_signature),
+				sizeof(amd_signature));
+
+		if (!((amd_signature.AMD_IEEE_TxSignature_byte1 == 0x0) &&
+			(amd_signature.AMD_IEEE_TxSignature_byte2 == 0x0) &&
+			(amd_signature.AMD_IEEE_TxSignature_byte3 == 0x1A))) {
+
+			amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
+			amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
+			amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
+
+			core_link_write_dpcd(link, DP_SOURCE_OUI,
 				(uint8_t *)(&amd_signature),
 				sizeof(amd_signature));
+		}
+
+		core_link_write_dpcd(link, DP_SOURCE_OUI + 0x03,
+				(uint8_t *)(&amd_device_id),
+				sizeof(amd_device_id));
 
 		if (link->ctx->dce_version >= DCN_VERSION_2_0 &&
 			link->dc->caps.min_horizontal_blanking_period != 0) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 07c22556480bec65c5f522b79117e2bf61b23d57..0c26c2ade782c4ea69713e814d186e06ca755e39 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1117,7 +1117,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
  * We also need to make sure pipe_ctx->plane_res.scl_data.h_active uses the
  * original h_border_left value in its calculation.
  */
-int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
+static int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
 {
 	int store_h_border_left = pipe_ctx->stream->timing.h_border_left;
 
@@ -1128,8 +1128,8 @@ int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
 	return store_h_border_left;
 }
 
-void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx,
-                                  int store_h_border_left)
+static void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx,
+					 int store_h_border_left)
 {
 	pipe_ctx->stream->dst.x -= store_h_border_left;
 	pipe_ctx->stream->timing.h_border_left = store_h_border_left;
@@ -1153,8 +1153,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 
 	calculate_viewport(pipe_ctx);
 
-	if (pipe_ctx->plane_res.scl_data.viewport.height < 12 ||
-		pipe_ctx->plane_res.scl_data.viewport.width < 12) {
+	if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
+		pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) {
 		if (store_h_border_left) {
 			restore_border_left_from_dst(pipe_ctx,
 				store_h_border_left);
@@ -1697,7 +1697,7 @@ static bool are_stream_backends_same(
 	return true;
 }
 
-/**
+/*
  * dc_is_stream_unchanged() - Compare two stream states for equivalence.
  *
  * Checks if there a difference between the two states
@@ -1718,7 +1718,7 @@ bool dc_is_stream_unchanged(
 	return true;
 }
 
-/**
+/*
  * dc_is_stream_scaling_unchanged() - Compare scaling rectangles of two streams.
  */
 bool dc_is_stream_scaling_unchanged(
@@ -1833,7 +1833,7 @@ static struct audio *find_first_free_audio(
 	return 0;
 }
 
-/**
+/*
  * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
  */
 enum dc_status dc_add_stream_to_ctx(
@@ -1860,7 +1860,7 @@ enum dc_status dc_add_stream_to_ctx(
 	return res;
 }
 
-/**
+/*
  * dc_remove_stream_from_ctx() - Remove a stream from a dc_state.
  */
 enum dc_status dc_remove_stream_from_ctx(
@@ -2075,6 +2075,20 @@ static int acquire_resource_from_hw_enabled_state(
 	return -1;
 }
 
+static void mark_seamless_boot_stream(
+		const struct dc  *dc,
+		struct dc_stream_state *stream)
+{
+	struct dc_bios *dcb = dc->ctx->dc_bios;
+
+	/* TODO: Check Linux */
+	if (dc->config.allow_seamless_boot_optimization &&
+			!dcb->funcs->is_accelerated_mode(dcb)) {
+		if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
+			stream->apply_seamless_boot_optimization = true;
+	}
+}
+
 enum dc_status resource_map_pool_resources(
 		const struct dc  *dc,
 		struct dc_state *context,
@@ -2085,22 +2099,20 @@ enum dc_status resource_map_pool_resources(
 	struct dc_context *dc_ctx = dc->ctx;
 	struct pipe_ctx *pipe_ctx = NULL;
 	int pipe_idx = -1;
-	struct dc_bios *dcb = dc->ctx->dc_bios;
 
 	calculate_phy_pix_clks(stream);
 
-	/* TODO: Check Linux */
-	if (dc->config.allow_seamless_boot_optimization &&
-			!dcb->funcs->is_accelerated_mode(dcb)) {
-		if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
-			stream->apply_seamless_boot_optimization = true;
-	}
+	mark_seamless_boot_stream(dc, stream);
 
-	if (stream->apply_seamless_boot_optimization)
+	if (stream->apply_seamless_boot_optimization) {
 		pipe_idx = acquire_resource_from_hw_enabled_state(
 				&context->res_ctx,
 				pool,
 				stream);
+		if (pipe_idx < 0)
+			/* hw resource was assigned to other stream */
+			stream->apply_seamless_boot_optimization = false;
+	}
 
 	if (pipe_idx < 0)
 		/* acquire new resources */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index c103f858375d060f6cdd79c1ab4fc058edbc1978..25fa712a7847464b4f4215d631d6ba8a1e8e4919 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -244,7 +244,7 @@ struct dc_stream_status *dc_stream_get_status(
 }
 
 #ifndef TRIM_FSFT
-/**
+/*
  * dc_optimize_timing_for_fsft() - dc to optimize timing
  */
 bool dc_optimize_timing_for_fsft(
@@ -260,8 +260,7 @@ bool dc_optimize_timing_for_fsft(
 }
 #endif
 
-
-/**
+/*
  * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
  */
 bool dc_stream_set_cursor_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 3d7d27435f15eb099d7061a5e4707a52848c8d47..e6b9c6a718413a6cd2570c879ca11c52cf8775f8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -115,7 +115,7 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
 	return plane_state;
 }
 
-/**
+/*
  *****************************************************************************
  *  Function: dc_plane_get_status
  *
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 3aedadb34548ef2be2f33ae6fed2a6a530047eda..4eee3a55fa3018ad0f65bfe50f1e949fbf4de7ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -42,12 +42,13 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.116"
+#define DC_VER "3.2.122"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
 #define MAX_STREAMS 6
 #define MAX_SINKS_PER_LINK 4
+#define MIN_VIEWPORT_SIZE 12
 
 /*******************************************************************************
  * Display Core Interfaces
@@ -171,6 +172,9 @@ struct dc_caps {
 	bool dmcub_support;
 	uint32_t num_of_internal_disp;
 	enum dp_protocol_version max_dp_protocol_version;
+	unsigned int mall_size_per_mem_channel;
+	unsigned int mall_size_total;
+	unsigned int cursor_cache_size;
 	struct dc_plane_cap planes[MAX_PLANES];
 	struct dc_color_caps color;
 };
@@ -481,7 +485,6 @@ struct dc_debug_options {
 	bool performance_trace;
 	bool az_endpoint_mute_only;
 	bool always_use_regamma;
-	bool p010_mpo_support;
 	bool recovery_enabled;
 	bool avoid_vbios_exec_table;
 	bool scl_reset_length10;
@@ -499,6 +502,9 @@ struct dc_debug_options {
 	bool dmcub_emulation;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 	bool disable_idle_power_optimizations;
+	unsigned int mall_size_override;
+	unsigned int mall_additional_timer_percent;
+	bool mall_error_as_fatal;
 #endif
 	bool dmub_command_table; /* for testing only */
 	struct dc_bw_validation_profile bw_val_profile;
@@ -521,7 +527,6 @@ struct dc_debug_options {
 	bool usbc_combo_phy_reset_wa;
 	bool disable_dsc;
 	bool enable_dram_clock_change_one_display_vactive;
-	bool force_ignore_link_settings;
 	union mem_low_power_enable_options enable_mem_low_power;
 };
 
@@ -633,7 +638,6 @@ struct dc {
 
 	const char *build_id;
 	struct vm_helper *vm_helper;
-	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
 };
 
 enum frame_buffer_mode {
@@ -671,16 +675,10 @@ struct dc_init_data {
 	struct dc_config flags;
 	uint64_t log_mask;
 
-	/**
-	 * gpu_info FW provided soc bounding box struct or 0 if not
-	 * available in FW
-	 */
-	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
 	struct dpcd_vendor_signature vendor_signature;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 	bool force_smu_not_present;
 #endif
-	bool force_ignore_link_settings;
 };
 
 struct dc_callback_init {
@@ -1269,8 +1267,8 @@ enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32
 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 
-bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
-						 struct dc_plane_state *plane);
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
+				struct dc_cursor_attributes *cursor_attr);
 
 void dc_allow_idle_optimizations(struct dc *dc, bool allow);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 80a2191a3115ca55771aa75bd8bdfc88d152630c..cc6fb838420e8c00c7fe20cabd5b88147e55f2ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -451,6 +451,9 @@ struct dpcd_amd_signature {
 	uint8_t AMD_IEEE_TxSignature_byte1;
 	uint8_t AMD_IEEE_TxSignature_byte2;
 	uint8_t AMD_IEEE_TxSignature_byte3;
+};
+
+struct dpcd_amd_device_id {
 	uint8_t device_id_byte1;
 	uint8_t device_id_byte2;
 	uint8_t zero[4];
diff --git a/drivers/gpu/drm/amd/display/dc/dc_edid_parser.c b/drivers/gpu/drm/amd/display/dc/dc_edid_parser.c
new file mode 100644
index 0000000000000000000000000000000000000000..0db5b49e9d5e9055b65e90e61ecee4501b6f4b8a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_edid_parser.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce/dce_dmcu.h"
+#include "dc_edid_parser.h"
+
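+/*
+ * These helpers hand EDID CEA blocks to the DMCU firmware in 8-byte
+ * chunks and read back the parse results: an ack/nack per chunk and,
+ * once parsing completes, the AMD vendor-specific data block with the
+ * panel's supported min/max frame rates.
+ */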
+bool dc_edid_parser_send_cea(struct dc *dc,
+		int offset,
+		int total_length,
+		uint8_t *data,
+		int length)
+{
+	struct dmcu *dmcu = dc->res_pool->dmcu;
+
+	if (dmcu &&
+	    dmcu->funcs->is_dmcu_initialized(dmcu) &&
+	    dmcu->funcs->send_edid_cea) {
+		return dmcu->funcs->send_edid_cea(dmcu,
+				offset,
+				total_length,
+				data,
+				length);
+	}
+
+	return false;
+}
+
+bool dc_edid_parser_recv_cea_ack(struct dc *dc, int *offset)
+{
+	struct dmcu *dmcu = dc->res_pool->dmcu;
+
+	if (dmcu &&
+	    dmcu->funcs->is_dmcu_initialized(dmcu) &&
+	    dmcu->funcs->recv_edid_cea_ack) {
+		return dmcu->funcs->recv_edid_cea_ack(dmcu, offset);
+	}
+
+	return false;
+}
+
+bool dc_edid_parser_recv_amd_vsdb(struct dc *dc,
+		int *version,
+		int *min_frame_rate,
+		int *max_frame_rate)
+{
+	struct dmcu *dmcu = dc->res_pool->dmcu;
+
+	if (dmcu &&
+	    dmcu->funcs->is_dmcu_initialized(dmcu) &&
+	    dmcu->funcs->recv_amd_vsdb) {
+		return dmcu->funcs->recv_amd_vsdb(dmcu,
+				version,
+				min_frame_rate,
+				max_frame_rate);
+	}
+
+	return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_edid_parser.h b/drivers/gpu/drm/amd/display/dc/dc_edid_parser.h
new file mode 100644
index 0000000000000000000000000000000000000000..da67ec06f0a2f360369c24dc0e58c737aa01baa1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_edid_parser.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_EDID_PARSER_H_
+#define _DC_EDID_PARSER_H_
+
+#include "core_types.h"
+
+bool dc_edid_parser_send_cea(struct dc *dc,
+		int offset,
+		int total_length,
+		uint8_t *data,
+		int length);
+
+bool dc_edid_parser_recv_cea_ack(struct dc *dc, int *offset);
+
+bool dc_edid_parser_recv_amd_vsdb(struct dc *dc,
+		int *version,
+		int *min_frame_rate,
+		int *max_frame_rate);
+
+#endif /* _DC_EDID_PARSER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 57edb25fc3812f5c31c821512f0418e09b044f49..a612ba6dc3898526c6ad925a52b96a067cd62382 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -34,6 +34,7 @@
 
 #include "dc.h"
 #include "dc_dmub_srv.h"
+#include "reg_helper.h"
 
 static inline void submit_dmub_read_modify_write(
 	struct dc_reg_helper_state *offload,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 701aa7178a896346809b1e6628544a3755184946..b41e6367b15ef967ad8e89c59dc8232e4df497de 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -71,6 +71,7 @@ struct dc_plane_address {
 	union {
 		struct{
 			PHYSICAL_ADDRESS_LOC addr;
+			PHYSICAL_ADDRESS_LOC cursor_cache_addr;
 			PHYSICAL_ADDRESS_LOC meta_addr;
 			union large_integer dcc_const_color;
 		} grph;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 6d9a60c9dcc070e1b6a06f9133c6f598d4c06817..e189f16bc026bbf2e670a991a365fd86dd281123 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -103,6 +103,8 @@ struct dc_link {
 	bool lttpr_non_transparent_mode;
 	bool is_internal_display;
 
+	bool edp_sink_present;
+
 	/* caps is the same as reported_link_cap. link_training uses
 	 * reported_link_cap. Will clean up. TODO
 	 */
@@ -259,6 +261,13 @@ enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link);
 bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
 		union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss);
 
+/*
+ * On eDP links this function call will stall until T12 has elapsed.
+ * If the panel is not in the powered-off state, this function will
+ * return immediately.
+ */
+bool dc_link_wait_for_t12(struct dc_link *link);
+
 enum dc_status read_hpd_rx_irq_data(
 	struct dc_link *link,
 	union hpd_irq_data *irq_data);
@@ -369,5 +378,6 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
 	const struct dc_crtc_timing *timing);
 
 bool dc_link_is_fec_supported(const struct dc_link *link);
+bool dc_link_should_enable_fec(const struct dc_link *link);
 
 #endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index e243c01b9672eee14e92e384e300d61213c4a601..a4f7ec888c6762c8802178fd9490ca0084c12217 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -130,6 +130,14 @@ union stream_update_flags {
 	uint32_t raw;
 };
 
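+/* DP test pattern request carried on the stream and applied through
+ * dc_stream_update::pending_test_pattern.
+ */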
+struct test_pattern {
+	enum dp_test_pattern type;
+	enum dp_test_pattern_color_space color_space;
+	struct link_training_settings const *p_link_settings;
+	unsigned char const *p_custom_pattern;
+	unsigned int cust_pattern_size;
+};
+
 struct dc_stream_state {
 	// sink is deprecated, new code should not reference
 	// this pointer
@@ -227,6 +235,8 @@ struct dc_stream_state {
 
 	uint32_t stream_id;
 	bool is_dsc_enabled;
+
+	struct test_pattern test_pattern;
 	union stream_update_flags update_flags;
 };
 
@@ -261,6 +271,7 @@ struct dc_stream_update {
 	struct dc_dsc_config *dsc_config;
 	struct dc_transfer_func *func_shaper;
 	struct dc_3dlut *lut3d_func;
+	struct test_pattern *pending_test_pattern;
 };
 
 bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 2a2a0fdb925393d2aa97788d1eb843fceccb1f39..7866cf2a668fa2ae0501b0038d52fbae33905b06 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -868,7 +868,7 @@ void dce_aud_wall_dto_setup(
 }
 
 #if defined(CONFIG_DRM_AMD_DC_SI)
-void dce60_aud_wall_dto_setup(
+static void dce60_aud_wall_dto_setup(
 	struct audio *audio,
 	enum signal_type signal,
 	const struct audio_crtc_info *crtc_info,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index cda5fd0464bc5309cc1ffa3c19b766be2d1ba196..d51b5fe91287ddbe015e4aba12a23652e19ac2a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -388,12 +388,6 @@ static enum aux_channel_operation_result get_channel_status(
 	}
 }
 
-enum i2caux_engine_type get_engine_type(
-		const struct dce_aux *engine)
-{
-	return I2CAUX_ENGINE_TYPE_AUX;
-}
-
 static bool acquire(
 	struct dce_aux *engine,
 	struct ddc *ddc)
@@ -582,7 +576,7 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
 	*operation_result = get_channel_status(aux_engine, &returned_bytes);
 
 	if (*operation_result == AUX_CHANNEL_OPERATION_SUCCEEDED) {
-		int bytes_replied = 0;
+		int __maybe_unused bytes_replied = 0;
 		bytes_replied = read_channel_reply(aux_engine, payload->length,
 					 payload->data, payload->reply,
 					 &status);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index 382465862f2972482be42d6fe0935735910c25bc..277484cf853e483663df86a61bf7d1cf77747b1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -124,7 +124,6 @@ struct dce110_aux_registers {
 	AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
 	AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
 	AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
-	AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
 	AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
 	AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
 	AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index fb733f573715e05f8b496d52beb26015de2eb616..dec58b3c42e4d52badf8054933f43a18f5b19706 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -113,20 +113,19 @@ static const struct spread_spectrum_data *get_ss_data_entry(
 }
 
 /**
- * Function: calculate_fb_and_fractional_fb_divider
+ * calculate_fb_and_fractional_fb_divider - Calculates feedback and fractional
+ *                                          feedback divider values
  *
- * * DESCRIPTION: Calculates feedback and fractional feedback dividers values
+ * @calc_pll_cs:	    Pointer to clock source information
+ * @target_pix_clk_100hz:   Desired frequency in 100 Hz
+ * @ref_divider:            Reference divider (already known)
+ * @post_divider:           Post Divider (already known)
+ * @feedback_divider_param: Pointer to store the
+ *			    calculated feedback divider value
+ * @fract_feedback_divider_param: Pointer to store the
+ *			    calculated fractional feedback divider value
  *
- *PARAMETERS:
- * targetPixelClock             Desired frequency in 100 Hz
- * ref_divider                  Reference divider (already known)
- * postDivider                  Post Divider (already known)
- * feedback_divider_param       Pointer where to store
- *					calculated feedback divider value
- * fract_feedback_divider_param Pointer where to store
- *					calculated fract feedback divider value
- *
- *RETURNS:
+ * return:
  * It fills the locations pointed to by feedback_divider_param
  *					and fract_feedback_divider_param
  * It returns	- true if the feedback divider is not 0
@@ -175,22 +174,22 @@ static bool calculate_fb_and_fractional_fb_divider(
 }
 
 /**
-*calc_fb_divider_checking_tolerance
-*
-*DESCRIPTION: Calculates Feedback and Fractional Feedback divider values
-*		for passed Reference and Post divider, checking for tolerance.
-*PARAMETERS:
-* pll_settings		Pointer to structure
-* ref_divider		Reference divider (already known)
-* postDivider		Post Divider (already known)
-* tolerance		Tolerance for Calculated Pixel Clock to be within
-*
-*RETURNS:
-* It fills the PLLSettings structure with PLL Dividers values
-* if calculated values are within required tolerance
-* It returns	- true if error is within tolerance
-*		- false if error is not within tolerance
-*/
+ * calc_fb_divider_checking_tolerance - Calculates Feedback and
+ *                                      Fractional Feedback divider values
+ *                                      for the passed Reference and Post
+ *                                      divider, checking for tolerance.
+ * @calc_pll_cs:	Pointer to clock source information
+ * @pll_settings:	Pointer to PLL settings
+ * @ref_divider:	Reference divider (already known)
+ * @post_divider:	Post Divider (already known)
+ * @tolerance:		Tolerance the calculated pixel clock must stay within
+ *
+ * return:
+ *  It fills the PLLSettings structure with the PLL divider values
+ *  if the calculated values are within the required tolerance
+ *  It returns	- true if the error is within tolerance
+ *		- false if the error is not within tolerance
+ */
 static bool calc_fb_divider_checking_tolerance(
 		struct calc_pll_clock_source *calc_pll_cs,
 		struct pll_settings *pll_settings,
@@ -241,7 +240,7 @@ static bool calc_fb_divider_checking_tolerance(
 		pll_settings->calculated_pix_clk_100hz =
 			actual_calculated_clock_100hz;
 		pll_settings->vco_freq =
-			actual_calculated_clock_100hz * post_divider / 10;
+			div_u64((u64)actual_calculated_clock_100hz * post_divider, 10);
 		return true;
 	}
 	return false;
@@ -460,7 +459,7 @@ static bool pll_adjust_pix_clk(
 	return false;
 }
 
-/**
+/*
  * Calculate PLL Dividers for given Clock Value.
  * First will call VBIOS Adjust Exec table to check if requested Pixel clock
  * will be Adjusted based on usage.
@@ -871,6 +870,20 @@ static bool dce110_program_pix_clk(
 	bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC =
 					pll_settings->use_external_clk;
 
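+	/* Map the requested deep-color depth onto the transmitter color
+	 * depth passed to the VBIOS SetPixelClock call; other depths keep
+	 * the default.
+	 */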
+	switch (pix_clk_params->color_depth) {
+	case COLOR_DEPTH_101010:
+		bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_30;
+		break;
+	case COLOR_DEPTH_121212:
+		bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_36;
+		break;
+	case COLOR_DEPTH_161616:
+		bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_48;
+		break;
+	default:
+		break;
+	}
+
 	if (clk_src->bios->funcs->set_pixel_clock(
 			clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
 		return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index f3ed8b619cafd5b84b05ecc5c6e4cd64d059097a..ddc789daf3b13a38ad67c6d05509902f156cdadd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -57,6 +57,9 @@
 #define MCP_SYNC_PHY_LOCK 0x90
 #define MCP_SYNC_PHY_UNLOCK 0x91
 #define MCP_BL_SET_PWM_FRAC 0x6A  /* Enable or disable Fractional PWM */
+#define MCP_SEND_EDID_CEA 0xA0
+#define EDID_CEA_CMD_ACK 1
+#define EDID_CEA_CMD_NACK 2
 #define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK   0x00000001L
 
 // PSP FW version
@@ -65,13 +68,17 @@
 //Register access policy version
 #define mmMP0_SMN_C2PMSG_91				0x1609B
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static const uint32_t abm_gain_stepsize = 0x0060;
+#endif
+
 static bool dce_dmcu_init(struct dmcu *dmcu)
 {
 	// Do nothing
 	return true;
 }
 
-bool dce_dmcu_load_iram(struct dmcu *dmcu,
+static bool dce_dmcu_load_iram(struct dmcu *dmcu,
 		unsigned int start_offset,
 		const char *src,
 		unsigned int bytes)
@@ -807,6 +814,120 @@ static bool dcn20_unlock_phy(struct dmcu *dmcu)
 	return true;
 }
 
+static bool dcn10_send_edid_cea(struct dmcu *dmcu,
+		int offset,
+		int total_length,
+		uint8_t *data,
+		int length)
+{
+	struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+	uint32_t header, data1, data2;
+
+	/* If microcontroller is not running, do nothing */
+	if (dmcu->dmcu_state != DMCU_RUNNING)
+		return false;
+
+	if (length > 8 || length <= 0)
+		return false;
+
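+	/* header carries the 16-bit chunk offset and 16-bit total length;
+	 * data1/data2 carry the 8 payload bytes, big-endian within each word.
+	 */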
+	header = ((uint32_t)offset & 0xFFFF) << 16 | (total_length & 0xFFFF);
+	data1 = (((uint32_t)data[0]) << 24) | (((uint32_t)data[1]) << 16) |
+		(((uint32_t)data[2]) << 8) | ((uint32_t)data[3]);
+	data2 = (((uint32_t)data[4]) << 24) | (((uint32_t)data[5]) << 16) |
+		(((uint32_t)data[6]) << 8) | ((uint32_t)data[7]);
+
+	/* waitDMCUReadyForCmd */
+	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+	/* setDMCUParam_Cmd */
+	REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_SEND_EDID_CEA);
+
+	REG_WRITE(MASTER_COMM_DATA_REG1, header);
+	REG_WRITE(MASTER_COMM_DATA_REG2, data1);
+	REG_WRITE(MASTER_COMM_DATA_REG3, data2);
+
+	/* notifyDMCUMsg */
+	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+	/* waitDMCUReadyForCmd */
+	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+	return true;
+}
+
+static bool dcn10_get_scp_results(struct dmcu *dmcu,
+		uint32_t *cmd,
+		uint32_t *data1,
+		uint32_t *data2,
+		uint32_t *data3)
+{
+	struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+	/* If microcontroller is not running, do nothing */
+	if (dmcu->dmcu_state != DMCU_RUNNING)
+		return false;
+
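+	/* Latch the four slave (SCP) mailbox words written by the DMCU */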
+	*cmd = REG_READ(SLAVE_COMM_CMD_REG);
+	*data1 =  REG_READ(SLAVE_COMM_DATA_REG1);
+	*data2 =  REG_READ(SLAVE_COMM_DATA_REG2);
+	*data3 =  REG_READ(SLAVE_COMM_DATA_REG3);
+
+	/* clear SCP interrupt */
+	REG_UPDATE(SLAVE_COMM_CNTL_REG, SLAVE_COMM_INTERRUPT, 0);
+
+	return true;
+}
+
+static bool dcn10_recv_amd_vsdb(struct dmcu *dmcu,
+		int *version,
+		int *min_frame_rate,
+		int *max_frame_rate)
+{
+	uint32_t data[4];
+	int cmd, ack, len;
+
+	if (!dcn10_get_scp_results(dmcu, &data[0], &data[1], &data[2], &data[3]))
+		return false;
+
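+	/* SCP reply layout: command in bits [9:0] and payload length in
+	 * bits [15:10] of the first word; the ack/nack code is in the second.
+	 */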
+	cmd = data[0] & 0x3FF;
+	len = (data[0] >> 10) & 0x3F;
+	ack = data[1];
+
+	if (cmd != MCP_SEND_EDID_CEA || ack != EDID_CEA_CMD_ACK || len != 12)
+		return false;
+
+	if ((data[2] & 0xFF)) {
+		*version = (data[2] >> 8) & 0xFF;
+		*min_frame_rate = (data[3] >> 16) & 0xFFFF;
+		*max_frame_rate = data[3] & 0xFFFF;
+		return true;
+	}
+
+	return false;
+}
+
+static bool dcn10_recv_edid_cea_ack(struct dmcu *dmcu, int *offset)
+{
+	uint32_t data[4];
+	int cmd, ack;
+
+	if (!dcn10_get_scp_results(dmcu,
+				&data[0], &data[1], &data[2], &data[3]))
+		return false;
+
+	cmd = data[0] & 0x3FF;
+	ack = data[1];
+
+	if (cmd != MCP_SEND_EDID_CEA)
+		return false;
+
+	if (ack == EDID_CEA_CMD_ACK)
+		return true;
+
+	*offset = data[2]; /* nack */
+	return false;
+}
+
 #endif //(CONFIG_DRM_AMD_DC_DCN)
 
 static const struct dmcu_funcs dce_funcs = {
@@ -829,6 +950,9 @@ static const struct dmcu_funcs dcn10_funcs = {
 	.get_psr_state = dcn10_get_dmcu_psr_state,
 	.set_psr_wait_loop = dcn10_psr_wait_loop,
 	.get_psr_wait_loop = dcn10_get_psr_wait_loop,
+	.send_edid_cea = dcn10_send_edid_cea,
+	.recv_amd_vsdb = dcn10_recv_amd_vsdb,
+	.recv_edid_cea_ack = dcn10_recv_edid_cea_ack,
 	.is_dmcu_initialized = dcn10_is_dmcu_initialized
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index 93e7f34d4775e164c02db133233790b9cba2487d..ff726b35ef6abd5abbd09477f7f0aabc3744abc3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -40,6 +40,10 @@
 	SR(MASTER_COMM_DATA_REG3), \
 	SR(MASTER_COMM_CMD_REG), \
 	SR(MASTER_COMM_CNTL_REG), \
+	SR(SLAVE_COMM_DATA_REG1), \
+	SR(SLAVE_COMM_DATA_REG2), \
+	SR(SLAVE_COMM_DATA_REG3), \
+	SR(SLAVE_COMM_CMD_REG), \
 	SR(DMCU_IRAM_RD_CTRL), \
 	SR(DMCU_IRAM_RD_DATA), \
 	SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
@@ -112,6 +116,7 @@
 	DMCU_SF(MASTER_COMM_CMD_REG, \
 			MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
 	DMCU_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
+	DMCU_SF(SLAVE_COMM_CNTL_REG, SLAVE_COMM_INTERRUPT, mask_sh), \
 	DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
 			STATIC_SCREEN1_INT_TO_UC_EN, mask_sh), \
 	DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
@@ -179,6 +184,7 @@
 	type UC_IN_RESET; \
 	type MASTER_COMM_CMD_REG_BYTE0; \
 	type MASTER_COMM_INTERRUPT; \
+	type SLAVE_COMM_INTERRUPT; \
 	type DPHY_RX_FAST_TRAINING_CAPABLE; \
 	type DPHY_LOAD_BS_COUNT; \
 	type STATIC_SCREEN1_INT_TO_UC_EN; \
@@ -211,6 +217,11 @@ struct dce_dmcu_registers {
 	uint32_t MASTER_COMM_DATA_REG3;
 	uint32_t MASTER_COMM_CMD_REG;
 	uint32_t MASTER_COMM_CNTL_REG;
+	uint32_t SLAVE_COMM_DATA_REG1;
+	uint32_t SLAVE_COMM_DATA_REG2;
+	uint32_t SLAVE_COMM_DATA_REG3;
+	uint32_t SLAVE_COMM_CMD_REG;
+	uint32_t SLAVE_COMM_CNTL_REG;
 	uint32_t DMCU_IRAM_RD_CTRL;
 	uint32_t DMCU_IRAM_RD_DATA;
 	uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK;
@@ -317,6 +328,4 @@ struct dmcu *dcn21_dmcu_create(
 
 void dce_dmcu_destroy(struct dmcu **dmcu);
 
-static const uint32_t abm_gain_stepsize = 0x0060;
-
 #endif /* _DCE_ABM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 7fbd92fbc63a98e9eb453e1c3030dbcdd521a526..a524f471e0d75c70bcf7222f59d7927ba8185d6b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -435,7 +435,7 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
 	return dce_i2c_hw;
 }
 
-enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(
+static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(
 	struct dce_i2c_hw *dce_i2c_hw,
 	uint32_t timeout,
 	enum i2c_channel_operation_result expected_result)
@@ -502,7 +502,7 @@ static uint32_t get_transaction_timeout_hw(
 	return period_timeout * num_of_clock_stretches;
 }
 
-bool dce_i2c_hw_engine_submit_payload(
+static bool dce_i2c_hw_engine_submit_payload(
 	struct dce_i2c_hw *dce_i2c_hw,
 	struct i2c_payload *payload,
 	bool middle_of_transaction,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index 87d8428df6c46a19ee1a229626d3be16f66ce17f..6846afd83701ba5f68af2a49cebaa9e94ac3dc0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -339,7 +339,7 @@ static bool start_sync_sw(
 	return false;
 }
 
-void dce_i2c_sw_engine_set_speed(
+static void dce_i2c_sw_engine_set_speed(
 	struct dce_i2c_sw *engine,
 	uint32_t speed)
 {
@@ -353,7 +353,7 @@ void dce_i2c_sw_engine_set_speed(
 		engine->clock_delay = 12;
 }
 
-bool dce_i2c_sw_engine_acquire_engine(
+static bool dce_i2c_sw_engine_acquire_engine(
 	struct dce_i2c_sw *engine,
 	struct ddc *ddc)
 {
@@ -397,7 +397,7 @@ bool dce_i2c_engine_acquire_sw(
 
 
 
-void dce_i2c_sw_engine_submit_channel_request(
+static void dce_i2c_sw_engine_submit_channel_request(
 	struct dce_i2c_sw *engine,
 	struct i2c_request_transaction_data *req)
 {
@@ -440,7 +440,8 @@ void dce_i2c_sw_engine_submit_channel_request(
 		I2C_CHANNEL_OPERATION_SUCCEEDED :
 		I2C_CHANNEL_OPERATION_FAILED;
 }
-bool dce_i2c_sw_engine_submit_payload(
+
+static bool dce_i2c_sw_engine_submit_payload(
 	struct dce_i2c_sw *engine,
 	struct i2c_payload *payload,
 	bool middle_of_transaction)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 210466b2d8631f0aadec3160b969cac8897e4604..1e77ffee71b30752006fc1202a2356d427f147f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -1197,7 +1197,7 @@ void dce110_link_encoder_enable_dp_mst_output(
 
 #if defined(CONFIG_DRM_AMD_DC_SI)
 /* enables DP PHY output */
-void dce60_link_encoder_enable_dp_output(
+static void dce60_link_encoder_enable_dp_output(
 	struct link_encoder *enc,
 	const struct dc_link_settings *link_settings,
 	enum clock_source_id clock_source)
@@ -1236,7 +1236,7 @@ void dce60_link_encoder_enable_dp_output(
 }
 
 /* enables DP PHY output in MST mode */
-void dce60_link_encoder_enable_dp_mst_output(
+static void dce60_link_encoder_enable_dp_mst_output(
 	struct link_encoder *enc,
 	const struct dc_link_settings *link_settings,
 	enum clock_source_id clock_source)
@@ -1426,7 +1426,7 @@ void dce110_link_encoder_dp_set_phy_pattern(
 
 #if defined(CONFIG_DRM_AMD_DC_SI)
 /* set DP PHY test and training patterns */
-void dce60_link_encoder_dp_set_phy_pattern(
+static void dce60_link_encoder_dp_set_phy_pattern(
 	struct link_encoder *enc,
 	const struct encoder_set_dp_phy_pattern_param *param)
 {
@@ -1503,7 +1503,6 @@ void dce110_link_encoder_update_mst_stream_allocation_table(
 	const struct link_mst_stream_allocation_table *table)
 {
 	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
-	uint32_t value0 = 0;
 	uint32_t value1 = 0;
 	uint32_t value2 = 0;
 	uint32_t slots = 0;
@@ -1604,7 +1603,7 @@ void dce110_link_encoder_update_mst_stream_allocation_table(
 	do {
 		udelay(10);
 
-		value0 = REG_READ(DP_MSE_SAT_UPDATE);
+		REG_READ(DP_MSE_SAT_UPDATE);
 
 		REG_GET(DP_MSE_SAT_UPDATE,
 				DP_MSE_SAT_UPDATE, &value1);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
index e459ae65aaf7641368eb3465caa078470820f83a..4600231da6cbb2637aa0469710ca1740ec604e33 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
@@ -97,7 +97,7 @@ enum {
 
 
 
-/**
+/*
  *	set_truncation
  *	1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
  *	2) enable truncation
@@ -142,7 +142,7 @@ static void set_truncation(
 }
 
 #if defined(CONFIG_DRM_AMD_DC_SI)
-/**
+/*
  *	dce60_set_truncation
  *	1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
  *	2) enable truncation
@@ -183,7 +183,7 @@ static void dce60_set_truncation(
 }
 #endif
 
-/**
+/*
  *	set_spatial_dither
  *	1) set spatial dithering mode: pattern of seed
  *	2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp
@@ -291,7 +291,7 @@ static void set_spatial_dither(
 		FMT_SPATIAL_DITHER_EN, 1);
 }
 
-/**
+/*
  *	SetTemporalDither (Frame Modulation)
  *	1) set temporal dither depth
  *	2) select pattern: from hard-coded pattern or programmable pattern
@@ -355,7 +355,7 @@ static void set_temporal_dither(
 		FMT_TEMPORAL_DITHER_EN, 1);
 }
 
-/**
+/*
  *	Set Clamping
  *	1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
  *		1 for 8 bpc
@@ -415,7 +415,7 @@ void dce110_opp_set_clamping(
 }
 
 #if defined(CONFIG_DRM_AMD_DC_SI)
-/**
+/*
  *	Set Clamping for DCE6 parts
  *	1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
  *		1 for 8 bpc
@@ -424,7 +424,7 @@ void dce110_opp_set_clamping(
  *		7 for programmable
  *	2) Enable clamp if Limited range requested
  */
-void dce60_opp_set_clamping(
+static void dce60_opp_set_clamping(
 	struct dce110_opp *opp110,
 	const struct clamping_and_pixel_encoding_params *params)
 {
@@ -465,7 +465,7 @@ void dce60_opp_set_clamping(
 }
 #endif
 
-/**
+/*
  *	set_pixel_encoding
  *
  *	Set Pixel Encoding
@@ -501,7 +501,7 @@ static void set_pixel_encoding(
 }
 
 #if defined(CONFIG_DRM_AMD_DC_SI)
-/**
+/*
  *	dce60_set_pixel_encoding
  *	DCE6 has no FMT_SUBSAMPLING_{MODE,ORDER} bits in FMT_CONTROL reg
  *	Set Pixel Encoding
@@ -545,7 +545,7 @@ void dce110_opp_program_bit_depth_reduction(
 }
 
 #if defined(CONFIG_DRM_AMD_DC_SI)
-void dce60_opp_program_bit_depth_reduction(
+static void dce60_opp_program_bit_depth_reduction(
 	struct output_pixel_processor *opp,
 	const struct bit_depth_reduction_params *params)
 {
@@ -568,7 +568,7 @@ void dce110_opp_program_clamping_and_pixel_encoding(
 }
 
 #if defined(CONFIG_DRM_AMD_DC_SI)
-void dce60_opp_program_clamping_and_pixel_encoding(
+static void dce60_opp_program_clamping_and_pixel_encoding(
 	struct output_pixel_processor *opp,
 	const struct clamping_and_pixel_encoding_params *params)
 {
@@ -678,7 +678,7 @@ void dce110_opp_program_fmt(
 }
 
 #if defined(CONFIG_DRM_AMD_DC_SI)
-void dce60_opp_program_fmt(
+static void dce60_opp_program_fmt(
 	struct output_pixel_processor *opp,
 	struct bit_depth_reduction_params *fmt_bit_depth,
 	struct clamping_and_pixel_encoding_params *clamping)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
index 4d484ef60f35727a41e56ea225ccdb337f617d28..bf1ffc3629c7feab74e462ee5e14a36d41010098 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
@@ -111,7 +111,6 @@ enum dce110_opp_reg_type {
 	OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
 	OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
 	OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
-	OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
 	OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
 	OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
 	OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
@@ -219,7 +218,6 @@ enum dce110_opp_reg_type {
 	OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
 	OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
 	OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
-	OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
 	OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
 	OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
 	OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
index 761fdfc1f5bd04effe3edfae9f6ee6379d6726bb..e9233923586316349952ad5dc9914be364044a74 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -50,16 +50,16 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c
 {
 	uint64_t current_backlight;
 	uint32_t round_result;
-	uint32_t pwm_period_cntl, bl_period, bl_int_count;
-	uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
+	uint32_t bl_period, bl_int_count;
+	uint32_t bl_pwm, fractional_duty_cycle_en;
 	uint32_t bl_period_mask, bl_pwm_mask;
 	struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
 
-	pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
+	REG_READ(BL_PWM_PERIOD_CNTL);
 	REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
 	REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
 
-	bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
+	REG_READ(BL_PWM_CNTL);
 	REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
 	REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index ada57f745fd764e87a325327446ce7c9b887778d..8d4263da59f2542c2df8f630dbe176ca9f82970c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -67,7 +67,6 @@ static void dce110_update_generic_info_packet(
 	uint32_t packet_index,
 	const struct dc_info_packet *info_packet)
 {
-	uint32_t regval;
 	/* TODOFPGA Figure out a proper number of max_retries when polling
 	 * for lock; use 50 for now.
 	 */
@@ -99,7 +98,7 @@ static void dce110_update_generic_info_packet(
 	}
 	/* choose which generic packet to use */
 	{
-		regval = REG_READ(AFMT_VBI_PACKET_CONTROL);
+		REG_READ(AFMT_VBI_PACKET_CONTROL);
 		REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
 				AFMT_GENERIC_INDEX, packet_index);
 	}
@@ -564,6 +563,7 @@ static void dce110_stream_encoder_hdmi_set_stream_attribute(
 	cntl.enable_dp_audio = enable_audio;
 	cntl.pixel_clock = actual_pix_clk_khz;
 	cntl.lanes_number = LANE_COUNT_FOUR;
+	cntl.color_depth = crtc_timing->display_color_depth;
 
 	if (enc110->base.bp->funcs->encoder_control(
 			enc110->base.bp, &cntl) != BP_RESULT_OK)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 130a0a0c8332918de5ce9541cad2cf4ca2537604..151dc7bf6d235676f196c2ca5837455499ffdfb6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -493,7 +493,6 @@ static void dce60_transform_set_scaler(
 {
 	struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
 	bool is_scaling_required;
-	bool filter_updated = false;
 	const uint16_t *coeffs_v, *coeffs_h;
 
 	/*Use whole line buffer memory always*/
@@ -558,7 +557,6 @@ static void dce60_transform_set_scaler(
 
 			xfm_dce->filter_v = coeffs_v;
 			xfm_dce->filter_h = coeffs_h;
-			filter_updated = true;
 		}
 	}
 
@@ -601,12 +599,12 @@ static void set_clamp(
 		clamp_max = 0x3FC0;
 		break;
 	case COLOR_DEPTH_101010:
-		/* 10bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
-		clamp_max = 0x3FFC;
+		/* 10bit MSB aligned on 14 bit bus '11 1111 1111 0000' */
+		clamp_max = 0x3FF0;
 		break;
 	case COLOR_DEPTH_121212:
-		/* 12bit MSB aligned on 14 bit bus '11 1111 1111 1111' */
-		clamp_max = 0x3FFF;
+		/* 12bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
+		clamp_max = 0x3FFC;
 		break;
 	default:
 		clamp_max = 0x3FC0;
@@ -1037,34 +1035,23 @@ static void dce60_transform_set_pixel_storage_depth(
 	const struct bit_depth_reduction_params *bit_depth_params)
 {
 	struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
-	int pixel_depth, expan_mode;
 	enum dc_color_depth color_depth;
 
 	switch (depth) {
 	case LB_PIXEL_DEPTH_18BPP:
 		color_depth = COLOR_DEPTH_666;
-		pixel_depth = 2;
-		expan_mode  = 1;
 		break;
 	case LB_PIXEL_DEPTH_24BPP:
 		color_depth = COLOR_DEPTH_888;
-		pixel_depth = 1;
-		expan_mode  = 1;
 		break;
 	case LB_PIXEL_DEPTH_30BPP:
 		color_depth = COLOR_DEPTH_101010;
-		pixel_depth = 0;
-		expan_mode  = 1;
 		break;
 	case LB_PIXEL_DEPTH_36BPP:
 		color_depth = COLOR_DEPTH_121212;
-		pixel_depth = 3;
-		expan_mode  = 0;
 		break;
 	default:
 		color_depth = COLOR_DEPTH_101010;
-		pixel_depth = 0;
-		expan_mode  = 1;
 		BREAK_TO_DEBUGGER();
 		break;
 	}
@@ -1113,7 +1100,7 @@ static void program_gamut_remap(
 
 }
 
-/**
+/*
  *****************************************************************************
  *  Function: dal_transform_wide_gamut_set_gamut_remap
  *
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index 0cf130dc4e520cae0310388b4290d12fbd6777df..453aaa5757bd003b90610dd331b051f858350c16 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -57,6 +57,7 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
 	union dmub_rb_cmd cmd;
 	uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 1 : 0;
 
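+	/* Zero-initialize the command so stale stack data is never sent to
+	 * the DMUB firmware.
+	 */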
+	memset(&cmd, 0, sizeof(cmd));
 	cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM;
 	cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC;
 	cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm;
@@ -135,6 +136,7 @@ static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
 	union dmub_rb_cmd cmd;
 	struct dc_context *dc = abm->ctx;
 
+	memset(&cmd, 0, sizeof(cmd));
 	cmd.abm_set_level.header.type = DMUB_CMD__ABM;
 	cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL;
 	cmd.abm_set_level.abm_set_level_data.level = level;
@@ -160,6 +162,7 @@ static bool dmub_abm_init_config(struct abm *abm,
 	// Copy iramtable into cw7
 	memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes);
 
+	memset(&cmd, 0, sizeof(cmd));
 	// Fw will copy from cw7 to fw_state
 	cmd.abm_init_config.header.type = DMUB_CMD__ABM;
 	cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index d399270fd17e05c33792483f37d2d5e3fc5294ce..c97ee5abc0efd9f442964bff80857a04d78d132c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -33,8 +33,9 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
 				union dmub_hw_lock_flags *hw_locks,
 				struct dmub_hw_lock_inst_flags *inst_flags)
 {
-	union dmub_rb_cmd cmd = { 0 };
+	union dmub_rb_cmd cmd;
 
+	memset(&cmd, 0, sizeof(cmd));
 	cmd.lock_hw.header.type = DMUB_CMD__HW_LOCK;
 	cmd.lock_hw.header.sub_type = 0;
 	cmd.lock_hw.header.payload_bytes = sizeof(struct dmub_cmd_lock_hw_data);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 17e84f34ceba1d8c708670ffb3c63f9c4702f7df..69e34bef274c1a28c177e0c5824247e2fc3524be 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -31,7 +31,7 @@
 
 #define MAX_PIPES 6
 
-/**
+/*
  * Convert dmcub psr state to dmcu psr state.
  */
 static enum dc_psr_state convert_psr_state(uint32_t raw_state)
@@ -74,7 +74,7 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
 	return state;
 }
 
-/**
+/*
  * Get PSR state from firmware.
  */
 static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
@@ -90,7 +90,7 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
 	*state = convert_psr_state(raw_state);
 }
 
-/**
+/*
  * Set PSR version.
  */
 static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *stream)
@@ -101,6 +101,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
 	if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
 		return false;
 
+	memset(&cmd, 0, sizeof(cmd));
 	cmd.psr_set_version.header.type = DMUB_CMD__PSR;
 	cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
 	switch (stream->link->psr_settings.psr_version) {
@@ -121,7 +122,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
 	return true;
 }
 
-/**
+/*
  * Enable/Disable PSR.
  */
 static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
@@ -131,7 +132,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
 	uint32_t retry_count;
 	enum dc_psr_state state = PSR_STATE0;
 
-
+	memset(&cmd, 0, sizeof(cmd));
 	cmd.psr_enable.header.type = DMUB_CMD__PSR;
 
 	if (enable)
@@ -170,7 +171,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
 	}
 }
 
-/**
+/*
  * Set PSR level.
  */
 static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
@@ -184,6 +185,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
 	if (state == PSR_STATE0)
 		return;
 
+	memset(&cmd, 0, sizeof(cmd));
 	cmd.psr_set_level.header.type = DMUB_CMD__PSR;
 	cmd.psr_set_level.header.sub_type = DMUB_CMD__PSR_SET_LEVEL;
 	cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);
@@ -194,7 +196,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
 	dc_dmub_srv_wait_idle(dc->dmub_srv);
 }
 
-/**
+/*
  * Setup PSR by programming phy registers and sending psr hw context values to firmware.
  */
 static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
@@ -233,6 +235,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
 	link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
 			psr_context->sdpTransmitLineNumDeadline);
 
+	memset(&cmd, 0, sizeof(cmd));
 	cmd.psr_copy_settings.header.type = DMUB_CMD__PSR;
 	cmd.psr_copy_settings.header.sub_type = DMUB_CMD__PSR_COPY_SETTINGS;
 	cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data);
@@ -277,7 +280,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
 	return true;
 }
 
-/**
+/*
  * Send command to PSR to force static ENTER and ignore all state changes until exit
  */
 static void dmub_psr_force_static(struct dmub_psr *dmub)
@@ -285,6 +288,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub)
 	union dmub_rb_cmd cmd;
 	struct dc_context *dc = dmub->ctx;
 
+	memset(&cmd, 0, sizeof(cmd));
 	cmd.psr_force_static.header.type = DMUB_CMD__PSR;
 	cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC;
 	cmd.psr_enable.header.payload_bytes = 0;
@@ -294,7 +298,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub)
 	dc_dmub_srv_wait_idle(dc->dmub_srv);
 }
 
-/**
+/*
  * Get PSR residency from firmware.
  */
 static void dmub_psr_get_residency(struct dmub_psr *dmub, uint32_t *residency)
@@ -316,7 +320,7 @@ static const struct dmub_psr_funcs psr_funcs = {
 	.psr_get_residency		= dmub_psr_get_residency,
 };
 
-/**
+/*
  * Construct PSR object.
  */
 static void dmub_psr_construct(struct dmub_psr *psr, struct dc_context *ctx)
@@ -325,7 +329,7 @@ static void dmub_psr_construct(struct dmub_psr *psr, struct dc_context *ctx)
 	psr->funcs = &psr_funcs;
 }
 
-/**
+/*
  * Allocate and initialize PSR object.
  */
 struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
@@ -342,7 +346,7 @@ struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
 	return psr;
 }
 
-/**
+/*
  * Deallocate PSR object.
  */
 void dmub_psr_destroy(struct dmub_psr **dmub)
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/Makefile b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
index a822d4e2a1693881f090d776f6fbc11ab68d5f24..ff20c47f559e309fe0b2c7a689e129d091f49d66 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
@@ -23,6 +23,8 @@
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
+CFLAGS_$(AMDDALPATH)/dc/dce100/dce100_resource.o = $(call cc-disable-warning, override-init)
+
 DCE100 = dce100_resource.o dce100_hw_sequencer.o
 
 AMD_DAL_DCE100 = $(addprefix $(AMDDALPATH)/dc/dce100/,$(DCE100))
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 8ab9d6c7980817e01e9edef156f6cf67080b00ef..635ef0e7c7826deb152b81329aba91b292ce4858 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -58,6 +58,8 @@
 #include "dce/dce_abm.h"
 #include "dce/dce_i2c.h"
 
+#include "dce100_resource.h"
+
 #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
 #include "gmc/gmc_8_2_d.h"
 #include "gmc/gmc_8_2_sh_mask.h"
@@ -385,7 +387,7 @@ static const struct dc_plane_cap plane_cap = {
 	.pixel_format_support = {
 			.argb8888 = true,
 			.nv12 = false,
-			.fp16 = false
+			.fp16 = true
 	},
 
 	.max_upscale_factor = {
@@ -611,7 +613,7 @@ static const struct encoder_feature_support link_enc_feature = {
 		.flags.bits.IS_TPS3_CAPABLE = true
 };
 
-struct link_encoder *dce100_link_encoder_create(
+static struct link_encoder *dce100_link_encoder_create(
 	const struct encoder_init_data *enc_init_data)
 {
 	struct dce110_link_encoder *enc110 =
@@ -650,7 +652,7 @@ static struct panel_cntl *dce100_panel_cntl_create(const struct panel_cntl_init_
 	return &panel_cntl->base;
 }
 
-struct output_pixel_processor *dce100_opp_create(
+static struct output_pixel_processor *dce100_opp_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -665,7 +667,7 @@ struct output_pixel_processor *dce100_opp_create(
 	return &opp->base;
 }
 
-struct dce_aux *dce100_aux_engine_create(
+static struct dce_aux *dce100_aux_engine_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -703,7 +705,7 @@ static const struct dce_i2c_mask i2c_masks = {
 		I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
-struct dce_i2c_hw *dce100_i2c_hw_create(
+static struct dce_i2c_hw *dce100_i2c_hw_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -718,7 +720,7 @@ struct dce_i2c_hw *dce100_i2c_hw_create(
 
 	return dce_i2c_hw;
 }
-struct clock_source *dce100_clock_source_create(
+static struct clock_source *dce100_clock_source_create(
 	struct dc_context *ctx,
 	struct dc_bios *bios,
 	enum clock_source_id id,
@@ -742,7 +744,7 @@ struct clock_source *dce100_clock_source_create(
 	return NULL;
 }
 
-void dce100_clock_source_destroy(struct clock_source **clk_src)
+static void dce100_clock_source_destroy(struct clock_source **clk_src)
 {
 	kfree(TO_DCE110_CLK_SRC(*clk_src));
 	*clk_src = NULL;
@@ -831,7 +833,7 @@ static enum dc_status build_mapped_resource(
 	return DC_OK;
 }
 
-bool dce100_validate_bandwidth(
+static bool dce100_validate_bandwidth(
 	struct dc  *dc,
 	struct dc_state *context,
 	bool fast_validate)
@@ -876,7 +878,7 @@ static bool dce100_validate_surface_sets(
 	return true;
 }
 
-enum dc_status dce100_validate_global(
+static enum dc_status dce100_validate_global(
 		struct dc  *dc,
 		struct dc_state *context)
 {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
index d564c0eb8b045393ae235c6169b6c85b6af94693..84ab48df0c26178c47a1aea3529657fbad19a4ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -23,6 +23,8 @@
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
+CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init)
+
 DCE110 = dce110_timing_generator.o \
 dce110_compressor.o dce110_hw_sequencer.o dce110_resource.o \
 dce110_opp_regamma_v.o dce110_opp_csc_v.o dce110_timing_generator_v.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 72b580a4eb856830b278cd917e56f3cd36453ae9..44564a4742b52331cea63bd7ae3143623edfe4cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -412,36 +412,6 @@ void dce110_compressor_destroy(struct compressor **compressor)
 	*compressor = NULL;
 }
 
-bool dce110_get_required_compressed_surfacesize(struct fbc_input_info fbc_input_info,
-						struct fbc_requested_compressed_size size)
-{
-	bool result = false;
-
-	unsigned int max_x = FBC_MAX_X, max_y = FBC_MAX_Y;
-
-	get_max_support_fbc_buffersize(&max_x, &max_y);
-
-	if (fbc_input_info.dynamic_fbc_buffer_alloc == 0) {
-		/*
-		 * For DCE11 here use Max HW supported size:  HW Support up to 3840x2400 resolution
-		 * or 18000 chunks.
-		 */
-		size.preferred_size = size.min_size = align_to_chunks_number_per_line(max_x) * max_y * 4;  /* (For FBC when LPT not supported). */
-		size.preferred_size_alignment = size.min_size_alignment = 0x100;       /* For FBC when LPT not supported */
-		size.bits.preferred_must_be_framebuffer_pool = 1;
-		size.bits.min_must_be_framebuffer_pool = 1;
-
-		result = true;
-	}
-	/*
-	 * Maybe to add registry key support with optional size here to override above
-	 * for debugging purposes
-	 */
-
-	return result;
-}
-
-
 void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y)
 {
 	*max_x = FBC_MAX_X;
@@ -455,31 +425,6 @@ void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y)
 	 */
 }
 
-
-unsigned int controller_id_to_index(enum controller_id controller_id)
-{
-	unsigned int index = 0;
-
-	switch (controller_id) {
-	case CONTROLLER_ID_D0:
-		index = 0;
-		break;
-	case CONTROLLER_ID_D1:
-		index = 1;
-		break;
-	case CONTROLLER_ID_D2:
-		index = 2;
-		break;
-	case CONTROLLER_ID_D3:
-		index = 3;
-		break;
-	default:
-		break;
-	}
-	return index;
-}
-
-
 static const struct compressor_funcs dce110_compressor_funcs = {
 	.power_up_fbc = dce110_compressor_power_up_fbc,
 	.enable_fbc = dce110_compressor_enable_fbc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 4c230f1de9a30e7daf1c1c828036213aa4cdb7c8..caee1c9f54bdb2c873913b5632efc2fc9de384fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -921,6 +921,37 @@ void dce110_edp_power_control(
 	}
 }
 
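+/* Honor the eDP T12 delay: if the panel was recently powered off, sleep
+ * for whatever remains of the (possibly patched) T12 interval before it
+ * can be powered up again.
+ */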
+void dce110_edp_wait_for_T12(
+		struct dc_link *link)
+{
+	struct dc_context *ctx = link->ctx;
+
+	if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
+			!= CONNECTOR_ID_EDP) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	if (!link->panel_cntl)
+		return;
+
+	if (!link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl) &&
+			link->link_trace.time_stamp.edp_poweroff != 0) {
+		unsigned int t12_duration = 500; // Default T12 as per spec
+		unsigned long long current_ts = dm_get_timestamp(ctx);
+		unsigned long long time_since_edp_poweroff_ms =
+				div64_u64(dm_get_elapse_time_in_ns(
+						ctx,
+						current_ts,
+						link->link_trace.time_stamp.edp_poweroff), 1000000);
+
+		t12_duration += link->local_sink->edid_caps.panel_patch.extra_t12_ms; // Add extra T12
+
+		if (time_since_edp_poweroff_ms < t12_duration)
+			msleep(t12_duration - time_since_edp_poweroff_ms);
+	}
+}
+
 /*todo: cloned in stream enc, fix*/
 /*
  * @brief
@@ -1628,7 +1659,7 @@ static struct dc_link *get_edp_link_with_sink(
 	return link;
 }
 
-/**
+/*
  * When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need:
  *  1. Power down all DC HW blocks
  *  2. Disable VGA engine on all controllers
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index d54172d88f5f3b826f917560a286b2f6d75a5579..8bbb499067f747a168178e54bb5859200dd8d65f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -34,6 +34,7 @@
 #include "inc/dce_calcs.h"
 
 #include "dce/dce_mem_input.h"
+#include "dce110_mem_input_v.h"
 
 static void set_flip_control(
 	struct dce_mem_input *mem_input110,
@@ -468,7 +469,7 @@ static void program_pixel_format(
 	}
 }
 
-bool dce_mem_input_v_is_surface_pending(struct mem_input *mem_input)
+static bool dce_mem_input_v_is_surface_pending(struct mem_input *mem_input)
 {
 	struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
 	uint32_t value;
@@ -483,7 +484,7 @@ bool dce_mem_input_v_is_surface_pending(struct mem_input *mem_input)
 	return false;
 }
 
-bool dce_mem_input_v_program_surface_flip_and_addr(
+static bool dce_mem_input_v_program_surface_flip_and_addr(
 	struct mem_input *mem_input,
 	const struct dc_plane_address *address,
 	bool flip_immediate)
@@ -560,7 +561,7 @@ static const unsigned int *get_dvmm_hw_setting(
 	}
 }
 
-void dce_mem_input_v_program_pte_vm(
+static void dce_mem_input_v_program_pte_vm(
 		struct mem_input *mem_input,
 		enum surface_pixel_format format,
 		union dc_tiling_info *tiling_info,
@@ -633,7 +634,7 @@ void dce_mem_input_v_program_pte_vm(
 	dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL_C, value);
 }
 
-void dce_mem_input_v_program_surface_config(
+static void dce_mem_input_v_program_surface_config(
 	struct mem_input *mem_input,
 	enum surface_pixel_format format,
 	union dc_tiling_info *tiling_info,
@@ -919,7 +920,7 @@ static void program_nbp_watermark_c(
 			marks);
 }
 
-void dce_mem_input_v_program_display_marks(
+static void dce_mem_input_v_program_display_marks(
 	struct mem_input *mem_input,
 	struct dce_watermarks nbp,
 	struct dce_watermarks stutter,
@@ -942,7 +943,7 @@ void dce_mem_input_v_program_display_marks(
 
 }
 
-void dce_mem_input_program_chroma_display_marks(
+static void dce_mem_input_program_chroma_display_marks(
 	struct mem_input *mem_input,
 	struct dce_watermarks nbp,
 	struct dce_watermarks stutter,
@@ -963,7 +964,7 @@ void dce_mem_input_program_chroma_display_marks(
 		stutter);
 }
 
-void dce110_allocate_mem_input_v(
+static void dce110_allocate_mem_input_v(
 	struct mem_input *mi,
 	uint32_t h_total,/* for current stream */
 	uint32_t v_total,/* for current stream */
@@ -1005,7 +1006,7 @@ void dce110_allocate_mem_input_v(
 
 }
 
-void dce110_free_mem_input_v(
+static void dce110_free_mem_input_v(
 	struct mem_input *mi,
 	uint32_t total_stream_num)
 {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 3f63822b8e28c6f7b696eb938ce952b298d46a7e..d7fcc5cccdce11a6f7a8d65f3db108916754618d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -410,7 +410,7 @@ static const struct dc_plane_cap plane_cap = {
 		.pixel_format_support = {
 				.argb8888 = true,
 				.nv12 = false,
-				.fp16 = false
+				.fp16 = true
 		},
 
 		.max_upscale_factor = {
@@ -715,7 +715,7 @@ static struct output_pixel_processor *dce110_opp_create(
 	return &opp->base;
 }
 
-struct dce_aux *dce110_aux_engine_create(
+static struct dce_aux *dce110_aux_engine_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -753,7 +753,7 @@ static const struct dce_i2c_mask i2c_masks = {
 		I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
 };
 
-struct dce_i2c_hw *dce110_i2c_hw_create(
+static struct dce_i2c_hw *dce110_i2c_hw_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -768,7 +768,7 @@ struct dce_i2c_hw *dce110_i2c_hw_create(
 
 	return dce_i2c_hw;
 }
-struct clock_source *dce110_clock_source_create(
+static struct clock_source *dce110_clock_source_create(
 	struct dc_context *ctx,
 	struct dc_bios *bios,
 	enum clock_source_id id,
@@ -792,7 +792,7 @@ struct clock_source *dce110_clock_source_create(
 	return NULL;
 }
 
-void dce110_clock_source_destroy(struct clock_source **clk_src)
+static void dce110_clock_source_destroy(struct clock_source **clk_src)
 {
 	struct dce110_clk_src *dce110_clk_src;
 
@@ -1034,8 +1034,8 @@ static bool dce110_validate_bandwidth(
 	return result;
 }
 
-enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
-				     struct dc_caps *caps)
+static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
+					    struct dc_caps *caps)
 {
 	if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) ||
 	    ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height))
@@ -1089,7 +1089,7 @@ static bool dce110_validate_surface_sets(
 	return true;
 }
 
-enum dc_status dce110_validate_global(
+static enum dc_status dce110_validate_global(
 		struct dc *dc,
 		struct dc_state *context)
 {
@@ -1272,7 +1272,6 @@ static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool)
 
 	/* update the public caps to indicate an underlay is available */
 	ctx->dc->caps.max_slave_planes = 1;
-	ctx->dc->caps.max_slave_planes = 1;
 
 	return true;
 }
@@ -1333,7 +1332,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 		1000);
 }
 
-const struct resource_caps *dce110_resource_cap(
+static const struct resource_caps *dce110_resource_cap(
 	struct hw_asic_id *asic_id)
 {
 	if (ASIC_REV_IS_STONEY(asic_id->hw_internal_rev))
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 1ea7db8eeb988469fd13fe4d3ca4be2f94cf1a03..d88a74559edd75ae59a10b248aefd95cc2455460 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -75,7 +75,7 @@ static void dce110_timing_generator_apply_front_porch_workaround(
 	}
 }
 
-/**
+/*
  *****************************************************************************
  *  Function: is_in_vertical_blank
  *
@@ -116,7 +116,7 @@ void dce110_timing_generator_set_early_control(
 	dm_write_reg(tg->ctx, address, regval);
 }
 
-/**
+/*
  * Enable CRTC
  * Enable CRTC - call ASIC Control Object to enable Timing generator.
  */
@@ -175,7 +175,7 @@ void dce110_timing_generator_program_blank_color(
 	dm_write_reg(tg->ctx, addr, value);
 }
 
-/**
+/*
  *****************************************************************************
  *  Function: disable_stereo
  *
@@ -226,7 +226,7 @@ static void disable_stereo(struct timing_generator *tg)
 }
 #endif
 
-/**
+/*
  * disable_crtc - call ASIC Control Object to disable Timing generator.
  */
 bool dce110_timing_generator_disable_crtc(struct timing_generator *tg)
@@ -247,11 +247,10 @@ bool dce110_timing_generator_disable_crtc(struct timing_generator *tg)
 	return result == BP_RESULT_OK;
 }
 
-/**
-* program_horz_count_by_2
-* Programs DxCRTC_HORZ_COUNT_BY2_EN - 1 for DVI 30bpp mode, 0 otherwise
-*
-*/
+/*
+ * program_horz_count_by_2
+ * Programs DxCRTC_HORZ_COUNT_BY2_EN: 1 for DVI 30bpp mode, 0 otherwise
+ */
 static void program_horz_count_by_2(
 	struct timing_generator *tg,
 	const struct dc_crtc_timing *timing)
@@ -273,7 +272,7 @@ static void program_horz_count_by_2(
 			CRTC_REG(mmCRTC_COUNT_CONTROL), regval);
 }
 
-/**
+/*
  * program_timing_generator
  * Program CRTC Timing Registers - DxCRTC_H_*, DxCRTC_V_*, Pixel repetition.
  * Call ASIC Control Object to program Timings.
@@ -352,7 +351,7 @@ bool dce110_timing_generator_program_timing_generator(
 	return result == BP_RESULT_OK;
 }
 
-/**
+/*
  *****************************************************************************
  *  Function: set_drr
  *
@@ -521,7 +520,7 @@ uint32_t dce110_timing_generator_get_vblank_counter(struct timing_generator *tg)
 	return field;
 }
 
-/**
+/*
  *****************************************************************************
  *  Function: dce110_timing_generator_get_position
  *
@@ -557,7 +556,7 @@ void dce110_timing_generator_get_position(struct timing_generator *tg,
 			CRTC_VERT_COUNT_NOM);
 }
 
-/**
+/*
  *****************************************************************************
  *  Function: get_crtc_scanoutpos
  *
@@ -1106,11 +1105,11 @@ void dce110_timing_generator_set_test_pattern(
 	}
 }
 
-/**
-* dce110_timing_generator_validate_timing
-* The timing generators support a maximum display size of is 8192 x 8192 pixels,
-* including both active display and blanking periods. Check H Total and V Total.
-*/
+/*
+ * dce110_timing_generator_validate_timing
+ * The timing generators support a maximum display size of 8192 x 8192 pixels,
+ * including both active display and blanking periods. Check H Total and V Total.
+ */
 bool dce110_timing_generator_validate_timing(
 	struct timing_generator *tg,
 	const struct dc_crtc_timing *timing,
@@ -1167,9 +1166,9 @@ bool dce110_timing_generator_validate_timing(
 	return true;
 }
 
-/**
-* Wait till we are at the beginning of VBlank.
-*/
+/*
+ * Wait till we are at the beginning of VBlank.
+ */
 void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg)
 {
 	/* We want to catch beginning of VBlank here, so if the first try are
@@ -1191,9 +1190,9 @@ void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg)
 	}
 }
 
-/**
-* Wait till we are in VActive (anywhere in VActive)
-*/
+/*
+ * Wait till we are in VActive (anywhere in VActive)
+ */
 void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
 {
 	while (dce110_timing_generator_is_in_vertical_blank(tg)) {
@@ -1204,7 +1203,7 @@ void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
 	}
 }
 
-/**
+/*
  *****************************************************************************
  *  Function: dce110_timing_generator_setup_global_swap_lock
  *
@@ -1215,7 +1214,6 @@ void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
  *  @param [in] gsl_params: setup data
  *****************************************************************************
  */
-
 void dce110_timing_generator_setup_global_swap_lock(
 	struct timing_generator *tg,
 	const struct dcp_gsl_params *gsl_params)
@@ -1351,10 +1349,7 @@ void dce110_timing_generator_tear_down_global_swap_lock(
 
 	/* Restore DCP_GSL_PURPOSE_SURFACE_FLIP */
 	{
-		uint32_t value_crtc_vtotal;
-
-		value_crtc_vtotal = dm_read_reg(tg->ctx,
-				CRTC_REG(mmCRTC_V_TOTAL));
+		dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_V_TOTAL));
 
 		set_reg_field_value(value,
 				0,
@@ -1385,7 +1380,7 @@ void dce110_timing_generator_tear_down_global_swap_lock(
 
 	dm_write_reg(tg->ctx, address, value);
 }
-/**
+/*
  *****************************************************************************
  *  Function: is_counter_moving
  *
@@ -1767,7 +1762,7 @@ void dce110_timing_generator_disable_reset_trigger(
 	dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);
 }
 
-/**
+/*
  *****************************************************************************
  *  @brief
  *     Checks whether CRTC triggered reset occurred
@@ -1794,7 +1789,7 @@ bool dce110_timing_generator_did_triggered_reset_occur(
 	return (force || vert_sync);
 }
 
-/**
+/*
  * dce110_timing_generator_disable_vga
  * Turn OFF VGA Mode and Timing  - DxVGA_CONTROL
  * VGA Mode and VGA Timing is used by VBIOS on CRT Monitors;
@@ -1840,14 +1835,13 @@ void dce110_timing_generator_disable_vga(
 	dm_write_reg(tg->ctx, addr, value);
 }
 
-/**
-* set_overscan_color_black
-*
-* @param :black_color is one of the color space
-*    :this routine will set overscan black color according to the color space.
-* @return none
-*/
-
+/*
+ * set_overscan_color_black
+ *
+ * @param black_color: one of the color space values; this routine will set
+ *    the overscan black color according to the color space.
+ * @return none
+ */
 void dce110_timing_generator_set_overscan_color_black(
 	struct timing_generator *tg,
 	const struct tg_color *color)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
index a13a2f58944e3d99861405dbec30acf3cf85b0ef..c509384fff5439b22ba8484cf72f3ee176526732 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -46,17 +46,16 @@
  *
  **********************************************************************************/
 
-/**
-* Enable CRTCV
-*/
+/*
+ * Enable CRTCV
+ */
 
 static bool dce110_timing_generator_v_enable_crtc(struct timing_generator *tg)
 {
 /*
-* Set MASTER_UPDATE_MODE to 0
-* This is needed for DRR, and also suggested to be default value by Syed.
-*/
-
+ * Set MASTER_UPDATE_MODE to 0
+ * This is needed for DRR, and also suggested to be default value by Syed.
+ */
 	uint32_t value;
 
 	value = 0;
@@ -209,9 +208,9 @@ static void dce110_timing_generator_v_wait_for_vblank(struct timing_generator *t
 	}
 }
 
-/**
-* Wait till we are in VActive (anywhere in VActive)
-*/
+/*
+ * Wait till we are in VActive (anywhere in VActive)
+ */
 static void dce110_timing_generator_v_wait_for_vactive(struct timing_generator *tg)
 {
 	while (dce110_timing_generator_v_is_in_vertical_blank(tg)) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
index b1aaab5590cc6cd185dd9adec41f83dfe4cf765b..29438c6050dbbf5529f88ea05545d9d69f34eb0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -217,16 +217,15 @@ static bool setup_scaling_configuration(
 	return is_scaling_needed;
 }
 
-/**
-* Function:
-* void program_overscan
-*
-* Purpose: Programs overscan border
-* Input:   overscan
-*
-* Output:
-   void
-*/
+/*
+ * Function:
+ * void program_overscan
+ *
+ * Purpose: Programs overscan border
+ * Input:   overscan
+ *
+ * Output: void
+ */
 static void program_overscan(
 		struct dce_transform *xfm_dce,
 		const struct scaler_data *data)
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
index 8e090446d511937361db77ed56471a44d6ed897b..9de6501702d2c559e5ad6f195ce8c4aa20be662f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -23,6 +23,8 @@
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
+CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init)
+
 DCE112 = dce112_compressor.o dce112_hw_sequencer.o \
 dce112_resource.o
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index f99b1c0845908819ce7c28ee15189a70ea185c0f..ee55cda854bf41178db799e7cf6e152b15c99486 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -59,7 +59,9 @@
 #include "dce/dce_11_2_sh_mask.h"
 
 #include "dce100/dce100_resource.h"
-#define DC_LOGGER \
+#include "dce112_resource.h"
+
+#define DC_LOGGER				\
 		dc->ctx->logger
 
 #ifndef mmDP_DPHY_INTERNAL_CTRL
@@ -617,7 +619,7 @@ static const struct encoder_feature_support link_enc_feature = {
 		.flags.bits.IS_TPS4_CAPABLE = true
 };
 
-struct link_encoder *dce112_link_encoder_create(
+static struct link_encoder *dce112_link_encoder_create(
 	const struct encoder_init_data *enc_init_data)
 {
 	struct dce110_link_encoder *enc110 =
@@ -671,7 +673,7 @@ static struct input_pixel_processor *dce112_ipp_create(
 	return &ipp->base;
 }
 
-struct output_pixel_processor *dce112_opp_create(
+static struct output_pixel_processor *dce112_opp_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -686,7 +688,7 @@ struct output_pixel_processor *dce112_opp_create(
 	return &opp->base;
 }
 
-struct dce_aux *dce112_aux_engine_create(
+static struct dce_aux *dce112_aux_engine_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -724,7 +726,7 @@ static const struct dce_i2c_mask i2c_masks = {
 		I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
 };
 
-struct dce_i2c_hw *dce112_i2c_hw_create(
+static struct dce_i2c_hw *dce112_i2c_hw_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -739,7 +741,7 @@ struct dce_i2c_hw *dce112_i2c_hw_create(
 
 	return dce_i2c_hw;
 }
-struct clock_source *dce112_clock_source_create(
+static struct clock_source *dce112_clock_source_create(
 	struct dc_context *ctx,
 	struct dc_bios *bios,
 	enum clock_source_id id,
@@ -763,7 +765,7 @@ struct clock_source *dce112_clock_source_create(
 	return NULL;
 }
 
-void dce112_clock_source_destroy(struct clock_source **clk_src)
+static void dce112_clock_source_destroy(struct clock_source **clk_src)
 {
 	kfree(TO_DCE110_CLK_SRC(*clk_src));
 	*clk_src = NULL;
@@ -1024,7 +1026,7 @@ enum dc_status dce112_add_stream_to_ctx(
 	return result;
 }
 
-enum dc_status dce112_validate_global(
+static enum dc_status dce112_validate_global(
 		struct dc *dc,
 		struct dc_state *context)
 {
@@ -1202,7 +1204,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 	dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
 }
 
-const struct resource_caps *dce112_resource_cap(
+static const struct resource_caps *dce112_resource_cap(
 	struct hw_asic_id *asic_id)
 {
 	if (ASIC_REV_IS_POLARIS11_M(asic_id->hw_internal_rev) ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
index 37db1f8d45ea547862b07b2503c4a55d6485c8ff..a9cc4b73270bb22330fd1af9fe48940cfdeecbb8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
@@ -24,6 +24,8 @@
 # It provides the control and status of HW CRTC block.
 
 
+CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init)
+
 DCE120 = dce120_resource.o dce120_timing_generator.o \
 dce120_hw_sequencer.o
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 66a13aa39c951ddcf696e6e44694c022d7b75f7c..d4afe6c824d2cb75974056f11ff21f874bdfb05b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -50,6 +50,7 @@ struct dce120_hw_seq_reg_offsets {
 	uint32_t crtc;
 };
 
+#if 0
 static const struct dce120_hw_seq_reg_offsets reg_offsets[] = {
 {
 	.crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
@@ -79,7 +80,6 @@ static const struct dce120_hw_seq_reg_offsets reg_offsets[] = {
 /*******************************************************************************
  * Private definitions
  ******************************************************************************/
-#if 0
 static void dce120_init_pte(struct dc_context *ctx, uint8_t controller_id)
 {
 	uint32_t addr;
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index f1e3d2888eacc2887e64663fad5dc11aaa2c0314..c65e4d125c8e2e00ac5ea12aaae62695c0f3b506 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -423,7 +423,7 @@ static const struct dce110_clk_src_mask cs_mask = {
 		CS_COMMON_MASK_SH_LIST_DCE_112(_MASK)
 };
 
-struct output_pixel_processor *dce120_opp_create(
+static struct output_pixel_processor *dce120_opp_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -437,7 +437,7 @@ struct output_pixel_processor *dce120_opp_create(
 			     ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
 	return &opp->base;
 }
-struct dce_aux *dce120_aux_engine_create(
+static struct dce_aux *dce120_aux_engine_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -475,7 +475,7 @@ static const struct dce_i2c_mask i2c_masks = {
 		I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
 };
 
-struct dce_i2c_hw *dce120_i2c_hw_create(
+static struct dce_i2c_hw *dce120_i2c_hw_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 915fbb8e8168c54472f46ee35bc8978affbd834a..b57c466124e765c2543aba8bdd7cca253da148b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -69,7 +69,7 @@
 #define CRTC_REG_SET_3(reg, field1, val1, field2, val2, field3, val3)	\
 		CRTC_REG_SET_N(reg, 3, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3)
 
-/**
+/*
  *****************************************************************************
  *  Function: is_in_vertical_blank
  *
@@ -98,7 +98,7 @@ static bool dce120_timing_generator_is_in_vertical_blank(
 
 
 /* determine if given timing can be supported by TG */
-bool dce120_timing_generator_validate_timing(
+static bool dce120_timing_generator_validate_timing(
 	struct timing_generator *tg,
 	const struct dc_crtc_timing *timing,
 	enum signal_type signal)
@@ -125,7 +125,7 @@ bool dce120_timing_generator_validate_timing(
 	return true;
 }
 
-bool dce120_tg_validate_timing(struct timing_generator *tg,
+static bool dce120_tg_validate_timing(struct timing_generator *tg,
 	const struct dc_crtc_timing *timing)
 {
 	return dce120_timing_generator_validate_timing(tg, timing, SIGNAL_TYPE_NONE);
@@ -133,7 +133,7 @@ bool dce120_tg_validate_timing(struct timing_generator *tg,
 
 /******** HW programming ************/
 /* Disable/Enable Timing Generator */
-bool dce120_timing_generator_enable_crtc(struct timing_generator *tg)
+static bool dce120_timing_generator_enable_crtc(struct timing_generator *tg)
 {
 	enum bp_result result;
 	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -153,7 +153,7 @@ bool dce120_timing_generator_enable_crtc(struct timing_generator *tg)
 	return result == BP_RESULT_OK;
 }
 
-void dce120_timing_generator_set_early_control(
+static void dce120_timing_generator_set_early_control(
 		struct timing_generator *tg,
 		uint32_t early_cntl)
 {
@@ -166,7 +166,7 @@ void dce120_timing_generator_set_early_control(
 /**************** TG current status ******************/
 
 /* return the current frame counter. Used by Linux kernel DRM */
-uint32_t dce120_timing_generator_get_vblank_counter(
+static uint32_t dce120_timing_generator_get_vblank_counter(
 		struct timing_generator *tg)
 {
 	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -181,7 +181,7 @@ uint32_t dce120_timing_generator_get_vblank_counter(
 }
 
 /* Get current H and V position */
-void dce120_timing_generator_get_crtc_position(
+static void dce120_timing_generator_get_crtc_position(
 	struct timing_generator *tg,
 	struct crtc_position *position)
 {
@@ -207,7 +207,7 @@ void dce120_timing_generator_get_crtc_position(
 }
 
 /* wait until TG is in beginning of vertical blank region */
-void dce120_timing_generator_wait_for_vblank(struct timing_generator *tg)
+static void dce120_timing_generator_wait_for_vblank(struct timing_generator *tg)
 {
 	/* We want to catch beginning of VBlank here, so if the first try are
 	 * in VBlank, we might be very close to Active, in this case wait for
@@ -229,7 +229,7 @@ void dce120_timing_generator_wait_for_vblank(struct timing_generator *tg)
 }
 
 /* wait until TG is in beginning of active region */
-void dce120_timing_generator_wait_for_vactive(struct timing_generator *tg)
+static void dce120_timing_generator_wait_for_vactive(struct timing_generator *tg)
 {
 	while (dce120_timing_generator_is_in_vertical_blank(tg)) {
 		if (!tg->funcs->is_counter_moving(tg)) {
@@ -242,7 +242,7 @@ void dce120_timing_generator_wait_for_vactive(struct timing_generator *tg)
 /*********** Timing Generator Synchronization routines ****/
 
 /* Setups Global Swap Lock group, TimingServer or TimingClient*/
-void dce120_timing_generator_setup_global_swap_lock(
+static void dce120_timing_generator_setup_global_swap_lock(
 	struct timing_generator *tg,
 	const struct dcp_gsl_params *gsl_params)
 {
@@ -279,7 +279,7 @@ void dce120_timing_generator_setup_global_swap_lock(
 }
 
 /* Clear all the register writes done by setup_global_swap_lock */
-void dce120_timing_generator_tear_down_global_swap_lock(
+static void dce120_timing_generator_tear_down_global_swap_lock(
 	struct timing_generator *tg)
 {
 	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -300,7 +300,7 @@ void dce120_timing_generator_tear_down_global_swap_lock(
 }
 
 /* Reset slave controllers on master VSync */
-void dce120_timing_generator_enable_reset_trigger(
+static void dce120_timing_generator_enable_reset_trigger(
 	struct timing_generator *tg,
 	int source)
 {
@@ -347,7 +347,7 @@ void dce120_timing_generator_enable_reset_trigger(
 }
 
 /* disabling trigger-reset */
-void dce120_timing_generator_disable_reset_trigger(
+static void dce120_timing_generator_disable_reset_trigger(
 	struct timing_generator *tg)
 {
 	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -367,7 +367,7 @@ void dce120_timing_generator_disable_reset_trigger(
 }
 
 /* Checks whether CRTC triggered reset occurred */
-bool dce120_timing_generator_did_triggered_reset_occur(
+static bool dce120_timing_generator_did_triggered_reset_occur(
 	struct timing_generator *tg)
 {
 	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -384,7 +384,7 @@ bool dce120_timing_generator_did_triggered_reset_occur(
 
 /******** Stuff to move to other virtual HW objects *****************/
 /* Move to enable accelerated mode */
-void dce120_timing_generator_disable_vga(struct timing_generator *tg)
+static void dce120_timing_generator_disable_vga(struct timing_generator *tg)
 {
 	uint32_t offset = 0;
 	uint32_t value = 0;
@@ -425,7 +425,7 @@ void dce120_timing_generator_disable_vga(struct timing_generator *tg)
 }
 /* TODO: Should we move it to transform */
 /* Fully program CRTC timing in timing generator */
-void dce120_timing_generator_program_blanking(
+static void dce120_timing_generator_program_blanking(
 	struct timing_generator *tg,
 	const struct dc_crtc_timing *timing)
 {
@@ -485,7 +485,7 @@ void dce120_timing_generator_program_blanking(
 
 /* TODO: Should we move it to opp? */
 /* Combine with below and move YUV/RGB color conversion to SW layer */
-void dce120_timing_generator_program_blank_color(
+static void dce120_timing_generator_program_blank_color(
 	struct timing_generator *tg,
 	const struct tg_color *black_color)
 {
@@ -498,7 +498,7 @@ void dce120_timing_generator_program_blank_color(
 		CRTC_BLACK_COLOR_R_CR, black_color->color_r_cr);
 }
 /* Combine with above and move YUV/RGB color conversion to SW layer */
-void dce120_timing_generator_set_overscan_color_black(
+static void dce120_timing_generator_set_overscan_color_black(
 	struct timing_generator *tg,
 	const struct tg_color *color)
 {
@@ -540,7 +540,7 @@ void dce120_timing_generator_set_overscan_color_black(
 	 */
 }
 
-void dce120_timing_generator_set_drr(
+static void dce120_timing_generator_set_drr(
 	struct timing_generator *tg,
 	const struct drr_params *params)
 {
@@ -589,50 +589,7 @@ void dce120_timing_generator_set_drr(
 	}
 }
 
-/**
- *****************************************************************************
- *  Function: dce120_timing_generator_get_position
- *
- *  @brief
- *     Returns CRTC vertical/horizontal counters
- *
- *  @param [out] position
- *****************************************************************************
- */
-void dce120_timing_generator_get_position(struct timing_generator *tg,
-	struct crtc_position *position)
-{
-	uint32_t value;
-	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
-
-	value = dm_read_reg_soc15(
-			tg->ctx,
-			mmCRTC0_CRTC_STATUS_POSITION,
-			tg110->offsets.crtc);
-
-	position->horizontal_count = get_reg_field_value(
-			value,
-			CRTC0_CRTC_STATUS_POSITION,
-			CRTC_HORZ_COUNT);
-
-	position->vertical_count = get_reg_field_value(
-			value,
-			CRTC0_CRTC_STATUS_POSITION,
-			CRTC_VERT_COUNT);
-
-	value = dm_read_reg_soc15(
-			tg->ctx,
-			mmCRTC0_CRTC_NOM_VERT_POSITION,
-			tg110->offsets.crtc);
-
-	position->nominal_vcount = get_reg_field_value(
-			value,
-			CRTC0_CRTC_NOM_VERT_POSITION,
-			CRTC_VERT_COUNT_NOM);
-}
-
-
-void dce120_timing_generator_get_crtc_scanoutpos(
+static void dce120_timing_generator_get_crtc_scanoutpos(
 	struct timing_generator *tg,
 	uint32_t *v_blank_start,
 	uint32_t *v_blank_end,
@@ -661,7 +618,7 @@ void dce120_timing_generator_get_crtc_scanoutpos(
 	*v_position = position.vertical_count;
 }
 
-void dce120_timing_generator_enable_advanced_request(
+static void dce120_timing_generator_enable_advanced_request(
 	struct timing_generator *tg,
 	bool enable,
 	const struct dc_crtc_timing *timing)
@@ -699,7 +656,7 @@ void dce120_timing_generator_enable_advanced_request(
 			value);
 }
 
-void dce120_tg_program_blank_color(struct timing_generator *tg,
+static void dce120_tg_program_blank_color(struct timing_generator *tg,
 	const struct tg_color *black_color)
 {
 	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -722,7 +679,7 @@ void dce120_tg_program_blank_color(struct timing_generator *tg,
 		value);
 }
 
-void dce120_tg_set_overscan_color(struct timing_generator *tg,
+static void dce120_tg_set_overscan_color(struct timing_generator *tg,
 	const struct tg_color *overscan_color)
 {
 	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -749,7 +706,7 @@ static void dce120_tg_program_timing(struct timing_generator *tg,
 		dce120_timing_generator_program_blanking(tg, timing);
 }
 
-bool dce120_tg_is_blanked(struct timing_generator *tg)
+static bool dce120_tg_is_blanked(struct timing_generator *tg)
 {
 	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
 	uint32_t value = dm_read_reg_soc15(
@@ -770,7 +727,7 @@ bool dce120_tg_is_blanked(struct timing_generator *tg)
 	return false;
 }
 
-void dce120_tg_set_blank(struct timing_generator *tg,
+static void dce120_tg_set_blank(struct timing_generator *tg,
 		bool enable_blanking)
 {
 	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -789,7 +746,7 @@ void dce120_tg_set_blank(struct timing_generator *tg,
 bool dce120_tg_validate_timing(struct timing_generator *tg,
 	const struct dc_crtc_timing *timing);
 
-void dce120_tg_wait_for_state(struct timing_generator *tg,
+static void dce120_tg_wait_for_state(struct timing_generator *tg,
 	enum crtc_state state)
 {
 	switch (state) {
@@ -806,7 +763,7 @@ void dce120_tg_wait_for_state(struct timing_generator *tg,
 	}
 }
 
-void dce120_tg_set_colors(struct timing_generator *tg,
+static void dce120_tg_set_colors(struct timing_generator *tg,
 	const struct tg_color *blank_color,
 	const struct tg_color *overscan_color)
 {
@@ -833,7 +790,7 @@ static void dce120_timing_generator_set_static_screen_control(
 			CRTC_STATIC_SCREEN_FRAME_COUNT, num_frames);
 }
 
-void dce120_timing_generator_set_test_pattern(
+static void dce120_timing_generator_set_test_pattern(
 	struct timing_generator *tg,
 	/* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
 	 * because this is not DP-specific (which is probably somewhere in DP
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index 7036c3bd0f871d8689af6a0a3b58cf247f9214f9..dda596fa1cd763e14ffb5f3e7cfe2c199d5a1344 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -23,6 +23,8 @@
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+
 DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
 	dce60_resource.o
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
index e9dd78c484d6e5fa6cfcbedcce73c6a9908b55de..dcfa0a3efa00d17510d558d6e0ddb5bc883126c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
@@ -60,6 +60,8 @@
 #include "dce/dce_i2c.h"
 /* TODO remove this include */
 
+#include "dce60_resource.h"
+
 #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
 #include "gmc/gmc_6_0_d.h"
 #include "gmc/gmc_6_0_sh_mask.h"
@@ -519,7 +521,7 @@ static struct output_pixel_processor *dce60_opp_create(
 	return &opp->base;
 }
 
-struct dce_aux *dce60_aux_engine_create(
+static struct dce_aux *dce60_aux_engine_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -557,7 +559,7 @@ static const struct dce_i2c_mask i2c_masks = {
 		I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
-struct dce_i2c_hw *dce60_i2c_hw_create(
+static struct dce_i2c_hw *dce60_i2c_hw_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -573,7 +575,7 @@ struct dce_i2c_hw *dce60_i2c_hw_create(
 	return dce_i2c_hw;
 }
 
-struct dce_i2c_sw *dce60_i2c_sw_create(
+static struct dce_i2c_sw *dce60_i2c_sw_create(
 	struct dc_context *ctx)
 {
 	struct dce_i2c_sw *dce_i2c_sw =
@@ -707,7 +709,7 @@ static const struct encoder_feature_support link_enc_feature = {
 		.flags.bits.IS_TPS3_CAPABLE = true
 };
 
-struct link_encoder *dce60_link_encoder_create(
+static struct link_encoder *dce60_link_encoder_create(
 	const struct encoder_init_data *enc_init_data)
 {
 	struct dce110_link_encoder *enc110 =
@@ -746,7 +748,7 @@ static struct panel_cntl *dce60_panel_cntl_create(const struct panel_cntl_init_d
 	return &panel_cntl->base;
 }
 
-struct clock_source *dce60_clock_source_create(
+static struct clock_source *dce60_clock_source_create(
 	struct dc_context *ctx,
 	struct dc_bios *bios,
 	enum clock_source_id id,
@@ -770,7 +772,7 @@ struct clock_source *dce60_clock_source_create(
 	return NULL;
 }
 
-void dce60_clock_source_destroy(struct clock_source **clk_src)
+static void dce60_clock_source_destroy(struct clock_source **clk_src)
 {
 	kfree(TO_DCE110_CLK_SRC(*clk_src));
 	*clk_src = NULL;
@@ -860,7 +862,7 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool)
 	}
 }
 
-bool dce60_validate_bandwidth(
+static bool dce60_validate_bandwidth(
 	struct dc *dc,
 	struct dc_state *context,
 	bool fast_validate)
@@ -905,7 +907,7 @@ static bool dce60_validate_surface_sets(
 	return true;
 }
 
-enum dc_status dce60_validate_global(
+static enum dc_status dce60_validate_global(
 		struct dc *dc,
 		struct dc_state *context)
 {
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
index fc1af0ff0ca4c5de361121f72cb545108cb31eba..c1a85ee374d9dab850a9f102217f2b3cb2cad4a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
@@ -189,8 +189,8 @@ static bool dce60_is_tg_enabled(struct timing_generator *tg)
 	return field == 1;
 }
 
-bool dce60_configure_crc(struct timing_generator *tg,
-			  const struct crc_params *params)
+static bool dce60_configure_crc(struct timing_generator *tg,
+				const struct crc_params *params)
 {
 	/* Cannot configure crc on a CRTC that is disabled */
 	if (!dce60_is_tg_enabled(tg))
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
index 666fcb2bdbba00091a16170f7058a227f51d99f5..0a9d1a350d8bdf4b33c88aa63887cbcd0b9eed40 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -23,6 +23,8 @@
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
+CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init)
+
 DCE80 = dce80_timing_generator.o dce80_hw_sequencer.o \
 	dce80_resource.o
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 390a0fa37239cab6440c49b1211a49e524389402..612450f992782ac9aae6602f568928ba89b26914 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -60,6 +60,8 @@
 #include "dce/dce_i2c.h"
 /* TODO remove this include */
 
+#include "dce80_resource.h"
+
 #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
 #include "gmc/gmc_7_1_d.h"
 #include "gmc/gmc_7_1_sh_mask.h"
@@ -402,7 +404,7 @@ static const struct dc_plane_cap plane_cap = {
 	.pixel_format_support = {
 			.argb8888 = true,
 			.nv12 = false,
-			.fp16 = false
+			.fp16 = true
 	},
 
 	.max_upscale_factor = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 4d3f7d5e1473afa1f61b379729f6528eb5702c64..904c2d2789987bce097fa805182a49dc9675a71e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -577,7 +577,7 @@ void dpp1_power_on_degamma_lut(
 	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
 
 	REG_SET(CM_MEM_PWR_CTRL, 0,
-			SHARED_MEM_PWR_DIS, power_on == true ? 0:1);
+			SHARED_MEM_PWR_DIS, power_on ? 0:1);
 
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 017b67b830e669a05acd66619484787960f97383..89912bb5014f86fe5c57d756b6c209ea8eb02155 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1224,6 +1224,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
 			// signals when OTG blanked. This is to prevent pipe from
 			// requesting data while in PSR.
 			tg->funcs->tg_init(tg);
+			hubp->power_gated = true;
 			continue;
 		}
 
@@ -2634,7 +2635,7 @@ static void dcn10_update_dchubp_dpp(
 	hws->funcs.update_plane_addr(dc, pipe_ctx);
 
 	if (is_pipe_tree_visible(pipe_ctx))
-		hubp->funcs->set_blank(hubp, false);
+		dc->hwss.set_hubp_blank(dc, pipe_ctx, false);
 }
 
 void dcn10_blank_pixel_data(
@@ -3134,7 +3135,7 @@ void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
 
 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
 		pipe_ctx->stream_res.opp,
-		flags.PROGRAM_STEREO == 1 ? true:false,
+		flags.PROGRAM_STEREO == 1,
 		&stream->timing);
 
 	pipe_ctx->stream_res.tg->funcs->program_stereo(
@@ -3145,13 +3146,16 @@ void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
 	return;
 }
 
-static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
+static struct pipe_ctx *get_pipe_ctx_by_hubp_inst(struct dc_state *context, int mpcc_inst)
 {
 	int i;
 
-	for (i = 0; i < res_pool->pipe_count; i++) {
-		if (res_pool->hubps[i]->inst == mpcc_inst)
-			return res_pool->hubps[i];
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (context->res_ctx.pipe_ctx[i].plane_res.hubp
+				&& context->res_ctx.pipe_ctx[i].plane_res.hubp->inst == mpcc_inst) {
+			return &context->res_ctx.pipe_ctx[i];
+		}
+
 	}
 	ASSERT(false);
 	return NULL;
@@ -3174,11 +3178,23 @@ void dcn10_wait_for_mpcc_disconnect(
 
 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
-			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+			struct pipe_ctx *restore_bottom_pipe;
+			struct pipe_ctx *restore_top_pipe;
+			struct pipe_ctx *inst_pipe_ctx = get_pipe_ctx_by_hubp_inst(dc->current_state, mpcc_inst);
 
+			ASSERT(inst_pipe_ctx);
 			res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
-			hubp->funcs->set_blank(hubp, true);
+			/*
+			 * Set top and bottom pipes NULL, as we don't want
+			 * to blank those pipes when disconnecting from MPCC
+			 */
+			restore_bottom_pipe = inst_pipe_ctx->bottom_pipe;
+			restore_top_pipe = inst_pipe_ctx->top_pipe;
+			inst_pipe_ctx->top_pipe = inst_pipe_ctx->bottom_pipe = NULL;
+			dc->hwss.set_hubp_blank(dc, inst_pipe_ctx, true);
+			inst_pipe_ctx->top_pipe = restore_top_pipe;
+			inst_pipe_ctx->bottom_pipe = restore_bottom_pipe;
 		}
 	}
 
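The restore_top_pipe/restore_bottom_pipe dance above exists because an implementation behind set_hubp_blank may follow a pipe's neighbours; unlinking them first guarantees only the one HUBP is blanked during the MPCC disconnect, per the in-code comment. The idiom on its own, with hypothetical types:

#include <stddef.h>

struct pipe {
	struct pipe *top_pipe;
	struct pipe *bottom_pipe;
};

static void blank_single_pipe(struct pipe *p, void (*blank)(struct pipe *))
{
	struct pipe *saved_top = p->top_pipe;
	struct pipe *saved_bottom = p->bottom_pipe;

	/* detach neighbours so the callee cannot reach them */
	p->top_pipe = p->bottom_pipe = NULL;
	blank(p);
	p->top_pipe = saved_top;
	p->bottom_pipe = saved_bottom;
}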
@@ -3731,3 +3747,10 @@ void dcn10_get_clock(struct dc *dc,
 				dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
 
 }
+
+void dcn10_set_hubp_blank(const struct dc *dc,
+				struct pipe_ctx *pipe_ctx,
+				bool blank_enable)
+{
+	pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, blank_enable);
+}
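dcn10_set_hubp_blank itself is just a forwarder to hubp->funcs->set_blank; the value of routing callers through dc->hwss.set_hubp_blank (wired up in the *_init.c tables below) is that a later ASIC can swap in its own hook without touching call sites. Schematically, with hypothetical names:

#include <stdbool.h>

struct pipe_ctx;

/* hypothetical slice of the hw_sequencer vtable */
struct hwss {
	void (*set_hubp_blank)(struct pipe_ctx *pipe, bool blank);
};

static void dcn10_blank(struct pipe_ctx *pipe, bool blank)
{
	(void)pipe;
	(void)blank;
	/* default: forward to the pipe's HUBP, as in the patch above */
}

static const struct hwss dcn10_hwss_sketch = {
	.set_hubp_blank = dcn10_blank,	/* dcn20/21 reuse the same hook */
};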
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index e5691e49902315f0165dc017ab68ff51b781c067..89e6dfb63da0c1990ef4705aaa4f132ca7aec725 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -163,6 +163,8 @@ void dcn10_wait_for_mpcc_disconnect(
 void dce110_edp_backlight_control(
 		struct dc_link *link,
 		bool enable);
+void dce110_edp_wait_for_T12(
+		struct dc_link *link);
 void dce110_edp_power_control(
 		struct dc_link *link,
 		bool power_up);
@@ -202,5 +204,8 @@ void dcn10_wait_for_pending_cleared(struct dc *dc,
 		struct dc_state *context);
 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
 void dcn10_verify_allow_pstate_change_high(struct dc *dc);
+void dcn10_set_hubp_blank(const struct dc *dc,
+				struct pipe_ctx *pipe_ctx,
+				bool blank_enable);
 
 #endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 7f4766e45dff0d37ff08503ace356fdd161fd0d2..e8b6065fffad4f70f3b6522ce1a0966748372301 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -47,7 +47,7 @@
 
 unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
 {
-	unsigned int ret_vsnprintf;
+	int ret_vsnprintf;
 	unsigned int chars_printed;
 
 	va_list args;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index 254300b06b434b0332df1f93e5c56a18d17fcc58..2f1b802e66a13e7afab10a6fa1ca1f104e643188 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -79,6 +79,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 	.set_backlight_level = dce110_set_backlight_level,
 	.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
 	.set_pipe = dce110_set_pipe,
+	.set_hubp_blank = dcn10_set_hubp_blank,
 };
 
 static const struct hwseq_private_funcs dcn10_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 81db0179f7ea8ac381b976c0ccbd28cbc36baa43..59024653430cd714f3a7e5346001028e383c6bcb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -956,6 +956,21 @@ void dcn10_link_encoder_enable_tmds_output(
 	}
 }
 
+void dcn10_link_encoder_enable_tmds_output_with_clk_pattern_wa(
+	struct link_encoder *enc,
+	enum clock_source_id clock_source,
+	enum dc_color_depth color_depth,
+	enum signal_type signal,
+	uint32_t pixel_clock)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+	dcn10_link_encoder_enable_tmds_output(
+		enc, clock_source, color_depth, signal, pixel_clock);
+
+	REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F);
+}
+
 /* enables DP PHY output */
 void dcn10_link_encoder_enable_dp_output(
 	struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index d4caad67085547eafb75d9b4e7d90691fd57dccd..3e1a582e4b88fd58708ad663fde21159ea8e4200 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -42,6 +42,7 @@
 #define LE_DCN_COMMON_REG_LIST(id) \
 	SRI(DIG_BE_CNTL, DIG, id), \
 	SRI(DIG_BE_EN_CNTL, DIG, id), \
+	SRI(DIG_CLOCK_PATTERN, DIG, id), \
 	SRI(TMDS_CTL_BITS, DIG, id), \
 	SRI(DP_CONFIG, DP, id), \
 	SRI(DP_DPHY_CNTL, DP, id), \
@@ -83,6 +84,7 @@ struct dcn10_link_enc_hpd_registers {
 struct dcn10_link_enc_registers {
 	uint32_t DIG_BE_CNTL;
 	uint32_t DIG_BE_EN_CNTL;
+	uint32_t DIG_CLOCK_PATTERN;
 	uint32_t DP_CONFIG;
 	uint32_t DP_DPHY_CNTL;
 	uint32_t DP_DPHY_INTERNAL_CTRL;
@@ -168,6 +170,7 @@ struct dcn10_link_enc_registers {
 	LE_SF(DIG0_DIG_BE_CNTL, DIG_HPD_SELECT, mask_sh),\
 	LE_SF(DIG0_DIG_BE_CNTL, DIG_MODE, mask_sh),\
 	LE_SF(DIG0_DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, mask_sh),\
+	LE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\
 	LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \
 	LE_SF(DP0_DP_DPHY_CNTL, DPHY_BYPASS, mask_sh),\
 	LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE0, mask_sh),\
@@ -218,6 +221,7 @@ struct dcn10_link_enc_registers {
 	type DIG_HPD_SELECT;\
 	type DIG_MODE;\
 	type DIG_FE_SOURCE_SELECT;\
+	type DIG_CLOCK_PATTERN;\
 	type DPHY_BYPASS;\
 	type DPHY_ATEST_SEL_LANE0;\
 	type DPHY_ATEST_SEL_LANE1;\
@@ -536,6 +540,13 @@ void dcn10_link_encoder_enable_tmds_output(
 	enum signal_type signal,
 	uint32_t pixel_clock);
 
+void dcn10_link_encoder_enable_tmds_output_with_clk_pattern_wa(
+	struct link_encoder *enc,
+	enum clock_source_id clock_source,
+	enum dc_color_depth color_depth,
+	enum signal_type signal,
+	uint32_t pixel_clock);
+
 /* enables DP PHY output */
 void dcn10_link_encoder_enable_dp_output(
 	struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index f033397a84e9a2eeb03ef3c320024ee641879e29..6138f4887de7fa597d385059e2801de54370c4bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -659,6 +659,16 @@ void optc1_unlock(struct timing_generator *optc)
 			OTG_MASTER_UPDATE_LOCK, 0);
 }
 
+bool optc1_is_locked(struct timing_generator *optc)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+	uint32_t locked;
+
+	REG_GET(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, &locked);
+
+	return (locked == 1);
+}
+
 void optc1_get_position(struct timing_generator *optc,
 		struct crtc_position *position)
 {
@@ -1513,6 +1523,7 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
 		.enable_crtc_reset = optc1_enable_crtc_reset,
 		.disable_reset_trigger = optc1_disable_reset_trigger,
 		.lock = optc1_lock,
+		.is_locked = optc1_is_locked,
 		.unlock = optc1_unlock,
 		.enable_optc_clock = optc1_enable_optc_clock,
 		.set_drr = optc1_set_drr,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index b12bd9aae52f0159197dbece6f60c47756a95839..b222c67973d469e0c65a56bdee9a2ef743cd3f2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -638,6 +638,7 @@ void optc1_set_blank(struct timing_generator *optc,
 		bool enable_blanking);
 
 bool optc1_is_blanked(struct timing_generator *optc);
+bool optc1_is_locked(struct timing_generator *optc);
 
 void optc1_program_blank_color(
 		struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 480d928cb1ca6edb6a3c1dd9e19ac9575521c199..0726fb435e2a3c175138262f9f3151223aabf77e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1575,8 +1575,8 @@ static void dcn20_update_dchubp_dpp(
 
 
 
-	if (pipe_ctx->update_flags.bits.enable)
-		hubp->funcs->set_blank(hubp, false);
+	if (is_pipe_tree_visible(pipe_ctx))
+		dc->hwss.set_hubp_blank(dc, pipe_ctx, false);
 }
 
 
@@ -1770,6 +1770,14 @@ void dcn20_post_unlock_program_front_end(
 		}
 	}
 
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+		if (pipe->vtp_locked) {
+			dc->hwss.set_hubp_blank(dc, pipe, true);
+			pipe->vtp_locked = false;
+		}
+	}
 	/* WA to apply WM setting*/
 	if (hwseq->wa.DEGVIDCN21)
 		dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
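The added loop is a flag-and-sweep: while pipe locks are held, a pipe that needs blanking is only marked vtp_locked, and the actual set_hubp_blank(..., true) happens here, after the locks drop. Reduced to a standalone sketch with hypothetical types:

#include <stdbool.h>

#define MAX_PIPES	6

struct pipe_state {
	bool vtp_locked;	/* blank requested while pipes were locked */
};

/* sweep once the front end is unlocked; blanking is safe now */
static void post_unlock_sweep(struct pipe_state *pipes,
			      void (*blank)(struct pipe_state *))
{
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (pipes[i].vtp_locked) {
			blank(&pipes[i]);
			pipes[i].vtp_locked = false;
		}
	}
}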
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index de9dcbeea150d4a395d9f33eedbe05017bb455cf..51a4166e975079caa604c2d9414d59d80a15a9fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -94,6 +94,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
 	.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
 #endif
 	.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+	.set_hubp_blank = dcn10_set_hubp_blank,
 };
 
 static const struct hwseq_private_funcs dcn20_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
index 15c2ff264ff601f559a9fba0eabcf15ae68f9c36..fa013496e26baf8ae1aa817d6c9fcf389633420b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -363,7 +363,7 @@ static const struct link_encoder_funcs dcn20_link_enc_funcs = {
 		dcn10_link_encoder_validate_output_with_stream,
 	.hw_init = enc2_hw_init,
 	.setup = dcn10_link_encoder_setup,
-	.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
+	.enable_tmds_output = dcn10_link_encoder_enable_tmds_output_with_clk_pattern_wa,
 	.enable_dp_output = dcn20_link_encoder_enable_dp_output,
 	.enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
 	.disable_output = dcn10_link_encoder_disable_output,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index d6b4885618713e2c964279e89a0907b14580c223..2c2dbfcd89571089741f14f84e366343aeb34657 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2097,6 +2097,7 @@ int dcn20_populate_dml_pipes_from_context(
 			pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2;
 		pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
 		pipes[pipe_cnt].dout.dp_lanes = 4;
+		pipes[pipe_cnt].dout.is_virtual = 0;
 		pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
 		pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
 		switch (get_num_odm_splits(&res_ctx->pipe_ctx[i])) {
@@ -2150,6 +2151,7 @@ int dcn20_populate_dml_pipes_from_context(
 			break;
 		default:
 			/* In case there is no signal, set dp with 4 lanes to allow max config */
+			pipes[pipe_cnt].dout.is_virtual = 1;
 			pipes[pipe_cnt].dout.output_type = dm_dp;
 			pipes[pipe_cnt].dout.dp_lanes = 4;
 		}
@@ -3245,7 +3247,7 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
 bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
 		bool fast_validate)
 {
-	bool voltage_supported = false;
+	bool voltage_supported;
 	DC_FP_START();
 	voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
 	DC_FP_END();
@@ -3506,7 +3508,8 @@ void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
 		calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000;
 
 		// FCLK:UCLK ratio is 1.08
-		min_fclk_required_by_uclk = mul_u64_u32_shr(BIT_ULL(32) * 1080 / 1000000, uclk_states[i], 32);
+		min_fclk_required_by_uclk = div_u64(((unsigned long long)uclk_states[i]) * 1080,
+			1000000);
 
 		calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ?
 				min_dcfclk : min_fclk_required_by_uclk;
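Both forms compute fclk ≈ uclk * 1080 / 1000000 (the 1.08 ratio, with uclk apparently in kHz judging by the dram_speed_mts math above); the old one pre-truncates the constant 2^32 * 1080 / 1000000 and so can come out one unit low. A small standalone check of the difference:

#include <stdint.h>
#include <stdio.h>

/* old form: multiply by a truncated 32.32 fixed-point constant */
static uint64_t fclk_fixed_point(uint32_t uclk_khz)
{
	uint64_t c = (1ULL << 32) * 1080 / 1000000;	/* rounds down */

	return (c * uclk_khz) >> 32;
}

/* new form: widen first, divide last, as the patched code does */
static uint64_t fclk_div(uint32_t uclk_khz)
{
	return (uint64_t)uclk_khz * 1080 / 1000000;
}

int main(void)
{
	uint32_t uclk = 1000000;	/* 1 GHz UCLK expressed in kHz */

	/* prints "1080 1079": the truncated constant loses a unit */
	printf("%llu %llu\n",
	       (unsigned long long)fclk_div(uclk),
	       (unsigned long long)fclk_fixed_point(uclk));
	return 0;
}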
@@ -3606,7 +3609,6 @@ static enum dml_project get_dml_project_version(uint32_t hw_internal_rev)
 static bool init_soc_bounding_box(struct dc *dc,
 				  struct dcn20_resource_pool *pool)
 {
-	const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
 	struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
 			get_asic_rev_soc_bb(dc->ctx->asic_id.hw_internal_rev);
 	struct _vcs_dpi_ip_params_st *loaded_ip =
@@ -3614,116 +3616,6 @@ static bool init_soc_bounding_box(struct dc *dc,
 
 	DC_LOGGER_INIT(dc->ctx->logger);
 
-	/* TODO: upstream NV12 bounding box when its launched */
-	if (!bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
-		DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);
-		return false;
-	}
-
-	if (bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
-		int i;
-
-		dcn2_0_nv12_soc.sr_exit_time_us =
-				fixed16_to_double_to_cpu(bb->sr_exit_time_us);
-		dcn2_0_nv12_soc.sr_enter_plus_exit_time_us =
-				fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
-		dcn2_0_nv12_soc.urgent_latency_us =
-				fixed16_to_double_to_cpu(bb->urgent_latency_us);
-		dcn2_0_nv12_soc.urgent_latency_pixel_data_only_us =
-				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
-		dcn2_0_nv12_soc.urgent_latency_pixel_mixed_with_vm_data_us =
-				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
-		dcn2_0_nv12_soc.urgent_latency_vm_data_only_us =
-				fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
-		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
-				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
-		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
-				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
-		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
-				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
-		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
-				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
-		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
-				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
-		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
-				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
-		dcn2_0_nv12_soc.max_avg_sdp_bw_use_normal_percent =
-				fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
-		dcn2_0_nv12_soc.max_avg_dram_bw_use_normal_percent =
-				fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
-		dcn2_0_nv12_soc.writeback_latency_us =
-				fixed16_to_double_to_cpu(bb->writeback_latency_us);
-		dcn2_0_nv12_soc.ideal_dram_bw_after_urgent_percent =
-				fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
-		dcn2_0_nv12_soc.max_request_size_bytes =
-				le32_to_cpu(bb->max_request_size_bytes);
-		dcn2_0_nv12_soc.dram_channel_width_bytes =
-				le32_to_cpu(bb->dram_channel_width_bytes);
-		dcn2_0_nv12_soc.fabric_datapath_to_dcn_data_return_bytes =
-				le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
-		dcn2_0_nv12_soc.dcn_downspread_percent =
-				fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
-		dcn2_0_nv12_soc.downspread_percent =
-				fixed16_to_double_to_cpu(bb->downspread_percent);
-		dcn2_0_nv12_soc.dram_page_open_time_ns =
-				fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
-		dcn2_0_nv12_soc.dram_rw_turnaround_time_ns =
-				fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
-		dcn2_0_nv12_soc.dram_return_buffer_per_channel_bytes =
-				le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
-		dcn2_0_nv12_soc.round_trip_ping_latency_dcfclk_cycles =
-				le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
-		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_bytes =
-				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
-		dcn2_0_nv12_soc.channel_interleave_bytes =
-				le32_to_cpu(bb->channel_interleave_bytes);
-		dcn2_0_nv12_soc.num_banks =
-				le32_to_cpu(bb->num_banks);
-		dcn2_0_nv12_soc.num_chans =
-				le32_to_cpu(bb->num_chans);
-		dcn2_0_nv12_soc.vmm_page_size_bytes =
-				le32_to_cpu(bb->vmm_page_size_bytes);
-		dcn2_0_nv12_soc.dram_clock_change_latency_us =
-				fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
-		// HACK!! Lower uclock latency switch time so we don't switch
-		dcn2_0_nv12_soc.dram_clock_change_latency_us = 10;
-		dcn2_0_nv12_soc.writeback_dram_clock_change_latency_us =
-				fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
-		dcn2_0_nv12_soc.return_bus_width_bytes =
-				le32_to_cpu(bb->return_bus_width_bytes);
-		dcn2_0_nv12_soc.dispclk_dppclk_vco_speed_mhz =
-				le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
-		dcn2_0_nv12_soc.xfc_bus_transport_time_us =
-				le32_to_cpu(bb->xfc_bus_transport_time_us);
-		dcn2_0_nv12_soc.xfc_xbuf_latency_tolerance_us =
-				le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
-		dcn2_0_nv12_soc.use_urgent_burst_bw =
-				le32_to_cpu(bb->use_urgent_burst_bw);
-		dcn2_0_nv12_soc.num_states =
-				le32_to_cpu(bb->num_states);
-
-		for (i = 0; i < dcn2_0_nv12_soc.num_states; i++) {
-			dcn2_0_nv12_soc.clock_limits[i].state =
-					le32_to_cpu(bb->clock_limits[i].state);
-			dcn2_0_nv12_soc.clock_limits[i].dcfclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
-			dcn2_0_nv12_soc.clock_limits[i].fabricclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
-			dcn2_0_nv12_soc.clock_limits[i].dispclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
-			dcn2_0_nv12_soc.clock_limits[i].dppclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
-			dcn2_0_nv12_soc.clock_limits[i].phyclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
-			dcn2_0_nv12_soc.clock_limits[i].socclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
-			dcn2_0_nv12_soc.clock_limits[i].dscclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
-			dcn2_0_nv12_soc.clock_limits[i].dram_speed_mts =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
-		}
-	}
-
 	if (pool->base.pp_smu) {
 		struct pp_smu_nv_clock_table max_clocks = {0};
 		unsigned int uclk_states[8] = {0};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index 96ee0b82f45809f38e740669fc9154ee649388fa..d3b643089603ea40f58cf3532d70a17939bbc878 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -123,7 +123,7 @@ void dcn21_optimize_pwr_state(
  * PHY will hang on the next mode set attempt.
  * if enable PLL follow by disable PLL (without executing lane enable/disable),
  * RDPCS_PHY_DP_MPLLB_STATE remains 1,
- * which indicate that PLL disable attempt actually didn’t go through.
+ * which indicates that the PLL disable attempt actually didn't go through.
  * As a workaround, insert PHY lane enable/disable before PLL disable.
  */
 void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
@@ -143,6 +143,7 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t optio
 	struct dc_context *dc = abm->ctx;
 	uint32_t ramping_boundary = 0xFFFF;
 
+	memset(&cmd, 0, sizeof(cmd));
 	cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
 	cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
 	cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
@@ -212,6 +213,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
 	if (abm && panel_cntl)
 		dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
 
+	memset(&cmd, 0, sizeof(cmd));
 	cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
 	cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
 	cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index 074e2713257f179dbc8cc642665e3d50765db686..0597391b21716bd89a138a9930c6a5f5415974e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -99,6 +99,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
 #endif
 	.is_abm_supported = dcn21_is_abm_supported,
 	.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+	.set_hubp_blank = dcn10_set_hubp_blank,
 };
 
 static const struct hwseq_private_funcs dcn21_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 6743764289167a06639268284dba98356c883f8f..072f8c880924366ffd42477a3ffd79886611748a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -1329,8 +1329,8 @@ static bool dcn21_fast_validate_bw(
 	return out;
 }
 
-bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
-		bool fast_validate)
+static noinline bool dcn21_validate_bandwidth_fp(struct dc *dc,
+		struct dc_state *context, bool fast_validate)
 {
 	bool out = false;
 
@@ -1383,6 +1383,22 @@ bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
 
 	return out;
 }
+
+/*
+ * Some of the functions further below use the FPU, so we need to wrap this
+ * with DC_FP_START()/DC_FP_END(). Use the same approach as for
+ * dcn20_validate_bandwidth in dcn20_resource.c.
+ */
+bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
+		bool fast_validate)
+{
+	bool voltage_supported;
+	DC_FP_START();
+	voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate);
+	DC_FP_END();
+	return voltage_supported;
+}
+
 static void dcn21_destroy_resource_pool(struct resource_pool **pool)
 {
 	struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool);
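
The wrapper added above follows the DC convention the comment cites: all FPU-touching work lives in a `noinline` helper, and the thin exported function holds the kernel FPU context for exactly that call. A self-contained sketch of the shape — the macro stand-ins below are assumptions; in DC they come from arch-specific headers (`kernel_fpu_begin()`/`kernel_fpu_end()` on x86):

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins so the sketch compiles standalone; assumptions, not DC code. */
#define DC_FP_START()	do { /* kernel_fpu_begin() on x86 */ } while (0)
#define DC_FP_END()	do { /* kernel_fpu_end() on x86 */ } while (0)
#define noinline	__attribute__((noinline))

static noinline bool sketch_validate_fp(int num, int den, bool fast)
{
	/* All FP math stays inside this noinline helper so the compiler
	 * cannot emit FP instructions outside the protected window. */
	double ratio = (double)num / (double)den;

	return fast ? ratio < 1.0 : ratio < 0.9;
}

static bool sketch_validate(int num, int den, bool fast)
{
	bool supported;

	DC_FP_START();
	supported = sketch_validate_fp(num, den, fast);
	DC_FP_END();

	return supported;
}

int main(void)
{
	printf("%d\n", sketch_validate(9, 10, false));
	return 0;
}
```
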
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
index 9da66e4911165d406c182707d417faf788fec542..33985401f25cba88b822bc15ad3d26c9cd70a09e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -133,7 +133,6 @@ static void dpp3_power_on_gamcor_lut(
 		struct dpp *dpp_base,
 	bool power_on)
 {
-	uint32_t power_status;
 	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
 
 	if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
@@ -143,12 +142,6 @@ static void dpp3_power_on_gamcor_lut(
 	} else
 		REG_SET(CM_MEM_PWR_CTRL, 0,
 				GAMCOR_MEM_PWR_DIS, power_on == true ? 0:1);
-
-	REG_GET(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, &power_status);
-	if (power_status != 0)
-		BREAK_TO_DEBUGGER();
-
-
 }
 
 void dpp3_program_cm_dealpha(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
index 5fa150f34c60093122a482cb30d7656290c18440..705fbfc375029f7c1d089f0dca60b4da9b27f6fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
@@ -62,6 +62,7 @@
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
+	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_IN_BLANK, mask_sh),\
 	HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
 	HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
 	HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, MAX_COMPRESSED_FRAGS, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 3deb3fb1724dc3caaef08cda6252184f71be8cbf..9620fb8a27dc5a3b4cc72dfb2470f58ae44acbe2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -710,8 +710,11 @@ void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
 bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
 {
 	union dmub_rb_cmd cmd;
-	unsigned int surface_size, refresh_hz, denom;
 	uint32_t tmr_delay = 0, tmr_scale = 0;
+	struct dc_cursor_attributes cursor_attr;
+	bool cursor_cache_enable = false;
+	struct dc_stream_state *stream = NULL;
+	struct dc_plane_state *plane = NULL;
 
 	if (!dc->ctx->dmub_srv)
 		return false;
@@ -722,72 +725,150 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
 
 			/* First, check no-memory-requests case */
 			for (i = 0; i < dc->current_state->stream_count; i++) {
-				if (dc->current_state->stream_status[i]
-					    .plane_count)
+				if (dc->current_state->stream_status[i].plane_count)
 					/* Fail eligibility on a visible stream */
 					break;
 			}
 
-			if (dc->current_state->stream_count == 1 // single display only
-			    && dc->current_state->stream_status[0].plane_count == 1 // single surface only
-			    && dc->current_state->stream_status[0].plane_states[0]->address.page_table_base.quad_part == 0 // no VM
-			    // Only 8 and 16 bit formats
-			    && dc->current_state->stream_status[0].plane_states[0]->format <= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F
-			    && dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888) {
-				surface_size = dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_pitch *
-					dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_size.height *
-					(dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ?
-					 8 : 4);
-			} else {
-				// TODO: remove hard code size
-				surface_size = 128 * 1024 * 1024;
+			if (i == dc->current_state->stream_count) {
+				/* Enable no-memory-requests case */
+				memset(&cmd, 0, sizeof(cmd));
+				cmd.mall.header.type = DMUB_CMD__MALL;
+				cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ;
+				cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
+
+				dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
+				dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+
+				return true;
+			}
+
+			stream = dc->current_state->streams[0];
+			plane = (stream ? dc->current_state->stream_status[0].plane_states[0] : NULL);
+
+			if (stream && plane) {
+				cursor_cache_enable = stream->cursor_position.enable &&
+						plane->address.grph.cursor_cache_addr.quad_part;
+				cursor_attr = stream->cursor_attributes;
 			}
 
-			// TODO: remove hard code size
-			if (surface_size < 128 * 1024 * 1024) {
-				refresh_hz = div_u64((unsigned long long) dc->current_state->streams[0]->timing.pix_clk_100hz *
-						     100LL,
-						     (dc->current_state->streams[0]->timing.v_total *
-						      dc->current_state->streams[0]->timing.h_total));
+			/*
+			 * Second, check MALL eligibility
+			 *
+			 * single display only, single surface only, 8 and 16 bit formats only, no VM,
+			 * do not use MALL for displays that support PSR as they use D0i3.2 in DMCUB FW
+			 *
+			 * TODO: When we implement multi-display, PSR displays will be allowed if there is
+			 * a non-PSR display present, since in that case we can't do D0i3.2
+			 */
+			if (dc->current_state->stream_count == 1 &&
+					stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
+					dc->current_state->stream_status[0].plane_count == 1 &&
+					plane->format <= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F &&
+					plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888 &&
+					plane->address.page_table_base.quad_part == 0 &&
+					dc->hwss.does_plane_fit_in_mall &&
+					dc->hwss.does_plane_fit_in_mall(dc, plane,
+							cursor_cache_enable ? &cursor_attr : NULL)) {
+				unsigned int v_total = stream->adjust.v_total_max ?
+						stream->adjust.v_total_max : stream->timing.v_total;
+				unsigned int refresh_hz = div_u64((unsigned long long) stream->timing.pix_clk_100hz *
+						100LL, (v_total * stream->timing.h_total));
 
 				/*
-				 * Delay_Us = 65.28 * (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
-				 * Delay_Us / 65.28 = (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
-				 * (Delay_Us / 65.28) / 2^MallFrameCacheTmrScale = 64 + MallFrameCacheTmrDly
-				 * MallFrameCacheTmrDly = ((Delay_Us / 65.28) / 2^MallFrameCacheTmrScale) - 64
-				 *                      = (1000000 / refresh) / 65.28 / 2^MallFrameCacheTmrScale - 64
-				 *                      = 1000000 / (refresh * 65.28 * 2^MallFrameCacheTmrScale) - 64
-				 *                      = (1000000 * 100) / (refresh * 6528 * 2^MallFrameCacheTmrScale) - 64
+				 * one frame time in microsec:
+				 * Delay_Us = 1000000 / refresh
+				 * dynamic_delay_us = 1000000 / refresh + 2 * stutter_period
+				 *
+				 * one frame time modified by 'additional timer percent' (p):
+				 * Delay_Us_modified = dynamic_delay_us + dynamic_delay_us * p / 100
+				 *                   = dynamic_delay_us * (1 + p / 100)
+				 *                   = (1000000 / refresh + 2 * stutter_period) * (100 + p) / 100
+				 *                   = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (100 * refresh)
+				 *
+				 * formula for timer duration based on parameters, from regspec:
+				 * dynamic_delay_us = 65.28 * (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
+				 *
+				 * dynamic_delay_us / 65.28 = (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
+				 * (dynamic_delay_us / 65.28) / 2^MallFrameCacheTmrScale = 64 + MallFrameCacheTmrDly
+				 * MallFrameCacheTmrDly = ((dynamic_delay_us / 65.28) / 2^MallFrameCacheTmrScale) - 64
+				 *                      = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (100 * refresh) / 65.28 / 2^MallFrameCacheTmrScale - 64
+				 *                      = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (refresh * 6528 * 2^MallFrameCacheTmrScale) - 64
 				 *
 				 * need to round up the result of the division before the subtraction
 				 */
-				denom = refresh_hz * 6528;
-				tmr_delay = div_u64((100000000LL + denom - 1), denom) - 64LL;
+				unsigned int denom = refresh_hz * 6528;
+				unsigned int stutter_period = dc->current_state->perf_params.stutter_period_us;
+
+				tmr_delay = div_u64(((1000000LL + 2 * stutter_period * refresh_hz) *
+						(100LL + dc->debug.mall_additional_timer_percent) + denom - 1),
+						denom) - 64LL;
 
 				/* scale should be increased until it fits into 6 bits */
 				while (tmr_delay & ~0x3F) {
 					tmr_scale++;
 
 					if (tmr_scale > 3) {
-						/* The delay exceeds the range of the hystersis timer */
+						/* Delay exceeds range of hysteresis timer */
 						ASSERT(false);
 						return false;
 					}
 
 					denom *= 2;
-					tmr_delay = div_u64((100000000LL + denom - 1), denom) - 64LL;
+					tmr_delay = div_u64(((1000000LL + 2 * stutter_period * refresh_hz) *
+							(100LL + dc->debug.mall_additional_timer_percent) + denom - 1),
+							denom) - 64LL;
+				}
+
+				/* Copy HW cursor */
+				if (cursor_cache_enable) {
+					memset(&cmd, 0, sizeof(cmd));
+					cmd.mall.header.type = DMUB_CMD__MALL;
+					cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_COPY_CURSOR;
+					cmd.mall.header.payload_bytes =
+							sizeof(cmd.mall) - sizeof(cmd.mall.header);
+
+					switch (cursor_attr.color_format) {
+					case CURSOR_MODE_MONO:
+						cmd.mall.cursor_bpp = 2;
+						break;
+					case CURSOR_MODE_COLOR_1BIT_AND:
+					case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+					case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+						cmd.mall.cursor_bpp = 32;
+						break;
+
+					case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+					case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+						cmd.mall.cursor_bpp = 64;
+						break;
+					}
+
+					cmd.mall.cursor_copy_src.quad_part = cursor_attr.address.quad_part;
+					cmd.mall.cursor_copy_dst.quad_part =
+							plane->address.grph.cursor_cache_addr.quad_part;
+					cmd.mall.cursor_width = cursor_attr.width;
+					cmd.mall.cursor_height = cursor_attr.height;
+					cmd.mall.cursor_pitch = cursor_attr.pitch;
+
+					dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
+					dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+					dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+
+					/* Use copied cursor, and it's okay to not switch back */
+					cursor_attr.address.quad_part =
+							plane->address.grph.cursor_cache_addr.quad_part;
+					dc_stream_set_cursor_attributes(stream, &cursor_attr);
 				}
 
 				/* Enable MALL */
 				memset(&cmd, 0, sizeof(cmd));
 				cmd.mall.header.type = DMUB_CMD__MALL;
-				cmd.mall.header.sub_type =
-					DMUB_CMD__MALL_ACTION_ALLOW;
-				cmd.mall.header.payload_bytes =
-					sizeof(cmd.mall) -
-					sizeof(cmd.mall.header);
+				cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_ALLOW;
+				cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
 				cmd.mall.tmr_delay = tmr_delay;
 				cmd.mall.tmr_scale = tmr_scale;
+				cmd.mall.debug_bits = dc->debug.mall_error_as_fatal;
 
 				dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
 				dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
@@ -814,6 +895,40 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
 	return true;
 }
 
+bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, struct dc_cursor_attributes *cursor_attr)
+{
+	// TODO: add meta size?
+	unsigned int surface_size = plane->plane_size.surface_pitch * plane->plane_size.surface_size.height *
+			(plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+	unsigned int mall_size = dc->caps.mall_size_total;
+	unsigned int cursor_size = 0;
+
+	if (dc->debug.mall_size_override)
+		mall_size = 1024 * 1024 * dc->debug.mall_size_override;
+
+	if (cursor_attr) {
+		cursor_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size;
+
+		switch (cursor_attr->color_format) {
+		case CURSOR_MODE_MONO:
+			cursor_size /= 2;
+			break;
+		case CURSOR_MODE_COLOR_1BIT_AND:
+		case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+		case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+			cursor_size *= 4;
+			break;
+
+		case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+		case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+			cursor_size *= 8;
+			break;
+		}
+	}
+
+	return (surface_size + cursor_size) < mall_size;
+}
+
 void dcn30_hardware_release(struct dc *dc)
 {
 	/* if pstate unsupported, force it supported */
@@ -823,6 +938,53 @@ void dcn30_hardware_release(struct dc *dc)
 				dc->res_pool->hubbub, true, true);
 }
 
+void dcn30_set_hubp_blank(const struct dc *dc,
+		struct pipe_ctx *pipe_ctx,
+		bool blank_enable)
+{
+	struct pipe_ctx *mpcc_pipe;
+	struct pipe_ctx *odm_pipe;
+
+	if (blank_enable) {
+		struct plane_resource *plane_res = &pipe_ctx->plane_res;
+		struct stream_resource *stream_res = &pipe_ctx->stream_res;
+
+		/* Wait until we enter vblank */
+		stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
+
+		/* Blank HUBP to allow p-state during blank on all timings */
+		pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, true);
+		/* Confirm hubp in blank */
+		ASSERT(plane_res->hubp->funcs->hubp_in_blank(plane_res->hubp));
+		/* Toggle HUBP_DISABLE */
+		plane_res->hubp->funcs->hubp_soft_reset(plane_res->hubp, true);
+		plane_res->hubp->funcs->hubp_soft_reset(plane_res->hubp, false);
+		for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) {
+			mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, true);
+			/* Confirm hubp in blank */
+			ASSERT(mpcc_pipe->plane_res.hubp->funcs->hubp_in_blank(mpcc_pipe->plane_res.hubp));
+			/* Toggle HUBP_DISABLE */
+			mpcc_pipe->plane_res.hubp->funcs->hubp_soft_reset(mpcc_pipe->plane_res.hubp, true);
+			mpcc_pipe->plane_res.hubp->funcs->hubp_soft_reset(mpcc_pipe->plane_res.hubp, false);
+		}
+		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+			odm_pipe->plane_res.hubp->funcs->set_blank(odm_pipe->plane_res.hubp, true);
+			/* Confirm hubp in blank */
+			ASSERT(odm_pipe->plane_res.hubp->funcs->hubp_in_blank(odm_pipe->plane_res.hubp));
+			/* Toggle HUBP_DISABLE */
+			odm_pipe->plane_res.hubp->funcs->hubp_soft_reset(odm_pipe->plane_res.hubp, true);
+			odm_pipe->plane_res.hubp->funcs->hubp_soft_reset(odm_pipe->plane_res.hubp, false);
+		}
+	} else {
+		pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, false);
+		for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
+			mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false);
+		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+			odm_pipe->plane_res.hubp->funcs->set_blank(odm_pipe->plane_res.hubp, false);
+	}
+}
+
 void dcn30_set_disp_pattern_generator(const struct dc *dc,
 		struct pipe_ctx *pipe_ctx,
 		enum controller_dp_test_pattern test_pattern,
@@ -831,6 +993,25 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
 		const struct tg_color *solid_color,
 		int width, int height, int offset)
 {
-	pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern,
-			color_space, color_depth, solid_color, width, height, offset);
+	struct stream_resource *stream_res = &pipe_ctx->stream_res;
+
+	if (test_pattern != CONTROLLER_DP_TEST_PATTERN_VIDEOMODE) {
+		pipe_ctx->vtp_locked = false;
+		/* turning on DPG */
+		stream_res->opp->funcs->opp_set_disp_pattern_generator(stream_res->opp, test_pattern, color_space,
+				color_depth, solid_color, width, height, offset);
+
+		/* Defer hubp blank if tg is locked */
+		if (stream_res->tg->funcs->is_tg_enabled(stream_res->tg)) {
+			if (stream_res->tg->funcs->is_locked(stream_res->tg))
+				pipe_ctx->vtp_locked = true;
+			else
+				dc->hwss.set_hubp_blank(dc, pipe_ctx, true);
+		}
+	} else {
+		dc->hwss.set_hubp_blank(dc, pipe_ctx, false);
+		/* turning off DPG */
+		stream_res->opp->funcs->opp_set_disp_pattern_generator(stream_res->opp, test_pattern, color_space,
+				color_depth, solid_color, width, height, offset);
+	}
 }
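
The derivation in the comment above boils down to one rounded-up division plus a loop that grows the scale until the delay fits the 6-bit register field. A standalone arithmetic mirror of the hunk, using made-up sample inputs (a 60 Hz mode, a 40 us stutter period, and a 10% additional-timer margin):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the tmr_delay/tmr_scale math from the hunk above; the inputs
 * are illustrative sample values, not taken from any real mode. */
static bool mall_timer(unsigned int refresh_hz, unsigned int stutter_us,
		       unsigned int extra_pct,
		       uint32_t *tmr_delay, uint32_t *tmr_scale)
{
	uint64_t num = (1000000ULL + 2ULL * stutter_us * refresh_hz) *
		       (100 + extra_pct);
	uint64_t denom = (uint64_t)refresh_hz * 6528;

	*tmr_scale = 0;
	*tmr_delay = (num + denom - 1) / denom - 64;	/* round up, then -64 */

	while (*tmr_delay & ~0x3Fu) {			/* must fit in 6 bits */
		if (++(*tmr_scale) > 3)
			return false;			/* exceeds timer range */
		denom *= 2;
		*tmr_delay = (num + denom - 1) / denom - 64;
	}
	return true;
}

int main(void)
{
	uint32_t delay, scale;

	if (mall_timer(60, 40, 10, &delay, &scale))
		printf("tmr_delay=%u tmr_scale=%u\n", delay, scale);
	return 0;
}
```

With these inputs the loop settles at tmr_scale=2, tmr_delay=7, i.e. the hysteresis timer is stretched until one frame's worth of delay fits the register.
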
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
index 7d32c43aafe0e65aa54a863b9f29fa612f2d89cf..3b7d4812e3119b1008f081f8da156a2aa6318c1a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
@@ -65,6 +65,9 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
 void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx);
 void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx);
 
+bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane,
+		struct dc_cursor_attributes *cursor_attr);
+
 bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable);
 
 void dcn30_hardware_release(struct dc *dc);
@@ -77,4 +80,8 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
 		const struct tg_color *solid_color,
 		int width, int height, int offset);
 
+void dcn30_set_hubp_blank(const struct dc *dc,
+		struct pipe_ctx *pipe_ctx,
+		bool blank_enable);
+
 #endif /* __DC_HWSS_DCN30_H__ */
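
As a quick feel for the `dcn30_does_plane_fit_in_mall()` check declared above, worked numbers help: a 4K ARGB8888 surface plus a 256x256 32bpp cursor against the MALL capacity that dcn30_resource.c derives below (8 MiB per memory channel; the 8-channel count here is an assumed example):

```c
#include <stdio.h>

int main(void)
{
	/* illustrative plane/cursor/MALL figures, not from real hardware */
	unsigned int pitch = 3840, height = 2160, bytes_pp = 4;	/* ARGB8888 */
	unsigned int cursor = 256 * 256 * 4;			/* 32bpp cursor */
	unsigned int num_chans = 8, per_chan_mib = 8;		/* assumed board */

	unsigned long long surface = (unsigned long long)pitch * height * bytes_pp;
	unsigned long long mall = (unsigned long long)per_chan_mib * num_chans * 1024 * 1024;

	/* ~31.6 MiB + 256 KiB against 64 MiB -> fits */
	printf("surface+cursor = %llu bytes, MALL = %llu bytes -> %s\n",
	       surface + cursor, mall,
	       (surface + cursor) < mall ? "fits" : "does not fit");
	return 0;
}
```
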
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 6125fe440ad0ee44d81efac972656478bf4b9d43..204444fead9777cb4399fd97244380b0c7979165 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -71,6 +71,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
 	.edp_backlight_control = dce110_edp_backlight_control,
 	.edp_power_control = dce110_edp_power_control,
 	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+	.edp_wait_for_T12 = dce110_edp_wait_for_T12,
 	.set_cursor_position = dcn10_set_cursor_position,
 	.set_cursor_attribute = dcn10_set_cursor_attribute,
 	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
@@ -91,11 +92,13 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
 	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
 	.calc_vupdate_position = dcn10_calc_vupdate_position,
 	.apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations,
+	.does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall,
 	.set_backlight_level = dcn21_set_backlight_level,
 	.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
 	.hardware_release = dcn30_hardware_release,
 	.set_pipe = dcn21_set_pipe,
 	.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+	.set_hubp_blank = dcn30_set_hubp_blank,
 };
 
 static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 3ba3991ee6128b489eaded6262ab04376c25c915..8980c90b2277a242ea8c59a9481fdddb8927ba7d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -309,6 +309,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
 		.enable_crtc_reset = optc1_enable_crtc_reset,
 		.disable_reset_trigger = optc1_disable_reset_trigger,
 		.lock = optc3_lock,
+		.is_locked = optc1_is_locked,
 		.unlock = optc1_unlock,
 		.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
 		.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 5e126fdf6ec105e0a5971d84e0a4b30b2f728eff..8d0f663489ac5a915330f7f1d689d299daed647f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1716,125 +1716,22 @@ static bool is_soc_bounding_box_valid(struct dc *dc)
 static bool init_soc_bounding_box(struct dc *dc,
 				  struct dcn30_resource_pool *pool)
 {
-	const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
 	struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_0_soc;
 	struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_0_ip;
 
 	DC_LOGGER_INIT(dc->ctx->logger);
 
-	if (!bb && !is_soc_bounding_box_valid(dc)) {
+	if (!is_soc_bounding_box_valid(dc)) {
 		DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);
 		return false;
 	}
 
-	if (bb && !is_soc_bounding_box_valid(dc)) {
-		int i;
-
-		dcn3_0_soc.sr_exit_time_us =
-				fixed16_to_double_to_cpu(bb->sr_exit_time_us);
-		dcn3_0_soc.sr_enter_plus_exit_time_us =
-				fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
-		dcn3_0_soc.urgent_latency_us =
-				fixed16_to_double_to_cpu(bb->urgent_latency_us);
-		dcn3_0_soc.urgent_latency_pixel_data_only_us =
-				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
-		dcn3_0_soc.urgent_latency_pixel_mixed_with_vm_data_us =
-				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
-		dcn3_0_soc.urgent_latency_vm_data_only_us =
-				fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
-		dcn3_0_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
-				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
-		dcn3_0_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
-				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
-		dcn3_0_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
-				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
-		dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
-				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
-		dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
-				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
-		dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
-				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
-		dcn3_0_soc.max_avg_sdp_bw_use_normal_percent =
-				fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
-		dcn3_0_soc.max_avg_dram_bw_use_normal_percent =
-				fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
-		dcn3_0_soc.writeback_latency_us =
-				fixed16_to_double_to_cpu(bb->writeback_latency_us);
-		dcn3_0_soc.ideal_dram_bw_after_urgent_percent =
-				fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
-		dcn3_0_soc.max_request_size_bytes =
-				le32_to_cpu(bb->max_request_size_bytes);
-		dcn3_0_soc.dram_channel_width_bytes =
-				le32_to_cpu(bb->dram_channel_width_bytes);
-		dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes =
-				le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
-		dcn3_0_soc.dcn_downspread_percent =
-				fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
-		dcn3_0_soc.downspread_percent =
-				fixed16_to_double_to_cpu(bb->downspread_percent);
-		dcn3_0_soc.dram_page_open_time_ns =
-				fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
-		dcn3_0_soc.dram_rw_turnaround_time_ns =
-				fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
-		dcn3_0_soc.dram_return_buffer_per_channel_bytes =
-				le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
-		dcn3_0_soc.round_trip_ping_latency_dcfclk_cycles =
-				le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
-		dcn3_0_soc.urgent_out_of_order_return_per_channel_bytes =
-				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
-		dcn3_0_soc.channel_interleave_bytes =
-				le32_to_cpu(bb->channel_interleave_bytes);
-		dcn3_0_soc.num_banks =
-				le32_to_cpu(bb->num_banks);
-		dcn3_0_soc.num_chans =
-				le32_to_cpu(bb->num_chans);
-		dcn3_0_soc.gpuvm_min_page_size_bytes =
-				le32_to_cpu(bb->vmm_page_size_bytes);
-		dcn3_0_soc.dram_clock_change_latency_us =
-				fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
-		dcn3_0_soc.writeback_dram_clock_change_latency_us =
-				fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
-		dcn3_0_soc.return_bus_width_bytes =
-				le32_to_cpu(bb->return_bus_width_bytes);
-		dcn3_0_soc.dispclk_dppclk_vco_speed_mhz =
-				le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
-		dcn3_0_soc.xfc_bus_transport_time_us =
-				le32_to_cpu(bb->xfc_bus_transport_time_us);
-		dcn3_0_soc.xfc_xbuf_latency_tolerance_us =
-				le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
-		dcn3_0_soc.use_urgent_burst_bw =
-				le32_to_cpu(bb->use_urgent_burst_bw);
-		dcn3_0_soc.num_states =
-				le32_to_cpu(bb->num_states);
-
-		for (i = 0; i < dcn3_0_soc.num_states; i++) {
-			dcn3_0_soc.clock_limits[i].state =
-					le32_to_cpu(bb->clock_limits[i].state);
-			dcn3_0_soc.clock_limits[i].dcfclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
-			dcn3_0_soc.clock_limits[i].fabricclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
-			dcn3_0_soc.clock_limits[i].dispclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
-			dcn3_0_soc.clock_limits[i].dppclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
-			dcn3_0_soc.clock_limits[i].phyclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
-			dcn3_0_soc.clock_limits[i].socclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
-			dcn3_0_soc.clock_limits[i].dscclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
-			dcn3_0_soc.clock_limits[i].dram_speed_mts =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
-		}
-	}
-
 	loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
 	loaded_ip->max_num_dpp = pool->base.pipe_count;
 	loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
 	dcn20_patch_bounding_box(dc, loaded_bb);
 
-	if (!bb && dc->ctx->dc_bios->funcs->get_soc_bb_info) {
+	if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
 		struct bp_soc_bb_info bb_info = {0};
 
 		if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
@@ -2292,17 +2189,15 @@ static noinline void dcn30_calculate_wm_and_dlg_fp(
 		unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
 		unsigned int min_dram_speed_mts_margin = 160;
 
-		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us;
-
 		if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
 			min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
 
-		for (i = 3; i > 0; i--) {
-			if ((min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts) &&
-					(min_dram_speed_mts - min_dram_speed_mts_margin < dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts))
-				context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
-		}
+		/* Find the largest table entry below the dram speed (plus margin); speeds below DPM0 still use DPM0 */
+		for (i = 3; i > 0; i--)
+			if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
+				break;
 
+		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
 		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
 		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
 	}
@@ -2437,16 +2332,28 @@ bool dcn30_validate_bandwidth(struct dc *dc,
 	return out;
 }
 
-static noinline void get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
-                                                       unsigned int *optimal_dcfclk,
-                                                       unsigned int *optimal_fclk)
+/*
+ * This must be noinline to ensure that anything dealing with FP registers
+ * is contained within this call; previously, compiling with hard-float
+ * would result in FP instructions being emitted outside the boundaries
+ * of the DC_FP_START/END macros, which makes sense as the compiler has no
+ * idea about what is wrapped and what is not.
+ *
+ * This is largely just a workaround to avoid breakage introduced with 5.6;
+ * ideally, all FP-using code should be moved into its own file, only that
+ * file should be compiled with hard-float, and all code exported from there
+ * should be strictly wrapped with DC_FP_START/END.
+ */
+static noinline void dcn30_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+		unsigned int *optimal_dcfclk,
+		unsigned int *optimal_fclk)
 {
        double bw_from_dram, bw_from_dram1, bw_from_dram2;
 
        bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
-                       dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
+		dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
        bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
-                       dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
+		dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
 
        bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
 
@@ -2505,7 +2412,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
 		// Calculate optimal dcfclk for each uclk
 		for (i = 0; i < num_uclk_states; i++) {
 			DC_FP_START();
-			get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
+			dcn30_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
 					&optimal_dcfclk_for_uclk[i], NULL);
 			DC_FP_END();
 			if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
@@ -2631,6 +2538,10 @@ static bool dcn30_resource_construct(
 	dc->caps.max_cursor_size = 256;
 	dc->caps.min_horizontal_blanking_period = 80;
 	dc->caps.dmdata_alloc_size = 2048;
+	dc->caps.mall_size_per_mem_channel = 8;
+	/* total size = mall per channel * num channels * 1024 * 1024 */
+	dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
+	dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
 
 	dc->caps.max_slave_planes = 1;
 	dc->caps.post_blend_color_processing = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index bdad72140cbcec03fe2b61a6b544ec150904d4f4..b8bf6d61005b070cf66923b01be618753dd3fce3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -98,6 +98,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
 	.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
 	.set_pipe = dcn21_set_pipe,
 	.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+	.set_hubp_blank = dcn30_set_hubp_blank,
 };
 
 static const struct hwseq_private_funcs dcn301_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 35f5bf08ae96e2570d22282da46f1f499236d4a4..5d4b2c60192ee49763f318777ce4271cf5554feb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1489,124 +1489,21 @@ static bool is_soc_bounding_box_valid(struct dc *dc)
 static bool init_soc_bounding_box(struct dc *dc,
 				  struct dcn301_resource_pool *pool)
 {
-	const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
 	struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_01_soc;
 	struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_01_ip;
 
 	DC_LOGGER_INIT(dc->ctx->logger);
 
-	if (!bb && !is_soc_bounding_box_valid(dc)) {
+	if (!is_soc_bounding_box_valid(dc)) {
 		DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);
 		return false;
 	}
 
-	if (bb && !is_soc_bounding_box_valid(dc)) {
-		int i;
-
-		dcn3_01_soc.sr_exit_time_us =
-				fixed16_to_double_to_cpu(bb->sr_exit_time_us);
-		dcn3_01_soc.sr_enter_plus_exit_time_us =
-				fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
-		dcn3_01_soc.urgent_latency_us =
-				fixed16_to_double_to_cpu(bb->urgent_latency_us);
-		dcn3_01_soc.urgent_latency_pixel_data_only_us =
-				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
-		dcn3_01_soc.urgent_latency_pixel_mixed_with_vm_data_us =
-				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
-		dcn3_01_soc.urgent_latency_vm_data_only_us =
-				fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
-		dcn3_01_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
-				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
-		dcn3_01_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
-				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
-		dcn3_01_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
-				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
-		dcn3_01_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
-				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
-		dcn3_01_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
-				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
-		dcn3_01_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
-				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
-		dcn3_01_soc.max_avg_sdp_bw_use_normal_percent =
-				fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
-		dcn3_01_soc.max_avg_dram_bw_use_normal_percent =
-				fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
-		dcn3_01_soc.writeback_latency_us =
-				fixed16_to_double_to_cpu(bb->writeback_latency_us);
-		dcn3_01_soc.ideal_dram_bw_after_urgent_percent =
-				fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
-		dcn3_01_soc.max_request_size_bytes =
-				le32_to_cpu(bb->max_request_size_bytes);
-		dcn3_01_soc.dram_channel_width_bytes =
-				le32_to_cpu(bb->dram_channel_width_bytes);
-		dcn3_01_soc.fabric_datapath_to_dcn_data_return_bytes =
-				le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
-		dcn3_01_soc.dcn_downspread_percent =
-				fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
-		dcn3_01_soc.downspread_percent =
-				fixed16_to_double_to_cpu(bb->downspread_percent);
-		dcn3_01_soc.dram_page_open_time_ns =
-				fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
-		dcn3_01_soc.dram_rw_turnaround_time_ns =
-				fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
-		dcn3_01_soc.dram_return_buffer_per_channel_bytes =
-				le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
-		dcn3_01_soc.round_trip_ping_latency_dcfclk_cycles =
-				le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
-		dcn3_01_soc.urgent_out_of_order_return_per_channel_bytes =
-				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
-		dcn3_01_soc.channel_interleave_bytes =
-				le32_to_cpu(bb->channel_interleave_bytes);
-		dcn3_01_soc.num_banks =
-				le32_to_cpu(bb->num_banks);
-		dcn3_01_soc.num_chans =
-				le32_to_cpu(bb->num_chans);
-		dcn3_01_soc.gpuvm_min_page_size_bytes =
-				le32_to_cpu(bb->vmm_page_size_bytes);
-		dcn3_01_soc.dram_clock_change_latency_us =
-				fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
-		dcn3_01_soc.writeback_dram_clock_change_latency_us =
-				fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
-		dcn3_01_soc.return_bus_width_bytes =
-				le32_to_cpu(bb->return_bus_width_bytes);
-		dcn3_01_soc.dispclk_dppclk_vco_speed_mhz =
-				le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
-		dcn3_01_soc.xfc_bus_transport_time_us =
-				le32_to_cpu(bb->xfc_bus_transport_time_us);
-		dcn3_01_soc.xfc_xbuf_latency_tolerance_us =
-				le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
-		dcn3_01_soc.use_urgent_burst_bw =
-				le32_to_cpu(bb->use_urgent_burst_bw);
-		dcn3_01_soc.num_states =
-				le32_to_cpu(bb->num_states);
-
-		for (i = 0; i < dcn3_01_soc.num_states; i++) {
-			dcn3_01_soc.clock_limits[i].state =
-					le32_to_cpu(bb->clock_limits[i].state);
-			dcn3_01_soc.clock_limits[i].dcfclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
-			dcn3_01_soc.clock_limits[i].fabricclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
-			dcn3_01_soc.clock_limits[i].dispclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
-			dcn3_01_soc.clock_limits[i].dppclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
-			dcn3_01_soc.clock_limits[i].phyclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
-			dcn3_01_soc.clock_limits[i].socclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
-			dcn3_01_soc.clock_limits[i].dscclk_mhz =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
-			dcn3_01_soc.clock_limits[i].dram_speed_mts =
-					fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
-		}
-	}
-
 	loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
 	loaded_ip->max_num_dpp = pool->base.pipe_count;
 	dcn20_patch_bounding_box(dc, loaded_bb);
 
-	if (!bb && dc->ctx->dc_bios->funcs->get_soc_bb_info) {
+	if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
 		struct bp_soc_bb_info bb_info = {0};
 
 		if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 808c4dcdb3ac830c8d9f596f1866438d6210f4fc..4b659b63f75b3d3aea15df00bb218d25957904a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -53,6 +53,8 @@
 #include "dce/dce_i2c_hw.h"
 #include "dce/dce_panel_cntl.h"
 #include "dce/dmub_abm.h"
+#include "dce/dmub_psr.h"
+#include "clk_mgr.h"
 
 #include "hw_sequencer_private.h"
 #include "reg_helper.h"
@@ -162,8 +164,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
 
 		.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
 		.num_states = 1,
-		.sr_exit_time_us = 5.20,
-		.sr_enter_plus_exit_time_us = 9.60,
+		.sr_exit_time_us = 12,
+		.sr_enter_plus_exit_time_us = 20,
 		.urgent_latency_us = 4.0,
 		.urgent_latency_pixel_data_only_us = 4.0,
 		.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -190,7 +192,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
 		.num_banks = 8,
 		.gpuvm_min_page_size_bytes = 4096,
 		.hostvm_min_page_size_bytes = 4096,
-		.dram_clock_change_latency_us = 350,
+		.dram_clock_change_latency_us = 404,
 		.dummy_pstate_latency_us = 5,
 		.writeback_dram_clock_change_latency_us = 23.0,
 		.return_bus_width_bytes = 64,
@@ -238,6 +240,7 @@ static const struct dc_debug_options debug_defaults_diags = {
 		.dwb_fi_phase = -1, // -1 = disable
 		.dmub_command_table = true,
 		.enable_tri_buf = true,
+		.disable_psr = true,
 };
 
 enum dcn302_clk_src_array_id {
@@ -1213,6 +1216,9 @@ static void dcn302_resource_destruct(struct resource_pool *pool)
 			dce_abm_destroy(&pool->multiple_abms[i]);
 	}
 
+	if (pool->psr != NULL)
+		dmub_psr_destroy(&pool->psr);
+
 	if (pool->dccg != NULL)
 		dcn_dccg_destroy(&pool->dccg);
 }
@@ -1224,6 +1230,165 @@ static void dcn302_destroy_resource_pool(struct resource_pool **pool)
 	*pool = NULL;
 }
 
+static void dcn302_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+		unsigned int *optimal_dcfclk,
+		unsigned int *optimal_fclk)
+{
+	double bw_from_dram, bw_from_dram1, bw_from_dram2;
+
+	bw_from_dram1 = uclk_mts * dcn3_02_soc.num_chans *
+		dcn3_02_soc.dram_channel_width_bytes * (dcn3_02_soc.max_avg_dram_bw_use_normal_percent / 100);
+	bw_from_dram2 = uclk_mts * dcn3_02_soc.num_chans *
+		dcn3_02_soc.dram_channel_width_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100);
+
+	bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
+
+	if (optimal_fclk)
+		*optimal_fclk = bw_from_dram /
+		(dcn3_02_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100));
+
+	if (optimal_dcfclk)
+		*optimal_dcfclk = bw_from_dram /
+		(dcn3_02_soc.return_bus_width_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100));
+}
+
+void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+	unsigned int i, j;
+	unsigned int num_states = 0;
+
+	unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
+	unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
+	unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
+	unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
+
+	unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200};
+	unsigned int num_dcfclk_sta_targets = 4;
+	unsigned int num_uclk_states;
+
+	if (dc->ctx->dc_bios->vram_info.num_chans)
+		dcn3_02_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
+
+	if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
+		dcn3_02_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+
+	dcn3_02_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+	dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+	if (bw_params->clk_table.entries[0].memclk_mhz) {
+		int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
+
+		for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
+			if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
+				max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+			if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
+				max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+			if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
+				max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+			if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
+				max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+		}
+		if (!max_dcfclk_mhz)
+			max_dcfclk_mhz = dcn3_02_soc.clock_limits[0].dcfclk_mhz;
+		if (!max_dispclk_mhz)
+			max_dispclk_mhz = dcn3_02_soc.clock_limits[0].dispclk_mhz;
+		if (!max_dppclk_mhz)
+			max_dppclk_mhz = dcn3_02_soc.clock_limits[0].dppclk_mhz;
+		if (!max_phyclk_mhz)
+			max_phyclk_mhz = dcn3_02_soc.clock_limits[0].phyclk_mhz;
+
+		if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+			/* If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array */
+			dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
+			num_dcfclk_sta_targets++;
+		} else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+			/* If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates */
+			for (i = 0; i < num_dcfclk_sta_targets; i++) {
+				if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
+					dcfclk_sta_targets[i] = max_dcfclk_mhz;
+					break;
+				}
+			}
+			/* Update size of array since we "removed" duplicates */
+			num_dcfclk_sta_targets = i + 1;
+		}
+
+		num_uclk_states = bw_params->clk_table.num_entries;
+
+		/* Calculate optimal dcfclk for each uclk */
+		for (i = 0; i < num_uclk_states; i++) {
+			dcn302_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
+					&optimal_dcfclk_for_uclk[i], NULL);
+			if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
+				optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
+			}
+		}
+
+		/* Calculate optimal uclk for each dcfclk sta target */
+		for (i = 0; i < num_dcfclk_sta_targets; i++) {
+			for (j = 0; j < num_uclk_states; j++) {
+				if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
+					optimal_uclk_for_dcfclk_sta_targets[i] =
+							bw_params->clk_table.entries[j].memclk_mhz * 16;
+					break;
+				}
+			}
+		}
+
+		i = 0;
+		j = 0;
+		/* create the final dcfclk and uclk table */
+		while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
+			if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+				dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
+				dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
+			} else {
+				if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+					dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
+					dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+				} else {
+					j = num_uclk_states;
+				}
+			}
+		}
+
+		while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
+			dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
+			dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
+		}
+
+		while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
+				optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+			dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
+			dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+		}
+
+		dcn3_02_soc.num_states = num_states;
+		for (i = 0; i < dcn3_02_soc.num_states; i++) {
+			dcn3_02_soc.clock_limits[i].state = i;
+			dcn3_02_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
+			dcn3_02_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
+			dcn3_02_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
+
+			/* Fill all states with max values of all other clocks */
+			dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
+			dcn3_02_soc.clock_limits[i].dppclk_mhz  = max_dppclk_mhz;
+			dcn3_02_soc.clock_limits[i].phyclk_mhz  = max_phyclk_mhz;
+			dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[0].dtbclk_mhz;
+			/* These clocks cannot come from bw_params; always fill from dcn3_02_soc[0] */
+			/* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+			dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz;
+			dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[0].socclk_mhz;
+			dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz;
+		}
+		/* re-init DML with updated bb */
+		dml_init_instance(&dc->dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30);
+		if (dc->current_state)
+			dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30);
+	}
+}
+
 static struct resource_funcs dcn302_res_pool_funcs = {
 		.destroy = dcn302_destroy_resource_pool,
 		.link_enc_create = dcn302_link_encoder_create,
@@ -1240,7 +1405,7 @@ static struct resource_funcs dcn302_res_pool_funcs = {
 		.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
 		.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
 		.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
-		.update_bw_bounding_box = dcn30_update_bw_bounding_box,
+		.update_bw_bounding_box = dcn302_update_bw_bounding_box,
 		.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
 };
 
@@ -1311,7 +1476,10 @@ static bool dcn302_resource_construct(
 	dc->caps.max_cursor_size = 256;
 	dc->caps.min_horizontal_blanking_period = 80;
 	dc->caps.dmdata_alloc_size = 2048;
-
+	dc->caps.mall_size_per_mem_channel = 4;
+	/* total size = mall per channel * num channels * 1024 * 1024 */
+	dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
+	dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
 	dc->caps.max_slave_planes = 1;
 	dc->caps.post_blend_color_processing = true;
 	dc->caps.force_dp_tps4_for_cp2520 = true;
@@ -1354,8 +1522,6 @@ static bool dcn302_resource_construct(
 
 	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
 		dc->debug = debug_defaults_drv;
-	else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS)
-		dc->debug = debug_defaults_diags;
 	else
 		dc->debug = debug_defaults_diags;
 
@@ -1469,6 +1635,14 @@ static bool dcn302_resource_construct(
 	}
 	pool->timing_generator_count = i;
 
+	/* PSR */
+	pool->psr = dmub_psr_create(ctx);
+	if (pool->psr == NULL) {
+		dm_error("DC: failed to create psr!\n");
+		BREAK_TO_DEBUGGER();
+		goto create_fail;
+	}
+
 	/* ABMs */
 	for (i = 0; i < pool->res_cap->num_timing_generator; i++) {
 		pool->multiple_abms[i] = dmub_abm_create(ctx, &abm_regs[i], &abm_shift, &abm_mask);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h
index 71f7deed18e34aae16b813e67de50f8e491e1797..42d2c73e30bc163801ed58e27dc1c7ccf1cfbc41 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h
@@ -30,4 +30,6 @@
 
 struct resource_pool *dcn302_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc);
 
+void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+
 #endif /* _DCN302_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 5da7677627a171638524191cbb4bcea3d229054a..cac0b2c0d31b23a5935ec69de039b08ff563da23 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -30,9 +30,10 @@ struct dc_link;
 
 struct cp_psp_stream_config {
 	uint8_t otg_inst;
-	uint8_t link_enc_inst;
-	uint8_t stream_enc_inst;
-	uint8_t mst_supported;
+	uint8_t dig_be;
+	uint8_t dig_fe;
+	uint8_t assr_enabled;
+	uint8_t mst_enabled;
 	void *dm_stream_ctx;
 	bool dpms_off;
 };
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 45f028986a8dbfe0fb4d4ad2b4b24d8dc757958c..0f3f510fd83b8a0b54542145df9c44a90507eed7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -3138,7 +3138,7 @@ static void CalculateFlipSchedule(
 				4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime + 0.125),
 				1) / 4.0;
 
-		if ((GPUVMEnable == true || DCCEnable == true)) {
+		if (GPUVMEnable || DCCEnable) {
 			mode_lib->vba.ImmediateFlipBW[0] = BandwidthAvailableForImmediateFlip
 					* ImmediateFlipBytes / TotImmediateFlipBytes;
 			TimeForFetchingRowInVBlankImmediateFlip = dml_max(
@@ -4168,10 +4168,11 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
 		locals->DIOSupport[i] = true;
 		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-			if (locals->OutputBppPerState[i][k] == BPP_INVALID
-					|| (mode_lib->vba.OutputFormat[k] == dm_420
+			if (!mode_lib->vba.skip_dio_check[k]
+					&& (locals->OutputBppPerState[i][k] == BPP_INVALID
+						|| (mode_lib->vba.OutputFormat[k] == dm_420
 							&& mode_lib->vba.Interlace[k] == true
-							&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true)) {
+							&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true))) {
 				locals->DIOSupport[i] = false;
 			}
 		}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 80170f9721ce949330d72408b81c48575af13101..210c96cd5b03b5bfe4277f84e3562c4f51e42fc4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -3263,6 +3263,7 @@ static void CalculateFlipSchedule(
 
 static unsigned int TruncToValidBPP(
 		double DecimalBPP,
+		double DesiredBPP,
 		bool DSCEnabled,
 		enum output_encoder_class Output,
 		enum output_format_class Format,
@@ -3270,31 +3271,31 @@ static unsigned int TruncToValidBPP(
 {
 	if (Output == dm_hdmi) {
 		if (Format == dm_420) {
-			if (DecimalBPP >= 18)
+			if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
 				return 18;
-			else if (DecimalBPP >= 15)
+			else if (DecimalBPP >= 15 && (DesiredBPP == 0 || DesiredBPP == 15))
 				return 15;
-			else if (DecimalBPP >= 12)
+			else if (DecimalBPP >= 12 && (DesiredBPP == 0 || DesiredBPP == 12))
 				return 12;
 			else
 				return BPP_INVALID;
 		} else if (Format == dm_444) {
-			if (DecimalBPP >= 36)
+			if (DecimalBPP >= 36 && (DesiredBPP == 0 || DesiredBPP == 36))
 				return 36;
-			else if (DecimalBPP >= 30)
+			else if (DecimalBPP >= 30 && (DesiredBPP == 0 || DesiredBPP == 30))
 				return 30;
-			else if (DecimalBPP >= 24)
+			else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
 				return 24;
-			else if (DecimalBPP >= 18)
+			else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
 				return 18;
 			else
 				return BPP_INVALID;
 		} else {
-			if (DecimalBPP / 1.5 >= 24)
+			if (DecimalBPP / 1.5 >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
 				return 24;
-			else if (DecimalBPP / 1.5 >= 20)
+			else if (DecimalBPP / 1.5 >= 20 && (DesiredBPP == 0 || DesiredBPP == 20))
 				return 20;
-			else if (DecimalBPP / 1.5 >= 16)
+			else if (DecimalBPP / 1.5 >= 16 && (DesiredBPP == 0 || DesiredBPP == 16))
 				return 16;
 			else
 				return BPP_INVALID;
@@ -3302,53 +3303,86 @@ static unsigned int TruncToValidBPP(
 	} else {
 		if (DSCEnabled) {
 			if (Format == dm_420) {
-				if (DecimalBPP < 6)
-					return BPP_INVALID;
-				else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1 / 16)
-					return 1.5 * DSCInputBitPerComponent - 1 / 16;
-				else
-					return dml_floor(16 * DecimalBPP, 1) / 16;
+				if (DesiredBPP == 0) {
+					if (DecimalBPP < 6)
+						return BPP_INVALID;
+					else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1.0 / 16.0)
+						return 1.5 * DSCInputBitPerComponent - 1.0 / 16.0;
+					else
+						return dml_floor(16 * DecimalBPP, 1) / 16.0;
+				} else {
+					if (DecimalBPP < 6
+							|| DesiredBPP < 6
+							|| DesiredBPP > 1.5 * DSCInputBitPerComponent - 1.0 / 16.0
+							|| DecimalBPP < DesiredBPP) {
+						return BPP_INVALID;
+					} else {
+						return DesiredBPP;
+					}
+				}
 			} else if (Format == dm_n422) {
-				if (DecimalBPP < 7)
-					return BPP_INVALID;
-				else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1 / 16)
-					return 2 * DSCInputBitPerComponent - 1 / 16;
-				else
-					return dml_floor(16 * DecimalBPP, 1) / 16;
+				if (DesiredBPP == 0) {
+					if (DecimalBPP < 7)
+						return BPP_INVALID;
+					else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1.0 / 16.0)
+						return 2 * DSCInputBitPerComponent - 1.0 / 16.0;
+					else
+						return dml_floor(16 * DecimalBPP, 1) / 16.0;
+				} else {
+					if (DecimalBPP < 7
+							|| DesiredBPP < 7
+							|| DesiredBPP > 2 * DSCInputBitPerComponent - 1.0 / 16.0
+							|| DecimalBPP < DesiredBPP) {
+						return BPP_INVALID;
+					} else {
+						return DesiredBPP;
+					}
+				}
 			} else {
-				if (DecimalBPP < 8)
-					return BPP_INVALID;
-				else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1 / 16)
-					return 3 * DSCInputBitPerComponent - 1 / 16;
-				else
-					return dml_floor(16 * DecimalBPP, 1) / 16;
+				if (DesiredBPP == 0) {
+					if (DecimalBPP < 8)
+						return BPP_INVALID;
+					else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1.0 / 16.0)
+						return 3 * DSCInputBitPerComponent - 1.0 / 16.0;
+					else
+						return dml_floor(16 * DecimalBPP, 1) / 16.0;
+				} else {
+					if (DecimalBPP < 8
+							|| DesiredBPP < 8
+							|| DesiredBPP > 3 * DSCInputBitPerComponent - 1.0 / 16.0
+							|| DecimalBPP < DesiredBPP) {
+						return BPP_INVALID;
+					} else {
+						return DesiredBPP;
+					}
+				}
 			}
 		} else if (Format == dm_420) {
-			if (DecimalBPP >= 18)
+			if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
 				return 18;
-			else if (DecimalBPP >= 15)
+			else if (DecimalBPP >= 15 && (DesiredBPP == 0 || DesiredBPP == 15))
 				return 15;
-			else if (DecimalBPP >= 12)
+			else if (DecimalBPP >= 12 && (DesiredBPP == 0 || DesiredBPP == 12))
 				return 12;
 			else
 				return BPP_INVALID;
 		} else if (Format == dm_s422 || Format == dm_n422) {
-			if (DecimalBPP >= 24)
+			if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
 				return 24;
-			else if (DecimalBPP >= 20)
+			else if (DecimalBPP >= 20 && (DesiredBPP == 0 || DesiredBPP == 20))
 				return 20;
-			else if (DecimalBPP >= 16)
+			else if (DecimalBPP >= 16 && (DesiredBPP == 0 || DesiredBPP == 16))
 				return 16;
 			else
 				return BPP_INVALID;
 		} else {
-			if (DecimalBPP >= 36)
+			if (DecimalBPP >= 36 && (DesiredBPP == 0 || DesiredBPP == 36))
 				return 36;
-			else if (DecimalBPP >= 30)
+			else if (DecimalBPP >= 30 && (DesiredBPP == 0 || DesiredBPP == 30))
 				return 30;
-			else if (DecimalBPP >= 24)
+			else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
 				return 24;
-			else if (DecimalBPP >= 18)
+			else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
 				return 18;
 			else
 				return BPP_INVALID;
@@ -4137,6 +4171,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 					locals->RequiresFEC[i][k] = 0;
 					locals->OutputBppPerState[i][k] = TruncToValidBPP(
 							dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24,
+							mode_lib->vba.ForcedOutputLinkBPP[k],
 							false,
 							mode_lib->vba.Output[k],
 							mode_lib->vba.OutputFormat[k],
@@ -4153,6 +4188,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 						mode_lib->vba.Outbpp = TruncToValidBPP(
 								(1.0 - mode_lib->vba.Downspreading / 100.0) * 270.0
 								* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+								mode_lib->vba.ForcedOutputLinkBPP[k],
 								false,
 								mode_lib->vba.Output[k],
 								mode_lib->vba.OutputFormat[k],
@@ -4160,6 +4196,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 						mode_lib->vba.OutbppDSC = TruncToValidBPP(
 								(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 270.0
 								* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+								mode_lib->vba.ForcedOutputLinkBPP[k],
 								true,
 								mode_lib->vba.Output[k],
 								mode_lib->vba.OutputFormat[k],
@@ -4182,6 +4219,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 						mode_lib->vba.Outbpp = TruncToValidBPP(
 								(1.0 - mode_lib->vba.Downspreading / 100.0) * 540.0
 								* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+								mode_lib->vba.ForcedOutputLinkBPP[k],
 									false,
 									mode_lib->vba.Output[k],
 									mode_lib->vba.OutputFormat[k],
@@ -4189,6 +4227,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 						mode_lib->vba.OutbppDSC = TruncToValidBPP(
 								(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 540.0
 								* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+								mode_lib->vba.ForcedOutputLinkBPP[k],
 								true,
 								mode_lib->vba.Output[k],
 								mode_lib->vba.OutputFormat[k],
@@ -4213,6 +4252,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 						mode_lib->vba.Outbpp = TruncToValidBPP(
 								(1.0 - mode_lib->vba.Downspreading / 100.0) * 810.0
 								* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+								mode_lib->vba.ForcedOutputLinkBPP[k],
 								false,
 								mode_lib->vba.Output[k],
 								mode_lib->vba.OutputFormat[k],
@@ -4220,6 +4260,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 						mode_lib->vba.OutbppDSC = TruncToValidBPP(
 								(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 810.0
 								* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+								mode_lib->vba.ForcedOutputLinkBPP[k],
 								true,
 								mode_lib->vba.Output[k],
 								mode_lib->vba.OutputFormat[k],
@@ -4248,10 +4289,11 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
 		locals->DIOSupport[i] = true;
 		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-			if (locals->OutputBppPerState[i][k] == BPP_INVALID
-					|| (mode_lib->vba.OutputFormat[k] == dm_420
+			if (!mode_lib->vba.skip_dio_check[k]
+					&& (locals->OutputBppPerState[i][k] == BPP_INVALID
+						|| (mode_lib->vba.OutputFormat[k] == dm_420
 							&& mode_lib->vba.Interlace[k] == true
-							&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true)) {
+							&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true))) {
 				locals->DIOSupport[i] = false;
 			}
 		}
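
The hunks above thread the new DesiredBPP argument through TruncToValidBPP(): zero preserves the old behaviour (truncate the available link bandwidth down to the best valid rate), while a non-zero value is honoured only when it is itself one of the valid rates and affordable. A minimal sketch of that contract, with an illustrative rate table standing in for the per-format if/else ladders:

/* Sketch only, not the kernel code: DesiredBPP == 0 means "pick the
 * highest valid rate not exceeding DecimalBPP"; any other value is
 * returned iff it is a valid rate that DecimalBPP can afford.
 */
static unsigned int trunc_or_force_bpp(double DecimalBPP,
				       unsigned int DesiredBPP,
				       const unsigned int *valid, int n)
{
	int i;

	for (i = 0; i < n; i++) {	/* valid[] sorted high to low */
		if (DecimalBPP >= valid[i] &&
		    (DesiredBPP == 0 || DesiredBPP == valid[i]))
			return valid[i];
	}
	return 0;			/* BPP_INVALID */
}
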
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 86ff24dffc3efbc7e4830c53015d0f979b8f0249..398210d1af34fcb8eaf3270833d71bfb30ba30f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -4257,10 +4257,11 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
 		locals->DIOSupport[i] = true;
 		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-			if (locals->OutputBppPerState[i][k] == BPP_INVALID
-					|| (mode_lib->vba.OutputFormat[k] == dm_420
+			if (!mode_lib->vba.skip_dio_check[k]
+					&& (locals->OutputBppPerState[i][k] == BPP_INVALID
+						|| (mode_lib->vba.OutputFormat[k] == dm_420
 							&& mode_lib->vba.Interlace[k] == true
-							&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true)) {
+							&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true))) {
 				locals->DIOSupport[i] = false;
 			}
 		}
@@ -5121,48 +5122,48 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 		for (j = 0; j < 2; j++) {
 			enum dm_validation_status status = DML_VALIDATION_OK;
 
-			if (mode_lib->vba.ScaleRatioAndTapsSupport != true) {
+			if (!mode_lib->vba.ScaleRatioAndTapsSupport) {
 				status = DML_FAIL_SCALE_RATIO_TAP;
-			} else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
+			} else if (!mode_lib->vba.SourceFormatPixelAndScanSupport) {
 				status = DML_FAIL_SOURCE_PIXEL_FORMAT;
-			} else if (locals->ViewportSizeSupport[i][0] != true) {
+			} else if (!locals->ViewportSizeSupport[i][0]) {
 				status = DML_FAIL_VIEWPORT_SIZE;
-			} else if (locals->DIOSupport[i] != true) {
+			} else if (!locals->DIOSupport[i]) {
 				status = DML_FAIL_DIO_SUPPORT;
-			} else if (locals->NotEnoughDSCUnits[i] != false) {
+			} else if (locals->NotEnoughDSCUnits[i]) {
 				status = DML_FAIL_NOT_ENOUGH_DSC;
-			} else if (locals->DSCCLKRequiredMoreThanSupported[i] != false) {
+			} else if (locals->DSCCLKRequiredMoreThanSupported[i]) {
 				status = DML_FAIL_DSC_CLK_REQUIRED;
-			} else if (locals->ROBSupport[i][0] != true) {
+			} else if (!locals->ROBSupport[i][0]) {
 				status = DML_FAIL_REORDERING_BUFFER;
-			} else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
+			} else if (!locals->DISPCLK_DPPCLK_Support[i][j]) {
 				status = DML_FAIL_DISPCLK_DPPCLK;
-			} else if (locals->TotalAvailablePipesSupport[i][j] != true) {
+			} else if (!locals->TotalAvailablePipesSupport[i][j]) {
 				status = DML_FAIL_TOTAL_AVAILABLE_PIPES;
-			} else if (mode_lib->vba.NumberOfOTGSupport != true) {
+			} else if (!mode_lib->vba.NumberOfOTGSupport) {
 				status = DML_FAIL_NUM_OTG;
-			} else if (mode_lib->vba.WritebackModeSupport != true) {
+			} else if (!mode_lib->vba.WritebackModeSupport) {
 				status = DML_FAIL_WRITEBACK_MODE;
-			} else if (mode_lib->vba.WritebackLatencySupport != true) {
+			} else if (!mode_lib->vba.WritebackLatencySupport) {
 				status = DML_FAIL_WRITEBACK_LATENCY;
-			} else if (mode_lib->vba.WritebackScaleRatioAndTapsSupport != true) {
+			} else if (!mode_lib->vba.WritebackScaleRatioAndTapsSupport) {
 				status = DML_FAIL_WRITEBACK_SCALE_RATIO_TAP;
-			} else if (mode_lib->vba.CursorSupport != true) {
+			} else if (!mode_lib->vba.CursorSupport) {
 				status = DML_FAIL_CURSOR_SUPPORT;
-			} else if (mode_lib->vba.PitchSupport != true) {
+			} else if (!mode_lib->vba.PitchSupport) {
 				status = DML_FAIL_PITCH_SUPPORT;
-			} else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
+			} else if (!locals->TotalVerticalActiveBandwidthSupport[i][0]) {
 				status = DML_FAIL_TOTAL_V_ACTIVE_BW;
-			} else if (locals->PTEBufferSizeNotExceeded[i][j] != true) {
+			} else if (!locals->PTEBufferSizeNotExceeded[i][j]) {
 				status = DML_FAIL_PTE_BUFFER_SIZE;
-			} else if (mode_lib->vba.NonsupportedDSCInputBPC != false) {
+			} else if (mode_lib->vba.NonsupportedDSCInputBPC) {
 				status = DML_FAIL_DSC_INPUT_BPC;
-			} else if ((mode_lib->vba.HostVMEnable != false
-					&& locals->ImmediateFlipSupportedForState[i][j] != true)) {
+			} else if ((mode_lib->vba.HostVMEnable
+					&& !locals->ImmediateFlipSupportedForState[i][j])) {
 				status = DML_FAIL_HOST_VM_IMMEDIATE_FLIP;
-			} else if (locals->PrefetchSupported[i][j] != true) {
+			} else if (!locals->PrefetchSupported[i][j]) {
 				status = DML_FAIL_PREFETCH_SUPPORT;
-			} else if (locals->VRatioInPrefetchSupported[i][j] != true) {
+			} else if (!locals->VRatioInPrefetchSupported[i][j]) {
 				status = DML_FAIL_V_RATIO_PREFETCH;
 			}
 
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 319dec59bcd1e585d82ea351560ee5d7d80a6a9e..bc07082c135755950e31b63af9bec7356907db1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -1219,13 +1219,13 @@ static bool CalculatePrefetchSchedule(
 			dml_print("DML: prefetch_bw_equ: %f\n", prefetch_bw_equ);
 
 			if (prefetch_bw_equ > 0) {
-				if (GPUVMEnable == true) {
+				if (GPUVMEnable) {
 					Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
 				} else {
 					Tvm_equ = LineTime / 4;
 				}
 
-				if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+				if ((GPUVMEnable || myPipe->DCCEnable)) {
 					Tr0_equ = dml_max4(
 							(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_equ,
 							Tr0_trips,
@@ -4263,7 +4263,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	for (i = 0; i < v->soc.num_states; i++) {
 		v->DIOSupport[i] = true;
 		for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
-			if (v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi)
+			if (!v->skip_dio_check[k] && v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi)
 					&& (v->OutputBppPerState[i][k] == 0
 							|| (v->OutputFormat[k] == dm_420 && v->Interlace[k] == true && v->ProgressiveToInterlaceUnitInOPP == true))) {
 				v->DIOSupport[i] = false;
@@ -5558,7 +5558,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
 		}
 	}
 
-	if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
+	if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
 		*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
 	} else if (((mode_lib->vba.SynchronizedVBlank == true || mode_lib->vba.TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
 		*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 5b5916b5bc7107aa240beb70258f8bb8a08dd815..0f14f205ebe593c3ab3a25d1091de8395fba7a71 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -165,8 +165,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
 	unsigned int swath_bytes_c = 0;
 	unsigned int full_swath_bytes_packed_l = 0;
 	unsigned int full_swath_bytes_packed_c = 0;
-	bool req128_l = 0;
-	bool req128_c = 0;
+	bool req128_l = false;
+	bool req128_c = false;
 	bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
 	bool surf_vert = (pipe_src_param.source_scan == dm_vert);
 	unsigned int log2_swath_height_l = 0;
@@ -191,37 +191,37 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
 		total_swath_bytes = 2 * full_swath_bytes_packed_l;
 
 	if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
-		req128_l = 0;
-		req128_c = 0;
+		req128_l = false;
+		req128_c = false;
 		swath_bytes_l = full_swath_bytes_packed_l;
 		swath_bytes_c = full_swath_bytes_packed_c;
 	} else if (!rq_param->yuv420) {
-		req128_l = 1;
-		req128_c = 0;
+		req128_l = true;
+		req128_c = false;
 		swath_bytes_c = full_swath_bytes_packed_c;
 		swath_bytes_l = full_swath_bytes_packed_l / 2;
 	} else if ((double)full_swath_bytes_packed_l / (double)full_swath_bytes_packed_c < 1.5) {
-		req128_l = 0;
-		req128_c = 1;
+		req128_l = false;
+		req128_c = true;
 		swath_bytes_l = full_swath_bytes_packed_l;
 		swath_bytes_c = full_swath_bytes_packed_c / 2;
 
 		total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
 
 		if (total_swath_bytes > detile_buf_size_in_bytes) {
-			req128_l = 1;
+			req128_l = true;
 			swath_bytes_l = full_swath_bytes_packed_l / 2;
 		}
 	} else {
-		req128_l = 1;
-		req128_c = 0;
+		req128_l = true;
+		req128_c = false;
 		swath_bytes_l = full_swath_bytes_packed_l/2;
 		swath_bytes_c = full_swath_bytes_packed_c;
 
 		total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
 
 		if (total_swath_bytes > detile_buf_size_in_bytes) {
-			req128_c = 1;
+			req128_c = true;
 			swath_bytes_c = full_swath_bytes_packed_c/2;
 		}
 	}
@@ -1006,8 +1006,8 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
 
 	double min_dst_y_ttu_vblank = 0;
 	unsigned int dlg_vblank_start = 0;
-	bool dual_plane = 0;
-	bool mode_422 = 0;
+	bool dual_plane = false;
+	bool mode_422 = false;
 	unsigned int access_dir = 0;
 	unsigned int vp_height_l = 0;
 	unsigned int vp_width_l = 0;
@@ -1021,7 +1021,7 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
 	double hratio_c = 0;
 	double vratio_l = 0;
 	double vratio_c = 0;
-	bool scl_enable = 0;
+	bool scl_enable = false;
 
 	double line_time_in_us = 0;
 	//	double vinit_l;
@@ -1156,7 +1156,7 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
 	// Source
 	//			 dcc_en			  = src.dcc;
 	dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
-	mode_422 = 0; // TODO
+	mode_422 = false; // TODO
 	access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
 	vp_height_l = src->viewport_height;
 	vp_width_l = src->viewport_width;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index dd0c3b1780d702967ad3ef5a7db79cc14df1a6d7..0c5128187e0896868134d3436e41fc9256effc0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -297,6 +297,7 @@ struct _vcs_dpi_display_output_params_st {
 	int num_active_wb;
 	int output_bpc;
 	int output_type;
+	int is_virtual;
 	int output_format;
 	int dsc_slices;
 	int max_audio_sample_rate;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index c9fbb33f05a32e8c918ad30098186aae6b92c469..bc0485a59018ad2622e3c19c2895b340cbe44c2f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -451,6 +451,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
 				dout->output_bpp;
 		mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] =
 				(enum output_encoder_class) (dout->output_type);
+		mode_lib->vba.skip_dio_check[mode_lib->vba.NumberOfActivePlanes] =
+				dout->is_virtual;
 
 		if (!dout->dsc_enable)
 			mode_lib->vba.ForcedOutputLinkBPP[mode_lib->vba.NumberOfActivePlanes] = dout->output_bpp;
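
fetch_pipe_params() now copies dout->is_virtual into the per-plane skip_dio_check[] array, and the DIOSupport loops in the DML variants above consult it so that virtual streams, which have no physical encoder to validate, can no longer fail link validation. The pattern in isolation (types invented for the sketch):

/* Sketch with placeholder types: virtual outputs are exempted from the
 * display-IO support check instead of tripping on BPP_INVALID.
 */
struct plane_out {
	int is_virtual;		/* mirrors dout->is_virtual */
	int bpp_invalid;	/* OutputBppPerState[i][k] == BPP_INVALID */
};

static int dio_support(const struct plane_out *p, int nplanes)
{
	int k;

	for (k = 0; k < nplanes; k++) {
		if (!p[k].is_virtual && p[k].bpp_invalid)
			return 0;	/* one real plane failing is enough */
	}
	return 1;
}
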
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 3529fedc4c523c1ac37eeb763ef7c8360ae3ff8d..025aa5bd8ea0a0191d37343b06a69a4e93257861 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -340,6 +340,7 @@ struct vba_vars_st {
 	unsigned int DSCInputBitPerComponent[DC__NUM_DPP__MAX];
 	enum output_format_class OutputFormat[DC__NUM_DPP__MAX];
 	enum output_encoder_class Output[DC__NUM_DPP__MAX];
+	bool skip_dio_check[DC__NUM_DPP__MAX];
 	unsigned int BlendingAndTiming[DC__NUM_DPP__MAX];
 	bool SynchronizedVBlank;
 	unsigned int NumberOfCursors[DC__NUM_DPP__MAX];
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
index df68430aeb0c2e08ce2e05daa63ddff6838d18c3..c6e28f6bf1a2791eb9fa19c70d02addde88d3249 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
@@ -28,6 +28,7 @@
  */
 
 #include "dm_services.h"
+#include "hw_factory_diag.h"
 #include "include/gpio_types.h"
 #include "../hw_factory.h"
 
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
index 8a74f6adb8eeeec433d553643ab19af5c44a3adc..bf68eb1d9a1d2fc7fa6dd5e659c29ed589179252 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
@@ -26,6 +26,8 @@
 #ifndef __DAL_HW_FACTORY_DIAG_FPGA_H__
 #define __DAL_HW_FACTORY_DIAG_FPGA_H__
 
+struct hw_factory;
+
 /* Initialize HW factory function pointers and pin info */
 void dal_hw_factory_diag_fpga_init(struct hw_factory *factory);
 
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
index bf906884692714a17ae7516fa660c007b6da8894..e5138a5a8eb5afaef039e45a3270c01d939e8033 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
@@ -24,6 +24,7 @@
  */
 
 #include "dm_services.h"
+#include "hw_translate_diag.h"
 #include "include/gpio_types.h"
 
 #include "../hw_translate.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
index 1ae153eab31d7e02765599ddf0ff6ee0dcf47347..7a8cec2d7a902ac7e3bebbf6ac2c2d096ecacf7f 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
@@ -107,13 +107,12 @@ static enum gpio_result set_config(
 					msleep(3);
 			}
 		} else {
-			uint32_t reg2;
 			uint32_t sda_pd_dis = 0;
 			uint32_t scl_pd_dis = 0;
 
-			reg2 = REG_GET_2(gpio.MASK_reg,
-					DC_GPIO_SDA_PD_DIS, &sda_pd_dis,
-					DC_GPIO_SCL_PD_DIS, &scl_pd_dis);
+			REG_GET_2(gpio.MASK_reg,
+				  DC_GPIO_SDA_PD_DIS, &sda_pd_dis,
+				  DC_GPIO_SCL_PD_DIS, &scl_pd_dis);
 
 			if (sda_pd_dis) {
 				REG_SET(gpio.MASK_reg, regval,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index da73bfb3cacd04dce9528125750f20e793fb5b7a..92c65d2fa7d713d2e92c4798fd9a314fd39a26f7 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -119,17 +119,3 @@ bool dal_hw_factory_init(
 		return false;
 	}
 }
-
-void dal_hw_factory_destroy(
-	struct dc_context *ctx,
-	struct hw_factory **factory)
-{
-	if (!factory || !*factory) {
-		BREAK_TO_DEBUGGER();
-		return;
-	}
-
-	kfree(*factory);
-
-	*factory = NULL;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 2d77eac66cb036cfdb85e5ff9cf379f59d7140d9..8efa1b80546deaa03f0a93c7d610a45d6830f3ca 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -333,6 +333,7 @@ struct pipe_ctx {
 	union pipe_update_flags update_flags;
 	struct dwbc *dwbc;
 	struct mcif_wb *mcif_wb;
+	bool vtp_locked;
 };
 
 struct resource_context {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index ffd37696b6b9adbe161215fc96c5b599fd02f3ae..316301fc1e305eeb17eb24c29e11253f50fa3b50 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -309,9 +309,9 @@ static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_cl
 static inline bool should_update_pstate_support(bool safe_to_lower, bool calc_support, bool cur_support)
 {
 	if (cur_support != calc_support) {
-		if (calc_support == true && safe_to_lower)
+		if (calc_support && safe_to_lower)
 			return true;
-		else if (calc_support == false && !safe_to_lower)
+		else if (!calc_support && !safe_to_lower)
 			return true;
 	}
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index 69d9fbfb4bec18a183fc604b315ad2c4a8e430ac..cd1c0dc32bf8a8dd5e695ee2f67aebde062d5f07 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -74,6 +74,16 @@ struct dmcu_funcs {
 	bool (*is_dmcu_initialized)(struct dmcu *dmcu);
 	bool (*lock_phy)(struct dmcu *dmcu);
 	bool (*unlock_phy)(struct dmcu *dmcu);
+	bool (*send_edid_cea)(struct dmcu *dmcu,
+			int offset,
+			int total_length,
+			uint8_t *data,
+			int length);
+	bool (*recv_amd_vsdb)(struct dmcu *dmcu,
+			int *version,
+			int *min_frame_rate,
+			int *max_frame_rate);
+	bool (*recv_edid_cea_ack)(struct dmcu *dmcu, int *offset);
 };
 
 #endif
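
The three new dmcu_funcs hooks suggest a chunked transfer: EDID CEA data is pushed through the DMCU in (offset, length) slices against a known total_length, with an ack reporting an offset back. A hedged sketch of a caller driving them, building against dmcu.h; the loop, the chunk size, and the assumption that the ack echoes the offset just sent are illustrative, not taken from the patch:

/* Illustrative driver loop for the new hooks; the ack-echoes-offset
 * convention is an assumption, not documented by the patch.
 */
#define EDID_CEA_CHUNK 8

static bool send_edid_cea_block(struct dmcu *dmcu, uint8_t *buf, int total)
{
	int off, ack;

	for (off = 0; off < total; off += EDID_CEA_CHUNK) {
		int len = total - off < EDID_CEA_CHUNK ?
			  total - off : EDID_CEA_CHUNK;

		if (!dmcu->funcs->send_edid_cea(dmcu, off, total,
						buf + off, len))
			return false;
		if (!dmcu->funcs->recv_edid_cea_ack(dmcu, &ack) || ack != off)
			return false;
	}
	return true;
}
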
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index f7632fe25976d399eb220668c4790432d25b26ab..754832d216fdabbdfa5c65fede5f12e7285964ab 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -190,6 +190,7 @@ struct timing_generator_funcs {
 	void (*set_blank)(struct timing_generator *tg,
 					bool enable_blanking);
 	bool (*is_blanked)(struct timing_generator *tg);
+	bool (*is_locked)(struct timing_generator *tg);
 	void (*set_overscan_blank_color) (struct timing_generator *tg, const struct tg_color *color);
 	void (*set_blank_color)(struct timing_generator *tg, const struct tg_color *color);
 	void (*set_colors)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 62804dc7b6981c0928c4b70b3a3baeb8e84c6c6b..0586ab2ffd6ab46d9001f0ed9fa0cdc244676f7e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -54,6 +54,7 @@ struct hw_sequencer_funcs {
 	/* Embedded Display Related */
 	void (*edp_power_control)(struct dc_link *link, bool enable);
 	void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
+	void (*edp_wait_for_T12)(struct dc_link *link);
 
 	/* Pipe Programming Related */
 	void (*init_hw)(struct dc *dc);
@@ -217,6 +218,9 @@ struct hw_sequencer_funcs {
 	/* Idle Optimization Related */
 	bool (*apply_idle_power_optimizations)(struct dc *dc, bool enable);
 
+	bool (*does_plane_fit_in_mall)(struct dc *dc, struct dc_plane_state *plane,
+			struct dc_cursor_attributes *cursor_attr);
+
 	bool (*is_abm_supported)(struct dc *dc,
 			struct dc_state *context, struct dc_stream_state *stream);
 
@@ -227,6 +231,10 @@ struct hw_sequencer_funcs {
 			enum dc_color_depth color_depth,
 			const struct tg_color *solid_color,
 			int width, int height, int offset);
+
+	void (*set_hubp_blank)(const struct dc *dc,
+			struct pipe_ctx *pipe_ctx,
+			bool blank_enable);
 };
 
 void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index f956b3bde680dfee271115d21587748e945ac4fd..34f43cb650f8c7c135225a4488e697380b0f3783 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -58,6 +58,18 @@ enum dc_irq_source to_dal_irq_source_dcn10(
 		return DC_IRQ_SOURCE_VBLANK5;
 	case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
 		return DC_IRQ_SOURCE_VBLANK6;
+	case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
+		return DC_IRQ_SOURCE_DC1_VLINE0;
+	case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
+		return DC_IRQ_SOURCE_DC2_VLINE0;
+	case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
+		return DC_IRQ_SOURCE_DC3_VLINE0;
+	case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
+		return DC_IRQ_SOURCE_DC4_VLINE0;
+	case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
+		return DC_IRQ_SOURCE_DC5_VLINE0;
+	case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
+		return DC_IRQ_SOURCE_DC6_VLINE0;
 	case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
 		return DC_IRQ_SOURCE_VUPDATE1;
 	case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
@@ -167,6 +179,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
 	.ack = NULL
 };
 
+static const struct irq_source_info_funcs vline0_irq_info_funcs = {
+	.set = NULL,
+	.ack = NULL
+};
+
 static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
 	.set = NULL,
 	.ack = NULL
@@ -241,6 +258,14 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
 		.funcs = &vblank_irq_info_funcs\
 	}
 
+#define vline0_int_entry(reg_num)\
+	[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
+		IRQ_REG_ENTRY(OTG, reg_num,\
+			OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
+			OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
+		.funcs = &vline0_irq_info_funcs\
+	}
+
 #define dummy_irq_entry() \
 	{\
 		.funcs = &dummy_irq_info_funcs\
@@ -349,6 +374,12 @@ irq_source_info_dcn10[DAL_IRQ_SOURCES_NUMBER] = {
 	vblank_int_entry(3),
 	vblank_int_entry(4),
 	vblank_int_entry(5),
+	vline0_int_entry(0),
+	vline0_int_entry(1),
+	vline0_int_entry(2),
+	vline0_int_entry(3),
+	vline0_int_entry(4),
+	vline0_int_entry(5),
 };
 
 static const struct irq_service_funcs irq_service_funcs_dcn10 = {
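
vline0_int_entry() follows the table's existing convention: a macro emitting a designated initializer at a computed enum index, so OTG instance n lands at DC_IRQ_SOURCE_DC1_VLINE0 + n. The shape of the pattern on its own (all names invented for the sketch):

/* Standalone illustration of the designated-initializer table pattern. */
enum src { SRC_VLINE0_1 = 8, SRC_VLINE0_2, SRC_VLINE0_3, SRC_COUNT = 16 };

struct info { int otg; };

#define vline0_entry(n) [SRC_VLINE0_1 + (n)] = { .otg = (n) }

static const struct info table[SRC_COUNT] = {
	vline0_entry(0),
	vline0_entry(1),
	vline0_entry(2),
};
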
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index 6bf27bde87240b1b4b87565d9e0c9c1b856d67b0..5f245bde54ff7664cf749a8d759f47b260ff657d 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -79,7 +79,7 @@ void dal_irq_service_destroy(struct irq_service **irq_service)
 	*irq_service = NULL;
 }
 
-const struct irq_source_info *find_irq_source_info(
+static const struct irq_source_info *find_irq_source_info(
 	struct irq_service *irq_service,
 	enum dc_irq_source source)
 {
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index d0ccd81ad5b4d8f04c297665bc963cd97677df7f..87812d81fed3c9501bc220547172795acebfbfd1 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -160,6 +160,7 @@ enum irq_type
 	IRQ_TYPE_PFLIP = DC_IRQ_SOURCE_PFLIP1,
 	IRQ_TYPE_VUPDATE = DC_IRQ_SOURCE_VUPDATE1,
 	IRQ_TYPE_VBLANK = DC_IRQ_SOURCE_VBLANK1,
+	IRQ_TYPE_VLINE0 = DC_IRQ_SOURCE_DC1_VLINE0,
 };
 
 #define DAL_VALID_IRQ_SRC_NUM(src) \
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 249a076d6f69c573eece0e3a9d23fe10bc7d7638..072b4e7e624b006aa0eced5f99fd3824b73e8f9a 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -47,10 +47,10 @@
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0xf51b86a
+#define DMUB_FW_VERSION_GIT_HASH 0x6444c02e7
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 47
+#define DMUB_FW_VERSION_REVISION 51
 #define DMUB_FW_VERSION_TEST 0
 #define DMUB_FW_VERSION_VBIOS 0
 #define DMUB_FW_VERSION_HOTFIX 0
@@ -458,6 +458,10 @@ struct dmub_rb_cmd_mall {
 	uint16_t cursor_pitch;
 	uint16_t cursor_height;
 	uint8_t cursor_bpp;
+	uint8_t debug_bits;
+
+	uint8_t reserved1;
+	uint8_t reserved2;
 };
 
 struct dmub_cmd_digx_encoder_control_data {
@@ -487,13 +491,34 @@ struct dmub_rb_cmd_enable_disp_power_gating {
 	struct dmub_cmd_enable_disp_power_gating_data power_gating;
 };
 
-struct dmub_cmd_dig1_transmitter_control_data {
+struct dmub_dig_transmitter_control_data_v1_7 {
+	uint8_t phyid; /**< 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF */
+	uint8_t action; /**< Defined as ATOM_TRANSMITER_ACTION_xxx */
+	union {
+		uint8_t digmode; /**< enum atom_encode_mode_def */
+		uint8_t dplaneset; /**< DP voltage swing and pre-emphasis value, "DP_LANE_SET__xDB_y_zV" */
+	} mode_laneset;
+	uint8_t lanenum; /**< Number of lanes */
+	union {
+		uint32_t symclk_10khz; /**< Symbol Clock in 10 kHz units */
+	} symclk_units;
+	uint8_t hpdsel; /**< =1: HPD1, =2: HPD2, ..., =6: HPD6, =0: HPD is not assigned */
+	uint8_t digfe_sel; /**< DIG front-end selection, bit0 means DIG0 FE is enabled */
+	uint8_t connobj_id; /**< Connector Object Id defined in ObjectId.h */
+	uint8_t reserved0; /**< For future use */
+	uint8_t reserved1; /**< For future use */
+	uint8_t reserved2[3]; /**< For future use */
+	uint32_t reserved3[11]; /**< For future use */
+};
+
+union dmub_cmd_dig1_transmitter_control_data {
 	struct dig_transmitter_control_parameters_v1_6 dig;
+	struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7;
 };
 
 struct dmub_rb_cmd_dig1_transmitter_control {
 	struct dmub_cmd_header header;
-	struct dmub_cmd_dig1_transmitter_control_data transmitter_control;
+	union dmub_cmd_dig1_transmitter_control_data transmitter_control;
 };
 
 struct dmub_rb_cmd_dpphy_init {
@@ -624,6 +649,7 @@ enum dmub_cmd_mall_type {
 	DMUB_CMD__MALL_ACTION_ALLOW = 0,
 	DMUB_CMD__MALL_ACTION_DISALLOW = 1,
 	DMUB_CMD__MALL_ACTION_COPY_CURSOR = 2,
+	DMUB_CMD__MALL_ACTION_NO_DF_REQ = 3,
 };
 
 struct dmub_cmd_psr_copy_settings_data {
@@ -648,6 +674,7 @@ struct dmub_cmd_psr_copy_settings_data {
 	uint8_t multi_disp_optimizations_en;
 	uint16_t init_sdp_deadline;
 	uint16_t pad2;
+	uint32_t line_time_in_us;
 };
 
 struct dmub_rb_cmd_psr_copy_settings {
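
Turning dmub_cmd_dig1_transmitter_control_data into a union lets the v1.6 and v1.7 payload layouts share one command slot: the ring-buffer command keeps a single size and the producer and consumer pick the view matching the negotiated interface revision. In miniature (all names are placeholders):

#include <stdint.h>

/* Miniature of the versioned-payload union; fields are placeholders. */
struct xmit_v1_6 { uint8_t phyid, action; };
struct xmit_v1_7 { uint8_t phyid, action, hpdsel, digfe_sel; };

union xmit_data {
	struct xmit_v1_6 v1_6;
	struct xmit_v1_7 v1_7;		/* union size = largest revision */
};

struct xmit_cmd {
	uint8_t fw_if_version;		/* selects which view is valid */
	union xmit_data data;
};
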
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index cafba1d23c6a60837d2192da17b155a51def9770..8e8e65fa83c020619bd048dc4b191e16a3bb321d 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -81,6 +81,13 @@ static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in,
 	addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset;
 }
 
+bool dmub_dcn20_use_cached_inbox(struct dmub_srv *dmub)
+{
+	/* Cached inbox is not supported in this fw version range */
+	return !(dmub->fw_version >= DMUB_FW_VERSION(1, 0, 0) &&
+		 dmub->fw_version <= DMUB_FW_VERSION(1, 10, 0));
+}
+
 void dmub_dcn20_reset(struct dmub_srv *dmub)
 {
 	union dmub_gpint_data_register cmd;
@@ -216,7 +223,7 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
 	dmub_dcn20_translate_addr(&cw4->offset, fb_base, fb_offset, &offset);
 
 	/* New firmware can support CW4. */
-	if (dmub->fw_version > DMUB_FW_VERSION(1, 0, 10)) {
+	if (dmub_dcn20_use_cached_inbox(dmub)) {
 		REG_WRITE(DMCUB_REGION3_CW4_OFFSET, offset.u.low_part);
 		REG_WRITE(DMCUB_REGION3_CW4_OFFSET_HIGH, offset.u.high_part);
 		REG_WRITE(DMCUB_REGION3_CW4_BASE_ADDRESS, cw4->region.base);
@@ -255,7 +262,7 @@ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
 			      const struct dmub_region *inbox1)
 {
 	/* New firmware can support CW4 for the inbox. */
-	if (dmub->fw_version > DMUB_FW_VERSION(1, 0, 10))
+	if (dmub_dcn20_use_cached_inbox(dmub))
 		REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, inbox1->base);
 	else
 		REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000);
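
dmub_dcn20_use_cached_inbox() replaces two open-coded "> DMUB_FW_VERSION(1, 0, 10)" comparisons with one predicate that excludes an explicit version window. A sketch of the packed-version range check, assuming major/minor/revision are packed so plain integer comparison orders versions correctly (the real DMUB_FW_VERSION packing may differ):

/* Assumed packing; the real DMUB_FW_VERSION() macro may differ. */
#define FW_VERSION(maj, min, rev) \
	((unsigned int)(((maj) << 24) | ((min) << 16) | (rev)))

static int use_cached_inbox(unsigned int fw_version)
{
	/* Firmware 1.0.0 .. 1.10.0 cannot use the cached (CW4) inbox. */
	return !(fw_version >= FW_VERSION(1, 0, 0) &&
		 fw_version <= FW_VERSION(1, 10, 0));
}
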
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index d438f365cbb08f5405bc09902787dc9d47f62fc3..a62be9c0652e3ec19332b8ae3ffbcb5677c5a695 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -198,4 +198,6 @@ void dmub_dcn20_skip_dmub_panel_power_sequence(struct dmub_srv *dmub, bool skip)
 
 union dmub_fw_boot_status dmub_dcn20_get_fw_boot_status(struct dmub_srv *dmub);
 
+bool dmub_dcn20_use_cached_inbox(struct dmub_srv *dmub);
+
 #endif /* _DMUB_DCN20_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
index f00df02ded81bc9c1b054144d15323effebc83a4..b4bc0df2f14a3e66ff169c449f43fbb888d89004 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
@@ -26,6 +26,7 @@
 #include "../dmub_srv.h"
 #include "dmub_reg.h"
 #include "dmub_dcn20.h"
+#include "dmub_dcn30.h"
 
 #include "sienna_cichlid_ip_offset.h"
 #include "dcn/dcn_3_0_0_offset.h"
@@ -154,7 +155,7 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
 	offset = cw4->offset;
 
 	/* New firmware can support CW4. */
-	if (dmub->fw_version > DMUB_FW_VERSION(1, 0, 10)) {
+	if (dmub_dcn20_use_cached_inbox(dmub)) {
 		REG_WRITE(DMCUB_REGION3_CW4_OFFSET, offset.u.low_part);
 		REG_WRITE(DMCUB_REGION3_CW4_OFFSET_HIGH, offset.u.high_part);
 		REG_WRITE(DMCUB_REGION3_CW4_BASE_ADDRESS, cw4->region.base);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index f388d36af0b6c1e0505a5ef3758182c33d96bfde..61f64a295f06aa82c4dfb6092eb3d0be295dc067 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -406,6 +406,9 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
 	dmub->fb_offset = params->fb_offset;
 	dmub->psp_version = params->psp_version;
 
+	if (dmub->hw_funcs.reset)
+		dmub->hw_funcs.reset(dmub);
+
 	if (inst_fb && data_fb) {
 		cw0.offset.quad_part = inst_fb->gpu_addr;
 		cw0.region.base = DMUB_CW0_BASE;
@@ -427,9 +430,6 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
 
 	}
 
-	if (dmub->hw_funcs.reset)
-		dmub->hw_funcs.reset(dmub);
-
 	if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
 	    fw_state_fb && scratch_mem_fb) {
 		cw2.offset.quad_part = data_fb->gpu_addr;
@@ -489,9 +489,6 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
 	if (!dmub->sw_init)
 		return DMUB_STATUS_INVALID;
 
-	if (dmub->hw_init == false)
-		return DMUB_STATUS_OK;
-
 	if (dmub->hw_funcs.reset)
 		dmub->hw_funcs.reset(dmub);
 
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_table.c b/drivers/gpu/drm/amd/display/modules/color/color_table.c
index 692e536e7d05702694343e100499608f0b45f8de..410f2a82b9a2ebbe3a2e3872f55047f5106a7b36 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_table.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_table.c
@@ -1,10 +1,26 @@
 /*
- * Copyright (c) 2019 Advanced Micro Devices, Inc. (unpublished)
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
  *
- * All rights reserved.  This notice is intended as a precaution against
- * inadvertent publication and does not imply publication or any waiver
- * of confidentiality.  The year included in the foregoing notice is the
- * year of creation of the work.
  */
 
 #include "color_table.h"
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 6c678cfb82e39898f7e5134b9dc96d9d739e107e..5c22cf7e6118fb41960e762bf2e41bcdd83f327c 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -397,7 +397,7 @@ static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
 static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp)
 {
 	return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP &&
-			hdcp->connection.link.dp.mst_supported);
+			hdcp->connection.link.dp.mst_enabled);
 }
 
 static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 3a367a5968ae19c5e297a7337dacf0c04e5ec397..904ce9b88088eae800e7e5e21681aedac7e12c0b 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -106,7 +106,7 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
 	dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
 	dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
 	if (is_dp_hdcp(hdcp))
-		dtm_cmd->dtm_in_message.topology_update_v2.is_assr = link->dp.assr_supported;
+		dtm_cmd->dtm_in_message.topology_update_v2.is_assr = link->dp.assr_enabled;
 
 	dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
 	dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
@@ -548,6 +548,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
 			   TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
 			hdcp->connection.is_hdcp2_revoked = 1;
 			status = MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
+		} else {
+			status = MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
 		}
 	}
 	mutex_unlock(&psp->hdcp_context.mutex);
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index eed560eecbab498dce293105ca1add9add9a5dc9..d223ed3be5d3deca64218cc5752c75472c80d9aa 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -101,8 +101,8 @@ enum mod_hdcp_status {
 
 struct mod_hdcp_displayport {
 	uint8_t rev;
-	uint8_t assr_supported;
-	uint8_t mst_supported;
+	uint8_t assr_enabled;
+	uint8_t mst_enabled;
 };
 
 struct mod_hdcp_hdmi {
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 0fdf7a3e96deaca5c5e5a0f0f9fb3cf1b04d6d95..57f198de5e2cb3cdef09656ecc525ad73a34666d 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -409,16 +409,11 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
 }
 
 /**
- *****************************************************************************
- *  Function: mod_build_hf_vsif_infopacket
+ *  mod_build_hf_vsif_infopacket - Prepare the HDMI Vendor Specific InfoFrame.
+ *                                 Follows the HDMI spec to build up the Vendor Specific InfoFrame.
  *
- *  @brief
- *     Prepare HDMI Vendor Specific info frame.
- *     Follows HDMI Spec to build up Vendor Specific info frame
- *
- *  @param [in] stream: contains data we may need to construct VSIF (i.e. timing_3d_format, etc.)
- *  @param [out] info_packet:   output structure where to store VSIF
- *****************************************************************************
+ *  @stream:      contains data we may need to construct VSIF (e.g. timing_3d_format)
+ *  @info_packet: output structure in which to store the VSIF
  */
 void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
 		struct dc_info_packet *info_packet)
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 4fd8bce95d843140341bae3db3acd99ce9dad9ba..6270ecbd24389b8f47f684aa2142e7e312cb3585 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -266,7 +266,7 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
 	 * format U4.10.
 	 */
 	for (i = 1; i+1 < num_entries; i++) {
-		lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
+		lut_index = DIV_ROUNDUP((i * params.backlight_lut_array_size), num_entries);
 		ASSERT(lut_index < params.backlight_lut_array_size);
 
 		table->backlight_thresholds[i] = (big_endian) ?
@@ -278,7 +278,7 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
 	}
 }
 
-void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters params)
+static void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters params)
 {
 	unsigned int set = params.set;
 
@@ -452,7 +452,7 @@ void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters
 			params, ram_table);
 }
 
-void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params)
+static void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params)
 {
 	unsigned int set = params.set;
 
@@ -598,7 +598,7 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
 			params, ram_table, true);
 }
 
-void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params, bool big_endian)
+static void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params, bool big_endian)
 {
 	unsigned int i, j;
 	unsigned int set = params.set;
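
The lut_index change above swaps a truncating interpolation for a ceiling division. Assuming DIV_ROUNDUP is the usual (a + b - 1) / b integer ceiling (an assumption; the macro's definition is not shown in this patch), the resulting mapping can be inspected in isolation:

#include <stdio.h>

#define DIV_ROUNDUP(a, b) (((a) + (b) - 1) / (b))	/* assumed definition */

int main(void)
{
	unsigned int lut_size = 20, num_entries = 16, i;

	/* Same loop bounds as fill_backlight_transform_table_v_2_2(). */
	for (i = 1; i + 1 < num_entries; i++)
		printf("entry %2u -> lut index %2u\n",
		       i, DIV_ROUNDUP(i * lut_size, num_entries));
	return 0;
}
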
diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
index 9cb9ceb4d74dc28ae48b8acb80b659d03b8e5e09..a1ece3eecdf5e0af5fa3679b5d3d89f8334c657f 100644
--- a/drivers/gpu/drm/amd/include/amd_pcie.h
+++ b/drivers/gpu/drm/amd/include/amd_pcie.h
@@ -28,6 +28,7 @@
 #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2        0x00020000
 #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3        0x00040000
 #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4        0x00080000
+#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5        0x00100000
 #define CAIL_PCIE_LINK_SPEED_SUPPORT_MASK        0xFFFF0000
 #define CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT       16
 
@@ -36,6 +37,7 @@
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2   0x00000002
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3   0x00000004
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4   0x00000008
+#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5   0x00000010
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK   0x0000FFFF
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT  0
 
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 9676016a37ce906f4eef84c4f8b5cf8fabc8450e..43ed6291b2b89acea6e0f736043afde1fa1e2a68 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -213,6 +213,7 @@ enum PP_FEATURE_MASK {
 	PP_ACG_MASK = 0x10000,
 	PP_STUTTER_MODE = 0x20000,
 	PP_AVFS_MASK = 0x40000,
+	PP_GFX_DCS_MASK = 0x80000,
 };
 
 enum DC_FEATURE_MASK {
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h
new file mode 100644
index 0000000000000000000000000000000000000000..bd129266ebfd10d9418f4f72beb86be4376d912e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h
@@ -0,0 +1,345 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _osssys_4_2_0_OFFSET_HEADER
+#define _osssys_4_2_0_OFFSET_HEADER
+
+
+
+// addressBlock: osssys_osssysdec
+// base address: 0x4280
+#define mmIH_VMID_0_LUT                                                                                0x0000
+#define mmIH_VMID_0_LUT_BASE_IDX                                                                       0
+#define mmIH_VMID_1_LUT                                                                                0x0001
+#define mmIH_VMID_1_LUT_BASE_IDX                                                                       0
+#define mmIH_VMID_2_LUT                                                                                0x0002
+#define mmIH_VMID_2_LUT_BASE_IDX                                                                       0
+#define mmIH_VMID_3_LUT                                                                                0x0003
+#define mmIH_VMID_3_LUT_BASE_IDX                                                                       0
+#define mmIH_VMID_4_LUT                                                                                0x0004
+#define mmIH_VMID_4_LUT_BASE_IDX                                                                       0
+#define mmIH_VMID_5_LUT                                                                                0x0005
+#define mmIH_VMID_5_LUT_BASE_IDX                                                                       0
+#define mmIH_VMID_6_LUT                                                                                0x0006
+#define mmIH_VMID_6_LUT_BASE_IDX                                                                       0
+#define mmIH_VMID_7_LUT                                                                                0x0007
+#define mmIH_VMID_7_LUT_BASE_IDX                                                                       0
+#define mmIH_VMID_8_LUT                                                                                0x0008
+#define mmIH_VMID_8_LUT_BASE_IDX                                                                       0
+#define mmIH_VMID_9_LUT                                                                                0x0009
+#define mmIH_VMID_9_LUT_BASE_IDX                                                                       0
+#define mmIH_VMID_10_LUT                                                                               0x000a
+#define mmIH_VMID_10_LUT_BASE_IDX                                                                      0
+#define mmIH_VMID_11_LUT                                                                               0x000b
+#define mmIH_VMID_11_LUT_BASE_IDX                                                                      0
+#define mmIH_VMID_12_LUT                                                                               0x000c
+#define mmIH_VMID_12_LUT_BASE_IDX                                                                      0
+#define mmIH_VMID_13_LUT                                                                               0x000d
+#define mmIH_VMID_13_LUT_BASE_IDX                                                                      0
+#define mmIH_VMID_14_LUT                                                                               0x000e
+#define mmIH_VMID_14_LUT_BASE_IDX                                                                      0
+#define mmIH_VMID_15_LUT                                                                               0x000f
+#define mmIH_VMID_15_LUT_BASE_IDX                                                                      0
+#define mmIH_VMID_0_LUT_MM                                                                             0x0010
+#define mmIH_VMID_0_LUT_MM_BASE_IDX                                                                    0
+#define mmIH_VMID_1_LUT_MM                                                                             0x0011
+#define mmIH_VMID_1_LUT_MM_BASE_IDX                                                                    0
+#define mmIH_VMID_2_LUT_MM                                                                             0x0012
+#define mmIH_VMID_2_LUT_MM_BASE_IDX                                                                    0
+#define mmIH_VMID_3_LUT_MM                                                                             0x0013
+#define mmIH_VMID_3_LUT_MM_BASE_IDX                                                                    0
+#define mmIH_VMID_4_LUT_MM                                                                             0x0014
+#define mmIH_VMID_4_LUT_MM_BASE_IDX                                                                    0
+#define mmIH_VMID_5_LUT_MM                                                                             0x0015
+#define mmIH_VMID_5_LUT_MM_BASE_IDX                                                                    0
+#define mmIH_VMID_6_LUT_MM                                                                             0x0016
+#define mmIH_VMID_6_LUT_MM_BASE_IDX                                                                    0
+#define mmIH_VMID_7_LUT_MM                                                                             0x0017
+#define mmIH_VMID_7_LUT_MM_BASE_IDX                                                                    0
+#define mmIH_VMID_8_LUT_MM                                                                             0x0018
+#define mmIH_VMID_8_LUT_MM_BASE_IDX                                                                    0
+#define mmIH_VMID_9_LUT_MM                                                                             0x0019
+#define mmIH_VMID_9_LUT_MM_BASE_IDX                                                                    0
+#define mmIH_VMID_10_LUT_MM                                                                            0x001a
+#define mmIH_VMID_10_LUT_MM_BASE_IDX                                                                   0
+#define mmIH_VMID_11_LUT_MM                                                                            0x001b
+#define mmIH_VMID_11_LUT_MM_BASE_IDX                                                                   0
+#define mmIH_VMID_12_LUT_MM                                                                            0x001c
+#define mmIH_VMID_12_LUT_MM_BASE_IDX                                                                   0
+#define mmIH_VMID_13_LUT_MM                                                                            0x001d
+#define mmIH_VMID_13_LUT_MM_BASE_IDX                                                                   0
+#define mmIH_VMID_14_LUT_MM                                                                            0x001e
+#define mmIH_VMID_14_LUT_MM_BASE_IDX                                                                   0
+#define mmIH_VMID_15_LUT_MM                                                                            0x001f
+#define mmIH_VMID_15_LUT_MM_BASE_IDX                                                                   0
+#define mmIH_COOKIE_0                                                                                  0x0020
+#define mmIH_COOKIE_0_BASE_IDX                                                                         0
+#define mmIH_COOKIE_1                                                                                  0x0021
+#define mmIH_COOKIE_1_BASE_IDX                                                                         0
+#define mmIH_COOKIE_2                                                                                  0x0022
+#define mmIH_COOKIE_2_BASE_IDX                                                                         0
+#define mmIH_COOKIE_3                                                                                  0x0023
+#define mmIH_COOKIE_3_BASE_IDX                                                                         0
+#define mmIH_COOKIE_4                                                                                  0x0024
+#define mmIH_COOKIE_4_BASE_IDX                                                                         0
+#define mmIH_COOKIE_5                                                                                  0x0025
+#define mmIH_COOKIE_5_BASE_IDX                                                                         0
+#define mmIH_COOKIE_6                                                                                  0x0026
+#define mmIH_COOKIE_6_BASE_IDX                                                                         0
+#define mmIH_COOKIE_7                                                                                  0x0027
+#define mmIH_COOKIE_7_BASE_IDX                                                                         0
+#define mmIH_REGISTER_LAST_PART0                                                                       0x003f
+#define mmIH_REGISTER_LAST_PART0_BASE_IDX                                                              0
+#define mmSEM_REQ_INPUT_0                                                                              0x0040
+#define mmSEM_REQ_INPUT_0_BASE_IDX                                                                     0
+#define mmSEM_REQ_INPUT_1                                                                              0x0041
+#define mmSEM_REQ_INPUT_1_BASE_IDX                                                                     0
+#define mmSEM_REQ_INPUT_2                                                                              0x0042
+#define mmSEM_REQ_INPUT_2_BASE_IDX                                                                     0
+#define mmSEM_REQ_INPUT_3                                                                              0x0043
+#define mmSEM_REQ_INPUT_3_BASE_IDX                                                                     0
+#define mmSEM_REGISTER_LAST_PART0                                                                      0x007f
+#define mmSEM_REGISTER_LAST_PART0_BASE_IDX                                                             0
+#define mmIH_RB_CNTL                                                                                   0x0080
+#define mmIH_RB_CNTL_BASE_IDX                                                                          0
+#define mmIH_RB_BASE                                                                                   0x0081
+#define mmIH_RB_BASE_BASE_IDX                                                                          0
+#define mmIH_RB_BASE_HI                                                                                0x0082
+#define mmIH_RB_BASE_HI_BASE_IDX                                                                       0
+#define mmIH_RB_RPTR                                                                                   0x0083
+#define mmIH_RB_RPTR_BASE_IDX                                                                          0
+#define mmIH_RB_WPTR                                                                                   0x0084
+#define mmIH_RB_WPTR_BASE_IDX                                                                          0
+#define mmIH_RB_WPTR_ADDR_HI                                                                           0x0085
+#define mmIH_RB_WPTR_ADDR_HI_BASE_IDX                                                                  0
+#define mmIH_RB_WPTR_ADDR_LO                                                                           0x0086
+#define mmIH_RB_WPTR_ADDR_LO_BASE_IDX                                                                  0
+#define mmIH_DOORBELL_RPTR                                                                             0x0087
+#define mmIH_DOORBELL_RPTR_BASE_IDX                                                                    0
+#define mmIH_RB_CNTL_RING1                                                                             0x008c
+#define mmIH_RB_CNTL_RING1_BASE_IDX                                                                    0
+#define mmIH_RB_BASE_RING1                                                                             0x008d
+#define mmIH_RB_BASE_RING1_BASE_IDX                                                                    0
+#define mmIH_RB_BASE_HI_RING1                                                                          0x008e
+#define mmIH_RB_BASE_HI_RING1_BASE_IDX                                                                 0
+#define mmIH_RB_RPTR_RING1                                                                             0x008f
+#define mmIH_RB_RPTR_RING1_BASE_IDX                                                                    0
+#define mmIH_RB_WPTR_RING1                                                                             0x0090
+#define mmIH_RB_WPTR_RING1_BASE_IDX                                                                    0
+#define mmIH_DOORBELL_RPTR_RING1                                                                       0x0093
+#define mmIH_DOORBELL_RPTR_RING1_BASE_IDX                                                              0
+#define mmIH_RB_CNTL_RING2                                                                             0x0098
+#define mmIH_RB_CNTL_RING2_BASE_IDX                                                                    0
+#define mmIH_RB_BASE_RING2                                                                             0x0099
+#define mmIH_RB_BASE_RING2_BASE_IDX                                                                    0
+#define mmIH_RB_BASE_HI_RING2                                                                          0x009a
+#define mmIH_RB_BASE_HI_RING2_BASE_IDX                                                                 0
+#define mmIH_RB_RPTR_RING2                                                                             0x009b
+#define mmIH_RB_RPTR_RING2_BASE_IDX                                                                    0
+#define mmIH_RB_WPTR_RING2                                                                             0x009c
+#define mmIH_RB_WPTR_RING2_BASE_IDX                                                                    0
+#define mmIH_DOORBELL_RPTR_RING2                                                                       0x009f
+#define mmIH_DOORBELL_RPTR_RING2_BASE_IDX                                                              0
+#define mmIH_VERSION                                                                                   0x00a5
+#define mmIH_VERSION_BASE_IDX                                                                          0
+#define mmIH_CNTL                                                                                      0x00c0
+#define mmIH_CNTL_BASE_IDX                                                                             0
+#define mmIH_CNTL2                                                                                     0x00c1
+#define mmIH_CNTL2_BASE_IDX                                                                            0
+#define mmIH_STATUS                                                                                    0x00c2
+#define mmIH_STATUS_BASE_IDX                                                                           0
+#define mmIH_PERFMON_CNTL                                                                              0x00c3
+#define mmIH_PERFMON_CNTL_BASE_IDX                                                                     0
+#define mmIH_PERFCOUNTER0_RESULT                                                                       0x00c4
+#define mmIH_PERFCOUNTER0_RESULT_BASE_IDX                                                              0
+#define mmIH_PERFCOUNTER1_RESULT                                                                       0x00c5
+#define mmIH_PERFCOUNTER1_RESULT_BASE_IDX                                                              0
+#define mmIH_DSM_MATCH_VALUE_BIT_31_0                                                                  0x00c7
+#define mmIH_DSM_MATCH_VALUE_BIT_31_0_BASE_IDX                                                         0
+#define mmIH_DSM_MATCH_VALUE_BIT_63_32                                                                 0x00c8
+#define mmIH_DSM_MATCH_VALUE_BIT_63_32_BASE_IDX                                                        0
+#define mmIH_DSM_MATCH_VALUE_BIT_95_64                                                                 0x00c9
+#define mmIH_DSM_MATCH_VALUE_BIT_95_64_BASE_IDX                                                        0
+#define mmIH_DSM_MATCH_FIELD_CONTROL                                                                   0x00ca
+#define mmIH_DSM_MATCH_FIELD_CONTROL_BASE_IDX                                                          0
+#define mmIH_DSM_MATCH_DATA_CONTROL                                                                    0x00cb
+#define mmIH_DSM_MATCH_DATA_CONTROL_BASE_IDX                                                           0
+#define mmIH_DSM_MATCH_FCN_ID                                                                          0x00cc
+#define mmIH_DSM_MATCH_FCN_ID_BASE_IDX                                                                 0
+#define mmIH_LIMIT_INT_RATE_CNTL                                                                       0x00cd
+#define mmIH_LIMIT_INT_RATE_CNTL_BASE_IDX                                                              0
+#define mmIH_VF_RB_STATUS                                                                              0x00ce
+#define mmIH_VF_RB_STATUS_BASE_IDX                                                                     0
+#define mmIH_VF_RB_STATUS2                                                                             0x00cf
+#define mmIH_VF_RB_STATUS2_BASE_IDX                                                                    0
+#define mmIH_VF_RB1_STATUS                                                                             0x00d0
+#define mmIH_VF_RB1_STATUS_BASE_IDX                                                                    0
+#define mmIH_VF_RB1_STATUS2                                                                            0x00d1
+#define mmIH_VF_RB1_STATUS2_BASE_IDX                                                                   0
+#define mmIH_VF_RB2_STATUS                                                                             0x00d2
+#define mmIH_VF_RB2_STATUS_BASE_IDX                                                                    0
+#define mmIH_VF_RB2_STATUS2                                                                            0x00d3
+#define mmIH_VF_RB2_STATUS2_BASE_IDX                                                                   0
+#define mmIH_INT_FLOOD_CNTL                                                                            0x00d5
+#define mmIH_INT_FLOOD_CNTL_BASE_IDX                                                                   0
+#define mmIH_RB0_INT_FLOOD_STATUS                                                                      0x00d6
+#define mmIH_RB0_INT_FLOOD_STATUS_BASE_IDX                                                             0
+#define mmIH_RB1_INT_FLOOD_STATUS                                                                      0x00d7
+#define mmIH_RB1_INT_FLOOD_STATUS_BASE_IDX                                                             0
+#define mmIH_RB2_INT_FLOOD_STATUS                                                                      0x00d8
+#define mmIH_RB2_INT_FLOOD_STATUS_BASE_IDX                                                             0
+#define mmIH_INT_FLOOD_STATUS                                                                          0x00d9
+#define mmIH_INT_FLOOD_STATUS_BASE_IDX                                                                 0
+#define mmIH_STORM_CLIENT_LIST_CNTL                                                                    0x00da
+#define mmIH_STORM_CLIENT_LIST_CNTL_BASE_IDX                                                           0
+#define mmIH_CLK_CTRL                                                                                  0x00db
+#define mmIH_CLK_CTRL_BASE_IDX                                                                         0
+#define mmIH_INT_FLAGS                                                                                 0x00dc
+#define mmIH_INT_FLAGS_BASE_IDX                                                                        0
+#define mmIH_LAST_INT_INFO0                                                                            0x00dd
+#define mmIH_LAST_INT_INFO0_BASE_IDX                                                                   0
+#define mmIH_LAST_INT_INFO1                                                                            0x00de
+#define mmIH_LAST_INT_INFO1_BASE_IDX                                                                   0
+#define mmIH_LAST_INT_INFO2                                                                            0x00df
+#define mmIH_LAST_INT_INFO2_BASE_IDX                                                                   0
+#define mmIH_SCRATCH                                                                                   0x00e0
+#define mmIH_SCRATCH_BASE_IDX                                                                          0
+#define mmIH_CLIENT_CREDIT_ERROR                                                                       0x00e1
+#define mmIH_CLIENT_CREDIT_ERROR_BASE_IDX                                                              0
+#define mmIH_GPU_IOV_VIOLATION_LOG                                                                     0x00e2
+#define mmIH_GPU_IOV_VIOLATION_LOG_BASE_IDX                                                            0
+#define mmIH_COOKIE_REC_VIOLATION_LOG                                                                  0x00e3
+#define mmIH_COOKIE_REC_VIOLATION_LOG_BASE_IDX                                                         0
+#define mmIH_CREDIT_STATUS                                                                             0x00e4
+#define mmIH_CREDIT_STATUS_BASE_IDX                                                                    0
+#define mmIH_MMHUB_ERROR                                                                               0x00e5
+#define mmIH_MMHUB_ERROR_BASE_IDX                                                                      0
+#define mmIH_MEM_POWER_CTRL                                                                            0x00e8
+#define mmIH_MEM_POWER_CTRL_BASE_IDX                                                                   0
+#define mmIH_REGISTER_LAST_PART2                                                                       0x00ff
+#define mmIH_REGISTER_LAST_PART2_BASE_IDX                                                              0
+#define mmSEM_CLK_CTRL                                                                                 0x0100
+#define mmSEM_CLK_CTRL_BASE_IDX                                                                        0
+#define mmSEM_UTC_CREDIT                                                                               0x0101
+#define mmSEM_UTC_CREDIT_BASE_IDX                                                                      0
+#define mmSEM_UTC_CONFIG                                                                               0x0102
+#define mmSEM_UTC_CONFIG_BASE_IDX                                                                      0
+#define mmSEM_UTCL2_TRAN_EN_LUT                                                                        0x0103
+#define mmSEM_UTCL2_TRAN_EN_LUT_BASE_IDX                                                               0
+#define mmSEM_MCIF_CONFIG                                                                              0x0104
+#define mmSEM_MCIF_CONFIG_BASE_IDX                                                                     0
+#define mmSEM_PERFMON_CNTL                                                                             0x0105
+#define mmSEM_PERFMON_CNTL_BASE_IDX                                                                    0
+#define mmSEM_PERFCOUNTER0_RESULT                                                                      0x0106
+#define mmSEM_PERFCOUNTER0_RESULT_BASE_IDX                                                             0
+#define mmSEM_PERFCOUNTER1_RESULT                                                                      0x0107
+#define mmSEM_PERFCOUNTER1_RESULT_BASE_IDX                                                             0
+#define mmSEM_STATUS                                                                                   0x0108
+#define mmSEM_STATUS_BASE_IDX                                                                          0
+#define mmSEM_MAILBOX_CLIENTCONFIG                                                                     0x0109
+#define mmSEM_MAILBOX_CLIENTCONFIG_BASE_IDX                                                            0
+#define mmSEM_MAILBOX                                                                                  0x010a
+#define mmSEM_MAILBOX_BASE_IDX                                                                         0
+#define mmSEM_MAILBOX_CONTROL                                                                          0x010b
+#define mmSEM_MAILBOX_CONTROL_BASE_IDX                                                                 0
+#define mmSEM_CHICKEN_BITS                                                                             0x010c
+#define mmSEM_CHICKEN_BITS_BASE_IDX                                                                    0
+#define mmSEM_MAILBOX_CLIENTCONFIG_EXTRA                                                               0x010d
+#define mmSEM_MAILBOX_CLIENTCONFIG_EXTRA_BASE_IDX                                                      0
+#define mmSEM_GPU_IOV_VIOLATION_LOG                                                                    0x010e
+#define mmSEM_GPU_IOV_VIOLATION_LOG_BASE_IDX                                                           0
+#define mmSEM_OUTSTANDING_THRESHOLD                                                                    0x010f
+#define mmSEM_OUTSTANDING_THRESHOLD_BASE_IDX                                                           0
+#define mmSEM_MEM_POWER_CTRL                                                                           0x0110
+#define mmSEM_MEM_POWER_CTRL_BASE_IDX                                                                  0
+#define mmSEM_REGISTER_LAST_PART2                                                                      0x017f
+#define mmSEM_REGISTER_LAST_PART2_BASE_IDX                                                             0
+#define mmIH_ACTIVE_FCN_ID                                                                             0x0180
+#define mmIH_ACTIVE_FCN_ID_BASE_IDX                                                                    0
+#define mmIH_VIRT_RESET_REQ                                                                            0x0181
+#define mmIH_VIRT_RESET_REQ_BASE_IDX                                                                   0
+#define mmIH_CLIENT_CFG                                                                                0x0184
+#define mmIH_CLIENT_CFG_BASE_IDX                                                                       0
+#define mmIH_CLIENT_CFG_INDEX                                                                          0x0188
+#define mmIH_CLIENT_CFG_INDEX_BASE_IDX                                                                 0
+#define mmIH_CLIENT_CFG_DATA                                                                           0x0189
+#define mmIH_CLIENT_CFG_DATA_BASE_IDX                                                                  0
+#define mmIH_CID_REMAP_INDEX                                                                           0x018a
+#define mmIH_CID_REMAP_INDEX_BASE_IDX                                                                  0
+#define mmIH_CID_REMAP_DATA                                                                            0x018b
+#define mmIH_CID_REMAP_DATA_BASE_IDX                                                                   0
+#define mmIH_CHICKEN                                                                                   0x018c
+#define mmIH_CHICKEN_BASE_IDX                                                                          0
+#define mmIH_MMHUB_CNTL                                                                                0x018d
+#define mmIH_MMHUB_CNTL_BASE_IDX                                                                       0
+#define mmIH_INT_DROP_CNTL                                                                             0x018e
+#define mmIH_INT_DROP_CNTL_BASE_IDX                                                                    0
+#define mmIH_INT_DROP_MATCH_VALUE0                                                                     0x018f
+#define mmIH_INT_DROP_MATCH_VALUE0_BASE_IDX                                                            0
+#define mmIH_INT_DROP_MATCH_VALUE1                                                                     0x0190
+#define mmIH_INT_DROP_MATCH_VALUE1_BASE_IDX                                                            0
+#define mmIH_INT_DROP_MATCH_MASK0                                                                      0x0191
+#define mmIH_INT_DROP_MATCH_MASK0_BASE_IDX                                                             0
+#define mmIH_INT_DROP_MATCH_MASK1                                                                      0x0192
+#define mmIH_INT_DROP_MATCH_MASK1_BASE_IDX                                                             0
+#define mmIH_REGISTER_LAST_PART1                                                                       0x019f
+#define mmIH_REGISTER_LAST_PART1_BASE_IDX                                                              0
+#define mmSEM_ACTIVE_FCN_ID                                                                            0x01a0
+#define mmSEM_ACTIVE_FCN_ID_BASE_IDX                                                                   0
+#define mmSEM_VIRT_RESET_REQ                                                                           0x01a1
+#define mmSEM_VIRT_RESET_REQ_BASE_IDX                                                                  0
+#define mmSEM_RESP_SDMA0                                                                               0x01a4
+#define mmSEM_RESP_SDMA0_BASE_IDX                                                                      0
+#define mmSEM_RESP_SDMA1                                                                               0x01a5
+#define mmSEM_RESP_SDMA1_BASE_IDX                                                                      0
+#define mmSEM_RESP_UVD                                                                                 0x01a6
+#define mmSEM_RESP_UVD_BASE_IDX                                                                        0
+#define mmSEM_RESP_VCE_0                                                                               0x01a7
+#define mmSEM_RESP_VCE_0_BASE_IDX                                                                      0
+#define mmSEM_RESP_ACP                                                                                 0x01a8
+#define mmSEM_RESP_ACP_BASE_IDX                                                                        0
+#define mmSEM_RESP_ISP                                                                                 0x01a9
+#define mmSEM_RESP_ISP_BASE_IDX                                                                        0
+#define mmSEM_RESP_VCE_1                                                                               0x01aa
+#define mmSEM_RESP_VCE_1_BASE_IDX                                                                      0
+#define mmSEM_RESP_VP8                                                                                 0x01ab
+#define mmSEM_RESP_VP8_BASE_IDX                                                                        0
+#define mmSEM_RESP_GC                                                                                  0x01ac
+#define mmSEM_RESP_GC_BASE_IDX                                                                         0
+#define mmSEM_RESP_UVD_1                                                                               0x01ad
+#define mmSEM_RESP_UVD_1_BASE_IDX                                                                      0
+#define mmSEM_CID_REMAP_INDEX                                                                          0x01b0
+#define mmSEM_CID_REMAP_INDEX_BASE_IDX                                                                 0
+#define mmSEM_CID_REMAP_DATA                                                                           0x01b1
+#define mmSEM_CID_REMAP_DATA_BASE_IDX                                                                  0
+#define mmSEM_ATOMIC_OP_LUT                                                                            0x01b2
+#define mmSEM_ATOMIC_OP_LUT_BASE_IDX                                                                   0
+#define mmSEM_EDC_CONFIG                                                                               0x01b3
+#define mmSEM_EDC_CONFIG_BASE_IDX                                                                      0
+#define mmSEM_CHICKEN_BITS2                                                                            0x01b4
+#define mmSEM_CHICKEN_BITS2_BASE_IDX                                                                   0
+#define mmSEM_MMHUB_CNTL                                                                               0x01b5
+#define mmSEM_MMHUB_CNTL_BASE_IDX                                                                      0
+#define mmSEM_REGISTER_LAST_PART1                                                                      0x01bf
+#define mmSEM_REGISTER_LAST_PART1_BASE_IDX                                                             0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h
new file mode 100644
index 0000000000000000000000000000000000000000..3ea83ea9ce3a4bc88c9eece8a2ef537245413360
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h
@@ -0,0 +1,1300 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _osssys_4_2_0_SH_MASK_HEADER
+#define _osssys_4_2_0_SH_MASK_HEADER
+
+
+// addressBlock: osssys_osssysdec
+//IH_VMID_0_LUT
+#define IH_VMID_0_LUT__PASID__SHIFT                                                                           0x0
+#define IH_VMID_0_LUT__PASID_MASK                                                                             0x0000FFFFL
+//IH_VMID_1_LUT
+#define IH_VMID_1_LUT__PASID__SHIFT                                                                           0x0
+#define IH_VMID_1_LUT__PASID_MASK                                                                             0x0000FFFFL
+//IH_VMID_2_LUT
+#define IH_VMID_2_LUT__PASID__SHIFT                                                                           0x0
+#define IH_VMID_2_LUT__PASID_MASK                                                                             0x0000FFFFL
+//IH_VMID_3_LUT
+#define IH_VMID_3_LUT__PASID__SHIFT                                                                           0x0
+#define IH_VMID_3_LUT__PASID_MASK                                                                             0x0000FFFFL
+//IH_VMID_4_LUT
+#define IH_VMID_4_LUT__PASID__SHIFT                                                                           0x0
+#define IH_VMID_4_LUT__PASID_MASK                                                                             0x0000FFFFL
+//IH_VMID_5_LUT
+#define IH_VMID_5_LUT__PASID__SHIFT                                                                           0x0
+#define IH_VMID_5_LUT__PASID_MASK                                                                             0x0000FFFFL
+//IH_VMID_6_LUT
+#define IH_VMID_6_LUT__PASID__SHIFT                                                                           0x0
+#define IH_VMID_6_LUT__PASID_MASK                                                                             0x0000FFFFL
+//IH_VMID_7_LUT
+#define IH_VMID_7_LUT__PASID__SHIFT                                                                           0x0
+#define IH_VMID_7_LUT__PASID_MASK                                                                             0x0000FFFFL
+//IH_VMID_8_LUT
+#define IH_VMID_8_LUT__PASID__SHIFT                                                                           0x0
+#define IH_VMID_8_LUT__PASID_MASK                                                                             0x0000FFFFL
+//IH_VMID_9_LUT
+#define IH_VMID_9_LUT__PASID__SHIFT                                                                           0x0
+#define IH_VMID_9_LUT__PASID_MASK                                                                             0x0000FFFFL
+//IH_VMID_10_LUT
+#define IH_VMID_10_LUT__PASID__SHIFT                                                                          0x0
+#define IH_VMID_10_LUT__PASID_MASK                                                                            0x0000FFFFL
+//IH_VMID_11_LUT
+#define IH_VMID_11_LUT__PASID__SHIFT                                                                          0x0
+#define IH_VMID_11_LUT__PASID_MASK                                                                            0x0000FFFFL
+//IH_VMID_12_LUT
+#define IH_VMID_12_LUT__PASID__SHIFT                                                                          0x0
+#define IH_VMID_12_LUT__PASID_MASK                                                                            0x0000FFFFL
+//IH_VMID_13_LUT
+#define IH_VMID_13_LUT__PASID__SHIFT                                                                          0x0
+#define IH_VMID_13_LUT__PASID_MASK                                                                            0x0000FFFFL
+//IH_VMID_14_LUT
+#define IH_VMID_14_LUT__PASID__SHIFT                                                                          0x0
+#define IH_VMID_14_LUT__PASID_MASK                                                                            0x0000FFFFL
+//IH_VMID_15_LUT
+#define IH_VMID_15_LUT__PASID__SHIFT                                                                          0x0
+#define IH_VMID_15_LUT__PASID_MASK                                                                            0x0000FFFFL
+//IH_VMID_0_LUT_MM
+#define IH_VMID_0_LUT_MM__PASID__SHIFT                                                                        0x0
+#define IH_VMID_0_LUT_MM__PASID_MASK                                                                          0x0000FFFFL
+//IH_VMID_1_LUT_MM
+#define IH_VMID_1_LUT_MM__PASID__SHIFT                                                                        0x0
+#define IH_VMID_1_LUT_MM__PASID_MASK                                                                          0x0000FFFFL
+//IH_VMID_2_LUT_MM
+#define IH_VMID_2_LUT_MM__PASID__SHIFT                                                                        0x0
+#define IH_VMID_2_LUT_MM__PASID_MASK                                                                          0x0000FFFFL
+//IH_VMID_3_LUT_MM
+#define IH_VMID_3_LUT_MM__PASID__SHIFT                                                                        0x0
+#define IH_VMID_3_LUT_MM__PASID_MASK                                                                          0x0000FFFFL
+//IH_VMID_4_LUT_MM
+#define IH_VMID_4_LUT_MM__PASID__SHIFT                                                                        0x0
+#define IH_VMID_4_LUT_MM__PASID_MASK                                                                          0x0000FFFFL
+//IH_VMID_5_LUT_MM
+#define IH_VMID_5_LUT_MM__PASID__SHIFT                                                                        0x0
+#define IH_VMID_5_LUT_MM__PASID_MASK                                                                          0x0000FFFFL
+//IH_VMID_6_LUT_MM
+#define IH_VMID_6_LUT_MM__PASID__SHIFT                                                                        0x0
+#define IH_VMID_6_LUT_MM__PASID_MASK                                                                          0x0000FFFFL
+//IH_VMID_7_LUT_MM
+#define IH_VMID_7_LUT_MM__PASID__SHIFT                                                                        0x0
+#define IH_VMID_7_LUT_MM__PASID_MASK                                                                          0x0000FFFFL
+//IH_VMID_8_LUT_MM
+#define IH_VMID_8_LUT_MM__PASID__SHIFT                                                                        0x0
+#define IH_VMID_8_LUT_MM__PASID_MASK                                                                          0x0000FFFFL
+//IH_VMID_9_LUT_MM
+#define IH_VMID_9_LUT_MM__PASID__SHIFT                                                                        0x0
+#define IH_VMID_9_LUT_MM__PASID_MASK                                                                          0x0000FFFFL
+//IH_VMID_10_LUT_MM
+#define IH_VMID_10_LUT_MM__PASID__SHIFT                                                                       0x0
+#define IH_VMID_10_LUT_MM__PASID_MASK                                                                         0x0000FFFFL
+//IH_VMID_11_LUT_MM
+#define IH_VMID_11_LUT_MM__PASID__SHIFT                                                                       0x0
+#define IH_VMID_11_LUT_MM__PASID_MASK                                                                         0x0000FFFFL
+//IH_VMID_12_LUT_MM
+#define IH_VMID_12_LUT_MM__PASID__SHIFT                                                                       0x0
+#define IH_VMID_12_LUT_MM__PASID_MASK                                                                         0x0000FFFFL
+//IH_VMID_13_LUT_MM
+#define IH_VMID_13_LUT_MM__PASID__SHIFT                                                                       0x0
+#define IH_VMID_13_LUT_MM__PASID_MASK                                                                         0x0000FFFFL
+//IH_VMID_14_LUT_MM
+#define IH_VMID_14_LUT_MM__PASID__SHIFT                                                                       0x0
+#define IH_VMID_14_LUT_MM__PASID_MASK                                                                         0x0000FFFFL
+//IH_VMID_15_LUT_MM
+#define IH_VMID_15_LUT_MM__PASID__SHIFT                                                                       0x0
+#define IH_VMID_15_LUT_MM__PASID_MASK                                                                         0x0000FFFFL
+//IH_COOKIE_0
+#define IH_COOKIE_0__CLIENT_ID__SHIFT                                                                         0x0
+#define IH_COOKIE_0__SOURCE_ID__SHIFT                                                                         0x8
+#define IH_COOKIE_0__RING_ID__SHIFT                                                                           0x10
+#define IH_COOKIE_0__VM_ID__SHIFT                                                                             0x18
+#define IH_COOKIE_0__RESERVED__SHIFT                                                                          0x1c
+#define IH_COOKIE_0__VMID_TYPE__SHIFT                                                                         0x1f
+#define IH_COOKIE_0__CLIENT_ID_MASK                                                                           0x000000FFL
+#define IH_COOKIE_0__SOURCE_ID_MASK                                                                           0x0000FF00L
+#define IH_COOKIE_0__RING_ID_MASK                                                                             0x00FF0000L
+#define IH_COOKIE_0__VM_ID_MASK                                                                               0x0F000000L
+#define IH_COOKIE_0__RESERVED_MASK                                                                            0x70000000L
+#define IH_COOKIE_0__VMID_TYPE_MASK                                                                           0x80000000L
+//IH_COOKIE_1
+#define IH_COOKIE_1__TIMESTAMP_31_0__SHIFT                                                                    0x0
+#define IH_COOKIE_1__TIMESTAMP_31_0_MASK                                                                      0xFFFFFFFFL
+//IH_COOKIE_2
+#define IH_COOKIE_2__TIMESTAMP_47_32__SHIFT                                                                   0x0
+#define IH_COOKIE_2__RESERVED__SHIFT                                                                          0x10
+#define IH_COOKIE_2__TIMESTAMP_SRC__SHIFT                                                                     0x1f
+#define IH_COOKIE_2__TIMESTAMP_47_32_MASK                                                                     0x0000FFFFL
+#define IH_COOKIE_2__RESERVED_MASK                                                                            0x7FFF0000L
+#define IH_COOKIE_2__TIMESTAMP_SRC_MASK                                                                       0x80000000L
+//IH_COOKIE_3
+#define IH_COOKIE_3__PAS_ID__SHIFT                                                                            0x0
+#define IH_COOKIE_3__RESERVED__SHIFT                                                                          0x10
+#define IH_COOKIE_3__PASID_SRC__SHIFT                                                                         0x1f
+#define IH_COOKIE_3__PAS_ID_MASK                                                                              0x0000FFFFL
+#define IH_COOKIE_3__RESERVED_MASK                                                                            0x7FFF0000L
+#define IH_COOKIE_3__PASID_SRC_MASK                                                                           0x80000000L
+//IH_COOKIE_4
+#define IH_COOKIE_4__CONTEXT_ID_31_0__SHIFT                                                                   0x0
+#define IH_COOKIE_4__CONTEXT_ID_31_0_MASK                                                                     0xFFFFFFFFL
+//IH_COOKIE_5
+#define IH_COOKIE_5__CONTEXT_ID_63_32__SHIFT                                                                  0x0
+#define IH_COOKIE_5__CONTEXT_ID_63_32_MASK                                                                    0xFFFFFFFFL
+//IH_COOKIE_6
+#define IH_COOKIE_6__CONTEXT_ID_95_64__SHIFT                                                                  0x0
+#define IH_COOKIE_6__CONTEXT_ID_95_64_MASK                                                                    0xFFFFFFFFL
+//IH_COOKIE_7
+#define IH_COOKIE_7__CONTEXT_ID_128_96__SHIFT                                                                 0x0
+#define IH_COOKIE_7__CONTEXT_ID_128_96_MASK                                                                   0xFFFFFFFFL
+//IH_REGISTER_LAST_PART0
+#define IH_REGISTER_LAST_PART0__RESERVED__SHIFT                                                               0x0
+#define IH_REGISTER_LAST_PART0__RESERVED_MASK                                                                 0xFFFFFFFFL
+//SEM_REQ_INPUT_0
+#define SEM_REQ_INPUT_0__DATA__SHIFT                                                                          0x0
+#define SEM_REQ_INPUT_0__DATA_MASK                                                                            0xFFFFFFFFL
+//SEM_REQ_INPUT_1
+#define SEM_REQ_INPUT_1__DATA__SHIFT                                                                          0x0
+#define SEM_REQ_INPUT_1__DATA_MASK                                                                            0xFFFFFFFFL
+//SEM_REQ_INPUT_2
+#define SEM_REQ_INPUT_2__DATA__SHIFT                                                                          0x0
+#define SEM_REQ_INPUT_2__DATA_MASK                                                                            0xFFFFFFFFL
+//SEM_REQ_INPUT_3
+#define SEM_REQ_INPUT_3__DATA__SHIFT                                                                          0x0
+#define SEM_REQ_INPUT_3__DATA_MASK                                                                            0xFFFFFFFFL
+//SEM_REGISTER_LAST_PART0
+#define SEM_REGISTER_LAST_PART0__RESERVED__SHIFT                                                              0x0
+#define SEM_REGISTER_LAST_PART0__RESERVED_MASK                                                                0xFFFFFFFFL
+//IH_RB_CNTL
+#define IH_RB_CNTL__RB_ENABLE__SHIFT                                                                          0x0
+#define IH_RB_CNTL__RB_SIZE__SHIFT                                                                            0x1
+#define IH_RB_CNTL__RB_GPU_TS_ENABLE__SHIFT                                                                   0x7
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT                                                              0x8
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE__SHIFT                                                               0x9
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR__SHIFT                                                                   0xa
+#define IH_RB_CNTL__PAGE_RB_CLEAR__SHIFT                                                                      0xb
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD__SHIFT                                                              0xc
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE__SHIFT                                                               0x10
+#define IH_RB_CNTL__ENABLE_INTR__SHIFT                                                                        0x11
+#define IH_RB_CNTL__MC_SWAP__SHIFT                                                                            0x12
+#define IH_RB_CNTL__MC_SNOOP__SHIFT                                                                           0x14
+#define IH_RB_CNTL__RPTR_REARM__SHIFT                                                                         0x15
+#define IH_RB_CNTL__MC_RO__SHIFT                                                                              0x16
+#define IH_RB_CNTL__MC_VMID__SHIFT                                                                            0x18
+#define IH_RB_CNTL__MC_SPACE__SHIFT                                                                           0x1c
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT                                                                0x1f
+#define IH_RB_CNTL__RB_ENABLE_MASK                                                                            0x00000001L
+#define IH_RB_CNTL__RB_SIZE_MASK                                                                              0x0000003EL
+#define IH_RB_CNTL__RB_GPU_TS_ENABLE_MASK                                                                     0x00000080L
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK                                                                0x00000100L
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE_MASK                                                                 0x00000200L
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR_MASK                                                                     0x00000400L
+#define IH_RB_CNTL__PAGE_RB_CLEAR_MASK                                                                        0x00000800L
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD_MASK                                                                0x0000F000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK                                                                 0x00010000L
+#define IH_RB_CNTL__ENABLE_INTR_MASK                                                                          0x00020000L
+#define IH_RB_CNTL__MC_SWAP_MASK                                                                              0x000C0000L
+#define IH_RB_CNTL__MC_SNOOP_MASK                                                                             0x00100000L
+#define IH_RB_CNTL__RPTR_REARM_MASK                                                                           0x00200000L
+#define IH_RB_CNTL__MC_RO_MASK                                                                                0x00400000L
+#define IH_RB_CNTL__MC_VMID_MASK                                                                              0x0F000000L
+#define IH_RB_CNTL__MC_SPACE_MASK                                                                             0x70000000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK                                                                  0x80000000L
+//IH_RB_BASE
+#define IH_RB_BASE__ADDR__SHIFT                                                                               0x0
+#define IH_RB_BASE__ADDR_MASK                                                                                 0xFFFFFFFFL
+//IH_RB_BASE_HI
+#define IH_RB_BASE_HI__ADDR__SHIFT                                                                            0x0
+#define IH_RB_BASE_HI__ADDR_MASK                                                                              0x000000FFL
+//IH_RB_RPTR
+#define IH_RB_RPTR__OFFSET__SHIFT                                                                             0x2
+#define IH_RB_RPTR__OFFSET_MASK                                                                               0x0003FFFCL
+//IH_RB_WPTR
+#define IH_RB_WPTR__RB_OVERFLOW__SHIFT                                                                        0x0
+#define IH_RB_WPTR__OFFSET__SHIFT                                                                             0x2
+#define IH_RB_WPTR__RB_LEFT_NONE__SHIFT                                                                       0x12
+#define IH_RB_WPTR__RB_MAY_OVERFLOW__SHIFT                                                                    0x13
+#define IH_RB_WPTR__RB_OVERFLOW_MASK                                                                          0x00000001L
+#define IH_RB_WPTR__OFFSET_MASK                                                                               0x0003FFFCL
+#define IH_RB_WPTR__RB_LEFT_NONE_MASK                                                                         0x00040000L
+#define IH_RB_WPTR__RB_MAY_OVERFLOW_MASK                                                                      0x00080000L
+//IH_RB_WPTR_ADDR_HI
+#define IH_RB_WPTR_ADDR_HI__ADDR__SHIFT                                                                       0x0
+#define IH_RB_WPTR_ADDR_HI__ADDR_MASK                                                                         0x0000FFFFL
+//IH_RB_WPTR_ADDR_LO
+#define IH_RB_WPTR_ADDR_LO__ADDR__SHIFT                                                                       0x2
+#define IH_RB_WPTR_ADDR_LO__ADDR_MASK                                                                         0xFFFFFFFCL
+//IH_DOORBELL_RPTR
+#define IH_DOORBELL_RPTR__OFFSET__SHIFT                                                                       0x0
+#define IH_DOORBELL_RPTR__ENABLE__SHIFT                                                                       0x1c
+#define IH_DOORBELL_RPTR__OFFSET_MASK                                                                         0x03FFFFFFL
+#define IH_DOORBELL_RPTR__ENABLE_MASK                                                                         0x10000000L
+//IH_RB_CNTL_RING1
+#define IH_RB_CNTL_RING1__RB_ENABLE__SHIFT                                                                    0x0
+#define IH_RB_CNTL_RING1__RB_SIZE__SHIFT                                                                      0x1
+#define IH_RB_CNTL_RING1__RB_GPU_TS_ENABLE__SHIFT                                                             0x7
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE__SHIFT                                                         0x9
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR__SHIFT                                                             0xa
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR__SHIFT                                                                0xb
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD__SHIFT                                                        0xc
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE__SHIFT                                                         0x10
+#define IH_RB_CNTL_RING1__MC_SWAP__SHIFT                                                                      0x12
+#define IH_RB_CNTL_RING1__MC_SNOOP__SHIFT                                                                     0x14
+#define IH_RB_CNTL_RING1__MC_RO__SHIFT                                                                        0x16
+#define IH_RB_CNTL_RING1__MC_VMID__SHIFT                                                                      0x18
+#define IH_RB_CNTL_RING1__MC_SPACE__SHIFT                                                                     0x1c
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR__SHIFT                                                          0x1f
+#define IH_RB_CNTL_RING1__RB_ENABLE_MASK                                                                      0x00000001L
+#define IH_RB_CNTL_RING1__RB_SIZE_MASK                                                                        0x0000003EL
+#define IH_RB_CNTL_RING1__RB_GPU_TS_ENABLE_MASK                                                               0x00000080L
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE_MASK                                                           0x00000200L
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR_MASK                                                               0x00000400L
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR_MASK                                                                  0x00000800L
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD_MASK                                                          0x0000F000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE_MASK                                                           0x00010000L
+#define IH_RB_CNTL_RING1__MC_SWAP_MASK                                                                        0x000C0000L
+#define IH_RB_CNTL_RING1__MC_SNOOP_MASK                                                                       0x00100000L
+#define IH_RB_CNTL_RING1__MC_RO_MASK                                                                          0x00400000L
+#define IH_RB_CNTL_RING1__MC_VMID_MASK                                                                        0x0F000000L
+#define IH_RB_CNTL_RING1__MC_SPACE_MASK                                                                       0x70000000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR_MASK                                                            0x80000000L
+//IH_RB_BASE_RING1
+#define IH_RB_BASE_RING1__ADDR__SHIFT                                                                         0x0
+#define IH_RB_BASE_RING1__ADDR_MASK                                                                           0xFFFFFFFFL
+//IH_RB_BASE_HI_RING1
+#define IH_RB_BASE_HI_RING1__ADDR__SHIFT                                                                      0x0
+#define IH_RB_BASE_HI_RING1__ADDR_MASK                                                                        0x000000FFL
+//IH_RB_RPTR_RING1
+#define IH_RB_RPTR_RING1__OFFSET__SHIFT                                                                       0x2
+#define IH_RB_RPTR_RING1__OFFSET_MASK                                                                         0x0003FFFCL
+//IH_RB_WPTR_RING1
+#define IH_RB_WPTR_RING1__RB_OVERFLOW__SHIFT                                                                  0x0
+#define IH_RB_WPTR_RING1__OFFSET__SHIFT                                                                       0x2
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE__SHIFT                                                                 0x12
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW__SHIFT                                                              0x13
+#define IH_RB_WPTR_RING1__RB_OVERFLOW_MASK                                                                    0x00000001L
+#define IH_RB_WPTR_RING1__OFFSET_MASK                                                                         0x0003FFFCL
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE_MASK                                                                   0x00040000L
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW_MASK                                                                0x00080000L
+//IH_DOORBELL_RPTR_RING1
+#define IH_DOORBELL_RPTR_RING1__OFFSET__SHIFT                                                                 0x0
+#define IH_DOORBELL_RPTR_RING1__ENABLE__SHIFT                                                                 0x1c
+#define IH_DOORBELL_RPTR_RING1__OFFSET_MASK                                                                   0x03FFFFFFL
+#define IH_DOORBELL_RPTR_RING1__ENABLE_MASK                                                                   0x10000000L
+//IH_RB_CNTL_RING2
+#define IH_RB_CNTL_RING2__RB_ENABLE__SHIFT                                                                    0x0
+#define IH_RB_CNTL_RING2__RB_SIZE__SHIFT                                                                      0x1
+#define IH_RB_CNTL_RING2__RB_GPU_TS_ENABLE__SHIFT                                                             0x7
+#define IH_RB_CNTL_RING2__RB_FULL_DRAIN_ENABLE__SHIFT                                                         0x9
+#define IH_RB_CNTL_RING2__FULL_DRAIN_CLEAR__SHIFT                                                             0xa
+#define IH_RB_CNTL_RING2__PAGE_RB_CLEAR__SHIFT                                                                0xb
+#define IH_RB_CNTL_RING2__RB_USED_INT_THRESHOLD__SHIFT                                                        0xc
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_ENABLE__SHIFT                                                         0x10
+#define IH_RB_CNTL_RING2__MC_SWAP__SHIFT                                                                      0x12
+#define IH_RB_CNTL_RING2__MC_SNOOP__SHIFT                                                                     0x14
+#define IH_RB_CNTL_RING2__MC_RO__SHIFT                                                                        0x16
+#define IH_RB_CNTL_RING2__MC_VMID__SHIFT                                                                      0x18
+#define IH_RB_CNTL_RING2__MC_SPACE__SHIFT                                                                     0x1c
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_CLEAR__SHIFT                                                          0x1f
+#define IH_RB_CNTL_RING2__RB_ENABLE_MASK                                                                      0x00000001L
+#define IH_RB_CNTL_RING2__RB_SIZE_MASK                                                                        0x0000003EL
+#define IH_RB_CNTL_RING2__RB_GPU_TS_ENABLE_MASK                                                               0x00000080L
+#define IH_RB_CNTL_RING2__RB_FULL_DRAIN_ENABLE_MASK                                                           0x00000200L
+#define IH_RB_CNTL_RING2__FULL_DRAIN_CLEAR_MASK                                                               0x00000400L
+#define IH_RB_CNTL_RING2__PAGE_RB_CLEAR_MASK                                                                  0x00000800L
+#define IH_RB_CNTL_RING2__RB_USED_INT_THRESHOLD_MASK                                                          0x0000F000L
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_ENABLE_MASK                                                           0x00010000L
+#define IH_RB_CNTL_RING2__MC_SWAP_MASK                                                                        0x000C0000L
+#define IH_RB_CNTL_RING2__MC_SNOOP_MASK                                                                       0x00100000L
+#define IH_RB_CNTL_RING2__MC_RO_MASK                                                                          0x00400000L
+#define IH_RB_CNTL_RING2__MC_VMID_MASK                                                                        0x0F000000L
+#define IH_RB_CNTL_RING2__MC_SPACE_MASK                                                                       0x70000000L
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_CLEAR_MASK                                                            0x80000000L
+//IH_RB_BASE_RING2
+#define IH_RB_BASE_RING2__ADDR__SHIFT                                                                         0x0
+#define IH_RB_BASE_RING2__ADDR_MASK                                                                           0xFFFFFFFFL
+//IH_RB_BASE_HI_RING2
+#define IH_RB_BASE_HI_RING2__ADDR__SHIFT                                                                      0x0
+#define IH_RB_BASE_HI_RING2__ADDR_MASK                                                                        0x000000FFL
+//IH_RB_RPTR_RING2
+#define IH_RB_RPTR_RING2__OFFSET__SHIFT                                                                       0x2
+#define IH_RB_RPTR_RING2__OFFSET_MASK                                                                         0x0003FFFCL
+//IH_RB_WPTR_RING2
+#define IH_RB_WPTR_RING2__RB_OVERFLOW__SHIFT                                                                  0x0
+#define IH_RB_WPTR_RING2__OFFSET__SHIFT                                                                       0x2
+#define IH_RB_WPTR_RING2__RB_LEFT_NONE__SHIFT                                                                 0x12
+#define IH_RB_WPTR_RING2__RB_MAY_OVERFLOW__SHIFT                                                              0x13
+#define IH_RB_WPTR_RING2__RB_OVERFLOW_MASK                                                                    0x00000001L
+#define IH_RB_WPTR_RING2__OFFSET_MASK                                                                         0x0003FFFCL
+#define IH_RB_WPTR_RING2__RB_LEFT_NONE_MASK                                                                   0x00040000L
+#define IH_RB_WPTR_RING2__RB_MAY_OVERFLOW_MASK                                                                0x00080000L
+//IH_DOORBELL_RPTR_RING2
+#define IH_DOORBELL_RPTR_RING2__OFFSET__SHIFT                                                                 0x0
+#define IH_DOORBELL_RPTR_RING2__ENABLE__SHIFT                                                                 0x1c
+#define IH_DOORBELL_RPTR_RING2__OFFSET_MASK                                                                   0x03FFFFFFL
+#define IH_DOORBELL_RPTR_RING2__ENABLE_MASK                                                                   0x10000000L
+//IH_VERSION
+#define IH_VERSION__MINVER__SHIFT                                                                             0x0
+#define IH_VERSION__MAJVER__SHIFT                                                                             0x8
+#define IH_VERSION__REV__SHIFT                                                                                0x10
+#define IH_VERSION__MINVER_MASK                                                                               0x0000007FL
+#define IH_VERSION__MAJVER_MASK                                                                               0x00007F00L
+#define IH_VERSION__REV_MASK                                                                                  0x003F0000L
+//IH_CNTL
+#define IH_CNTL__WPTR_WRITEBACK_TIMER__SHIFT                                                                  0x0
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL__SHIFT                                                               0x6
+#define IH_CNTL__IH_FIFO_HIGHWATER__SHIFT                                                                     0x8
+#define IH_CNTL__MC_WR_CLEAN_CNT__SHIFT                                                                       0x14
+#define IH_CNTL__WPTR_WRITEBACK_TIMER_MASK                                                                    0x0000001FL
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL_MASK                                                                 0x000000C0L
+#define IH_CNTL__IH_FIFO_HIGHWATER_MASK                                                                       0x00007F00L
+#define IH_CNTL__MC_WR_CLEAN_CNT_MASK                                                                         0x01F00000L
+//IH_CNTL2
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT__SHIFT                                                    0x0
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE__SHIFT                                                     0x8
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT_MASK                                                      0x0000001FL
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE_MASK                                                       0x00000100L
+//IH_STATUS
+#define IH_STATUS__IDLE__SHIFT                                                                                0x0
+#define IH_STATUS__INPUT_IDLE__SHIFT                                                                          0x1
+#define IH_STATUS__BUFFER_IDLE__SHIFT                                                                         0x2
+#define IH_STATUS__RB_FULL__SHIFT                                                                             0x3
+#define IH_STATUS__RB_FULL_DRAIN__SHIFT                                                                       0x4
+#define IH_STATUS__RB_OVERFLOW__SHIFT                                                                         0x5
+#define IH_STATUS__MC_WR_IDLE__SHIFT                                                                          0x6
+#define IH_STATUS__MC_WR_STALL__SHIFT                                                                         0x7
+#define IH_STATUS__MC_WR_CLEAN_PENDING__SHIFT                                                                 0x8
+#define IH_STATUS__MC_WR_CLEAN_STALL__SHIFT                                                                   0x9
+#define IH_STATUS__BIF_INTERRUPT_LINE__SHIFT                                                                  0xa
+#define IH_STATUS__SWITCH_READY__SHIFT                                                                        0xb
+#define IH_STATUS__RB1_FULL__SHIFT                                                                            0xc
+#define IH_STATUS__RB1_FULL_DRAIN__SHIFT                                                                      0xd
+#define IH_STATUS__RB1_OVERFLOW__SHIFT                                                                        0xe
+#define IH_STATUS__RB2_FULL__SHIFT                                                                            0xf
+#define IH_STATUS__RB2_FULL_DRAIN__SHIFT                                                                      0x10
+#define IH_STATUS__RB2_OVERFLOW__SHIFT                                                                        0x11
+#define IH_STATUS__SELF_INT_GEN_IDLE__SHIFT                                                                   0x12
+#define IH_STATUS__IDLE_MASK                                                                                  0x00000001L
+#define IH_STATUS__INPUT_IDLE_MASK                                                                            0x00000002L
+#define IH_STATUS__BUFFER_IDLE_MASK                                                                           0x00000004L
+#define IH_STATUS__RB_FULL_MASK                                                                               0x00000008L
+#define IH_STATUS__RB_FULL_DRAIN_MASK                                                                         0x00000010L
+#define IH_STATUS__RB_OVERFLOW_MASK                                                                           0x00000020L
+#define IH_STATUS__MC_WR_IDLE_MASK                                                                            0x00000040L
+#define IH_STATUS__MC_WR_STALL_MASK                                                                           0x00000080L
+#define IH_STATUS__MC_WR_CLEAN_PENDING_MASK                                                                   0x00000100L
+#define IH_STATUS__MC_WR_CLEAN_STALL_MASK                                                                     0x00000200L
+#define IH_STATUS__BIF_INTERRUPT_LINE_MASK                                                                    0x00000400L
+#define IH_STATUS__SWITCH_READY_MASK                                                                          0x00000800L
+#define IH_STATUS__RB1_FULL_MASK                                                                              0x00001000L
+#define IH_STATUS__RB1_FULL_DRAIN_MASK                                                                        0x00002000L
+#define IH_STATUS__RB1_OVERFLOW_MASK                                                                          0x00004000L
+#define IH_STATUS__RB2_FULL_MASK                                                                              0x00008000L
+#define IH_STATUS__RB2_FULL_DRAIN_MASK                                                                        0x00010000L
+#define IH_STATUS__RB2_OVERFLOW_MASK                                                                          0x00020000L
+#define IH_STATUS__SELF_INT_GEN_IDLE_MASK                                                                     0x00040000L
+//IH_PERFMON_CNTL
+#define IH_PERFMON_CNTL__ENABLE0__SHIFT                                                                       0x0
+#define IH_PERFMON_CNTL__CLEAR0__SHIFT                                                                        0x1
+#define IH_PERFMON_CNTL__PERF_SEL0__SHIFT                                                                     0x2
+#define IH_PERFMON_CNTL__ENABLE1__SHIFT                                                                       0x10
+#define IH_PERFMON_CNTL__CLEAR1__SHIFT                                                                        0x11
+#define IH_PERFMON_CNTL__PERF_SEL1__SHIFT                                                                     0x12
+#define IH_PERFMON_CNTL__ENABLE0_MASK                                                                         0x00000001L
+#define IH_PERFMON_CNTL__CLEAR0_MASK                                                                          0x00000002L
+#define IH_PERFMON_CNTL__PERF_SEL0_MASK                                                                       0x000007FCL
+#define IH_PERFMON_CNTL__ENABLE1_MASK                                                                         0x00010000L
+#define IH_PERFMON_CNTL__CLEAR1_MASK                                                                          0x00020000L
+#define IH_PERFMON_CNTL__PERF_SEL1_MASK                                                                       0x07FC0000L
+//IH_PERFCOUNTER0_RESULT
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT                                                             0x0
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT_MASK                                                               0xFFFFFFFFL
+//IH_PERFCOUNTER1_RESULT
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT                                                             0x0
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT_MASK                                                               0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_31_0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE__SHIFT                                                             0x0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE_MASK                                                               0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_63_32
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE__SHIFT                                                            0x0
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE_MASK                                                              0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_95_64
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE__SHIFT                                                            0x0
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE_MASK                                                              0xFFFFFFFFL
+//IH_DSM_MATCH_FIELD_CONTROL
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN__SHIFT                                                             0x0
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN__SHIFT                                                           0x1
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN__SHIFT                                                       0x2
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN__SHIFT                                                          0x3
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN__SHIFT                                                            0x4
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN__SHIFT                                                           0x5
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN__SHIFT                                                       0x6
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN_MASK                                                               0x00000001L
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN_MASK                                                             0x00000002L
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN_MASK                                                         0x00000004L
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN_MASK                                                            0x00000008L
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN_MASK                                                              0x00000010L
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN_MASK                                                             0x00000020L
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN_MASK                                                         0x00000040L
+//IH_DSM_MATCH_DATA_CONTROL
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE__SHIFT                                                               0x0
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE_MASK                                                                 0x0FFFFFFFL
+//IH_DSM_MATCH_FCN_ID
+#define IH_DSM_MATCH_FCN_ID__PF_VF__SHIFT                                                                     0x0
+#define IH_DSM_MATCH_FCN_ID__VF_ID__SHIFT                                                                     0x1
+#define IH_DSM_MATCH_FCN_ID__PF_VF_MASK                                                                       0x00000001L
+#define IH_DSM_MATCH_FCN_ID__VF_ID_MASK                                                                       0x0000001EL
+//IH_LIMIT_INT_RATE_CNTL
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE__SHIFT                                                           0x0
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL__SHIFT                                                          0x1
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD__SHIFT                                                         0x5
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY__SHIFT                                                           0x11
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT__SHIFT                                                            0x15
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE_MASK                                                             0x00000001L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL_MASK                                                            0x0000001EL
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD_MASK                                                           0x0000FFE0L
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY_MASK                                                             0x001E0000L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT_MASK                                                              0xFFE00000L
+//IH_VF_RB_STATUS
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF__SHIFT                                                              0x0
+#define IH_VF_RB_STATUS__RB_OVERFLOW_VF__SHIFT                                                                0x10
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF_MASK                                                                0x0000FFFFL
+#define IH_VF_RB_STATUS__RB_OVERFLOW_VF_MASK                                                                  0xFFFF0000L
+//IH_VF_RB_STATUS2
+#define IH_VF_RB_STATUS2__RB_FULL_VF__SHIFT                                                                   0x0
+#define IH_VF_RB_STATUS2__BIF_INTERRUPT_LINE_VF__SHIFT                                                        0x10
+#define IH_VF_RB_STATUS2__RB_FULL_VF_MASK                                                                     0x0000FFFFL
+#define IH_VF_RB_STATUS2__BIF_INTERRUPT_LINE_VF_MASK                                                          0xFFFF0000L
+//IH_VF_RB1_STATUS
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF__SHIFT                                                             0x0
+#define IH_VF_RB1_STATUS__RB_OVERFLOW_VF__SHIFT                                                               0x10
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF_MASK                                                               0x0000FFFFL
+#define IH_VF_RB1_STATUS__RB_OVERFLOW_VF_MASK                                                                 0xFFFF0000L
+//IH_VF_RB1_STATUS2
+#define IH_VF_RB1_STATUS2__RB_FULL_VF__SHIFT                                                                  0x0
+#define IH_VF_RB1_STATUS2__RB_FULL_VF_MASK                                                                    0x0000FFFFL
+//IH_VF_RB2_STATUS
+#define IH_VF_RB2_STATUS__RB_FULL_DRAIN_VF__SHIFT                                                             0x0
+#define IH_VF_RB2_STATUS__RB_OVERFLOW_VF__SHIFT                                                               0x10
+#define IH_VF_RB2_STATUS__RB_FULL_DRAIN_VF_MASK                                                               0x0000FFFFL
+#define IH_VF_RB2_STATUS__RB_OVERFLOW_VF_MASK                                                                 0xFFFF0000L
+//IH_VF_RB2_STATUS2
+#define IH_VF_RB2_STATUS2__RB_FULL_VF__SHIFT                                                                  0x0
+#define IH_VF_RB2_STATUS2__RB_FULL_VF_MASK                                                                    0x0000FFFFL
+//IH_INT_FLOOD_CNTL
+#define IH_INT_FLOOD_CNTL__HIGHWATER__SHIFT                                                                   0x0
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE__SHIFT                                                           0x3
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS__SHIFT                                                      0x4
+#define IH_INT_FLOOD_CNTL__HIGHWATER_MASK                                                                     0x00000007L
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE_MASK                                                             0x00000008L
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS_MASK                                                        0x00000010L
+//IH_RB0_INT_FLOOD_STATUS
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT                                                     0x0
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT                                                        0x1f
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK                                                       0x0000FFFFL
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK                                                          0x80000000L
+//IH_RB1_INT_FLOOD_STATUS
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT                                                     0x0
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT                                                        0x1f
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK                                                       0x0000FFFFL
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK                                                          0x80000000L
+//IH_RB2_INT_FLOOD_STATUS
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT                                                     0x0
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT                                                        0x1f
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK                                                       0x0000FFFFL
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK                                                          0x80000000L
+//IH_INT_FLOOD_STATUS
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT__SHIFT                                                              0x0
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID__SHIFT                                                  0x8
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID__SHIFT                                                  0x10
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID__SHIFT                                                      0x18
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF__SHIFT                                                         0x1c
+#define IH_INT_FLOOD_STATUS__INT_DROPPED__SHIFT                                                               0x1e
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT_MASK                                                                0x000000FFL
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID_MASK                                                    0x0000FF00L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID_MASK                                                    0x00FF0000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID_MASK                                                        0x0F000000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_MASK                                                           0x10000000L
+#define IH_INT_FLOOD_STATUS__INT_DROPPED_MASK                                                                 0x40000000L
+//IH_STORM_CLIENT_LIST_CNTL
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT__SHIFT                                             0x1
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT__SHIFT                                             0x2
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT__SHIFT                                             0x3
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT__SHIFT                                             0x4
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT__SHIFT                                             0x5
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT__SHIFT                                             0x6
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT__SHIFT                                             0x7
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT__SHIFT                                             0x8
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT__SHIFT                                             0x9
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT__SHIFT                                            0xa
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT__SHIFT                                            0xb
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT__SHIFT                                            0xc
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT__SHIFT                                            0xd
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT__SHIFT                                            0xe
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT__SHIFT                                            0xf
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT__SHIFT                                            0x10
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT__SHIFT                                            0x11
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT__SHIFT                                            0x12
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT__SHIFT                                            0x13
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT__SHIFT                                            0x14
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT__SHIFT                                            0x15
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT__SHIFT                                            0x16
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT__SHIFT                                            0x17
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT__SHIFT                                            0x18
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT__SHIFT                                            0x19
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT__SHIFT                                            0x1a
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT__SHIFT                                            0x1b
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT__SHIFT                                            0x1c
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT__SHIFT                                            0x1d
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT__SHIFT                                            0x1e
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT__SHIFT                                            0x1f
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT_MASK                                               0x00000002L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT_MASK                                               0x00000004L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT_MASK                                               0x00000008L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT_MASK                                               0x00000010L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT_MASK                                               0x00000020L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT_MASK                                               0x00000040L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT_MASK                                               0x00000080L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT_MASK                                               0x00000100L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT_MASK                                               0x00000200L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT_MASK                                              0x00000400L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT_MASK                                              0x00000800L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT_MASK                                              0x00001000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT_MASK                                              0x00002000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT_MASK                                              0x00004000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT_MASK                                              0x00008000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT_MASK                                              0x00010000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT_MASK                                              0x00020000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT_MASK                                              0x00040000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT_MASK                                              0x00080000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT_MASK                                              0x00100000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT_MASK                                              0x00200000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT_MASK                                              0x00400000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT_MASK                                              0x00800000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT_MASK                                              0x01000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT_MASK                                              0x02000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT_MASK                                              0x04000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT_MASK                                              0x08000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT_MASK                                              0x10000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT_MASK                                              0x20000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK                                              0x40000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK                                              0x80000000L
+//IH_CLK_CTRL
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT                                            0x19
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT                                                   0x1a
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT                                                        0x1b
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT                                                    0x1c
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT                                                       0x1d
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT                                                             0x1e
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT                                                             0x1f
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK                                              0x02000000L
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK                                                     0x04000000L
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK                                                          0x08000000L
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK                                                      0x10000000L
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK                                                         0x20000000L
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE_MASK                                                               0x40000000L
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE_MASK                                                               0x80000000L
+//IH_INT_FLAGS
+#define IH_INT_FLAGS__CLIENT_0_FLAG__SHIFT                                                                    0x0
+#define IH_INT_FLAGS__CLIENT_1_FLAG__SHIFT                                                                    0x1
+#define IH_INT_FLAGS__CLIENT_2_FLAG__SHIFT                                                                    0x2
+#define IH_INT_FLAGS__CLIENT_3_FLAG__SHIFT                                                                    0x3
+#define IH_INT_FLAGS__CLIENT_4_FLAG__SHIFT                                                                    0x4
+#define IH_INT_FLAGS__CLIENT_5_FLAG__SHIFT                                                                    0x5
+#define IH_INT_FLAGS__CLIENT_6_FLAG__SHIFT                                                                    0x6
+#define IH_INT_FLAGS__CLIENT_7_FLAG__SHIFT                                                                    0x7
+#define IH_INT_FLAGS__CLIENT_8_FLAG__SHIFT                                                                    0x8
+#define IH_INT_FLAGS__CLIENT_9_FLAG__SHIFT                                                                    0x9
+#define IH_INT_FLAGS__CLIENT_10_FLAG__SHIFT                                                                   0xa
+#define IH_INT_FLAGS__CLIENT_11_FLAG__SHIFT                                                                   0xb
+#define IH_INT_FLAGS__CLIENT_12_FLAG__SHIFT                                                                   0xc
+#define IH_INT_FLAGS__CLIENT_13_FLAG__SHIFT                                                                   0xd
+#define IH_INT_FLAGS__CLIENT_14_FLAG__SHIFT                                                                   0xe
+#define IH_INT_FLAGS__CLIENT_15_FLAG__SHIFT                                                                   0xf
+#define IH_INT_FLAGS__CLIENT_16_FLAG__SHIFT                                                                   0x10
+#define IH_INT_FLAGS__CLIENT_17_FLAG__SHIFT                                                                   0x11
+#define IH_INT_FLAGS__CLIENT_18_FLAG__SHIFT                                                                   0x12
+#define IH_INT_FLAGS__CLIENT_19_FLAG__SHIFT                                                                   0x13
+#define IH_INT_FLAGS__CLIENT_20_FLAG__SHIFT                                                                   0x14
+#define IH_INT_FLAGS__CLIENT_21_FLAG__SHIFT                                                                   0x15
+#define IH_INT_FLAGS__CLIENT_22_FLAG__SHIFT                                                                   0x16
+#define IH_INT_FLAGS__CLIENT_23_FLAG__SHIFT                                                                   0x17
+#define IH_INT_FLAGS__CLIENT_24_FLAG__SHIFT                                                                   0x18
+#define IH_INT_FLAGS__CLIENT_25_FLAG__SHIFT                                                                   0x19
+#define IH_INT_FLAGS__CLIENT_26_FLAG__SHIFT                                                                   0x1a
+#define IH_INT_FLAGS__CLIENT_27_FLAG__SHIFT                                                                   0x1b
+#define IH_INT_FLAGS__CLIENT_28_FLAG__SHIFT                                                                   0x1c
+#define IH_INT_FLAGS__CLIENT_29_FLAG__SHIFT                                                                   0x1d
+#define IH_INT_FLAGS__CLIENT_30_FLAG__SHIFT                                                                   0x1e
+#define IH_INT_FLAGS__CLIENT_31_FLAG__SHIFT                                                                   0x1f
+#define IH_INT_FLAGS__CLIENT_0_FLAG_MASK                                                                      0x00000001L
+#define IH_INT_FLAGS__CLIENT_1_FLAG_MASK                                                                      0x00000002L
+#define IH_INT_FLAGS__CLIENT_2_FLAG_MASK                                                                      0x00000004L
+#define IH_INT_FLAGS__CLIENT_3_FLAG_MASK                                                                      0x00000008L
+#define IH_INT_FLAGS__CLIENT_4_FLAG_MASK                                                                      0x00000010L
+#define IH_INT_FLAGS__CLIENT_5_FLAG_MASK                                                                      0x00000020L
+#define IH_INT_FLAGS__CLIENT_6_FLAG_MASK                                                                      0x00000040L
+#define IH_INT_FLAGS__CLIENT_7_FLAG_MASK                                                                      0x00000080L
+#define IH_INT_FLAGS__CLIENT_8_FLAG_MASK                                                                      0x00000100L
+#define IH_INT_FLAGS__CLIENT_9_FLAG_MASK                                                                      0x00000200L
+#define IH_INT_FLAGS__CLIENT_10_FLAG_MASK                                                                     0x00000400L
+#define IH_INT_FLAGS__CLIENT_11_FLAG_MASK                                                                     0x00000800L
+#define IH_INT_FLAGS__CLIENT_12_FLAG_MASK                                                                     0x00001000L
+#define IH_INT_FLAGS__CLIENT_13_FLAG_MASK                                                                     0x00002000L
+#define IH_INT_FLAGS__CLIENT_14_FLAG_MASK                                                                     0x00004000L
+#define IH_INT_FLAGS__CLIENT_15_FLAG_MASK                                                                     0x00008000L
+#define IH_INT_FLAGS__CLIENT_16_FLAG_MASK                                                                     0x00010000L
+#define IH_INT_FLAGS__CLIENT_17_FLAG_MASK                                                                     0x00020000L
+#define IH_INT_FLAGS__CLIENT_18_FLAG_MASK                                                                     0x00040000L
+#define IH_INT_FLAGS__CLIENT_19_FLAG_MASK                                                                     0x00080000L
+#define IH_INT_FLAGS__CLIENT_20_FLAG_MASK                                                                     0x00100000L
+#define IH_INT_FLAGS__CLIENT_21_FLAG_MASK                                                                     0x00200000L
+#define IH_INT_FLAGS__CLIENT_22_FLAG_MASK                                                                     0x00400000L
+#define IH_INT_FLAGS__CLIENT_23_FLAG_MASK                                                                     0x00800000L
+#define IH_INT_FLAGS__CLIENT_24_FLAG_MASK                                                                     0x01000000L
+#define IH_INT_FLAGS__CLIENT_25_FLAG_MASK                                                                     0x02000000L
+#define IH_INT_FLAGS__CLIENT_26_FLAG_MASK                                                                     0x04000000L
+#define IH_INT_FLAGS__CLIENT_27_FLAG_MASK                                                                     0x08000000L
+#define IH_INT_FLAGS__CLIENT_28_FLAG_MASK                                                                     0x10000000L
+#define IH_INT_FLAGS__CLIENT_29_FLAG_MASK                                                                     0x20000000L
+#define IH_INT_FLAGS__CLIENT_30_FLAG_MASK                                                                     0x40000000L
+#define IH_INT_FLAGS__CLIENT_31_FLAG_MASK                                                                     0x80000000L
+//IH_LAST_INT_INFO0
+#define IH_LAST_INT_INFO0__CLIENT_ID__SHIFT                                                                   0x0
+#define IH_LAST_INT_INFO0__SOURCE_ID__SHIFT                                                                   0x8
+#define IH_LAST_INT_INFO0__RING_ID__SHIFT                                                                     0x10
+#define IH_LAST_INT_INFO0__VM_ID__SHIFT                                                                       0x18
+#define IH_LAST_INT_INFO0__VMID_TYPE__SHIFT                                                                   0x1f
+#define IH_LAST_INT_INFO0__CLIENT_ID_MASK                                                                     0x000000FFL
+#define IH_LAST_INT_INFO0__SOURCE_ID_MASK                                                                     0x0000FF00L
+#define IH_LAST_INT_INFO0__RING_ID_MASK                                                                       0x00FF0000L
+#define IH_LAST_INT_INFO0__VM_ID_MASK                                                                         0x0F000000L
+#define IH_LAST_INT_INFO0__VMID_TYPE_MASK                                                                     0x80000000L
+//IH_LAST_INT_INFO1
+#define IH_LAST_INT_INFO1__CONTEXT_ID__SHIFT                                                                  0x0
+#define IH_LAST_INT_INFO1__CONTEXT_ID_MASK                                                                    0xFFFFFFFFL
+//IH_LAST_INT_INFO2
+#define IH_LAST_INT_INFO2__PAS_ID__SHIFT                                                                      0x0
+#define IH_LAST_INT_INFO2__VF_ID__SHIFT                                                                       0x10
+#define IH_LAST_INT_INFO2__VF__SHIFT                                                                          0x14
+#define IH_LAST_INT_INFO2__PAS_ID_MASK                                                                        0x0000FFFFL
+#define IH_LAST_INT_INFO2__VF_ID_MASK                                                                         0x000F0000L
+#define IH_LAST_INT_INFO2__VF_MASK                                                                            0x00100000L
+//IH_SCRATCH
+#define IH_SCRATCH__DATA__SHIFT                                                                               0x0
+#define IH_SCRATCH__DATA_MASK                                                                                 0xFFFFFFFFL
+//IH_CLIENT_CREDIT_ERROR
+#define IH_CLIENT_CREDIT_ERROR__CLEAR__SHIFT                                                                  0x0
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR__SHIFT                                                         0x1
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR__SHIFT                                                         0x2
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR__SHIFT                                                         0x3
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR__SHIFT                                                         0x4
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR__SHIFT                                                         0x5
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR__SHIFT                                                         0x6
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR__SHIFT                                                         0x7
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR__SHIFT                                                         0x8
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR__SHIFT                                                         0x9
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR__SHIFT                                                        0xa
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR__SHIFT                                                        0xb
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR__SHIFT                                                        0xc
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR__SHIFT                                                        0xd
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR__SHIFT                                                        0xe
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR__SHIFT                                                        0xf
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR__SHIFT                                                        0x10
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR__SHIFT                                                        0x11
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR__SHIFT                                                        0x12
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR__SHIFT                                                        0x13
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR__SHIFT                                                        0x14
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR__SHIFT                                                        0x15
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR__SHIFT                                                        0x16
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR__SHIFT                                                        0x17
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR__SHIFT                                                        0x18
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR__SHIFT                                                        0x19
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR__SHIFT                                                        0x1a
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR__SHIFT                                                        0x1b
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR__SHIFT                                                        0x1c
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR__SHIFT                                                        0x1d
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR__SHIFT                                                        0x1e
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR__SHIFT                                                        0x1f
+#define IH_CLIENT_CREDIT_ERROR__CLEAR_MASK                                                                    0x00000001L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR_MASK                                                           0x00000002L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR_MASK                                                           0x00000004L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR_MASK                                                           0x00000008L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR_MASK                                                           0x00000010L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR_MASK                                                           0x00000020L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR_MASK                                                           0x00000040L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR_MASK                                                           0x00000080L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR_MASK                                                           0x00000100L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR_MASK                                                           0x00000200L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR_MASK                                                          0x00000400L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR_MASK                                                          0x00000800L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR_MASK                                                          0x00001000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR_MASK                                                          0x00002000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR_MASK                                                          0x00004000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR_MASK                                                          0x00008000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR_MASK                                                          0x00010000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR_MASK                                                          0x00020000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR_MASK                                                          0x00040000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR_MASK                                                          0x00080000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR_MASK                                                          0x00100000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR_MASK                                                          0x00200000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR_MASK                                                          0x00400000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR_MASK                                                          0x00800000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR_MASK                                                          0x01000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR_MASK                                                          0x02000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR_MASK                                                          0x04000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR_MASK                                                          0x08000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR_MASK                                                          0x10000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR_MASK                                                          0x20000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR_MASK                                                          0x40000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR_MASK                                                          0x80000000L
+//IH_GPU_IOV_VIOLATION_LOG
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT                                                     0x0
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT                                            0x1
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT                                                              0x2
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT                                                               0x12
+#define IH_GPU_IOV_VIOLATION_LOG__VF__SHIFT                                                                   0x13
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT                                                                0x14
+#define IH_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT                                                         0x18
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK                                                       0x00000001L
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK                                              0x00000002L
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK                                                                0x0003FFFCL
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE_MASK                                                                 0x00040000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_MASK                                                                     0x00080000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID_MASK                                                                  0x00F00000L
+#define IH_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK                                                           0xFF000000L
+//IH_COOKIE_REC_VIOLATION_LOG
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS__SHIFT                                                  0x0
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID__SHIFT                                                         0x10
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID__SHIFT                                                      0x18
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS_MASK                                                    0x00000001L
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID_MASK                                                           0x00FF0000L
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID_MASK                                                        0xFF000000L
+//IH_CREDIT_STATUS
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED__SHIFT                                                     0x1
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED__SHIFT                                                     0x2
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED__SHIFT                                                     0x3
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED__SHIFT                                                     0x4
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED__SHIFT                                                     0x5
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED__SHIFT                                                     0x6
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED__SHIFT                                                     0x7
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED__SHIFT                                                     0x8
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED__SHIFT                                                     0x9
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED__SHIFT                                                    0xa
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED__SHIFT                                                    0xb
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED__SHIFT                                                    0xc
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED__SHIFT                                                    0xd
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED__SHIFT                                                    0xe
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED__SHIFT                                                    0xf
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED__SHIFT                                                    0x10
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED__SHIFT                                                    0x11
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED__SHIFT                                                    0x12
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED__SHIFT                                                    0x13
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED__SHIFT                                                    0x14
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED__SHIFT                                                    0x15
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED__SHIFT                                                    0x16
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED__SHIFT                                                    0x17
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED__SHIFT                                                    0x18
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED__SHIFT                                                    0x19
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED__SHIFT                                                    0x1a
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED__SHIFT                                                    0x1b
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED__SHIFT                                                    0x1c
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED__SHIFT                                                    0x1d
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED__SHIFT                                                    0x1e
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED__SHIFT                                                    0x1f
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED_MASK                                                       0x00000002L
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED_MASK                                                       0x00000004L
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED_MASK                                                       0x00000008L
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED_MASK                                                       0x00000010L
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED_MASK                                                       0x00000020L
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED_MASK                                                       0x00000040L
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED_MASK                                                       0x00000080L
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED_MASK                                                       0x00000100L
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED_MASK                                                       0x00000200L
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED_MASK                                                      0x00000400L
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED_MASK                                                      0x00000800L
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED_MASK                                                      0x00001000L
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED_MASK                                                      0x00002000L
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED_MASK                                                      0x00004000L
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED_MASK                                                      0x00008000L
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED_MASK                                                      0x00010000L
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED_MASK                                                      0x00020000L
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED_MASK                                                      0x00040000L
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED_MASK                                                      0x00080000L
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED_MASK                                                      0x00100000L
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED_MASK                                                      0x00200000L
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED_MASK                                                      0x00400000L
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED_MASK                                                      0x00800000L
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED_MASK                                                      0x01000000L
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED_MASK                                                      0x02000000L
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED_MASK                                                      0x04000000L
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED_MASK                                                      0x08000000L
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED_MASK                                                      0x10000000L
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED_MASK                                                      0x20000000L
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED_MASK                                                      0x40000000L
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED_MASK                                                      0x80000000L
+//IH_MMHUB_ERROR
+#define IH_MMHUB_ERROR__IH_BRESP_01__SHIFT                                                                    0x1
+#define IH_MMHUB_ERROR__IH_BRESP_10__SHIFT                                                                    0x2
+#define IH_MMHUB_ERROR__IH_BRESP_11__SHIFT                                                                    0x3
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01__SHIFT                                                               0x5
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10__SHIFT                                                               0x6
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11__SHIFT                                                               0x7
+#define IH_MMHUB_ERROR__IH_BRESP_01_MASK                                                                      0x00000002L
+#define IH_MMHUB_ERROR__IH_BRESP_10_MASK                                                                      0x00000004L
+#define IH_MMHUB_ERROR__IH_BRESP_11_MASK                                                                      0x00000008L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01_MASK                                                                 0x00000020L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10_MASK                                                                 0x00000040L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11_MASK                                                                 0x00000080L
+//IH_MEM_POWER_CTRL
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN__SHIFT                                                 0x0
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN__SHIFT                                                   0x1
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN__SHIFT                                                   0x2
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN__SHIFT                                                   0x3
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS__SHIFT                                               0x4
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY__SHIFT                                        0x8
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT                                     0xe
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN_MASK                                                   0x00000001L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN_MASK                                                     0x00000002L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN_MASK                                                     0x00000004L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN_MASK                                                     0x00000008L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS_MASK                                                 0x00000070L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY_MASK                                          0x00003F00L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_LS_ENTER_DELAY_MASK                                       0x0000C000L
+//IH_REGISTER_LAST_PART2
+#define IH_REGISTER_LAST_PART2__RESERVED__SHIFT                                                               0x0
+#define IH_REGISTER_LAST_PART2__RESERVED_MASK                                                                 0xFFFFFFFFL
+//SEM_CLK_CTRL
+#define SEM_CLK_CTRL__ON_DELAY__SHIFT                                                                         0x0
+#define SEM_CLK_CTRL__OFF_HYSTERESIS__SHIFT                                                                   0x4
+#define SEM_CLK_CTRL__RESERVED__SHIFT                                                                         0xc
+#define SEM_CLK_CTRL__SOFT_OVERRIDE7__SHIFT                                                                   0x18
+#define SEM_CLK_CTRL__SOFT_OVERRIDE6__SHIFT                                                                   0x19
+#define SEM_CLK_CTRL__MEM_CLK_SOFT_OVERRIDE__SHIFT                                                            0x1a
+#define SEM_CLK_CTRL__SOFT_OVERRIDE4__SHIFT                                                                   0x1b
+#define SEM_CLK_CTRL__SOFT_OVERRIDE3__SHIFT                                                                   0x1c
+#define SEM_CLK_CTRL__SOFT_OVERRIDE2__SHIFT                                                                   0x1d
+#define SEM_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT                                                            0x1e
+#define SEM_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT                                                            0x1f
+#define SEM_CLK_CTRL__ON_DELAY_MASK                                                                           0x0000000FL
+#define SEM_CLK_CTRL__OFF_HYSTERESIS_MASK                                                                     0x00000FF0L
+#define SEM_CLK_CTRL__RESERVED_MASK                                                                           0x00FFF000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE7_MASK                                                                     0x01000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE6_MASK                                                                     0x02000000L
+#define SEM_CLK_CTRL__MEM_CLK_SOFT_OVERRIDE_MASK                                                              0x04000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE4_MASK                                                                     0x08000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE3_MASK                                                                     0x10000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE2_MASK                                                                     0x20000000L
+#define SEM_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE_MASK                                                              0x40000000L
+#define SEM_CLK_CTRL__REG_CLK_SOFT_OVERRIDE_MASK                                                              0x80000000L
+//SEM_UTC_CREDIT
+#define SEM_UTC_CREDIT__UTCL2_CREDIT__SHIFT                                                                   0x0
+#define SEM_UTC_CREDIT__WATERMARK__SHIFT                                                                      0x8
+#define SEM_UTC_CREDIT__UTCL2_CREDIT_MASK                                                                     0x0000001FL
+#define SEM_UTC_CREDIT__WATERMARK_MASK                                                                        0x00000F00L
+//SEM_UTC_CONFIG
+#define SEM_UTC_CONFIG__USE_MTYPE__SHIFT                                                                      0x0
+#define SEM_UTC_CONFIG__FORCE_SNOOP__SHIFT                                                                    0x3
+#define SEM_UTC_CONFIG__FORCE_GCC__SHIFT                                                                      0x4
+#define SEM_UTC_CONFIG__USE_PT_SNOOP__SHIFT                                                                   0x5
+#define SEM_UTC_CONFIG__USE_MTYPE_MASK                                                                        0x00000007L
+#define SEM_UTC_CONFIG__FORCE_SNOOP_MASK                                                                      0x00000008L
+#define SEM_UTC_CONFIG__FORCE_GCC_MASK                                                                        0x00000010L
+#define SEM_UTC_CONFIG__USE_PT_SNOOP_MASK                                                                     0x00000020L
+//SEM_UTCL2_TRAN_EN_LUT
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA0_UTCL2_EN__SHIFT                                                          0x0
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA1_UTCL2_EN__SHIFT                                                          0x1
+#define SEM_UTCL2_TRAN_EN_LUT__UVD_UTCL2_EN__SHIFT                                                            0x2
+#define SEM_UTCL2_TRAN_EN_LUT__VCE0_UTCL2_EN__SHIFT                                                           0x3
+#define SEM_UTCL2_TRAN_EN_LUT__ACP_UTCL2_EN__SHIFT                                                            0x4
+#define SEM_UTCL2_TRAN_EN_LUT__ISP_UTCL2_EN__SHIFT                                                            0x5
+#define SEM_UTCL2_TRAN_EN_LUT__VCE1_UTCL2_EN__SHIFT                                                           0x6
+#define SEM_UTCL2_TRAN_EN_LUT__VP8_UTCL2_EN__SHIFT                                                            0x7
+#define SEM_UTCL2_TRAN_EN_LUT__UVD1_UTCL2_EN__SHIFT                                                           0x8
+#define SEM_UTCL2_TRAN_EN_LUT__RESERVED__SHIFT                                                                0x9
+#define SEM_UTCL2_TRAN_EN_LUT__CP_UTCL2_EN__SHIFT                                                             0x1f
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA0_UTCL2_EN_MASK                                                            0x00000001L
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA1_UTCL2_EN_MASK                                                            0x00000002L
+#define SEM_UTCL2_TRAN_EN_LUT__UVD_UTCL2_EN_MASK                                                              0x00000004L
+#define SEM_UTCL2_TRAN_EN_LUT__VCE0_UTCL2_EN_MASK                                                             0x00000008L
+#define SEM_UTCL2_TRAN_EN_LUT__ACP_UTCL2_EN_MASK                                                              0x00000010L
+#define SEM_UTCL2_TRAN_EN_LUT__ISP_UTCL2_EN_MASK                                                              0x00000020L
+#define SEM_UTCL2_TRAN_EN_LUT__VCE1_UTCL2_EN_MASK                                                             0x00000040L
+#define SEM_UTCL2_TRAN_EN_LUT__VP8_UTCL2_EN_MASK                                                              0x00000080L
+#define SEM_UTCL2_TRAN_EN_LUT__UVD1_UTCL2_EN_MASK                                                             0x00000100L
+#define SEM_UTCL2_TRAN_EN_LUT__RESERVED_MASK                                                                  0x7FFFFE00L
+#define SEM_UTCL2_TRAN_EN_LUT__CP_UTCL2_EN_MASK                                                               0x80000000L
+//SEM_MCIF_CONFIG
+#define SEM_MCIF_CONFIG__MC_REQ_SWAP__SHIFT                                                                   0x0
+#define SEM_MCIF_CONFIG__MC_WRREQ_CREDIT__SHIFT                                                               0x2
+#define SEM_MCIF_CONFIG__MC_RDREQ_CREDIT__SHIFT                                                               0x8
+#define SEM_MCIF_CONFIG__MC_REQ_SWAP_MASK                                                                     0x00000003L
+#define SEM_MCIF_CONFIG__MC_WRREQ_CREDIT_MASK                                                                 0x000000FCL
+#define SEM_MCIF_CONFIG__MC_RDREQ_CREDIT_MASK                                                                 0x00003F00L
+//SEM_PERFMON_CNTL
+#define SEM_PERFMON_CNTL__PERF_ENABLE0__SHIFT                                                                 0x0
+#define SEM_PERFMON_CNTL__PERF_CLEAR0__SHIFT                                                                  0x1
+#define SEM_PERFMON_CNTL__PERF_SEL0__SHIFT                                                                    0x2
+#define SEM_PERFMON_CNTL__PERF_ENABLE1__SHIFT                                                                 0xa
+#define SEM_PERFMON_CNTL__PERF_CLEAR1__SHIFT                                                                  0xb
+#define SEM_PERFMON_CNTL__PERF_SEL1__SHIFT                                                                    0xc
+#define SEM_PERFMON_CNTL__PERF_ENABLE0_MASK                                                                   0x00000001L
+#define SEM_PERFMON_CNTL__PERF_CLEAR0_MASK                                                                    0x00000002L
+#define SEM_PERFMON_CNTL__PERF_SEL0_MASK                                                                      0x000003FCL
+#define SEM_PERFMON_CNTL__PERF_ENABLE1_MASK                                                                   0x00000400L
+#define SEM_PERFMON_CNTL__PERF_CLEAR1_MASK                                                                    0x00000800L
+#define SEM_PERFMON_CNTL__PERF_SEL1_MASK                                                                      0x000FF000L
+//SEM_PERFCOUNTER0_RESULT
+#define SEM_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT                                                            0x0
+#define SEM_PERFCOUNTER0_RESULT__PERF_COUNT_MASK                                                              0xFFFFFFFFL
+//SEM_PERFCOUNTER1_RESULT
+#define SEM_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT                                                            0x0
+#define SEM_PERFCOUNTER1_RESULT__PERF_COUNT_MASK                                                              0xFFFFFFFFL
+//SEM_STATUS
+#define SEM_STATUS__SEM_IDLE__SHIFT                                                                           0x0
+#define SEM_STATUS__SEM_INTERNAL_IDLE__SHIFT                                                                  0x1
+#define SEM_STATUS__MC_RDREQ_FIFO_FULL__SHIFT                                                                 0x2
+#define SEM_STATUS__MC_WRREQ_FIFO_FULL__SHIFT                                                                 0x3
+#define SEM_STATUS__WRITE1_FIFO_FULL__SHIFT                                                                   0x4
+#define SEM_STATUS__CHECK0_FIFO_FULL__SHIFT                                                                   0x5
+#define SEM_STATUS__MC_RDREQ_PENDING__SHIFT                                                                   0x6
+#define SEM_STATUS__MC_WRREQ_PENDING__SHIFT                                                                   0x7
+#define SEM_STATUS__SDMA0_MAILBOX_PENDING__SHIFT                                                              0x8
+#define SEM_STATUS__SDMA1_MAILBOX_PENDING__SHIFT                                                              0x9
+#define SEM_STATUS__UVD_MAILBOX_PENDING__SHIFT                                                                0xa
+#define SEM_STATUS__VCE_MAILBOX_PENDING__SHIFT                                                                0xb
+#define SEM_STATUS__CPG1_MAILBOX_PENDING__SHIFT                                                               0xc
+#define SEM_STATUS__CPG2_MAILBOX_PENDING__SHIFT                                                               0xd
+#define SEM_STATUS__VCE1_MAILBOX_PENDING__SHIFT                                                               0xe
+#define SEM_STATUS__ATC_REQ_PENDING__SHIFT                                                                    0xf
+#define SEM_STATUS__OUTSTANDING_CLEAN__SHIFT                                                                  0x10
+#define SEM_STATUS__INVREQ_FLUSH_VF_MISMATCH__SHIFT                                                           0x11
+#define SEM_STATUS__INVREQ_NONFLUSH_VF_MISMATCH__SHIFT                                                        0x12
+#define SEM_STATUS__INVREQ_CNT_IDLE__SHIFT                                                                    0x13
+#define SEM_STATUS__ENTRYLIST_IDLE__SHIFT                                                                     0x14
+#define SEM_STATUS__MIF_IDLE__SHIFT                                                                           0x15
+#define SEM_STATUS__REGISTER_IDLE__SHIFT                                                                      0x16
+#define SEM_STATUS__ATCL2_INVREQ_IDLE__SHIFT                                                                  0x17
+#define SEM_STATUS__UVD1_MAILBOX_PENDING__SHIFT                                                               0x18
+#define SEM_STATUS__SWITCH_READY__SHIFT                                                                       0x1f
+#define SEM_STATUS__SEM_IDLE_MASK                                                                             0x00000001L
+#define SEM_STATUS__SEM_INTERNAL_IDLE_MASK                                                                    0x00000002L
+#define SEM_STATUS__MC_RDREQ_FIFO_FULL_MASK                                                                   0x00000004L
+#define SEM_STATUS__MC_WRREQ_FIFO_FULL_MASK                                                                   0x00000008L
+#define SEM_STATUS__WRITE1_FIFO_FULL_MASK                                                                     0x00000010L
+#define SEM_STATUS__CHECK0_FIFO_FULL_MASK                                                                     0x00000020L
+#define SEM_STATUS__MC_RDREQ_PENDING_MASK                                                                     0x00000040L
+#define SEM_STATUS__MC_WRREQ_PENDING_MASK                                                                     0x00000080L
+#define SEM_STATUS__SDMA0_MAILBOX_PENDING_MASK                                                                0x00000100L
+#define SEM_STATUS__SDMA1_MAILBOX_PENDING_MASK                                                                0x00000200L
+#define SEM_STATUS__UVD_MAILBOX_PENDING_MASK                                                                  0x00000400L
+#define SEM_STATUS__VCE_MAILBOX_PENDING_MASK                                                                  0x00000800L
+#define SEM_STATUS__CPG1_MAILBOX_PENDING_MASK                                                                 0x00001000L
+#define SEM_STATUS__CPG2_MAILBOX_PENDING_MASK                                                                 0x00002000L
+#define SEM_STATUS__VCE1_MAILBOX_PENDING_MASK                                                                 0x00004000L
+#define SEM_STATUS__ATC_REQ_PENDING_MASK                                                                      0x00008000L
+#define SEM_STATUS__OUTSTANDING_CLEAN_MASK                                                                    0x00010000L
+#define SEM_STATUS__INVREQ_FLUSH_VF_MISMATCH_MASK                                                             0x00020000L
+#define SEM_STATUS__INVREQ_NONFLUSH_VF_MISMATCH_MASK                                                          0x00040000L
+#define SEM_STATUS__INVREQ_CNT_IDLE_MASK                                                                      0x00080000L
+#define SEM_STATUS__ENTRYLIST_IDLE_MASK                                                                       0x00100000L
+#define SEM_STATUS__MIF_IDLE_MASK                                                                             0x00200000L
+#define SEM_STATUS__REGISTER_IDLE_MASK                                                                        0x00400000L
+#define SEM_STATUS__ATCL2_INVREQ_IDLE_MASK                                                                    0x00800000L
+#define SEM_STATUS__UVD1_MAILBOX_PENDING_MASK                                                                 0x01000000L
+#define SEM_STATUS__SWITCH_READY_MASK                                                                         0x80000000L
+//SEM_MAILBOX_CLIENTCONFIG
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0__SHIFT                                                           0x0
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1__SHIFT                                                           0x3
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT2__SHIFT                                                           0x6
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT3__SHIFT                                                           0x9
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA_CLIENT0__SHIFT                                                         0xc
+#define SEM_MAILBOX_CLIENTCONFIG__UVD_CLIENT0__SHIFT                                                          0xf
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA1_CLIENT0__SHIFT                                                        0x12
+#define SEM_MAILBOX_CLIENTCONFIG__VCE_CLIENT0__SHIFT                                                          0x15
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0_MASK                                                             0x00000007L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1_MASK                                                             0x00000038L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT2_MASK                                                             0x000001C0L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT3_MASK                                                             0x00000E00L
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA_CLIENT0_MASK                                                           0x00007000L
+#define SEM_MAILBOX_CLIENTCONFIG__UVD_CLIENT0_MASK                                                            0x00038000L
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA1_CLIENT0_MASK                                                          0x001C0000L
+#define SEM_MAILBOX_CLIENTCONFIG__VCE_CLIENT0_MASK                                                            0x00E00000L
+//SEM_MAILBOX
+#define SEM_MAILBOX__HOSTPORT__SHIFT                                                                          0x0
+#define SEM_MAILBOX__RESERVED__SHIFT                                                                          0x10
+#define SEM_MAILBOX__HOSTPORT_MASK                                                                            0x0000FFFFL
+#define SEM_MAILBOX__RESERVED_MASK                                                                            0xFFFF0000L
+//SEM_MAILBOX_CONTROL
+#define SEM_MAILBOX_CONTROL__HOSTPORT_ENABLE__SHIFT                                                           0x0
+#define SEM_MAILBOX_CONTROL__RESERVED__SHIFT                                                                  0x10
+#define SEM_MAILBOX_CONTROL__HOSTPORT_ENABLE_MASK                                                             0x0000FFFFL
+#define SEM_MAILBOX_CONTROL__RESERVED_MASK                                                                    0xFFFF0000L
+//SEM_CHICKEN_BITS
+#define SEM_CHICKEN_BITS__VMID_PIPELINE_EN__SHIFT                                                             0x0
+#define SEM_CHICKEN_BITS__ENTRY_PIPELINE_EN__SHIFT                                                            0x1
+#define SEM_CHICKEN_BITS__CHECK_COUNTER_EN__SHIFT                                                             0x2
+#define SEM_CHICKEN_BITS__ECC_BEHAVIOR__SHIFT                                                                 0x3
+#define SEM_CHICKEN_BITS__PHY_TRAN_EN__SHIFT                                                                  0x6
+#define SEM_CHICKEN_BITS__ADDR_CMP_UNTRAN_EN__SHIFT                                                           0x7
+#define SEM_CHICKEN_BITS__IDLE_COUNTER_INDEX__SHIFT                                                           0x8
+#define SEM_CHICKEN_BITS__OUTSTANDING_CLEAN_COUNTER_INDEX__SHIFT                                              0xa
+#define SEM_CHICKEN_BITS__ATCL2_BUS_ID__SHIFT                                                                 0xc
+#define SEM_CHICKEN_BITS__ATOMIC_EN__SHIFT                                                                    0xe
+#define SEM_CHICKEN_BITS__EXTERNAL_ATOMIC_CHECK__SHIFT                                                        0xf
+#define SEM_CHICKEN_BITS__CLEAR_MAILBOX__SHIFT                                                                0x10
+#define SEM_CHICKEN_BITS__INVACK_AFTER_OUTSTANDING_CLEAN__SHIFT                                               0x12
+#define SEM_CHICKEN_BITS__UTC_TAG_CONFLICT_CHECK__SHIFT                                                       0x13
+#define SEM_CHICKEN_BITS__VMID_PIPELINE_EN_MASK                                                               0x00000001L
+#define SEM_CHICKEN_BITS__ENTRY_PIPELINE_EN_MASK                                                              0x00000002L
+#define SEM_CHICKEN_BITS__CHECK_COUNTER_EN_MASK                                                               0x00000004L
+#define SEM_CHICKEN_BITS__ECC_BEHAVIOR_MASK                                                                   0x00000018L
+#define SEM_CHICKEN_BITS__PHY_TRAN_EN_MASK                                                                    0x00000040L
+#define SEM_CHICKEN_BITS__ADDR_CMP_UNTRAN_EN_MASK                                                             0x00000080L
+#define SEM_CHICKEN_BITS__IDLE_COUNTER_INDEX_MASK                                                             0x00000300L
+#define SEM_CHICKEN_BITS__OUTSTANDING_CLEAN_COUNTER_INDEX_MASK                                                0x00000C00L
+#define SEM_CHICKEN_BITS__ATCL2_BUS_ID_MASK                                                                   0x00003000L
+#define SEM_CHICKEN_BITS__ATOMIC_EN_MASK                                                                      0x00004000L
+#define SEM_CHICKEN_BITS__EXTERNAL_ATOMIC_CHECK_MASK                                                          0x00008000L
+#define SEM_CHICKEN_BITS__CLEAR_MAILBOX_MASK                                                                  0x00030000L
+#define SEM_CHICKEN_BITS__INVACK_AFTER_OUTSTANDING_CLEAN_MASK                                                 0x00040000L
+#define SEM_CHICKEN_BITS__UTC_TAG_CONFLICT_CHECK_MASK                                                         0x00080000L
+//SEM_MAILBOX_CLIENTCONFIG_EXTRA
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__VCE1_CLIENT0__SHIFT                                                   0x0
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__UVD1_CLIENT0__SHIFT                                                   0x4
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__VCE1_CLIENT0_MASK                                                     0x0000000FL
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__UVD1_CLIENT0_MASK                                                     0x000000F0L
+//SEM_GPU_IOV_VIOLATION_LOG
+#define SEM_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT                                                    0x0
+#define SEM_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT                                           0x1
+#define SEM_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT                                                             0x2
+#define SEM_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT                                                              0x12
+#define SEM_GPU_IOV_VIOLATION_LOG__VF__SHIFT                                                                  0x13
+#define SEM_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT                                                               0x14
+#define SEM_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT                                                        0x18
+#define SEM_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK                                                      0x00000001L
+#define SEM_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK                                             0x00000002L
+#define SEM_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK                                                               0x0003FFFCL
+#define SEM_GPU_IOV_VIOLATION_LOG__OPCODE_MASK                                                                0x00040000L
+#define SEM_GPU_IOV_VIOLATION_LOG__VF_MASK                                                                    0x00080000L
+#define SEM_GPU_IOV_VIOLATION_LOG__VF_ID_MASK                                                                 0x00F00000L
+#define SEM_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK                                                          0xFF000000L
+//SEM_OUTSTANDING_THRESHOLD
+#define SEM_OUTSTANDING_THRESHOLD__VALUE__SHIFT                                                               0x0
+#define SEM_OUTSTANDING_THRESHOLD__VALUE_MASK                                                                 0x000000FFL
+//SEM_MEM_POWER_CTRL
+#define SEM_MEM_POWER_CTRL__MEM_POWER_CTRL_EN__SHIFT                                                          0x0
+#define SEM_MEM_POWER_CTRL__MEM_POWER_LS_EN__SHIFT                                                            0x1
+#define SEM_MEM_POWER_CTRL__MEM_POWER_DS_EN__SHIFT                                                            0x2
+#define SEM_MEM_POWER_CTRL__MEM_POWER_SD_EN__SHIFT                                                            0x3
+#define SEM_MEM_POWER_CTRL__MEM_IDLE_HYSTERESIS__SHIFT                                                        0x4
+#define SEM_MEM_POWER_CTRL__MEM_POWER_UP_RECOVER_DELAY__SHIFT                                                 0x8
+#define SEM_MEM_POWER_CTRL__MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT                                              0xe
+#define SEM_MEM_POWER_CTRL__MEM_POWER_CTRL_EN_MASK                                                            0x00000001L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_LS_EN_MASK                                                              0x00000002L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_DS_EN_MASK                                                              0x00000004L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_SD_EN_MASK                                                              0x00000008L
+#define SEM_MEM_POWER_CTRL__MEM_IDLE_HYSTERESIS_MASK                                                          0x00000070L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_UP_RECOVER_DELAY_MASK                                                   0x00003F00L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_DOWN_LS_ENTER_DELAY_MASK                                                0x0000C000L
+//SEM_REGISTER_LAST_PART2
+#define SEM_REGISTER_LAST_PART2__RESERVED__SHIFT                                                              0x0
+#define SEM_REGISTER_LAST_PART2__RESERVED_MASK                                                                0xFFFFFFFFL
+//IH_ACTIVE_FCN_ID
+#define IH_ACTIVE_FCN_ID__VF_ID__SHIFT                                                                        0x0
+#define IH_ACTIVE_FCN_ID__RESERVED__SHIFT                                                                     0x4
+#define IH_ACTIVE_FCN_ID__PF_VF__SHIFT                                                                        0x1f
+#define IH_ACTIVE_FCN_ID__VF_ID_MASK                                                                          0x0000000FL
+#define IH_ACTIVE_FCN_ID__RESERVED_MASK                                                                       0x7FFFFFF0L
+#define IH_ACTIVE_FCN_ID__PF_VF_MASK                                                                          0x80000000L
+//IH_VIRT_RESET_REQ
+#define IH_VIRT_RESET_REQ__VF__SHIFT                                                                          0x0
+#define IH_VIRT_RESET_REQ__PF__SHIFT                                                                          0x1f
+#define IH_VIRT_RESET_REQ__VF_MASK                                                                            0x0000FFFFL
+#define IH_VIRT_RESET_REQ__PF_MASK                                                                            0x80000000L
+//IH_CLIENT_CFG
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT                                                                0x0
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK                                                                  0x0000001FL
+//IH_CLIENT_CFG_INDEX
+#define IH_CLIENT_CFG_INDEX__INDEX__SHIFT                                                                     0x0
+#define IH_CLIENT_CFG_INDEX__INDEX_MASK                                                                       0x0000001FL
+//IH_CLIENT_CFG_DATA
+#define IH_CLIENT_CFG_DATA__CREDIT_RETURN_ADDR__SHIFT                                                         0x0
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE__SHIFT                                                                0x12
+#define IH_CLIENT_CFG_DATA__RING_ID__SHIFT                                                                    0x14
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT__SHIFT                                                               0x16
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID__SHIFT                                       0x18
+#define IH_CLIENT_CFG_DATA__CREDIT_RETURN_ADDR_MASK                                                           0x0003FFFFL
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE_MASK                                                                  0x000C0000L
+#define IH_CLIENT_CFG_DATA__RING_ID_MASK                                                                      0x00300000L
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT_MASK                                                                 0x00C00000L
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID_MASK                                         0x01000000L
+//IH_CID_REMAP_INDEX
+#define IH_CID_REMAP_INDEX__INDEX__SHIFT                                                                      0x0
+#define IH_CID_REMAP_INDEX__INDEX_MASK                                                                        0x00000003L
+//IH_CID_REMAP_DATA
+#define IH_CID_REMAP_DATA__CLIENT_ID__SHIFT                                                                   0x0
+#define IH_CID_REMAP_DATA__INITIATOR_ID__SHIFT                                                                0x8
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP__SHIFT                                                             0x10
+#define IH_CID_REMAP_DATA__CLIENT_ID_MASK                                                                     0x000000FFL
+#define IH_CID_REMAP_DATA__INITIATOR_ID_MASK                                                                  0x0000FF00L
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK                                                               0x00FF0000L
+//IH_CHICKEN
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT                                                          0x0
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE__SHIFT                                                               0x3
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE__SHIFT                                                                0x4
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE_MASK                                                            0x00000001L
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE_MASK                                                                 0x00000008L
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE_MASK                                                                  0x00000010L
+//IH_MMHUB_CNTL
+#define IH_MMHUB_CNTL__UNITID__SHIFT                                                                          0x0
+#define IH_MMHUB_CNTL__IV_TLVL__SHIFT                                                                         0x8
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL__SHIFT                                                                    0xc
+#define IH_MMHUB_CNTL__UNITID_MASK                                                                            0x0000003FL
+#define IH_MMHUB_CNTL__IV_TLVL_MASK                                                                           0x00000700L
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL_MASK                                                                      0x00007000L
+//IH_INT_DROP_CNTL
+#define IH_INT_DROP_CNTL__INT_DROP_EN__SHIFT                                                                  0x0
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN__SHIFT                                                           0x1
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN__SHIFT                                                           0x2
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN__SHIFT                                                               0x3
+#define IH_INT_DROP_CNTL__VF_MATCH_EN__SHIFT                                                                  0x4
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN__SHIFT                                                          0x5
+#define IH_INT_DROP_CNTL__INT_DROP_MODE__SHIFT                                                                0x6
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN__SHIFT                                                      0x8
+#define IH_INT_DROP_CNTL__INT_DROPPED__SHIFT                                                                  0x10
+#define IH_INT_DROP_CNTL__INT_DROP_EN_MASK                                                                    0x00000001L
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN_MASK                                                             0x00000002L
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN_MASK                                                             0x00000004L
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN_MASK                                                                 0x00000008L
+#define IH_INT_DROP_CNTL__VF_MATCH_EN_MASK                                                                    0x00000010L
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN_MASK                                                            0x00000020L
+#define IH_INT_DROP_CNTL__INT_DROP_MODE_MASK                                                                  0x000000C0L
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN_MASK                                                        0x00000100L
+#define IH_INT_DROP_CNTL__INT_DROPPED_MASK                                                                    0x00010000L
+//IH_INT_DROP_MATCH_VALUE0
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE__SHIFT                                                0x0
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE__SHIFT                                                0x8
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE__SHIFT                                                    0x10
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE__SHIFT                                                       0x17
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE__SHIFT                                         0x18
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE_MASK                                                  0x000000FFL
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE_MASK                                                  0x0000FF00L
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE_MASK                                                      0x000F0000L
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE_MASK                                                         0x00800000L
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE_MASK                                           0xFF000000L
+//IH_INT_DROP_MATCH_VALUE1
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE__SHIFT                                          0x0
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE_MASK                                            0xFFFFFFFFL
+//IH_INT_DROP_MATCH_MASK0
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK__SHIFT                                                  0x0
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK__SHIFT                                                  0x8
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK__SHIFT                                                      0x10
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK__SHIFT                                                         0x17
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK__SHIFT                                           0x18
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK_MASK                                                    0x000000FFL
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK_MASK                                                    0x0000FF00L
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK_MASK                                                        0x000F0000L
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK_MASK                                                           0x00800000L
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK_MASK                                             0xFF000000L
+//IH_INT_DROP_MATCH_MASK1
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK__SHIFT                                            0x0
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK_MASK                                              0xFFFFFFFFL
+//IH_REGISTER_LAST_PART1
+#define IH_REGISTER_LAST_PART1__RESERVED__SHIFT                                                               0x0
+#define IH_REGISTER_LAST_PART1__RESERVED_MASK                                                                 0xFFFFFFFFL
+//SEM_ACTIVE_FCN_ID
+#define SEM_ACTIVE_FCN_ID__VFID__SHIFT                                                                        0x0
+#define SEM_ACTIVE_FCN_ID__VF__SHIFT                                                                          0x1f
+#define SEM_ACTIVE_FCN_ID__VFID_MASK                                                                          0x0000000FL
+#define SEM_ACTIVE_FCN_ID__VF_MASK                                                                            0x80000000L
+//SEM_VIRT_RESET_REQ
+#define SEM_VIRT_RESET_REQ__VF__SHIFT                                                                         0x0
+#define SEM_VIRT_RESET_REQ__PF__SHIFT                                                                         0x1f
+#define SEM_VIRT_RESET_REQ__VF_MASK                                                                           0x0000FFFFL
+#define SEM_VIRT_RESET_REQ__PF_MASK                                                                           0x80000000L
+//SEM_RESP_SDMA0
+#define SEM_RESP_SDMA0__ADDR__SHIFT                                                                           0x2
+#define SEM_RESP_SDMA0__ADDR_MASK                                                                             0x000FFFFCL
+//SEM_RESP_SDMA1
+#define SEM_RESP_SDMA1__ADDR__SHIFT                                                                           0x2
+#define SEM_RESP_SDMA1__ADDR_MASK                                                                             0x000FFFFCL
+//SEM_RESP_UVD
+#define SEM_RESP_UVD__ADDR__SHIFT                                                                             0x2
+#define SEM_RESP_UVD__ADDR_MASK                                                                               0x000FFFFCL
+//SEM_RESP_VCE_0
+#define SEM_RESP_VCE_0__ADDR__SHIFT                                                                           0x2
+#define SEM_RESP_VCE_0__ADDR_MASK                                                                             0x000FFFFCL
+//SEM_RESP_ACP
+#define SEM_RESP_ACP__ADDR__SHIFT                                                                             0x2
+#define SEM_RESP_ACP__ADDR_MASK                                                                               0x000FFFFCL
+//SEM_RESP_ISP
+#define SEM_RESP_ISP__ADDR__SHIFT                                                                             0x2
+#define SEM_RESP_ISP__ADDR_MASK                                                                               0x000FFFFCL
+//SEM_RESP_VCE_1
+#define SEM_RESP_VCE_1__ADDR__SHIFT                                                                           0x2
+#define SEM_RESP_VCE_1__ADDR_MASK                                                                             0x000FFFFCL
+//SEM_RESP_VP8
+#define SEM_RESP_VP8__ADDR__SHIFT                                                                             0x2
+#define SEM_RESP_VP8__ADDR_MASK                                                                               0x000FFFFCL
+//SEM_RESP_GC
+#define SEM_RESP_GC__ADDR__SHIFT                                                                              0x2
+#define SEM_RESP_GC__ADDR_MASK                                                                                0x000FFFFCL
+//SEM_RESP_UVD_1
+#define SEM_RESP_UVD_1__ADDR__SHIFT                                                                           0x2
+#define SEM_RESP_UVD_1__ADDR_MASK                                                                             0x000FFFFCL
+//SEM_CID_REMAP_INDEX
+#define SEM_CID_REMAP_INDEX__INDEX__SHIFT                                                                     0x0
+#define SEM_CID_REMAP_INDEX__INDEX_MASK                                                                       0x00000003L
+//SEM_CID_REMAP_DATA
+#define SEM_CID_REMAP_DATA__CLIENT_ID__SHIFT                                                                  0x0
+#define SEM_CID_REMAP_DATA__INITIATOR_ID__SHIFT                                                               0x8
+#define SEM_CID_REMAP_DATA__CLIENT_ID_REMAP__SHIFT                                                            0x10
+#define SEM_CID_REMAP_DATA__CLIENT_ID_MASK                                                                    0x000000FFL
+#define SEM_CID_REMAP_DATA__INITIATOR_ID_MASK                                                                 0x0000FF00L
+#define SEM_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK                                                              0x00FF0000L
+//SEM_ATOMIC_OP_LUT
+#define SEM_ATOMIC_OP_LUT__SIGNAL_NORMAL__SHIFT                                                               0x0
+#define SEM_ATOMIC_OP_LUT__SIGNAL_WRITE1__SHIFT                                                               0x7
+#define SEM_ATOMIC_OP_LUT__WAIT_NORMAL__SHIFT                                                                 0xe
+#define SEM_ATOMIC_OP_LUT__WAIT_CHECK0__SHIFT                                                                 0x15
+#define SEM_ATOMIC_OP_LUT__SIGNAL_NORMAL_MASK                                                                 0x0000007FL
+#define SEM_ATOMIC_OP_LUT__SIGNAL_WRITE1_MASK                                                                 0x00003F80L
+#define SEM_ATOMIC_OP_LUT__WAIT_NORMAL_MASK                                                                   0x001FC000L
+#define SEM_ATOMIC_OP_LUT__WAIT_CHECK0_MASK                                                                   0x0FE00000L
+//SEM_EDC_CONFIG
+#define SEM_EDC_CONFIG__WRITE_DIS__SHIFT                                                                      0x0
+#define SEM_EDC_CONFIG__DIS_EDC__SHIFT                                                                        0x1
+#define SEM_EDC_CONFIG__WRITE_DIS_MASK                                                                        0x00000001L
+#define SEM_EDC_CONFIG__DIS_EDC_MASK                                                                          0x00000002L
+//SEM_CHICKEN_BITS2
+#define SEM_CHICKEN_BITS2__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT                                                   0x0
+#define SEM_CHICKEN_BITS2__MM_CLIENT_USE_CONFIG_VFID__SHIFT                                                   0x1
+#define SEM_CHICKEN_BITS2__ACTIVE_FCN_ID_PROT_ENABLE_MASK                                                     0x00000001L
+#define SEM_CHICKEN_BITS2__MM_CLIENT_USE_CONFIG_VFID_MASK                                                     0x00000002L
+//SEM_MMHUB_CNTL
+#define SEM_MMHUB_CNTL__UNIT_ID__SHIFT                                                                        0x0
+#define SEM_MMHUB_CNTL__TLVL_VALUE__SHIFT                                                                     0x8
+#define SEM_MMHUB_CNTL__UNIT_ID_MASK                                                                          0x0000003FL
+#define SEM_MMHUB_CNTL__TLVL_VALUE_MASK                                                                       0x00000700L
+//SEM_REGISTER_LAST_PART1
+#define SEM_REGISTER_LAST_PART1__RESERVED__SHIFT                                                              0x0
+#define SEM_REGISTER_LAST_PART1__RESERVED_MASK                                                                0xFFFFFFFFL
+
+#endif
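
Every field in the sh_mask header above is emitted as a __SHIFT/_MASK pair, so a consumer reads a field by masking and shifting, and updates one by doing the inverse. A minimal hand-rolled sketch of that convention (the driver normally goes through its REG_GET_FIELD()/REG_SET_FIELD() helpers instead; the function names here are illustrative):

#include <linux/types.h>
/* assumes the sh_mask header above is included */

static inline u32 ih_mem_power_get_hysteresis(u32 reg)
{
	/* isolate the field bits, then shift them down to bit 0 */
	return (reg & IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS_MASK) >>
		IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS__SHIFT;
}

static inline u32 ih_mem_power_set_hysteresis(u32 reg, u32 val)
{
	/* clear the old field, then merge in the new value, masked */
	reg &= ~IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS_MASK;
	reg |= (val << IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS__SHIFT) &
		IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS_MASK;
	return reg;
}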
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_offset.h
new file mode 100644
index 0000000000000000000000000000000000000000..55facadea54b9fe66d08b7d3af47480f86f938ed
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_offset.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2020  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_11_0_6_OFFSET_HEADER
+#define _smuio_11_0_6_OFFSET_HEADER
+
+
+
+// addressBlock: smuio_smuio_SmuSmuioDec
+// base address: 0x5a000
+#define mmCGTT_ROM_CLK_CTRL0                                                                           0x00e4
+#define mmCGTT_ROM_CLK_CTRL0_BASE_IDX                                                                  0
+#define mmROM_INDEX                                                                                    0x00e5
+#define mmROM_INDEX_BASE_IDX                                                                           0
+#define mmROM_DATA                                                                                     0x00e6
+#define mmROM_DATA_BASE_IDX                                                                            0
+
+#endif
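
The mmREG/mmREG_BASE_IDX pairs above are not absolute addresses on their own: the final register offset is mmREG added to the segment selected by mmREG_BASE_IDX in the IP's base-address table (the IP_BASE instances in renoir_ip_offset.h further down follow the same scheme). A sketch of the lookup, loosely mirroring the driver's SOC15_REG_OFFSET() helper; SMUIO_BASE is an assumed instance name here:

#include <linux/types.h>

#define REG_OFFSET_SKETCH(base, inst, reg) \
	((base).instance[inst].segment[reg##_BASE_IDX] + (reg))

/* e.g. absolute dword offset of ROM_DATA on instance 0 */
u32 rom_data = REG_OFFSET_SKETCH(SMUIO_BASE, 0, mmROM_DATA);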
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_sh_mask.h
new file mode 100644
index 0000000000000000000000000000000000000000..7d6a2fac28398f63519c8ea42ba41024f75ac417
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_sh_mask.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2020  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_11_0_6_SH_MASK_HEADER
+#define _smuio_11_0_6_SH_MASK_HEADER
+
+
+//CGTT_ROM_CLK_CTRL0
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY__SHIFT                                                                   0x0
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT                                                             0x4
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT                                                             0x1e
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT                                                             0x1f
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY_MASK                                                                     0x0000000FL
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS_MASK                                                               0x00000FF0L
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK                                                               0x40000000L
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK                                                               0x80000000L
+//ROM_INDEX
+#define ROM_INDEX__ROM_INDEX__SHIFT                                                                           0x0
+#define ROM_INDEX__ROM_INDEX_MASK                                                                             0x01FFFFFFL
+//ROM_DATA
+#define ROM_DATA__ROM_DATA__SHIFT                                                                             0x0
+#define ROM_DATA__ROM_DATA_MASK                                                                               0xFFFFFFFFL
+
+#endif
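
ROM_INDEX/ROM_DATA form a classic index/data register pair: software writes a starting dword index to ROM_INDEX and then streams dwords out of ROM_DATA, with the hardware advancing the index on each read; this is how the existing soc15-family BIOS fetch path consumes the equivalent registers. A hedged sketch, assuming the usual RREG32()/WREG32() accessors and already-resolved register offsets:

static void rom_read_sketch(u32 rom_index, u32 rom_data, u32 *dst, u32 len_dw)
{
	u32 i;

	WREG32(rom_index, 0);			/* start at dword 0 of the image */
	for (i = 0; i < len_dw; i++)
		dst[i] = RREG32(rom_data);	/* index auto-advances per read */
}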
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index f775aac6c1bd005660bbde2dd1a92fcfd7ad3993..a41875ac5dfbdaca9564db8a0679f610b870f438 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -103,6 +103,7 @@ enum pp_clock_type {
 
 enum amd_pp_sensors {
 	AMDGPU_PP_SENSOR_GFX_SCLK = 0,
+	AMDGPU_PP_SENSOR_CPU_CLK,
 	AMDGPU_PP_SENSOR_VDDNB,
 	AMDGPU_PP_SENSOR_VDDGFX,
 	AMDGPU_PP_SENSOR_UVD_VCLK,
@@ -155,9 +156,11 @@ enum {
 enum PP_OD_DPM_TABLE_COMMAND {
 	PP_OD_EDIT_SCLK_VDDC_TABLE,
 	PP_OD_EDIT_MCLK_VDDC_TABLE,
+	PP_OD_EDIT_CCLK_VDDC_TABLE,
 	PP_OD_EDIT_VDDC_CURVE,
 	PP_OD_RESTORE_DEFAULT_TABLE,
-	PP_OD_COMMIT_DPM_TABLE
+	PP_OD_COMMIT_DPM_TABLE,
+	PP_OD_EDIT_VDDGFX_OFFSET
 };
 
 struct pp_states_info {
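
Inserting AMDGPU_PP_SENSOR_CPU_CLK renumbers the entries behind it, which should be safe here because amd_pp_sensors is a kernel-internal enum rather than UAPI. A fragment of how such a sensor id is typically consumed (the same amdgpu_dpm_read_sensor() pattern the existing hwmon code uses; variable names are illustrative):

	uint32_t cpu_clk;
	int size = sizeof(cpu_clk);
	/* query the current CPU clock through the pm sensor interface */
	int r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
				       (void *)&cpu_clk, &size);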
diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
index 07633e22e99a15c15debf61b0b79a2ba3b82f266..7dff85c81e5a7ea09cf0ed9905bfb101690f45b3 100644
--- a/drivers/gpu/drm/amd/include/renoir_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
@@ -33,7 +33,7 @@ struct IP_BASE_INSTANCE
 struct IP_BASE
 {
     struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
+} __maybe_unused;
 
 
 static const struct IP_BASE ACP_BASE ={ { { { 0x02403800, 0x00480000, 0, 0, 0 } },
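
Tagging the struct IP_BASE type __maybe_unused (GCC's unused attribute applied to a type) tells the compiler that variables of this type may legitimately go unreferenced, which silences -Wunused-const-variable for the many static const IP_BASE tables this header defines in translation units that include it but only use some of them. A toy illustration with a hypothetical type:

struct toy_base {
	int segment[4];
} __maybe_unused;	/* variables of this type may go unused without warning */

static const struct toy_base DEMO_BASE = { { 1, 2, 3, 4 } };	/* no warning */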
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 7b6ef05a1d35a959627128b3d73b9f38fedae680..5fa65f191a3790071d8b2de786ede25c1bfcabc1 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -36,6 +36,7 @@
 #include <linux/hwmon-sysfs.h>
 #include <linux/nospec.h>
 #include <linux/pm_runtime.h>
+#include <asm/processor.h>
 #include "hwmgr.h"
 
 static const struct cg_flag_name clocks[] = {
@@ -730,11 +731,18 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
  *
  * - minimum and maximum engine clock labeled OD_SCLK
  *
- * - maximum memory clock labeled OD_MCLK
+ * - minimum (not available for Vega20 and Navi1x) and maximum memory
+ *   clock labeled OD_MCLK
  *
  * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
  *   They can be used to calibrate the sclk voltage curve.
  *
+ * - voltage offset (in mV) applied to the target voltage calculation.
+ *   This is available for Sienna Cichlid, Navy Flounder and Dimgrey
+ *   Cavefish. For these ASICs, the target voltage is calculated as
+ *   "voltage = voltage calculated from v/f curve + overdrive vddgfx
+ *   offset"
+ *
  * - a list of valid ranges for sclk, mclk, and voltage curve points
  *   labeled OD_RANGE
  *
@@ -755,6 +763,11 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
  *   600mV. "vc 2 1000 1000" will update point3 with clock set
  *   as 1000Mhz and voltage 1000mV.
  *
+ *   To update the voltage offset applied to the gfxclk/voltage calculation,
+ *   write a string that contains "vo offset", where offset is the new
+ *   value in mV. This is supported by Sienna Cichlid, Navy Flounder and
+ *   Dimgrey Cavefish; the offset can be a positive or negative value.
+ *
  * - When you have edited all of the states as needed, write "c" (commit)
  *   to the file to commit your changes
  *
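
Putting the pieces together, the new offset follows the same stage-then-commit flow as the existing knobs. A userspace sketch under the usual sysfs layout ("card0" and the -50 mV value are illustrative; error handling trimmed):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int od_write(const char *cmd)
{
	int fd = open("/sys/class/drm/card0/device/pp_od_clk_voltage", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, cmd, strlen(cmd));	/* one command per write, echo-style */
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	if (od_write("vo -50"))	/* stage the new vddgfx offset */
		return 1;
	return od_write("c");	/* commit the edited table */
}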
@@ -787,6 +800,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 
 	if (*buf == 's')
 		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
+	else if (*buf == 'p')
+		type = PP_OD_EDIT_CCLK_VDDC_TABLE;
 	else if (*buf == 'm')
 		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
 	else if(*buf == 'r')
@@ -795,6 +810,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 		type = PP_OD_COMMIT_DPM_TABLE;
 	else if (!strncmp(buf, "vc", 2))
 		type = PP_OD_EDIT_VDDC_CURVE;
+	else if (!strncmp(buf, "vo", 2))
+		type = PP_OD_EDIT_VDDGFX_OFFSET;
 	else
 		return -EINVAL;
 
@@ -802,12 +819,14 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 
 	tmp_str = buf_cpy;
 
-	if (type == PP_OD_EDIT_VDDC_CURVE)
+	if ((type == PP_OD_EDIT_VDDC_CURVE) ||
+	     (type == PP_OD_EDIT_VDDGFX_OFFSET))
 		tmp_str++;
 	while (isspace(*++tmp_str));
 
-	while (tmp_str[0]) {
-		sub_str = strsep(&tmp_str, delimiter);
+	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
+		if (strlen(sub_str) == 0)
+			continue;
 		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
 		if (ret)
 			return -EINVAL;
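The new loop shape is the point of this hunk: strsep() sets tmp_str to NULL
once the buffer is exhausted, so testing the returned token (rather than
tmp_str[0]) avoids reading through a NULL pointer after the last token, and
the explicit empty-token skip tolerates repeated delimiters. A standalone
sketch of the same pattern:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "300  1200 900";	/* doubled space yields an empty token */
	char *tmp = buf, *tok;

	while ((tok = strsep(&tmp, " ")) != NULL) {
		if (tok[0] == '\0')
			continue;	/* skip empty tokens instead of parsing them */
		printf("token: %s\n", tok);
	}
	return 0;
}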
@@ -898,7 +917,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 		size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
 		size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
 		size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
+		size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDGFX_OFFSET, buf+size);
 		size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
+		size += smu_print_clk_levels(&adev->smu, SMU_OD_CCLK, buf+size);
 	} else if (adev->powerplay.pp_funcs->print_clock_levels) {
 		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
 		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
@@ -1074,7 +1095,7 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
 {
 	int ret;
-	long level;
+	unsigned long level;
 	char *sub_str = NULL;
 	char *tmp;
 	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
@@ -1087,11 +1108,10 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
 	memcpy(buf_cpy, buf, bytes);
 	buf_cpy[bytes] = '\0';
 	tmp = buf_cpy;
-	while (tmp[0]) {
-		sub_str = strsep(&tmp, delimiter);
+	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
 		if (strlen(sub_str)) {
-			ret = kstrtol(sub_str, 0, &level);
-			if (ret)
+			ret = kstrtoul(sub_str, 0, &level);
+			if (ret || level > 31)
 				return -EINVAL;
 			*mask |= 1 << level;
 		} else
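Worked example of the mask arithmetic above, including the reason for the new
level > 31 bound: shifting a 32-bit word by 32 or more is undefined behavior,
so out-of-range input must be rejected rather than shifted.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0;
	unsigned long levels[] = { 0, 2, 5 };	/* as if the user wrote "0 2 5" */
	size_t i;

	for (i = 0; i < 3; i++) {
		if (levels[i] > 31)		/* mirror the kernel-side bound */
			return 1;
		mask |= 1u << levels[i];
	}
	printf("0x%x\n", mask);			/* prints 0x25 */
	return 0;
}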
@@ -1346,6 +1366,138 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
 	return count;
 }
 
+static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	ssize_t size;
+	int ret;
+
+	if (amdgpu_in_reset(adev))
+		return -EPERM;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
+		return ret;
+	}
+
+	if (is_support_sw_smu(adev))
+		size = smu_print_clk_levels(&adev->smu, SMU_VCLK, buf);
+	else
+		size = snprintf(buf, PAGE_SIZE, "\n");
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
+	return size;
+}
+
+static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	int ret;
+	uint32_t mask = 0;
+
+	if (amdgpu_in_reset(adev))
+		return -EPERM;
+
+	ret = amdgpu_read_mask(buf, count, &mask);
+	if (ret)
+		return ret;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
+		return ret;
+	}
+
+	if (is_support_sw_smu(adev))
+		ret = smu_force_clk_levels(&adev->smu, SMU_VCLK, mask);
+	else
+		ret = 0;
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
+	if (ret)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	ssize_t size;
+	int ret;
+
+	if (amdgpu_in_reset(adev))
+		return -EPERM;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
+		return ret;
+	}
+
+	if (is_support_sw_smu(adev))
+		size = smu_print_clk_levels(&adev->smu, SMU_DCLK, buf);
+	else
+		size = snprintf(buf, PAGE_SIZE, "\n");
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
+	return size;
+}
+
+static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	int ret;
+	uint32_t mask = 0;
+
+	if (amdgpu_in_reset(adev))
+		return -EPERM;
+
+	ret = amdgpu_read_mask(buf, count, &mask);
+	if (ret)
+		return ret;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
+		return ret;
+	}
+
+	if (is_support_sw_smu(adev))
+		ret = smu_force_clk_levels(&adev->smu, SMU_DCLK, mask);
+	else
+		ret = 0;
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
+	if (ret)
+		return -EINVAL;
+
+	return count;
+}
+
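A hypothetical usage sketch for the new files: writing a space-separated list
of level indices (parsed by amdgpu_read_mask() above) restricts the VCN video
clock to those DPM levels. The card index is an assumption:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/class/drm/card0/device/pp_dpm_vclk", O_WRONLY);

	if (fd < 0)
		return 1;
	/* allow only levels 0 and 2, i.e. mask 0x5 on the kernel side */
	if (write(fd, "0 2", strlen("0 2")) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}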
 static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
 		struct device_attribute *attr,
 		char *buf)
@@ -1717,8 +1869,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 			i++;
 		memcpy(buf_cpy, buf, count-i);
 		tmp_str = buf_cpy;
-		while (tmp_str[0]) {
-			sub_str = strsep(&tmp_str, delimiter);
+		while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
+			if (strlen(sub_str) == 0)
+				continue;
 			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
 			if (ret)
 				return -EINVAL;
@@ -2025,6 +2178,8 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,				ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,				ATTR_FLAG_BASIC),
@@ -2067,7 +2222,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
 		*states = ATTR_STATE_UNSUPPORTED;
 		if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
-		    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
+		    (is_support_sw_smu(adev) && adev->smu.is_apu) ||
+		    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
 			*states = ATTR_STATE_SUPPORTED;
 	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
 		if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
@@ -2087,6 +2243,12 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
 		if (asic_type < CHIP_VEGA12)
 			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
+		if (asic_type != CHIP_VANGOGH)
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
+		if (asic_type != CHIP_VANGOGH)
+			*states = ATTR_STATE_UNSUPPORTED;
 	}
 
 	if (asic_type == CHIP_ARCTURUS) {
@@ -2897,7 +3059,8 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
 					 char *buf)
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
-	uint32_t limit = 0;
+	int limit_type = to_sensor_dev_attr(attr)->index;
+	uint32_t limit = limit_type << 24;
 	ssize_t size;
 	int r;
 
@@ -2911,7 +3074,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
 	}
 
 	if (is_support_sw_smu(adev)) {
-		smu_get_power_limit(&adev->smu, &limit, true);
+		smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_MAX);
 		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
 	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
 		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
@@ -2931,7 +3094,8 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 					 char *buf)
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
-	uint32_t limit = 0;
+	int limit_type = to_sensor_dev_attr(attr)->index;
+	uint32_t limit = limit_type << 24;
 	ssize_t size;
 	int r;
 
@@ -2945,7 +3109,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 	}
 
 	if (is_support_sw_smu(adev)) {
-		smu_get_power_limit(&adev->smu, &limit, false);
+		smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_CURRENT);
 		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
 	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
 		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
@@ -2960,6 +3124,15 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 	return size;
 }
 
+static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	int limit_type = to_sensor_dev_attr(attr)->index;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+		limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
+}
 
 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 		struct device_attribute *attr,
@@ -2967,6 +3140,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 		size_t count)
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	int limit_type = to_sensor_dev_attr(attr)->index;
 	int err;
 	u32 value;
 
@@ -2981,7 +3155,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 		return err;
 
 	value = value / 1000000; /* convert to Watt */
-
+	value |= limit_type << 24;
 
 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (err < 0) {
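Both power-cap handlers above pack the hwmon channel index (0 = default/slow
PPT, 1 = fast PPT) into bits 31:24 of the limit word so a single callback can
serve both channels. A sketch of that convention; the decode side is an
assumption, since it lives outside this hunk:

#include <stdint.h>

/* pack a PPT limit type and a wattage into one 32-bit word */
static inline uint32_t ppt_pack(int limit_type, uint32_t watts)
{
	return ((uint32_t)limit_type << 24) | (watts & 0xFFFFFF);
}

/* assumed consumer-side split: type in bits 31:24, watts in bits 23:0 */
static inline int ppt_type(uint32_t word)	{ return word >> 24; }
static inline uint32_t ppt_watts(uint32_t word)	{ return word & 0xFFFFFF; }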
@@ -3193,6 +3367,12 @@ static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg,
 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
+static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
+static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
+static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
+static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
+static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
@@ -3231,6 +3411,12 @@ static struct attribute *hwmon_attributes[] = {
 	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
 	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
 	&sensor_dev_attr_power1_cap.dev_attr.attr,
+	&sensor_dev_attr_power1_label.dev_attr.attr,
+	&sensor_dev_attr_power2_average.dev_attr.attr,
+	&sensor_dev_attr_power2_cap_max.dev_attr.attr,
+	&sensor_dev_attr_power2_cap_min.dev_attr.attr,
+	&sensor_dev_attr_power2_cap.dev_attr.attr,
+	&sensor_dev_attr_power2_label.dev_attr.attr,
 	&sensor_dev_attr_freq1_input.dev_attr.attr,
 	&sensor_dev_attr_freq1_label.dev_attr.attr,
 	&sensor_dev_attr_freq2_input.dev_attr.attr,
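With the second channel registered, userspace can distinguish the two limits
by reading the new *_label files ("fastPPT"/"slowPPT" on Van Gogh). A
hypothetical probe; the hwmon index varies per system:

#include <stdio.h>

int main(void)
{
	char label[16];
	FILE *f = fopen("/sys/class/hwmon/hwmon0/power2_label", "r");

	if (f && fgets(label, sizeof(label), f))
		printf("power2 is %s", label);	/* e.g. "fastPPT" */
	if (f)
		fclose(f);
	return 0;
}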
@@ -3323,8 +3509,9 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 			effective_mode &= ~S_IWUSR;
 	}
 
-	if (((adev->flags & AMD_IS_APU) ||
-	     adev->family == AMDGPU_FAMILY_SI) &&	/* not implemented yet */
+	if (((adev->family == AMDGPU_FAMILY_SI) ||
+	     ((adev->flags & AMD_IS_APU) &&
+	      (adev->asic_type != CHIP_VANGOGH))) &&	/* not implemented yet */
 	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
 	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
 	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
@@ -3387,6 +3574,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	     attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
 		return 0;
 
+	/* only Vangogh has fast PPT limit and power labels */
+	if ((adev->asic_type != CHIP_VANGOGH) &&
+	    (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
+	     attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
+	     attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
+	     attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
+	     attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
+	     attr == &sensor_dev_attr_power1_label.dev_attr.attr))
+		return 0;
+
 	return effective_mode;
 }
 
@@ -3465,6 +3662,27 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
  */
 #if defined(CONFIG_DEBUG_FS)
 
+static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
+					   struct amdgpu_device *adev)
+{
+	uint16_t *p_val;
+	uint32_t size;
+	int i;
+
+	if (is_support_cclk_dpm(adev)) {
+		p_val = kcalloc(adev->smu.cpu_core_num, sizeof(uint16_t),
+				GFP_KERNEL);
+		if (!p_val)
+			return;
+
+		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
+					    (void *)p_val, &size)) {
+			for (i = 0; i < adev->smu.cpu_core_num; i++)
+				seq_printf(m, "\t%u MHz (CPU%d)\n",
+					   *(p_val + i), i);
+		}
+
+		kfree(p_val);
+	}
+}
+
 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
 {
 	uint32_t value;
@@ -3475,6 +3693,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
 	/* GPU Clocks */
 	size = sizeof(value);
 	seq_printf(m, "GFX Clocks and Power:\n");
+
+	amdgpu_debugfs_prints_cpu_info(m, adev);
+
 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
 		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 0d797fa9f5cc69dd37314151d48d955a89e1e7d9..10b0624ade651eaa0d5ae30d4d8426cc7323f0c6 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -33,6 +33,8 @@
 #define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES	1000
 #define SMU_FW_NAME_LEN			0x24
 
+#define SMU_DPM_USER_PROFILE_RESTORE (1 << 0)
+
 struct smu_hw_power_state {
 	unsigned int magic;
 };
@@ -159,6 +161,19 @@ enum smu_power_src_type
 	SMU_POWER_SOURCE_COUNT,
 };
 
+enum smu_ppt_limit_type
+{
+	SMU_DEFAULT_PPT_LIMIT = 0,
+	SMU_FAST_PPT_LIMIT,
+};
+
+enum smu_ppt_limit_level
+{
+	SMU_PPT_LIMIT_MIN = -1,
+	SMU_PPT_LIMIT_CURRENT,
+	SMU_PPT_LIMIT_MAX,
+};
+
 enum smu_memory_pool_size
 {
     SMU_MEMORY_POOL_SIZE_ZERO   = 0,
@@ -168,6 +183,17 @@ enum smu_memory_pool_size
     SMU_MEMORY_POOL_SIZE_2_GB   = 0x80000000,
 };
 
+struct smu_user_dpm_profile {
+	uint32_t fan_mode;
+	uint32_t power_limit;
+	uint32_t fan_speed_percent;
+	uint32_t flags;
+
+	/* user clock state information */
+	uint32_t clk_mask[SMU_CLK_COUNT];
+	uint32_t clk_dependency;
+};
+
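A sketch of how such a saved profile might be replayed, e.g. across a
suspend/reset cycle. The helper name and call site are assumptions; only the
fields and the SMU_DPM_USER_PROFILE_RESTORE flag come from this header:

static void smu_restore_user_profile(struct smu_context *smu)
{
	int clk;

	/* tell the setters we are replaying saved values, not new input */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	if (smu->user_dpm_profile.power_limit)
		smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);

	for (clk = 0; clk < SMU_CLK_COUNT; clk++)
		if (smu->user_dpm_profile.clk_mask[clk])
			smu_force_clk_levels(smu, clk,
					     smu->user_dpm_profile.clk_mask[clk]);

	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}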
 #define SMU_TABLE_INIT(tables, table_id, s, a, d)	\
 	do {						\
 		tables[table_id].size = s;		\
@@ -459,131 +485,672 @@ struct smu_context
 	struct work_struct interrupt_work;
 
 	unsigned fan_max_rpm;
-	unsigned manual_fan_speed_rpm;
+	unsigned manual_fan_speed_percent;
 
 	uint32_t gfx_default_hard_min_freq;
 	uint32_t gfx_default_soft_max_freq;
 	uint32_t gfx_actual_hard_min_freq;
 	uint32_t gfx_actual_soft_max_freq;
+
+	/* APU only */
+	uint32_t cpu_default_soft_min_freq;
+	uint32_t cpu_default_soft_max_freq;
+	uint32_t cpu_actual_soft_min_freq;
+	uint32_t cpu_actual_soft_max_freq;
+	uint32_t cpu_core_id_select;
+	uint16_t cpu_core_num;
+
+	struct smu_user_dpm_profile user_dpm_profile;
 };
 
 struct i2c_adapter;
 
+/**
+ * struct pptable_funcs - Callbacks used to interact with the SMU.
+ */
 struct pptable_funcs {
+	/**
+	 * @run_btc: Calibrate voltage/frequency curve to fit the system's
+	 *           power delivery and voltage margins. Required for adaptive
+	 *           voltage frequency scaling (AVFS).
+	 */
 	int (*run_btc)(struct smu_context *smu);
+
+	/**
+	 * @get_allowed_feature_mask: Get allowed feature mask.
+	 * &feature_mask: Array to store feature mask.
+	 * &num: Elements in &feature_mask.
+	 */
 	int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+
+	/**
+	 * @get_current_power_state: Get the current power state.
+	 *
+	 * Return: Current power state on success, negative errno on failure.
+	 */
 	enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
+
+	/**
+	 * @set_default_dpm_table: Retrieve the default overdrive settings from
+	 *                         the SMU.
+	 */
 	int (*set_default_dpm_table)(struct smu_context *smu);
+
 	int (*set_power_state)(struct smu_context *smu);
+
+	/**
+	 * @populate_umd_state_clk: Populate the UMD power state table with
+	 *                          defaults.
+	 */
 	int (*populate_umd_state_clk)(struct smu_context *smu);
+
+	/**
+	 * @print_clk_levels: Print DPM clock levels for a clock domain
+	 *                    to buffer. The current level is starred.
+	 *
+	 * Used for sysfs interfaces.
+	 */
 	int (*print_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
+
+	/**
+	 * @force_clk_levels: Set a range of allowed DPM levels for a clock
+	 *                    domain.
+	 * &clk_type: Clock domain.
+	 * &mask: Range of allowed DPM levels.
+	 */
 	int (*force_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask);
+
+	/**
+	 * @od_edit_dpm_table: Edit the custom overdrive DPM table.
+	 * &type: Type of edit.
+	 * &input: Edit parameters.
+	 * &size: Size of &input.
+	 */
 	int (*od_edit_dpm_table)(struct smu_context *smu,
 				 enum PP_OD_DPM_TABLE_COMMAND type,
 				 long *input, uint32_t size);
+
+	/**
+	 * @get_clock_by_type_with_latency: Get the speed and latency of a clock
+	 *                                  domain.
+	 */
 	int (*get_clock_by_type_with_latency)(struct smu_context *smu,
 					      enum smu_clk_type clk_type,
 					      struct
 					      pp_clock_levels_with_latency
 					      *clocks);
+	/**
+	 * @get_clock_by_type_with_voltage: Get the speed and voltage of a clock
+	 *                                  domain.
+	 */
+	int (*get_clock_by_type_with_voltage)(struct smu_context *smu,
+					      enum amd_pp_clock_type type,
+					      struct
+					      pp_clock_levels_with_voltage
+					      *clocks);
+
+	/**
+	 * @get_power_profile_mode: Print all power profile modes to
+	 *                          buffer. The current mode is starred.
+	 */
 	int (*get_power_profile_mode)(struct smu_context *smu, char *buf);
+
+	/**
+	 * @set_power_profile_mode: Set a power profile mode. Also used to
+	 *                          create/set custom power profile modes.
+	 * &input: Power profile mode parameters.
+	 * &size: Size of &input.
+	 */
 	int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
+
+	/**
+	 * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power
+	 *                      management.
+	 */
 	int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable);
+
+	/**
+	 * @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power
+	 *                       management.
+	 */
 	int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable);
+
+	/**
+	 * @read_sensor: Read data from a sensor.
+	 * &sensor: Sensor to read data from.
+	 * &data: Sensor reading.
+	 * &size: Size of &data.
+	 */
 	int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
 			   void *data, uint32_t *size);
+
+	/**
+	 * @pre_display_config_changed: Prepare GPU for a display configuration
+	 *                              change.
+	 *
+	 * Disable display tracking and pin memory clock speed to maximum. Used
+	 * in display component synchronization.
+	 */
 	int (*pre_display_config_changed)(struct smu_context *smu);
+
+	/**
+	 * @display_config_changed: Notify the SMU of the current display
+	 *                          configuration.
+	 *
+	 * Allows SMU to properly track blanking periods for memory clock
+	 * adjustment. Used in display component synchronization.
+	 */
 	int (*display_config_changed)(struct smu_context *smu);
+
 	int (*apply_clocks_adjust_rules)(struct smu_context *smu);
+
+	/**
+	 * @notify_smc_display_config: Applies display requirements to the
+	 *                             current power state.
+	 *
+	 * Optimize deep sleep DCEFclk and mclk for the current display
+	 * configuration. Used in display component synchronization.
+	 */
 	int (*notify_smc_display_config)(struct smu_context *smu);
+
+	/**
+	 * @is_dpm_running: Check if DPM is running.
+	 *
+	 * Return: True if DPM is running, false otherwise.
+	 */
 	bool (*is_dpm_running)(struct smu_context *smu);
-	int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
+
+	/**
+	 * @get_fan_speed_percent: Get the current fan speed in percent.
+	 */
+	int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
+
+	/**
+	 * @set_watermarks_table: Configure and upload the watermarks tables to
+	 *                        the SMU.
+	 */
 	int (*set_watermarks_table)(struct smu_context *smu,
 				    struct pp_smu_wm_range_sets *clock_ranges);
+
+	/**
+	 * @get_thermal_temperature_range: Get safe thermal limits in Celsius.
+	 */
 	int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
+
+	/**
+	 * @get_uclk_dpm_states: Get memory clock DPM levels in kHz.
+	 * &clocks_in_khz: Array of DPM levels.
+	 * &num_states: Elements in &clocks_in_khz.
+	 */
 	int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
+
+	/**
+	 * @set_default_od_settings: Set the overdrive tables to defaults.
+	 */
 	int (*set_default_od_settings)(struct smu_context *smu);
+
+	/**
+	 * @set_performance_level: Set a performance level.
+	 */
 	int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
+
+	/**
+	 * @display_disable_memory_clock_switch: Enable/disable dynamic memory
+	 *                                       clock switching.
+	 *
+	 * Disabling this feature forces memory clock speed to maximum.
+	 * Enabling sets the minimum memory clock capable of driving the
+	 * current display configuration.
+	 */
 	int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);
+
+	/**
+	 * @dump_pptable: Print the power play table to the system log.
+	 */
 	void (*dump_pptable)(struct smu_context *smu);
+
+	/**
+	 * @get_power_limit: Get the device's power limits.
+	 */
 	int (*get_power_limit)(struct smu_context *smu);
+
+	/**
+	 * @get_ppt_limit: Get the device's PPT limits.
+	 */
+	int (*get_ppt_limit)(struct smu_context *smu, uint32_t *ppt_limit,
+			     enum smu_ppt_limit_type limit_type,
+			     enum smu_ppt_limit_level limit_level);
+
+	/**
+	 * @set_df_cstate: Set data fabric cstate.
+	 */
 	int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
+
+	/**
+	 * @allow_xgmi_power_down: Enable/disable external global memory
+	 *                         interconnect power down.
+	 */
 	int (*allow_xgmi_power_down)(struct smu_context *smu, bool en);
+
+	/**
+	 * @update_pcie_parameters: Update and upload the system's PCIe
+	 *                          capabilities to the SMU.
+	 * &pcie_gen_cap: Maximum allowed PCIe generation.
+	 * &pcie_width_cap: Maximum allowed PCIe width.
+	 */
 	int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
+
+	/**
+	 * @i2c_init: Initialize i2c.
+	 *
+	 * The i2c bus is used internally by the SMU voltage regulators and
+	 * other devices. The i2c's EEPROM also stores bad page tables on boards
+	 * with ECC.
+	 */
 	int (*i2c_init)(struct smu_context *smu, struct i2c_adapter *control);
+
+	/**
+	 * @i2c_fini: Tear down i2c.
+	 */
 	void (*i2c_fini)(struct smu_context *smu, struct i2c_adapter *control);
+
+	/**
+	 * @get_unique_id: Get the GPU's unique id. Used for asset tracking.
+	 */
 	void (*get_unique_id)(struct smu_context *smu);
+
+	/**
+	 * @get_dpm_clock_table: Get a copy of the DPM clock table.
+	 *
+	 * Used by display component in bandwidth and watermark calculations.
+	 */
 	int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
+
+	/**
+	 * @init_microcode: Request the SMU's firmware from the kernel.
+	 */
 	int (*init_microcode)(struct smu_context *smu);
+
+	/**
+	 * @load_microcode: Load firmware onto the SMU.
+	 */
 	int (*load_microcode)(struct smu_context *smu);
+
+	/**
+	 * @fini_microcode: Release the SMU's firmware.
+	 */
 	void (*fini_microcode)(struct smu_context *smu);
+
+	/**
+	 * @init_smc_tables: Initialize the SMU tables.
+	 */
 	int (*init_smc_tables)(struct smu_context *smu);
+
+	/**
+	 * @fini_smc_tables: Release the SMU tables.
+	 */
 	int (*fini_smc_tables)(struct smu_context *smu);
+
+	/**
+	 * @init_power: Initialize the power gate table context.
+	 */
 	int (*init_power)(struct smu_context *smu);
+
+	/**
+	 * @fini_power: Release the power gate table context.
+	 */
 	int (*fini_power)(struct smu_context *smu);
+
+	/**
+	 * @check_fw_status: Check the SMU's firmware status.
+	 *
+	 * Return: Zero if check passes, negative errno on failure.
+	 */
 	int (*check_fw_status)(struct smu_context *smu);
+
+	/**
+	 * @setup_pptable: Initialize the power play table and populate it with
+	 *                 default values.
+	 */
 	int (*setup_pptable)(struct smu_context *smu);
+
+	/**
+	 * @get_vbios_bootup_values: Get default boot values from the VBIOS.
+	 */
 	int (*get_vbios_bootup_values)(struct smu_context *smu);
+
+	/**
+	 * @check_fw_version: Print driver and SMU interface versions to the
+	 *                    system log.
+	 *
+	 * Interface mismatch is not a critical failure.
+	 */
 	int (*check_fw_version)(struct smu_context *smu);
+
+	/**
+	 * @powergate_sdma: Power up/down system direct memory access.
+	 */
 	int (*powergate_sdma)(struct smu_context *smu, bool gate);
+
+	/**
+	 * @set_gfx_cgpg: Enable/disable graphics engine coarse grain power
+	 *                gating.
+	 */
 	int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
+
+	/**
+	 * @write_pptable: Write the power play table to the SMU.
+	 */
 	int (*write_pptable)(struct smu_context *smu);
+
+	/**
+	 * @set_driver_table_location: Send the location of the driver table to
+	 *                             the SMU.
+	 */
 	int (*set_driver_table_location)(struct smu_context *smu);
+
+	/**
+	 * @set_tool_table_location: Send the location of the tool table to the
+	 *                           SMU.
+	 */
 	int (*set_tool_table_location)(struct smu_context *smu);
+
+	/**
+	 * @notify_memory_pool_location: Send the location of the memory pool to
+	 *                               the SMU.
+	 */
 	int (*notify_memory_pool_location)(struct smu_context *smu);
+
+	/**
+	 * @system_features_control: Enable/disable all SMU features.
+	 */
 	int (*system_features_control)(struct smu_context *smu, bool en);
+
+	/**
+	 * @send_smc_msg_with_param: Send a message with a parameter to the SMU.
+	 * &msg: Type of message.
+	 * &param: Message parameter.
+	 * &read_arg: SMU response (optional).
+	 */
 	int (*send_smc_msg_with_param)(struct smu_context *smu,
 				       enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
+
+	/**
+	 * @send_smc_msg: Send a message to the SMU.
+	 * &msg: Type of message.
+	 * &read_arg: SMU response (optional).
+	 */
 	int (*send_smc_msg)(struct smu_context *smu,
 			    enum smu_message_type msg,
 			    uint32_t *read_arg);
+
+	/**
+	 * @init_display_count: Notify the SMU of the number of display
+	 *                      components in current display configuration.
+	 */
 	int (*init_display_count)(struct smu_context *smu, uint32_t count);
+
+	/**
+	 * @set_allowed_mask: Notify the SMU of the features currently allowed
+	 *                    by the driver.
+	 */
 	int (*set_allowed_mask)(struct smu_context *smu);
+
+	/**
+	 * @get_enabled_mask: Get a mask of features that are currently enabled
+	 *                    on the SMU.
+	 * &feature_mask: Array representing enabled feature mask.
+	 * &num: Elements in &feature_mask.
+	 */
 	int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+
+	/**
+	 * @feature_is_enabled: Test if a feature is enabled.
+	 *
+	 * Return: One if enabled, zero if disabled.
+	 */
 	int (*feature_is_enabled)(struct smu_context *smu, enum smu_feature_mask mask);
+
+	/**
+	 * @disable_all_features_with_exception: Disable all features with
+	 *                                       exception to those in &mask.
+	 */
 	int (*disable_all_features_with_exception)(struct smu_context *smu, enum smu_feature_mask mask);
+
+	/**
+	 * @notify_display_change: Enable fast memory clock switching.
+	 *
+	 * Allows for fine grained memory clock switching but has more stringent
+	 * timing requirements.
+	 */
 	int (*notify_display_change)(struct smu_context *smu);
+
+	/**
+	 * @set_power_limit: Set power limit in watts.
+	 */
 	int (*set_power_limit)(struct smu_context *smu, uint32_t n);
+
+	/**
+	 * @init_max_sustainable_clocks: Populate max sustainable clock speed
+	 *                               table with values from the SMU.
+	 */
 	int (*init_max_sustainable_clocks)(struct smu_context *smu);
+
+	/**
+	 * @enable_thermal_alert: Enable thermal alert interrupts.
+	 */
 	int (*enable_thermal_alert)(struct smu_context *smu);
+
+	/**
+	 * @disable_thermal_alert: Disable thermal alert interrupts.
+	 */
 	int (*disable_thermal_alert)(struct smu_context *smu);
+
+	/**
+	 * @set_min_dcef_deep_sleep: Set a minimum display fabric deep sleep
+	 *                           clock speed in MHz.
+	 */
 	int (*set_min_dcef_deep_sleep)(struct smu_context *smu, uint32_t clk);
+
+	/**
+	 * @display_clock_voltage_request: Set a hard minimum frequency
+	 * for a clock domain.
+	 */
 	int (*display_clock_voltage_request)(struct smu_context *smu, struct
 					     pp_display_clock_request
 					     *clock_req);
+
+	/**
+	 * @get_fan_control_mode: Get the current fan control mode.
+	 */
 	uint32_t (*get_fan_control_mode)(struct smu_context *smu);
+
+	/**
+	 * @set_fan_control_mode: Set the fan control mode.
+	 */
 	int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
+
+	/**
+	 * @set_fan_speed_percent: Set a static fan speed in percent.
+	 */
 	int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
-	int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
+
+	/**
+	 * @set_xgmi_pstate: Set inter-chip global memory interconnect pstate.
+	 * &pstate: Pstate to set. D0 if nonzero, D3 otherwise.
+	 */
 	int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
+
+	/**
+	 * @gfx_off_control: Enable/disable graphics engine poweroff.
+	 */
 	int (*gfx_off_control)(struct smu_context *smu, bool enable);
+
+	/**
+	 * @get_gfx_off_status: Get graphics engine poweroff status.
+	 *
+	 * Return:
+	 * 0 - GFXOFF (default).
+	 * 1 - Transition out of GFX State.
+	 * 2 - Not in GFXOFF.
+	 * 3 - Transition into GFXOFF.
+	 */
 	uint32_t (*get_gfx_off_status)(struct smu_context *smu);
+
+	/**
+	 * @register_irq_handler: Register interrupt request handlers.
+	 */
 	int (*register_irq_handler)(struct smu_context *smu);
+
+	/**
+	 * @set_azalia_d3_pme: Wake the audio decode engine from d3 sleep.
+	 */
 	int (*set_azalia_d3_pme)(struct smu_context *smu);
+
+	/**
+	 * @get_max_sustainable_clocks_by_dc: Get a copy of the max sustainable
+	 *                                    clock speeds table.
+	 *
+	 * Provides a way for the display component (DC) to get the max
+	 * sustainable clocks from the SMU.
+	 */
 	int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);
+
+	/**
+	 * @baco_is_support: Check if GPU supports BACO (Bus Active, Chip Off).
+	 */
 	bool (*baco_is_support)(struct smu_context *smu);
+
+	/**
+	 * @baco_get_state: Get the current BACO state.
+	 *
+	 * Return: Current BACO state.
+	 */
 	enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
+
+	/**
+	 * @baco_set_state: Enter/exit BACO.
+	 */
 	int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
+
+	/**
+	 * @baco_enter: Enter BACO.
+	 */
 	int (*baco_enter)(struct smu_context *smu);
+
+	/**
+	 * @baco_exit: Exit BACO.
+	 */
 	int (*baco_exit)(struct smu_context *smu);
+
+	/**
+	 * @mode1_reset_is_support: Check if GPU supports mode1 reset.
+	 */
 	bool (*mode1_reset_is_support)(struct smu_context *smu);
+
+	/**
+	 * @mode1_reset: Perform mode1 reset.
+	 *
+	 * Complete GPU reset.
+	 */
 	int (*mode1_reset)(struct smu_context *smu);
+
+	/**
+	 * @mode2_reset: Perform mode2 reset.
+	 *
+	 * Mode2 reset generally does not reset as many IPs as mode1 reset. The
+	 * set of IPs reset varies by ASIC.
+	 */
 	int (*mode2_reset)(struct smu_context *smu);
+
+	/**
+	 * @get_dpm_ultimate_freq: Get the hard frequency range of a clock
+	 *                         domain in MHz.
+	 */
 	int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
+
+	/**
+	 * @set_soft_freq_limited_range: Set the soft frequency range of a clock
+	 *                               domain in MHz.
+	 */
 	int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
+
+	/**
+	 * @set_power_source: Notify the SMU of the current power source.
+	 */
 	int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
+
+	/**
+	 * @log_thermal_throttling_event: Print a thermal throttling warning to
+	 *                                the system's log.
+	 */
 	void (*log_thermal_throttling_event)(struct smu_context *smu);
+
+	/**
+	 * @get_pp_feature_mask: Print a human readable table of enabled
+	 *                       features to buffer.
+	 */
 	size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf);
+
+	/**
+	 * @set_pp_feature_mask: Request the SMU enable/disable features to
+	 *                       match those enabled in &new_mask.
+	 */
 	int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);
+
+	/**
+	 * @get_gpu_metrics: Get a copy of the GPU metrics table from the SMU.
+	 *
+	 * Return: Size of &table
+	 */
 	ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);
+
+	/**
+	 * @enable_mgpu_fan_boost: Enable multi-GPU fan boost.
+	 */
 	int (*enable_mgpu_fan_boost)(struct smu_context *smu);
+
+	/**
+	 * @gfx_ulv_control: Enable/disable ultra low voltage.
+	 */
 	int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);
+
+	/**
+	 * @deep_sleep_control: Enable/disable deep sleep.
+	 */
 	int (*deep_sleep_control)(struct smu_context *smu, bool enablement);
+
+	/**
+	 * @get_fan_parameters: Get fan parameters.
+	 *
+	 * Get maximum fan speed from the power play table.
+	 */
 	int (*get_fan_parameters)(struct smu_context *smu);
+
+	/**
+	 * @post_init: Helper function for asic specific workarounds.
+	 */
 	int (*post_init)(struct smu_context *smu);
+
+	/**
+	 * @interrupt_work: Work task scheduled from SMU interrupt handler.
+	 */
 	void (*interrupt_work)(struct smu_context *smu);
+
+	/**
+	 * @gpo_control: Enable/disable graphics power optimization if supported.
+	 */
 	int (*gpo_control)(struct smu_context *smu, bool enablement);
+
+	/**
+	 * @gfx_state_change_set: Send the current graphics state to the SMU.
+	 */
 	int (*gfx_state_change_set)(struct smu_context *smu, uint32_t state);
+
+	/**
+	 * @set_fine_grain_gfx_freq_parameters: Set fine grain graphics clock
+	 *                                      parameters to defaults.
+	 */
 	int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu);
 };
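The table above is consumed through NULL-checked indirect calls from the
common SMU code; an illustrative wrapper (the function name is made up, the
shape is the usual optional-callback dispatch):

static int smu_example_get_fan_percent(struct smu_context *smu, uint32_t *speed)
{
	/* skip ASICs whose pptable_funcs does not implement the callback */
	if (smu->ppt_funcs && smu->ppt_funcs->get_fan_speed_percent)
		return smu->ppt_funcs->get_fan_speed_percent(smu, speed);

	return -EOPNOTSUPP;
}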
 
@@ -597,6 +1164,7 @@ typedef enum {
 	METRICS_CURR_DCLK1,
 	METRICS_CURR_FCLK,
 	METRICS_CURR_DCEFCLK,
+	METRICS_AVERAGE_CPUCLK,
 	METRICS_AVERAGE_GFXCLK,
 	METRICS_AVERAGE_SOCCLK,
 	METRICS_AVERAGE_FCLK,
@@ -637,6 +1205,12 @@ enum smu_cmn2asic_mapping_type {
 #define FEA_MAP(fea) \
 	[SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}
 
+#define FEA_MAP_REVERSE(fea) \
+	[SMU_FEATURE_DPM_##fea##_BIT] = {1, FEATURE_##fea##_DPM_BIT}
+
+#define FEA_MAP_HALF_REVERSE(fea) \
+	[SMU_FEATURE_DPM_##fea##CLK_BIT] = {1, FEATURE_##fea##_DPM_BIT}
+
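Worked expansion of the new mapping macros, assuming an ASIC firmware header
that spells its bits FEATURE_<x>_DPM_BIT rather than FEATURE_DPM_<x>_BIT:

/*
 *   FEA_MAP_REVERSE(SOCCLK)
 *     => [SMU_FEATURE_DPM_SOCCLK_BIT] = {1, FEATURE_SOCCLK_DPM_BIT}
 *
 *   FEA_MAP_HALF_REVERSE(GFX)
 *     => [SMU_FEATURE_DPM_GFXCLK_BIT] = {1, FEATURE_GFX_DPM_BIT}
 *
 * i.e. the driver-side SMU_FEATURE_DPM_<x> name is mapped onto a
 * firmware header that names the same bit FEATURE_<x>_DPM_BIT.
 */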
 #define TAB_MAP(tab) \
 	[SMU_TABLE_##tab] = {1, TABLE_##tab}
 
@@ -663,7 +1237,7 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);
 
 int smu_get_power_limit(struct smu_context *smu,
 			uint32_t *limit,
-			bool max_setting);
+			enum smu_ppt_limit_level limit_level);
 
 int smu_set_power_limit(struct smu_context *smu, uint32_t limit);
 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
@@ -719,6 +1293,7 @@ extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
 extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
 
 bool is_support_sw_smu(struct amdgpu_device *adev);
+bool is_support_cclk_dpm(struct amdgpu_device *adev);
 int smu_reset(struct smu_context *smu);
 int smu_sys_get_pp_table(struct smu_context *smu, void **table);
 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size);
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
index 1c19eae93ff1750aeefdb1ba3bc53743e6bed42f..6e23a3f803a7c53e7b49f0b76fd9a432e54245cc 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
@@ -141,7 +141,6 @@ typedef struct {
   uint32_t MaxGfxClk;
 
   uint8_t NumDfPstatesEnabled;
-  uint8_t NumDpmLevelsEnabled;
   uint8_t NumDcfclkLevelsEnabled;
   uint8_t NumDispClkLevelsEnabled;  //applies to both dispclk and dppclk
   uint8_t NumSocClkLevelsEnabled;
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h
index 720d15612fe1ac45c1965d169be8069b198a8412..aa4822202587f327e06b96ad3d5a988cf528e0cc 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h
@@ -133,8 +133,6 @@
 	__SMU_DUMMY_MAP(PowerUpSdma),                 \
 	__SMU_DUMMY_MAP(SetHardMinIspclkByFreq),      \
 	__SMU_DUMMY_MAP(SetHardMinVcn),               \
-	__SMU_DUMMY_MAP(Spare1),                      \
-	__SMU_DUMMY_MAP(Spare2),           	      \
 	__SMU_DUMMY_MAP(SetAllowFclkSwitch),          \
 	__SMU_DUMMY_MAP(SetMinVideoGfxclkFreq),       \
 	__SMU_DUMMY_MAP(ActiveProcessNotify),         \
@@ -211,6 +209,11 @@
 	__SMU_DUMMY_MAP(SetGpoFeaturePMask),             \
 	__SMU_DUMMY_MAP(DisallowGpo),                    \
 	__SMU_DUMMY_MAP(Enable2ndUSB20Port),             \
+	__SMU_DUMMY_MAP(RequestActiveWgp),               \
+	__SMU_DUMMY_MAP(SetFastPPTLimit),                \
+	__SMU_DUMMY_MAP(SetSlowPPTLimit),                \
+	__SMU_DUMMY_MAP(GetFastPPTLimit),                \
+	__SMU_DUMMY_MAP(GetSlowPPTLimit),                \
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)	SMU_MSG_##type
@@ -236,10 +239,12 @@ enum smu_clk_type {
 	SMU_SCLK,
 	SMU_MCLK,
 	SMU_PCIE,
+	SMU_OD_CCLK,
 	SMU_OD_SCLK,
 	SMU_OD_MCLK,
 	SMU_OD_VDDC_CURVE,
 	SMU_OD_RANGE,
+	SMU_OD_VDDGFX_OFFSET,
 	SMU_CLK_COUNT,
 };
 
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index 5d0b29653ffa13ccf8b4ae265b69512331a5dc27..d4cddd2390a29d1d54f6003714321791f9bc516f 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -31,7 +31,7 @@
 #define SMU11_DRIVER_IF_VERSION_NV12 0x36
 #define SMU11_DRIVER_IF_VERSION_NV14 0x36
 #define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3D
-#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xC
+#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE
 #define SMU11_DRIVER_IF_VERSION_VANGOGH 0x02
 #define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
 
@@ -129,6 +129,15 @@ struct smu_11_0_power_context {
 	enum smu_11_0_power_state power_state;
 };
 
+struct smu_11_5_power_context {
+	uint32_t	power_source;
+	uint8_t		in_power_limit_boost_mode;
+	enum smu_11_0_power_state power_state;
+
+	uint32_t	current_fast_ppt_limit;
+	uint32_t	max_fast_ppt_limit;
+};
+
 enum smu_v11_0_baco_seq {
 	BACO_SEQ_BACO = 0,
 	BACO_SEQ_MSR,
@@ -203,14 +212,8 @@ int
 smu_v11_0_set_fan_control_mode(struct smu_context *smu,
 			       uint32_t mode);
 
-int
-smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
-
-int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
-				       uint32_t speed);
-
-int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
-				uint32_t *speed);
+int smu_v11_0_set_fan_speed_percent(struct smu_context *smu,
+				    uint32_t speed);
 
 int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
 				     uint32_t pstate);
@@ -278,10 +281,6 @@ int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu);
 
 int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu);
 
-void smu_v11_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics);
-
-void smu_v11_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics);
-
 int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
 			      bool enablement);
 
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
index 55d7892e4e0e52c3fe86f007c6178889968b1d5d..fe130a497d6c3b743142f20bf2a2090c19a72836 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
@@ -104,7 +104,11 @@
 #define PPSMC_MSG_DramLogSetDramBufferSize             0x46
 #define PPSMC_MSG_RequestActiveWgp                     0x47
 #define PPSMC_MSG_QueryActiveWgp                       0x48
-#define PPSMC_Message_Count                            0x49
+#define PPSMC_MSG_SetFastPPTLimit                      0x49
+#define PPSMC_MSG_SetSlowPPTLimit                      0x4A
+#define PPSMC_MSG_GetFastPPTLimit                      0x4B
+#define PPSMC_MSG_GetSlowPPTLimit                      0x4C
+#define PPSMC_Message_Count                            0x4D
 
 //Argument for PPSMC_MSG_GfxDeviceDriverReset
 enum {
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
index fa2e8cb079670aa800d024c934d5767f23f6ad25..02de3b6199e5332afe1b7308f5dce9e12c27ed5e 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
@@ -60,7 +60,5 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
 
 int smu_v12_0_set_driver_table_location(struct smu_context *smu);
 
-void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics);
-
 #endif
 #endif
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
index 6a7de8b898fafeaaf217beb3e0c9a5421a36d3e0..f2cef0930aa96165b711827f40b94767c52324d3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
@@ -33,6 +33,7 @@
 #include "ppsmc.h"
 #include "amd_acpi.h"
 #include "pp_psm.h"
+#include "vega10_hwmgr.h"
 
 extern const struct pp_smumgr_func ci_smu_funcs;
 extern const struct pp_smumgr_func smu8_smu_funcs;
@@ -46,7 +47,6 @@ extern const struct pp_smumgr_func vega12_smu_funcs;
 extern const struct pp_smumgr_func smu10_smu_funcs;
 extern const struct pp_smumgr_func vega20_smu_funcs;
 
-extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
 extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
 
 static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index 83a6504e093cbba2fdb1cef05a02344b1315924f..b1038d30c8dcc558935d493f5567269532092093 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -279,7 +279,7 @@ static const ATOM_VOLTAGE_OBJECT_V3 *atomctrl_lookup_voltage_type_v3(
  *
  * @hwmgr:           input parameter: pointer to HwMgr
  * @clock_value:     input parameter: memory clock
- * @dividers:        output parameter: memory PLL dividers
+ * @mpll_param:      output parameter: memory clock parameters
  * @strobe_mode:     input parameter: 1 for strobe mode,  0 for performance mode
  */
 int atomctrl_get_memory_pll_dividers_si(
@@ -332,7 +332,7 @@ int atomctrl_get_memory_pll_dividers_si(
  *
  * @hwmgr:                 input parameter: pointer to HwMgr
  * @clock_value:           input parameter: memory clock
- * @dividers:              output parameter: memory PLL dividers
+ * @mpll_param:            output parameter: memory clock parameters
  */
 int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
 		uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
index 741e03ad5311f115157496ca922f23668b45e175..f2a55c1413f597a4d643d1a2c99367517bdff17e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
@@ -1362,6 +1362,7 @@ static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i
  * @hwmgr: Pointer to the hardware manager.
  * @entry_index: The index of the entry to be extracted from the table.
  * @power_state: The address of the PowerState instance being created.
+ * @call_back_func: The function to call to fill in the power state
  * Return: -1 if the entry cannot be retrieved.
  */
 int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index 88322781e447b5812896890b1a776fbae4010092..ed05a30d1139dc5488e3541c7420f33cce81f3c2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1487,7 +1487,7 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
 	}
 
 	if (!smu10_data->fine_grain_enabled) {
-		pr_err("Fine grain not started\n");
+		pr_err("pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 82676c086ce464d9190b01097918df8c12a0036d..c57dc9ae81f2f4e7af3dc1dd3d9537cca3c18eb3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -235,7 +235,7 @@ static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
 /**
  * smu7_enable_smc_voltage_controller - Enable voltage control
  *
- * @hwmgr  the address of the powerplay hardware manager.
+ * @hwmgr:  the address of the powerplay hardware manager.
  * Return:   always PP_Result_OK
  */
 static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
@@ -4501,7 +4501,7 @@ static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
  * smu7_set_max_fan_rpm_output - Set maximum target operating fan output RPM
  *
  * @hwmgr:  the address of the powerplay hardware manager.
- * @usMaxFanRpm:  max operating fan RPM value.
+ * @us_max_fan_rpm:  max operating fan RPM value.
  * Return:   The response that came from the SMC.
  */
 static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 1b47f94e033178c118e0595bf669af7331e82a94..29c99642d22d407e5613aef1f1264efb9cc30f86 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -542,11 +542,11 @@ static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
 
 #define ATOM_VIRTUAL_VOLTAGE_ID0             0xff01
 /**
-* Get Leakage VDDC based on leakage ID.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0.
-*/
+ * Get Leakage VDDC based on leakage ID.
+ *
+ * @hwmgr:  the address of the powerplay hardware manager.
+ * Return:  always 0.
+ */
 static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
 {
 	struct vega10_hwmgr *data = hwmgr->backend;
@@ -600,9 +600,9 @@ static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
 /**
  * Change virtual leakage voltage to actual value.
  *
- * @param     hwmgr  the address of the powerplay hardware manager.
- * @param     pointer to changing voltage
- * @param     pointer to leakage table
+ * @hwmgr:         the address of the powerplay hardware manager.
+ * @voltage:       pointer to changing voltage
+ * @leakage_table: pointer to leakage table
  */
 static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
 		uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
@@ -624,13 +624,13 @@ static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
 }
 
 /**
-* Patch voltage lookup table by EVV leakages.
-*
-* @param     hwmgr  the address of the powerplay hardware manager.
-* @param     pointer to voltage lookup table
-* @param     pointer to leakage table
-* @return     always 0
-*/
+ * Patch voltage lookup table by EVV leakages.
+ *
+ * @hwmgr:         the address of the powerplay hardware manager.
+ * @lookup_table:  pointer to voltage lookup table
+ * @leakage_table: pointer to leakage table
+ * Return:        always 0
+ */
 static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
 		phm_ppt_v1_voltage_lookup_table *lookup_table,
 		struct vega10_leakage_voltage *leakage_table)
@@ -1001,13 +1001,12 @@ static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
 }
 
 /**
-* Remove repeated voltage values and create table with unique values.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    vol_table  the pointer to changing voltage table
-* @return    0 in success
-*/
-
+ * Remove repeated voltage values and create table with unique values.
+ *
+ * @hwmgr:      the address of the powerplay hardware manager.
+ * @vol_table:  the pointer to changing voltage table
+ * Return:      0 on success
+ */
 static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
 		struct pp_atomfwctrl_voltage_table *vol_table)
 {
@@ -1151,11 +1150,11 @@ static void vega10_trim_voltage_table_to_fit_state_table(
 }
 
 /**
-* Create Voltage Tables.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
+ * Create Voltage Tables.
+ *
+ * @hwmgr:  the address of the powerplay hardware manager.
+ * Return:  always 0
+ */
 static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
 {
 	struct vega10_hwmgr *data = hwmgr->backend;
@@ -1212,11 +1211,11 @@ static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
 }
 
 /*
- * @fn vega10_init_dpm_state
- * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
+ * vega10_init_dpm_state
+ * Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
  *
- * @param    dpm_state - the address of the DPM Table to initiailize.
- * @return   None.
+ * @dpm_state: the address of the DPM Table to initialize.
+ * Return:   None.
  */
 static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
 {
@@ -1460,11 +1459,11 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 }
 
 /*
- * @fn vega10_populate_ulv_state
- * @brief Function to provide parameters for Utral Low Voltage state to SMC.
+ * vega10_populate_ulv_state
+ * Function to provide parameters for Ultra Low Voltage state to SMC.
  *
- * @param    hwmgr - the address of the hardware manager.
- * @return   Always 0.
+ * @hwmgr:   the address of the hardware manager.
+ * Return:   Always 0.
  */
 static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
 {
@@ -1545,13 +1544,13 @@ static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
 }
 
 /**
-* Populates single SMC GFXSCLK structure using the provided engine clock
-*
-* @param    hwmgr      the address of the hardware manager
-* @param    gfx_clock  the GFX clock to use to populate the structure.
-* @param    current_gfxclk_level  location in PPTable for the SMC GFXCLK structure.
-*/
-
+ * Populates single SMC GFXSCLK structure using the provided engine clock
+ *
+ * @hwmgr:      the address of the hardware manager
+ * @gfx_clock:  the GFX clock to use to populate the structure.
+ * @current_gfxclk_level:  location in PPTable for the SMC GFXCLK structure.
+ * @acg_freq:   ACG frequency to return (MHz)
+ */
 static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
 		uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
 		uint32_t *acg_freq)
@@ -1610,12 +1609,13 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
 }
 
 /**
- * @brief Populates single SMC SOCCLK structure using the provided clock.
+ * Populates single SMC SOCCLK structure using the provided clock.
  *
- * @param    hwmgr - the address of the hardware manager.
- * @param    soc_clock - the SOC clock to use to populate the structure.
- * @param    current_socclk_level - location in PPTable for the SMC SOCCLK structure.
- * @return   0 on success..
+ * @hwmgr:     the address of the hardware manager.
+ * @soc_clock: the SOC clock to use to populate the structure.
+ * @current_soc_did:   DFS divider to pass back to caller
+ * @current_vol_index: index of current VDD to pass back to caller
+ * Return:      0 on success
  */
 static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
 		uint32_t soc_clock, uint8_t *current_soc_did,
@@ -1659,10 +1659,10 @@ static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
 }
 
 /**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param    hwmgr      the address of the hardware manager
-*/
+ * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+ *
+ * @hwmgr:      the address of the hardware manager
+ */
 static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
 {
 	struct vega10_hwmgr *data = hwmgr->backend;
@@ -1746,12 +1746,12 @@ static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr)
 	}
 }
 
-/**
- * @brief Populates single SMC GFXCLK structure using the provided clock.
+/*
+ * Populates single SMC memory level structure using the provided memory clock.
  *
- * @param    hwmgr - the address of the hardware manager.
- * @param    mem_clock - the memory clock to use to populate the structure.
- * @return   0 on success..
+ * @hwmgr:     the address of the hardware manager.
+ * @mem_clock: the memory clock to use to populate the structure.
+ * Return:     0 on success.
  */
 static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
 		uint32_t mem_clock, uint8_t *current_mem_vid,
@@ -1808,10 +1808,10 @@ static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
 }
 
 /**
- * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
+ * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
  *
- * @param    pHwMgr - the address of the hardware manager.
- * @return   PP_Result_OK on success.
+ * @hwmgr:  the address of the hardware manager.
+ * Return:   PP_Result_OK on success.
  */
 static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
 {
@@ -2486,12 +2486,11 @@ static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
 }
 
 /**
-* Initializes the SMC table and uploads it
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput  the pointer to input data (PowerState)
-* @return   always 0
-*/
+ * Initializes the SMC table and uploads it
+ *
+ * @hwmgr:  the address of the powerplay hardware manager.
+ * Return:  always 0
+ */
 static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
 {
 	int result;
@@ -2864,11 +2863,11 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
 }
 
 /**
- * @brief Tell SMC to enabled the supported DPMs.
+ * Tell SMC to enable the supported DPMs.
  *
- * @param    hwmgr - the address of the powerplay hardware manager.
- * @Param    bitmap - bitmap for the features to enabled.
- * @return   0 on at least one DPM is successfully enabled.
+ * @hwmgr:   the address of the powerplay hardware manager.
+ * @bitmap:  bitmap of the features to be enabled.
+ * Return:   0 if at least one DPM is successfully enabled.
  */
 static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
 {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h
index f752b4ad0c8ae76700996411ed78322444591459..07c06f8c90b093ed58577db19fb7c5537a9f75ab 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h
@@ -442,5 +442,6 @@ int vega10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
 int vega10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
 int vega10_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
 int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
+int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
 
 #endif /* _VEGA10_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index dc206fa88c5e539b7beb508b5240f77165fe5342..c0753029a8e2a9f6b1039ab906a2d354b047a463 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -718,12 +718,11 @@ static int vega12_save_default_power_profile(struct pp_hwmgr *hwmgr)
 #endif
 
 /**
-* Initializes the SMC table and uploads it
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput  the pointer to input data (PowerState)
-* @return   always 0
-*/
+ * vega12_init_smc_table - Initializes the SMC table and uploads it
+ *
+ * @hwmgr:  the address of the powerplay hardware manager.
+ * Return:  always 0.
+ */
 static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
 {
 	int result;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index da84012b7fd5167c5883fb57ad164883c9b0f639..87811b005b85f314c3c9491533a6867fc3123791 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -771,12 +771,11 @@ static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 }
 
 /**
-* Initializes the SMC table and uploads it
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput  the pointer to input data (PowerState)
-* @return   always 0
-*/
+ * vega20_init_smc_table - Initializes the SMC table and uploads it
+ *
+ * @hwmgr:  the address of the powerplay hardware manager.
+ * Return:  always 0.
+ */
 static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
 {
 	int result;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index e84c737e39673b35b429f85ea7076d320ebc1597..d143ef1b460bf6f887d0aaf49c97150f8a604445 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -266,6 +266,119 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
 	return ret;
 }
 
+/**
+ * smu_set_user_clk_dependencies - set user profile clock dependencies
+ *
+ * @smu:	smu_context pointer
+ * @clk:	enum smu_clk_type type
+ *
+ * Enable/Disable the clock dependency for the @clk type.
+ */
+static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
+{
+	if (smu->adev->in_suspend)
+		return;
+
+	/*
+	 * mclk, fclk and socclk are interdependent
+	 * on each other
+	 */
+	if (clk == SMU_MCLK) {
+		/* reset clock dependency */
+		smu->user_dpm_profile.clk_dependency = 0;
+		/* set mclk dependent clocks (fclk and socclk) */
+		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
+	} else if (clk == SMU_FCLK) {
+		/* give priority to mclk, if mclk dependent clocks are set */
+		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
+			return;
+
+		/* reset clock dependency */
+		smu->user_dpm_profile.clk_dependency = 0;
+		/* set fclk dependent clocks (mclk and socclk) */
+		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
+	} else if (clk == SMU_SOCCLK) {
+		/* give priority to mclk, if mclk dependent clocks are set */
+		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
+			return;
+
+		/* reset clock dependency */
+		smu->user_dpm_profile.clk_dependency = 0;
+		/* set socclk dependent clocks (mclk and fclk) */
+		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
+	} else {
+		/* add clk dependencies here, if any */
+		return;
+	}
+}
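
The dependency mask set above is consumed during restore: any clock whose bit is set in clk_dependency is skipped when the saved masks are replayed (see smu_restore_dpm_user_profile below). A minimal standalone sketch of that selection logic, using placeholder values rather than the real smu_clk_type numbering:

#include <stdio.h>

#define BIT(n) (1U << (n))

/* placeholder values; the real ones come from enum smu_clk_type */
enum { SMU_MCLK = 2, SMU_SOCCLK = 3, SMU_FCLK = 4 };

int main(void)
{
	/* a user forced an MCLK level: fclk and socclk become dependent */
	unsigned int clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	int clk;

	for (clk = SMU_MCLK; clk <= SMU_FCLK; clk++)
		printf("clk %d: %s\n", clk,
		       (clk_dependency & BIT(clk)) ?
		       "skipped (dependent)" : "restored");
	return 0;
}
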
+
+/**
+ * smu_restore_dpm_user_profile - reinstate user dpm profile
+ *
+ * @smu:	smu_context pointer
+ *
+ * Restore the saved user power configurations, including power limit,
+ * clock frequencies, fan control mode and fan speed.
+ */
+static void smu_restore_dpm_user_profile(struct smu_context *smu)
+{
+	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+	int ret = 0;
+
+	if (!smu->adev->in_suspend)
+		return;
+
+	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+		return;
+
+	/* Enable restore flag */
+	smu->user_dpm_profile.flags = SMU_DPM_USER_PROFILE_RESTORE;
+
+	/* set the user dpm power limit */
+	if (smu->user_dpm_profile.power_limit) {
+		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
+		if (ret)
+			dev_err(smu->adev->dev, "Failed to set power limit value\n");
+	}
+
+	/* set the user dpm clock configurations */
+	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+		enum smu_clk_type clk_type;
+
+		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
+			/*
+			 * Iterate over smu clk type and force the saved user clk
+			 * configs, skip if clock dependency is enabled
+			 */
+			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
+					smu->user_dpm_profile.clk_mask[clk_type]) {
+				ret = smu_force_clk_levels(smu, clk_type,
+						smu->user_dpm_profile.clk_mask[clk_type]);
+				if (ret)
+					dev_err(smu->adev->dev, "Failed to set clock type = %d\n",
+							clk_type);
+			}
+		}
+	}
+
+	/* set the user dpm fan configurations */
+	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL) {
+		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
+		if (ret) {
+			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
+			return;
+		}
+
+		if (smu->user_dpm_profile.fan_speed_percent) {
+			ret = smu_set_fan_speed_percent(smu, smu->user_dpm_profile.fan_speed_percent);
+			if (ret)
+				dev_err(smu->adev->dev, "Failed to set manual fan speed\n");
+		}
+	}
+
+	/* Disable restore flag */
+	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
+}
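
Taken together, the two helpers above imply the shape of the saved state. A sketch of the fields they rely on, reconstructed purely from the accesses above (the real definition lives in the SMU headers and may carry more members):

#include <stdint.h>

#define SMU_CLK_COUNT 16	/* placeholder; the real count comes from enum smu_clk_type */

/* illustrative only -- reconstructed from the accesses above */
struct smu_user_dpm_profile {
	uint32_t flags;			/* e.g. SMU_DPM_USER_PROFILE_RESTORE */
	uint32_t power_limit;		/* last user-set power limit */
	uint32_t fan_mode;		/* AMD_FAN_CTRL_* */
	uint32_t fan_speed_percent;	/* last user-set fan speed */
	uint32_t clk_dependency;	/* BIT(clk) mask of dependent clocks */
	uint32_t clk_mask[SMU_CLK_COUNT]; /* forced level mask per clock type */
};
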
+
 int smu_get_power_num_states(struct smu_context *smu,
 			     struct pp_states_info *state_info)
 {
@@ -288,6 +401,20 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
 	return false;
 }
 
+bool is_support_cclk_dpm(struct amdgpu_device *adev)
+{
+	struct smu_context *smu = &adev->smu;
+
+	if (!is_support_sw_smu(adev))
+		return false;
+
+	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
+		return false;
+
+	return true;
+}
+
 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
@@ -405,8 +532,6 @@ static int smu_set_funcs(struct amdgpu_device *adev)
 		break;
 	case CHIP_VANGOGH:
 		vangogh_set_ppt_funcs(smu);
-		/* enable the OD by default to allow the fine grain tuning function */
-		smu->od_enabled = true;
 		break;
 	default:
 		return -EINVAL;
@@ -478,9 +603,6 @@ static int smu_late_init(void *handle)
 
 	smu_set_fine_grain_gfx_freq_parameters(smu);
 
-	if (adev->asic_type == CHIP_VANGOGH)
-		return 0;
-
 	if (!smu->pm_enabled)
 		return 0;
 
@@ -517,6 +639,8 @@ static int smu_late_init(void *handle)
 			AMD_PP_TASK_COMPLETE_INIT,
 			false);
 
+	smu_restore_dpm_user_profile(smu);
+
 	return 0;
 }
 
@@ -1610,6 +1734,12 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
 
 	mutex_unlock(&smu->mutex);
 
+	/* reset user dpm clock state */
+	if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+		memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
+		smu->user_dpm_profile.clk_dependency = 0;
+	}
+
 	return ret;
 }
 
@@ -1644,8 +1774,13 @@ int smu_force_clk_levels(struct smu_context *smu,
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
+	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
 		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
+		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE) {
+			smu->user_dpm_profile.clk_mask[clk_type] = mask;
+			smu_set_user_clk_dependencies(smu, clk_type);
+		}
+	}
 
 	mutex_unlock(&smu->mutex);
 
@@ -1887,6 +2022,7 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
 
 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
 {
+	u32 percent;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -1894,8 +2030,12 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->set_fan_speed_rpm)
-		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
+	if (smu->ppt_funcs->set_fan_speed_percent) {
+		percent = speed * 100 / smu->fan_max_rpm;
+		ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent);
+		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+			smu->user_dpm_profile.fan_speed_percent = percent;
+	}
 
 	mutex_unlock(&smu->mutex);
 
@@ -1904,22 +2044,40 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
 
 int smu_get_power_limit(struct smu_context *smu,
 			uint32_t *limit,
-			bool max_setting)
+			enum smu_ppt_limit_level limit_level)
 {
+	uint32_t limit_type = *limit >> 24;
+	int ret = 0;
+
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	mutex_lock(&smu->mutex);
 
-	*limit = (max_setting ? smu->max_power_limit : smu->current_power_limit);
+	if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
+		if (smu->ppt_funcs->get_ppt_limit)
+			ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
+	} else {
+		switch (limit_level) {
+		case SMU_PPT_LIMIT_CURRENT:
+			*limit = smu->current_power_limit;
+			break;
+		case SMU_PPT_LIMIT_MAX:
+			*limit = smu->max_power_limit;
+			break;
+		default:
+			break;
+		}
+	}
 
 	mutex_unlock(&smu->mutex);
 
-	return 0;
+	return ret;
 }
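
Note how the limit argument is overloaded here: the top byte selects the limit type (zero meaning SMU_DEFAULT_PPT_LIMIT) and the remaining bits carry the value, which is what makes the *limit >> 24 extraction work. A quick worked example, with the non-default type value invented for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical: limit type 1 in the top byte, 220 W in the low bits */
	uint32_t limit = (1U << 24) | 220;

	printf("limit_type = %u, value = %u\n",
	       limit >> 24, limit & 0x00FFFFFF);	/* prints 1, 220 */
	return 0;
}
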
 
 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
 {
+	uint32_t limit_type = limit >> 24;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -1927,6 +2085,12 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
 
 	mutex_lock(&smu->mutex);
 
+	if (limit_type != SMU_DEFAULT_PPT_LIMIT &&
+	    smu->ppt_funcs->set_power_limit) {
+		ret = smu->ppt_funcs->set_power_limit(smu, limit);
+		goto out;
+	}
+
 	if (limit > smu->max_power_limit) {
 		dev_err(smu->adev->dev,
 			"New power limit (%d) is over the max allowed %d\n",
@@ -1937,8 +2101,11 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
 	if (!limit)
 		limit = smu->current_power_limit;
 
-	if (smu->ppt_funcs->set_power_limit)
+	if (smu->ppt_funcs->set_power_limit) {
 		ret = smu->ppt_funcs->set_power_limit(smu, limit);
+		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+			smu->user_dpm_profile.power_limit = limit;
+	}
 
 out:
 	mutex_unlock(&smu->mutex);
@@ -2115,11 +2282,19 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->set_fan_control_mode)
+	if (smu->ppt_funcs->set_fan_control_mode) {
 		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
+		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+			smu->user_dpm_profile.fan_mode = value;
+	}
 
 	mutex_unlock(&smu->mutex);
 
+	/* reset user dpm fan speed */
+	if (!ret && value != AMD_FAN_CTRL_MANUAL &&
+			smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+		smu->user_dpm_profile.fan_speed_percent = 0;
+
 	return ret;
 }
 
@@ -2127,17 +2302,15 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
 {
 	int ret = 0;
 	uint32_t percent;
-	uint32_t current_rpm;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->get_fan_speed_rpm) {
-		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, &current_rpm);
+	if (smu->ppt_funcs->get_fan_speed_percent) {
+		ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
 		if (!ret) {
-			percent = current_rpm * 100 / smu->fan_max_rpm;
 			*speed = percent > 100 ? 100 : percent;
 		}
 	}
@@ -2157,8 +2330,13 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->set_fan_speed_percent)
+	if (smu->ppt_funcs->set_fan_speed_percent) {
+		if (speed > 100)
+			speed = 100;
 		ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
+		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+			smu->user_dpm_profile.fan_speed_percent = speed;
+	}
 
 	mutex_unlock(&smu->mutex);
 
@@ -2168,14 +2346,17 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
 {
 	int ret = 0;
+	u32 percent;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->get_fan_speed_rpm)
-		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
+	if (smu->ppt_funcs->get_fan_speed_percent) {
+		ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
+		*speed = percent * smu->fan_max_rpm / 100;
+	}
 
 	mutex_unlock(&smu->mutex);
 
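
With the RPM-native callbacks gone, both fan paths above now pivot through a percentage, scaled linearly against fan_max_rpm. The integer math truncates, so a round trip is only exact when the values divide evenly; a worked example with an assumed board maximum:

#include <stdio.h>

int main(void)
{
	unsigned int fan_max_rpm = 3300;	/* assumed board maximum */
	unsigned int rpm = 1650;

	unsigned int percent = rpm * 100 / fan_max_rpm;		/* 50 */
	unsigned int rpm_back = percent * fan_max_rpm / 100;	/* 1650 */

	printf("%u rpm -> %u%% -> %u rpm\n", rpm, percent, rpm_back);
	return 0;
}
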
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 16db0b506b0de0a6ee3be4a80f385e861169e8c5..45564a776e9b2228d48ba299c1123a8ef343ae1c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1080,15 +1080,27 @@ static int arcturus_read_sensor(struct smu_context *smu,
 	return ret;
 }
 
-static int arcturus_get_fan_speed_rpm(struct smu_context *smu,
-				      uint32_t *speed)
+static int arcturus_get_fan_speed_percent(struct smu_context *smu,
+					  uint32_t *speed)
 {
+	int ret;
+	u32 rpm;
+
 	if (!speed)
 		return -EINVAL;
 
-	return arcturus_get_smu_metrics_data(smu,
-					     METRICS_CURR_FANSPEED,
-					     speed);
+	switch (smu_v11_0_get_fan_control_mode(smu)) {
+	case AMD_FAN_CTRL_AUTO:
+		ret = arcturus_get_smu_metrics_data(smu,
+						    METRICS_CURR_FANSPEED,
+						    &rpm);
+		if (!ret && smu->fan_max_rpm)
+			*speed = rpm * 100 / smu->fan_max_rpm;
+		return ret;
+	default:
+		*speed = smu->user_dpm_profile.fan_speed_percent;
+		return 0;
+	}
 }
 
 static int arcturus_get_fan_parameters(struct smu_context *smu)
@@ -2227,7 +2239,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
 	if (ret)
 		return ret;
 
-	smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 0);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2264,6 +2276,8 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
 	gpu_metrics->pcie_link_speed =
 			arcturus_get_current_pcie_link_speed(smu);
 
+	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
 	*table = (void *)gpu_metrics;
 
 	return sizeof(struct gpu_metrics_v1_0);
@@ -2281,7 +2295,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.print_clk_levels = arcturus_print_clk_levels,
 	.force_clk_levels = arcturus_force_clk_levels,
 	.read_sensor = arcturus_read_sensor,
-	.get_fan_speed_rpm = arcturus_get_fan_speed_rpm,
+	.get_fan_speed_percent = arcturus_get_fan_speed_percent,
 	.get_power_profile_mode = arcturus_get_power_profile_mode,
 	.set_power_profile_mode = arcturus_set_power_profile_mode,
 	.set_performance_level = arcturus_set_performance_level,
@@ -2327,7 +2341,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
 	.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
 	.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
-	.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
 	.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
 	.gfx_off_control = smu_v11_0_gfx_off_control,
 	.register_irq_handler = smu_v11_0_register_irq_handler,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index cd7efa923195e3fa2a94414adf096fbfc1eccd0b..6e641f1513d80b0b48a2fce00dfb9485c79ae5a7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1317,15 +1317,27 @@ static bool navi10_is_dpm_running(struct smu_context *smu)
 	return !!(feature_enabled & SMC_DPM_FEATURE);
 }
 
-static int navi10_get_fan_speed_rpm(struct smu_context *smu,
-				    uint32_t *speed)
+static int navi10_get_fan_speed_percent(struct smu_context *smu,
+					uint32_t *speed)
 {
+	int ret;
+	u32 rpm;
+
 	if (!speed)
 		return -EINVAL;
 
-	return navi10_get_smu_metrics_data(smu,
-					   METRICS_CURR_FANSPEED,
-					   speed);
+	switch (smu_v11_0_get_fan_control_mode(smu)) {
+	case AMD_FAN_CTRL_AUTO:
+		ret = navi10_get_smu_metrics_data(smu,
+						  METRICS_CURR_FANSPEED,
+						  &rpm);
+		if (!ret && smu->fan_max_rpm)
+			*speed = rpm * 100 / smu->fan_max_rpm;
+		return ret;
+	default:
+		*speed = smu->user_dpm_profile.fan_speed_percent;
+		return 0;
+	}
 }
 
 static int navi10_get_fan_parameters(struct smu_context *smu)
@@ -1673,7 +1685,7 @@ static int navi10_read_sensor(struct smu_context *smu,
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_GFX_SCLK:
-		ret = navi10_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
+		ret = navi10_get_smu_metrics_data(smu, METRICS_AVERAGE_GFXCLK, (uint32_t *)data);
 		*(uint32_t *)data *= 100;
 		*size = 4;
 		break;
@@ -2302,7 +2314,7 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 0);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2342,6 +2354,8 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 	gpu_metrics->pcie_link_speed =
 			smu_v11_0_get_current_pcie_link_speed(smu);
 
+	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
 	*table = (void *)gpu_metrics;
 
 	return sizeof(struct gpu_metrics_v1_0);
@@ -2413,7 +2427,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.display_config_changed = navi10_display_config_changed,
 	.notify_smc_display_config = navi10_notify_smc_display_config,
 	.is_dpm_running = navi10_is_dpm_running,
-	.get_fan_speed_rpm = navi10_get_fan_speed_rpm,
+	.get_fan_speed_percent = navi10_get_fan_speed_percent,
 	.get_power_profile_mode = navi10_get_power_profile_mode,
 	.set_power_profile_mode = navi10_set_power_profile_mode,
 	.set_watermarks_table = navi10_set_watermarks_table,
@@ -2457,7 +2471,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
 	.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
 	.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
-	.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
 	.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
 	.gfx_off_control = smu_v11_0_gfx_off_control,
 	.register_irq_handler = smu_v11_0_register_irq_handler,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index d68d3dfee51d47c0c92c364c4495c332c7e37439..af73e1430af535beefff194d4140c8a3af4ca042 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -261,6 +261,11 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_GPO_BIT);
 	}
 
+	if ((adev->pm.pp_feature & PP_GFX_DCS_MASK) &&
+	    (adev->asic_type > CHIP_SIENNA_CICHLID) &&
+	    !(adev->flags & AMD_IS_APU))
+		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DCS_BIT);
+
 	if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
 					| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
@@ -294,6 +299,12 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
 	    smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_PG_BIT);
 
+	if (smu->dc_controlled_by_gpio)
+		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
+
+	if (amdgpu_aspm == 1)
+		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
+
 	return 0;
 }
 
@@ -314,6 +325,12 @@ static int sienna_cichlid_check_powerplay_table(struct smu_context *smu)
 	table_context->thermal_controller_type =
 		powerplay_table->thermal_controller_type;
 
+	/*
+	 * Instead of having its own buffer space and get overdrive_table copied,
+	 * smu->od_settings just points to the actual overdrive_table
+	 */
+	smu->od_settings = &powerplay_table->overdrive_table;
+
 	return 0;
 }
 
@@ -907,6 +924,22 @@ static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu,
 	return dpm_desc->SnapToDiscrete == 0 ? true : false;
 }
 
+static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
+						   enum SMU_11_0_7_ODFEATURE_CAP cap)
+{
+	return od_table->cap[cap];
+}
+
+static void sienna_cichlid_get_od_setting_range(struct smu_11_0_7_overdrive_table *od_table,
+						enum SMU_11_0_7_ODSETTING_ID setting,
+						uint32_t *min, uint32_t *max)
+{
+	if (min)
+		*min = od_table->min[setting];
+	if (max)
+		*max = od_table->max[setting];
+}
+
 static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
 			enum smu_clk_type clk_type, char *buf)
 {
@@ -915,11 +948,16 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
 	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
 	PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+	struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
+	OverDriveTable_t *od_table =
+		(OverDriveTable_t *)table_context->overdrive_table;
 	int i, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	uint32_t freq_values[3] = {0};
 	uint32_t mark_index = 0;
 	uint32_t gen_speed, lane_width;
+	uint32_t min_value, max_value;
+	uint32_t smu_version;
 
 	switch (clk_type) {
 	case SMU_GFXCLK:
@@ -995,6 +1033,70 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
 					(lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
 					"*" : "");
 		break;
+	case SMU_OD_SCLK:
+		if (!smu->od_enabled || !od_table || !od_settings)
+			break;
+
+		if (!sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS))
+			break;
+
+		size += sprintf(buf + size, "OD_SCLK:\n");
+		size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+		break;
+
+	case SMU_OD_MCLK:
+		if (!smu->od_enabled || !od_table || !od_settings)
+			break;
+
+		if (!sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_UCLK_LIMITS))
+			break;
+
+		size += sprintf(buf + size, "OD_MCLK:\n");
+		size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->UclkFmin, od_table->UclkFmax);
+		break;
+
+	case SMU_OD_VDDGFX_OFFSET:
+		if (!smu->od_enabled || !od_table || !od_settings)
+			break;
+
+		/*
+		 * OD GFX Voltage Offset functionality is supported only by
+		 * SMU firmware 58.41.0 and onwards.
+		 */
+		smu_cmn_get_smc_version(smu, NULL, &smu_version);
+		if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+		     (smu_version < 0x003a2900))
+			break;
+
+		size += sprintf(buf + size, "OD_VDDGFX_OFFSET:\n");
+		size += sprintf(buf + size, "%dmV\n", od_table->VddGfxOffset);
+		break;
+
+	case SMU_OD_RANGE:
+		if (!smu->od_enabled || !od_table || !od_settings)
+			break;
+
+		size = sprintf(buf, "%s:\n", "OD_RANGE");
+
+		if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
+			sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN,
+							    &min_value, NULL);
+			sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMAX,
+							    NULL, &max_value);
+			size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+					min_value, max_value);
+		}
+
+		if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_UCLK_LIMITS)) {
+			sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_UCLKFMIN,
+							    &min_value, NULL);
+			sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_UCLKFMAX,
+							    NULL, &max_value);
+			size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+					min_value, max_value);
+		}
+		break;
+
 	default:
 		break;
 	}
@@ -1146,15 +1248,27 @@ static bool sienna_cichlid_is_dpm_running(struct smu_context *smu)
 	return !!(feature_enabled & SMC_DPM_FEATURE);
 }
 
-static int sienna_cichlid_get_fan_speed_rpm(struct smu_context *smu,
-				    uint32_t *speed)
+static int sienna_cichlid_get_fan_speed_percent(struct smu_context *smu,
+						uint32_t *speed)
 {
+	int ret;
+	u32 rpm;
+
 	if (!speed)
 		return -EINVAL;
 
-	return sienna_cichlid_get_smu_metrics_data(smu,
-						METRICS_CURR_FANSPEED,
-						speed);
+	switch (smu_v11_0_get_fan_control_mode(smu)) {
+	case AMD_FAN_CTRL_AUTO:
+		ret = sienna_cichlid_get_smu_metrics_data(smu,
+							  METRICS_CURR_FANSPEED,
+							  &rpm);
+		if (!ret && smu->fan_max_rpm)
+			*speed = rpm * 100 / smu->fan_max_rpm;
+		return ret;
+	default:
+		*speed = smu->user_dpm_profile.fan_speed_percent;
+		return 0;
+	}
 }
 
 static int sienna_cichlid_get_fan_parameters(struct smu_context *smu)
@@ -1694,6 +1808,243 @@ static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu,
 	return ret;
 }
 
+static void sienna_cichlid_dump_od_table(struct smu_context *smu,
+					 OverDriveTable_t *od_table)
+{
+	struct amdgpu_device *adev = smu->adev;
+	uint32_t smu_version;
+
+	dev_dbg(smu->adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin,
+							  od_table->GfxclkFmax);
+	dev_dbg(smu->adev->dev, "OD: Uclk: (%d, %d)\n", od_table->UclkFmin,
+							od_table->UclkFmax);
+
+	smu_cmn_get_smc_version(smu, NULL, &smu_version);
+	if (!((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+	       (smu_version < 0x003a2900)))
+		dev_dbg(smu->adev->dev, "OD: VddGfxOffset: %d\n", od_table->VddGfxOffset);
+}
+
+static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
+{
+	OverDriveTable_t *od_table =
+		(OverDriveTable_t *)smu->smu_table.overdrive_table;
+	OverDriveTable_t *boot_od_table =
+		(OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
+	int ret = 0;
+
+	ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
+				   0, (void *)od_table, false);
+	if (ret) {
+		dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
+		return ret;
+	}
+
+	memcpy(boot_od_table, od_table, sizeof(OverDriveTable_t));
+
+	sienna_cichlid_dump_od_table(smu, od_table);
+
+	return 0;
+}
+
+static int sienna_cichlid_od_setting_check_range(struct smu_context *smu,
+						 struct smu_11_0_7_overdrive_table *od_table,
+						 enum SMU_11_0_7_ODSETTING_ID setting,
+						 uint32_t value)
+{
+	if (value < od_table->min[setting]) {
+		dev_warn(smu->adev->dev, "OD setting (%d, %d) is less than the minimum allowed (%d)\n",
+					  setting, value, od_table->min[setting]);
+		return -EINVAL;
+	}
+	if (value > od_table->max[setting]) {
+		dev_warn(smu->adev->dev, "OD setting (%d, %d) is greater than the maximum allowed (%d)\n",
+					  setting, value, od_table->max[setting]);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
+					    enum PP_OD_DPM_TABLE_COMMAND type,
+					    long input[], uint32_t size)
+{
+	struct smu_table_context *table_context = &smu->smu_table;
+	OverDriveTable_t *od_table =
+		(OverDriveTable_t *)table_context->overdrive_table;
+	struct smu_11_0_7_overdrive_table *od_settings =
+		(struct smu_11_0_7_overdrive_table *)smu->od_settings;
+	struct amdgpu_device *adev = smu->adev;
+	enum SMU_11_0_7_ODSETTING_ID freq_setting;
+	uint16_t *freq_ptr;
+	int i, ret = 0;
+	uint32_t smu_version;
+
+	if (!smu->od_enabled) {
+		dev_warn(smu->adev->dev, "OverDrive is not enabled!\n");
+		return -EINVAL;
+	}
+
+	if (!smu->od_settings) {
+		dev_err(smu->adev->dev, "OD board limits are not set!\n");
+		return -ENOENT;
+	}
+
+	if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
+		dev_err(smu->adev->dev, "Overdrive table was not initialized!\n");
+		return -EINVAL;
+	}
+
+	switch (type) {
+	case PP_OD_EDIT_SCLK_VDDC_TABLE:
+		if (!sienna_cichlid_is_od_feature_supported(od_settings,
+							    SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
+			dev_warn(smu->adev->dev, "GFXCLK_LIMITS not supported!\n");
+			return -ENOTSUPP;
+		}
+
+		for (i = 0; i < size; i += 2) {
+			if (i + 2 > size) {
+				dev_info(smu->adev->dev, "invalid number of input parameters %d\n", size);
+				return -EINVAL;
+			}
+
+			switch (input[i]) {
+			case 0:
+				if (input[i + 1] > od_table->GfxclkFmax) {
+					dev_info(smu->adev->dev, "GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
+						input[i + 1], od_table->GfxclkFmax);
+					return -EINVAL;
+				}
+
+				freq_setting = SMU_11_0_7_ODSETTING_GFXCLKFMIN;
+				freq_ptr = &od_table->GfxclkFmin;
+				break;
+
+			case 1:
+				if (input[i + 1] < od_table->GfxclkFmin) {
+					dev_info(smu->adev->dev, "GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
+						input[i + 1], od_table->GfxclkFmin);
+					return -EINVAL;
+				}
+
+				freq_setting = SMU_11_0_7_ODSETTING_GFXCLKFMAX;
+				freq_ptr = &od_table->GfxclkFmax;
+				break;
+
+			default:
+				dev_info(smu->adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
+				dev_info(smu->adev->dev, "Supported indices: [0:min,1:max]\n");
+				return -EINVAL;
+			}
+
+			ret = sienna_cichlid_od_setting_check_range(smu, od_settings,
+								    freq_setting, input[i + 1]);
+			if (ret)
+				return ret;
+
+			*freq_ptr = (uint16_t)input[i + 1];
+		}
+		break;
+
+	case PP_OD_EDIT_MCLK_VDDC_TABLE:
+		if (!sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_UCLK_LIMITS)) {
+			dev_warn(smu->adev->dev, "UCLK_LIMITS not supported!\n");
+			return -ENOTSUPP;
+		}
+
+		for (i = 0; i < size; i += 2) {
+			if (i + 2 > size) {
+				dev_info(smu->adev->dev, "invalid number of input parameters %d\n", size);
+				return -EINVAL;
+			}
+
+			switch (input[i]) {
+			case 0:
+				if (input[i + 1] > od_table->UclkFmax) {
+					dev_info(smu->adev->dev, "UclkFmin (%ld) must be <= UclkFmax (%u)!\n",
+						input[i + 1], od_table->UclkFmax);
+					return -EINVAL;
+				}
+
+				freq_setting = SMU_11_0_7_ODSETTING_UCLKFMIN;
+				freq_ptr = &od_table->UclkFmin;
+				break;
+
+			case 1:
+				if (input[i + 1] < od_table->UclkFmin) {
+					dev_info(smu->adev->dev, "UclkFmax (%ld) must be >= UclkFmin (%u)!\n",
+						input[i + 1], od_table->UclkFmin);
+					return -EINVAL;
+				}
+
+				freq_setting = SMU_11_0_7_ODSETTING_UCLKFMAX;
+				freq_ptr = &od_table->UclkFmax;
+				break;
+
+			default:
+				dev_info(smu->adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]);
+				dev_info(smu->adev->dev, "Supported indices: [0:min,1:max]\n");
+				return -EINVAL;
+			}
+
+			ret = sienna_cichlid_od_setting_check_range(smu, od_settings,
+								    freq_setting, input[i + 1]);
+			if (ret)
+				return ret;
+
+			*freq_ptr = (uint16_t)input[i + 1];
+		}
+		break;
+
+	case PP_OD_RESTORE_DEFAULT_TABLE:
+		memcpy(table_context->overdrive_table,
+				table_context->boot_overdrive_table,
+				sizeof(OverDriveTable_t));
+		fallthrough;
+
+	case PP_OD_COMMIT_DPM_TABLE:
+		sienna_cichlid_dump_od_table(smu, od_table);
+
+		ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
+					   0, (void *)od_table, true);
+		if (ret) {
+			dev_err(smu->adev->dev, "Failed to import overdrive table!\n");
+			return ret;
+		}
+		break;
+
+	case PP_OD_EDIT_VDDGFX_OFFSET:
+		if (size != 1) {
+			dev_info(smu->adev->dev, "invalid number of parameters: %d\n", size);
+			return -EINVAL;
+		}
+
+		/*
+		 * OD GFX Voltage Offset functionality is supported only by
+		 * SMU firmware 58.41.0 and onwards.
+		 */
+		smu_cmn_get_smc_version(smu, NULL, &smu_version);
+		if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+		     (smu_version < 0x003a2900)) {
+			dev_err(smu->adev->dev, "OD GFX Voltage offset functionality is supported "
+						"only by SMU firmware 58.41.0 and onwards!\n");
+			return -EOPNOTSUPP;
+		}
+
+		od_table->VddGfxOffset = (int16_t)input[0];
+
+		sienna_cichlid_dump_od_table(smu, od_table);
+		break;
+
+	default:
+		return -ENOSYS;
+	}
+
+	return ret;
+}
+
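
The (index, value) pair-walk used by both clock tables above is easy to see in isolation. A minimal sketch with invented input; in the driver these pairs arrive through the pp_od_clk_voltage sysfs file (assuming the usual amdgpu command letters, e.g. writing "s 1 2150" to stage a change and "c" to commit it):

#include <stdio.h>

int main(void)
{
	/* invented input: gfxclk min 1900 MHz (index 0), max 2150 MHz (index 1) */
	long input[] = { 0, 1900, 1, 2150 };
	unsigned int size = 4, i;

	for (i = 0; i < size; i += 2) {
		if (i + 2 > size) {
			printf("invalid number of input parameters %u\n", size);
			return 1;
		}
		printf("index %ld -> %ld MHz (%s)\n", input[i], input[i + 1],
		       input[i] == 0 ? "Fmin" : "Fmax");
	}
	return 0;
}
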
 static int sienna_cichlid_run_btc(struct smu_context *smu)
 {
 	return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
@@ -2610,7 +2961,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
 	if (ret)
 		return ret;
 
-	smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 0);
 
 	gpu_metrics->temperature_edge = metrics->TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics->TemperatureHotspot;
@@ -2653,6 +3004,8 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
 	gpu_metrics->pcie_link_speed =
 			smu_v11_0_get_current_pcie_link_speed(smu);
 
+	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
 	*table = (void *)gpu_metrics;
 
 	return sizeof(struct gpu_metrics_v1_0);
@@ -2759,7 +3112,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
 	.display_config_changed = sienna_cichlid_display_config_changed,
 	.notify_smc_display_config = sienna_cichlid_notify_smc_display_config,
 	.is_dpm_running = sienna_cichlid_is_dpm_running,
-	.get_fan_speed_rpm = sienna_cichlid_get_fan_speed_rpm,
+	.get_fan_speed_percent = sienna_cichlid_get_fan_speed_percent,
 	.get_power_profile_mode = sienna_cichlid_get_power_profile_mode,
 	.set_power_profile_mode = sienna_cichlid_set_power_profile_mode,
 	.set_watermarks_table = sienna_cichlid_set_watermarks_table,
@@ -2803,7 +3156,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
 	.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
 	.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
 	.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
-	.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
 	.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
 	.gfx_off_control = smu_v11_0_gfx_off_control,
 	.register_irq_handler = smu_v11_0_register_irq_handler,
@@ -2818,6 +3170,8 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
 	.mode1_reset = smu_v11_0_mode1_reset,
 	.get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
 	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+	.set_default_od_settings = sienna_cichlid_set_default_od_settings,
+	.od_edit_dpm_table = sienna_cichlid_od_edit_dpm_table,
 	.run_btc = sienna_cichlid_run_btc,
 	.set_power_source = smu_v11_0_set_power_source,
 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 5aeb5f5a04478a04f75dc94bfcee685a672c3ee5..90585461a56e9c17cf6ccb873bdfcb06868be694 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -474,12 +474,14 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
 int smu_v11_0_init_power(struct smu_context *smu)
 {
 	struct smu_power_context *smu_power = &smu->smu_power;
+	size_t size = smu->adev->asic_type == CHIP_VANGOGH ?
+			sizeof(struct smu_11_5_power_context) :
+			sizeof(struct smu_11_0_power_context);
 
-	smu_power->power_context = kzalloc(sizeof(struct smu_11_0_power_context),
-					   GFP_KERNEL);
+	smu_power->power_context = kzalloc(size, GFP_KERNEL);
 	if (!smu_power->power_context)
 		return -ENOMEM;
-	smu_power->power_context_size = sizeof(struct smu_11_0_power_context);
+	smu_power->power_context_size = size;
 
 	return 0;
 }
@@ -1119,6 +1121,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
 	case CHIP_SIENNA_CICHLID:
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_VANGOGH:
 		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
 			return 0;
 		if (enable)
@@ -1136,10 +1139,10 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
 uint32_t
 smu_v11_0_get_fan_control_mode(struct smu_context *smu)
 {
-	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
-		return AMD_FAN_CTRL_MANUAL;
-	else
+	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
 		return AMD_FAN_CTRL_AUTO;
+	else
+		return smu->user_dpm_profile.fan_mode;
 }
 
 static int
@@ -1230,58 +1233,6 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
 	return ret;
 }
 
-int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
-				       uint32_t speed)
-{
-	struct amdgpu_device *adev = smu->adev;
-	int ret;
-	uint32_t tach_period, crystal_clock_freq;
-
-	if (!speed)
-		return -EINVAL;
-
-	ret = smu_v11_0_auto_fan_control(smu, 0);
-	if (ret)
-		return ret;
-
-	/*
-	 * crystal_clock_freq div by 4 is required since the fan control
-	 * module refers to 25MHz
-	 */
-
-	crystal_clock_freq = amdgpu_asic_get_xclk(adev) / 4;
-	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
-	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
-		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
-				   CG_TACH_CTRL, TARGET_PERIOD,
-				   tach_period));
-
-	ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
-
-	return ret;
-}
-
-int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
-				uint32_t *speed)
-{
-	struct amdgpu_device *adev = smu->adev;
-	uint32_t tach_period, crystal_clock_freq;
-	uint64_t tmp64;
-
-	tach_period = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
-				    CG_TACH_CTRL, TARGET_PERIOD);
-	if (!tach_period)
-		return -EINVAL;
-
-	crystal_clock_freq = amdgpu_asic_get_xclk(adev);
-
-	tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000;
-	do_div(tmp64, (tach_period * 8));
-	*speed = (uint32_t)tmp64;
-
-	return 0;
-}
-
 int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
 				     uint32_t pstate)
 {
@@ -2072,30 +2023,6 @@ int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
 	return link_speed[speed_level];
 }
 
-void smu_v11_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
-{
-	memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
-
-	gpu_metrics->common_header.structure_size =
-				sizeof(struct gpu_metrics_v1_0);
-	gpu_metrics->common_header.format_revision = 1;
-	gpu_metrics->common_header.content_revision = 0;
-
-	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
-}
-
-void smu_v11_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics)
-{
-	memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v2_0));
-
-	gpu_metrics->common_header.structure_size =
-				sizeof(struct gpu_metrics_v2_0);
-	gpu_metrics->common_header.format_revision = 2;
-	gpu_metrics->common_header.content_revision = 0;
-
-	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
-}
-
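
The two deleted helpers differed only in structure size and revision numbers, which is presumably what the new smu_cmn_init_soft_gpu_metrics factors out; note that the boot-time timestamp has moved into the individual callers above. A self-contained sketch of what such a helper plausibly looks like, with minimal stand-ins for the kernel types (the real implementation lives in smu_cmn.c):

#include <stdint.h>
#include <string.h>

/* stand-ins for the kernel types (see kgd_pp_interface.h) */
struct metrics_table_header {
	uint16_t structure_size;
	uint8_t format_revision;
	uint8_t content_revision;
};

struct gpu_metrics_v1_0 { struct metrics_table_header common_header; /* ... */ };
struct gpu_metrics_v2_0 { struct metrics_table_header common_header; /* ... */ };

/* sketch only -- generalizes the two per-version helpers deleted above */
void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
	struct metrics_table_header *header = table;
	uint16_t structure_size;

	if (frev == 1 && crev == 0)
		structure_size = sizeof(struct gpu_metrics_v1_0);
	else if (frev == 2 && crev == 0)
		structure_size = sizeof(struct gpu_metrics_v2_0);
	else
		return;

	memset(header, 0xFF, structure_size);
	header->format_revision = frev;
	header->content_revision = crev;
	header->structure_size = structure_size;
}
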
 int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
 			      bool enablement)
 {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 92ad2cdbae107c18aef29ac906e3ea4a14e08ba4..093b01159408d7661d04606664b8189bad017dca 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -31,6 +31,10 @@
 #include "smu_v11_5_ppsmc.h"
 #include "smu_v11_5_pmfw.h"
 #include "smu_cmn.h"
+#include "soc15_common.h"
+#include "asic_reg/gc/gc_10_3_0_offset.h"
+#include "asic_reg/gc/gc_10_3_0_sh_mask.h"
+#include <asm/processor.h>
 
 /*
  * DO NOT use these for err/warn/info/debug messages.
@@ -59,7 +63,8 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,		0),
 	MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion,	0),
 	MSG_MAP(EnableGfxOff,                   PPSMC_MSG_EnableGfxOff,			0),
-	MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisableGfxOff,		0),
+	MSG_MAP(AllowGfxOff,                    PPSMC_MSG_AllowGfxOff,          0),
+	MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisallowGfxOff,		0),
 	MSG_MAP(PowerDownIspByTile,             PPSMC_MSG_PowerDownIspByTile,	0),
 	MSG_MAP(PowerUpIspByTile,               PPSMC_MSG_PowerUpIspByTile,		0),
 	MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn,			0),
@@ -76,7 +81,6 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,	0),
 	MSG_MAP(GfxDeviceDriverReset,           PPSMC_MSG_GfxDeviceDriverReset,		0),
 	MSG_MAP(GetEnabledSmuFeatures,          PPSMC_MSG_GetEnabledSmuFeatures,	0),
-	MSG_MAP(Spare1,                         PPSMC_MSG_spare1,					0),
 	MSG_MAP(SetHardMinSocclkByFreq,         PPSMC_MSG_SetHardMinSocclkByFreq,	0),
 	MSG_MAP(SetSoftMinFclk,                 PPSMC_MSG_SetSoftMinFclk,		0),
 	MSG_MAP(SetSoftMinVcn,                  PPSMC_MSG_SetSoftMinVcn,		0),
@@ -88,7 +92,6 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(SetSoftMaxSocclkByFreq,         PPSMC_MSG_SetSoftMaxSocclkByFreq,	0),
 	MSG_MAP(SetSoftMaxFclkByFreq,           PPSMC_MSG_SetSoftMaxFclkByFreq,		0),
 	MSG_MAP(SetSoftMaxVcn,                  PPSMC_MSG_SetSoftMaxVcn,			0),
-	MSG_MAP(Spare2,                         PPSMC_MSG_spare2,					0),
 	MSG_MAP(SetPowerLimitPercentage,        PPSMC_MSG_SetPowerLimitPercentage,	0),
 	MSG_MAP(PowerDownJpeg,                  PPSMC_MSG_PowerDownJpeg,			0),
 	MSG_MAP(PowerUpJpeg,                    PPSMC_MSG_PowerUpJpeg,				0),
@@ -118,6 +121,11 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(StopDramLogging,                    PPSMC_MSG_StopDramLogging,						0),
 	MSG_MAP(SetSoftMinCclk,                     PPSMC_MSG_SetSoftMinCclk,						0),
 	MSG_MAP(SetSoftMaxCclk,                     PPSMC_MSG_SetSoftMaxCclk,						0),
+	MSG_MAP(RequestActiveWgp,                   PPSMC_MSG_RequestActiveWgp,                     0),
+	MSG_MAP(SetFastPPTLimit,                    PPSMC_MSG_SetFastPPTLimit,						0),
+	MSG_MAP(SetSlowPPTLimit,                    PPSMC_MSG_SetSlowPPTLimit,						0),
+	MSG_MAP(GetFastPPTLimit,                    PPSMC_MSG_GetFastPPTLimit,						0),
+	MSG_MAP(GetSlowPPTLimit,                    PPSMC_MSG_GetSlowPPTLimit,						0),
 };
 
 static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -162,6 +170,9 @@ static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
 	FEA_MAP(A55_DPM),
 	FEA_MAP(CVIP_DSP_DPM),
 	FEA_MAP(MSMU_LOW_POWER),
+	FEA_MAP_REVERSE(SOCCLK),
+	FEA_MAP_REVERSE(FCLK),
+	FEA_MAP_HALF_REVERSE(GFX),
 };
 
 static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
@@ -171,6 +182,14 @@ static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
 	TAB_MAP_VALID(DPMCLOCKS),
 };
 
+static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
+	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
+	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
+	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
+	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
+};
+
 static int vangogh_tables_init(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
@@ -242,6 +261,12 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
 	case METRICS_AVERAGE_SOCCLK:
 		*value = metrics->SocclkFrequency;
 		break;
+	case METRICS_AVERAGE_VCLK:
+		*value = metrics->VclkFrequency;
+		break;
+	case METRICS_AVERAGE_DCLK:
+		*value = metrics->DclkFrequency;
+		break;
 	case METRICS_AVERAGE_UCLK:
 		*value = metrics->MemclkFrequency;
 		break;
@@ -272,6 +297,10 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
 	case METRICS_VOLTAGE_VDDSOC:
 		*value = metrics->Voltage[1];
 		break;
+	case METRICS_AVERAGE_CPUCLK:
+		memcpy(value, &metrics->CoreFrequency[0],
+		       smu->cpu_core_num * sizeof(uint16_t));
+		break;
 	default:
 		*value = UINT_MAX;
 		break;
@@ -308,6 +337,13 @@ static int vangogh_init_smc_tables(struct smu_context *smu)
 	if (ret)
 		return ret;
 
+#ifdef CONFIG_X86
+	/* AMD x86 APU only */
+	smu->cpu_core_num = boot_cpu_data.x86_max_cores;
+#else
+	smu->cpu_core_num = 4;
+#endif
+
 	return smu_v11_0_init_smc_tables(smu);
 }
 
@@ -317,17 +353,13 @@ static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 
 	if (enable) {
 		/* vcn dpm on is a prerequisite for vcn power gate messages */
-		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
-			if (ret)
-				return ret;
-		}
+		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
+		if (ret)
+			return ret;
 	} else {
-		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
-			if (ret)
-				return ret;
-		}
+		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
+		if (ret)
+			return ret;
 	}
 
 	return ret;
@@ -338,54 +370,18 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 	int ret = 0;
 
 	if (enable) {
-		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
-			if (ret)
-				return ret;
-		}
+		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
+		if (ret)
+			return ret;
 	} else {
-		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
-			if (ret)
-				return ret;
-		}
+		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
+		if (ret)
+			return ret;
 	}
 
 	return ret;
 }
 
-static int vangogh_get_allowed_feature_mask(struct smu_context *smu,
-					    uint32_t *feature_mask,
-					    uint32_t num)
-{
-	struct amdgpu_device *adev = smu->adev;
-
-	if (num > 2)
-		return -EINVAL;
-
-	memset(feature_mask, 0, sizeof(uint32_t) * num);
-
-	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DPM_BIT)
-				| FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT)
-				| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
-				| FEATURE_MASK(FEATURE_PPT_BIT)
-				| FEATURE_MASK(FEATURE_TDC_BIT)
-				| FEATURE_MASK(FEATURE_FAN_CONTROLLER_BIT)
-				| FEATURE_MASK(FEATURE_DS_LCLK_BIT)
-				| FEATURE_MASK(FEATURE_DS_DCFCLK_BIT);
-
-	if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
-		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT);
-
-	if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
-		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT);
-
-	if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
-		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);
-
-	return 0;
-}
-
 static bool vangogh_is_dpm_running(struct smu_context *smu)
 {
 	int ret = 0;
@@ -403,14 +399,68 @@ static bool vangogh_is_dpm_running(struct smu_context *smu)
 	return !!(feature_enabled & SMC_DPM_FEATURE);
 }
 
+static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
+						uint32_t dpm_level, uint32_t *freq)
+{
+	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+	if (!clk_table || clk_type >= SMU_CLK_COUNT)
+		return -EINVAL;
+
+	switch (clk_type) {
+	case SMU_SOCCLK:
+		if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
+			return -EINVAL;
+		*freq = clk_table->SocClocks[dpm_level];
+		break;
+	case SMU_VCLK:
+		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
+			return -EINVAL;
+		*freq = clk_table->VcnClocks[dpm_level].vclk;
+		break;
+	case SMU_DCLK:
+		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
+			return -EINVAL;
+		*freq = clk_table->VcnClocks[dpm_level].dclk;
+		break;
+	case SMU_UCLK:
+	case SMU_MCLK:
+		if (dpm_level >= clk_table->NumDfPstatesEnabled)
+			return -EINVAL;
+		*freq = clk_table->DfPstateTable[dpm_level].memclk;
+
+		break;
+	case SMU_FCLK:
+		if (dpm_level >= clk_table->NumDfPstatesEnabled)
+			return -EINVAL;
+		*freq = clk_table->DfPstateTable[dpm_level].fclk;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
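
The lookups above pin down the parts of the firmware clock table this code depends on. An illustrative reconstruction of those fields (array bounds are placeholders; the real DpmClocks_t layout comes from the Van Gogh PMFW headers):

#include <stdint.h>

/* illustrative reconstruction; not the real firmware layout */
typedef struct {
	uint32_t vclk;
	uint32_t dclk;
} VcnClocks_t;

typedef struct {
	uint32_t fclk;
	uint32_t memclk;
	/* ... */
} DfPstate_t;

typedef struct {
	uint32_t SocClocks[8];			/* placeholder bound */
	VcnClocks_t VcnClocks[8];		/* placeholder bound */
	DfPstate_t DfPstateTable[4];		/* placeholder bound */
	uint8_t NumSocClkLevelsEnabled;
	uint8_t VcnClkLevelsEnabled;
	uint8_t NumDfPstatesEnabled;
	/* ... */
} DpmClocks_t;
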
 static int vangogh_print_fine_grain_clk(struct smu_context *smu,
 			enum smu_clk_type clk_type, char *buf)
 {
-	int size = 0;
+	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+	SmuMetrics_t metrics;
+	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+	int i, size = 0, ret = 0;
+	uint32_t cur_value = 0, value = 0, count = 0;
+	bool cur_value_match_level = false;
+
+	memset(&metrics, 0, sizeof(metrics));
+
+	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
+	if (ret)
+		return ret;
 
 	switch (clk_type) {
 	case SMU_OD_SCLK:
-		if (smu->od_enabled) {
+		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
 			size = sprintf(buf, "%s:\n", "OD_SCLK");
 			size += sprintf(buf + size, "0: %10uMhz\n",
 			(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
@@ -418,12 +468,71 @@ static int vangogh_print_fine_grain_clk(struct smu_context *smu,
 			(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
 		}
 		break;
+	case SMU_OD_CCLK:
+		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+			size = sprintf(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+			size += sprintf(buf + size, "0: %10uMhz\n",
+			(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
+			size += sprintf(buf + size, "1: %10uMhz\n",
+			(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
+		}
+		break;
 	case SMU_OD_RANGE:
-		if (smu->od_enabled) {
+		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
 			size = sprintf(buf, "%s:\n", "OD_RANGE");
 			size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
 				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
+			size += sprintf(buf + size, "CCLK: %7uMhz %10uMhz\n",
+				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
+		}
+		break;
+	case SMU_SOCCLK:
+		/* socclk levels 3 ~ 6 use the same frequency on vangogh */
+		count = clk_table->NumSocClkLevelsEnabled;
+		cur_value = metrics.SocclkFrequency;
+		break;
+	case SMU_VCLK:
+		count = clk_table->VcnClkLevelsEnabled;
+		cur_value = metrics.VclkFrequency;
+		break;
+	case SMU_DCLK:
+		count = clk_table->VcnClkLevelsEnabled;
+		cur_value = metrics.DclkFrequency;
+		break;
+	case SMU_MCLK:
+		count = clk_table->NumDfPstatesEnabled;
+		cur_value = metrics.MemclkFrequency;
+		break;
+	case SMU_FCLK:
+		count = clk_table->NumDfPstatesEnabled;
+		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
+		if (ret)
+			return ret;
+		break;
+	default:
+		break;
+	}
+
+	switch (clk_type) {
+	case SMU_SOCCLK:
+	case SMU_VCLK:
+	case SMU_DCLK:
+	case SMU_MCLK:
+	case SMU_FCLK:
+		for (i = 0; i < count; i++) {
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+			if (ret)
+				return ret;
+			if (!value)
+				continue;
+			size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+					cur_value == value ? "*" : "");
+			if (cur_value == value)
+				cur_value_match_level = true;
 		}
+
+		if (!cur_value_match_level)
+			size += sprintf(buf + size, "   %uMhz *\n", cur_value);
 		break;
 	default:
 		break;
@@ -432,6 +541,726 @@ static int vangogh_print_fine_grain_clk(struct smu_context *smu,
 	return size;
 }
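
For a DPM clock such as socclk, the level loop above produces the familiar starred list, e.g. (frequencies invented):

0: 400Mhz
1: 600Mhz
2: 800Mhz *
3: 1000Mhz

When the measured frequency matches no enabled level, a trailing unnumbered line such as "   685Mhz *" is printed instead of a starred entry.
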
 
+static int vangogh_get_profiling_clk_mask(struct smu_context *smu,
+					 enum amd_dpm_forced_level level,
+					 uint32_t *vclk_mask,
+					 uint32_t *dclk_mask,
+					 uint32_t *mclk_mask,
+					 uint32_t *fclk_mask,
+					 uint32_t *soc_mask)
+{
+	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+		if (mclk_mask)
+			*mclk_mask = clk_table->NumDfPstatesEnabled - 1;
+
+		if (fclk_mask)
+			*fclk_mask = clk_table->NumDfPstatesEnabled - 1;
+
+		if (soc_mask)
+			*soc_mask = 0;
+	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+		if (mclk_mask)
+			*mclk_mask = 0;
+
+		if (fclk_mask)
+			*fclk_mask = 0;
+
+		if (soc_mask)
+			*soc_mask = 1;
+
+		if (vclk_mask)
+			*vclk_mask = 1;
+
+		if (dclk_mask)
+			*dclk_mask = 1;
+	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) {
+		if (mclk_mask)
+			*mclk_mask = 0;
+
+		if (fclk_mask)
+			*fclk_mask = 0;
+
+		if (soc_mask)
+			*soc_mask = 1;
+
+		if (vclk_mask)
+			*vclk_mask = 1;
+
+		if (dclk_mask)
+			*dclk_mask = 1;
+	}
+
+	return 0;
+}
+
+static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu,
+				enum smu_clk_type clk_type)
+{
+	enum smu_feature_mask feature_id = 0;
+
+	switch (clk_type) {
+	case SMU_MCLK:
+	case SMU_UCLK:
+	case SMU_FCLK:
+		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
+		break;
+	case SMU_GFXCLK:
+	case SMU_SCLK:
+		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
+		break;
+	case SMU_SOCCLK:
+		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
+		break;
+	case SMU_VCLK:
+	case SMU_DCLK:
+		feature_id = SMU_FEATURE_VCN_DPM_BIT;
+		break;
+	default:
+		return true;
+	}
+
+	if (!smu_cmn_feature_is_enabled(smu, feature_id))
+		return false;
+
+	return true;
+}
+
+static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
+					enum smu_clk_type clk_type,
+					uint32_t *min,
+					uint32_t *max)
+{
+	int ret = 0;
+	uint32_t soc_mask;
+	uint32_t vclk_mask;
+	uint32_t dclk_mask;
+	uint32_t mclk_mask;
+	uint32_t fclk_mask;
+	uint32_t clock_limit;
+
+	if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) {
+		switch (clk_type) {
+		case SMU_MCLK:
+		case SMU_UCLK:
+			clock_limit = smu->smu_table.boot_values.uclk;
+			break;
+		case SMU_FCLK:
+			clock_limit = smu->smu_table.boot_values.fclk;
+			break;
+		case SMU_GFXCLK:
+		case SMU_SCLK:
+			clock_limit = smu->smu_table.boot_values.gfxclk;
+			break;
+		case SMU_SOCCLK:
+			clock_limit = smu->smu_table.boot_values.socclk;
+			break;
+		case SMU_VCLK:
+			clock_limit = smu->smu_table.boot_values.vclk;
+			break;
+		case SMU_DCLK:
+			clock_limit = smu->smu_table.boot_values.dclk;
+			break;
+		default:
+			clock_limit = 0;
+			break;
+		}
+
+		/* clock in Mhz unit */
+		if (min)
+			*min = clock_limit / 100;
+		if (max)
+			*max = clock_limit / 100;
+
+		return 0;
+	}
+	if (max) {
+		ret = vangogh_get_profiling_clk_mask(smu,
+							AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
+							&vclk_mask,
+							&dclk_mask,
+							&mclk_mask,
+							&fclk_mask,
+							&soc_mask);
+		if (ret)
+			goto failed;
+
+		switch (clk_type) {
+		case SMU_UCLK:
+		case SMU_MCLK:
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
+			if (ret)
+				goto failed;
+			break;
+		case SMU_SOCCLK:
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
+			if (ret)
+				goto failed;
+			break;
+		case SMU_FCLK:
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max);
+			if (ret)
+				goto failed;
+			break;
+		case SMU_VCLK:
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max);
+			if (ret)
+				goto failed;
+			break;
+		case SMU_DCLK:
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max);
+			if (ret)
+				goto failed;
+			break;
+		default:
+			ret = -EINVAL;
+			goto failed;
+		}
+	}
+	if (min) {
+		switch (clk_type) {
+		case SMU_UCLK:
+		case SMU_MCLK:
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min);
+			if (ret)
+				goto failed;
+			break;
+		case SMU_SOCCLK:
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min);
+			if (ret)
+				goto failed;
+			break;
+		case SMU_FCLK:
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min);
+			if (ret)
+				goto failed;
+			break;
+		case SMU_VCLK:
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min);
+			if (ret)
+				goto failed;
+			break;
+		case SMU_DCLK:
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min);
+			if (ret)
+				goto failed;
+			break;
+		default:
+			ret = -EINVAL;
+			goto failed;
+		}
+	}
+failed:
+	return ret;
+}
+
+static int vangogh_get_power_profile_mode(struct smu_context *smu,
+					   char *buf)
+{
+	static const char *profile_name[] = {
+					"BOOTUP_DEFAULT",
+					"3D_FULL_SCREEN",
+					"POWER_SAVING",
+					"VIDEO",
+					"VR",
+					"COMPUTE",
+					"CUSTOM"};
+	uint32_t i, size = 0;
+	int16_t workload_type = 0;
+
+	if (!buf)
+		return -EINVAL;
+
+	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+		/*
+		 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
+		 * Not all profile modes are supported on vangogh.
+		 */
+		workload_type = smu_cmn_to_asic_specific_index(smu,
+							       CMN2ASIC_MAPPING_WORKLOAD,
+							       i);
+
+		if (workload_type < 0)
+			continue;
+
+		size += sprintf(buf + size, "%2d %14s%s\n",
+			i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+	}
+
+	return size;
+}
+
+static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+{
+	int workload_type, ret;
+	uint32_t profile_mode = input[size];
+
+	if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
+		return -EINVAL;
+	}
+
+	if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
+			profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
+		return 0;
+
+	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+	workload_type = smu_cmn_to_asic_specific_index(smu,
+						       CMN2ASIC_MAPPING_WORKLOAD,
+						       profile_mode);
+	if (workload_type < 0) {
+		dev_err_once(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
+					profile_mode);
+		return -EINVAL;
+	}
+
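+	/* the selected workload is reported to the SMU as a bitmask */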
+	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
+				    1 << workload_type,
+				    NULL);
+	if (ret) {
+		dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
+					workload_type);
+		return ret;
+	}
+
+	smu->power_profile_mode = profile_mode;
+
+	return 0;
+}
+
+static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
+					  enum smu_clk_type clk_type,
+					  uint32_t min,
+					  uint32_t max)
+{
+	int ret = 0;
+
+	if (!vangogh_clk_dpm_is_enabled(smu, clk_type))
+		return 0;
+
+	switch (clk_type) {
+	case SMU_GFXCLK:
+	case SMU_SCLK:
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+							SMU_MSG_SetHardMinGfxClk,
+							min, NULL);
+		if (ret)
+			return ret;
+
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+							SMU_MSG_SetSoftMaxGfxClk,
+							max, NULL);
+		if (ret)
+			return ret;
+		break;
+	case SMU_FCLK:
+	case SMU_MCLK:
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+							SMU_MSG_SetHardMinFclkByFreq,
+							min, NULL);
+		if (ret)
+			return ret;
+
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+							SMU_MSG_SetSoftMaxFclkByFreq,
+							max, NULL);
+		if (ret)
+			return ret;
+		break;
+	case SMU_SOCCLK:
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+							SMU_MSG_SetHardMinSocclkByFreq,
+							min, NULL);
+		if (ret)
+			return ret;
+
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+							SMU_MSG_SetSoftMaxSocclkByFreq,
+							max, NULL);
+		if (ret)
+			return ret;
+		break;
+	case SMU_VCLK:
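+		/* VCLK rides in the upper 16 bits of the VCN message parameter, DCLK in the lower */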
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+							SMU_MSG_SetHardMinVcn,
+							min << 16, NULL);
+		if (ret)
+			return ret;
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+							SMU_MSG_SetSoftMaxVcn,
+							max << 16, NULL);
+		if (ret)
+			return ret;
+		break;
+	case SMU_DCLK:
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+							SMU_MSG_SetHardMinVcn,
+							min, NULL);
+		if (ret)
+			return ret;
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+							SMU_MSG_SetSoftMaxVcn,
+							max, NULL);
+		if (ret)
+			return ret;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int vangogh_force_clk_levels(struct smu_context *smu,
+				   enum smu_clk_type clk_type, uint32_t mask)
+{
+	uint32_t soft_min_level = 0, soft_max_level = 0;
+	uint32_t min_freq = 0, max_freq = 0;
+	int ret = 0;
+
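+	/* translate the level bitmask into the lowest/highest selected levels */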
+	soft_min_level = mask ? (ffs(mask) - 1) : 0;
+	soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+	switch (clk_type) {
+	case SMU_SOCCLK:
+		ret = vangogh_get_dpm_clk_limited(smu, clk_type,
+						soft_min_level, &min_freq);
+		if (ret)
+			return ret;
+		ret = vangogh_get_dpm_clk_limited(smu, clk_type,
+						soft_max_level, &max_freq);
+		if (ret)
+			return ret;
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+								SMU_MSG_SetSoftMaxSocclkByFreq,
+								max_freq, NULL);
+		if (ret)
+			return ret;
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+								SMU_MSG_SetHardMinSocclkByFreq,
+								min_freq, NULL);
+		if (ret)
+			return ret;
+		break;
+	case SMU_MCLK:
+	case SMU_FCLK:
+		ret = vangogh_get_dpm_clk_limited(smu,
+							clk_type, soft_min_level, &min_freq);
+		if (ret)
+			return ret;
+		ret = vangogh_get_dpm_clk_limited(smu,
+							clk_type, soft_max_level, &max_freq);
+		if (ret)
+			return ret;
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+								SMU_MSG_SetSoftMaxFclkByFreq,
+								max_freq, NULL);
+		if (ret)
+			return ret;
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+								SMU_MSG_SetHardMinFclkByFreq,
+								min_freq, NULL);
+		if (ret)
+			return ret;
+		break;
+	case SMU_VCLK:
+		ret = vangogh_get_dpm_clk_limited(smu,
+							clk_type, soft_min_level, &min_freq);
+		if (ret)
+			return ret;
+
+		ret = vangogh_get_dpm_clk_limited(smu,
+							clk_type, soft_max_level, &max_freq);
+		if (ret)
+			return ret;
+
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+								SMU_MSG_SetHardMinVcn,
+								min_freq << 16, NULL);
+		if (ret)
+			return ret;
+
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+								SMU_MSG_SetSoftMaxVcn,
+								max_freq << 16, NULL);
+		if (ret)
+			return ret;
+
+		break;
+	case SMU_DCLK:
+		ret = vangogh_get_dpm_clk_limited(smu,
+							clk_type, soft_min_level, &min_freq);
+		if (ret)
+			return ret;
+
+		ret = vangogh_get_dpm_clk_limited(smu,
+							clk_type, soft_max_level, &max_freq);
+		if (ret)
+			return ret;
+
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+							SMU_MSG_SetHardMinVcn,
+							min_freq, NULL);
+		if (ret)
+			return ret;
+
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+							SMU_MSG_SetSoftMaxVcn,
+							max_freq, NULL);
+		if (ret)
+			return ret;
+
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+	int ret = 0, i = 0;
+	uint32_t min_freq, max_freq, force_freq;
+	enum smu_clk_type clk_type;
+
+	enum smu_clk_type clks[] = {
+		SMU_SOCCLK,
+		SMU_VCLK,
+		SMU_DCLK,
+		SMU_MCLK,
+		SMU_FCLK,
+	};
+
+	for (i = 0; i < ARRAY_SIZE(clks); i++) {
+		clk_type = clks[i];
+		ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
+		if (ret)
+			return ret;
+
+		force_freq = highest ? max_freq : min_freq;
+		ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int vangogh_unforce_dpm_levels(struct smu_context *smu)
+{
+	int ret = 0, i = 0;
+	uint32_t min_freq, max_freq;
+	enum smu_clk_type clk_type;
+
+	struct clk_feature_map {
+		enum smu_clk_type clk_type;
+		uint32_t	feature;
+	} clk_feature_map[] = {
+		{SMU_MCLK,   SMU_FEATURE_DPM_FCLK_BIT},
+		{SMU_FCLK,   SMU_FEATURE_DPM_FCLK_BIT},
+		{SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
+		{SMU_VCLK,   SMU_FEATURE_VCN_DPM_BIT},
+		{SMU_DCLK,   SMU_FEATURE_VCN_DPM_BIT},
+	};
+
+	for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
+		if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature))
+			continue;
+
+		clk_type = clk_feature_map[i].clk_type;
+
+		ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
+		if (ret)
+			return ret;
+
+		ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
+{
+	int ret = 0;
+	uint32_t socclk_freq = 0, fclk_freq = 0;
+	uint32_t vclk_freq = 0, dclk_freq = 0;
+
+	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq);
+	if (ret)
+		return ret;
+
+	ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
+	if (ret)
+		return ret;
+
+	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq);
+	if (ret)
+		return ret;
+
+	ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
+	if (ret)
+		return ret;
+
+	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq);
+	if (ret)
+		return ret;
+
+	ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
+	if (ret)
+		return ret;
+
+	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq);
+	if (ret)
+		return ret;
+
+	ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static int vangogh_set_performance_level(struct smu_context *smu,
+					enum amd_dpm_forced_level level)
+{
+	int ret = 0;
+	uint32_t soc_mask, mclk_mask, fclk_mask;
+	uint32_t vclk_mask = 0, dclk_mask = 0;
+
+	switch (level) {
+	case AMD_DPM_FORCED_LEVEL_HIGH:
+		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+		smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+		smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+		ret = vangogh_force_dpm_limit_value(smu, true);
+		break;
+	case AMD_DPM_FORCED_LEVEL_LOW:
+		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+		smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+		smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+		ret = vangogh_force_dpm_limit_value(smu, false);
+		break;
+	case AMD_DPM_FORCED_LEVEL_AUTO:
+		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+		smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+		smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+		ret = vangogh_unforce_dpm_levels(smu);
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+		smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+		smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+					SMU_MSG_SetHardMinGfxClk,
+					VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL);
+		if (ret)
+			return ret;
+
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+					SMU_MSG_SetSoftMaxGfxClk,
+					VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL);
+		if (ret)
+			return ret;
+
+		ret = vangogh_get_profiling_clk_mask(smu, level,
+							&vclk_mask,
+							&dclk_mask,
+							&mclk_mask,
+							&fclk_mask,
+							&soc_mask);
+		if (ret)
+			return ret;
+
+		vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+		vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
+		vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
+		vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
+		vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask);
+
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+		smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+		smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn,
+								VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL);
+		if (ret)
+			return ret;
+
+		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn,
+								VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL);
+		if (ret)
+			return ret;
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+		smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+		smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+		ret = vangogh_get_profiling_clk_mask(smu, level,
+							NULL,
+							NULL,
+							&mclk_mask,
+							&fclk_mask,
+							NULL);
+		if (ret)
+			return ret;
+
+		vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+		vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+		smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+		smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+				VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL);
+		if (ret)
+			return ret;
+
+		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+				VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL);
+		if (ret)
+			return ret;
+
+		ret = vangogh_set_peak_clock_by_device(smu);
+		break;
+	case AMD_DPM_FORCED_LEVEL_MANUAL:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+	default:
+		break;
+	}
+	return ret;
+}
+
 static int vangogh_read_sensor(struct smu_context *smu,
 				 enum amd_pp_sensors sensor,
 				 void *data, uint32_t *size)
@@ -493,6 +1322,12 @@ static int vangogh_read_sensor(struct smu_context *smu,
 						   (uint32_t *)data);
 		*size = 4;
 		break;
+	case AMDGPU_PP_SENSOR_CPU_CLK:
+		ret = vangogh_get_smu_metrics_data(smu,
+						   METRICS_AVERAGE_CPUCLK,
+						   (uint32_t *)data);
+		*size = smu->cpu_core_num * sizeof(uint16_t);
+		break;
 	default:
 		ret = -EOPNOTSUPP;
 		break;
@@ -514,7 +1349,7 @@ static int vangogh_set_watermarks_table(struct smu_context *smu,
 
 	if (clock_ranges) {
 		if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
-		    clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
+			clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
 			return -EINVAL;
 
 		for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
@@ -575,7 +1410,7 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
 	if (ret)
 		return ret;
 
-	smu_v11_0_init_gpu_metrics_v2_0(gpu_metrics);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 0);
 
 	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
 	gpu_metrics->temperature_soc = metrics.SocTemperature;
@@ -611,22 +1446,55 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
 
+	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
 	*table = (void *)gpu_metrics;
 
 	return sizeof(struct gpu_metrics_v2_0);
 }
 
 static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
-							long input[], uint32_t size)
+					long input[], uint32_t size)
 {
 	int ret = 0;
+	int i;
+	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 
-	if (!smu->od_enabled) {
-		dev_warn(smu->adev->dev, "Fine grain is not enabled!\n");
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+		dev_warn(smu->adev->dev,
+			"pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n");
 		return -EINVAL;
 	}
 
 	switch (type) {
+	case PP_OD_EDIT_CCLK_VDDC_TABLE:
+		if (size != 3) {
+			dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n");
+			return -EINVAL;
+		}
+		if (input[0] >= smu->cpu_core_num) {
+			dev_err(smu->adev->dev, "core index is out of range, should be less than %d\n",
+				smu->cpu_core_num);
+			return -EINVAL;
+		}
+		smu->cpu_core_id_select = input[0];
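+		/* input[1] selects min (0) or max (1), input[2] is the cclk value in MHz */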
+		if (input[1] == 0) {
+			if (input[2] < smu->cpu_default_soft_min_freq) {
+				dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+					input[2], smu->cpu_default_soft_min_freq);
+				return -EINVAL;
+			}
+			smu->cpu_actual_soft_min_freq = input[2];
+		} else if (input[1] == 1) {
+			if (input[2] > smu->cpu_default_soft_max_freq) {
+				dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+					input[2], smu->cpu_default_soft_max_freq);
+				return -EINVAL;
+			}
+			smu->cpu_actual_soft_max_freq = input[2];
+		} else {
+			return -EINVAL;
+		}
+		break;
 	case PP_OD_EDIT_SCLK_VDDC_TABLE:
 		if (size != 2) {
 			dev_err(smu->adev->dev, "Input parameter number not correct\n");
@@ -635,14 +1503,16 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
 
 		if (input[0] == 0) {
 			if (input[1] < smu->gfx_default_hard_min_freq) {
-				dev_warn(smu->adev->dev, "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+				dev_warn(smu->adev->dev,
+					"Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
 					input[1], smu->gfx_default_hard_min_freq);
 				return -EINVAL;
 			}
 			smu->gfx_actual_hard_min_freq = input[1];
 		} else if (input[0] == 1) {
 			if (input[1] > smu->gfx_default_soft_max_freq) {
-				dev_warn(smu->adev->dev, "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+				dev_warn(smu->adev->dev,
+					"Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
 					input[1], smu->gfx_default_soft_max_freq);
 				return -EINVAL;
 			}
@@ -658,6 +1528,8 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
 		} else {
 			smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
 			smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+			smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+			smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
 
 			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
 									smu->gfx_actual_hard_min_freq, NULL);
@@ -672,6 +1544,29 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
 				dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
 				return ret;
 			}
+
+			if (smu->adev->pm.fw_version < 0x43f1b00) {
+				dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
+				break;
+			}
+
+			for (i = 0; i < smu->cpu_core_num; i++) {
+				ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
+								      (i << 20) | smu->cpu_actual_soft_min_freq,
+								      NULL);
+				if (ret) {
+					dev_err(smu->adev->dev, "Set hard min cclk failed!");
+					return ret;
+				}
+
+				ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
+								      (i << 20) | smu->cpu_actual_soft_max_freq,
+								      NULL);
+				if (ret) {
+					dev_err(smu->adev->dev, "Set soft max cclk failed!");
+					return ret;
+				}
+			}
 		}
 		break;
 	case PP_OD_COMMIT_DPM_TABLE:
@@ -680,8 +1575,10 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
 			return -EINVAL;
 		} else {
 			if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
-				dev_err(smu->adev->dev, "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
-				smu->gfx_actual_hard_min_freq, smu->gfx_actual_soft_max_freq);
+				dev_err(smu->adev->dev,
+					"The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+					smu->gfx_actual_hard_min_freq,
+					smu->gfx_actual_soft_max_freq);
 				return -EINVAL;
 			}
 
@@ -698,6 +1595,29 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
 				dev_err(smu->adev->dev, "Set soft max sclk failed!");
 				return ret;
 			}
+
+			if (smu->adev->pm.fw_version < 0x43f1b00) {
+				dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
+				break;
+			}
+
+			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
+							      ((smu->cpu_core_id_select << 20)
+							       | smu->cpu_actual_soft_min_freq),
+							      NULL);
+			if (ret) {
+				dev_err(smu->adev->dev, "Set hard min cclk failed!");
+				return ret;
+			}
+
+			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
+							      ((smu->cpu_core_id_select << 20)
+							       | smu->cpu_actual_soft_max_freq),
+							      NULL);
+			if (ret) {
+				dev_err(smu->adev->dev, "Set soft max cclk failed!");
+				return ret;
+			}
 		}
 		break;
 	default:
@@ -723,18 +1643,245 @@ static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
 	smu->gfx_actual_hard_min_freq = 0;
 	smu->gfx_actual_soft_max_freq = 0;
 
+	smu->cpu_default_soft_min_freq = 1400;
+	smu->cpu_default_soft_max_freq = 3500;
+	smu->cpu_actual_soft_min_freq = 0;
+	smu->cpu_actual_soft_max_freq = 0;
+
+	return 0;
+}
+
+static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+	DpmClocks_t *table = smu->smu_table.clocks_table;
+	int i;
+
+	if (!clock_table || !table)
+		return -EINVAL;
+
+	for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
+		clock_table->SocClocks[i].Freq = table->SocClocks[i];
+		clock_table->SocClocks[i].Vol = table->SocVoltage[i];
+	}
+
+	for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+		clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk;
+		clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage;
+	}
+
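+	/* memclk levels come from the same DF P-state table as fclk */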
+	for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+		clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk;
+		clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage;
+	}
+
 	return 0;
 }
 
 static int vangogh_system_features_control(struct smu_context *smu, bool en)
 {
 	struct amdgpu_device *adev = smu->adev;
+	struct smu_feature *feature = &smu->smu_feature;
+	uint32_t feature_mask[2];
+	int ret = 0;
 
 	if (adev->pm.fw_version >= 0x43f1700)
-		return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
-						en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL);
-	else
+		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
+						      en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL);
+
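+	/* refresh the cached feature bitmaps from the firmware-reported mask */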
+	bitmap_zero(feature->enabled, feature->feature_num);
+	bitmap_zero(feature->supported, feature->feature_num);
+
+	if (!en)
+		return ret;
+
+	ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+	if (ret)
+		return ret;
+
+	bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
+		    feature->feature_num);
+	bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
+		    feature->feature_num);
+
+	return 0;
+}
+
+static int vangogh_post_smu_init(struct smu_context *smu)
+{
+	struct amdgpu_device *adev = smu->adev;
+	uint32_t tmp;
+	int ret = 0;
+	uint8_t aon_bits = 0;
+	/* Two CUs in one WGP */
+	uint32_t req_active_wgps = adev->gfx.cu_info.number / 2;
+	uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
+		adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
+
+	/* the allow message will be sent after the enable message on Vangogh */
+	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
+			(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
+		if (ret) {
+			dev_err(adev->dev, "Failed to Enable GfxOff!\n");
+			return ret;
+		}
+	} else {
+		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+		dev_info(adev->dev, "If GFX DPM or power gate disabled, disable GFXOFF\n");
+	}
+
+	/* if all CUs are active, no need to power off any WGPs */
+	if (total_cu == adev->gfx.cu_info.number)
 		return 0;
+
+	/*
+	 * Calculate the total bits number of always on WGPs for all SA/SEs in
+	 * RLC_PG_ALWAYS_ON_WGP_MASK.
+	 */
+	tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK));
+	tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK;
+
+	aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
+
+	/* Do not request fewer WGPs than are set in the AON_WGP_MASK */
+	if (aon_bits > req_active_wgps) {
+		dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n");
+		return 0;
+	}
+
+	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp,
+					       req_active_wgps, NULL);
+}
+
+static int vangogh_mode_reset(struct smu_context *smu, int type)
+{
+	int ret = 0, index = 0;
+
+	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+					       SMU_MSG_GfxDeviceDriverReset);
+	if (index < 0)
+		return index == -EACCES ? 0 : index;
+
+	mutex_lock(&smu->message_lock);
+
+	ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type);
+
+	mutex_unlock(&smu->message_lock);
+
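+	/* give the SMU some time to process the reset request */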
+	mdelay(10);
+
+	return ret;
+}
+
+static int vangogh_mode2_reset(struct smu_context *smu)
+{
+	return vangogh_mode_reset(smu, SMU_RESET_MODE_2);
+}
+
+static int vangogh_get_power_limit(struct smu_context *smu)
+{
+	struct smu_11_5_power_context *power_context =
+								smu->smu_power.power_context;
+	uint32_t ppt_limit;
+	int ret = 0;
+
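+	/* older SMU firmware does not expose the PPT limit messages */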
+	if (smu->adev->pm.fw_version < 0x43f1e00)
+		return ret;
+
+	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit);
+	if (ret) {
+		dev_err(smu->adev->dev, "Get slow PPT limit failed!\n");
+		return ret;
+	}
+	/* convert from milliwatt to watt */
+	smu->current_power_limit = ppt_limit / 1000;
+	smu->max_power_limit = 29;
+
+	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit);
+	if (ret) {
+		dev_err(smu->adev->dev, "Get fast PPT limit failed!\n");
+		return ret;
+	}
+	/* convert from milliwatt to watt */
+	power_context->current_fast_ppt_limit = ppt_limit / 1000;
+	power_context->max_fast_ppt_limit = 30;
+
+	return ret;
+}
+
+static int vangogh_get_ppt_limit(struct smu_context *smu,
+								uint32_t *ppt_limit,
+								enum smu_ppt_limit_type type,
+								enum smu_ppt_limit_level level)
+{
+	struct smu_11_5_power_context *power_context =
+							smu->smu_power.power_context;
+
+	if (!power_context)
+		return -EOPNOTSUPP;
+
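+	/* only the fast PPT limit is tracked in the power context */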
+	if (type == SMU_FAST_PPT_LIMIT) {
+		switch (level) {
+		case SMU_PPT_LIMIT_MAX:
+			*ppt_limit = power_context->max_fast_ppt_limit;
+			break;
+		case SMU_PPT_LIMIT_CURRENT:
+			*ppt_limit = power_context->current_fast_ppt_limit;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int vangogh_set_power_limit(struct smu_context *smu, uint32_t ppt_limit)
+{
+	struct smu_11_5_power_context *power_context =
+							smu->smu_power.power_context;
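+	/* the limit type is encoded in the top byte of ppt_limit */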
+	uint32_t limit_type = ppt_limit >> 24;
+	int ret = 0;
+
+	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
+		return -EOPNOTSUPP;
+	}
+
+	switch (limit_type) {
+	case SMU_DEFAULT_PPT_LIMIT:
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+				SMU_MSG_SetSlowPPTLimit,
+				ppt_limit * 1000, /* convert from watt to milliwatt */
+				NULL);
+		if (ret)
+			return ret;
+
+		smu->current_power_limit = ppt_limit;
+		break;
+	case SMU_FAST_PPT_LIMIT:
+		ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24);
+		if (ppt_limit > power_context->max_fast_ppt_limit) {
+			dev_err(smu->adev->dev,
+				"New power limit (%d) is over the max allowed %d\n",
+				ppt_limit, power_context->max_fast_ppt_limit);
+			return -EINVAL;
+		}
+
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+				SMU_MSG_SetFastPPTLimit,
+				ppt_limit * 1000, /* convert from watt to milliwatt */
+				NULL);
+		if (ret)
+			return ret;
+
+		power_context->current_fast_ppt_limit = ppt_limit;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
 }
 
 static const struct pptable_funcs vangogh_ppt_funcs = {
@@ -746,7 +1893,6 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
 	.init_power = smu_v11_0_init_power,
 	.fini_power = smu_v11_0_fini_power,
 	.register_irq_handler = smu_v11_0_register_irq_handler,
-	.get_allowed_feature_mask = vangogh_get_allowed_feature_mask,
 	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
 	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
 	.send_smc_msg = smu_cmn_send_smc_msg,
@@ -765,6 +1911,18 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
 	.set_default_dpm_table = vangogh_set_default_dpm_tables,
 	.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
 	.system_features_control = vangogh_system_features_control,
+	.feature_is_enabled = smu_cmn_feature_is_enabled,
+	.set_power_profile_mode = vangogh_set_power_profile_mode,
+	.get_power_profile_mode = vangogh_get_power_profile_mode,
+	.get_dpm_clock_table = vangogh_get_dpm_clock_table,
+	.force_clk_levels = vangogh_force_clk_levels,
+	.set_performance_level = vangogh_set_performance_level,
+	.post_init = vangogh_post_smu_init,
+	.mode2_reset = vangogh_mode2_reset,
+	.gfx_off_control = smu_v11_0_gfx_off_control,
+	.get_ppt_limit = vangogh_get_ppt_limit,
+	.get_power_limit = vangogh_get_power_limit,
+	.set_power_limit = vangogh_set_power_limit,
 };
 
 void vangogh_set_ppt_funcs(struct smu_context *smu)
@@ -773,5 +1931,6 @@ void vangogh_set_ppt_funcs(struct smu_context *smu)
 	smu->message_map = vangogh_message_map;
 	smu->feature_map = vangogh_feature_mask_map;
 	smu->table_map = vangogh_table_map;
+	smu->workload_map = vangogh_workload_map;
 	smu->is_apu = true;
 }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
index eab45549307646c1a3665d44d41d2b91034e005c..c56d4583dc7233aa4ab4a6f6dfbe2e96dea74ff2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
@@ -28,9 +28,29 @@
 extern void vangogh_set_ppt_funcs(struct smu_context *smu);
 
 /* UMD PState Vangogh Msg Parameters in MHz */
-#define VANGOGH_UMD_PSTATE_GFXCLK       700
-#define VANGOGH_UMD_PSTATE_SOCCLK       678
-#define VANGOGH_UMD_PSTATE_FCLK         800
+#define VANGOGH_UMD_PSTATE_STANDARD_GFXCLK       1100
+#define VANGOGH_UMD_PSTATE_STANDARD_SOCCLK       600
+#define VANGOGH_UMD_PSTATE_STANDARD_FCLK         800
+#define VANGOGH_UMD_PSTATE_STANDARD_VCLK         705
+#define VANGOGH_UMD_PSTATE_STANDARD_DCLK         600
+
+#define VANGOGH_UMD_PSTATE_PEAK_GFXCLK       1300
+#define VANGOGH_UMD_PSTATE_PEAK_SOCCLK       600
+#define VANGOGH_UMD_PSTATE_PEAK_FCLK         800
+#define VANGOGH_UMD_PSTATE_PEAK_VCLK         705
+#define VANGOGH_UMD_PSTATE_PEAK_DCLK         600
+
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_GFXCLK       400
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_SOCCLK       1000
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_FCLK         800
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_VCLK         1000
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_DCLK         800
+
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_GFXCLK       1100
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_SOCCLK       1000
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_FCLK         400
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_VCLK         1000
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_DCLK         800
 
 /* RLC Power Status */
 #define RLC_STATUS_OFF          0
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 9a9697038016027d84c2cc2696a86922df5a8eda..5faa509f0dba9fb84c1a4699cd826c25218ba88d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -56,8 +56,6 @@ static struct cmn2asic_msg_mapping renoir_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(PowerUpSdma,                    PPSMC_MSG_PowerUpSdma,                  1),
 	MSG_MAP(SetHardMinIspclkByFreq,         PPSMC_MSG_SetHardMinIspclkByFreq,       1),
 	MSG_MAP(SetHardMinVcn,                  PPSMC_MSG_SetHardMinVcn,                1),
-	MSG_MAP(Spare1,                         PPSMC_MSG_spare1,                       1),
-	MSG_MAP(Spare2,                         PPSMC_MSG_spare2,                       1),
 	MSG_MAP(SetAllowFclkSwitch,             PPSMC_MSG_SetAllowFclkSwitch,           1),
 	MSG_MAP(SetMinVideoGfxclkFreq,          PPSMC_MSG_SetMinVideoGfxclkFreq,        1),
 	MSG_MAP(ActiveProcessNotify,            PPSMC_MSG_ActiveProcessNotify,          1),
@@ -344,12 +342,142 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
 	return ret;
 }
 
+static int renoir_od_edit_dpm_table(struct smu_context *smu,
+							enum PP_OD_DPM_TABLE_COMMAND type,
+							long input[], uint32_t size)
+{
+	int ret = 0;
+	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+		dev_warn(smu->adev->dev,
+			"pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n");
+		return -EINVAL;
+	}
+
+	switch (type) {
+	case PP_OD_EDIT_SCLK_VDDC_TABLE:
+		if (size != 2) {
+			dev_err(smu->adev->dev, "Input parameter number not correct\n");
+			return -EINVAL;
+		}
+
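+		/* input[0] selects min (0) or max (1), input[1] is the sclk value in MHz */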
+		if (input[0] == 0) {
+			if (input[1] < smu->gfx_default_hard_min_freq) {
+				dev_warn(smu->adev->dev,
+					"Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+					input[1], smu->gfx_default_hard_min_freq);
+				return -EINVAL;
+			}
+			smu->gfx_actual_hard_min_freq = input[1];
+		} else if (input[0] == 1) {
+			if (input[1] > smu->gfx_default_soft_max_freq) {
+				dev_warn(smu->adev->dev,
+					"Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+					input[1], smu->gfx_default_soft_max_freq);
+				return -EINVAL;
+			}
+			smu->gfx_actual_soft_max_freq = input[1];
+		} else {
+			return -EINVAL;
+		}
+		break;
+	case PP_OD_RESTORE_DEFAULT_TABLE:
+		if (size != 0) {
+			dev_err(smu->adev->dev, "Input parameter number not correct\n");
+			return -EINVAL;
+		}
+		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+								SMU_MSG_SetHardMinGfxClk,
+								smu->gfx_actual_hard_min_freq,
+								NULL);
+		if (ret) {
+			dev_err(smu->adev->dev, "Restore the default hard min sclk failed!");
+			return ret;
+		}
+
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+								SMU_MSG_SetSoftMaxGfxClk,
+								smu->gfx_actual_soft_max_freq,
+								NULL);
+		if (ret) {
+			dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
+			return ret;
+		}
+		break;
+	case PP_OD_COMMIT_DPM_TABLE:
+		if (size != 0) {
+			dev_err(smu->adev->dev, "Input parameter number not correct\n");
+			return -EINVAL;
+		} else {
+			if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
+				dev_err(smu->adev->dev,
+					"The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+					smu->gfx_actual_hard_min_freq,
+					smu->gfx_actual_soft_max_freq);
+				return -EINVAL;
+			}
+
+			ret = smu_cmn_send_smc_msg_with_param(smu,
+								SMU_MSG_SetHardMinGfxClk,
+								smu->gfx_actual_hard_min_freq,
+								NULL);
+			if (ret) {
+				dev_err(smu->adev->dev, "Set hard min sclk failed!");
+				return ret;
+			}
+
+			ret = smu_cmn_send_smc_msg_with_param(smu,
+								SMU_MSG_SetSoftMaxGfxClk,
+								smu->gfx_actual_soft_max_freq,
+								NULL);
+			if (ret) {
+				dev_err(smu->adev->dev, "Set soft max sclk failed!");
+				return ret;
+			}
+		}
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	return ret;
+}
+
+static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+	uint32_t min = 0, max = 0;
+	int ret = 0;
+
+	ret = smu_cmn_send_smc_msg_with_param(smu,
+								SMU_MSG_GetMinGfxclkFrequency,
+								0, &min);
+	if (ret)
+		return ret;
+	ret = smu_cmn_send_smc_msg_with_param(smu,
+								SMU_MSG_GetMaxGfxclkFrequency,
+								0, &max);
+	if (ret)
+		return ret;
+
+	smu->gfx_default_hard_min_freq = min;
+	smu->gfx_default_soft_max_freq = max;
+	smu->gfx_actual_hard_min_freq = 0;
+	smu->gfx_actual_soft_max_freq = 0;
+
+	return 0;
+}
+
 static int renoir_print_clk_levels(struct smu_context *smu,
 			enum smu_clk_type clk_type, char *buf)
 {
 	int i, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
 	SmuMetrics_t metrics;
+	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 	bool cur_value_match_level = false;
 
 	memset(&metrics, 0, sizeof(metrics));
@@ -359,6 +487,30 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 		return ret;
 
 	switch (clk_type) {
+	case SMU_OD_RANGE:
+		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+			ret = smu_cmn_send_smc_msg_with_param(smu,
+						SMU_MSG_GetMinGfxclkFrequency,
+						0, &min);
+			if (ret)
+				return ret;
+			ret = smu_cmn_send_smc_msg_with_param(smu,
+						SMU_MSG_GetMaxGfxclkFrequency,
+						0, &max);
+			if (ret)
+				return ret;
+			size += sprintf(buf + size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
+		}
+		break;
+	case SMU_OD_SCLK:
+		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+			min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+			max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
+			size += sprintf(buf + size, "OD_SCLK\n");
+			size += sprintf(buf + size, "0:%10uMhz\n", min);
+			size += sprintf(buf + size, "1:%10uMhz\n", max);
+		}
+		break;
 	case SMU_GFXCLK:
 	case SMU_SCLK:
 		/* retirve table returned paramters unit is MHz */
@@ -399,23 +551,35 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 		cur_value = metrics.ClockFrequency[CLOCK_FCLK];
 		break;
 	default:
-		return -EINVAL;
+		break;
 	}
 
-	for (i = 0; i < count; i++) {
-		ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
-		if (ret)
-			return ret;
-		if (!value)
-			continue;
-		size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
-				cur_value == value ? "*" : "");
-		if (cur_value == value)
-			cur_value_match_level = true;
-	}
+	switch (clk_type) {
+	case SMU_GFXCLK:
+	case SMU_SCLK:
+	case SMU_SOCCLK:
+	case SMU_MCLK:
+	case SMU_DCEFCLK:
+	case SMU_FCLK:
+		for (i = 0; i < count; i++) {
+			ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
+			if (ret)
+				return ret;
+			if (!value)
+				continue;
+			size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+					cur_value == value ? "*" : "");
+			if (cur_value == value)
+				cur_value_match_level = true;
+		}
+
+		if (!cur_value_match_level)
+			size += sprintf(buf + size, "   %uMhz *\n", cur_value);
 
-	if (!cur_value_match_level)
-		size += sprintf(buf + size, "   %uMhz *\n", cur_value);
+		break;
+	default:
+		break;
+	}
 
 	return size;
 }
@@ -667,6 +831,10 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
 		return -EINVAL;
 	}
 
+	if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
+			profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
+		return 0;
+
 	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
 	workload_type = smu_cmn_to_asic_specific_index(smu,
 						       CMN2ASIC_MAPPING_WORKLOAD,
@@ -725,15 +893,27 @@ static int renoir_set_performance_level(struct smu_context *smu,
 
 	switch (level) {
 	case AMD_DPM_FORCED_LEVEL_HIGH:
+		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
 		ret = renoir_force_dpm_limit_value(smu, true);
 		break;
 	case AMD_DPM_FORCED_LEVEL_LOW:
+		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
 		ret = renoir_force_dpm_limit_value(smu, false);
 		break;
 	case AMD_DPM_FORCED_LEVEL_AUTO:
+		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
 		ret = renoir_unforce_dpm_levels(smu);
 		break;
 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
 		ret = smu_cmn_send_smc_msg_with_param(smu,
 						      SMU_MSG_SetHardMinGfxClk,
 						      RENOIR_UMD_PSTATE_GFXCLK,
@@ -786,6 +966,9 @@ static int renoir_set_performance_level(struct smu_context *smu,
 		break;
 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
 		ret = renoir_get_profiling_clk_mask(smu, level,
 						    &sclk_mask,
 						    &mclk_mask,
@@ -797,6 +980,9 @@ static int renoir_set_performance_level(struct smu_context *smu,
 		renoir_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
 		break;
 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
 		ret = renoir_set_peak_clock_by_device(smu);
 		break;
 	case AMD_DPM_FORCED_LEVEL_MANUAL:
@@ -944,7 +1130,7 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
 		*value = metrics->AverageUvdActivity / 100;
 		break;
 	case METRICS_AVERAGE_SOCKETPOWER:
-		*value = metrics->CurrentSocketPower << 8;
+		*value = (metrics->CurrentSocketPower << 8) / 1000;
 		break;
 	case METRICS_TEMPERATURE_EDGE:
 		*value = (metrics->GfxTemperature / 100) *
@@ -1072,7 +1258,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
 	if (ret)
 		return ret;
 
-	smu_v12_0_init_gpu_metrics_v2_0(gpu_metrics);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 0);
 
 	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
 	gpu_metrics->temperature_soc = metrics.SocTemperature;
@@ -1113,6 +1299,8 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->fan_pwm = metrics.FanPwm;
 
+	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
 	*table = (void *)gpu_metrics;
 
 	return sizeof(struct gpu_metrics_v2_0);
@@ -1160,6 +1348,8 @@ static const struct pptable_funcs renoir_ppt_funcs = {
 	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
 	.get_gpu_metrics = renoir_get_gpu_metrics,
 	.gfx_state_change_set = renoir_gfx_state_change_set,
+	.set_fine_grain_gfx_freq_parameters = renoir_set_fine_grain_gfx_freq_parameters,
+	.od_edit_dpm_table = renoir_od_edit_dpm_table,
 };
 
 void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 06abf2a7ce9e9e0f7f116f5c36223f5264d838b3..6cc4855c8a37148431716dac2152178001f3542a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -278,15 +278,3 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
 
 	return ret;
 }
-
-void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics)
-{
-	memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v2_0));
-
-	gpu_metrics->common_header.structure_size =
-				sizeof(struct gpu_metrics_v2_0);
-	gpu_metrics->common_header.format_revision = 2;
-	gpu_metrics->common_header.content_revision = 0;
-
-	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
-}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index f8260769061c538e630b60f1857d161806d9ba48..bb620fdd4cd2970268402997f28ed1b6bde57a5d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -68,14 +68,6 @@ static const char *smu_get_message_name(struct smu_context *smu,
 	return __smu_message_names[type];
 }
 
-static void smu_cmn_send_msg_without_waiting(struct smu_context *smu,
-					     uint16_t msg)
-{
-	struct amdgpu_device *adev = smu->adev;
-
-	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
-}
-
 static void smu_cmn_read_arg(struct smu_context *smu,
 			     uint32_t *arg)
 {
@@ -92,7 +84,7 @@ static int smu_cmn_wait_for_response(struct smu_context *smu)
 	for (i = 0; i < timeout; i++) {
 		cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
 		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
-			return cur_value == 0x1 ? 0 : -EIO;
+			return cur_value;
 
 		udelay(1);
 	}
@@ -101,7 +93,29 @@ static int smu_cmn_wait_for_response(struct smu_context *smu)
 	if (i == timeout)
 		return -ETIME;
 
-	return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+	return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
+}
+
+int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
+				     uint16_t msg, uint32_t param)
+{
+	struct amdgpu_device *adev = smu->adev;
+	int ret;
+
+	ret = smu_cmn_wait_for_response(smu);
+	if (ret != 0x1) {
+		dev_err(adev->dev, "Msg issuing pre-check failed and "
+		       "SMU may be not in the right state!\n");
+		if (ret != -ETIME)
+			ret = -EIO;
+		return ret;
+	}
+
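+	/* clear the response register, then write the argument and the message */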
+	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+
+	return 0;
 }
 
 int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
@@ -122,29 +136,28 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
 		return index == -EACCES ? 0 : index;
 
 	mutex_lock(&smu->message_lock);
-	ret = smu_cmn_wait_for_response(smu);
-	if (ret) {
-		dev_err(adev->dev, "Msg issuing pre-check failed and "
-		       "SMU may be not in the right state!\n");
+	ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, param);
+	if (ret)
 		goto out;
-	}
-
-	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
-	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
-
-	smu_cmn_send_msg_without_waiting(smu, (uint16_t)index);
 
 	ret = smu_cmn_wait_for_response(smu);
-	if (ret) {
-		dev_err(adev->dev, "failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
-		       smu_get_message_name(smu, msg), index, param, ret);
+	if (ret != 0x1) {
+		if (ret == -ETIME) {
+			dev_err(adev->dev, "message: %15s (%d) \tparam: 0x%08x is timeout (no response)\n",
+				smu_get_message_name(smu, msg), index, param);
+		} else {
+			dev_err(adev->dev, "failed send message: %15s (%d) \tparam: 0x%08x response %#x\n",
+				smu_get_message_name(smu, msg), index, param,
+				ret);
+			ret = -EIO;
+		}
 		goto out;
 	}
 
 	if (read_arg)
 		smu_cmn_read_arg(smu, read_arg);
 
+	ret = 0; /* 0 as driver return value */
 out:
 	mutex_unlock(&smu->message_lock);
 	return ret;
@@ -269,11 +282,13 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu,
 			       enum smu_feature_mask mask)
 {
 	struct smu_feature *feature = &smu->smu_feature;
+	struct amdgpu_device *adev = smu->adev;
 	int feature_id;
 	int ret = 0;
 
-	if (smu->is_apu)
+	if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH)
 		return 1;
+
 	feature_id = smu_cmn_to_asic_specific_index(smu,
 						    CMN2ASIC_MAPPING_FEATURE,
 						    mask);
@@ -731,3 +746,31 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
 
 	return ret;
 }
+
+void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
+{
+	struct metrics_table_header *header = (struct metrics_table_header *)table;
+	uint16_t structure_size;
+
+#define METRICS_VERSION(a, b)	(((a) << 16) | (b))
+
+	switch (METRICS_VERSION(frev, crev)) {
+	case METRICS_VERSION(1, 0):
+		structure_size = sizeof(struct gpu_metrics_v1_0);
+		break;
+	case METRICS_VERSION(2, 0):
+		structure_size = sizeof(struct gpu_metrics_v2_0);
+		break;
+	default:
+		return;
+	}
+
+#undef METRICS_VERSION
+
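+	/* mark every field as unavailable until it gets filled in */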
+	memset(header, 0xFF, structure_size);
+
+	header->format_revision = frev;
+	header->content_revision = crev;
+	header->structure_size = structure_size;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 01e825d83d8df87c59b1ef39afa1eae6d15b0a1f..c6925018557555c91e4efc8ab4ee3263f07b52b8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -26,6 +26,8 @@
 #include "amdgpu_smu.h"
 
 #if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4)
+int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
+				     uint16_t msg, uint32_t param);
 int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
 				    enum smu_message_type msg,
 				    uint32_t param,
@@ -95,5 +97,7 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
 			      void *metrics_table,
 			      bool bypass_cache);
 
+void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
+
 #endif
 #endif
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 042d7b54a6deae4264620c2d10d934b9982e3210..895cdd991af688b6aa5482adadfc3f05387efd4f 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -162,15 +162,10 @@ static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
 	.atomic_update = arc_pgu_plane_atomic_update,
 };
 
-static void arc_pgu_plane_destroy(struct drm_plane *plane)
-{
-	drm_plane_cleanup(plane);
-}
-
 static const struct drm_plane_funcs arc_pgu_plane_funcs = {
 	.update_plane		= drm_atomic_helper_update_plane,
 	.disable_plane		= drm_atomic_helper_disable_plane,
-	.destroy		= arc_pgu_plane_destroy,
+	.destroy		= drm_plane_cleanup,
 	.reset			= drm_atomic_helper_plane_reset,
 	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
 	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
@@ -213,7 +208,7 @@ int arc_pgu_setup_crtc(struct drm_device *drm)
 	ret = drm_crtc_init_with_planes(drm, &arcpgu->crtc, primary, NULL,
 					&arc_pgu_crtc_funcs, NULL);
 	if (ret) {
-		arc_pgu_plane_destroy(primary);
+		drm_plane_cleanup(primary);
 		return ret;
 	}
 
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index f164818ec477a5dfbf694231d400ed0b6c4e3d03..077d006b1fbf50f9488203969d0fab5c8a9498b5 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -145,7 +145,7 @@ static void arcpgu_debugfs_init(struct drm_minor *minor)
 }
 #endif
 
-static struct drm_driver arcpgu_drm_driver = {
+static const struct drm_driver arcpgu_drm_driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
 	.name = "arcpgu",
 	.desc = "ARC PGU Controller",
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 4b485eb512e2e3686b2025aac69b0beb5f0a36eb..59172acb973803d29c853ea4f2ad9a5538770ae3 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -550,7 +550,6 @@ static void komeda_crtc_vblank_disable(struct drm_crtc *crtc)
 }
 
 static const struct drm_crtc_funcs komeda_crtc_funcs = {
-	.gamma_set		= drm_atomic_helper_legacy_gamma_set,
 	.destroy		= drm_crtc_cleanup,
 	.set_config		= drm_atomic_helper_set_config,
 	.page_flip		= drm_atomic_helper_page_flip,
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 108e7a31bd26bed69a4349b3a8c534befb7ce38d..494075ddbef68374aaca8927a1141379925adb38 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -510,7 +510,6 @@ static void malidp_crtc_disable_vblank(struct drm_crtc *crtc)
 }
 
 static const struct drm_crtc_funcs malidp_crtc_funcs = {
-	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 	.destroy = drm_crtc_cleanup,
 	.set_config = drm_atomic_helper_set_config,
 	.page_flip = drm_atomic_helper_page_flip,
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 3ebcf5a52c8bc42421e99a90638dc0a1cdc25a3b..b7bb90ae787f7cb209aedff1caf3fa383f340f50 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -820,7 +820,6 @@ static const struct drm_crtc_funcs armada_crtc_funcs = {
 	.cursor_set	= armada_drm_crtc_cursor_set,
 	.cursor_move	= armada_drm_crtc_cursor_move,
 	.destroy	= armada_drm_crtc_destroy,
-	.gamma_set	= drm_atomic_helper_legacy_gamma_set,
 	.set_config	= drm_atomic_helper_set_config,
 	.page_flip	= drm_atomic_helper_page_flip,
 	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c
index 742d43a7edf433f6d6631f44bd72a3146c44aa06..fac1ee79c372e65af6be7d2d1cc7479f111a27a5 100644
--- a/drivers/gpu/drm/ast/ast_cursor.c
+++ b/drivers/gpu/drm/ast/ast_cursor.c
@@ -39,7 +39,6 @@ static void ast_cursor_fini(struct ast_private *ast)
 
 	for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
 		gbo = ast->cursor.gbo[i];
-		drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]);
 		drm_gem_vram_unpin(gbo);
 		drm_gem_vram_put(gbo);
 	}
@@ -53,14 +52,13 @@ static void ast_cursor_release(struct drm_device *dev, void *ptr)
 }
 
 /*
- * Allocate cursor BOs and pins them at the end of VRAM.
+ * Allocate cursor BOs and pin them at the end of VRAM.
  */
 int ast_cursor_init(struct ast_private *ast)
 {
 	struct drm_device *dev = &ast->base;
 	size_t size, i;
 	struct drm_gem_vram_object *gbo;
-	struct dma_buf_map map;
 	int ret;
 
 	size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
@@ -77,15 +75,7 @@ int ast_cursor_init(struct ast_private *ast)
 			drm_gem_vram_put(gbo);
 			goto err_drm_gem_vram_put;
 		}
-		ret = drm_gem_vram_vmap(gbo, &map);
-		if (ret) {
-			drm_gem_vram_unpin(gbo);
-			drm_gem_vram_put(gbo);
-			goto err_drm_gem_vram_put;
-		}
-
 		ast->cursor.gbo[i] = gbo;
-		ast->cursor.map[i] = map;
 	}
 
 	return drmm_add_action_or_reset(dev, ast_cursor_release, NULL);
@@ -94,7 +84,6 @@ int ast_cursor_init(struct ast_private *ast)
 	while (i) {
 		--i;
 		gbo = ast->cursor.gbo[i];
-		drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]);
 		drm_gem_vram_unpin(gbo);
 		drm_gem_vram_put(gbo);
 	}
@@ -168,38 +157,37 @@ static void update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int h
 int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = &ast->base;
-	struct drm_gem_vram_object *gbo;
-	struct dma_buf_map map;
-	int ret;
-	void *src;
+	struct drm_gem_vram_object *dst_gbo = ast->cursor.gbo[ast->cursor.next_index];
+	struct drm_gem_vram_object *src_gbo = drm_gem_vram_of_gem(fb->obj[0]);
+	struct dma_buf_map src_map, dst_map;
 	void __iomem *dst;
+	void *src;
+	int ret;
 
 	if (drm_WARN_ON_ONCE(dev, fb->width > AST_MAX_HWC_WIDTH) ||
 	    drm_WARN_ON_ONCE(dev, fb->height > AST_MAX_HWC_HEIGHT))
 		return -EINVAL;
 
-	gbo = drm_gem_vram_of_gem(fb->obj[0]);
-
-	ret = drm_gem_vram_pin(gbo, 0);
+	ret = drm_gem_vram_vmap(src_gbo, &src_map);
 	if (ret)
 		return ret;
-	ret = drm_gem_vram_vmap(gbo, &map);
-	if (ret)
-		goto err_drm_gem_vram_unpin;
-	src = map.vaddr; /* TODO: Use mapping abstraction properly */
+	src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
 
-	dst = ast->cursor.map[ast->cursor.next_index].vaddr_iomem;
+	ret = drm_gem_vram_vmap(dst_gbo, &dst_map);
+	if (ret)
+		goto err_drm_gem_vram_vunmap;
+	dst = dst_map.vaddr_iomem; /* TODO: Use mapping abstraction properly */
 
 	/* do data transfer to cursor BO */
 	update_cursor_image(dst, src, fb->width, fb->height);
 
-	drm_gem_vram_vunmap(gbo, &map);
-	drm_gem_vram_unpin(gbo);
+	drm_gem_vram_vunmap(dst_gbo, &dst_map);
+	drm_gem_vram_vunmap(src_gbo, &src_map);
 
 	return 0;
 
-err_drm_gem_vram_unpin:
-	drm_gem_vram_unpin(gbo);
+err_drm_gem_vram_vunmap:
+	drm_gem_vram_vunmap(src_gbo, &src_map);
 	return ret;
 }
 
@@ -251,17 +239,26 @@ static void ast_cursor_set_location(struct ast_private *ast, u16 x, u16 y,
 void ast_cursor_show(struct ast_private *ast, int x, int y,
 		     unsigned int offset_x, unsigned int offset_y)
 {
+	struct drm_device *dev = &ast->base;
+	struct drm_gem_vram_object *gbo = ast->cursor.gbo[ast->cursor.next_index];
+	struct dma_buf_map map;
 	u8 x_offset, y_offset;
 	u8 __iomem *dst;
 	u8 __iomem *sig;
 	u8 jreg;
+	int ret;
 
-	dst = ast->cursor.map[ast->cursor.next_index].vaddr;
+	ret = drm_gem_vram_vmap(gbo, &map);
+	if (drm_WARN_ONCE(dev, ret, "drm_gem_vram_vmap() failed, ret=%d\n", ret))
+		return;
+	dst = map.vaddr_iomem; /* TODO: Use mapping abstraction properly */
 
 	sig = dst + AST_HWC_SIZE;
 	writel(x, sig + AST_HWC_SIGNATURE_X);
 	writel(y, sig + AST_HWC_SIGNATURE_Y);
 
+	drm_gem_vram_vunmap(gbo, &map);
+
 	if (x < 0) {
 		x_offset = (-x) + offset_x;
 		x = 0;
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 667b450606ef8a0284bc3808dd9992f76235a2de..ea8164e7a6dc6f81bb6db7a26baa16101e3299f2 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -147,7 +147,7 @@ static int ast_drm_freeze(struct drm_device *dev)
 	error = drm_mode_config_helper_suspend(dev);
 	if (error)
 		return error;
-	pci_save_state(dev->pdev);
+	pci_save_state(to_pci_dev(dev->dev));
 	return 0;
 }
 
@@ -162,7 +162,7 @@ static int ast_drm_resume(struct drm_device *dev)
 {
 	int ret;
 
-	if (pci_enable_device(dev->pdev))
+	if (pci_enable_device(to_pci_dev(dev->dev)))
 		return -EIO;
 
 	ret = ast_drm_thaw(dev);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index ccaff81924ee00faee43c830b35c6ec34d8b864a..f871fc36c2f7aaa320de71b2bc60b516ae2616f5 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -28,7 +28,6 @@
 #ifndef __AST_DRV_H__
 #define __AST_DRV_H__
 
-#include <linux/dma-buf-map.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/io.h>
@@ -133,7 +132,6 @@ struct ast_private {
 
 	struct {
 		struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM];
-		struct dma_buf_map map[AST_DEFAULT_HWC_NUM];
 		unsigned int next_index;
 	} cursor;
 
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 1b13199858cbafa2b80be32c1cfe306009c66905..0ac3c2039c4b19ec0f2e5b7adfbd43cb50397a92 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -67,8 +67,9 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
 
 static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
 {
-	struct device_node *np = dev->pdev->dev.of_node;
+	struct device_node *np = dev->dev->of_node;
 	struct ast_private *ast = to_ast_private(dev);
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	uint32_t data, jregd0, jregd1;
 
 	/* Defaults */
@@ -85,7 +86,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
 	}
 
 	/* Not all families have a P2A bridge */
-	if (dev->pdev->device != PCI_CHIP_AST2000)
+	if (pdev->device != PCI_CHIP_AST2000)
 		return;
 
 	/*
@@ -119,6 +120,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
 static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 {
 	struct ast_private *ast = to_ast_private(dev);
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	uint32_t jreg, scu_rev;
 
 	/*
@@ -143,19 +145,19 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 	ast_detect_config_mode(dev, &scu_rev);
 
 	/* Identify chipset */
-	if (dev->pdev->revision >= 0x50) {
+	if (pdev->revision >= 0x50) {
 		ast->chip = AST2600;
 		drm_info(dev, "AST 2600 detected\n");
-	} else if (dev->pdev->revision >= 0x40) {
+	} else if (pdev->revision >= 0x40) {
 		ast->chip = AST2500;
 		drm_info(dev, "AST 2500 detected\n");
-	} else if (dev->pdev->revision >= 0x30) {
+	} else if (pdev->revision >= 0x30) {
 		ast->chip = AST2400;
 		drm_info(dev, "AST 2400 detected\n");
-	} else if (dev->pdev->revision >= 0x20) {
+	} else if (pdev->revision >= 0x20) {
 		ast->chip = AST2300;
 		drm_info(dev, "AST 2300 detected\n");
-	} else if (dev->pdev->revision >= 0x10) {
+	} else if (pdev->revision >= 0x10) {
 		switch (scu_rev & 0x0300) {
 		case 0x0200:
 			ast->chip = AST1100;
@@ -265,7 +267,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 
 static int ast_get_dram_info(struct drm_device *dev)
 {
-	struct device_node *np = dev->pdev->dev.of_node;
+	struct device_node *np = dev->dev->of_node;
 	struct ast_private *ast = to_ast_private(dev);
 	uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
 	uint32_t denum, num, div, ref_pll, dsel;
@@ -409,10 +411,9 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
 		return ast;
 	dev = &ast->base;
 
-	dev->pdev = pdev;
 	pci_set_drvdata(pdev, dev);
 
-	ast->regs = pci_iomap(dev->pdev, 1, 0);
+	ast->regs = pci_iomap(pdev, 1, 0);
 	if (!ast->regs)
 		return ERR_PTR(-EIO);
 
@@ -421,14 +422,14 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
 	 * assume the chip has MMIO enabled by default (rev 0x20
 	 * and higher).
 	 */
-	if (!(pci_resource_flags(dev->pdev, 2) & IORESOURCE_IO)) {
+	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
 		drm_info(dev, "platform has no IO space, trying MMIO\n");
 		ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
 	}
 
 	/* "map" IO regs if the above hasn't done so already */
 	if (!ast->ioregs) {
-		ast->ioregs = pci_iomap(dev->pdev, 2, 0);
+		ast->ioregs = pci_iomap(pdev, 2, 0);
 		if (!ast->ioregs)
 			return ERR_PTR(-EIO);
 	}
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 8392ebde504b33b226c8f71c718ccc12b9f15cb1..7592f1b9e1f1973107aca1a52b38c02a3a1f2700 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -77,31 +77,32 @@ static u32 ast_get_vram_size(struct ast_private *ast)
 static void ast_mm_release(struct drm_device *dev, void *ptr)
 {
 	struct ast_private *ast = to_ast_private(dev);
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	arch_phys_wc_del(ast->fb_mtrr);
-	arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
-				pci_resource_len(dev->pdev, 0));
+	arch_io_free_memtype_wc(pci_resource_start(pdev, 0),
+				pci_resource_len(pdev, 0));
 }
 
 int ast_mm_init(struct ast_private *ast)
 {
 	struct drm_device *dev = &ast->base;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	u32 vram_size;
 	int ret;
 
 	vram_size = ast_get_vram_size(ast);
 
-	ret = drmm_vram_helper_init(dev, pci_resource_start(dev->pdev, 0),
-				    vram_size);
+	ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), vram_size);
 	if (ret) {
 		drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
 		return ret;
 	}
 
-	arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
-				   pci_resource_len(dev->pdev, 0));
-	ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
-					pci_resource_len(dev->pdev, 0));
+	arch_io_reserve_memtype_wc(pci_resource_start(pdev, 0),
+				   pci_resource_len(pdev, 0));
+	ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0),
+					pci_resource_len(pdev, 0));
 
 	return drmm_add_action_or_reset(dev, ast_mm_release, NULL);
 }
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 9db371f4054f3c3996e569602d2f37d06863a5a6..988b270fea5ebe9cfd682e5f6fb3003d8f2c4372 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -903,7 +903,6 @@ static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc,
 
 static const struct drm_crtc_funcs ast_crtc_funcs = {
 	.reset = ast_crtc_reset,
-	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 	.destroy = drm_crtc_cleanup,
 	.set_config = drm_atomic_helper_set_config,
 	.page_flip = drm_atomic_helper_page_flip,
@@ -1107,6 +1106,7 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
 int ast_mode_config_init(struct ast_private *ast)
 {
 	struct drm_device *dev = &ast->base;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int ret;
 
 	ret = ast_cursor_init(ast);
@@ -1122,7 +1122,7 @@ int ast_mode_config_init(struct ast_private *ast)
 	dev->mode_config.min_height = 0;
 	dev->mode_config.preferred_depth = 24;
 	dev->mode_config.prefer_shadow = 1;
-	dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
+	dev->mode_config.fb_base = pci_resource_start(pdev, 0);
 
 	if (ast->chip == AST2100 ||
 	    ast->chip == AST2200 ||
@@ -1259,7 +1259,7 @@ static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
 
 	i2c->adapter.owner = THIS_MODULE;
 	i2c->adapter.class = I2C_CLASS_DDC;
-	i2c->adapter.dev.parent = &dev->pdev->dev;
+	i2c->adapter.dev.parent = dev->dev;
 	i2c->dev = dev;
 	i2c_set_adapdata(&i2c->adapter, i2c);
 	snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 8902c2f84bf99ab0def4b95870cfcc1816d66b22..0607658dde51b70518c27fb28ab396c5e6708de2 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -71,6 +71,7 @@ static void
 ast_set_def_ext_reg(struct drm_device *dev)
 {
 	struct ast_private *ast = to_ast_private(dev);
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	u8 i, index, reg;
 	const u8 *ext_reg_info;
 
@@ -80,7 +81,7 @@ ast_set_def_ext_reg(struct drm_device *dev)
 
 	if (ast->chip == AST2300 || ast->chip == AST2400 ||
 	    ast->chip == AST2500) {
-		if (dev->pdev->revision >= 0x20)
+		if (pdev->revision >= 0x20)
 			ext_reg_info = extreginfo_ast2300;
 		else
 			ext_reg_info = extreginfo_ast2300a0;
@@ -366,11 +367,12 @@ static void ast_init_dram_reg(struct drm_device *dev)
 void ast_post_gpu(struct drm_device *dev)
 {
 	struct ast_private *ast = to_ast_private(dev);
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	u32 reg;
 
-	pci_read_config_dword(dev->pdev, 0x04, &reg);
+	pci_read_config_dword(pdev, 0x04, &reg);
 	reg |= 0x3;
-	pci_write_config_dword(dev->pdev, 0x04, reg);
+	pci_write_config_dword(pdev, 0x04, reg);
 
 	ast_enable_vga(dev);
 	ast_open_key(ast);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index c58fa00b4848c45b9d8acb0d33d31f46b902dada..05ad75d155e8407f18a3b6af72a835a958cb8d32 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -473,7 +473,6 @@ static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = {
 	.atomic_destroy_state = atmel_hlcdc_crtc_destroy_state,
 	.enable_vblank = atmel_hlcdc_crtc_enable_vblank,
 	.disable_vblank = atmel_hlcdc_crtc_disable_vblank,
-	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 };
 
 int atmel_hlcdc_crtc_create(struct drm_device *dev)
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index fd454225fd196983173183c9cb1924b023fd0148..b469624fe40dee985d67a76ba3a44a0dbf153bfb 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -121,7 +121,6 @@ static int bochs_pci_probe(struct pci_dev *pdev,
 	if (ret)
 		goto err_free_dev;
 
-	dev->pdev = pdev;
 	pci_set_drvdata(pdev, dev);
 
 	ret = bochs_load(dev);
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index dce4672e3fc8d15529fde53e3e66984cfc6797d6..2d7380a9890ed1ef4ebb42549a9f36e94f3cdc1e 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -110,7 +110,7 @@ int bochs_hw_load_edid(struct bochs_device *bochs)
 int bochs_hw_init(struct drm_device *dev)
 {
 	struct bochs_device *bochs = dev->dev_private;
-	struct pci_dev *pdev = dev->pdev;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	unsigned long addr, size, mem, ioaddr, iosize;
 	u16 id;
 
@@ -201,7 +201,7 @@ void bochs_hw_fini(struct drm_device *dev)
 		release_region(VBE_DISPI_IOPORT_INDEX, 2);
 	if (bochs->fb_map)
 		iounmap(bochs->fb_map);
-	pci_release_regions(dev->pdev);
+	pci_release_regions(to_pci_dev(dev->dev));
 	kfree(bochs->edid);
 }
 
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index a0d392c338da5241744835e0ed3d89ba14440e8b..76555ae64e9ce1272eed82a32d66e1bfd89aa604 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1292,8 +1292,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 
 err_unregister_cec:
 	i2c_unregister_device(adv7511->i2c_cec);
-	if (adv7511->cec_clk)
-		clk_disable_unprepare(adv7511->cec_clk);
+	clk_disable_unprepare(adv7511->cec_clk);
 err_i2c_unregister_packet:
 	i2c_unregister_device(adv7511->i2c_packet);
 err_i2c_unregister_edid:
@@ -1311,8 +1310,7 @@ static int adv7511_remove(struct i2c_client *i2c)
 	if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
 		adv7533_detach_dsi(adv7511);
 	i2c_unregister_device(adv7511->i2c_cec);
-	if (adv7511->cec_clk)
-		clk_disable_unprepare(adv7511->cec_clk);
+	clk_disable_unprepare(adv7511->cec_clk);
 
 	adv7511_uninit_regulators(adv7511);
 
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 4d278573cdb99817bab4e58705fedcd9f43f8949..05eb759da6fc62395707b98023ada082c0fe1626 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -11,6 +11,7 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
 
 #include <drm/drm_bridge.h>
 #include <drm/drm_edid.h>
@@ -20,6 +21,8 @@ struct display_connector {
 
 	struct gpio_desc	*hpd_gpio;
 	int			hpd_irq;
+
+	struct regulator	*dp_pwr;
 };
 
 static inline struct display_connector *
@@ -172,11 +175,12 @@ static int display_connector_probe(struct platform_device *pdev)
 	of_property_read_string(pdev->dev.of_node, "label", &label);
 
 	/*
-	 * Get the HPD GPIO for DVI and HDMI connectors. If the GPIO can provide
+	 * Get the HPD GPIO for DVI, HDMI and DP connectors. If the GPIO can provide
 	 * edge interrupts, register an interrupt handler.
 	 */
 	if (type == DRM_MODE_CONNECTOR_DVII ||
-	    type == DRM_MODE_CONNECTOR_HDMIA) {
+	    type == DRM_MODE_CONNECTOR_HDMIA ||
+	    type == DRM_MODE_CONNECTOR_DisplayPort) {
 		conn->hpd_gpio = devm_gpiod_get_optional(&pdev->dev, "hpd",
 							 GPIOD_IN);
 		if (IS_ERR(conn->hpd_gpio)) {
@@ -223,6 +227,38 @@ static int display_connector_probe(struct platform_device *pdev)
 		}
 	}
 
+	/* Get the DP_PWR supply for DP connectors. */
+	if (type == DRM_MODE_CONNECTOR_DisplayPort) {
+		int ret;
+
+		conn->dp_pwr = devm_regulator_get_optional(&pdev->dev, "dp-pwr");
+
+		if (IS_ERR(conn->dp_pwr)) {
+			ret = PTR_ERR(conn->dp_pwr);
+
+			switch (ret) {
+			case -ENODEV:
+				conn->dp_pwr = NULL;
+				break;
+
+			case -EPROBE_DEFER:
+				return -EPROBE_DEFER;
+
+			default:
+				dev_err(&pdev->dev, "failed to get DP PWR regulator: %d\n", ret);
+				return ret;
+			}
+		}
+
+		if (conn->dp_pwr) {
+			ret = regulator_enable(conn->dp_pwr);
+			if (ret) {
+				dev_err(&pdev->dev, "failed to enable DP PWR regulator: %d\n", ret);
+				return ret;
+			}
+		}
+	}
+
 	conn->bridge.funcs = &display_connector_bridge_funcs;
 	conn->bridge.of_node = pdev->dev.of_node;
 
@@ -251,6 +287,9 @@ static int display_connector_remove(struct platform_device *pdev)
 {
 	struct display_connector *conn = platform_get_drvdata(pdev);
 
+	if (conn->dp_pwr)
+		regulator_disable(conn->dp_pwr);
+
 	drm_bridge_remove(&conn->bridge);
 
 	if (!IS_ERR(conn->bridge.ddc))
@@ -275,6 +314,9 @@ static const struct of_device_id display_connector_match[] = {
 	}, {
 		.compatible = "vga-connector",
 		.data = (void *)DRM_MODE_CONNECTOR_VGA,
+	}, {
+		.compatible = "dp-connector",
+		.data = (void *)DRM_MODE_CONNECTOR_DisplayPort,
 	},
 	{},
 };
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 0c79a9ba48bb6407d7a001b2e03cca139d4ed28d..dda4fa9a1a08d8ef792b7cd2959360bb79e6b79d 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -3440,8 +3440,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
 
 err_iahb:
 	clk_disable_unprepare(hdmi->iahb_clk);
-	if (hdmi->cec_clk)
-		clk_disable_unprepare(hdmi->cec_clk);
+	clk_disable_unprepare(hdmi->cec_clk);
 err_isfr:
 	clk_disable_unprepare(hdmi->isfr_clk);
 err_res:
@@ -3465,8 +3464,7 @@ void dw_hdmi_remove(struct dw_hdmi *hdmi)
 
 	clk_disable_unprepare(hdmi->iahb_clk);
 	clk_disable_unprepare(hdmi->isfr_clk);
-	if (hdmi->cec_clk)
-		clk_disable_unprepare(hdmi->cec_clk);
+	clk_disable_unprepare(hdmi->cec_clk);
 
 	if (hdmi->i2c)
 		i2c_del_adapter(&hdmi->i2c->adap);
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index 86b06975bfdd43e1892b0c744ba1720cf1937eac..e21078b2f8b5b998f5a26af90119c96531e8881f 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -202,7 +202,7 @@ static int thc63_probe(struct platform_device *pdev)
 	thc63->dev = &pdev->dev;
 	platform_set_drvdata(pdev, thc63);
 
-	thc63->vcc = devm_regulator_get_optional(thc63->dev, "vcc");
+	thc63->vcc = devm_regulator_get(thc63->dev, "vcc");
 	if (IS_ERR(thc63->vcc)) {
 		if (PTR_ERR(thc63->vcc) == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 4c7ad46fdd219f59c2830d2d52428763d5e606d2..5311d03d49cc093f76b04cd3d743a79d9a6d3a6d 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -45,13 +45,9 @@
 
 #include "drm_legacy.h"
 
-/**
+/*
  * Get AGP information.
  *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a (output) drm_agp_info structure.
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device has been initialized and acquired and fills in the
@@ -92,7 +88,7 @@ int drm_agp_info_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-/**
+/*
  * Acquire the AGP device.
  *
  * \param dev DRM device that is to acquire AGP.
@@ -103,11 +99,13 @@ int drm_agp_info_ioctl(struct drm_device *dev, void *data,
  */
 int drm_agp_acquire(struct drm_device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+
 	if (!dev->agp)
 		return -ENODEV;
 	if (dev->agp->acquired)
 		return -EBUSY;
-	dev->agp->bridge = agp_backend_acquire(dev->pdev);
+	dev->agp->bridge = agp_backend_acquire(pdev);
 	if (!dev->agp->bridge)
 		return -ENODEV;
 	dev->agp->acquired = 1;
@@ -115,13 +113,9 @@ int drm_agp_acquire(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_agp_acquire);
 
-/**
+/*
  * Acquire the AGP device (ioctl).
  *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument.
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device hasn't been acquired before and calls
@@ -133,7 +127,7 @@ int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
 	return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
 }
 
-/**
+/*
  * Release the AGP device.
  *
  * \param dev DRM device that is to release AGP.
@@ -157,7 +151,7 @@ int drm_agp_release_ioctl(struct drm_device *dev, void *data,
 	return drm_agp_release(dev);
 }
 
-/**
+/*
  * Enable the AGP bus.
  *
  * \param dev DRM device that has previously acquired AGP.
@@ -187,13 +181,9 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
 	return drm_agp_enable(dev, *mode);
 }
 
-/**
+/*
  * Allocate AGP memory.
  *
- * \param inode device inode.
- * \param file_priv file private pointer.
- * \param cmd command.
- * \param arg pointer to a drm_agp_buffer structure.
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device is present and has been acquired, allocates the
@@ -242,7 +232,7 @@ int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
 	return drm_agp_alloc(dev, request);
 }
 
-/**
+/*
  * Search for the AGP memory entry associated with a handle.
  *
  * \param dev DRM device structure.
@@ -263,13 +253,9 @@ static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device *dev,
 	return NULL;
 }
 
-/**
+/*
  * Unbind AGP memory from the GATT (ioctl).
  *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_binding structure.
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device is present and acquired, looks-up the AGP memory
@@ -285,7 +271,7 @@ int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
 	entry = drm_agp_lookup_entry(dev, request->handle);
 	if (!entry || !entry->bound)
 		return -EINVAL;
-	ret = drm_unbind_agp(entry->memory);
+	ret = agp_unbind_memory(entry->memory);
 	if (ret == 0)
 		entry->bound = 0;
 	return ret;
@@ -301,13 +287,9 @@ int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
 	return drm_agp_unbind(dev, request);
 }
 
-/**
+/*
  * Bind AGP memory into the GATT (ioctl)
  *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_binding structure.
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device is present and has been acquired and that no memory
@@ -326,7 +308,7 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
 	if (!entry || entry->bound)
 		return -EINVAL;
 	page = DIV_ROUND_UP(request->offset, PAGE_SIZE);
-	retcode = drm_bind_agp(entry->memory, page);
+	retcode = agp_bind_memory(entry->memory, page);
 	if (retcode)
 		return retcode;
 	entry->bound = dev->agp->base + (page << PAGE_SHIFT);
@@ -345,13 +327,9 @@ int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
 	return drm_agp_bind(dev, request);
 }
 
-/**
+/*
  * Free AGP memory (ioctl).
  *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_buffer structure.
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device is present and has been acquired and looks up the
@@ -369,11 +347,11 @@ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
 	if (!entry)
 		return -EINVAL;
 	if (entry->bound)
-		drm_unbind_agp(entry->memory);
+		agp_unbind_memory(entry->memory);
 
 	list_del(&entry->head);
 
-	drm_free_agp(entry->memory, entry->pages);
+	agp_free_memory(entry->memory);
 	kfree(entry);
 	return 0;
 }
@@ -388,7 +366,7 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data,
 	return drm_agp_free(dev, request);
 }
 
-/**
+/*
  * Initialize the AGP resources.
  *
  * \return pointer to a drm_agp_head structure.
@@ -402,14 +380,15 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data,
  */
 struct drm_agp_head *drm_agp_init(struct drm_device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct drm_agp_head *head = NULL;
 
 	head = kzalloc(sizeof(*head), GFP_KERNEL);
 	if (!head)
 		return NULL;
-	head->bridge = agp_find_bridge(dev->pdev);
+	head->bridge = agp_find_bridge(pdev);
 	if (!head->bridge) {
-		head->bridge = agp_backend_acquire(dev->pdev);
+		head->bridge = agp_backend_acquire(pdev);
 		if (!head->bridge) {
 			kfree(head);
 			return NULL;
@@ -453,8 +432,8 @@ void drm_legacy_agp_clear(struct drm_device *dev)
 
 	list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
 		if (entry->bound)
-			drm_unbind_agp(entry->memory);
-		drm_free_agp(entry->memory, entry->pages);
+			agp_unbind_memory(entry->memory);
+		agp_free_memory(entry->memory);
 		kfree(entry);
 	}
 	INIT_LIST_HEAD(&dev->agp->memory);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4a8cbec832bc46051a8d500fa1ab3df67d23929d..560aaecba31bc3c5b50b978749fd353568a53939 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2040,6 +2040,9 @@ crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
  * should always call this function from their
  * &drm_mode_config_funcs.atomic_commit hook.
  *
+ * Drivers that need to extend the commit setup to private objects can use the
+ * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
+ *
  * To be able to use this support drivers need to use a few more helper
  * functions. drm_atomic_helper_wait_for_dependencies() must be called before
  * actually committing the hardware state, and for nonblocking commits this call
@@ -2083,8 +2086,11 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
 	struct drm_plane *plane;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
 	struct drm_crtc_commit *commit;
+	const struct drm_mode_config_helper_funcs *funcs;
 	int i, ret;
 
+	funcs = state->dev->mode_config.helper_private;
+
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		commit = kzalloc(sizeof(*commit), GFP_KERNEL);
 		if (!commit)
@@ -2169,6 +2175,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
 		new_plane_state->commit = drm_crtc_commit_get(commit);
 	}
 
+	if (funcs && funcs->atomic_commit_setup)
+		return funcs->atomic_commit_setup(state);
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
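+
+/*
+ * Illustrative sketch (not part of this patch) of a driver hooking into the
+ * commit setup through the new &drm_mode_config_helper_funcs.atomic_commit_setup
+ * callback; all names below are hypothetical:
+ *
+ *	static int my_atomic_commit_setup(struct drm_atomic_state *state)
+ *	{
+ *		... prepare driver-private commit tracking here ...
+ *		return 0;
+ *	}
+ *
+ *	static const struct drm_mode_config_helper_funcs my_mode_config_helpers = {
+ *		.atomic_commit_setup = my_atomic_commit_setup,
+ *	};
+ */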
@@ -3499,76 +3508,6 @@ int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
 }
 EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
 
-/**
- * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
- * @crtc: CRTC object
- * @red: red correction table
- * @green: green correction table
- * @blue: green correction table
- * @size: size of the tables
- * @ctx: lock acquire context
- *
- * Implements support for legacy gamma correction table for drivers
- * that support color management through the DEGAMMA_LUT/GAMMA_LUT
- * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
- * how the atomic color management and gamma tables work.
- */
-int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
-				       u16 *red, u16 *green, u16 *blue,
-				       uint32_t size,
-				       struct drm_modeset_acquire_ctx *ctx)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_atomic_state *state;
-	struct drm_crtc_state *crtc_state;
-	struct drm_property_blob *blob = NULL;
-	struct drm_color_lut *blob_data;
-	int i, ret = 0;
-	bool replaced;
-
-	state = drm_atomic_state_alloc(crtc->dev);
-	if (!state)
-		return -ENOMEM;
-
-	blob = drm_property_create_blob(dev,
-					sizeof(struct drm_color_lut) * size,
-					NULL);
-	if (IS_ERR(blob)) {
-		ret = PTR_ERR(blob);
-		blob = NULL;
-		goto fail;
-	}
-
-	/* Prepare GAMMA_LUT with the legacy values. */
-	blob_data = blob->data;
-	for (i = 0; i < size; i++) {
-		blob_data[i].red = red[i];
-		blob_data[i].green = green[i];
-		blob_data[i].blue = blue[i];
-	}
-
-	state->acquire_ctx = ctx;
-	crtc_state = drm_atomic_get_crtc_state(state, crtc);
-	if (IS_ERR(crtc_state)) {
-		ret = PTR_ERR(crtc_state);
-		goto fail;
-	}
-
-	/* Reset DEGAMMA_LUT and CTM properties. */
-	replaced  = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
-	replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
-	replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
-	crtc_state->color_mgmt_changed |= replaced;
-
-	ret = drm_atomic_commit(state);
-
-fail:
-	drm_atomic_state_put(state);
-	drm_property_blob_put(blob);
-	return ret;
-}
-EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
-
 /**
  * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
  *						  the input end of a bridge
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 5c2141e9a9f483a3472b0ec96a3918761ee1b920..26e2f2ffd255b3b6631e2340330abbfbaa90e59a 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -185,12 +185,6 @@
  *		 plane does not expose the "alpha" property, then this is
  *		 assumed to be 1.0
  *
- * IN_FORMATS:
- *	Blob property which contains the set of buffer format and modifier
- *	pairs supported by this plane. The blob is a drm_format_modifier_blob
- *	struct. Without this property the plane doesn't support buffers with
- *	modifiers. Userspace cannot change this property.
- *
  * Note that all the property extensions described here apply either to the
  * plane or the CRTC (e.g. for the background color, which currently is not
  * exposed and assumed to be black).
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index aeb1327e3077cf8484ddacfe003a69d9e93c6dcf..e3d77dfefb0afe4b13059f19119a274dc9521731 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -326,7 +326,7 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
 		 * As we're limiting the address to 2^32-1 (or less),
 		 * casting it down to 32 bits is no problem, but we
 		 * need to point to a 64bit variable first. */
-		map->handle = dma_alloc_coherent(&dev->pdev->dev,
+		map->handle = dma_alloc_coherent(dev->dev,
 						 map->size,
 						 &map->offset,
 						 GFP_KERNEL);
@@ -556,7 +556,7 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 	case _DRM_SCATTER_GATHER:
 		break;
 	case _DRM_CONSISTENT:
-		dma_free_coherent(&dev->pdev->dev,
+		dma_free_coherent(dev->dev,
 				  map->size,
 				  map->handle,
 				  map->offset);
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 0fe3c496002a904aa9bd95e19593812dcd16c833..79a50ef1250fd1d8370547e13fae21cb193ec209 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -30,6 +30,8 @@
 
 #include <linux/export.h>
 #include <linux/highmem.h>
+#include <linux/mem_encrypt.h>
+#include <xen/xen.h>
 
 #include <drm/drm_cache.h>
 
@@ -176,3 +178,34 @@ drm_clflush_virt_range(void *addr, unsigned long length)
 #endif
 }
 EXPORT_SYMBOL(drm_clflush_virt_range);
+
+bool drm_need_swiotlb(int dma_bits)
+{
+	struct resource *tmp;
+	resource_size_t max_iomem = 0;
+
+	/*
+	 * Xen paravirtual hosts require swiotlb regardless of requested dma
+	 * transfer size.
+	 *
+	 * NOTE: Really, what it requires is use of the dma_alloc_coherent
+	 *       allocator used in ttm_dma_populate() instead of
+	 *       ttm_populate_and_map_pages(), which bounce-buffers so heavily
+	 *       under Xen that it leads to swiotlb buffer exhaustion.
+	 */
+	if (xen_pv_domain())
+		return true;
+
+	/*
+	 * Enforce dma_alloc_coherent when memory encryption is active as well
+	 * for the same reasons as for Xen paravirtual hosts.
+	 */
+	if (mem_encrypt_active())
+		return true;
+
+	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
+		max_iomem = max(max_iomem, tmp->end);
+
+	return max_iomem > ((u64)1 << dma_bits);
+}
+EXPORT_SYMBOL(drm_need_swiotlb);
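+
+/*
+ * Illustrative use, a minimal sketch: a TTM-based driver would typically
+ * pass its device's DMA address width when deciding whether to force
+ * coherent allocations, e.g.
+ *
+ *	bool use_dma_alloc = drm_need_swiotlb(44);
+ *
+ * where 44 bits is a hypothetical device limit.
+ */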
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index b7e9e1c2564cd4b88a9ace3ae708d91010bc31e3..ced09c7c06f90f34bde86dc1ac9ac1fb5df78d5d 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -7,6 +7,7 @@
  * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
  */
 
+#include <drm/drm_modeset_lock.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
@@ -1181,9 +1182,11 @@ static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dp
 	struct drm_device *dev = client->dev;
 	struct drm_connector *connector;
 	struct drm_mode_set *modeset;
+	struct drm_modeset_acquire_ctx ctx;
 	int j;
+	int ret;
 
-	drm_modeset_lock_all(dev);
+	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
 	drm_client_for_each_modeset(modeset, client) {
 		if (!modeset->crtc->enabled)
 			continue;
@@ -1195,7 +1198,7 @@ static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dp
 				dev->mode_config.dpms_property, dpms_mode);
 		}
 	}
-	drm_modeset_unlock_all(dev);
+	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
 }
 
 /**
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 3bcabc2f6e0e47bfb7606e7298f9c7f5d288d69b..bb14f488c8f6c7f05ec80fb5300c0d1bbdf049c4 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -22,6 +22,7 @@
 
 #include <linux/uaccess.h>
 
+#include <drm/drm_atomic.h>
 #include <drm/drm_color_mgmt.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_device.h>
@@ -89,9 +90,8 @@
  *	modes) appropriately.
  *
  * There is also support for a legacy gamma table, which is set up by calling
- * drm_mode_crtc_set_gamma_size(). Drivers which support both should use
- * drm_atomic_helper_legacy_gamma_set() to alias the legacy gamma ramp with the
- * "GAMMA_LUT" property above.
+ * drm_mode_crtc_set_gamma_size(). The DRM core will then alias the legacy gamma
+ * ramp with "GAMMA_LUT" or, if that is unavailable, "DEGAMMA_LUT".
  *
  * Support for different non RGB color encodings is controlled through
  * &drm_plane specific COLOR_ENCODING and COLOR_RANGE properties. They
@@ -156,9 +156,6 @@ EXPORT_SYMBOL(drm_color_ctm_s31_32_to_qm_n);
  * optional. The gamma and degamma properties are only attached if
  * their size is not 0 and ctm_property is only attached if has_ctm is
  * true.
- *
- * Drivers should use drm_atomic_helper_legacy_gamma_set() to implement the
- * legacy &drm_crtc_funcs.gamma_set callback.
  */
 void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
 				uint degamma_lut_size,
@@ -231,6 +228,116 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
 }
 EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
 
+/**
+ * drm_crtc_supports_legacy_gamma - does the crtc support legacy gamma correction table
+ * @crtc: CRTC object
+ *
+ * Returns true if the given crtc supports setting the legacy gamma correction
+ * table, false otherwise.
+ */
+static bool drm_crtc_supports_legacy_gamma(struct drm_crtc *crtc)
+{
+	u32 gamma_id = crtc->dev->mode_config.gamma_lut_property->base.id;
+	u32 degamma_id = crtc->dev->mode_config.degamma_lut_property->base.id;
+
+	if (!crtc->gamma_size)
+		return false;
+
+	if (crtc->funcs->gamma_set)
+		return true;
+
+	return !!(drm_mode_obj_find_prop_id(&crtc->base, gamma_id) ||
+		  drm_mode_obj_find_prop_id(&crtc->base, degamma_id));
+}
+
+/**
+ * drm_crtc_legacy_gamma_set - set the legacy gamma correction table
+ * @crtc: CRTC object
+ * @red: red correction table
+ * @green: green correction table
+ * @blue: blue correction table
+ * @size: size of the tables
+ * @ctx: lock acquire context
+ *
+ * Implements support for the legacy gamma correction table for drivers
+ * that have set &drm_crtc_funcs.gamma_set or that support color management
+ * through the DEGAMMA_LUT/GAMMA_LUT properties. See
+ * drm_crtc_enable_color_mgmt() and the containing chapter for
+ * how the atomic color management and gamma tables work.
+ *
+ * This function sets the gamma using &drm_crtc_funcs.gamma_set if set, or
+ * alternatively using the crtc color management properties.
+ */
+static int drm_crtc_legacy_gamma_set(struct drm_crtc *crtc,
+				     u16 *red, u16 *green, u16 *blue,
+				     u32 size,
+				     struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_atomic_state *state;
+	struct drm_crtc_state *crtc_state;
+	struct drm_property_blob *blob;
+	struct drm_color_lut *blob_data;
+	u32 gamma_id = dev->mode_config.gamma_lut_property->base.id;
+	u32 degamma_id = dev->mode_config.degamma_lut_property->base.id;
+	bool use_gamma_lut;
+	int i, ret = 0;
+	bool replaced;
+
+	if (crtc->funcs->gamma_set)
+		return crtc->funcs->gamma_set(crtc, red, green, blue, size, ctx);
+
+	if (drm_mode_obj_find_prop_id(&crtc->base, gamma_id))
+		use_gamma_lut = true;
+	else if (drm_mode_obj_find_prop_id(&crtc->base, degamma_id))
+		use_gamma_lut = false;
+	else
+		return -ENODEV;
+
+	state = drm_atomic_state_alloc(crtc->dev);
+	if (!state)
+		return -ENOMEM;
+
+	blob = drm_property_create_blob(dev,
+					sizeof(struct drm_color_lut) * size,
+					NULL);
+	if (IS_ERR(blob)) {
+		ret = PTR_ERR(blob);
+		blob = NULL;
+		goto fail;
+	}
+
+	/* Prepare GAMMA_LUT with the legacy values. */
+	blob_data = blob->data;
+	for (i = 0; i < size; i++) {
+		blob_data[i].red = red[i];
+		blob_data[i].green = green[i];
+		blob_data[i].blue = blue[i];
+	}
+
+	state->acquire_ctx = ctx;
+	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+	if (IS_ERR(crtc_state)) {
+		ret = PTR_ERR(crtc_state);
+		goto fail;
+	}
+
+	/* Set GAMMA_LUT and reset DEGAMMA_LUT and CTM */
+	replaced = drm_property_replace_blob(&crtc_state->degamma_lut,
+					     use_gamma_lut ? NULL : blob);
+	replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
+	replaced |= drm_property_replace_blob(&crtc_state->gamma_lut,
+					      use_gamma_lut ? blob : NULL);
+	crtc_state->color_mgmt_changed |= replaced;
+
+	ret = drm_atomic_commit(state);
+
+fail:
+	drm_atomic_state_put(state);
+	drm_property_blob_put(blob);
+	return ret;
+}
+
 /**
  * drm_mode_gamma_set_ioctl - set the gamma table
  * @dev: DRM device
@@ -262,7 +369,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 	if (!crtc)
 		return -ENOENT;
 
-	if (crtc->funcs->gamma_set == NULL)
+	if (!drm_crtc_supports_legacy_gamma(crtc))
 		return -ENOSYS;
 
 	/* memcpy into gamma store */
@@ -290,8 +397,8 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 		goto out;
 	}
 
-	ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base,
-				     crtc->gamma_size, &ctx);
+	ret = drm_crtc_legacy_gamma_set(crtc, r_base, g_base, b_base,
+					crtc->gamma_size, &ctx);
 
 out:
 	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 74090fc3aa552a54d047ddfff2af5e4483e3e009..9c4f9947b194fcaf954ada858b73a41fd680ea15 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -38,6 +38,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_modeset_lock.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_auth.h>
@@ -67,7 +68,7 @@
  * &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2, and other legacy
  * operations like &drm_crtc_funcs.gamma_set. For atomic drivers all these
  * features are controlled through &drm_property and
- * &drm_mode_config_funcs.atomic_check and &drm_mode_config_funcs.atomic_check.
+ * &drm_mode_config_funcs.atomic_check.
  */
 
 /**
@@ -240,30 +241,12 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
  * 		Nearest Neighbor scaling filter
  */
 
-/**
- * drm_crtc_init_with_planes - Initialise a new CRTC object with
- *    specified primary and cursor planes.
- * @dev: DRM device
- * @crtc: CRTC object to init
- * @primary: Primary plane for CRTC
- * @cursor: Cursor plane for CRTC
- * @funcs: callbacks for the new CRTC
- * @name: printf style format string for the CRTC name, or NULL for default name
- *
- * Inits a new object created as base part of a driver crtc object. Drivers
- * should use this function instead of drm_crtc_init(), which is only provided
- * for backwards compatibility with drivers which do not yet support universal
- * planes). For really simple hardware which has only 1 plane look at
- * drm_simple_display_pipe_init() instead.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
-			      struct drm_plane *primary,
-			      struct drm_plane *cursor,
-			      const struct drm_crtc_funcs *funcs,
-			      const char *name, ...)
+__printf(6, 0)
+static int __drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
+				       struct drm_plane *primary,
+				       struct drm_plane *cursor,
+				       const struct drm_crtc_funcs *funcs,
+				       const char *name, va_list ap)
 {
 	struct drm_mode_config *config = &dev->mode_config;
 	int ret;
@@ -291,11 +274,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
 		return ret;
 
 	if (name) {
-		va_list ap;
-
-		va_start(ap, name);
 		crtc->name = kvasprintf(GFP_KERNEL, name, ap);
-		va_end(ap);
 	} else {
 		crtc->name = kasprintf(GFP_KERNEL, "crtc-%d",
 				       drm_num_crtcs(dev));
@@ -339,8 +318,101 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
 
 	return 0;
 }
+
+/**
+ * drm_crtc_init_with_planes - Initialise a new CRTC object with
+ *    specified primary and cursor planes.
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @primary: Primary plane for CRTC
+ * @cursor: Cursor plane for CRTC
+ * @funcs: callbacks for the new CRTC
+ * @name: printf style format string for the CRTC name, or NULL for default name
+ *
+ * Inits a new object created as base part of a driver crtc object. Drivers
+ * should use this function instead of drm_crtc_init(), which is only provided
+ * for backwards compatibility with drivers which do not yet support universal
+ * planes. For really simple hardware which has only 1 plane, look at
+ * drm_simple_display_pipe_init() instead.
+ * The &drm_crtc_funcs.destroy hook should call drm_crtc_cleanup() and kfree()
+ * the crtc structure. The crtc structure should not be allocated with
+ * devm_kzalloc().
+ *
+ * The @primary and @cursor planes are only relevant for legacy uAPI, see
+ * &drm_crtc.primary and &drm_crtc.cursor.
+ *
+ * Note: consider using drmm_crtc_alloc_with_planes() instead of
+ * drm_crtc_init_with_planes() to let the DRM managed resource infrastructure
+ * take care of cleanup and deallocation.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
+			      struct drm_plane *primary,
+			      struct drm_plane *cursor,
+			      const struct drm_crtc_funcs *funcs,
+			      const char *name, ...)
+{
+	va_list ap;
+	int ret;
+
+	WARN_ON(!funcs->destroy);
+
+	va_start(ap, name);
+	ret = __drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
+					  name, ap);
+	va_end(ap);
+
+	return ret;
+}
 EXPORT_SYMBOL(drm_crtc_init_with_planes);
 
+static void drmm_crtc_alloc_with_planes_cleanup(struct drm_device *dev,
+						void *ptr)
+{
+	struct drm_crtc *crtc = ptr;
+
+	drm_crtc_cleanup(crtc);
+}
+
+void *__drmm_crtc_alloc_with_planes(struct drm_device *dev,
+				    size_t size, size_t offset,
+				    struct drm_plane *primary,
+				    struct drm_plane *cursor,
+				    const struct drm_crtc_funcs *funcs,
+				    const char *name, ...)
+{
+	void *container;
+	struct drm_crtc *crtc;
+	va_list ap;
+	int ret;
+
+	if (WARN_ON(!funcs || funcs->destroy))
+		return ERR_PTR(-EINVAL);
+
+	container = drmm_kzalloc(dev, size, GFP_KERNEL);
+	if (!container)
+		return ERR_PTR(-ENOMEM);
+
+	crtc = container + offset;
+
+	va_start(ap, name);
+	ret = __drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
+					  name, ap);
+	va_end(ap);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = drmm_add_action_or_reset(dev, drmm_crtc_alloc_with_planes_cleanup,
+				       crtc);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return container;
+}
+EXPORT_SYMBOL(__drmm_crtc_alloc_with_planes);
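+
+/*
+ * Illustrative sketch of the intended use; this assumes a
+ * drmm_crtc_alloc_with_planes() convenience macro that supplies the size and
+ * offset of the embedding structure (names below are hypothetical):
+ *
+ *	struct my_crtc {
+ *		struct drm_crtc base;
+ *	};
+ *
+ *	my = drmm_crtc_alloc_with_planes(dev, struct my_crtc, base,
+ *					 primary, cursor, &my_crtc_funcs,
+ *					 "my-crtc");
+ *
+ * The passed-in &drm_crtc_funcs must not set .destroy, since cleanup is
+ * handled by the DRM managed action registered above.
+ */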
+
 /**
  * drm_crtc_cleanup - Clean up the core crtc usage
  * @crtc: CRTC to cleanup
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 5bd0934004e30c1f4757528efacb0c2bf8205e17..eedbb48815b7ba3b7c7962e063d6c580b756438e 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -949,6 +949,38 @@ bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE]
 }
 EXPORT_SYMBOL(drm_dp_downstream_444_to_420_conversion);
 
+/**
+ * drm_dp_downstream_rgb_to_ycbcr_conversion() - determine downstream facing port
+ *                                               RGB->YCbCr conversion capability
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: downstream facing port capabilities
+ * @color_spc: Colorspace for which conversion cap is sought
+ *
+ * Returns: whether the downstream facing port can convert RGB->YCbCr for a given
+ * colorspace.
+ */
+bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+					       const u8 port_cap[4],
+					       u8 color_spc)
+{
+	if (!drm_dp_is_branch(dpcd))
+		return false;
+
+	if (dpcd[DP_DPCD_REV] < 0x13)
+		return false;
+
+	switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+	case DP_DS_PORT_TYPE_HDMI:
+		if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+			return false;
+
+		return port_cap[3] & color_spc;
+	default:
+		return false;
+	}
+}
+EXPORT_SYMBOL(drm_dp_downstream_rgb_to_ycbcr_conversion);
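+
+/*
+ * Illustrative sketch: a source driver would gate its RGB->YCbCr conversion
+ * on this capability, e.g.
+ *
+ *	if (drm_dp_downstream_rgb_to_ycbcr_conversion(dpcd, port_caps,
+ *						      DP_DS_HDMI_BT709_RGB_YCBCR_CONV))
+ *		... program the protocol converter accordingly ...
+ *
+ * where DP_DS_HDMI_BT709_RGB_YCBCR_CONV is assumed to be one of the DP_DS_*
+ * colorspace capability bits.
+ */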
+
 /**
  * drm_dp_downstream_mode() - return a mode for downstream facing port
  * @dev: DRM device
@@ -1204,7 +1236,7 @@ bool drm_dp_read_sink_count_cap(struct drm_connector *connector,
 	return connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
 		dpcd[DP_DPCD_REV] >= DP_DPCD_REV_11 &&
 		dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
-		!drm_dp_has_quirk(desc, 0, DP_DPCD_QUIRK_NO_SINK_COUNT);
+		!drm_dp_has_quirk(desc, DP_DPCD_QUIRK_NO_SINK_COUNT);
 }
 EXPORT_SYMBOL(drm_dp_read_sink_count_cap);
 
@@ -1925,87 +1957,6 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
 #undef DEVICE_ID_ANY
 #undef DEVICE_ID
 
-struct edid_quirk {
-	u8 mfg_id[2];
-	u8 prod_id[2];
-	u32 quirks;
-};
-
-#define MFG(first, second) { (first), (second) }
-#define PROD_ID(first, second) { (first), (second) }
-
-/*
- * Some devices have unreliable OUIDs where they don't set the device ID
- * correctly, and as a result we need to use the EDID for finding additional
- * DP quirks in such cases.
- */
-static const struct edid_quirk edid_quirk_list[] = {
-	/* Optional 4K AMOLED panel in the ThinkPad X1 Extreme 2nd Generation
-	 * only supports DPCD backlight controls
-	 */
-	{ MFG(0x4c, 0x83), PROD_ID(0x41, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
-	/*
-	 * Some Dell CML 2020 systems have panels support both AUX and PWM
-	 * backlight control, and some only support AUX backlight control. All
-	 * said panels start up in AUX mode by default, and we don't have any
-	 * support for disabling HDR mode on these panels which would be
-	 * required to switch to PWM backlight control mode (plus, I'm not
-	 * even sure we want PWM backlight controls over DPCD backlight
-	 * controls anyway...). Until we have a better way of detecting these,
-	 * force DPCD backlight mode on all of them.
-	 */
-	{ MFG(0x06, 0xaf), PROD_ID(0x9b, 0x32), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
-	{ MFG(0x06, 0xaf), PROD_ID(0xeb, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
-	{ MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
-	{ MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
-	{ MFG(0x4c, 0x83), PROD_ID(0x47, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
-	{ MFG(0x09, 0xe5), PROD_ID(0xde, 0x08), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
-};
-
-#undef MFG
-#undef PROD_ID
-
-/**
- * drm_dp_get_edid_quirks() - Check the EDID of a DP device to find additional
- * DP-specific quirks
- * @edid: The EDID to check
- *
- * While OUIDs are meant to be used to recognize a DisplayPort device, a lot
- * of manufacturers don't seem to like following standards and neglect to fill
- * the dev-ID in, making it impossible to only use OUIDs for determining
- * quirks in some cases. This function can be used to check the EDID and look
- * up any additional DP quirks. The bits returned by this function correspond
- * to the quirk bits in &drm_dp_quirk.
- *
- * Returns: a bitmask of quirks, if any. The driver can check this using
- * drm_dp_has_quirk().
- */
-u32 drm_dp_get_edid_quirks(const struct edid *edid)
-{
-	const struct edid_quirk *quirk;
-	u32 quirks = 0;
-	int i;
-
-	if (!edid)
-		return 0;
-
-	for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
-		quirk = &edid_quirk_list[i];
-		if (memcmp(quirk->mfg_id, edid->mfg_id,
-			   sizeof(edid->mfg_id)) == 0 &&
-		    memcmp(quirk->prod_id, edid->prod_code,
-			   sizeof(edid->prod_code)) == 0)
-			quirks |= quirk->quirks;
-	}
-
-	DRM_DEBUG_KMS("DP sink: EDID mfg %*phD prod-ID %*phD quirks: 0x%04x\n",
-		      (int)sizeof(edid->mfg_id), edid->mfg_id,
-		      (int)sizeof(edid->prod_code), edid->prod_code, quirks);
-
-	return quirks;
-}
-EXPORT_SYMBOL(drm_dp_get_edid_quirks);
-
 /**
  * drm_dp_read_desc - read sink/branch descriptor from DPCD
  * @aux: DisplayPort AUX channel
@@ -2596,3 +2547,538 @@ void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
 #undef DP_SDP_LOG
 }
 EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
+
+/**
+ * drm_dp_get_pcon_max_frl_bw() - maximum FRL bandwidth supported by PCON
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns the maximum FRL bandwidth supported by the PCON in Gbps,
+ * or 0 if FRL is not supported.
+ */
+int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+			       const u8 port_cap[4])
+{
+	int bw;
+	u8 buf;
+
+	buf = port_cap[2];
+	bw = buf & DP_PCON_MAX_FRL_BW;
+
+	switch (bw) {
+	case DP_PCON_MAX_9GBPS:
+		return 9;
+	case DP_PCON_MAX_18GBPS:
+		return 18;
+	case DP_PCON_MAX_24GBPS:
+		return 24;
+	case DP_PCON_MAX_32GBPS:
+		return 32;
+	case DP_PCON_MAX_40GBPS:
+		return 40;
+	case DP_PCON_MAX_48GBPS:
+		return 48;
+	case DP_PCON_MAX_0GBPS:
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL(drm_dp_get_pcon_max_frl_bw);
+
+/**
+ * drm_dp_pcon_frl_prepare() - Prepare PCON for FRL.
+ * @aux: DisplayPort AUX channel
+ * @enable_frl_ready_hpd: Configure DP_PCON_ENABLE_HPD_READY.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd)
+{
+	int ret;
+	u8 buf = DP_PCON_ENABLE_SOURCE_CTL_MODE |
+		 DP_PCON_ENABLE_LINK_FRL_MODE;
+
+	if (enable_frl_ready_hpd)
+		buf |= DP_PCON_ENABLE_HPD_READY;
+
+	ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_prepare);
+
+/**
+ * drm_dp_pcon_is_frl_ready() - Is PCON ready for FRL
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns true if the PCON is ready for FRL, false otherwise.
+ */
+bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux)
+{
+	int ret;
+	u8 buf;
+
+	ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+	if (ret < 0)
+		return false;
+
+	if (buf & DP_PCON_FRL_READY)
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL(drm_dp_pcon_is_frl_ready);
+
+/**
+ * drm_dp_pcon_frl_configure_1() - Set HDMI link configuration, step 1
+ * @aux: DisplayPort AUX channel
+ * @max_frl_gbps: maximum FRL bandwidth in Gbps to be configured between the
+ * PCON and the HDMI sink
+ * @concurrent_mode: true if concurrent mode of operation is required,
+ * false otherwise.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
+				bool concurrent_mode)
+{
+	int ret;
+	u8 buf;
+
+	ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+	if (ret < 0)
+		return ret;
+
+	if (concurrent_mode)
+		buf |= DP_PCON_ENABLE_CONCURRENT_LINK;
+	else
+		buf &= ~DP_PCON_ENABLE_CONCURRENT_LINK;
+
+	switch (max_frl_gbps) {
+	case 9:
+		buf |=  DP_PCON_ENABLE_MAX_BW_9GBPS;
+		break;
+	case 18:
+		buf |=  DP_PCON_ENABLE_MAX_BW_18GBPS;
+		break;
+	case 24:
+		buf |=  DP_PCON_ENABLE_MAX_BW_24GBPS;
+		break;
+	case 32:
+		buf |=  DP_PCON_ENABLE_MAX_BW_32GBPS;
+		break;
+	case 40:
+		buf |=  DP_PCON_ENABLE_MAX_BW_40GBPS;
+		break;
+	case 48:
+		buf |=  DP_PCON_ENABLE_MAX_BW_48GBPS;
+		break;
+	case 0:
+		buf |=  DP_PCON_ENABLE_MAX_BW_0GBPS;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_configure_1);
+
+/**
+ * drm_dp_pcon_frl_configure_2() - Set HDMI link configuration, step 2
+ * @aux: DisplayPort AUX channel
+ * @max_frl_mask: max FRL bandwidths to be tried by the PCON with the HDMI sink
+ * @extended_train_mode: true for extended mode, false for normal mode.
+ * In normal mode, the PCON tries each FRL bandwidth from the max_frl_mask
+ * starting from the minimum, and stops when link training is successful. In
+ * extended mode, all FRL bandwidths selected in the mask are trained by the
+ * PCON.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
+				bool extended_train_mode)
+{
+	int ret;
+	u8 buf = max_frl_mask;
+
+	if (extended_train_mode)
+		buf |= DP_PCON_FRL_LINK_TRAIN_EXTENDED;
+
+	ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_configure_2);
+
+/**
+ * drm_dp_pcon_reset_frl_config() - Reset HDMI link configuration
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux)
+{
+	int ret;
+
+	ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_reset_frl_config);
+
+/**
+ * drm_dp_pcon_frl_enable() - Enable HDMI link through FRL
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
+{
+	int ret;
+	u8 buf = 0;
+
+	ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+	if (ret < 0)
+		return ret;
+	if (!(buf & DP_PCON_ENABLE_SOURCE_CTL_MODE)) {
+		DRM_DEBUG_KMS("PCON in Autonomous mode, can't enable FRL\n");
+		return -EINVAL;
+	}
+	buf |= DP_PCON_ENABLE_HDMI_LINK;
+	ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_enable);
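+
+/*
+ * Illustrative bring-up sequence for the FRL helpers above; a minimal
+ * sketch, with real drivers adding error handling and picking bandwidths
+ * from the sink's capabilities:
+ *
+ *	ret = drm_dp_pcon_frl_prepare(aux, false);
+ *	if (!ret && drm_dp_pcon_is_frl_ready(aux)) {
+ *		ret = drm_dp_pcon_frl_configure_1(aux, 18, false);
+ *		if (!ret)
+ *			ret = drm_dp_pcon_frl_configure_2(aux, max_frl_mask,
+ *							  false);
+ *		if (!ret)
+ *			ret = drm_dp_pcon_frl_enable(aux);
+ *	}
+ *
+ * where max_frl_mask is a driver-chosen bitmask of bandwidths to train.
+ */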
+
+/**
+ * drm_dp_pcon_hdmi_link_active() - check if the PCON HDMI link is active
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns true if the link is active, false otherwise.
+ */
+bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux)
+{
+	u8 buf;
+	int ret;
+
+	ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+	if (ret < 0)
+		return false;
+
+	return buf & DP_PCON_HDMI_TX_LINK_ACTIVE;
+}
+EXPORT_SYMBOL(drm_dp_pcon_hdmi_link_active);
+
+/**
+ * drm_dp_pcon_hdmi_link_mode() - get the PCON HDMI link mode
+ * @aux: DisplayPort AUX channel
+ * @frl_trained_mask: pointer to store the bitmask of the trained bandwidth
+ * configuration. Valid only if the mode returned is FRL. For normal link
+ * training mode only one of the bits will be set, but in extended mode more
+ * than one bit can be set.
+ *
+ * Returns the link mode, TMDS or FRL, on success, else returns a negative
+ * error code.
+ */
+int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask)
+{
+	u8 buf;
+	int mode;
+	int ret;
+
+	ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
+	if (ret < 0)
+		return ret;
+
+	mode = buf & DP_PCON_HDMI_LINK_MODE;
+
+	if (frl_trained_mask && mode == DP_PCON_HDMI_MODE_FRL)
+		*frl_trained_mask = (buf & DP_PCON_HDMI_FRL_TRAINED_BW) >> 1;
+
+	return mode;
+}
+EXPORT_SYMBOL(drm_dp_pcon_hdmi_link_mode);
+
+/**
+ * drm_dp_pcon_hdmi_frl_link_error_count() - print the error count per lane
+ * during link failure between PCON and HDMI sink
+ * @aux: DisplayPort AUX channel
+ * @connector: DRM connector
+ */
+void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
+					   struct drm_connector *connector)
+{
+	u8 buf, error_count;
+	int i, num_error;
+	struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
+
+	for (i = 0; i < hdmi->max_lanes; i++) {
+		if (drm_dp_dpcd_readb(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
+			return;
+
+		error_count = buf & DP_PCON_HDMI_ERROR_COUNT_MASK;
+		switch (error_count) {
+		case DP_PCON_HDMI_ERROR_COUNT_HUNDRED_PLUS:
+			num_error = 100;
+			break;
+		case DP_PCON_HDMI_ERROR_COUNT_TEN_PLUS:
+			num_error = 10;
+			break;
+		case DP_PCON_HDMI_ERROR_COUNT_THREE_PLUS:
+			num_error = 3;
+			break;
+		default:
+			num_error = 0;
+		}
+
+		DRM_ERROR("More than %d errors since the last read for lane %d\n", num_error, i);
+	}
+}
+EXPORT_SYMBOL(drm_dp_pcon_hdmi_frl_link_error_count);
+
+/*
+ * drm_dp_pcon_enc_is_dsc_1_2 - Does the PCON encoder support DSC 1.2
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC encoder
+ *
+ * Returns true if the PCON encoder supports DSC 1.2, false otherwise.
+ */
+bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+	u8 buf;
+	u8 major_v, minor_v;
+
+	buf = pcon_dsc_dpcd[DP_PCON_DSC_VERSION - DP_PCON_DSC_ENCODER];
+	major_v = (buf & DP_PCON_DSC_MAJOR_MASK) >> DP_PCON_DSC_MAJOR_SHIFT;
+	minor_v = (buf & DP_PCON_DSC_MINOR_MASK) >> DP_PCON_DSC_MINOR_SHIFT;
+
+	if (major_v == 1 && minor_v == 2)
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL(drm_dp_pcon_enc_is_dsc_1_2);
+
+/*
+ * drm_dp_pcon_dsc_max_slices - Get max slices supported by PCON DSC Encoder
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
+ *
+ * Returns the maximum number of slices supported by the PCON DSC encoder.
+ */
+int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+	u8 slice_cap1, slice_cap2;
+
+	slice_cap1 = pcon_dsc_dpcd[DP_PCON_DSC_SLICE_CAP_1 - DP_PCON_DSC_ENCODER];
+	slice_cap2 = pcon_dsc_dpcd[DP_PCON_DSC_SLICE_CAP_2 - DP_PCON_DSC_ENCODER];
+
+	if (slice_cap2 & DP_PCON_DSC_24_PER_DSC_ENC)
+		return 24;
+	if (slice_cap2 & DP_PCON_DSC_20_PER_DSC_ENC)
+		return 20;
+	if (slice_cap2 & DP_PCON_DSC_16_PER_DSC_ENC)
+		return 16;
+	if (slice_cap1 & DP_PCON_DSC_12_PER_DSC_ENC)
+		return 12;
+	if (slice_cap1 & DP_PCON_DSC_10_PER_DSC_ENC)
+		return 10;
+	if (slice_cap1 & DP_PCON_DSC_8_PER_DSC_ENC)
+		return 8;
+	if (slice_cap1 & DP_PCON_DSC_6_PER_DSC_ENC)
+		return 6;
+	if (slice_cap1 & DP_PCON_DSC_4_PER_DSC_ENC)
+		return 4;
+	if (slice_cap1 & DP_PCON_DSC_2_PER_DSC_ENC)
+		return 2;
+	if (slice_cap1 & DP_PCON_DSC_1_PER_DSC_ENC)
+		return 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_dsc_max_slices);
+
+/*
+ * drm_dp_pcon_dsc_max_slice_width() - Get max slice width for PCON DSC encoder
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC encoder
+ *
+ * Returns the maximum slice width in pixels, i.e. the register value
+ * multiplied by 320.
+ */
+int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+	u8 buf;
+
+	buf = pcon_dsc_dpcd[DP_PCON_DSC_MAX_SLICE_WIDTH - DP_PCON_DSC_ENCODER];
+
+	return buf * DP_DSC_SLICE_WIDTH_MULTIPLIER;
+}
+EXPORT_SYMBOL(drm_dp_pcon_dsc_max_slice_width);
+
+/**
+ * drm_dp_pcon_dsc_bpp_incr() - Get bits per pixel increment for PCON DSC encoder
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
+ *
+ * Returns the bpp precision supported by the PCON encoder as the denominator
+ * of the increment, e.g. 16 for a 1/16 bpp granularity.
+ */
+int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+	u8 buf;
+
+	buf = pcon_dsc_dpcd[DP_PCON_DSC_BPP_INCR - DP_PCON_DSC_ENCODER];
+
+	switch (buf & DP_PCON_DSC_BPP_INCR_MASK) {
+	case DP_PCON_DSC_ONE_16TH_BPP:
+		return 16;
+	case DP_PCON_DSC_ONE_8TH_BPP:
+		return 8;
+	case DP_PCON_DSC_ONE_4TH_BPP:
+		return 4;
+	case DP_PCON_DSC_ONE_HALF_BPP:
+		return 2;
+	case DP_PCON_DSC_ONE_BPP:
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_dsc_bpp_incr);
+
+static
+int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
+{
+	u8 buf;
+	int ret;
+
+	ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+	if (ret < 0)
+		return ret;
+
+	buf |= DP_PCON_ENABLE_DSC_ENCODER;
+
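+	/* The override source is selected in the DP_PCON_ENCODER_PPS_OVERRIDE_MASK field */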
+	if (pps_buf_config <= DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER) {
+		buf &= ~DP_PCON_ENCODER_PPS_OVERRIDE_MASK;
+		buf |= pps_buf_config << 2;
+	}
+
+	ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/**
+ * drm_dp_pcon_pps_default() - Let PCON fill the default PPS parameters
+ * for DSC 1.2 between PCON & HDMI 2.1 sink
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_pps_default(struct drm_dp_aux *aux)
+{
+	int ret;
+
+	ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_pps_default);
+
+/**
+ * drm_dp_pcon_pps_override_buf() - Configure PPS encoder override buffer for
+ * HDMI sink
+ * @aux: DisplayPort AUX channel
+ * @pps_buf: 128 bytes to be written into PPS buffer for HDMI sink by PCON.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128])
+{
+	int ret;
+
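+	/* Load the 128-byte PPS table first, then point the DSC encoder at the buffer */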
+	ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, pps_buf, 128);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_pps_override_buf);
+
+/**
+ * drm_dp_pcon_pps_override_param() - Write PPS parameters to DSC encoder
+ * override registers
+ * @aux: DisplayPort AUX channel
+ * @pps_param: 3 parameters (2 bytes each): slice height, slice width and
+ * bits_per_pixel, written in that order.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6])
+{
+	int ret;
+
+	ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
+	if (ret < 0)
+		return ret;
+	ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
+	if (ret < 0)
+		return ret;
+	ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_pps_override_param);
+
+/**
+ * drm_dp_pcon_convert_rgb_to_ycbcr() - Configure the PCON to convert RGB to YCbCr
+ * @aux: DisplayPort AUX channel
+ * @color_spc: Color space(s) for which conversion is to be enabled, 0 to disable.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
+{
+	int ret;
+	u8 buf;
+
+	ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+	if (ret < 0)
+		return ret;
+
+	if (color_spc & DP_CONVERSION_RGB_YCBCR_MASK)
+		buf |= (color_spc & DP_CONVERSION_RGB_YCBCR_MASK);
+	else
+		buf &= ~DP_CONVERSION_RGB_YCBCR_MASK;
+
+	ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index b11c0522a4410676c8ef6180a98ea984ac0687e2..309afe61afdd55616e8f943b4c81de8d34188e50 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2302,7 +2302,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
 	}
 
 	if (port->pdt != DP_PEER_DEVICE_NONE &&
-	    drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
+	    drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
+	    port->port_num >= DP_MST_LOGICAL_PORT_0) {
 		port->cached_edid = drm_get_edid(port->connector,
 						 &port->aux.ddc);
 		drm_connector_set_tile_property(port->connector);
@@ -2751,7 +2752,7 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
 	drm_dp_mst_topology_put_mstb(mstb);
 
 	mutex_unlock(&mgr->probe_lock);
-	if (ret)
+	if (ret > 0)
 		drm_kms_helper_hotplug_event(dev);
 }
 
@@ -5837,8 +5838,7 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
 	if (drm_dp_read_desc(port->mgr->aux, &desc, true))
 		return NULL;
 
-	if (drm_dp_has_quirk(&desc, 0,
-			     DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
+	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
 	    port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
 	    port->parent == port->mgr->mst_primary) {
 		u8 downstreamport;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 734303802bc3989ac7f3b88f7111d74bf7c72881..20d22e41d7ce74d7f7b0698d271dabd64f7ce488 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -469,6 +469,9 @@ void drm_dev_unplug(struct drm_device *dev)
 	synchronize_srcu(&drm_unplug_srcu);
 
 	drm_dev_unregister(dev);
+
+	/* Clear all CPU mappings pointing to this device */
+	unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
 
@@ -589,11 +592,7 @@ static int drm_dev_init(struct drm_device *dev,
 
 	kref_init(&dev->ref);
 	dev->dev = get_device(parent);
-#ifdef CONFIG_DRM_LEGACY
-	dev->driver = (struct drm_driver *)driver;
-#else
 	dev->driver = driver;
-#endif
 
 	INIT_LIST_HEAD(&dev->managed.resources);
 	spin_lock_init(&dev->managed.lock);
@@ -675,11 +674,8 @@ static int devm_drm_dev_init(struct device *parent,
 	if (ret)
 		return ret;
 
-	ret = devm_add_action(parent, devm_drm_dev_init_release, dev);
-	if (ret)
-		devm_drm_dev_init_release(dev);
-
-	return ret;
+	return devm_add_action_or_reset(parent,
+					devm_drm_dev_init_release, dev);
 }
 
 void *__devm_drm_dev_alloc(struct device *parent,
@@ -897,8 +893,6 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_modeset_register_all(dev);
 
-	ret = 0;
-
 	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
 		 driver->name, driver->major, driver->minor,
 		 driver->patchlevel, driver->date,
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index 4a475d9696ff39dae1ace772c0af1f27897a07cc..ff602f7ec65b1e722458a562abf99a138b59b755 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -49,6 +49,33 @@ void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header)
 }
 EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
 
+/**
+ * drm_dsc_dp_rc_buffer_size - get rc buffer size in bytes
+ * @rc_buffer_block_size: block size code, according to DPCD offset 62h
+ * @rc_buffer_size: number of blocks - 1, according to DPCD offset 63h
+ *
+ * Return:
+ * Buffer size in bytes, or 0 on invalid input.
+ */
+int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size)
+{
+	int size = 1024 * (rc_buffer_size + 1);
+
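+	/* The block size code scales the 1 KiB granularity by 1, 4, 16 or 64 */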
+	switch (rc_buffer_block_size) {
+	case DP_DSC_RC_BUF_BLK_SIZE_1:
+		return 1 * size;
+	case DP_DSC_RC_BUF_BLK_SIZE_4:
+		return 4 * size;
+	case DP_DSC_RC_BUF_BLK_SIZE_16:
+		return 16 * size;
+	case DP_DSC_RC_BUF_BLK_SIZE_64:
+		return 64 * size;
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL(drm_dsc_dp_rc_buffer_size);
+
 /**
  * drm_dsc_pps_payload_pack() - Populates the DSC PPS
  *
@@ -186,8 +213,7 @@ void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload,
 	pps_payload->flatness_max_qp = dsc_cfg->flatness_max_qp;
 
 	/* PPS 38, 39 */
-	pps_payload->rc_model_size =
-		cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
+	pps_payload->rc_model_size = cpu_to_be16(dsc_cfg->rc_model_size);
 
 	/* PPS 40 */
 	pps_payload->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index d18a740fe0f181f076f33360d321bf7c10ed8df2..ad17fa21cebb514b16212bd092a44df5497e5c3a 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -29,6 +29,7 @@
 #include <drm/drm_mode.h>
 
 #include "drm_crtc_internal.h"
+#include "drm_internal.h"
 
 /**
  * DOC: overview
@@ -46,9 +47,10 @@
  * KMS frame buffers.
  *
  * To support dumb objects drivers must implement the &drm_driver.dumb_create
- * operation. &drm_driver.dumb_destroy defaults to drm_gem_dumb_destroy() if
- * not set and &drm_driver.dumb_map_offset defaults to
- * drm_gem_dumb_map_offset(). See the callbacks for further details.
+ * and &drm_driver.dumb_map_offset operations (the latter defaults to
+ * drm_gem_dumb_map_offset() if not set). Drivers that don't use GEM handles
+ * additionally need to implement the &drm_driver.dumb_destroy operation. See
+ * the callbacks for further details.
  *
  * Note that dumb objects may not be used for gpu acceleration, as has been
  * attempted on some ARM embedded platforms. Such drivers really must have
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index e95cce8e736dc7090a6091046aae53871e92a27b..c2bbe7bee7b658a911decaab5e0d6c23ca9565cc 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -32,6 +32,7 @@
 #include <linux/i2c.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/vga_switcheroo.h>
 
@@ -2075,9 +2076,13 @@ EXPORT_SYMBOL(drm_get_edid);
 struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
 				     struct i2c_adapter *adapter)
 {
-	struct pci_dev *pdev = connector->dev->pdev;
+	struct drm_device *dev = connector->dev;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct edid *edid;
 
+	if (drm_WARN_ON_ONCE(dev, !dev_is_pci(dev->dev)))
+		return NULL;
+
 	vga_switcheroo_lock_ddc(pdev);
 	edid = drm_get_edid(connector, adapter);
 	vga_switcheroo_unlock_ddc(pdev);
@@ -4851,6 +4856,41 @@ static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db)
 		info->rgb_quant_range_selectable = true;
 }
 
+static
+void drm_get_max_frl_rate(int max_frl_rate, u8 *max_lanes, u8 *max_rate_per_lane)
+{
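+	/* Max_FRL_Rate (HDMI 2.1): each code selects a lane count and a per-lane Gbps rate */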
+	switch (max_frl_rate) {
+	case 1:
+		*max_lanes = 3;
+		*max_rate_per_lane = 3;
+		break;
+	case 2:
+		*max_lanes = 3;
+		*max_rate_per_lane = 6;
+		break;
+	case 3:
+		*max_lanes = 4;
+		*max_rate_per_lane = 6;
+		break;
+	case 4:
+		*max_lanes = 4;
+		*max_rate_per_lane = 8;
+		break;
+	case 5:
+		*max_lanes = 4;
+		*max_rate_per_lane = 10;
+		break;
+	case 6:
+		*max_lanes = 4;
+		*max_rate_per_lane = 12;
+		break;
+	case 0:
+	default:
+		*max_lanes = 0;
+		*max_rate_per_lane = 0;
+	}
+}
+
 static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
 					       const u8 *db)
 {
@@ -4904,6 +4944,74 @@ static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
 		}
 	}
 
+	if (hf_vsdb[7]) {
+		u8 max_frl_rate;
+		u8 dsc_max_frl_rate;
+		u8 dsc_max_slices;
+		struct drm_hdmi_dsc_cap *hdmi_dsc = &hdmi->dsc_cap;
+
+		DRM_DEBUG_KMS("hdmi_21 sink detected. parsing edid\n");
+		max_frl_rate = (hf_vsdb[7] & DRM_EDID_MAX_FRL_RATE_MASK) >> 4;
+		drm_get_max_frl_rate(max_frl_rate, &hdmi->max_lanes,
+				     &hdmi->max_frl_rate_per_lane);
+		hdmi_dsc->v_1p2 = hf_vsdb[11] & DRM_EDID_DSC_1P2;
+
+		if (hdmi_dsc->v_1p2) {
+			hdmi_dsc->native_420 = hf_vsdb[11] & DRM_EDID_DSC_NATIVE_420;
+			hdmi_dsc->all_bpp = hf_vsdb[11] & DRM_EDID_DSC_ALL_BPP;
+
+			if (hf_vsdb[11] & DRM_EDID_DSC_16BPC)
+				hdmi_dsc->bpc_supported = 16;
+			else if (hf_vsdb[11] & DRM_EDID_DSC_12BPC)
+				hdmi_dsc->bpc_supported = 12;
+			else if (hf_vsdb[11] & DRM_EDID_DSC_10BPC)
+				hdmi_dsc->bpc_supported = 10;
+			else
+				hdmi_dsc->bpc_supported = 0;
+
+			dsc_max_frl_rate = (hf_vsdb[12] & DRM_EDID_DSC_MAX_FRL_RATE_MASK) >> 4;
+			drm_get_max_frl_rate(dsc_max_frl_rate, &hdmi_dsc->max_lanes,
+					     &hdmi_dsc->max_frl_rate_per_lane);
+			hdmi_dsc->total_chunk_kbytes = hf_vsdb[13] & DRM_EDID_DSC_TOTAL_CHUNK_KBYTES;
+
+			dsc_max_slices = hf_vsdb[12] & DRM_EDID_DSC_MAX_SLICES;
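+			/* DSC_MaxSlices encodes both the slice count and the max pixel clock per slice */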
+			switch (dsc_max_slices) {
+			case 1:
+				hdmi_dsc->max_slices = 1;
+				hdmi_dsc->clk_per_slice = 340;
+				break;
+			case 2:
+				hdmi_dsc->max_slices = 2;
+				hdmi_dsc->clk_per_slice = 340;
+				break;
+			case 3:
+				hdmi_dsc->max_slices = 4;
+				hdmi_dsc->clk_per_slice = 340;
+				break;
+			case 4:
+				hdmi_dsc->max_slices = 8;
+				hdmi_dsc->clk_per_slice = 340;
+				break;
+			case 5:
+				hdmi_dsc->max_slices = 8;
+				hdmi_dsc->clk_per_slice = 400;
+				break;
+			case 6:
+				hdmi_dsc->max_slices = 12;
+				hdmi_dsc->clk_per_slice = 400;
+				break;
+			case 7:
+				hdmi_dsc->max_slices = 16;
+				hdmi_dsc->clk_per_slice = 400;
+				break;
+			case 0:
+			default:
+				hdmi_dsc->max_slices = 0;
+				hdmi_dsc->clk_per_slice = 0;
+			}
+		}
+	}
+
 	drm_parse_ycbcr420_deep_color_info(connector, hf_vsdb);
 }
 
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index e555281f43d4b4daf5919e61ccf1549fca90f1d6..72e982323a5e5b02081a6ba396e421d06bc2c20d 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -26,6 +26,7 @@
 #include <drm/drm_device.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_encoder.h>
+#include <drm/drm_managed.h>
 
 #include "drm_crtc_internal.h"
 
@@ -72,7 +73,7 @@ int drm_encoder_register_all(struct drm_device *dev)
 	int ret = 0;
 
 	drm_for_each_encoder(encoder, dev) {
-		if (encoder->funcs->late_register)
+		if (encoder->funcs && encoder->funcs->late_register)
 			ret = encoder->funcs->late_register(encoder);
 		if (ret)
 			return ret;
@@ -86,30 +87,16 @@ void drm_encoder_unregister_all(struct drm_device *dev)
 	struct drm_encoder *encoder;
 
 	drm_for_each_encoder(encoder, dev) {
-		if (encoder->funcs->early_unregister)
+		if (encoder->funcs && encoder->funcs->early_unregister)
 			encoder->funcs->early_unregister(encoder);
 	}
 }
 
-/**
- * drm_encoder_init - Init a preallocated encoder
- * @dev: drm device
- * @encoder: the encoder to init
- * @funcs: callbacks for this encoder
- * @encoder_type: user visible type of the encoder
- * @name: printf style format string for the encoder name, or NULL for default name
- *
- * Initialises a preallocated encoder. Encoder should be subclassed as part of
- * driver encoder objects. At driver unload time drm_encoder_cleanup() should be
- * called from the driver's &drm_encoder_funcs.destroy hook.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_encoder_init(struct drm_device *dev,
-		     struct drm_encoder *encoder,
-		     const struct drm_encoder_funcs *funcs,
-		     int encoder_type, const char *name, ...)
+__printf(5, 0)
+static int __drm_encoder_init(struct drm_device *dev,
+			      struct drm_encoder *encoder,
+			      const struct drm_encoder_funcs *funcs,
+			      int encoder_type, const char *name, va_list ap)
 {
 	int ret;
 
@@ -125,11 +112,7 @@ int drm_encoder_init(struct drm_device *dev,
 	encoder->encoder_type = encoder_type;
 	encoder->funcs = funcs;
 	if (name) {
-		va_list ap;
-
-		va_start(ap, name);
 		encoder->name = kvasprintf(GFP_KERNEL, name, ap);
-		va_end(ap);
 	} else {
 		encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
 					  drm_encoder_enum_list[encoder_type].name,
@@ -150,6 +133,44 @@ int drm_encoder_init(struct drm_device *dev,
 
 	return ret;
 }
+
+/**
+ * drm_encoder_init - Init a preallocated encoder
+ * @dev: drm device
+ * @encoder: the encoder to init
+ * @funcs: callbacks for this encoder
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * Initializes a preallocated encoder. Encoder should be subclassed as part of
+ * driver encoder objects. At driver unload time the driver's
+ * &drm_encoder_funcs.destroy hook should call drm_encoder_cleanup() and kfree()
+ * the encoder structure. The encoder structure should not be allocated with
+ * devm_kzalloc().
+ *
+ * Note: consider using drmm_encoder_alloc() instead of drm_encoder_init() to
+ * let the DRM managed resource infrastructure take care of cleanup and
+ * deallocation.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_encoder_init(struct drm_device *dev,
+		     struct drm_encoder *encoder,
+		     const struct drm_encoder_funcs *funcs,
+		     int encoder_type, const char *name, ...)
+{
+	va_list ap;
+	int ret;
+
+	WARN_ON(!funcs->destroy);
+
+	va_start(ap, name);
+	ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
+	va_end(ap);
+
+	return ret;
+}
 EXPORT_SYMBOL(drm_encoder_init);
 
 /**
@@ -181,6 +202,48 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
 }
 EXPORT_SYMBOL(drm_encoder_cleanup);
 
+static void drmm_encoder_alloc_release(struct drm_device *dev, void *ptr)
+{
+	struct drm_encoder *encoder = ptr;
+
+	if (WARN_ON(!encoder->dev))
+		return;
+
+	drm_encoder_cleanup(encoder);
+}
+
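+/* Called via the drmm_encoder_alloc() macro, which supplies size and offset */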
+void *__drmm_encoder_alloc(struct drm_device *dev, size_t size, size_t offset,
+			   const struct drm_encoder_funcs *funcs,
+			   int encoder_type, const char *name, ...)
+{
+	void *container;
+	struct drm_encoder *encoder;
+	va_list ap;
+	int ret;
+
+	if (WARN_ON(funcs && funcs->destroy))
+		return ERR_PTR(-EINVAL);
+
+	container = drmm_kzalloc(dev, size, GFP_KERNEL);
+	if (!container)
+		return ERR_PTR(-ENOMEM);
+
+	encoder = container + offset;
+
+	va_start(ap, name);
+	ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
+	va_end(ap);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = drmm_add_action_or_reset(dev, drmm_encoder_alloc_release, encoder);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return container;
+}
+EXPORT_SYMBOL(__drmm_encoder_alloc);
+
 static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
 {
 	struct drm_connector *connector;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4b81195106875d97abf38310ab4f8b4105ebfcd1..a44c3a4380592746ccb57ff33f68a1d3c2e797a6 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -946,11 +946,15 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
 	drm_modeset_lock_all(fb_helper->dev);
 	drm_client_for_each_modeset(modeset, &fb_helper->client) {
 		crtc = modeset->crtc;
-		if (!crtc->funcs->gamma_set || !crtc->gamma_size)
-			return -EINVAL;
+		if (!crtc->funcs->gamma_set || !crtc->gamma_size) {
+			ret = -EINVAL;
+			goto out;
+		}
 
-		if (cmap->start + cmap->len > crtc->gamma_size)
-			return -EINVAL;
+		if (cmap->start + cmap->len > crtc->gamma_size) {
+			ret = -EINVAL;
+			goto out;
+		}
 
 		r = crtc->gamma_store;
 		g = r + crtc->gamma_size;
@@ -963,8 +967,9 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
 		ret = crtc->funcs->gamma_set(crtc, r, g, b,
 					     crtc->gamma_size, NULL);
 		if (ret)
-			return ret;
+			goto out;
 	}
+out:
 	drm_modeset_unlock_all(fb_helper->dev);
 
 	return ret;
@@ -1054,6 +1059,11 @@ static int setcmap_atomic(struct fb_cmap *cmap, struct fb_info *info)
 			goto out_state;
 		}
 
+		/*
+		 * FIXME: This always uses gamma_lut. Some HW have only
+		 * degamma_lut, in which case we should reset gamma_lut and set
+		 * degamma_lut. See drm_crtc_legacy_gamma_set().
+		 */
 		replaced  = drm_property_replace_blob(&crtc_state->degamma_lut,
 						      NULL);
 		replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
@@ -2486,6 +2496,11 @@ void drm_fbdev_generic_setup(struct drm_device *dev,
 		return;
 	}
 
+	/*
+	 * FIXME: This mixes up depth with bpp, which results in a glorious
+	 * mess, resulting in some drivers picking wrong fbdev defaults and
+	 * others wrong preferred_depth defaults.
+	 */
 	if (!preferred_bpp)
 		preferred_bpp = dev->mode_config.preferred_depth;
 	if (!preferred_bpp)
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index b50380fa80ce83389172cf4f3a053299aaaf46cc..6b116bfd747c3ccd031d7805387a742634469cd7 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -113,8 +113,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
  * The memory mapping implementation will vary depending on how the driver
  * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
  * function, modern drivers should use one of the provided memory-manager
- * specific implementations. For GEM-based drivers this is drm_gem_mmap(), and
- * for drivers which use the CMA GEM helpers it's drm_gem_cma_mmap().
+ * specific implementations. For GEM-based drivers this is drm_gem_mmap().
  *
  * No other file operations are supported by the DRM userspace API. Overall the
  * following is an example &file_operations structure::
@@ -240,9 +239,6 @@ static void drm_events_release(struct drm_file *file_priv)
  * before calling this.
  *
  * If NULL is passed, this is a no-op.
- *
- * RETURNS:
- * 0 on success, or error code on failure.
  */
 void drm_file_free(struct drm_file *file)
 {
@@ -371,6 +367,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
 	list_add(&priv->lhead, &dev->filelist);
 	mutex_unlock(&dev->filelist_mutex);
 
+#ifdef CONFIG_DRM_LEGACY
 #ifdef __alpha__
 	/*
 	 * Default the hose
@@ -390,6 +387,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
 				dev->hose = b->sysdata;
 		}
 	}
+#endif
 #endif
 
 	return 0;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 92f89cee213e8290a9a703dc0919d6424d227526..c2ce78c4edc3ee287e346c088e8bfbc1c440392d 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -335,22 +335,12 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 }
 EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
 
-/**
- * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
- * @file: drm file-private structure to remove the dumb handle from
- * @dev: corresponding drm_device
- * @handle: the dumb handle to remove
- *
- * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
- * which use gem to manage their backing storage.
- */
 int drm_gem_dumb_destroy(struct drm_file *file,
 			 struct drm_device *dev,
-			 uint32_t handle)
+			 u32 handle)
 {
 	return drm_gem_handle_delete(file, handle);
 }
-EXPORT_SYMBOL(drm_gem_dumb_destroy);
 
 /**
  * drm_gem_handle_create_tail - internal functions to create a handle
@@ -1078,20 +1068,17 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 	drm_gem_object_get(obj);
 
 	vma->vm_private_data = obj;
+	vma->vm_ops = obj->funcs->vm_ops;
 
 	if (obj->funcs->mmap) {
 		ret = obj->funcs->mmap(obj, vma);
-		if (ret) {
-			drm_gem_object_put(obj);
-			return ret;
-		}
+		if (ret)
+			goto err_drm_gem_object_put;
 		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
 	} else {
-		if (obj->funcs->vm_ops)
-			vma->vm_ops = obj->funcs->vm_ops;
-		else {
-			drm_gem_object_put(obj);
-			return -EINVAL;
+		if (!vma->vm_ops) {
+			ret = -EINVAL;
+			goto err_drm_gem_object_put;
 		}
 
 		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
@@ -1100,6 +1087,10 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 	}
 
 	return 0;
+
+err_drm_gem_object_put:
+	drm_gem_object_put(obj);
+	return ret;
 }
 EXPORT_SYMBOL(drm_gem_mmap_obj);
 
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 4d5c1d86b0222d2c42e77342454f8f016a10e5aa..7942cf05cd93825d6778d98f22ebb959eb9977fe 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -36,8 +36,9 @@
 static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
 	.free = drm_gem_cma_free_object,
 	.print_info = drm_gem_cma_print_info,
-	.get_sg_table = drm_gem_cma_prime_get_sg_table,
-	.vmap = drm_gem_cma_prime_vmap,
+	.get_sg_table = drm_gem_cma_get_sg_table,
+	.vmap = drm_gem_cma_vmap,
+	.mmap = drm_gem_cma_mmap,
 	.vm_ops = &drm_gem_cma_vm_ops,
 };
 
@@ -277,62 +278,6 @@ const struct vm_operations_struct drm_gem_cma_vm_ops = {
 };
 EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
 
-static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
-				struct vm_area_struct *vma)
-{
-	int ret;
-
-	/*
-	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
-	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
-	 * the whole buffer.
-	 */
-	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_pgoff = 0;
-
-	ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
-			  cma_obj->paddr, vma->vm_end - vma->vm_start);
-	if (ret)
-		drm_gem_vm_close(vma);
-
-	return ret;
-}
-
-/**
- * drm_gem_cma_mmap - memory-map a CMA GEM object
- * @filp: file object
- * @vma: VMA for the area to be mapped
- *
- * This function implements an augmented version of the GEM DRM file mmap
- * operation for CMA objects: In addition to the usual GEM VMA setup it
- * immediately faults in the entire object instead of using on-demaind
- * faulting. Drivers which employ the CMA helpers should use this function
- * as their ->mmap() handler in the DRM device file's file_operations
- * structure.
- *
- * Instead of directly referencing this function, drivers should use the
- * DEFINE_DRM_GEM_CMA_FOPS().macro.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-	struct drm_gem_cma_object *cma_obj;
-	struct drm_gem_object *gem_obj;
-	int ret;
-
-	ret = drm_gem_mmap(filp, vma);
-	if (ret)
-		return ret;
-
-	gem_obj = vma->vm_private_data;
-	cma_obj = to_drm_gem_cma_obj(gem_obj);
-
-	return drm_gem_cma_mmap_obj(cma_obj, vma);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
-
 #ifndef CONFIG_MMU
 /**
  * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
@@ -424,18 +369,18 @@ void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
 EXPORT_SYMBOL(drm_gem_cma_print_info);
 
 /**
- * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
+ * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned
  *     pages for a CMA GEM object
  * @obj: GEM object
  *
- * This function exports a scatter/gather table suitable for PRIME usage by
+ * This function exports a scatter/gather table by
  * calling the standard DMA mapping API. Drivers using the CMA helpers should
  * set this as their &drm_gem_object_funcs.get_sg_table callback.
  *
  * Returns:
  * A pointer to the scatter/gather table of pinned pages or NULL on failure.
  */
-struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
+struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj)
 {
 	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
 	struct sg_table *sgt;
@@ -456,7 +401,7 @@ struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
 	kfree(sgt);
 	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
+EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table);
 
 /**
  * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
@@ -501,40 +446,13 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
 EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
 
 /**
- * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
- * @obj: GEM object
- * @vma: VMA for the area to be mapped
- *
- * This function maps a buffer imported via DRM PRIME into a userspace
- * process's address space. Drivers that use the CMA helpers should set this
- * as their &drm_driver.gem_prime_mmap callback.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
-			   struct vm_area_struct *vma)
-{
-	struct drm_gem_cma_object *cma_obj;
-	int ret;
-
-	ret = drm_gem_mmap_obj(obj, obj->size, vma);
-	if (ret < 0)
-		return ret;
-
-	cma_obj = to_drm_gem_cma_obj(obj);
-	return drm_gem_cma_mmap_obj(cma_obj, vma);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
-
-/**
- * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
+ * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual
  *     address space
  * @obj: GEM object
  * @map: Returns the kernel virtual address of the CMA GEM object's backing
  *       store.
  *
- * This function maps a buffer exported via DRM PRIME into the kernel's
+ * This function maps a buffer into the kernel's
  * virtual address space. Since the CMA buffers are already mapped into the
  * kernel virtual address space this simply returns the cached virtual
  * address. Drivers using the CMA helpers should set this as their DRM
@@ -543,7 +461,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
  * Returns:
  * 0 on success, or a negative error code otherwise.
  */
-int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
+int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
 {
 	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
 
@@ -551,7 +469,44 @@ int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
+EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);
+
+/**
+ * drm_gem_cma_mmap - memory-map a CMA GEM object
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function maps a buffer into a userspace process's address space.
+ * In addition to the usual GEM VMA setup it immediately faults in the entire
+ * object instead of using on-demand faulting. Drivers that use the CMA
+ * helpers should set this as their &drm_gem_object_funcs.mmap callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	struct drm_gem_cma_object *cma_obj;
+	int ret;
+
+	/*
+	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and subtract
+	 * the fake buffer offset that DRM stored in vm_pgoff, as we want to map
+	 * the whole buffer.
+	 */
+	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+	vma->vm_flags &= ~VM_PFNMAP;
+
+	cma_obj = to_drm_gem_cma_obj(obj);
+
+	ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
+			  cma_obj->paddr, vma->vm_end - vma->vm_start);
+	if (ret)
+		drm_gem_vm_close(vma);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
 
 /**
  * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 81d386b5b92a03902e13de829a687e513d6ed1ab..fad2249ee67b2810d9e12cd0de3933c7ab7245ad 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -191,6 +191,9 @@ void drm_gem_unpin(struct drm_gem_object *obj);
 int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
 void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
 
+int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+			 u32 handle);
+
 /* drm_debugfs.c drm_debugfs_crc.c */
 #if defined(CONFIG_DEBUG_FS)
 int drm_debugfs_init(struct drm_minor *minor, int minor_id,
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 09d6e9e2e07515240a230097bd3b1f6b0cf2a02c..c3bd664ea733fe3311be5de77f95de2816392148 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -122,7 +122,7 @@ int drm_irq_install(struct drm_device *dev, int irq)
 		dev->driver->irq_preinstall(dev);
 
 	/* PCI devices require shared interrupts. */
-	if (dev->pdev)
+	if (dev_is_pci(dev->dev))
 		sh_flags = IRQF_SHARED;
 
 	ret = request_irq(irq, dev->driver->irq_handler,
@@ -140,7 +140,7 @@ int drm_irq_install(struct drm_device *dev, int irq)
 	if (ret < 0) {
 		dev->irq_enabled = false;
 		if (drm_core_check_feature(dev, DRIVER_LEGACY))
-			vga_client_register(dev->pdev, NULL, NULL, NULL);
+			vga_client_register(to_pci_dev(dev->dev), NULL, NULL, NULL);
 		free_irq(irq, dev);
 	} else {
 		dev->irq = irq;
@@ -203,7 +203,7 @@ int drm_irq_uninstall(struct drm_device *dev)
 	DRM_DEBUG("irq=%d\n", dev->irq);
 
 	if (drm_core_check_feature(dev, DRIVER_LEGACY))
-		vga_client_register(dev->pdev, NULL, NULL, NULL);
+		vga_client_register(to_pci_dev(dev->dev), NULL, NULL, NULL);
 
 	if (dev->driver->irq_uninstall)
 		dev->driver->irq_uninstall(dev);
@@ -214,12 +214,45 @@ int drm_irq_uninstall(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_irq_uninstall);
 
+static void devm_drm_irq_uninstall(void *data)
+{
+	drm_irq_uninstall(data);
+}
+
+/**
+ * devm_drm_irq_install - install IRQ handler
+ * @dev: DRM device
+ * @irq: IRQ number to install the handler for
+ *
+ * devm_drm_irq_install() is a managed helper around drm_irq_install().
+ *
+ * If the driver uses devm_drm_irq_install(), there is no need to call
+ * drm_irq_uninstall() when the DRM module gets unloaded, as this is done
+ * automatically.
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
+ */
+int devm_drm_irq_install(struct drm_device *dev, int irq)
+{
+	int ret;
+
+	ret = drm_irq_install(dev, irq);
+	if (ret)
+		return ret;
+
+	return devm_add_action_or_reset(dev->dev,
+					devm_drm_irq_uninstall, dev);
+}
+EXPORT_SYMBOL(devm_drm_irq_install);
+
 #if IS_ENABLED(CONFIG_DRM_LEGACY)
 int drm_legacy_irq_control(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
 	struct drm_control *ctl = data;
 	int ret = 0, irq;
+	struct pci_dev *pdev;
 
 	/* if we haven't irq we fallback for compatibility reasons -
 	 * this used to be a separate function in drm_dma.h
@@ -230,12 +263,13 @@ int drm_legacy_irq_control(struct drm_device *dev, void *data,
 	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 		return 0;
 	/* UMS was only ever supported on pci devices. */
-	if (WARN_ON(!dev->pdev))
+	if (WARN_ON(!dev_is_pci(dev->dev)))
 		return -EINVAL;
 
 	switch (ctl->func) {
 	case DRM_INST_HANDLER:
-		irq = dev->pdev->irq;
+		pdev = to_pci_dev(dev->dev);
+		irq = pdev->irq;
 
 		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
 		    ctl->irq != irq)
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index 1be3ea32047495379f2468854972c968136699a4..f71358f9eac94295317396053e37a7f10c48af8b 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -127,7 +127,7 @@ static inline void drm_legacy_master_rmmaps(struct drm_device *dev,
 static inline void drm_legacy_rmmaps(struct drm_device *dev) {}
 #endif
 
-#if IS_ENABLED(CONFIG_DRM_VM) && IS_ENABLED(CONFIG_DRM_LEGACY)
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
 void drm_legacy_vma_flush(struct drm_device *d);
 #else
 static inline void drm_legacy_vma_flush(struct drm_device *d)
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index fbea69d6f90902d93f910a4d93e50ba1ed9e3c61..e4f20a2eb6e793163756a22c1666b5ee894f9f78 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -37,7 +37,6 @@
 #include <linux/highmem.h>
 #include <linux/pci.h>
 #include <linux/vmalloc.h>
-#include <xen/xen.h>
 
 #include <drm/drm_agpsupport.h>
 #include <drm/drm_cache.h>
@@ -100,24 +99,6 @@ static void *agp_remap(unsigned long offset, unsigned long size,
 	return addr;
 }
 
-/** Wrapper around agp_free_memory() */
-void drm_free_agp(struct agp_memory *handle, int pages)
-{
-	agp_free_memory(handle);
-}
-
-/** Wrapper around agp_bind_memory() */
-int drm_bind_agp(struct agp_memory *handle, unsigned int start)
-{
-	return agp_bind_memory(handle, start);
-}
-
-/** Wrapper around agp_unbind_memory() */
-int drm_unbind_agp(struct agp_memory *handle)
-{
-	return agp_unbind_memory(handle);
-}
-
 #else /*  CONFIG_AGP  */
 static inline void *agp_remap(unsigned long offset, unsigned long size,
 			      struct drm_device *dev)
@@ -156,35 +137,3 @@ void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
 		iounmap(map->handle);
 }
 EXPORT_SYMBOL(drm_legacy_ioremapfree);
-
-bool drm_need_swiotlb(int dma_bits)
-{
-	struct resource *tmp;
-	resource_size_t max_iomem = 0;
-
-	/*
-	 * Xen paravirtual hosts require swiotlb regardless of requested dma
-	 * transfer size.
-	 *
-	 * NOTE: Really, what it requires is use of the dma_alloc_coherent
-	 *       allocator used in ttm_dma_populate() instead of
-	 *       ttm_populate_and_map_pages(), which bounce buffers so much in
-	 *       Xen it leads to swiotlb buffer exhaustion.
-	 */
-	if (xen_pv_domain())
-		return true;
-
-	/*
-	 * Enforce dma_alloc_coherent when memory encryption is active as well
-	 * for the same reasons as for Xen paravirtual hosts.
-	 */
-	if (mem_encrypt_active())
-		return true;
-
-	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
-		max_iomem = max(max_iomem,  tmp->end);
-	}
-
-	return max_iomem > ((u64)1 << dma_bits);
-}
-EXPORT_SYMBOL(drm_need_swiotlb);
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index f1affc1bb6799fdce233dbc27b4dd62754465953..37b4b9f0e468ad9a750ebd078637da71dd251db5 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -195,7 +195,7 @@ void drm_mode_config_reset(struct drm_device *dev)
 			crtc->funcs->reset(crtc);
 
 	drm_for_each_encoder(encoder, dev)
-		if (encoder->funcs->reset)
+		if (encoder->funcs && encoder->funcs->reset)
 			encoder->funcs->reset(encoder);
 
 	drm_connector_list_iter_begin(dev, &conn_iter);
@@ -625,6 +625,10 @@ static void validate_encoder_possible_crtcs(struct drm_encoder *encoder)
 void drm_mode_config_validate(struct drm_device *dev)
 {
 	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+	u32 primary_with_crtc = 0, cursor_with_crtc = 0;
+	unsigned int num_primary = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
@@ -636,4 +640,49 @@ void drm_mode_config_validate(struct drm_device *dev)
 		validate_encoder_possible_clones(encoder);
 		validate_encoder_possible_crtcs(encoder);
 	}
+
+	drm_for_each_crtc(crtc, dev) {
+		WARN(!crtc->primary, "Missing primary plane on [CRTC:%d:%s]\n",
+		     crtc->base.id, crtc->name);
+
+		WARN(crtc->cursor && crtc->funcs->cursor_set,
+		     "[CRTC:%d:%s] must not have both a cursor plane and a cursor_set func",
+		     crtc->base.id, crtc->name);
+		WARN(crtc->cursor && crtc->funcs->cursor_set2,
+		     "[CRTC:%d:%s] must not have both a cursor plane and a cursor_set2 func",
+		     crtc->base.id, crtc->name);
+		WARN(crtc->cursor && crtc->funcs->cursor_move,
+		     "[CRTC:%d:%s] must not have both a cursor plane and a cursor_move func",
+		     crtc->base.id, crtc->name);
+
+		if (crtc->primary) {
+			WARN(!(crtc->primary->possible_crtcs & drm_crtc_mask(crtc)),
+			     "Bogus primary plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n",
+			     crtc->primary->base.id, crtc->primary->name,
+			     crtc->base.id, crtc->name);
+			WARN(primary_with_crtc & drm_plane_mask(crtc->primary),
+			     "Primary plane [PLANE:%d:%s] used for multiple CRTCs",
+			     crtc->primary->base.id, crtc->primary->name);
+			primary_with_crtc |= drm_plane_mask(crtc->primary);
+		}
+		if (crtc->cursor) {
+			WARN(!(crtc->cursor->possible_crtcs & drm_crtc_mask(crtc)),
+			     "Bogus cursor plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n",
+			     crtc->cursor->base.id, crtc->cursor->name,
+			     crtc->base.id, crtc->name);
+			WARN(cursor_with_crtc & drm_plane_mask(crtc->cursor),
+			     "Cursor plane [PLANE:%d:%s] used for multiple CRTCs",
+			     crtc->cursor->base.id, crtc->cursor->name);
+			cursor_with_crtc |= drm_plane_mask(crtc->cursor);
+		}
+	}
+
+	drm_for_each_plane(plane, dev) {
+		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+			num_primary++;
+	}
+
+	WARN(num_primary != dev->mode_config.num_crtc,
+	     "Must have as many primary planes as there are CRTCs, but have %u primary planes and %u CRTCs",
+	     num_primary, dev->mode_config.num_crtc);
 }
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 33fb2f05ce66291c2de8ad9b1b0d8ab7d43132df..1ac67d4505e073933dd79d9e2ad21cf96f78844c 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -762,7 +762,7 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
 	if (mode->htotal == 0 || mode->vtotal == 0)
 		return 0;
 
-	num = mode->clock * 1000;
+	num = mode->clock;
 	den = mode->htotal * mode->vtotal;
 
 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -772,7 +772,7 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
 	if (mode->vscan > 1)
 		den *= mode->vscan;
 
-	return DIV_ROUND_CLOSEST(num, den);
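+	/* Multiply by 1000 in 64 bits to avoid overflow for pixel clocks above ~2 GHz */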
+	return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(num, 1000), den);
 }
 EXPORT_SYMBOL(drm_mode_vrefresh);
 
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 6dba4b8ce4fe2fd438f4d09ff870151a5e7255ec..2294a1580d359c5fcce0164ea011052e223092de 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -24,6 +24,8 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/export.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
 
@@ -36,6 +38,9 @@
 #include "drm_legacy.h"
 
 #ifdef CONFIG_DRM_LEGACY
+/* List of devices hanging off drivers with stealth attach. */
+static LIST_HEAD(legacy_dev_list);
+static DEFINE_MUTEX(legacy_dev_list_lock);
 
 /**
  * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
@@ -65,7 +70,7 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
 		return NULL;
 
 	dmah->size = size;
-	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
+	dmah->vaddr = dma_alloc_coherent(dev->dev, size,
 					 &dmah->busaddr,
 					 GFP_KERNEL);
 
@@ -88,7 +93,7 @@ EXPORT_SYMBOL(drm_pci_alloc);
  */
 void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
 {
-	dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+	dma_free_coherent(dev->dev, dmah->size, dmah->vaddr,
 			  dmah->busaddr);
 	kfree(dmah);
 }
@@ -107,16 +112,18 @@ static int drm_get_pci_domain(struct drm_device *dev)
 		return 0;
 #endif /* __alpha__ */
 
-	return pci_domain_nr(dev->pdev->bus);
+	return pci_domain_nr(to_pci_dev(dev->dev)->bus);
 }
 
 int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
 {
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+
 	master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
 					drm_get_pci_domain(dev),
-					dev->pdev->bus->number,
-					PCI_SLOT(dev->pdev->devfn),
-					PCI_FUNC(dev->pdev->devfn));
+					pdev->bus->number,
+					PCI_SLOT(pdev->devfn),
+					PCI_FUNC(pdev->devfn));
 	if (!master->unique)
 		return -ENOMEM;
 
@@ -126,12 +133,14 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
 
 static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
 {
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+
 	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
-	    (p->busnum & 0xff) != dev->pdev->bus->number ||
-	    p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
+	    (p->busnum & 0xff) != pdev->bus->number ||
+	    p->devnum != PCI_SLOT(pdev->devfn) || p->funcnum != PCI_FUNC(pdev->devfn))
 		return -EINVAL;
 
-	p->irq = dev->pdev->irq;
+	p->irq = pdev->irq;
 
 	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
 		  p->irq);
@@ -159,7 +168,7 @@ int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
 		return -EOPNOTSUPP;
 
 	/* UMS was only ever support on PCI devices. */
-	if (WARN_ON(!dev->pdev))
+	if (WARN_ON(!dev_is_pci(dev->dev)))
 		return -EINVAL;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
@@ -183,7 +192,7 @@ void drm_pci_agp_destroy(struct drm_device *dev)
 static void drm_pci_agp_init(struct drm_device *dev)
 {
 	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
-		if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
+		if (pci_find_capability(to_pci_dev(dev->dev), PCI_CAP_ID_AGP))
 			dev->agp = drm_agp_init(dev);
 		if (dev->agp) {
 			dev->agp->agp_mtrr = arch_phys_wc_add(
@@ -196,7 +205,7 @@ static void drm_pci_agp_init(struct drm_device *dev)
 
 static int drm_get_pci_dev(struct pci_dev *pdev,
 			   const struct pci_device_id *ent,
-			   struct drm_driver *driver)
+			   const struct drm_driver *driver)
 {
 	struct drm_device *dev;
 	int ret;
@@ -225,10 +234,11 @@ static int drm_get_pci_dev(struct pci_dev *pdev,
 	if (ret)
 		goto err_agp;
 
-	/* No locking needed since shadow-attach is single-threaded since it may
-	 * only be called from the per-driver module init hook. */
-	if (drm_core_check_feature(dev, DRIVER_LEGACY))
-		list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);
+	if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
+		mutex_lock(&legacy_dev_list_lock);
+		list_add_tail(&dev->legacy_dev_list, &legacy_dev_list);
+		mutex_unlock(&legacy_dev_list_lock);
+	}
 
 	return 0;
 
@@ -249,7 +259,8 @@ static int drm_get_pci_dev(struct pci_dev *pdev,
  *
  * Return: 0 on success or a negative error code on failure.
  */
-int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
+int drm_legacy_pci_init(const struct drm_driver *driver,
+			struct pci_driver *pdriver)
 {
 	struct pci_dev *pdev = NULL;
 	const struct pci_device_id *pid;
@@ -261,7 +272,6 @@ int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
 		return -EINVAL;
 
 	/* If not using KMS, fall back to stealth mode manual scanning. */
-	INIT_LIST_HEAD(&driver->legacy_dev_list);
 	for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
 		pid = &pdriver->id_table[i];
 
@@ -295,7 +305,8 @@ EXPORT_SYMBOL(drm_legacy_pci_init);
  * Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This
  * is deprecated and only used by dri1 drivers.
  */
-void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+void drm_legacy_pci_exit(const struct drm_driver *driver,
+			 struct pci_driver *pdriver)
 {
 	struct drm_device *dev, *tmp;
 
@@ -304,11 +315,15 @@ void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
 	if (!(driver->driver_features & DRIVER_LEGACY)) {
 		WARN_ON(1);
 	} else {
-		list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
+		mutex_lock(&legacy_dev_list_lock);
+		list_for_each_entry_safe(dev, tmp, &legacy_dev_list,
 					 legacy_dev_list) {
-			list_del(&dev->legacy_dev_list);
-			drm_put_dev(dev);
+			if (dev->driver == driver) {
+				list_del(&dev->legacy_dev_list);
+				drm_put_dev(dev);
+			}
 		}
+		mutex_unlock(&legacy_dev_list_lock);
 	}
 	DRM_INFO("Module unloaded\n");
 }
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index a0cb746bcb0a9a1344e0cd23fa3dcf7a228a9f3b..338650abd267a9ab97c045a8ba1bdc20bef6fe2d 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -30,6 +30,7 @@
 #include <drm/drm_file.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_vblank.h>
 
 #include "drm_crtc_internal.h"
@@ -40,7 +41,7 @@
  * A plane represents an image source that can be blended with or overlayed on
  * top of a CRTC during the scanout process. Planes take their input data from a
  * &drm_framebuffer object. The plane itself specifies the cropping and scaling
- * of that image, and where it is placed on the visible are of a display
+ * of that image, and where it is placed on the visible area of a display
  * pipeline, represented by &drm_crtc. A plane can also have additional
  * properties that specify how the pixels are positioned and blended, like
  * rotation or Z-position. All these properties are stored in &drm_plane_state.
@@ -49,14 +50,34 @@
  * &struct drm_plane (possibly as part of a larger structure) and registers it
  * with a call to drm_universal_plane_init().
  *
- * Cursor and overlay planes are optional. All drivers should provide one
- * primary plane per CRTC to avoid surprising userspace too much. See enum
- * drm_plane_type for a more in-depth discussion of these special uapi-relevant
- * plane types. Special planes are associated with their CRTC by calling
- * drm_crtc_init_with_planes().
- *
  * The type of a plane is exposed in the immutable "type" enumeration property,
- * which has one of the following values: "Overlay", "Primary", "Cursor".
+ * which has one of the following values: "Overlay", "Primary", "Cursor" (see
+ * enum drm_plane_type). A plane can be compatible with multiple CRTCs, see
+ * &drm_plane.possible_crtcs.
+ *
+ * Each CRTC must have a unique primary plane userspace can attach to enable
+ * the CRTC. In other words, userspace must be able to attach a different
+ * primary plane to each CRTC at the same time. Primary planes can still be
+ * compatible with multiple CRTCs. There must be exactly as many primary planes
+ * as there are CRTCs.
+ *
+ * Legacy uAPI doesn't expose the primary and cursor planes directly. DRM core
+ * relies on the driver to set the primary and optionally the cursor plane used
+ * for legacy IOCTLs. This is done by calling drm_crtc_init_with_planes(). All
+ * drivers must provide one primary plane per CRTC to avoid surprising legacy
+ * userspace too much.
+ */
+
+/**
+ * DOC: standard plane properties
+ *
+ * DRM planes have a few standardized properties:
+ *
+ * IN_FORMATS:
+ *     Blob property which contains the set of buffer format and modifier
+ *     pairs supported by this plane. The blob is a struct
+ *     drm_format_modifier_blob. Without this property the plane doesn't
+ *     support buffers with modifiers. Userspace cannot change this property.
  */
 
 static unsigned int drm_num_planes(struct drm_device *dev)
@@ -152,31 +173,16 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
 	return 0;
 }
 
-/**
- * drm_universal_plane_init - Initialize a new universal plane object
- * @dev: DRM device
- * @plane: plane object to init
- * @possible_crtcs: bitmask of possible CRTCs
- * @funcs: callbacks for the new plane
- * @formats: array of supported formats (DRM_FORMAT\_\*)
- * @format_count: number of elements in @formats
- * @format_modifiers: array of struct drm_format modifiers terminated by
- *                    DRM_FORMAT_MOD_INVALID
- * @type: type of plane (overlay, primary, cursor)
- * @name: printf style format string for the plane name, or NULL for default name
- *
- * Initializes a plane object of type @type.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
-			     uint32_t possible_crtcs,
-			     const struct drm_plane_funcs *funcs,
-			     const uint32_t *formats, unsigned int format_count,
-			     const uint64_t *format_modifiers,
-			     enum drm_plane_type type,
-			     const char *name, ...)
+__printf(9, 0)
+static int __drm_universal_plane_init(struct drm_device *dev,
+				      struct drm_plane *plane,
+				      uint32_t possible_crtcs,
+				      const struct drm_plane_funcs *funcs,
+				      const uint32_t *formats,
+				      unsigned int format_count,
+				      const uint64_t *format_modifiers,
+				      enum drm_plane_type type,
+				      const char *name, va_list ap)
 {
 	struct drm_mode_config *config = &dev->mode_config;
 	unsigned int format_modifier_count = 0;
@@ -237,11 +243,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
 	}
 
 	if (name) {
-		va_list ap;
-
-		va_start(ap, name);
 		plane->name = kvasprintf(GFP_KERNEL, name, ap);
-		va_end(ap);
 	} else {
 		plane->name = kasprintf(GFP_KERNEL, "plane-%d",
 					drm_num_planes(dev));
@@ -286,8 +288,102 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
 
 	return 0;
 }
+
+/**
+ * drm_universal_plane_init - Initialize a new universal plane object
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @format_modifiers: array of struct drm_format modifiers terminated by
+ *                    DRM_FORMAT_MOD_INVALID
+ * @type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
+ *
+ * Initializes a plane object of type @type. The &drm_plane_funcs.destroy hook
+ * should call drm_plane_cleanup() and kfree() the plane structure. The plane
+ * structure should not be allocated with devm_kzalloc().
+ *
+ * Note: consider using drmm_universal_plane_alloc() instead of
+ * drm_universal_plane_init() to let the DRM managed resource infrastructure
+ * take care of cleanup and deallocation.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
+			     uint32_t possible_crtcs,
+			     const struct drm_plane_funcs *funcs,
+			     const uint32_t *formats, unsigned int format_count,
+			     const uint64_t *format_modifiers,
+			     enum drm_plane_type type,
+			     const char *name, ...)
+{
+	va_list ap;
+	int ret;
+
+	WARN_ON(!funcs->destroy);
+
+	va_start(ap, name);
+	ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+					 formats, format_count, format_modifiers,
+					 type, name, ap);
+	va_end(ap);
+	return ret;
+}
 EXPORT_SYMBOL(drm_universal_plane_init);
 
+static void drmm_universal_plane_alloc_release(struct drm_device *dev, void *ptr)
+{
+	struct drm_plane *plane = ptr;
+
+	if (WARN_ON(!plane->dev))
+		return;
+
+	drm_plane_cleanup(plane);
+}
+
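+/* Called via the drmm_universal_plane_alloc() macro, which supplies size and offset */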
+void *__drmm_universal_plane_alloc(struct drm_device *dev, size_t size,
+				   size_t offset, uint32_t possible_crtcs,
+				   const struct drm_plane_funcs *funcs,
+				   const uint32_t *formats, unsigned int format_count,
+				   const uint64_t *format_modifiers,
+				   enum drm_plane_type type,
+				   const char *name, ...)
+{
+	void *container;
+	struct drm_plane *plane;
+	va_list ap;
+	int ret;
+
+	if (WARN_ON(!funcs || funcs->destroy))
+		return ERR_PTR(-EINVAL);
+
+	container = drmm_kzalloc(dev, size, GFP_KERNEL);
+	if (!container)
+		return ERR_PTR(-ENOMEM);
+
+	plane = container + offset;
+
+	va_start(ap, name);
+	ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+					 formats, format_count, format_modifiers,
+					 type, name, ap);
+	va_end(ap);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = drmm_add_action_or_reset(dev, drmm_universal_plane_alloc_release,
+				       plane);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return container;
+}
+EXPORT_SYMBOL(__drmm_universal_plane_alloc);
+
 int drm_plane_register_all(struct drm_device *dev)
 {
 	unsigned int num_planes = 0;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7db55fce35d837153f427148b9044cd607df3bfc..2a54f86856afef25ebaeff9654926b23d42e579f 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -717,6 +717,8 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
 
 	if (obj->funcs && obj->funcs->mmap) {
+		vma->vm_ops = obj->funcs->vm_ops;
+
 		ret = obj->funcs->mmap(obj, vma);
 		if (ret)
 			return ret;
@@ -978,44 +980,58 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
 EXPORT_SYMBOL(drm_gem_prime_import);
 
 /**
- * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
+ * drm_prime_sg_to_page_array - convert an sg table into a page array
+ * @sgt: scatter-gather table to convert
+ * @pages: array of page pointers to store the pages in
+ * @max_entries: size of the passed-in array
+ *
+ * Exports an sg table into an array of pages.
+ *
+ * This function is deprecated and its use is strongly discouraged. The page
+ * array is only useful for page faults, and those can corrupt fields in the
+ * struct page if they are not handled by the exporting driver.
+ */
+int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
+					    struct page **pages,
+					    int max_entries)
+{
+	struct sg_page_iter page_iter;
+	struct page **p = pages;
+
+	for_each_sgtable_page(sgt, &page_iter, 0) {
+		if (WARN_ON(p - pages >= max_entries))
+			return -1;
+		*p++ = sg_page_iter_page(&page_iter);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_prime_sg_to_page_array);
+
+/**
+ * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
  * @sgt: scatter-gather table to convert
- * @pages: optional array of page pointers to store the page array in
- * @addrs: optional array to store the dma bus address of each page
+ * @addrs: array to store the dma bus address of each page
  * @max_entries: size of both the passed-in arrays
  *
- * Exports an sg table into an array of pages and addresses. This is currently
- * required by the TTM driver in order to do correct fault handling.
+ * Exports an sg table into an array of addresses.
  *
- * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
+ * Drivers should use this in their &drm_driver.gem_prime_import_sg_table
  * implementation.
  */
-int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
-				     dma_addr_t *addrs, int max_entries)
+int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
+				   int max_entries)
 {
 	struct sg_dma_page_iter dma_iter;
-	struct sg_page_iter page_iter;
-	struct page **p = pages;
 	dma_addr_t *a = addrs;
 
-	if (pages) {
-		for_each_sgtable_page(sgt, &page_iter, 0) {
-			if (WARN_ON(p - pages >= max_entries))
-				return -1;
-			*p++ = sg_page_iter_page(&page_iter);
-		}
+	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+		if (WARN_ON(a - addrs >= max_entries))
+			return -1;
+		*a++ = sg_page_iter_dma_address(&dma_iter);
 	}
-	if (addrs) {
-		for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
-			if (WARN_ON(a - addrs >= max_entries))
-				return -1;
-			*a++ = sg_page_iter_dma_address(&dma_iter);
-		}
-	}
-
 	return 0;
 }
-EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
+EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
 
 /**
  * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
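Seen from a caller, the split of drm_prime_sg_to_page_addr_arrays() into two
helpers looks like the sketch below. Only the
&drm_driver.gem_prime_import_sg_table signature is taken from the DRM core;
struct my_bo and its helpers are hypothetical:

	#include <drm/drm_gem.h>
	#include <drm/drm_prime.h>
	#include <linux/dma-buf.h>

	static struct drm_gem_object *
	my_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
	{
		unsigned int npages = attach->dmabuf->size >> PAGE_SHIFT;
		struct my_bo *bo = my_bo_create(dev, attach->dmabuf->size);
		int ret;

		if (IS_ERR(bo))
			return ERR_CAST(bo);

		/* One dma_addr_t per page; fails if @sgt covers more pages
		 * than the array can hold. */
		ret = drm_prime_sg_to_dma_addr_array(sgt, bo->dma_addrs,
						     npages);
		if (ret) {
			my_bo_free(bo);
			return ERR_PTR(-EINVAL);
		}

		bo->sgt = sgt;
		return &bo->base;
	}

Drivers that really do need struct page pointers, typically for CPU fault
handling, can still call the deprecated drm_prime_sg_to_page_array(), but only
when the exporter guarantees those pages are safe to touch.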
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d6017726cc2a0c84005997ed577d599e7f0e2160..ad59a51eab6d897f863ce0ba8dffded5f7266c6d 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -515,7 +515,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 	if (count == 0 && connector->status == connector_status_connected)
 		count = drm_add_override_edid_modes(connector);
 
-	if (count == 0 && connector->status == connector_status_connected)
+	if (count == 0 && (connector->status == connector_status_connected ||
+			   connector->status == connector_status_unknown))
 		count = drm_add_modes_noedid(connector, 1024, 768);
 	count += drm_helper_probe_add_cmdline_mode(connector);
 	if (count == 0)
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 743e57c1b44f3eeecbbd475caba851efe48f5206..6ce8f5cd1eb59c886a1af7014b1e5ba10250bbcb 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -9,6 +9,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
@@ -55,8 +56,9 @@ static const struct drm_encoder_funcs drm_simple_encoder_funcs_cleanup = {
  * stored in the device structure. Free the encoder's memory as part of
  * the device release function.
  *
- * FIXME: Later improvements to DRM's resource management may allow for
- *        an automated kfree() of the encoder's memory.
+ * Note: consider using drmm_simple_encoder_alloc() instead of
+ * drm_simple_encoder_init() to let the DRM managed resource infrastructure
+ * take care of cleanup and deallocation.
  *
  * Returns:
  * Zero on success, error code on failure.
@@ -71,6 +73,14 @@ int drm_simple_encoder_init(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_simple_encoder_init);
 
+void *__drmm_simple_encoder_alloc(struct drm_device *dev, size_t size,
+				  size_t offset, int encoder_type)
+{
+	return __drmm_encoder_alloc(dev, size, offset, NULL, encoder_type,
+				    NULL);
+}
+EXPORT_SYMBOL(__drmm_simple_encoder_alloc);
+
 static enum drm_mode_status
 drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
 			       const struct drm_display_mode *mode)
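The managed variant folds the usual init/cleanup pair into one allocation. A
sketch, assuming drmm_simple_encoder_alloc() is the header macro that passes
sizeof() and offsetof() of the embedding type to
__drmm_simple_encoder_alloc(); struct my_output is hypothetical:

	struct my_output {
		struct drm_encoder encoder;
		/* driver-private output state */
	};

	static int my_output_init(struct drm_device *dev)
	{
		struct my_output *out;

		out = drmm_simple_encoder_alloc(dev, struct my_output, encoder,
						DRM_MODE_ENCODER_TMDS);
		if (IS_ERR(out))
			return PTR_ERR(out);

		/* No drm_encoder_cleanup() and no kfree(): both happen
		 * through the DRM managed release actions. */
		out->encoder.possible_crtcs = 0x1;
		return 0;
	}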
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index d30e2f2b8f3c47bf13d9e7bb531f676537ca2806..30912d8f82a54506bc87255d4a700202cebec6b2 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -74,7 +74,7 @@
  *                |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓|   updates the
  *                |                                        |   frame as it
  *                |                                        |   travels down
- *                |                                        |   ("sacn out")
+ *                |                                        |   ("scan out")
  *                |               Old frame                |
  *                |                                        |
  *                |                                        |
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 6d5a03b32238007ad5a71263c068360291c39bb6..9b3b989d7cad12e0101661472cb2f058cb4464e4 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -278,7 +278,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 			case _DRM_SCATTER_GATHER:
 				break;
 			case _DRM_CONSISTENT:
-				dma_free_coherent(&dev->pdev->dev,
+				dma_free_coherent(dev->dev,
 						  map->size,
 						  map->handle,
 						  map->offset);
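This one-line change is the first instance of a pattern repeated throughout
the gma500 conversion below: the cached struct drm_device.pdev pointer is
retired in favour of the generic struct drm_device.dev, and the PCI device is
derived locally wherever config-space or resource accessors still need it. A
minimal sketch of the idiom (the helper name is hypothetical, PSB_BSM comes
from the gma500 headers):

	#include <linux/pci.h>

	static u32 my_read_stolen_base(struct drm_device *dev)
	{
		/* to_pci_dev() is container_of() on the embedded struct
		 * device, so this is valid for any PCI-backed DRM device. */
		struct pci_dev *pdev = to_pci_dev(dev->dev);
		u32 base;

		pci_read_config_dword(pdev, PSB_BSM, &base);
		return base;
	}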
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index d9bd83203a15e5d4cb10f0875e1067a6afbe4c49..b390dd4d60b76d929fc2847484f1a8067126d2b9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -135,8 +135,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
 		goto fail;
 	}
 
-	ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages,
-					       NULL, npages);
+	ret = drm_prime_sg_to_page_array(sgt, etnaviv_obj->pages, npages);
 	if (ret)
 		goto fail;
 
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 0e23c93a1094cdc9e7ec0664edd8d87b040a4221..ec395658a43f1bf154e12af3cedecedffcab7603 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,9 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config DRM_GMA500
-	tristate "Intel GMA5/600 KMS Framebuffer"
+	tristate "Intel GMA500/600/3600/3650 KMS Framebuffer"
 	depends on DRM && PCI && X86 && MMU
 	select DRM_KMS_HELPER
-	select DRM_TTM
 	# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
 	select ACPI_VIDEO if ACPI
 	select BACKLIGHT_CLASS_DEVICE if ACPI
@@ -19,17 +18,3 @@ config DRM_GMA600
 	help
 	  Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
 	  platforms with LVDS ports. MIPI is not currently supported.
-
-config DRM_GMA3600
-	bool "Intel GMA3600/3650 support (Experimental)"
-	depends on DRM_GMA500
-	help
-	  Say yes to include basic support for Intel GMA3600/3650 (Intel
-	  Cedar Trail) platforms.
-
-config DRM_MEDFIELD
-	bool "Intel Medfield support (Experimental)"
-	depends on DRM_GMA500 && X86_INTEL_MID
-	help
-	  Say yes to include support for the Intel Medfield platform.
-
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index c8f2c89be99db49e45b574250fa37873e8783a7d..884ab1f9063efdcf7eedb7b2ac6c02b1103ce40e 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -6,36 +6,35 @@
 gma500_gfx-y += \
 	  accel_2d.o \
 	  backlight.o \
+	  blitter.o \
+	  cdv_device.o \
+	  cdv_intel_crt.o \
+	  cdv_intel_display.o \
+	  cdv_intel_dp.o \
+	  cdv_intel_hdmi.o \
+	  cdv_intel_lvds.o \
 	  framebuffer.o \
 	  gem.o \
+	  gma_device.o \
+	  gma_display.o \
 	  gtt.o \
 	  intel_bios.o \
-	  intel_i2c.o \
 	  intel_gmbus.o \
+	  intel_i2c.o \
+	  mid_bios.o \
 	  mmu.o \
-	  blitter.o \
 	  power.o \
+	  psb_device.o \
 	  psb_drv.o \
-	  gma_display.o \
-	  gma_device.o \
 	  psb_intel_display.o \
 	  psb_intel_lvds.o \
 	  psb_intel_modes.o \
 	  psb_intel_sdvo.o \
 	  psb_lid.o \
-	  psb_irq.o \
-	  psb_device.o \
-	  mid_bios.o
+	  psb_irq.o
 
 gma500_gfx-$(CONFIG_ACPI) +=  opregion.o \
 
-gma500_gfx-$(CONFIG_DRM_GMA3600) +=  cdv_device.o \
-	  cdv_intel_crt.o \
-	  cdv_intel_display.o \
-	  cdv_intel_hdmi.o \
-	  cdv_intel_lvds.o \
-	  cdv_intel_dp.o
-
 gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
 	  oaktrail_crtc.o \
 	  oaktrail_lvds.o \
@@ -43,14 +42,4 @@ gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
 	  oaktrail_hdmi.o \
 	  oaktrail_hdmi_i2c.o
 
-gma500_gfx-$(CONFIG_DRM_MEDFIELD) += mdfld_device.o \
-	  mdfld_output.o \
-	  mdfld_intel_display.o \
-	  mdfld_dsi_output.o \
-	  mdfld_dsi_dpi.o \
-	  mdfld_dsi_pkg_sender.o \
-	  mdfld_tpo_vid.o \
-	  mdfld_tmd_vid.o \
-	  tc35876x-dsi-lvds.o
-
 obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index e75293e4a52f1262f3119853fec39f7f58857414..19e055dbd4c2c07a5dc70a35ccef4226119b09be 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -95,13 +95,14 @@ static u32 cdv_get_max_backlight(struct drm_device *dev)
 static int cdv_get_brightness(struct backlight_device *bd)
 {
 	struct drm_device *dev = bl_get_data(bd);
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 
 	if (cdv_backlight_combination_mode(dev)) {
 		u8 lbpc;
 
 		val &= ~1;
-		pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
+		pci_read_config_byte(pdev, 0xF4, &lbpc);
 		val *= lbpc;
 	}
 	return (val * 100)/cdv_get_max_backlight(dev);
@@ -111,6 +112,7 @@ static int cdv_get_brightness(struct backlight_device *bd)
 static int cdv_set_brightness(struct backlight_device *bd)
 {
 	struct drm_device *dev = bl_get_data(bd);
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int level = bd->props.brightness;
 	u32 blc_pwm_ctl;
 
@@ -128,7 +130,7 @@ static int cdv_set_brightness(struct backlight_device *bd)
 		lbpc = level * 0xfe / max + 1;
 		level /= lbpc;
 
-		pci_write_config_byte(dev->pdev, 0xF4, lbpc);
+		pci_write_config_byte(pdev, 0xF4, lbpc);
 	}
 
 	blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
@@ -205,8 +207,9 @@ static inline void CDV_MSG_WRITE32(int domain, uint port, uint offset,
 static void cdv_init_pm(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	u32 pwr_cnt;
-	int domain = pci_domain_nr(dev->pdev->bus);
+	int domain = pci_domain_nr(pdev->bus);
 	int i;
 
 	dev_priv->apm_base = CDV_MSG_READ32(domain, PSB_PUNIT_PORT,
@@ -234,6 +237,8 @@ static void cdv_init_pm(struct drm_device *dev)
 
 static void cdv_errata(struct drm_device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+
 	/* Disable bonus launch.
 	 *	CPU and GPU competes for memory and display misses updates and
 	 *	flickers. Worst with dual core, dual displays.
@@ -242,7 +247,7 @@ static void cdv_errata(struct drm_device *dev)
 	 *	Bonus Launch to work around the issue, by degrading
 	 *	performance.
 	 */
-	 CDV_MSG_WRITE32(pci_domain_nr(dev->pdev->bus), 3, 0x30, 0x08027108);
+	 CDV_MSG_WRITE32(pci_domain_nr(pdev->bus), 3, 0x30, 0x08027108);
 }
 
 /**
@@ -255,12 +260,13 @@ static void cdv_errata(struct drm_device *dev)
 static int cdv_save_display_registers(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct psb_save_area *regs = &dev_priv->regs;
 	struct drm_connector *connector;
 
 	dev_dbg(dev->dev, "Saving GPU registers.\n");
 
-	pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB);
+	pci_read_config_byte(pdev, 0xF4, &regs->cdv.saveLBB);
 
 	regs->cdv.saveDSPCLK_GATE_D = REG_READ(DSPCLK_GATE_D);
 	regs->cdv.saveRAMCLK_GATE_D = REG_READ(RAMCLK_GATE_D);
@@ -309,11 +315,12 @@ static int cdv_save_display_registers(struct drm_device *dev)
 static int cdv_restore_display_registers(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct psb_save_area *regs = &dev_priv->regs;
 	struct drm_connector *connector;
 	u32 temp;
 
-	pci_write_config_byte(dev->pdev, 0xF4, regs->cdv.saveLBB);
+	pci_write_config_byte(pdev, 0xF4, regs->cdv.saveLBB);
 
 	REG_WRITE(DSPCLK_GATE_D, regs->cdv.saveDSPCLK_GATE_D);
 	REG_WRITE(RAMCLK_GATE_D, regs->cdv.saveRAMCLK_GATE_D);
@@ -421,16 +428,16 @@ static int cdv_power_up(struct drm_device *dev)
 static void cdv_hotplug_work_func(struct work_struct *work)
 {
         struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
-							hotplug_work);                 
+							hotplug_work);
         struct drm_device *dev = dev_priv->dev;
 
         /* Just fire off a uevent and let userspace tell us what to do */
         drm_helper_hpd_irq_event(dev);
-}                       
+}
 
 /* The core driver has received a hotplug IRQ. We are in IRQ context
    so extract the needed information and kick off queued processing */
-   
+
 static int cdv_hotplug_event(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
@@ -449,7 +456,7 @@ static void cdv_hotplug_enable(struct drm_device *dev, bool on)
 	}  else {
 		REG_WRITE(PORT_HOTPLUG_EN, 0);
 		REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
-	}	
+	}
 }
 
 static const char *force_audio_names[] = {
@@ -568,9 +575,10 @@ static const struct psb_offset cdv_regmap[2] = {
 static int cdv_chip_setup(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
 
-	if (pci_enable_msi(dev->pdev))
+	if (pci_enable_msi(pdev))
 		dev_warn(dev->dev, "Enabling MSI failed!\n");
 	dev_priv->regmap = cdv_regmap;
 	gma_get_core_freq(dev);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 88535f5aacc5d39aa0bc484a5cad41c5ae325701..c48c9d322dfbddb86b89ea1f870420a15b9ebd5b 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -127,7 +127,7 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
 }
 
 
-/**
+/*
  * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
  *
  * \return true if CRT is connected.
@@ -278,8 +278,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
 	gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
 							  i2c_reg, "CRTDDC_A");
 	if (!gma_encoder->ddc_bus) {
-		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
-			   "failed.\n");
+		dev_printk(KERN_ERR, dev->dev, "DDC bus registration failed.\n");
 		goto failed_ddc;
 	}
 
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 686385a66167ba49ff55028c97883dcb72faee61..5d33022497793f50a4500e1f84d6e00cab6a2d9f 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -551,7 +551,7 @@ void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
 	}
 }
 
-/**
+/*
  * Return the pipe currently connected to the panel fitter,
  * or -1 if the panel fitter is not present or not in use
  */
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index bfd9a15d63b1a25a33abe256fe976478d9e4a294..6d3ada39ff8675c523f07ae4f897294c9969aaa3 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -306,7 +306,7 @@ static uint32_t dp_vswing_premph_table[] = {
 };
 /**
  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
- * @intel_dp: DP struct
+ * @encoder: GMA encoder struct
  *
  * If a CPU or PCH DP output is attached to an eDP panel, this function
  * will return true, and false otherwise.
@@ -1687,7 +1687,7 @@ static enum drm_connector_status cdv_dp_detect(struct gma_encoder *encoder)
 	return status;
 }
 
-/**
+/*
  * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
  *
  * \return true if DP port is connected.
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 0d12c6ffbc4078796a9e1fcc56e3a583dba54786..e525689f84f0cf3d684d380d680e33741172a07d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -22,9 +22,6 @@
  *
  * Authors:
  *	jim liu <jim.liu@intel.com>
- *
- * FIXME:
- *	We should probably make this generic and share it with Medfield
  */
 
 #include <linux/pm_runtime.h>
@@ -56,7 +53,6 @@ struct mid_intel_hdmi_priv {
 	bool has_hdmi_audio;
 	/* Should set this when detect hotplug */
 	bool hdmi_device_connected;
-	struct mdfld_hdmi_i2c *i2c_bus;
 	struct i2c_adapter *hdmi_i2c_adapter;	/* for control functions */
 	struct drm_device *dev;
 };
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index eaaf4efec21765a494c3bc2ef6750bc4cd080b74..5bff7d9e3aa6efab866e7e98d5b0de98f51b220b 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -74,7 +74,7 @@ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
 	return retval;
 }
 
-/**
+/*
  * Sets the backlight level.
  *
  * level backlight level, from 0 to cdv_intel_lvds_get_max_backlight().
@@ -99,7 +99,7 @@ static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
 	}
 }
 
-/**
+/*
  * Sets the power state for the panel.
  */
 static void cdv_intel_lvds_set_power(struct drm_device *dev,
@@ -291,7 +291,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
 	REG_WRITE(PFIT_CONTROL, pfit_control);
 }
 
-/**
+/*
  * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
  */
 static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
@@ -471,6 +471,7 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
 /**
  * cdv_intel_lvds_init - setup LVDS connectors on this device
  * @dev: drm device
+ * @mode_dev: PSB mode device
  *
  * Create the connector, register the LVDS DDC bus, and try to figure out what
  * modes we can display on the LVDS panel (if present).
@@ -554,7 +555,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
 							 "LVDSBLC_B");
 	if (!gma_encoder->i2c_bus) {
 		dev_printk(KERN_ERR,
-			&dev->pdev->dev, "I2C bus registration failed.\n");
+			dev->dev, "I2C bus registration failed.\n");
 		goto failed_blc_i2c;
 	}
 	gma_encoder->i2c_bus->slave_addr = 0x2C;
@@ -575,7 +576,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
 							 GPIOC,
 							 "LVDSDDC_C");
 	if (!gma_encoder->ddc_bus) {
-		dev_printk(KERN_ERR, &dev->pdev->dev,
+		dev_printk(KERN_ERR, dev->dev,
 			   "DDC bus registration " "failed.\n");
 		goto failed_ddc;
 	}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index fc4fda1d258b1daf7c6c7a5603e0d0d603463658..ebe9dccf2d830817ab43be042cda266d48d00a8f 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -159,7 +159,7 @@ static const struct fb_ops psbfb_unaccel_ops = {
  *	@dev: our DRM device
  *	@fb: framebuffer to set up
  *	@mode_cmd: mode description
- *	@gt: backing object
+ *	@obj: backing object
  *
  *	Configure and fill in the boilerplate for our frame buffer. Return
  *	0 on success or an error code if we fail.
@@ -197,7 +197,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
  *	psb_framebuffer_create	-	create a framebuffer backed by gt
  *	@dev: our DRM device
  *	@mode_cmd: the description of the requested mode
- *	@gt: the backing object
+ *	@obj: the backing object
  *
  *	Create a framebuffer object backed by the gt, and fill in the
  *	boilerplate required
@@ -252,7 +252,7 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
 
 /**
  *	psbfb_create		-	create a framebuffer
- *	@fbdev: the framebuffer device
+ *	@fb_helper: the framebuffer helper
  *	@sizes: specification of the layout
  *
  *	Create a framebuffer to the specifications provided
@@ -262,6 +262,7 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
 {
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd;
@@ -325,8 +326,8 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
 
 	drm_fb_helper_fill_info(info, fb_helper, sizes);
 
-	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
-	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
+	info->fix.mmio_start = pci_resource_start(pdev, 0);
+	info->fix.mmio_len = pci_resource_len(pdev, 0);
 
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
@@ -529,6 +530,7 @@ void psb_modeset_init(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int i;
 
 	drm_mode_config_init(dev);
@@ -540,8 +542,7 @@ void psb_modeset_init(struct drm_device *dev)
 
 	/* set memory base */
 	/* Oaktrail and Poulsbo should use BAR 2*/
-	pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
-					&(dev->mode_config.fb_base));
+	pci_read_config_dword(pdev, PSB_BSM, (u32 *)&(dev->mode_config.fb_base));
 
 	/* num pipes is 2 for PSB but 1 for Mrst */
 	for (i = 0; i < dev_priv->num_pipe; i++)
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index db827e591403c7db640b6ce3b6851ccb8b345155..fbf420051ef5f79986168806b6adf14d3733b8ea 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -16,6 +16,7 @@
 #include <drm/drm.h>
 #include <drm/drm_vma_manager.h>
 
+#include "gem.h"
 #include "psb_drv.h"
 
 static vm_fault_t psb_gem_fault(struct vm_fault *vmf);
@@ -49,6 +50,8 @@ const struct drm_gem_object_funcs psb_gem_object_funcs = {
  *	@dev: our device
  *	@size: the size requested
  *	@handlep: returned handle (opaque number)
+ *	@stolen: unused
+ *	@align: unused
  *
  *	Create a GEM object, fill in the boilerplate and attach a handle to
  *	it so that userspace can speak about it. This does the core work
@@ -97,7 +100,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
 
 /**
  *	psb_gem_dumb_create	-	create a dumb buffer
- *	@drm_file: our client file
+ *	@file: our client file
  *	@dev: our device
  *	@args: the requested arguments copied from userspace
  *
@@ -116,7 +119,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 
 /**
  *	psb_gem_fault		-	pagefault handler for GEM objects
- *	@vma: the VMA of the GEM object
  *	@vmf: fault detail
  *
  *	Invoked when a fault occurs on an mmap of a GEM managed area. GEM
diff --git a/drivers/gpu/drm/gma500/gem.h b/drivers/gpu/drm/gma500/gem.h
index 3741a711b9fdecc57e8d2df255e63072ef03187d..bae6454ead292327e89c77b93154fb5242cf3ea7 100644
--- a/drivers/gpu/drm/gma500/gem.h
+++ b/drivers/gpu/drm/gma500/gem.h
@@ -8,6 +8,8 @@
 #ifndef _GEM_H
 #define _GEM_H
 
+struct drm_device;
+
 extern const struct drm_gem_object_funcs psb_gem_object_funcs;
 
 extern int psb_gem_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/gma500/gma_device.c b/drivers/gpu/drm/gma500/gma_device.c
index 869f303925667e51cecc8302af6626976982bbd7..4c91e86f4b1421b99f728650a7f4762fe381acc3 100644
--- a/drivers/gpu/drm/gma500/gma_device.c
+++ b/drivers/gpu/drm/gma500/gma_device.c
@@ -6,12 +6,14 @@
  **************************************************************************/
 
 #include "psb_drv.h"
+#include "gma_device.h"
 
 void gma_get_core_freq(struct drm_device *dev)
 {
 	uint32_t clock;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct pci_dev *pci_root =
-		pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+		pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
 					    0, 0);
 	struct drm_psb_private *dev_priv = dev->dev_private;
 
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 3df6d6e850f5200ed189fc735a4b122db2e5ecf8..b03f7b8241f2bdda2d51fb29853ecd941b1ad1ac 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -20,7 +20,7 @@
 #include "psb_intel_drv.h"
 #include "psb_intel_reg.h"
 
-/**
+/*
  * Returns whether any output on the specified pipe is of the specified type
  */
 bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
@@ -180,7 +180,7 @@ int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
 	return 0;
 }
 
-/**
+/*
  * Sets the power management mode of the pipe and plane.
  *
  * This code should probably grow support for turning the cursor off and back
@@ -559,14 +559,14 @@ int gma_crtc_set_config(struct drm_mode_set *set,
 	if (!dev_priv->rpm_enabled)
 		return drm_crtc_helper_set_config(set, ctx);
 
-	pm_runtime_forbid(&dev->pdev->dev);
+	pm_runtime_forbid(dev->dev);
 	ret = drm_crtc_helper_set_config(set, ctx);
-	pm_runtime_allow(&dev->pdev->dev);
+	pm_runtime_allow(dev->dev);
 
 	return ret;
 }
 
-/**
+/*
  * Save HW states of given crtc
  */
 void gma_crtc_save(struct drm_crtc *crtc)
@@ -609,7 +609,7 @@ void gma_crtc_save(struct drm_crtc *crtc)
 		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
 }
 
-/**
+/*
  * Restore HW states of given crtc
  */
 void gma_crtc_restore(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index d246b1f703668286f089cdade355be4cc3c75bfb..e884750bc123827f84f468deff8c1dcfb86f72ce 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -340,13 +340,14 @@ static void psb_gtt_alloc(struct drm_device *dev)
 void psb_gtt_takedown(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	if (dev_priv->gtt_map) {
 		iounmap(dev_priv->gtt_map);
 		dev_priv->gtt_map = NULL;
 	}
 	if (dev_priv->gtt_initialized) {
-		pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+		pci_write_config_word(pdev, PSB_GMCH_CTRL,
 				      dev_priv->gmch_ctrl);
 		PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
 		(void) PSB_RVDC32(PSB_PGETBL_CTL);
@@ -358,6 +359,7 @@ void psb_gtt_takedown(struct drm_device *dev)
 int psb_gtt_init(struct drm_device *dev, int resume)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	unsigned gtt_pages;
 	unsigned long stolen_size, vram_stolen_size;
 	unsigned i, num_pages;
@@ -376,8 +378,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
 	pg = &dev_priv->gtt;
 
 	/* Enable the GTT */
-	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
-	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+	pci_read_config_word(pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
+	pci_write_config_word(pdev, PSB_GMCH_CTRL,
 			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
 
 	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
@@ -397,8 +399,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
 	 */
 	pg->mmu_gatt_start = 0xE0000000;
 
-	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
-	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
+	pg->gtt_start = pci_resource_start(pdev, PSB_GTT_RESOURCE);
+	gtt_pages = pci_resource_len(pdev, PSB_GTT_RESOURCE)
 								>> PAGE_SHIFT;
 	/* CDV doesn't report this. In which case the system has 64 gtt pages */
 	if (pg->gtt_start == 0 || gtt_pages == 0) {
@@ -407,10 +409,10 @@ int psb_gtt_init(struct drm_device *dev, int resume)
 		pg->gtt_start = dev_priv->pge_ctl;
 	}
 
-	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
-	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
+	pg->gatt_start = pci_resource_start(pdev, PSB_GATT_RESOURCE);
+	pg->gatt_pages = pci_resource_len(pdev, PSB_GATT_RESOURCE)
 								>> PAGE_SHIFT;
-	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
+	dev_priv->gtt_mem = &pdev->resource[PSB_GATT_RESOURCE];
 
 	if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
 		static struct resource fudge;	/* Preferably peppermint */
@@ -431,7 +433,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
 		dev_priv->gtt_mem = &fudge;
 	}
 
-	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
+	pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
 	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
 								- PAGE_SIZE;
 
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 8ad6337eeba3a400f2c72428eb9b8e7cf78b3d54..d838369f0119471655fb99d228dfa010a116b72e 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -50,7 +50,7 @@ parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
 	uint8_t	panel_type;
 
 	edp = find_section(bdb, BDB_EDP);
-	
+
 	dev_priv->edp.bpp = 18;
 	if (!edp) {
 		if (dev_priv->edp.support) {
@@ -80,7 +80,7 @@ parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
 	dev_priv->edp.pps = *edp_pps;
 
 	DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
-				dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8, 
+				dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8,
 				dev_priv->edp.pps.t9, dev_priv->edp.pps.t10,
 				dev_priv->edp.pps.t11_t12);
 
@@ -516,7 +516,7 @@ parse_device_mapping(struct drm_psb_private *dev_priv,
 int psb_intel_init_bios(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct pci_dev *pdev = dev->pdev;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct vbt_header *vbt = NULL;
 	struct bdb_header *bdb = NULL;
 	u8 __iomem *bios = NULL;
@@ -574,7 +574,7 @@ int psb_intel_init_bios(struct drm_device *dev)
 	return 0;
 }
 
-/**
+/*
  * Destroy and free VBT data
  */
 void psb_intel_destroy_bios(struct drm_device *dev)
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index a083fbfe35b8ad373bfa688bc72b2953b1293624..370bd6451bd9b66a655b17d7dd9844bd5aea5704 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -196,7 +196,7 @@ intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
 		 "gma500 GPIO%c", "?BACDE?F"[pin]);
 	gpio->adapter.owner = THIS_MODULE;
 	gpio->adapter.algo_data	= &gpio->algo;
-	gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+	gpio->adapter.dev.parent = dev_priv->dev->dev;
 	gpio->algo.setsda = set_data;
 	gpio->algo.setscl = set_clock;
 	gpio->algo.getsda = get_data;
@@ -417,7 +417,7 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
 			 "gma500 gmbus %s",
 			 names[i]);
 
-		bus->adapter.dev.parent = &dev->pdev->dev;
+		bus->adapter.dev.parent = dev->dev;
 		bus->adapter.algo_data	= dev_priv;
 
 		bus->adapter.algo = &gmbus_algorithm;
diff --git a/drivers/gpu/drm/gma500/intel_i2c.c b/drivers/gpu/drm/gma500/intel_i2c.c
index de8810188190ba03210a18156bed8e9f48ef1d8c..5e1b4d70c3171dbb2aa185fe28f4641594c50e65 100644
--- a/drivers/gpu/drm/gma500/intel_i2c.c
+++ b/drivers/gpu/drm/gma500/intel_i2c.c
@@ -85,7 +85,6 @@ static void set_data(void *data, int state_high)
 /**
  * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
  * @dev: DRM device
- * @output: driver specific output device
  * @reg: GPIO reg to use
  * @name: name for this bus
  *
@@ -117,7 +116,7 @@ struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
 	snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
 	chan->adapter.owner = THIS_MODULE;
 	chan->adapter.algo_data = &chan->algo;
-	chan->adapter.dev.parent = &dev->pdev->dev;
+	chan->adapter.dev.parent = dev->dev;
 	chan->algo.setsda = set_data;
 	chan->algo.setscl = set_clock;
 	chan->algo.getsda = get_data;
@@ -145,7 +144,7 @@ struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
 
 /**
  * psb_intel_i2c_destroy - unregister and free i2c bus resources
- * @output: channel to free
+ * @chan: channel to free
  *
  * Unregister the adapter from the i2c layer, then free the structure.
  */
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
deleted file mode 100644
index b83d59b21de5d8c0418a4465352847073836f25b..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/gma500/mdfld_device.c
+++ /dev/null
@@ -1,562 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/**************************************************************************
- * Copyright (c) 2011, Intel Corporation.
- * All Rights Reserved.
- *
- **************************************************************************/
-
-#include <linux/delay.h>
-#include <linux/gpio/machine.h>
-
-#include <asm/intel_scu_ipc.h>
-
-#include "mdfld_dsi_output.h"
-#include "mdfld_output.h"
-#include "mid_bios.h"
-#include "psb_drv.h"
-#include "tc35876x-dsi-lvds.h"
-
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-
-#define MRST_BLC_MAX_PWM_REG_FREQ	    0xFFFF
-#define BLC_PWM_PRECISION_FACTOR 100	/* 10000000 */
-#define BLC_PWM_FREQ_CALC_CONSTANT 32
-#define MHz 1000000
-#define BRIGHTNESS_MIN_LEVEL 1
-#define BRIGHTNESS_MAX_LEVEL 100
-#define BRIGHTNESS_MASK	0xFF
-#define BLC_POLARITY_NORMAL 0
-#define BLC_POLARITY_INVERSE 1
-#define BLC_ADJUSTMENT_MAX 100
-
-#define MDFLD_BLC_PWM_PRECISION_FACTOR    10
-#define MDFLD_BLC_MAX_PWM_REG_FREQ        0xFFFE
-#define MDFLD_BLC_MIN_PWM_REG_FREQ        0x2
-
-#define MDFLD_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
-#define MDFLD_BACKLIGHT_PWM_CTL_SHIFT	(16)
-
-static struct backlight_device *mdfld_backlight_device;
-
-int mdfld_set_brightness(struct backlight_device *bd)
-{
-	struct drm_device *dev =
-		(struct drm_device *)bl_get_data(mdfld_backlight_device);
-	struct drm_psb_private *dev_priv = dev->dev_private;
-	int level = bd->props.brightness;
-
-	DRM_DEBUG_DRIVER("backlight level set to %d\n", level);
-
-	/* Perform value bounds checking */
-	if (level < BRIGHTNESS_MIN_LEVEL)
-		level = BRIGHTNESS_MIN_LEVEL;
-
-	if (gma_power_begin(dev, false)) {
-		u32 adjusted_level = 0;
-
-		/*
-		 * Adjust the backlight level with the percent in
-		 * dev_priv->blc_adj2
-		 */
-		adjusted_level = level * dev_priv->blc_adj2;
-		adjusted_level = adjusted_level / BLC_ADJUSTMENT_MAX;
-		dev_priv->brightness_adjusted = adjusted_level;
-
-		if (mdfld_get_panel_type(dev, 0) == TC35876X) {
-			if (dev_priv->dpi_panel_on[0] ||
-					dev_priv->dpi_panel_on[2])
-				tc35876x_brightness_control(dev,
-						dev_priv->brightness_adjusted);
-		} else {
-			if (dev_priv->dpi_panel_on[0])
-				mdfld_dsi_brightness_control(dev, 0,
-						dev_priv->brightness_adjusted);
-		}
-
-		if (dev_priv->dpi_panel_on[2])
-			mdfld_dsi_brightness_control(dev, 2,
-					dev_priv->brightness_adjusted);
-		gma_power_end(dev);
-	}
-
-	/* cache the brightness for later use */
-	dev_priv->brightness = level;
-	return 0;
-}
-
-static int mdfld_get_brightness(struct backlight_device *bd)
-{
-	struct drm_device *dev =
-		(struct drm_device *)bl_get_data(mdfld_backlight_device);
-	struct drm_psb_private *dev_priv = dev->dev_private;
-
-	DRM_DEBUG_DRIVER("brightness = 0x%x \n", dev_priv->brightness);
-
-	/* return locally cached var instead of HW read (due to DPST etc.) */
-	return dev_priv->brightness;
-}
-
-static const struct backlight_ops mdfld_ops = {
-	.get_brightness = mdfld_get_brightness,
-	.update_status  = mdfld_set_brightness,
-};
-
-static int device_backlight_init(struct drm_device *dev)
-{
-	struct drm_psb_private *dev_priv = (struct drm_psb_private *)
-		dev->dev_private;
-
-	dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
-	dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
-
-	return 0;
-}
-
-static int mdfld_backlight_init(struct drm_device *dev)
-{
-	struct backlight_properties props;
-	int ret = 0;
-
-	memset(&props, 0, sizeof(struct backlight_properties));
-	props.max_brightness = BRIGHTNESS_MAX_LEVEL;
-	props.type = BACKLIGHT_PLATFORM;
-	mdfld_backlight_device = backlight_device_register("mdfld-bl",
-				NULL, (void *)dev, &mdfld_ops, &props);
-
-	if (IS_ERR(mdfld_backlight_device))
-		return PTR_ERR(mdfld_backlight_device);
-
-	ret = device_backlight_init(dev);
-	if (ret)
-		return ret;
-
-	mdfld_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL;
-	mdfld_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
-	backlight_update_status(mdfld_backlight_device);
-	return 0;
-}
-#endif
-
-struct backlight_device *mdfld_get_backlight_device(void)
-{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-	return mdfld_backlight_device;
-#else
-	return NULL;
-#endif
-}
-
-/*
- * mdfld_save_display_registers
- *
- * Description: We are going to suspend so save current display
- * register state.
- *
- * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
- */
-static int mdfld_save_display_registers(struct drm_device *dev, int pipenum)
-{
-	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct medfield_state *regs = &dev_priv->regs.mdfld;
-	struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
-	const struct psb_offset *map = &dev_priv->regmap[pipenum];
-	int i;
-	u32 *mipi_val;
-
-	/* register */
-	u32 mipi_reg = MIPI;
-
-	switch (pipenum) {
-	case 0:
-		mipi_val = &regs->saveMIPI;
-		break;
-	case 1:
-		mipi_val = &regs->saveMIPI;
-		break;
-	case 2:
-		/* register */
-		mipi_reg = MIPI_C;
-		/* pointer to values */
-		mipi_val = &regs->saveMIPI_C;
-		break;
-	default:
-		DRM_ERROR("%s, invalid pipe number.\n", __func__);
-		return -EINVAL;
-	}
-
-	/* Pipe & plane A info */
-	pipe->dpll = PSB_RVDC32(map->dpll);
-	pipe->fp0 = PSB_RVDC32(map->fp0);
-	pipe->conf = PSB_RVDC32(map->conf);
-	pipe->htotal = PSB_RVDC32(map->htotal);
-	pipe->hblank = PSB_RVDC32(map->hblank);
-	pipe->hsync = PSB_RVDC32(map->hsync);
-	pipe->vtotal = PSB_RVDC32(map->vtotal);
-	pipe->vblank = PSB_RVDC32(map->vblank);
-	pipe->vsync = PSB_RVDC32(map->vsync);
-	pipe->src = PSB_RVDC32(map->src);
-	pipe->stride = PSB_RVDC32(map->stride);
-	pipe->linoff = PSB_RVDC32(map->linoff);
-	pipe->tileoff = PSB_RVDC32(map->tileoff);
-	pipe->size = PSB_RVDC32(map->size);
-	pipe->pos = PSB_RVDC32(map->pos);
-	pipe->surf = PSB_RVDC32(map->surf);
-	pipe->cntr = PSB_RVDC32(map->cntr);
-	pipe->status = PSB_RVDC32(map->status);
-
-	/*save palette (gamma) */
-	for (i = 0; i < 256; i++)
-		pipe->palette[i] = PSB_RVDC32(map->palette + (i << 2));
-
-	if (pipenum == 1) {
-		regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
-		regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
-
-		regs->saveHDMIPHYMISCCTL = PSB_RVDC32(HDMIPHYMISCCTL);
-		regs->saveHDMIB_CONTROL = PSB_RVDC32(HDMIB_CONTROL);
-		return 0;
-	}
-
-	*mipi_val = PSB_RVDC32(mipi_reg);
-	return 0;
-}
-
-/*
- * mdfld_restore_display_registers
- *
- * Description: We are going to resume so restore display register state.
- *
- * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
- */
-static int mdfld_restore_display_registers(struct drm_device *dev, int pipenum)
-{
-	/* To get  panel out of ULPS mode. */
-	u32 temp = 0;
-	u32 device_ready_reg = DEVICE_READY_REG;
-	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct mdfld_dsi_config *dsi_config = NULL;
-	struct medfield_state *regs = &dev_priv->regs.mdfld;
-	struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
-	const struct psb_offset *map = &dev_priv->regmap[pipenum];
-	u32 i;
-	u32 dpll;
-	u32 timeout = 0;
-
-	/* register */
-	u32 mipi_reg = MIPI;
-
-	/* values */
-	u32 dpll_val = pipe->dpll;
-	u32 mipi_val = regs->saveMIPI;
-
-	switch (pipenum) {
-	case 0:
-		dpll_val &= ~DPLL_VCO_ENABLE;
-		dsi_config = dev_priv->dsi_configs[0];
-		break;
-	case 1:
-		dpll_val &= ~DPLL_VCO_ENABLE;
-		break;
-	case 2:
-		mipi_reg = MIPI_C;
-		mipi_val = regs->saveMIPI_C;
-		dsi_config = dev_priv->dsi_configs[1];
-		break;
-	default:
-		DRM_ERROR("%s, invalid pipe number.\n", __func__);
-		return -EINVAL;
-	}
-
-	/*make sure VGA plane is off. it initializes to on after reset!*/
-	PSB_WVDC32(0x80000000, VGACNTRL);
-
-	if (pipenum == 1) {
-		PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, map->dpll);
-		PSB_RVDC32(map->dpll);
-
-		PSB_WVDC32(pipe->fp0, map->fp0);
-	} else {
-
-		dpll = PSB_RVDC32(map->dpll);
-
-		if (!(dpll & DPLL_VCO_ENABLE)) {
-
-			/* When ungating power of DPLL, needs to wait 0.5us
-			   before enable the VCO */
-			if (dpll & MDFLD_PWR_GATE_EN) {
-				dpll &= ~MDFLD_PWR_GATE_EN;
-				PSB_WVDC32(dpll, map->dpll);
-				/* FIXME_MDFLD PO - change 500 to 1 after PO */
-				udelay(500);
-			}
-
-			PSB_WVDC32(pipe->fp0, map->fp0);
-			PSB_WVDC32(dpll_val, map->dpll);
-			/* FIXME_MDFLD PO - change 500 to 1 after PO */
-			udelay(500);
-
-			dpll_val |= DPLL_VCO_ENABLE;
-			PSB_WVDC32(dpll_val, map->dpll);
-			PSB_RVDC32(map->dpll);
-
-			/* wait for DSI PLL to lock */
-			while (timeout < 20000 &&
-			  !(PSB_RVDC32(map->conf) & PIPECONF_DSIPLL_LOCK)) {
-				udelay(150);
-				timeout++;
-			}
-
-			if (timeout == 20000) {
-				DRM_ERROR("%s, can't lock DSIPLL.\n",
-								__func__);
-				return -EINVAL;
-			}
-		}
-	}
-	/* Restore mode */
-	PSB_WVDC32(pipe->htotal, map->htotal);
-	PSB_WVDC32(pipe->hblank, map->hblank);
-	PSB_WVDC32(pipe->hsync, map->hsync);
-	PSB_WVDC32(pipe->vtotal, map->vtotal);
-	PSB_WVDC32(pipe->vblank, map->vblank);
-	PSB_WVDC32(pipe->vsync, map->vsync);
-	PSB_WVDC32(pipe->src, map->src);
-	PSB_WVDC32(pipe->status, map->status);
-
-	/*set up the plane*/
-	PSB_WVDC32(pipe->stride, map->stride);
-	PSB_WVDC32(pipe->linoff, map->linoff);
-	PSB_WVDC32(pipe->tileoff, map->tileoff);
-	PSB_WVDC32(pipe->size, map->size);
-	PSB_WVDC32(pipe->pos, map->pos);
-	PSB_WVDC32(pipe->surf, map->surf);
-
-	if (pipenum == 1) {
-		/* restore palette (gamma) */
-		/* udelay(50000); */
-		for (i = 0; i < 256; i++)
-			PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
-
-		PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL);
-		PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
-
-		/*TODO: resume HDMI port */
-
-		/*TODO: resume pipe*/
-
-		/*enable the plane*/
-		PSB_WVDC32(pipe->cntr & ~DISPLAY_PLANE_ENABLE, map->cntr);
-
-		return 0;
-	}
-
-	/*set up pipe related registers*/
-	PSB_WVDC32(mipi_val, mipi_reg);
-
-	/*setup MIPI adapter + MIPI IP registers*/
-	if (dsi_config)
-		mdfld_dsi_controller_init(dsi_config, pipenum);
-
-	if (in_atomic() || in_interrupt())
-		mdelay(20);
-	else
-		msleep(20);
-
-	/*enable the plane*/
-	PSB_WVDC32(pipe->cntr, map->cntr);
-
-	if (in_atomic() || in_interrupt())
-		mdelay(20);
-	else
-		msleep(20);
-
-	/* LP Hold Release */
-	temp = REG_READ(mipi_reg);
-	temp |= LP_OUTPUT_HOLD_RELEASE;
-	REG_WRITE(mipi_reg, temp);
-	mdelay(1);
-
-
-	/* Set DSI host to exit from Utra Low Power State */
-	temp = REG_READ(device_ready_reg);
-	temp &= ~ULPS_MASK;
-	temp |= 0x3;
-	temp |= EXIT_ULPS_DEV_READY;
-	REG_WRITE(device_ready_reg, temp);
-	mdelay(1);
-
-	temp = REG_READ(device_ready_reg);
-	temp &= ~ULPS_MASK;
-	temp |= EXITING_ULPS;
-	REG_WRITE(device_ready_reg, temp);
-	mdelay(1);
-
-	/*enable the pipe*/
-	PSB_WVDC32(pipe->conf, map->conf);
-
-	/* restore palette (gamma) */
-	/* udelay(50000); */
-	for (i = 0; i < 256; i++)
-		PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
-
-	return 0;
-}
-
-static int mdfld_save_registers(struct drm_device *dev)
-{
-	/* mdfld_save_cursor_overlay_registers(dev); */
-	mdfld_save_display_registers(dev, 0);
-	mdfld_save_display_registers(dev, 2);
-	mdfld_disable_crtc(dev, 0);
-	mdfld_disable_crtc(dev, 2);
-
-	return 0;
-}
-
-static int mdfld_restore_registers(struct drm_device *dev)
-{
-	mdfld_restore_display_registers(dev, 2);
-	mdfld_restore_display_registers(dev, 0);
-	/* mdfld_restore_cursor_overlay_registers(dev); */
-
-	return 0;
-}
-
-static int mdfld_power_down(struct drm_device *dev)
-{
-	/* FIXME */
-	return 0;
-}
-
-static int mdfld_power_up(struct drm_device *dev)
-{
-	/* FIXME */
-	return 0;
-}
-
-/* Medfield  */
-static const struct psb_offset mdfld_regmap[3] = {
-	{
-		.fp0 = MRST_FPA0,
-		.fp1 = MRST_FPA1,
-		.cntr = DSPACNTR,
-		.conf = PIPEACONF,
-		.src = PIPEASRC,
-		.dpll = MRST_DPLL_A,
-		.htotal = HTOTAL_A,
-		.hblank = HBLANK_A,
-		.hsync = HSYNC_A,
-		.vtotal = VTOTAL_A,
-		.vblank = VBLANK_A,
-		.vsync = VSYNC_A,
-		.stride = DSPASTRIDE,
-		.size = DSPASIZE,
-		.pos = DSPAPOS,
-		.surf = DSPASURF,
-		.addr = MRST_DSPABASE,
-		.status = PIPEASTAT,
-		.linoff = DSPALINOFF,
-		.tileoff = DSPATILEOFF,
-		.palette = PALETTE_A,
-	},
-	{
-		.fp0 = MDFLD_DPLL_DIV0,
-		.cntr = DSPBCNTR,
-		.conf = PIPEBCONF,
-		.src = PIPEBSRC,
-		.dpll = MDFLD_DPLL_B,
-		.htotal = HTOTAL_B,
-		.hblank = HBLANK_B,
-		.hsync = HSYNC_B,
-		.vtotal = VTOTAL_B,
-		.vblank = VBLANK_B,
-		.vsync = VSYNC_B,
-		.stride = DSPBSTRIDE,
-		.size = DSPBSIZE,
-		.pos = DSPBPOS,
-		.surf = DSPBSURF,
-		.addr = MRST_DSPBBASE,
-		.status = PIPEBSTAT,
-		.linoff = DSPBLINOFF,
-		.tileoff = DSPBTILEOFF,
-		.palette = PALETTE_B,
-	},
-	{
-		.fp0 = MRST_FPA0,	/* This is what the old code did ?? */
-		.cntr = DSPCCNTR,
-		.conf = PIPECCONF,
-		.src = PIPECSRC,
-		/* No DPLL_C */
-		.dpll = MRST_DPLL_A,
-		.htotal = HTOTAL_C,
-		.hblank = HBLANK_C,
-		.hsync = HSYNC_C,
-		.vtotal = VTOTAL_C,
-		.vblank = VBLANK_C,
-		.vsync = VSYNC_C,
-		.stride = DSPCSTRIDE,
-		.size = DSPBSIZE,
-		.pos = DSPCPOS,
-		.surf = DSPCSURF,
-		.addr = MDFLD_DSPCBASE,
-		.status = PIPECSTAT,
-		.linoff = DSPCLINOFF,
-		.tileoff = DSPCTILEOFF,
-		.palette = PALETTE_C,
-	},
-};
-
-/*
- * The GPIO lines for resetting DSI pipe 0 and 2 are available in the
- * PCI device 0000:00:0c.0 on the Medfield.
- */
-static struct gpiod_lookup_table mdfld_dsi_pipe_gpio_table = {
-	.table  = {
-		GPIO_LOOKUP("0000:00:0c.0", 128, "dsi-pipe0-reset",
-			    GPIO_ACTIVE_HIGH),
-		GPIO_LOOKUP("0000:00:0c.0", 34, "dsi-pipe2-reset",
-			    GPIO_ACTIVE_HIGH),
-		{ },
-	},
-};
-
-static int mdfld_chip_setup(struct drm_device *dev)
-{
-	struct drm_psb_private *dev_priv = dev->dev_private;
-	if (pci_enable_msi(dev->pdev))
-		dev_warn(dev->dev, "Enabling MSI failed!\n");
-	dev_priv->regmap = mdfld_regmap;
-
-	/* Associate the GPIO lines with the DRM device */
-	mdfld_dsi_pipe_gpio_table.dev_id = dev_name(dev->dev);
-	gpiod_add_lookup_table(&mdfld_dsi_pipe_gpio_table);
-
-	return mid_chip_setup(dev);
-}
-
-const struct psb_ops mdfld_chip_ops = {
-	.name = "mdfld",
-	.pipes = 3,
-	.crtcs = 3,
-	.lvds_mask = (1 << 1),
-	.hdmi_mask = (1 << 1),
-	.cursor_needs_phys = 0,
-	.sgx_offset = MRST_SGX_OFFSET,
-
-	.chip_setup = mdfld_chip_setup,
-	.crtc_helper = &mdfld_helper_funcs,
-	.crtc_funcs = &psb_intel_crtc_funcs,
-
-	.output_init = mdfld_output_init,
-
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-	.backlight_init = mdfld_backlight_init,
-#endif
-
-	.save_regs = mdfld_save_registers,
-	.restore_regs = mdfld_restore_registers,
-	.save_crtc = gma_crtc_save,
-	.restore_crtc = gma_crtc_restore,
-	.power_down = mdfld_power_down,
-	.power_up = mdfld_power_up,
-};
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
deleted file mode 100644
index ae1223f631a7f8af9e4957430917d8cd076cd811..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ /dev/null
@@ -1,1017 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include <linux/delay.h>
-
-#include <drm/drm_simple_kms_helper.h>
-
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_pkg_sender.h"
-#include "mdfld_output.h"
-#include "psb_drv.h"
-#include "tc35876x-dsi-lvds.h"
-
-static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
-								int pipe);
-
-static void mdfld_wait_for_HS_DATA_FIFO(struct drm_device *dev, u32 pipe)
-{
-	u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
-	int timeout = 0;
-
-	udelay(500);
-
-	/* This will time out after approximately 2+ seconds */
-	while ((timeout < 20000) &&
-		(REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_DATA_FULL)) {
-		udelay(100);
-		timeout++;
-	}
-
-	if (timeout == 20000)
-		DRM_INFO("MIPI: HS Data FIFO was never cleared!\n");
-}
-
-static void mdfld_wait_for_HS_CTRL_FIFO(struct drm_device *dev, u32 pipe)
-{
-	u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
-	int timeout = 0;
-
-	udelay(500);
-
-	/* This will time out after approximately 2+ seconds */
-	while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg)
-					& DSI_FIFO_GEN_HS_CTRL_FULL)) {
-		udelay(100);
-		timeout++;
-	}
-	if (timeout == 20000)
-		DRM_INFO("MIPI: HS CMD FIFO was never cleared!\n");
-}
-
-static void mdfld_wait_for_DPI_CTRL_FIFO(struct drm_device *dev, u32 pipe)
-{
-	u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
-	int timeout = 0;
-
-	udelay(500);
-
-	/* This will time out after approximately 2+ seconds */
-	while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) &
-					DPI_FIFO_EMPTY) != DPI_FIFO_EMPTY)) {
-		udelay(100);
-		timeout++;
-	}
-
-	if (timeout == 20000)
-		DRM_ERROR("MIPI: DPI FIFO was never cleared\n");
-}
-
-static void mdfld_wait_for_SPL_PKG_SENT(struct drm_device *dev, u32 pipe)
-{
-	u32 intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
-	int timeout = 0;
-
-	udelay(500);
-
-	/* This will time out after approximately 2+ seconds */
-	while ((timeout < 20000) && (!(REG_READ(intr_stat_reg)
-					& DSI_INTR_STATE_SPL_PKG_SENT))) {
-		udelay(100);
-		timeout++;
-	}
-
-	if (timeout == 20000)
-                DRM_ERROR("MIPI: SPL_PKT_SENT_INTERRUPT was not sent successfully!\n");
-}
-
-/* For TC35876X */
-
-static void dsi_set_device_ready_state(struct drm_device *dev, int state,
-				int pipe)
-{
-	REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), !!state, 0, 0);
-}
-
-static void dsi_set_pipe_plane_enable_state(struct drm_device *dev,
-							int state, int pipe)
-{
-	struct drm_psb_private *dev_priv = dev->dev_private;
-	u32 pipeconf_reg = PIPEACONF;
-	u32 dspcntr_reg = DSPACNTR;
-
-	u32 dspcntr = dev_priv->dspcntr[pipe];
-	u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
-
-	if (pipe) {
-		pipeconf_reg = PIPECCONF;
-		dspcntr_reg = DSPCCNTR;
-	} else
-		mipi &= (~0x03);
-
-	if (state) {
-		/*Set up pipe */
-		REG_WRITE(pipeconf_reg, BIT(31));
-
-		if (REG_BIT_WAIT(pipeconf_reg, 1, 30))
-			dev_err(&dev->pdev->dev, "%s: Pipe enable timeout\n",
-				__func__);
-
-		/*Set up display plane */
-		REG_WRITE(dspcntr_reg, dspcntr);
-	} else {
-		u32 dspbase_reg = pipe ? MDFLD_DSPCBASE : MRST_DSPABASE;
-
-		/* Put DSI lanes to ULPS to disable pipe */
-		REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 2, 2, 1);
-		REG_READ(MIPI_DEVICE_READY_REG(pipe)); /* posted write? */
-
-		/* LP Hold */
-		REG_FLD_MOD(MIPI_PORT_CONTROL(pipe), 0, 16, 16);
-		REG_READ(MIPI_PORT_CONTROL(pipe)); /* posted write? */
-
-		/* Disable display plane */
-		REG_FLD_MOD(dspcntr_reg, 0, 31, 31);
-
-		/* Flush the plane changes ??? posted write? */
-		REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
-		REG_READ(dspbase_reg);
-
-		/* Disable PIPE */
-		REG_FLD_MOD(pipeconf_reg, 0, 31, 31);
-
-		if (REG_BIT_WAIT(pipeconf_reg, 0, 30))
-			dev_err(&dev->pdev->dev, "%s: Pipe disable timeout\n",
-				__func__);
-
-		if (REG_BIT_WAIT(MIPI_GEN_FIFO_STAT_REG(pipe), 1, 28))
-			dev_err(&dev->pdev->dev, "%s: FIFO not empty\n",
-				__func__);
-	}
-}
-
-static void mdfld_dsi_configure_down(struct mdfld_dsi_encoder *dsi_encoder,
-								int pipe)
-{
-	struct mdfld_dsi_dpi_output *dpi_output =
-				MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
-	struct mdfld_dsi_config *dsi_config =
-				mdfld_dsi_encoder_get_config(dsi_encoder);
-	struct drm_device *dev = dsi_config->dev;
-	struct drm_psb_private *dev_priv = dev->dev_private;
-
-	if (!dev_priv->dpi_panel_on[pipe]) {
-		dev_err(dev->dev, "DPI panel is already off\n");
-		return;
-	}
-	tc35876x_toshiba_bridge_panel_off(dev);
-	tc35876x_set_bridge_reset_state(dev, 1);
-	dsi_set_pipe_plane_enable_state(dev, 0, pipe);
-	mdfld_dsi_dpi_shut_down(dpi_output, pipe);
-	dsi_set_device_ready_state(dev, 0, pipe);
-}
-
-static void mdfld_dsi_configure_up(struct mdfld_dsi_encoder *dsi_encoder,
-								int pipe)
-{
-	struct mdfld_dsi_dpi_output *dpi_output =
-				MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
-	struct mdfld_dsi_config *dsi_config =
-				mdfld_dsi_encoder_get_config(dsi_encoder);
-	struct drm_device *dev = dsi_config->dev;
-	struct drm_psb_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->dpi_panel_on[pipe]) {
-		dev_err(dev->dev, "DPI panel is already on\n");
-		return;
-	}
-
-	/* For resume path sequence */
-	mdfld_dsi_dpi_shut_down(dpi_output, pipe);
-	dsi_set_device_ready_state(dev, 0, pipe);
-
-	dsi_set_device_ready_state(dev, 1, pipe);
-	tc35876x_set_bridge_reset_state(dev, 0);
-	tc35876x_configure_lvds_bridge(dev);
-	mdfld_dsi_dpi_turn_on(dpi_output, pipe);  /* Send turn on command */
-	dsi_set_pipe_plane_enable_state(dev, 1, pipe);
-}
-/* End for TC35876X */
-
-/* ************************************************************************* *\
- * FUNCTION: mdfld_dsi_tpo_ic_init
- *
- * DESCRIPTION:  This function is called only by mrst_dsi_mode_set and
- *               restore_display_registers.  since this function does not
- *               acquire the mutex, it is important that the calling function
- *               does!
-\* ************************************************************************* */
-static void mdfld_dsi_tpo_ic_init(struct mdfld_dsi_config *dsi_config, u32 pipe)
-{
-	struct drm_device *dev = dsi_config->dev;
-	u32 dcsChannelNumber = dsi_config->channel_num;
-	u32 gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe);
-	u32 gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe);
-	u32 gen_ctrl_val = GEN_LONG_WRITE;
-
-	DRM_INFO("Enter mrst init TPO MIPI display.\n");
-
-	gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
-
-	/* Flip page order */
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x00008036);
-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
-
-	/* 0xF0 */
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x005a5af0);
-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-
-	/* Write protection key */
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x005a5af1);
-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-
-	/* 0xFC */
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x005a5afc);
-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-
-	/* 0xB7 */
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x770000b7);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x00000044);
-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x05 << WORD_COUNTS_POS));
-
-	/* 0xB6 */
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x000a0ab6);
-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-
-	/* 0xF2 */
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x081010f2);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x4a070708);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x000000c5);
-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
-
-	/* 0xF8 */
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x024003f8);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x01030a04);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x0e020220);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x00000004);
-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x0d << WORD_COUNTS_POS));
-
-	/* 0xE2 */
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x398fc3e2);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x0000916f);
-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x06 << WORD_COUNTS_POS));
-
-	/* 0xB0 */
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x000000b0);
-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
-
-	/* 0xF4 */
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x240242f4);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x78ee2002);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x2a071050);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x507fee10);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x10300710);
-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x14 << WORD_COUNTS_POS));
-
-	/* 0xBA */
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x19fe07ba);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x101c0a31);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x00000010);
-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
-
-	/* 0xBB */
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x28ff07bb);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x24280a31);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x00000034);
-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
-
-	/* 0xFB */
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x535d05fb);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x1b1a2130);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x221e180e);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x131d2120);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x535d0508);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x1c1a2131);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x231f160d);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x111b2220);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x535c2008);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x1f1d2433);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x2c251a10);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x2c34372d);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x00000023);
-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
-
-	/* 0xFA */
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x525c0bfa);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x1c1c232f);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x2623190e);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x18212625);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x545d0d0e);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x1e1d2333);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x26231a10);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x1a222725);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x545d280f);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x21202635);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x31292013);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x31393d33);
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x00000029);
-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
-
-	/* Set DM */
-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
-	REG_WRITE(gen_data_reg, 0x000100f7);
-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-}
-
-static u16 mdfld_dsi_dpi_to_byte_clock_count(int pixel_clock_count,
-						int num_lane, int bpp)
-{
-	return (u16)((pixel_clock_count * bpp) / (num_lane * 8));
-}
-
-/*
- * Calculate the DPI timing based on a given drm mode @mode.
- * Return 0 on success.
- * FIXME: this uses the proposed mode values for the calculation; it may
- * need to use the crtc mode values later.
- */
-int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
-				struct mdfld_dsi_dpi_timing *dpi_timing,
-				int num_lane, int bpp)
-{
-	int pclk_hsync, pclk_hfp, pclk_hbp, pclk_hactive;
-	int pclk_vsync, pclk_vfp, pclk_vbp;
-
-	pclk_hactive = mode->hdisplay;
-	pclk_hfp = mode->hsync_start - mode->hdisplay;
-	pclk_hsync = mode->hsync_end - mode->hsync_start;
-	pclk_hbp = mode->htotal - mode->hsync_end;
-
-	pclk_vfp = mode->vsync_start - mode->vdisplay;
-	pclk_vsync = mode->vsync_end - mode->vsync_start;
-	pclk_vbp = mode->vtotal - mode->vsync_end;
-
-	/*
-	 * byte clock counts are calculated with the following formula:
-	 * bclock_count = pclk_count * bpp / num_lane / 8
-	 */
-	dpi_timing->hsync_count = mdfld_dsi_dpi_to_byte_clock_count(
-						pclk_hsync, num_lane, bpp);
-	dpi_timing->hbp_count = mdfld_dsi_dpi_to_byte_clock_count(
-						pclk_hbp, num_lane, bpp);
-	dpi_timing->hfp_count = mdfld_dsi_dpi_to_byte_clock_count(
-						pclk_hfp, num_lane, bpp);
-	dpi_timing->hactive_count = mdfld_dsi_dpi_to_byte_clock_count(
-						pclk_hactive, num_lane, bpp);
-	dpi_timing->vsync_count = mdfld_dsi_dpi_to_byte_clock_count(
-						pclk_vsync, num_lane, bpp);
-	dpi_timing->vbp_count = mdfld_dsi_dpi_to_byte_clock_count(
-						pclk_vbp, num_lane, bpp);
-	dpi_timing->vfp_count = mdfld_dsi_dpi_to_byte_clock_count(
-						pclk_vfp, num_lane, bpp);
-
-	return 0;
-}
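To make the byte-clock conversion above concrete, here is a minimal userspace sketch of the same arithmetic (the 864-pixel width, 2 lanes and 24 bpp are hypothetical example values, not taken from the driver): each timing value counted in pixel clocks is rescaled into byte clocks on the DSI link, bclock = pclk * bpp / (num_lane * 8).

```c
#include <stdio.h>

/* Same arithmetic as mdfld_dsi_dpi_to_byte_clock_count() */
static unsigned short to_byte_clocks(int pclk_count, int num_lane, int bpp)
{
	return (unsigned short)((pclk_count * bpp) / (num_lane * 8));
}

int main(void)
{
	/* Hypothetical 864-pixel active width, 2 lanes, 24 bpp:
	 * 864 * 24 / (2 * 8) = 1296 byte clocks */
	printf("hactive_count = %u\n", to_byte_clocks(864, 2, 24));
	return 0;
}
```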
-
-void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
-								int pipe)
-{
-	struct drm_device *dev = dsi_config->dev;
-	int lane_count = dsi_config->lane_count;
-	struct mdfld_dsi_dpi_timing dpi_timing;
-	struct drm_display_mode *mode = dsi_config->mode;
-	u32 val;
-
-	/*un-ready device*/
-	REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 0, 0, 0);
-
-	/*init dsi adapter before kicking off*/
-	REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);
-
-	/*enable all interrupts*/
-	REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);
-
-	/*set up func_prg*/
-	val = lane_count;
-	val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;
-
-	switch (dsi_config->bpp) {
-	case 16:
-		val |= DSI_DPI_COLOR_FORMAT_RGB565;
-		break;
-	case 18:
-		val |= DSI_DPI_COLOR_FORMAT_RGB666;
-		break;
-	case 24:
-		val |= DSI_DPI_COLOR_FORMAT_RGB888;
-		break;
-	default:
-		DRM_ERROR("unsupported color format, bpp = %d\n",
-							dsi_config->bpp);
-	}
-	REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), val);
-
-	REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe),
-			(mode->vtotal * mode->htotal * dsi_config->bpp /
-				(8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
-	REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe),
-				0xffff & DSI_LP_RX_TIMEOUT_MASK);
-
-	/*max value: 20 clock cycles of txclkesc*/
-	REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe),
-				0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);
-
-	/*min 21 txclkesc, max: ffffh*/
-	REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe),
-				0xffff & DSI_RESET_TIMER_MASK);
-
-	REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
-				mode->vdisplay << 16 | mode->hdisplay);
-
-	/*set DPI timing registers*/
-	mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
-				dsi_config->lane_count, dsi_config->bpp);
-
-	REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
-			dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
-	REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
-			dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
-	REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
-			dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
-	REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
-			dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
-	REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
-			dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
-	REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
-			dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
-	REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
-			dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
-
-	REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x46);
-
-	/*min: 7d0 max: 4e20*/
-	REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0x000007d0);
-
-	/*set up video mode*/
-	val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
-	REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), val);
-
-	REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);
-
-	REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);
-
-	/*TODO: figure out how to setup these registers*/
-	if (mdfld_get_panel_type(dev, pipe) == TC35876X)
-		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
-	else
-		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150c3408);
-
-	REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);
-
-	if (mdfld_get_panel_type(dev, pipe) == TC35876X)
-		tc35876x_set_bridge_reset_state(dev, 0);  /*Pull High Reset */
-
-	/*set device ready*/
-	REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 1, 0, 0);
-}
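As a worked example of the func_prg value assembled in the controller init above, here is a self-contained sketch for a hypothetical 2-lane, 24 bpp, virtual-channel-0 configuration (the constants mirror the driver's definitions):

```c
#include <stdio.h>

#define DSI_DPI_VIRT_CHANNEL_OFFSET	3
#define DSI_DPI_COLOR_FORMAT_RGB888	(0x04 << 7)

int main(void)
{
	unsigned int val = 2;				/* lane_count */

	val |= 0u << DSI_DPI_VIRT_CHANNEL_OFFSET;	/* channel_num 0 */
	val |= DSI_DPI_COLOR_FORMAT_RGB888;		/* bpp == 24 */
	printf("MIPI_DSI_FUNC_PRG value: 0x%x\n", val);	/* prints 0x202 */
	return 0;
}
```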
-
-void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, int pipe)
-{
-	struct drm_device *dev = output->dev;
-
-	/* clear special packet sent bit */
-	if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
-		REG_WRITE(MIPI_INTR_STAT_REG(pipe),
-					DSI_INTR_STATE_SPL_PKG_SENT);
-
-	/*send turn on package*/
-	REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_TURN_ON);
-
-	/*wait for SPL_PKG_SENT interrupt*/
-	mdfld_wait_for_SPL_PKG_SENT(dev, pipe);
-
-	if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
-		REG_WRITE(MIPI_INTR_STAT_REG(pipe),
-					DSI_INTR_STATE_SPL_PKG_SENT);
-
-	output->panel_on = 1;
-
-	/* FIXME the following is disabled to work around the X slow start
-	   issue for the TMD panel
-	if (pipe == 2)
-		dev_priv->dpi_panel_on2 = true;
-	else if (pipe == 0)
-		dev_priv->dpi_panel_on = true; */
-}
-
-static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
-								int pipe)
-{
-	struct drm_device *dev = output->dev;
-
-	/* If the output is already off, or mode setting didn't happen yet,
-	 * ignore this. */
-	if ((!output->panel_on) || output->first_boot) {
-		output->first_boot = 0;
-		return;
-	}
-
-	/* Wait for dpi fifo to empty */
-	mdfld_wait_for_DPI_CTRL_FIFO(dev, pipe);
-
-	/* Clear the special packet interrupt bit if set */
-	if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
-		REG_WRITE(MIPI_INTR_STAT_REG(pipe),
-					DSI_INTR_STATE_SPL_PKG_SENT);
-
-	if (REG_READ(MIPI_DPI_CONTROL_REG(pipe)) == DSI_DPI_CTRL_HS_SHUTDOWN)
-		goto shutdown_out;
-
-	REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_SHUTDOWN);
-
-shutdown_out:
-	output->panel_on = 0;
-	output->first_boot = 0;
-
-	/* FIXME the following is disabled to work around the X slow start
-	   issue for the TMD panel
-	if (pipe == 2)
-		dev_priv->dpi_panel_on2 = false;
-	else if (pipe == 0)
-		dev_priv->dpi_panel_on = false;	 */
-}
-
-static void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on)
-{
-	struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
-	struct mdfld_dsi_dpi_output *dpi_output =
-				MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
-	struct mdfld_dsi_config *dsi_config =
-				mdfld_dsi_encoder_get_config(dsi_encoder);
-	int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
-	struct drm_device *dev = dsi_config->dev;
-	struct drm_psb_private *dev_priv = dev->dev_private;
-
-	/*start up display island if it was shutdown*/
-	if (!gma_power_begin(dev, true))
-		return;
-
-	if (on) {
-		if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
-			mdfld_dsi_dpi_turn_on(dpi_output, pipe);
-		else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
-			mdfld_dsi_configure_up(dsi_encoder, pipe);
-		else {
-			/*enable mipi port*/
-			REG_WRITE(MIPI_PORT_CONTROL(pipe),
-				REG_READ(MIPI_PORT_CONTROL(pipe)) | BIT(31));
-			REG_READ(MIPI_PORT_CONTROL(pipe));
-
-			mdfld_dsi_dpi_turn_on(dpi_output, pipe);
-			mdfld_dsi_tpo_ic_init(dsi_config, pipe);
-		}
-		dev_priv->dpi_panel_on[pipe] = true;
-	} else {
-		if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
-			mdfld_dsi_dpi_shut_down(dpi_output, pipe);
-		else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
-			mdfld_dsi_configure_down(dsi_encoder, pipe);
-		else {
-			mdfld_dsi_dpi_shut_down(dpi_output, pipe);
-
-			/*disable mipi port*/
-			REG_WRITE(MIPI_PORT_CONTROL(pipe),
-				REG_READ(MIPI_PORT_CONTROL(pipe)) & ~BIT(31));
-			REG_READ(MIPI_PORT_CONTROL(pipe));
-		}
-		dev_priv->dpi_panel_on[pipe] = false;
-	}
-	gma_power_end(dev);
-}
-
-void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
-{
-	mdfld_dsi_dpi_set_power(encoder, mode == DRM_MODE_DPMS_ON);
-}
-
-bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
-				     const struct drm_display_mode *mode,
-				     struct drm_display_mode *adjusted_mode)
-{
-	struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
-	struct mdfld_dsi_config *dsi_config =
-				mdfld_dsi_encoder_get_config(dsi_encoder);
-	struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
-
-	if (fixed_mode) {
-		adjusted_mode->hdisplay = fixed_mode->hdisplay;
-		adjusted_mode->hsync_start = fixed_mode->hsync_start;
-		adjusted_mode->hsync_end = fixed_mode->hsync_end;
-		adjusted_mode->htotal = fixed_mode->htotal;
-		adjusted_mode->vdisplay = fixed_mode->vdisplay;
-		adjusted_mode->vsync_start = fixed_mode->vsync_start;
-		adjusted_mode->vsync_end = fixed_mode->vsync_end;
-		adjusted_mode->vtotal = fixed_mode->vtotal;
-		adjusted_mode->clock = fixed_mode->clock;
-		drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
-	}
-	return true;
-}
-
-void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder)
-{
-	mdfld_dsi_dpi_set_power(encoder, false);
-}
-
-void mdfld_dsi_dpi_commit(struct drm_encoder *encoder)
-{
-	mdfld_dsi_dpi_set_power(encoder, true);
-}
-
-/* For TC35876X */
-/* This functionality was implemented in firmware in iCDK, but was removed
- * in DV0 and later, so it needs to be done here. */
-static void mipi_set_properties(struct mdfld_dsi_config *dsi_config, int pipe)
-{
-	struct drm_device *dev = dsi_config->dev;
-
-	REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);
-	REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);
-	REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe), 0xffffff);
-	REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe), 0xffffff);
-	REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe), 0x14);
-	REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe), 0xff);
-	REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x25);
-	REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0xf0);
-	REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);
-	REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);
-	REG_WRITE(MIPI_DBI_BW_CTRL_REG(pipe), 0x00000820);
-	REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);
-}
-
-static void mdfld_mipi_set_video_timing(struct mdfld_dsi_config *dsi_config,
-					int pipe)
-{
-	struct drm_device *dev = dsi_config->dev;
-	struct mdfld_dsi_dpi_timing dpi_timing;
-	struct drm_display_mode *mode = dsi_config->mode;
-
-	mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
-					dsi_config->lane_count,
-					dsi_config->bpp);
-
-	REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
-		mode->vdisplay << 16 | mode->hdisplay);
-	REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
-		dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
-	REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
-		dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
-	REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
-		dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
-	REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
-		dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
-	REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
-		dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
-	REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
-		dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
-	REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
-		dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
-}
-
-static void mdfld_mipi_config(struct mdfld_dsi_config *dsi_config, int pipe)
-{
-	struct drm_device *dev = dsi_config->dev;
-	int lane_count = dsi_config->lane_count;
-
-	if (pipe) {
-		REG_WRITE(MIPI_PORT_CONTROL(0), 0x00000002);
-		REG_WRITE(MIPI_PORT_CONTROL(2), 0x80000000);
-	} else {
-		REG_WRITE(MIPI_PORT_CONTROL(0), 0x80010000);
-		REG_WRITE(MIPI_PORT_CONTROL(2), 0x00);
-	}
-
-	REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150A600F);
-	REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), 0x0000000F);
-
-	/* RGB888 color format (0x04 << 7) plus the configured lane count */
-	REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), 0x00000200 | lane_count);
-
-	mdfld_mipi_set_video_timing(dsi_config, pipe);
-}
-
-static void mdfld_set_pipe_timing(struct mdfld_dsi_config *dsi_config, int pipe)
-{
-	struct drm_device *dev = dsi_config->dev;
-	struct drm_display_mode *mode = dsi_config->mode;
-
-	REG_WRITE(HTOTAL_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
-	REG_WRITE(HBLANK_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
-	REG_WRITE(HSYNC_A,
-		((mode->hsync_end - 1) << 16) | (mode->hsync_start - 1));
-
-	REG_WRITE(VTOTAL_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
-	REG_WRITE(VBLANK_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
-	REG_WRITE(VSYNC_A,
-		((mode->vsync_end - 1) << 16) | (mode->vsync_start - 1));
-
-	REG_WRITE(PIPEASRC,
-		((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
-}
-/* End for TC35876X */
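The pipe timing registers written above all pack two values into a single word: the (total - 1) or (end - 1) count in the high 16 bits and the (active - 1) or (start - 1) count in the low 16 bits. A small sketch with hypothetical horizontal timings:

```c
#include <stdio.h>

int main(void)
{
	/* Hypothetical horizontal timings: 1024 active pixels, 1344 total */
	int hdisplay = 1024, htotal = 1344;
	unsigned int htotal_reg = ((htotal - 1) << 16) | (hdisplay - 1);

	printf("HTOTAL_A = 0x%08x\n", htotal_reg);	/* 0x053f03ff */
	return 0;
}
```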
-
-void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
-				   struct drm_display_mode *mode,
-				   struct drm_display_mode *adjusted_mode)
-{
-	struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
-	struct mdfld_dsi_dpi_output *dpi_output =
-					MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
-	struct mdfld_dsi_config *dsi_config =
-				mdfld_dsi_encoder_get_config(dsi_encoder);
-	struct drm_device *dev = dsi_config->dev;
-	struct drm_psb_private *dev_priv = dev->dev_private;
-	int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
-	u32 pipeconf_reg = PIPEACONF;
-	u32 dspcntr_reg = DSPACNTR;
-	u32 pipeconf, dspcntr;
-
-	u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
-
-	if (WARN_ON(pipe < 0))
-		return;
-
-	pipeconf = dev_priv->pipeconf[pipe];
-	dspcntr = dev_priv->dspcntr[pipe];
-
-	if (pipe) {
-		pipeconf_reg = PIPECCONF;
-		dspcntr_reg = DSPCCNTR;
-	} else {
-		if (mdfld_get_panel_type(dev, pipe) == TC35876X)
-			mipi &= (~0x03); /* Use all four lanes */
-		else
-			mipi |= 2;
-	}
-
-	/*start up display island if it was shutdown*/
-	if (!gma_power_begin(dev, true))
-		return;
-
-	if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
-		/*
-		 * The following logic is required to reset and configure the
-		 * bridge. It also starts the DSI clock at 200 MHz.
-		 */
-		tc35876x_set_bridge_reset_state(dev, 0);  /*Pull High Reset */
-		tc35876x_toshiba_bridge_panel_on(dev);
-		udelay(100);
-		/* Now start the DSI clock */
-		REG_WRITE(MRST_DPLL_A, 0x00);
-		REG_WRITE(MRST_FPA0, 0xC1);
-		REG_WRITE(MRST_DPLL_A, 0x00800000);
-		udelay(500);
-		REG_WRITE(MRST_DPLL_A, 0x80800000);
-
-		if (REG_BIT_WAIT(pipeconf_reg, 1, 29))
-			dev_err(&dev->pdev->dev, "%s: DSI PLL lock timeout\n",
-				__func__);
-
-		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
-
-		mipi_set_properties(dsi_config, pipe);
-		mdfld_mipi_config(dsi_config, pipe);
-		mdfld_set_pipe_timing(dsi_config, pipe);
-
-		REG_WRITE(DSPABASE, 0x00);
-		REG_WRITE(DSPASIZE,
-			((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
-
-		REG_WRITE(DSPACNTR, 0x98000000);
-		REG_WRITE(DSPASURF, 0x00);
-
-		REG_WRITE(VGACNTRL, 0x80000000);
-		REG_WRITE(DEVICE_READY_REG, 0x00000001);
-
-		REG_WRITE(MIPI_PORT_CONTROL(pipe), 0x80810000);
-	} else {
-		/*set up mipi port FIXME: do at init time */
-		REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi);
-	}
-	REG_READ(MIPI_PORT_CONTROL(pipe));
-
-	if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
-		/* NOP */
-	} else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
-		/* set up DSI controller DPI interface */
-		mdfld_dsi_dpi_controller_init(dsi_config, pipe);
-
-		/* Configure MIPI Bridge and Panel */
-		tc35876x_configure_lvds_bridge(dev);
-		dev_priv->dpi_panel_on[pipe] = true;
-	} else {
-		/*turn on DPI interface*/
-		mdfld_dsi_dpi_turn_on(dpi_output, pipe);
-	}
-
-	/*set up pipe*/
-	REG_WRITE(pipeconf_reg, pipeconf);
-	REG_READ(pipeconf_reg);
-
-	/*set up display plane*/
-	REG_WRITE(dspcntr_reg, dspcntr);
-	REG_READ(dspcntr_reg);
-
-	msleep(20); /* FIXME: this should wait for vblank */
-
-	if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
-		/* NOP */
-	} else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
-		mdfld_dsi_dpi_turn_on(dpi_output, pipe);
-	} else {
-		/* init driver ic */
-		mdfld_dsi_tpo_ic_init(dsi_config, pipe);
-		/*init backlight*/
-		mdfld_dsi_brightness_init(dsi_config, pipe);
-	}
-
-	gma_power_end(dev);
-}
-
-/*
- * Init DSI DPI encoder.
- * Allocate an mdfld_dsi_encoder and attach it to the given @dsi_connector.
- * Return a pointer to the newly allocated DPI encoder, or NULL on error.
- */
-struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
-				struct mdfld_dsi_connector *dsi_connector,
-				const struct panel_funcs *p_funcs)
-{
-	struct mdfld_dsi_dpi_output *dpi_output = NULL;
-	struct mdfld_dsi_config *dsi_config;
-	struct drm_connector *connector = NULL;
-	struct drm_encoder *encoder = NULL;
-	int pipe;
-	u32 data;
-	int ret;
-
-	pipe = dsi_connector->pipe;
-
-	if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
-		dsi_config = mdfld_dsi_get_config(dsi_connector);
-
-		/* panel hard-reset */
-		if (p_funcs->reset) {
-			ret = p_funcs->reset(dev, pipe);
-			if (ret) {
-				DRM_ERROR("Panel %d hard-reset failed\n", pipe);
-				return NULL;
-			}
-		}
-
-		/* panel drvIC init */
-		if (p_funcs->drv_ic_init)
-			p_funcs->drv_ic_init(dsi_config, pipe);
-
-		/* panel power mode detect */
-		ret = mdfld_dsi_get_power_mode(dsi_config, &data, false);
-		if (ret) {
-			DRM_ERROR("Panel %d get power mode failed\n", pipe);
-			dsi_connector->status = connector_status_disconnected;
-		} else {
-			DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
-			dsi_connector->status = connector_status_connected;
-		}
-	}
-
-	dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL);
-	if (!dpi_output) {
-		DRM_ERROR("No memory\n");
-		return NULL;
-	}
-
-	dpi_output->panel_on = 0;
-	dpi_output->dev = dev;
-	if (mdfld_get_panel_type(dev, pipe) != TC35876X)
-		dpi_output->p_funcs = p_funcs;
-	dpi_output->first_boot = 1;
-
-	/*get fixed mode*/
-	dsi_config = mdfld_dsi_get_config(dsi_connector);
-
-	/*create drm encoder object*/
-	connector = &dsi_connector->base.base;
-	encoder = &dpi_output->base.base.base;
-	drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
-	drm_encoder_helper_add(encoder,
-				p_funcs->encoder_helper_funcs);
-
-	/*attach to given connector*/
-	drm_connector_attach_encoder(connector, encoder);
-
-	/*set possible crtcs and clones*/
-	if (dsi_connector->pipe) {
-		encoder->possible_crtcs = (1 << 2);
-		encoder->possible_clones = 0;
-	} else {
-		encoder->possible_crtcs = (1 << 0);
-		encoder->possible_clones = 0;
-	}
-
-	dsi_connector->base.encoder = &dpi_output->base.base;
-
-	return &dpi_output->base;
-}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
deleted file mode 100644
index 2b40663e16964dba0a038e3a55493bc137034616..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#ifndef __MDFLD_DSI_DPI_H__
-#define __MDFLD_DSI_DPI_H__
-
-#include "mdfld_dsi_output.h"
-#include "mdfld_output.h"
-
-struct mdfld_dsi_dpi_timing {
-	u16 hsync_count;
-	u16 hbp_count;
-	u16 hfp_count;
-	u16 hactive_count;
-	u16 vsync_count;
-	u16 vbp_count;
-	u16 vfp_count;
-};
-
-struct mdfld_dsi_dpi_output {
-	struct mdfld_dsi_encoder base;
-	struct drm_device *dev;
-
-	int panel_on;
-	int first_boot;
-
-	const struct panel_funcs *p_funcs;
-};
-
-#define MDFLD_DSI_DPI_OUTPUT(dsi_encoder)\
-	container_of(dsi_encoder, struct mdfld_dsi_dpi_output, base)
-
-/* Export functions */
-extern int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
-				struct mdfld_dsi_dpi_timing *dpi_timing,
-				int num_lane, int bpp);
-extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
-				struct mdfld_dsi_connector *dsi_connector,
-				const struct panel_funcs *p_funcs);
-
-/* MDFLD DPI helper functions */
-extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode);
-extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
-				const struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode);
-extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder);
-extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder);
-extern void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode);
-extern void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output,
-				int pipe);
-extern void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
-				int pipe);
-#endif /*__MDFLD_DSI_DPI_H__*/
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
deleted file mode 100644
index 4aab76613bd9e7977a52eb2c38afbb7fd8f002bd..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ /dev/null
@@ -1,603 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include <linux/delay.h>
-#include <linux/moduleparam.h>
-#include <linux/pm_runtime.h>
-#include <linux/gpio/consumer.h>
-
-#include <asm/intel_scu_ipc.h>
-
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_output.h"
-#include "mdfld_dsi_pkg_sender.h"
-#include "mdfld_output.h"
-#include "tc35876x-dsi-lvds.h"
-
-/* get the LABC from command line. */
-static int LABC_control = 1;
-
-#ifdef MODULE
-module_param(LABC_control, int, 0644);
-#else
-
-static int __init parse_LABC_control(char *arg)
-{
-	/*
-	 * LABC control can be passed in as a cmdline parameter:
-	 * add LABC=1 to the cmdline to enable it, LABC=0 to disable it.
-	 */
-	if (!arg)
-		return -EINVAL;
-
-	if (!strcasecmp(arg, "0"))
-		LABC_control = 0;
-	else if (!strcasecmp(arg, "1"))
-		LABC_control = 1;
-
-	return 0;
-}
-early_param("LABC", parse_LABC_control);
-#endif
-
-/**
- * Check and see if the generic control or data buffer is empty and ready.
- */
-void mdfld_dsi_gen_fifo_ready(struct drm_device *dev, u32 gen_fifo_stat_reg,
-							u32 fifo_stat)
-{
-	u32 GEN_BF_time_out_count;
-
-	/* Check MIPI Adapter command registers */
-	for (GEN_BF_time_out_count = 0;
-			GEN_BF_time_out_count < GEN_FB_TIME_OUT;
-			GEN_BF_time_out_count++) {
-		if ((REG_READ(gen_fifo_stat_reg) & fifo_stat) == fifo_stat)
-			break;
-		udelay(100);
-	}
-
-	if (GEN_BF_time_out_count == GEN_FB_TIME_OUT)
-		DRM_ERROR("mdfld_dsi_gen_fifo_ready, Timeout. gen_fifo_stat_reg = 0x%x.\n",
-					gen_fifo_stat_reg);
-}
-
-/**
- * Manage the DSI MIPI keyboard and display brightness.
- * FIXME: this is exported to OSPM code; should work out a specific
- * display interface for OSPM.
- */
-
-void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
-{
-	struct mdfld_dsi_pkg_sender *sender =
-				mdfld_dsi_get_pkg_sender(dsi_config);
-	struct drm_device *dev;
-	struct drm_psb_private *dev_priv;
-	u32 gen_ctrl_val;
-
-	if (!sender) {
-		DRM_ERROR("No sender found\n");
-		return;
-	}
-
-	dev = sender->dev;
-	dev_priv = dev->dev_private;
-
-	/* Set default display backlight value to 85% (0xd8)*/
-	mdfld_dsi_send_mcs_short(sender, write_display_brightness, 0xd8, 1,
-				true);
-
-	/* Set minimum brightness setting of CABC function to 20% (0x33)*/
-	mdfld_dsi_send_mcs_short(sender, write_cabc_min_bright, 0x33, 1, true);
-
-	/* Enable backlight or/and LABC */
-	gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | DISPLAY_DIMMING_ON |
-								BACKLIGHT_ON;
-	if (LABC_control == 1)
-		gen_ctrl_val |= DISPLAY_DIMMING_ON | DISPLAY_BRIGHTNESS_AUTO
-								| GAMMA_AUTO;
-
-	if (LABC_control == 1)
-		gen_ctrl_val |= AMBIENT_LIGHT_SENSE_ON;
-
-	dev_priv->mipi_ctrl_display = gen_ctrl_val;
-
-	mdfld_dsi_send_mcs_short(sender, write_ctrl_display, (u8)gen_ctrl_val,
-				1, true);
-
-	mdfld_dsi_send_mcs_short(sender, write_ctrl_cabc, UI_IMAGE, 1, true);
-}
-
-void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe, int level)
-{
-	struct mdfld_dsi_pkg_sender *sender;
-	struct drm_psb_private *dev_priv;
-	struct mdfld_dsi_config *dsi_config;
-	u32 gen_ctrl_val = 0;
-	int p_type = TMD_VID;
-
-	if (!dev || (pipe != 0 && pipe != 2)) {
-		DRM_ERROR("Invalid parameter\n");
-		return;
-	}
-
-	p_type = mdfld_get_panel_type(dev, 0);
-
-	dev_priv = dev->dev_private;
-
-	if (pipe)
-		dsi_config = dev_priv->dsi_configs[1];
-	else
-		dsi_config = dev_priv->dsi_configs[0];
-
-	sender = mdfld_dsi_get_pkg_sender(dsi_config);
-
-	if (!sender) {
-		DRM_ERROR("No sender found\n");
-		return;
-	}
-
-	gen_ctrl_val = (level * 0xff / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL) & 0xff;
-
-	dev_dbg(sender->dev->dev, "pipe = %d, gen_ctrl_val = %d.\n",
-							pipe, gen_ctrl_val);
-
-	if (p_type == TMD_VID) {
-		/* Set display backlight value */
-		mdfld_dsi_send_mcs_short(sender, tmd_write_display_brightness,
-					(u8)gen_ctrl_val, 1, true);
-	} else {
-		/* Set display backlight value */
-		mdfld_dsi_send_mcs_short(sender, write_display_brightness,
-					(u8)gen_ctrl_val, 1, true);
-
-		/* Enable backlight control */
-		if (level == 0)
-			gen_ctrl_val = 0;
-		else
-			gen_ctrl_val = dev_priv->mipi_ctrl_display;
-
-		mdfld_dsi_send_mcs_short(sender, write_ctrl_display,
-					(u8)gen_ctrl_val, 1, true);
-	}
-}
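To illustrate the brightness scaling above: levels run from 0 to MDFLD_DSI_BRIGHTNESS_MAX_LEVEL (100) and are mapped linearly onto the 8-bit DCS brightness byte, so level 85 yields 0xd8, the same value mdfld_dsi_brightness_init() programs as the 85% default. A minimal sketch:

```c
#include <stdio.h>

#define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100

int main(void)
{
	int level = 85;	/* 85%, matching the driver's default */
	unsigned int gen_ctrl_val =
		(level * 0xff / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL) & 0xff;

	printf("DCS brightness byte: 0x%02x\n", gen_ctrl_val); /* 0xd8 */
	return 0;
}
```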
-
-static int mdfld_dsi_get_panel_status(struct mdfld_dsi_config *dsi_config,
-				u8 dcs, u32 *data, bool hs)
-{
-	struct mdfld_dsi_pkg_sender *sender
-		= mdfld_dsi_get_pkg_sender(dsi_config);
-
-	if (!sender || !data) {
-		DRM_ERROR("Invalid parameter\n");
-		return -EINVAL;
-	}
-
-	return mdfld_dsi_read_mcs(sender, dcs, data, 1, hs);
-}
-
-int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config, u32 *mode,
-			bool hs)
-{
-	if (!dsi_config || !mode) {
-		DRM_ERROR("Invalid parameter\n");
-		return -EINVAL;
-	}
-
-	return mdfld_dsi_get_panel_status(dsi_config, 0x0a, mode, hs);
-}
-
-/*
- * NOTE: this function is used by OSPM.
- * TODO: will be removed later; should work out display interfaces for OSPM.
- */
-void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config, int pipe)
-{
-	if (!dsi_config || ((pipe != 0) && (pipe != 2))) {
-		DRM_ERROR("Invalid parameters\n");
-		return;
-	}
-
-	mdfld_dsi_dpi_controller_init(dsi_config, pipe);
-}
-
-static void mdfld_dsi_connector_save(struct drm_connector *connector)
-{
-}
-
-static void mdfld_dsi_connector_restore(struct drm_connector *connector)
-{
-}
-
-/* FIXME: start using the force parameter */
-static enum drm_connector_status
-mdfld_dsi_connector_detect(struct drm_connector *connector, bool force)
-{
-	struct mdfld_dsi_connector *dsi_connector
-		= mdfld_dsi_connector(connector);
-
-	dsi_connector->status = connector_status_connected;
-
-	return dsi_connector->status;
-}
-
-static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
-				struct drm_property *property,
-				uint64_t value)
-{
-	struct drm_encoder *encoder = connector->encoder;
-
-	if (!strcmp(property->name, "scaling mode") && encoder) {
-		struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
-		bool centerechange;
-		uint64_t val;
-
-		if (!gma_crtc)
-			goto set_prop_error;
-
-		switch (value) {
-		case DRM_MODE_SCALE_FULLSCREEN:
-			break;
-		case DRM_MODE_SCALE_NO_SCALE:
-			break;
-		case DRM_MODE_SCALE_ASPECT:
-			break;
-		default:
-			goto set_prop_error;
-		}
-
-		if (drm_object_property_get_value(&connector->base, property, &val))
-			goto set_prop_error;
-
-		if (val == value)
-			goto set_prop_done;
-
-		if (drm_object_property_set_value(&connector->base,
-							property, value))
-			goto set_prop_error;
-
-		centerechange = (val == DRM_MODE_SCALE_NO_SCALE) ||
-			(value == DRM_MODE_SCALE_NO_SCALE);
-
-		if (gma_crtc->saved_mode.hdisplay != 0 &&
-		    gma_crtc->saved_mode.vdisplay != 0) {
-			if (centerechange) {
-				if (!drm_crtc_helper_set_mode(encoder->crtc,
-						&gma_crtc->saved_mode,
-						encoder->crtc->x,
-						encoder->crtc->y,
-						encoder->crtc->primary->fb))
-					goto set_prop_error;
-			} else {
-				const struct drm_encoder_helper_funcs *funcs =
-						encoder->helper_private;
-				funcs->mode_set(encoder,
-					&gma_crtc->saved_mode,
-					&gma_crtc->saved_adjusted_mode);
-			}
-		}
-	} else if (!strcmp(property->name, "backlight") && encoder) {
-		if (drm_object_property_set_value(&connector->base, property,
-									value))
-			goto set_prop_error;
-		else
-			gma_backlight_set(encoder->dev, value);
-	}
-set_prop_done:
-	return 0;
-set_prop_error:
-	return -1;
-}
-
-static void mdfld_dsi_connector_destroy(struct drm_connector *connector)
-{
-	struct mdfld_dsi_connector *dsi_connector =
-					mdfld_dsi_connector(connector);
-	struct mdfld_dsi_pkg_sender *sender;
-
-	if (!dsi_connector)
-		return;
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-	sender = dsi_connector->pkg_sender;
-	mdfld_dsi_pkg_sender_destroy(sender);
-	kfree(dsi_connector);
-}
-
-static int mdfld_dsi_connector_get_modes(struct drm_connector *connector)
-{
-	struct mdfld_dsi_connector *dsi_connector =
-				mdfld_dsi_connector(connector);
-	struct mdfld_dsi_config *dsi_config =
-				mdfld_dsi_get_config(dsi_connector);
-	struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
-	struct drm_display_mode *dup_mode = NULL;
-	struct drm_device *dev = connector->dev;
-
-	if (fixed_mode) {
-		dev_dbg(dev->dev, "fixed_mode %dx%d\n",
-				fixed_mode->hdisplay, fixed_mode->vdisplay);
-		dup_mode = drm_mode_duplicate(dev, fixed_mode);
-		drm_mode_probed_add(connector, dup_mode);
-		return 1;
-	}
-	DRM_ERROR("Didn't get any modes!\n");
-	return 0;
-}
-
-static enum drm_mode_status mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
-						struct drm_display_mode *mode)
-{
-	struct mdfld_dsi_connector *dsi_connector =
-					mdfld_dsi_connector(connector);
-	struct mdfld_dsi_config *dsi_config =
-					mdfld_dsi_get_config(dsi_connector);
-	struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
-
-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-		return MODE_NO_DBLESCAN;
-
-	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-		return MODE_NO_INTERLACE;
-
-	/*
-	 * FIXME: the current DC has no fitting unit, so reject any mode
-	 * setting request that does not match the fixed mode.
-	 * Will figure out a way to do up-scaling (panel fitting) later.
-	 */
-	if (fixed_mode) {
-		if (mode->hdisplay != fixed_mode->hdisplay)
-			return MODE_PANEL;
-
-		if (mode->vdisplay != fixed_mode->vdisplay)
-			return MODE_PANEL;
-	}
-
-	return MODE_OK;
-}
-
-static struct drm_encoder *mdfld_dsi_connector_best_encoder(
-				struct drm_connector *connector)
-{
-	struct mdfld_dsi_connector *dsi_connector =
-				mdfld_dsi_connector(connector);
-	struct mdfld_dsi_config *dsi_config =
-				mdfld_dsi_get_config(dsi_connector);
-	return &dsi_config->encoder->base.base;
-}
-
-/*DSI connector funcs*/
-static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
-	.detect = mdfld_dsi_connector_detect,
-	.fill_modes = drm_helper_probe_single_connector_modes,
-	.set_property = mdfld_dsi_connector_set_property,
-	.destroy = mdfld_dsi_connector_destroy,
-};
-
-/*DSI connector helper funcs*/
-static const struct drm_connector_helper_funcs
-	mdfld_dsi_connector_helper_funcs = {
-	.get_modes = mdfld_dsi_connector_get_modes,
-	.mode_valid = mdfld_dsi_connector_mode_valid,
-	.best_encoder = mdfld_dsi_connector_best_encoder,
-};
-
-static int mdfld_dsi_get_default_config(struct drm_device *dev,
-				struct mdfld_dsi_config *config, int pipe)
-{
-	if (!dev || !config) {
-		DRM_ERROR("Invalid parameters");
-		return -EINVAL;
-	}
-
-	config->bpp = 24;
-	if (mdfld_get_panel_type(dev, pipe) == TC35876X)
-		config->lane_count = 4;
-	else
-		config->lane_count = 2;
-	config->channel_num = 0;
-
-	if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
-		config->video_mode = MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE;
-	else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
-		config->video_mode =
-				MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS;
-	else
-		config->video_mode = MDFLD_DSI_VIDEO_BURST_MODE;
-
-	return 0;
-}
-
-int mdfld_dsi_panel_reset(struct drm_device *ddev, int pipe)
-{
-	struct device *dev = ddev->dev;
-	struct gpio_desc *gpiod;
-
-	/*
-	 * Raise the GPIO reset line for the corresponding pipe to HIGH.
-	 * The line is presumably active low, so this takes the respective
-	 * pipe out of reset. (We have no code to put it back
-	 * into reset in this driver.)
-	 */
-	switch (pipe) {
-	case 0:
-		gpiod = gpiod_get(dev, "dsi-pipe0-reset", GPIOD_OUT_HIGH);
-		if (IS_ERR(gpiod))
-			return PTR_ERR(gpiod);
-		break;
-	case 2:
-		gpiod = gpiod_get(dev, "dsi-pipe2-reset", GPIOD_OUT_HIGH);
-		if (IS_ERR(gpiod))
-			return PTR_ERR(gpiod);
-		break;
-	default:
-		DRM_DEV_ERROR(dev, "Invalid output pipe\n");
-		return -EINVAL;
-	}
-	gpiod_put(gpiod);
-
-	/* Flush posted writes on the device */
-	gpiod = gpiod_get(dev, "dsi-pipe0-reset", GPIOD_ASIS);
-	if (IS_ERR(gpiod))
-		return PTR_ERR(gpiod);
-	gpiod_get_value(gpiod);
-	gpiod_put(gpiod);
-
-	return 0;
-}
-
-/*
- * MIPI output init
- * @dev drm device
- * @pipe pipe number, 0 or 2
- * @config
- *
- * Do the initialization of a MIPI output, including creating the DRM mode
- * objects and initializing the DSI output on @pipe.
- */
-void mdfld_dsi_output_init(struct drm_device *dev,
-			   int pipe,
-			   const struct panel_funcs *p_vid_funcs)
-{
-	struct mdfld_dsi_config *dsi_config;
-	struct mdfld_dsi_connector *dsi_connector;
-	struct drm_connector *connector;
-	struct mdfld_dsi_encoder *encoder;
-	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct panel_info dsi_panel_info;
-	u32 width_mm, height_mm;
-
-	dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
-
-	if (pipe != 0 && pipe != 2) {
-		DRM_ERROR("Invalid parameter\n");
-		return;
-	}
-
-	/*create a new connector*/
-	dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
-	if (!dsi_connector) {
-		DRM_ERROR("No memory");
-		return;
-	}
-
-	dsi_connector->pipe =  pipe;
-
-	dsi_config = kzalloc(sizeof(struct mdfld_dsi_config),
-			GFP_KERNEL);
-	if (!dsi_config) {
-		DRM_ERROR("cannot allocate memory for DSI config\n");
-		goto dsi_init_err0;
-	}
-	mdfld_dsi_get_default_config(dev, dsi_config, pipe);
-
-	dsi_connector->private = dsi_config;
-
-	dsi_config->changed = 1;
-	dsi_config->dev = dev;
-
-	dsi_config->fixed_mode = p_vid_funcs->get_config_mode(dev);
-	if (p_vid_funcs->get_panel_info(dev, pipe, &dsi_panel_info))
-			goto dsi_init_err0;
-
-	width_mm = dsi_panel_info.width_mm;
-	height_mm = dsi_panel_info.height_mm;
-
-	dsi_config->mode = dsi_config->fixed_mode;
-	dsi_config->connector = dsi_connector;
-
-	if (!dsi_config->fixed_mode) {
-		DRM_ERROR("No panel fixed mode was found\n");
-		goto dsi_init_err0;
-	}
-
-	if (pipe && dev_priv->dsi_configs[0]) {
-		dsi_config->dvr_ic_inited = 0;
-		dev_priv->dsi_configs[1] = dsi_config;
-	} else if (pipe == 0) {
-		dsi_config->dvr_ic_inited = 1;
-		dev_priv->dsi_configs[0] = dsi_config;
-	} else {
-		DRM_ERROR("Trying to init MIPI1 before MIPI0\n");
-		goto dsi_init_err0;
-	}
-
-
-	connector = &dsi_connector->base.base;
-	dsi_connector->base.save = mdfld_dsi_connector_save;
-	dsi_connector->base.restore = mdfld_dsi_connector_restore;
-
-	drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
-						DRM_MODE_CONNECTOR_LVDS);
-	drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
-
-	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-	connector->display_info.width_mm = width_mm;
-	connector->display_info.height_mm = height_mm;
-	connector->interlace_allowed = false;
-	connector->doublescan_allowed = false;
-
-	/*attach properties*/
-	drm_object_attach_property(&connector->base,
-				dev->mode_config.scaling_mode_property,
-				DRM_MODE_SCALE_FULLSCREEN);
-	drm_object_attach_property(&connector->base,
-				dev_priv->backlight_property,
-				MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
-
-	/*init DSI package sender on this output*/
-	if (mdfld_dsi_pkg_sender_init(dsi_connector, pipe)) {
-		DRM_ERROR("Package Sender initialization failed on pipe %d\n",
-									pipe);
-		goto dsi_init_err0;
-	}
-
-	encoder = mdfld_dsi_dpi_init(dev, dsi_connector, p_vid_funcs);
-	if (!encoder) {
-		DRM_ERROR("Create DPI encoder failed\n");
-		goto dsi_init_err1;
-	}
-	encoder->private = dsi_config;
-	dsi_config->encoder = encoder;
-	encoder->base.type = (pipe == 0) ? INTEL_OUTPUT_MIPI :
-		INTEL_OUTPUT_MIPI2;
-	drm_connector_register(connector);
-	return;
-
-	/*TODO: add code to destroy outputs on error*/
-dsi_init_err1:
-	/*destroy sender*/
-	mdfld_dsi_pkg_sender_destroy(dsi_connector->pkg_sender);
-
-	drm_connector_cleanup(connector);
-
-	kfree(dsi_config->fixed_mode);
-	kfree(dsi_config);
-dsi_init_err0:
-	kfree(dsi_connector);
-}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
deleted file mode 100644
index 5c0db3c2903ff8762cff3b35c8607b05597aa7d8..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#ifndef __MDFLD_DSI_OUTPUT_H__
-#define __MDFLD_DSI_OUTPUT_H__
-
-#include <linux/backlight.h>
-
-#include <asm/intel-mid.h>
-
-#include <drm/drm.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
-
-#include "mdfld_output.h"
-#include "psb_drv.h"
-#include "psb_intel_drv.h"
-#include "psb_intel_reg.h"
-
-#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
-#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
-#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
-#define FLD_MOD(orig, val, start, end) \
-	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
-
-#define REG_FLD_MOD(reg, val, start, end) \
-	REG_WRITE(reg, FLD_MOD(REG_READ(reg), val, start, end))
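A quick worked example of the field helpers above, with illustrative values: FLD_MASK(7, 4) builds the mask 0xf0, FLD_GET() extracts those bits, and FLD_MOD() rewrites only them while leaving the rest of the word intact.

```c
#include <stdio.h>

#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
#define FLD_MOD(orig, val, start, end) \
	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))

int main(void)
{
	unsigned int reg = 0x12345678;

	printf("FLD_MASK(7, 4)          = 0x%x\n", FLD_MASK(7, 4));	/* 0xf0 */
	printf("FLD_GET(reg, 7, 4)      = 0x%x\n", FLD_GET(reg, 7, 4));	/* 0x7 */
	printf("FLD_MOD(reg, 0xa, 7, 4) = 0x%x\n", FLD_MOD(reg, 0xa, 7, 4));
	/* prints 0x123456a8: only bits 7..4 change */
	return 0;
}
```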
-
-static inline int REGISTER_FLD_WAIT(struct drm_device *dev, u32 reg,
-		u32 val, int start, int end)
-{
-	int t = 100000;
-
-	while (FLD_GET(REG_READ(reg), start, end) != val) {
-		if (--t == 0)
-			return 1;
-	}
-
-	return 0;
-}
-
-#define REG_FLD_WAIT(reg, val, start, end) \
-	REGISTER_FLD_WAIT(dev, reg, val, start, end)
-
-#define REG_BIT_WAIT(reg, val, bitnum) \
-	REGISTER_FLD_WAIT(dev, reg, val, bitnum, bitnum)
-
-#define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100
-
-#ifdef DEBUG
-#define CHECK_PIPE(pipe) ({			\
-	const typeof(pipe) __pipe = (pipe);	\
-	BUG_ON(__pipe != 0 && __pipe != 2);	\
-	__pipe;	})
-#else
-#define CHECK_PIPE(pipe) (pipe)
-#endif
-
-/*
- * The actual MIPIA->MIPIC reg offset is 0x800; multiplying by 0x400 works
- * because the only valid pipe numbers are 0 and 2.
- */
-#define REG_OFFSET(pipe) (CHECK_PIPE(pipe) * 0x400)
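For instance, MIPI_DEVICE_READY_REG below resolves to 0xb000 for pipe 0 (MIPIA) and 0xb800 for pipe 2 (MIPIC); a minimal sketch of the address arithmetic (the driver's CHECK_PIPE() debug check is dropped for brevity):

```c
#include <stdio.h>

#define REG_OFFSET(pipe)		((pipe) * 0x400)
#define MIPI_DEVICE_READY_REG(pipe)	(0xb000 + REG_OFFSET(pipe))

int main(void)
{
	/* Only pipes 0 (MIPIA) and 2 (MIPIC) are valid on this hardware */
	printf("pipe 0: 0x%x\n", MIPI_DEVICE_READY_REG(0));	/* 0xb000 */
	printf("pipe 2: 0x%x\n", MIPI_DEVICE_READY_REG(2));	/* 0xb800 */
	return 0;
}
```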
-
-/* mdfld DSI controller registers */
-#define MIPI_DEVICE_READY_REG(pipe)		(0xb000 + REG_OFFSET(pipe))
-#define MIPI_INTR_STAT_REG(pipe)		(0xb004 + REG_OFFSET(pipe))
-#define MIPI_INTR_EN_REG(pipe)			(0xb008 + REG_OFFSET(pipe))
-#define MIPI_DSI_FUNC_PRG_REG(pipe)		(0xb00c + REG_OFFSET(pipe))
-#define MIPI_HS_TX_TIMEOUT_REG(pipe)		(0xb010 + REG_OFFSET(pipe))
-#define MIPI_LP_RX_TIMEOUT_REG(pipe)		(0xb014 + REG_OFFSET(pipe))
-#define MIPI_TURN_AROUND_TIMEOUT_REG(pipe)	(0xb018 + REG_OFFSET(pipe))
-#define MIPI_DEVICE_RESET_TIMER_REG(pipe)	(0xb01c + REG_OFFSET(pipe))
-#define MIPI_DPI_RESOLUTION_REG(pipe)		(0xb020 + REG_OFFSET(pipe))
-#define MIPI_DBI_FIFO_THROTTLE_REG(pipe)	(0xb024 + REG_OFFSET(pipe))
-#define MIPI_HSYNC_COUNT_REG(pipe)		(0xb028 + REG_OFFSET(pipe))
-#define MIPI_HBP_COUNT_REG(pipe)		(0xb02c + REG_OFFSET(pipe))
-#define MIPI_HFP_COUNT_REG(pipe)		(0xb030 + REG_OFFSET(pipe))
-#define MIPI_HACTIVE_COUNT_REG(pipe)		(0xb034 + REG_OFFSET(pipe))
-#define MIPI_VSYNC_COUNT_REG(pipe)		(0xb038 + REG_OFFSET(pipe))
-#define MIPI_VBP_COUNT_REG(pipe)		(0xb03c + REG_OFFSET(pipe))
-#define MIPI_VFP_COUNT_REG(pipe)		(0xb040 + REG_OFFSET(pipe))
-#define MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe)	(0xb044 + REG_OFFSET(pipe))
-#define MIPI_DPI_CONTROL_REG(pipe)		(0xb048 + REG_OFFSET(pipe))
-#define MIPI_DPI_DATA_REG(pipe)			(0xb04c + REG_OFFSET(pipe))
-#define MIPI_INIT_COUNT_REG(pipe)		(0xb050 + REG_OFFSET(pipe))
-#define MIPI_MAX_RETURN_PACK_SIZE_REG(pipe)	(0xb054 + REG_OFFSET(pipe))
-#define MIPI_VIDEO_MODE_FORMAT_REG(pipe)	(0xb058 + REG_OFFSET(pipe))
-#define MIPI_EOT_DISABLE_REG(pipe)		(0xb05c + REG_OFFSET(pipe))
-#define MIPI_LP_BYTECLK_REG(pipe)		(0xb060 + REG_OFFSET(pipe))
-#define MIPI_LP_GEN_DATA_REG(pipe)		(0xb064 + REG_OFFSET(pipe))
-#define MIPI_HS_GEN_DATA_REG(pipe)		(0xb068 + REG_OFFSET(pipe))
-#define MIPI_LP_GEN_CTRL_REG(pipe)		(0xb06c + REG_OFFSET(pipe))
-#define MIPI_HS_GEN_CTRL_REG(pipe)		(0xb070 + REG_OFFSET(pipe))
-#define MIPI_GEN_FIFO_STAT_REG(pipe)		(0xb074 + REG_OFFSET(pipe))
-#define MIPI_HS_LS_DBI_ENABLE_REG(pipe)		(0xb078 + REG_OFFSET(pipe))
-#define MIPI_DPHY_PARAM_REG(pipe)		(0xb080 + REG_OFFSET(pipe))
-#define MIPI_DBI_BW_CTRL_REG(pipe)		(0xb084 + REG_OFFSET(pipe))
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe)	(0xb088 + REG_OFFSET(pipe))
-
-#define MIPI_CTRL_REG(pipe)			(0xb104 + REG_OFFSET(pipe))
-#define MIPI_DATA_ADD_REG(pipe)			(0xb108 + REG_OFFSET(pipe))
-#define MIPI_DATA_LEN_REG(pipe)			(0xb10c + REG_OFFSET(pipe))
-#define MIPI_CMD_ADD_REG(pipe)			(0xb110 + REG_OFFSET(pipe))
-#define MIPI_CMD_LEN_REG(pipe)			(0xb114 + REG_OFFSET(pipe))
-
-/* non-uniform reg offset */
-#define MIPI_PORT_CONTROL(pipe)		(CHECK_PIPE(pipe) ? MIPI_C : MIPI)
-
-#define DSI_DEVICE_READY				(0x1)
-#define DSI_POWER_STATE_ULPS_ENTER			(0x2 << 1)
-#define DSI_POWER_STATE_ULPS_EXIT			(0x1 << 1)
-#define DSI_POWER_STATE_ULPS_OFFSET			(0x1)
-
-
-#define DSI_ONE_DATA_LANE					(0x1)
-#define DSI_TWO_DATA_LANE					(0x2)
-#define DSI_THREE_DATA_LANE					(0X3)
-#define DSI_FOUR_DATA_LANE					(0x4)
-#define DSI_DPI_VIRT_CHANNEL_OFFSET			(0x3)
-#define DSI_DBI_VIRT_CHANNEL_OFFSET			(0x5)
-#define DSI_DPI_COLOR_FORMAT_RGB565			(0x01 << 7)
-#define DSI_DPI_COLOR_FORMAT_RGB666			(0x02 << 7)
-#define DSI_DPI_COLOR_FORMAT_RGB666_UNPACK		(0x03 << 7)
-#define DSI_DPI_COLOR_FORMAT_RGB888			(0x04 << 7)
-#define DSI_DBI_COLOR_FORMAT_OPTION2			(0x05 << 13)
-
-#define DSI_INTR_STATE_RXSOTERROR			BIT(0)
-
-#define DSI_INTR_STATE_SPL_PKG_SENT			BIT(30)
-#define DSI_INTR_STATE_TE				BIT(31)
-
-#define DSI_HS_TX_TIMEOUT_MASK				(0xffffff)
-
-#define DSI_LP_RX_TIMEOUT_MASK				(0xffffff)
-
-#define DSI_TURN_AROUND_TIMEOUT_MASK		(0x3f)
-
-#define DSI_RESET_TIMER_MASK				(0xffff)
-
-#define DSI_DBI_FIFO_WM_HALF				(0x0)
-#define DSI_DBI_FIFO_WM_QUARTER				(0x1)
-#define DSI_DBI_FIFO_WM_LOW					(0x2)
-
-#define DSI_DPI_TIMING_MASK					(0xffff)
-
-#define DSI_INIT_TIMER_MASK					(0xffff)
-
-#define DSI_DBI_RETURN_PACK_SIZE_MASK		(0x3ff)
-
-#define DSI_LP_BYTECLK_MASK					(0x0ffff)
-
-#define DSI_HS_CTRL_GEN_SHORT_W0			(0x03)
-#define DSI_HS_CTRL_GEN_SHORT_W1			(0x13)
-#define DSI_HS_CTRL_GEN_SHORT_W2			(0x23)
-#define DSI_HS_CTRL_GEN_R0					(0x04)
-#define DSI_HS_CTRL_GEN_R1					(0x14)
-#define DSI_HS_CTRL_GEN_R2					(0x24)
-#define DSI_HS_CTRL_GEN_LONG_W				(0x29)
-#define DSI_HS_CTRL_MCS_SHORT_W0			(0x05)
-#define DSI_HS_CTRL_MCS_SHORT_W1			(0x15)
-#define DSI_HS_CTRL_MCS_R0					(0x06)
-#define DSI_HS_CTRL_MCS_LONG_W				(0x39)
-#define DSI_HS_CTRL_VC_OFFSET				(0x06)
-#define DSI_HS_CTRL_WC_OFFSET				(0x08)
-
-#define	DSI_FIFO_GEN_HS_DATA_FULL			BIT(0)
-#define DSI_FIFO_GEN_HS_DATA_HALF_EMPTY		BIT(1)
-#define DSI_FIFO_GEN_HS_DATA_EMPTY			BIT(2)
-#define DSI_FIFO_GEN_LP_DATA_FULL			BIT(8)
-#define DSI_FIFO_GEN_LP_DATA_HALF_EMPTY		BIT(9)
-#define DSI_FIFO_GEN_LP_DATA_EMPTY			BIT(10)
-#define DSI_FIFO_GEN_HS_CTRL_FULL			BIT(16)
-#define DSI_FIFO_GEN_HS_CTRL_HALF_EMPTY		BIT(17)
-#define DSI_FIFO_GEN_HS_CTRL_EMPTY			BIT(18)
-#define DSI_FIFO_GEN_LP_CTRL_FULL			BIT(24)
-#define DSI_FIFO_GEN_LP_CTRL_HALF_EMPTY		BIT(25)
-#define DSI_FIFO_GEN_LP_CTRL_EMPTY			BIT(26)
-#define DSI_FIFO_DBI_EMPTY					BIT(27)
-#define DSI_FIFO_DPI_EMPTY					BIT(28)
-
-#define DSI_DBI_HS_LP_SWITCH_MASK			(0x1)
-
-#define DSI_HS_LP_SWITCH_COUNTER_OFFSET		(0x0)
-#define DSI_LP_HS_SWITCH_COUNTER_OFFSET		(0x16)
-
-#define DSI_DPI_CTRL_HS_SHUTDOWN			(0x00000001)
-#define DSI_DPI_CTRL_HS_TURN_ON				(0x00000002)
-
-/*dsi power modes*/
-#define DSI_POWER_MODE_DISPLAY_ON	BIT(2)
-#define DSI_POWER_MODE_NORMAL_ON	BIT(3)
-#define DSI_POWER_MODE_SLEEP_OUT	BIT(4)
-#define DSI_POWER_MODE_PARTIAL_ON	BIT(5)
-#define DSI_POWER_MODE_IDLE_ON		BIT(6)
-
-enum {
-	MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE = 1,
-	MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS = 2,
-	MDFLD_DSI_VIDEO_BURST_MODE = 3,
-};
-
-#define DSI_DPI_COMPLETE_LAST_LINE			BIT(2)
-#define DSI_DPI_DISABLE_BTA					BIT(3)
-
-struct mdfld_dsi_connector {
-	struct gma_connector base;
-
-	int pipe;
-	void *private;
-	void *pkg_sender;
-
-	/* Connection status */
-	enum drm_connector_status status;
-};
-
-struct mdfld_dsi_encoder {
-	struct gma_encoder base;
-	void *private;
-};
-
-/*
- * DSI config: consists of one DSI connector and two DSI encoders.
- * DRM will pick a DSI encoder based on the different configs.
- */
-struct mdfld_dsi_config {
-	struct drm_device *dev;
-	struct drm_display_mode *fixed_mode;
-	struct drm_display_mode *mode;
-
-	struct mdfld_dsi_connector *connector;
-	struct mdfld_dsi_encoder *encoder;
-
-	int changed;
-
-	int bpp;
-	int lane_count;
-	/*Virtual channel number for this encoder*/
-	int channel_num;
-	/*video mode configure*/
-	int video_mode;
-
-	int dvr_ic_inited;
-};
-
-static inline struct mdfld_dsi_connector *mdfld_dsi_connector(
-		struct drm_connector *connector)
-{
-	struct gma_connector *gma_connector;
-
-	gma_connector = to_gma_connector(connector);
-
-	return container_of(gma_connector, struct mdfld_dsi_connector, base);
-}
-
-static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder(
-		struct drm_encoder *encoder)
-{
-	struct gma_encoder *gma_encoder;
-
-	gma_encoder = to_gma_encoder(encoder);
-
-	return container_of(gma_encoder, struct mdfld_dsi_encoder, base);
-}
-
-static inline struct mdfld_dsi_config *
-	mdfld_dsi_get_config(struct mdfld_dsi_connector *connector)
-{
-	if (!connector)
-		return NULL;
-	return (struct mdfld_dsi_config *)connector->private;
-}
-
-static inline void *mdfld_dsi_get_pkg_sender(struct mdfld_dsi_config *config)
-{
-	struct mdfld_dsi_connector *dsi_connector;
-
-	if (!config)
-		return NULL;
-
-	dsi_connector = config->connector;
-
-	if (!dsi_connector)
-		return NULL;
-
-	return dsi_connector->pkg_sender;
-}
-
-static inline struct mdfld_dsi_config *
-	mdfld_dsi_encoder_get_config(struct mdfld_dsi_encoder *encoder)
-{
-	if (!encoder)
-		return NULL;
-	return (struct mdfld_dsi_config *)encoder->private;
-}
-
-static inline struct mdfld_dsi_connector *
-	mdfld_dsi_encoder_get_connector(struct mdfld_dsi_encoder *encoder)
-{
-	struct mdfld_dsi_config *config;
-
-	if (!encoder)
-		return NULL;
-
-	config = mdfld_dsi_encoder_get_config(encoder);
-	if (!config)
-		return NULL;
-
-	return config->connector;
-}
-
-static inline void *mdfld_dsi_encoder_get_pkg_sender(
-				struct mdfld_dsi_encoder *encoder)
-{
-	struct mdfld_dsi_config *dsi_config;
-
-	dsi_config = mdfld_dsi_encoder_get_config(encoder);
-	if (!dsi_config)
-		return NULL;
-
-	return mdfld_dsi_get_pkg_sender(dsi_config);
-}
-
-static inline int mdfld_dsi_encoder_get_pipe(struct mdfld_dsi_encoder *encoder)
-{
-	struct mdfld_dsi_connector *connector;
-
-	if (!encoder)
-		return -1;
-
-	connector = mdfld_dsi_encoder_get_connector(encoder);
-	if (!connector)
-		return -1;
-	return connector->pipe;
-}
-
-/* Export functions */
-extern void mdfld_dsi_gen_fifo_ready(struct drm_device *dev,
-					u32 gen_fifo_stat_reg, u32 fifo_stat);
-extern void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config,
-					int pipe);
-extern void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe,
-					int level);
-extern void mdfld_dsi_output_init(struct drm_device *dev,
-					int pipe,
-					const struct panel_funcs *p_vid_funcs);
-extern void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config,
-					int pipe);
-
-extern int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
-					u32 *mode, bool hs);
-extern int mdfld_dsi_panel_reset(struct drm_device *dev, int pipe);
-
-#endif /*__MDFLD_DSI_OUTPUT_H__*/
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
deleted file mode 100644
index 6e0de83e9f7d247162d1e7e9b0e11c2abc4d7ff8..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ /dev/null
@@ -1,679 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jackie Li <yaodong.li@intel.com>
- */
-
-#include <linux/delay.h>
-#include <linux/freezer.h>
-
-#include <video/mipi_display.h>
-
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_output.h"
-#include "mdfld_dsi_pkg_sender.h"
-
-#define MDFLD_DSI_READ_MAX_COUNT		5000
-
-enum {
-	MDFLD_DSI_PANEL_MODE_SLEEP = 0x1,
-};
-
-enum {
-	MDFLD_DSI_PKG_SENDER_FREE = 0x0,
-	MDFLD_DSI_PKG_SENDER_BUSY = 0x1,
-};
-
-static const char *const dsi_errors[] = {
-	"RX SOT Error",
-	"RX SOT Sync Error",
-	"RX EOT Sync Error",
-	"RX Escape Mode Entry Error",
-	"RX LP TX Sync Error",
-	"RX HS Receive Timeout Error",
-	"RX False Control Error",
-	"RX ECC Single Bit Error",
-	"RX ECC Multibit Error",
-	"RX Checksum Error",
-	"RX DSI Data Type Not Recognised",
-	"RX DSI VC ID Invalid",
-	"TX False Control Error",
-	"TX ECC Single Bit Error",
-	"TX ECC Multibit Error",
-	"TX Checksum Error",
-	"TX DSI Data Type Not Recognised",
-	"TX DSI VC ID Invalid",
-	"High Contention",
-	"Low Contention",
-	"DPI FIFO Underrun",
-	"HS TX Timeout",
-	"LP RX Timeout",
-	"Turn Around ACK Timeout",
-	"ACK With No Error",
-	"RX Invalid TX Length",
-	"RX Prot Violation",
-	"HS Generic Write FIFO Full",
-	"LP Generic Write FIFO Full",
-	"Generic Read Data Avail",
-	"Special Packet Sent",
-	"Tearing Effect",
-};
-
-static inline int wait_for_gen_fifo_empty(struct mdfld_dsi_pkg_sender *sender,
-						u32 mask)
-{
-	struct drm_device *dev = sender->dev;
-	u32 gen_fifo_stat_reg = sender->mipi_gen_fifo_stat_reg;
-	int retry = 0xffff;
-
-	while (retry--) {
-		if ((mask & REG_READ(gen_fifo_stat_reg)) == mask)
-			return 0;
-		udelay(100);
-	}
-	DRM_ERROR("fifo is NOT empty 0x%08x\n", REG_READ(gen_fifo_stat_reg));
-	return -EIO;
-}
-
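-/*
- * The masks passed to wait_for_gen_fifo_empty() below are presumably the
- * "FIFO empty" status bits of MIPI_GEN_FIFO_STAT: one data and one control
- * FIFO each for HS and LP, plus two further FIFOs in the all-FIFOs case.
- */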
-static int wait_for_all_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
-{
-	return wait_for_gen_fifo_empty(sender, (BIT(2) | BIT(10) | BIT(18) |
-						BIT(26) | BIT(27) | BIT(28)));
-}
-
-static int wait_for_lp_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
-{
-	return wait_for_gen_fifo_empty(sender, (BIT(10) | BIT(26)));
-}
-
-static int wait_for_hs_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
-{
-	return wait_for_gen_fifo_empty(sender, (BIT(2) | BIT(18)));
-}
-
-static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask)
-{
-	u32 intr_stat_reg = sender->mipi_intr_stat_reg;
-	struct drm_device *dev = sender->dev;
-
-	dev_dbg(sender->dev->dev, "Handling error 0x%08x\n", mask);
-
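-	/* Each case below corresponds to the same-index dsi_errors[] entry. */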
-	switch (mask) {
-	case BIT(0):
-	case BIT(1):
-	case BIT(2):
-	case BIT(3):
-	case BIT(4):
-	case BIT(5):
-	case BIT(6):
-	case BIT(7):
-	case BIT(8):
-	case BIT(9):
-	case BIT(10):
-	case BIT(11):
-	case BIT(12):
-	case BIT(13):
-		dev_dbg(sender->dev->dev, "No Action required\n");
-		break;
-	case BIT(14):
-		/*wait for all fifo empty*/
-		/*wait_for_all_fifos_empty(sender)*/
-		break;
-	case BIT(15):
-		dev_dbg(sender->dev->dev, "No Action required\n");
-		break;
-	case BIT(16):
-		break;
-	case BIT(17):
-		break;
-	case BIT(18):
-	case BIT(19):
-		dev_dbg(sender->dev->dev, "High/Low contention detected\n");
-		/*wait for contention recovery time*/
-		/*mdelay(10);*/
-		/*wait for all fifo empty*/
-		if (0)
-			wait_for_all_fifos_empty(sender);
-		break;
-	case BIT(20):
-		dev_dbg(sender->dev->dev, "No Action required\n");
-		break;
-	case BIT(21):
-		/*wait for all fifo empty*/
-		/*wait_for_all_fifos_empty(sender);*/
-		break;
-	case BIT(22):
-		break;
-	case BIT(23):
-	case BIT(24):
-	case BIT(25):
-	case BIT(26):
-	case BIT(27):
-		dev_dbg(sender->dev->dev, "HS Gen fifo full\n");
-		REG_WRITE(intr_stat_reg, mask);
-		wait_for_hs_fifos_empty(sender);
-		break;
-	case BIT(28):
-		dev_dbg(sender->dev->dev, "LP Gen fifo full\n");
-		REG_WRITE(intr_stat_reg, mask);
-		wait_for_lp_fifos_empty(sender);
-		break;
-	case BIT(29):
-	case BIT(30):
-	case BIT(31):
-		dev_dbg(sender->dev->dev, "No Action required\n");
-		break;
-	}
-
-	if (mask & REG_READ(intr_stat_reg))
-		dev_dbg(sender->dev->dev,
-				"Cannot clear interrupt 0x%08x\n", mask);
-	return 0;
-}
-
-static int dsi_error_handler(struct mdfld_dsi_pkg_sender *sender)
-{
-	struct drm_device *dev = sender->dev;
-	u32 intr_stat_reg = sender->mipi_intr_stat_reg;
-	u32 mask;
-	u32 intr_stat;
-	int i;
-	int err = 0;
-
-	intr_stat = REG_READ(intr_stat_reg);
-
-	for (i = 0; i < 32; i++) {
-		mask = (0x00000001UL) << i;
-		if (intr_stat & mask) {
-			dev_dbg(sender->dev->dev, "[DSI]: %s\n", dsi_errors[i]);
-			err = handle_dsi_error(sender, mask);
-			if (err)
-				DRM_ERROR("Cannot handle error\n");
-		}
-	}
-	return err;
-}
-
-static int send_short_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
-			u8 cmd, u8 param, bool hs)
-{
-	struct drm_device *dev = sender->dev;
-	u32 ctrl_reg;
-	u32 val;
-	u8 virtual_channel = 0;
-
-	if (hs) {
-		ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
-
-		/* FIXME: wait_for_hs_fifos_empty(sender); */
-	} else {
-		ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
-
-		/* FIXME: wait_for_lp_fifos_empty(sender); */
-	}
-
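-	/*
-	 * Short packet layout in the generic control register: parameter
-	 * byte in bits 23:16, command in 15:8, virtual channel in 7:6,
-	 * DSI data type in 5:0.
-	 */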
-	val = FLD_VAL(param, 23, 16) | FLD_VAL(cmd, 15, 8) |
-		FLD_VAL(virtual_channel, 7, 6) | FLD_VAL(data_type, 5, 0);
-
-	REG_WRITE(ctrl_reg, val);
-
-	return 0;
-}
-
-static int send_long_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
-			u8 *data, int len, bool hs)
-{
-	struct drm_device *dev = sender->dev;
-	u32 ctrl_reg;
-	u32 data_reg;
-	u32 val;
-	u8 *p;
-	u8 b1, b2, b3, b4;
-	u8 virtual_channel = 0;
-	int i;
-
-	if (hs) {
-		ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
-		data_reg = sender->mipi_hs_gen_data_reg;
-
-		/* FIXME: wait_for_hs_fifos_empty(sender); */
-	} else {
-		ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
-		data_reg = sender->mipi_lp_gen_data_reg;
-
-		/* FIXME: wait_for_lp_fifos_empty(sender); */
-	}
-
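-	/*
-	 * The generic data register takes the payload 32 bits at a time,
-	 * least significant byte first; pack four payload bytes per write
-	 * and zero-pad the trailing partial word.
-	 */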
-	p = data;
-	for (i = 0; i < len / 4; i++) {
-		b1 = *p++;
-		b2 = *p++;
-		b3 = *p++;
-		b4 = *p++;
-
-		REG_WRITE(data_reg, b4 << 24 | b3 << 16 | b2 << 8 | b1);
-	}
-
-	i = len % 4;
-	if (i) {
-		b1 = 0; b2 = 0; b3 = 0;
-
-		switch (i) {
-		case 3:
-			b1 = *p++;
-			b2 = *p++;
-			b3 = *p++;
-			break;
-		case 2:
-			b1 = *p++;
-			b2 = *p++;
-			break;
-		case 1:
-			b1 = *p++;
-			break;
-		}
-
-		REG_WRITE(data_reg, b3 << 16 | b2 << 8 | b1);
-	}
-
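-	/*
-	 * Long packet header: payload length in bits 23:8, virtual channel
-	 * in 7:6, DSI data type in 5:0.
-	 */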
-	val = FLD_VAL(len, 23, 8) | FLD_VAL(virtual_channel, 7, 6) |
-		FLD_VAL(data_type, 5, 0);
-
-	REG_WRITE(ctrl_reg, val);
-
-	return 0;
-}
-
-static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
-			u8 *data, u16 len)
-{
-	u8 cmd;
-
-	switch (data_type) {
-	case MIPI_DSI_DCS_SHORT_WRITE:
-	case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
-	case MIPI_DSI_DCS_LONG_WRITE:
-		cmd = *data;
-		break;
-	default:
-		return 0;
-	}
-
-	/* this prevents other packets being sent while we msleep */
-	sender->status = MDFLD_DSI_PKG_SENDER_BUSY;
-
-	/* wait 120 milliseconds in case exit_sleep_mode was just sent */
-	if (unlikely(cmd == MIPI_DCS_ENTER_SLEEP_MODE)) {
-		/*TODO: replace it with msleep later*/
-		mdelay(120);
-	}
-
-	if (unlikely(cmd == MIPI_DCS_EXIT_SLEEP_MODE)) {
-		/*TODO: replace it with msleep later*/
-		mdelay(120);
-	}
-	return 0;
-}
-
-static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
-			u8 *data, u16 len)
-{
-	u8 cmd;
-
-	switch (data_type) {
-	case MIPI_DSI_DCS_SHORT_WRITE:
-	case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
-	case MIPI_DSI_DCS_LONG_WRITE:
-		cmd = *data;
-		break;
-	default:
-		return 0;
-	}
-
-	/*update panel status*/
-	if (unlikely(cmd == MIPI_DCS_ENTER_SLEEP_MODE)) {
-		sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP;
-		/*TODO: replace it with msleep later*/
-		mdelay(120);
-	} else if (unlikely(cmd == MIPI_DCS_EXIT_SLEEP_MODE)) {
-		sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP;
-		/*TODO: replace it with msleep later*/
-		mdelay(120);
-	} else if (unlikely(cmd == MIPI_DCS_SOFT_RESET)) {
-		/*TODO: replace it with msleep later*/
-		mdelay(5);
-	}
-
-	sender->status = MDFLD_DSI_PKG_SENDER_FREE;
-
-	return 0;
-}
-
-static int send_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
-		u8 *data, u16 len, bool hs)
-{
-	int ret;
-
-	/*handle DSI error*/
-	ret = dsi_error_handler(sender);
-	if (ret) {
-		DRM_ERROR("Error handling failed\n");
-		return -EAGAIN;
-	}
-
-	/* send pkg */
-	if (sender->status == MDFLD_DSI_PKG_SENDER_BUSY) {
-		DRM_ERROR("sender is busy\n");
-		return -EAGAIN;
-	}
-
-	ret = send_pkg_prepare(sender, data_type, data, len);
-	if (ret) {
-		DRM_ERROR("send_pkg_prepare error\n");
-		return ret;
-	}
-
-	switch (data_type) {
-	case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
-	case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
-	case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
-	case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
-	case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
-	case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
-	case MIPI_DSI_DCS_SHORT_WRITE:
-	case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
-	case MIPI_DSI_DCS_READ:
-		ret = send_short_pkg(sender, data_type, data[0], data[1], hs);
-		break;
-	case MIPI_DSI_GENERIC_LONG_WRITE:
-	case MIPI_DSI_DCS_LONG_WRITE:
-		ret = send_long_pkg(sender, data_type, data, len, hs);
-		break;
-	}
-
-	send_pkg_done(sender, data_type, data, len);
-
-	/*FIXME: should I query complete and fifo empty here?*/
-
-	return ret;
-}
-
-int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
-			u32 len, bool hs)
-{
-	unsigned long flags;
-
-	if (!sender || !data || !len) {
-		DRM_ERROR("Invalid parameters\n");
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&sender->lock, flags);
-	send_pkg(sender, MIPI_DSI_DCS_LONG_WRITE, data, len, hs);
-	spin_unlock_irqrestore(&sender->lock, flags);
-
-	return 0;
-}
-
-int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
-			u8 param, u8 param_num, bool hs)
-{
-	u8 data[2];
-	unsigned long flags;
-	u8 data_type;
-
-	if (!sender) {
-		DRM_ERROR("Invalid parameter\n");
-		return -EINVAL;
-	}
-
-	data[0] = cmd;
-
-	if (param_num) {
-		data_type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
-		data[1] = param;
-	} else {
-		data_type = MIPI_DSI_DCS_SHORT_WRITE;
-		data[1] = 0;
-	}
-
-	spin_lock_irqsave(&sender->lock, flags);
-	send_pkg(sender, data_type, data, sizeof(data), hs);
-	spin_unlock_irqrestore(&sender->lock, flags);
-
-	return 0;
-}
-
-int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
-			u8 param1, u8 param_num, bool hs)
-{
-	u8 data[2];
-	unsigned long flags;
-	u8 data_type;
-
-	if (!sender || param_num > 2) {
-		DRM_ERROR("Invalid parameter\n");
-		return -EINVAL;
-	}
-
-	switch (param_num) {
-	case 0:
-		data_type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM;
-		data[0] = 0;
-		data[1] = 0;
-		break;
-	case 1:
-		data_type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM;
-		data[0] = param0;
-		data[1] = 0;
-		break;
-	case 2:
-		data_type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM;
-		data[0] = param0;
-		data[1] = param1;
-		break;
-	}
-
-	spin_lock_irqsave(&sender->lock, flags);
-	send_pkg(sender, data_type, data, sizeof(data), hs);
-	spin_unlock_irqrestore(&sender->lock, flags);
-
-	return 0;
-}
-
-int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
-			u32 len, bool hs)
-{
-	unsigned long flags;
-
-	if (!sender || !data || !len) {
-		DRM_ERROR("Invalid parameters\n");
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&sender->lock, flags);
-	send_pkg(sender, MIPI_DSI_GENERIC_LONG_WRITE, data, len, hs);
-	spin_unlock_irqrestore(&sender->lock, flags);
-
-	return 0;
-}
-
-static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
-			u8 *data, u16 len, u32 *data_out, u16 len_out, bool hs)
-{
-	unsigned long flags;
-	struct drm_device *dev;
-	int i;
-	u32 gen_data_reg;
-	int retry = MDFLD_DSI_READ_MAX_COUNT;
-
-	if (!sender || !data_out || !len_out) {
-		DRM_ERROR("Invalid parameters\n");
-		return -EINVAL;
-	}
-
-	dev = sender->dev;
-
-	/*
-	 * Do the read:
-	 * 0) send out the generic read request
-	 * 1) poll the read data avail interrupt
-	 * 2) read the data
-	 */
-	spin_lock_irqsave(&sender->lock, flags);
-
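-	/* BIT(29) is "Generic Read Data Avail"; see dsi_errors[29] above. */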
-	REG_WRITE(sender->mipi_intr_stat_reg, BIT(29));
-
-	if ((REG_READ(sender->mipi_intr_stat_reg) & BIT(29)))
-		DRM_ERROR("Cannot clear the read data avail interrupt\n");
-
-	/*send out read request*/
-	send_pkg(sender, data_type, data, len, hs);
-
-	/* poll the read data avail interrupt */
-	while (retry && !(REG_READ(sender->mipi_intr_stat_reg) & BIT(29))) {
-		udelay(100);
-		retry--;
-	}
-
-	if (!retry) {
-		spin_unlock_irqrestore(&sender->lock, flags);
-		return -ETIMEDOUT;
-	}
-
-	REG_WRITE(sender->mipi_intr_stat_reg, BIT(29));
-
-	/*read data*/
-	if (hs)
-		gen_data_reg = sender->mipi_hs_gen_data_reg;
-	else
-		gen_data_reg = sender->mipi_lp_gen_data_reg;
-
-	for (i = 0; i < len_out; i++)
-		*(data_out + i) = REG_READ(gen_data_reg);
-
-	spin_unlock_irqrestore(&sender->lock, flags);
-
-	return 0;
-}
-
-int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
-		u32 *data, u16 len, bool hs)
-{
-	if (!sender || !data || !len) {
-		DRM_ERROR("Invalid parameters\n");
-		return -EINVAL;
-	}
-
-	return __read_panel_data(sender, MIPI_DSI_DCS_READ, &cmd, 1,
-				data, len, hs);
-}
-
-int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
-								int pipe)
-{
-	struct mdfld_dsi_pkg_sender *pkg_sender;
-	struct mdfld_dsi_config *dsi_config;
-	struct drm_device *dev;
-	struct drm_psb_private *dev_priv;
-	const struct psb_offset *map;
-	u32 mipi_val = 0;
-
-	/* Check the connector before dereferencing it via its config. */
-	if (!dsi_connector) {
-		DRM_ERROR("Invalid parameter\n");
-		return -EINVAL;
-	}
-
-	dsi_config = mdfld_dsi_get_config(dsi_connector);
-	dev = dsi_config->dev;
-	dev_priv = dev->dev_private;
-	map = &dev_priv->regmap[pipe];
-
-	pkg_sender = dsi_connector->pkg_sender;
-
-	if (!pkg_sender || IS_ERR(pkg_sender)) {
-		pkg_sender = kzalloc(sizeof(struct mdfld_dsi_pkg_sender),
-								GFP_KERNEL);
-		if (!pkg_sender) {
-			DRM_ERROR("Create DSI pkg sender failed\n");
-			return -ENOMEM;
-		}
-		dsi_connector->pkg_sender = (void *)pkg_sender;
-	}
-
-	pkg_sender->dev = dev;
-	pkg_sender->dsi_connector = dsi_connector;
-	pkg_sender->pipe = pipe;
-	pkg_sender->pkg_num = 0;
-	pkg_sender->panel_mode = 0;
-	pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
-
-	/*init regs*/
-	/* FIXME: should just copy the regmap ptr ? */
-	pkg_sender->dpll_reg = map->dpll;
-	pkg_sender->dspcntr_reg = map->cntr;
-	pkg_sender->pipeconf_reg = map->conf;
-	pkg_sender->dsplinoff_reg = map->linoff;
-	pkg_sender->dspsurf_reg = map->surf;
-	pkg_sender->pipestat_reg = map->status;
-
-	pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
-	pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe);
-	pkg_sender->mipi_hs_gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe);
-	pkg_sender->mipi_lp_gen_ctrl_reg = MIPI_LP_GEN_CTRL_REG(pipe);
-	pkg_sender->mipi_hs_gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe);
-	pkg_sender->mipi_gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
-	pkg_sender->mipi_data_addr_reg = MIPI_DATA_ADD_REG(pipe);
-	pkg_sender->mipi_data_len_reg = MIPI_DATA_LEN_REG(pipe);
-	pkg_sender->mipi_cmd_addr_reg = MIPI_CMD_ADD_REG(pipe);
-	pkg_sender->mipi_cmd_len_reg = MIPI_CMD_LEN_REG(pipe);
-
-	/*init lock*/
-	spin_lock_init(&pkg_sender->lock);
-
-	if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
-		/**
-		 * For video mode, don't enable DPI timing output here,
-		 * will init the DPI timing output during mode setting.
-		 */
-		mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
-
-		if (pipe == 0)
-			mipi_val |= 0x2;
-
-		REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi_val);
-		REG_READ(MIPI_PORT_CONTROL(pipe));
-
-		/* do dsi controller init */
-		mdfld_dsi_controller_init(dsi_config, pipe);
-	}
-
-	return 0;
-}
-
-void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender)
-{
-	if (!sender || IS_ERR(sender))
-		return;
-
-	/*free*/
-	kfree(sender);
-}
-
-
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
deleted file mode 100644
index 0478a21c15d52d1d0eae8604183c2bfd39b7323e..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jackie Li <yaodong.li@intel.com>
- */
-#ifndef __MDFLD_DSI_PKG_SENDER_H__
-#define __MDFLD_DSI_PKG_SENDER_H__
-
-#include <linux/kthread.h>
-
-#define MDFLD_MAX_DCS_PARAM	8
-
-struct mdfld_dsi_pkg_sender {
-	struct drm_device *dev;
-	struct mdfld_dsi_connector *dsi_connector;
-	u32 status;
-	u32 panel_mode;
-
-	int pipe;
-
-	spinlock_t lock;
-
-	u32 pkg_num;
-
-	/* Registers */
-	u32 dpll_reg;
-	u32 dspcntr_reg;
-	u32 pipeconf_reg;
-	u32 pipestat_reg;
-	u32 dsplinoff_reg;
-	u32 dspsurf_reg;
-
-	u32 mipi_intr_stat_reg;
-	u32 mipi_lp_gen_data_reg;
-	u32 mipi_hs_gen_data_reg;
-	u32 mipi_lp_gen_ctrl_reg;
-	u32 mipi_hs_gen_ctrl_reg;
-	u32 mipi_gen_fifo_stat_reg;
-	u32 mipi_data_addr_reg;
-	u32 mipi_data_len_reg;
-	u32 mipi_cmd_addr_reg;
-	u32 mipi_cmd_len_reg;
-};
-
-extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
-					int pipe);
-extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender);
-int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
-					u8 param, u8 param_num, bool hs);
-int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
-					u32 len, bool hs);
-int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
-					u8 param1, u8 param_num, bool hs);
-int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
-					u32 len, bool hs);
-/* Read interfaces */
-int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
-		u32 *data, u16 len, bool hs);
-
-#endif
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
deleted file mode 100644
index aae2d358364cc522546e170641b175fe89318f72..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ /dev/null
@@ -1,966 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright © 2006-2007 Intel Corporation
- *
- * Authors:
- *	Eric Anholt <eric@anholt.net>
- */
-
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/pm_runtime.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_fourcc.h>
-
-#include "framebuffer.h"
-#include "gma_display.h"
-#include "mdfld_dsi_output.h"
-#include "mdfld_output.h"
-#include "psb_intel_reg.h"
-
-/* Hardcoded currently */
-static int ksel = KSEL_CRYSTAL_19;
-
-struct psb_intel_range_t {
-	int min, max;
-};
-
-struct mrst_limit_t {
-	struct psb_intel_range_t dot, m, p1;
-};
-
-struct mrst_clock_t {
-	/* derived values */
-	int dot;
-	int m;
-	int p1;
-};
-
-#define COUNT_MAX 0x10000000
-
-void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
-{
-	struct drm_psb_private *dev_priv = dev->dev_private;
-	const struct psb_offset *map = &dev_priv->regmap[pipe];
-	int count, temp;
-
-	switch (pipe) {
-	case 0:
-	case 1:
-	case 2:
-		break;
-	default:
-		DRM_ERROR("Illegal Pipe Number.\n");
-		return;
-	}
-
-	/* FIXME JLIU7_PO */
-	gma_wait_for_vblank(dev);
-	return;
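-	/*
-	 * Note: the early return above makes the poll loop below
-	 * unreachable; it is presumably kept for when the FIXME is resolved.
-	 */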
-
-	/* Wait for the pipe disable to take effect. */
-	for (count = 0; count < COUNT_MAX; count++) {
-		temp = REG_READ(map->conf);
-		if ((temp & PIPEACONF_PIPE_STATE) == 0)
-			break;
-	}
-}
-
-void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
-{
-	struct drm_psb_private *dev_priv = dev->dev_private;
-	const struct psb_offset *map = &dev_priv->regmap[pipe];
-	int count, temp;
-
-	switch (pipe) {
-	case 0:
-	case 1:
-	case 2:
-		break;
-	default:
-		DRM_ERROR("Illegal Pipe Number.\n");
-		return;
-	}
-
-	/* FIXME JLIU7_PO */
-	gma_wait_for_vblank(dev);
-	return;
-
-	/* Wait for the pipe enable to take effect. */
-	for (count = 0; count < COUNT_MAX; count++) {
-		temp = REG_READ(map->conf);
-		if (temp & PIPEACONF_PIPE_STATE)
-			break;
-	}
-}
-
-/**
- * Return the pipe currently connected to the panel fitter,
- * or -1 if the panel fitter is not present or not in use
- */
-static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
-{
-	u32 pfit_control;
-
-	pfit_control = REG_READ(PFIT_CONTROL);
-
-	/* See if the panel fitter is in use */
-	if ((pfit_control & PFIT_ENABLE) == 0)
-		return -1;
-
-	/* 965 can place panel fitter on either pipe */
-	return (pfit_control >> 29) & 0x3;
-}
-
-static int check_fb(struct drm_framebuffer *fb)
-{
-	if (!fb)
-		return 0;
-
-	switch (fb->format->cpp[0] * 8) {
-	case 8:
-	case 16:
-	case 24:
-	case 32:
-		return 0;
-	default:
-		DRM_ERROR("Unknown color depth\n");
-		return -EINVAL;
-	}
-}
-
-static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
-				struct drm_framebuffer *old_fb)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
-	int pipe = gma_crtc->pipe;
-	const struct psb_offset *map = &dev_priv->regmap[pipe];
-	unsigned long start, offset;
-	u32 dspcntr;
-	int ret;
-
-	dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe);
-
-	/* no fb bound */
-	if (!fb) {
-		dev_dbg(dev->dev, "No FB bound\n");
-		return 0;
-	}
-
-	ret = check_fb(fb);
-	if (ret)
-		return ret;
-
-	if (pipe > 2) {
-		DRM_ERROR("Illegal Pipe Number.\n");
-		return -EINVAL;
-	}
-
-	if (!gma_power_begin(dev, true))
-		return 0;
-
-	start = to_gtt_range(fb->obj[0])->offset;
-	offset = y * fb->pitches[0] + x * fb->format->cpp[0];
-
-	REG_WRITE(map->stride, fb->pitches[0]);
-	dspcntr = REG_READ(map->cntr);
-	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
-
-	switch (fb->format->cpp[0] * 8) {
-	case 8:
-		dspcntr |= DISPPLANE_8BPP;
-		break;
-	case 16:
-		if (fb->format->depth == 15)
-			dspcntr |= DISPPLANE_15_16BPP;
-		else
-			dspcntr |= DISPPLANE_16BPP;
-		break;
-	case 24:
-	case 32:
-		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
-		break;
-	}
-	REG_WRITE(map->cntr, dspcntr);
-
-	dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
-						start, offset, x, y);
-	REG_WRITE(map->linoff, offset);
-	REG_READ(map->linoff);
-	REG_WRITE(map->surf, start);
-	REG_READ(map->surf);
-
-	gma_power_end(dev);
-
-	return 0;
-}
-
-/*
- * Disable the pipe, plane and PLL.
- */
-void mdfld_disable_crtc(struct drm_device *dev, int pipe)
-{
-	struct drm_psb_private *dev_priv = dev->dev_private;
-	const struct psb_offset *map = &dev_priv->regmap[pipe];
-	u32 temp;
-
-	dev_dbg(dev->dev, "pipe = %d\n", pipe);
-
-	if (pipe != 1)
-		mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe),
-				HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
-
-	/* Disable display plane */
-	temp = REG_READ(map->cntr);
-	if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
-		REG_WRITE(map->cntr,
-			  temp & ~DISPLAY_PLANE_ENABLE);
-		/* Flush the plane changes */
-		REG_WRITE(map->base, REG_READ(map->base));
-		REG_READ(map->base);
-	}
-
-	/* FIXME_JLIU7 MDFLD_PO revisit */
-
-	/* Next, disable display pipes */
-	temp = REG_READ(map->conf);
-	if ((temp & PIPEACONF_ENABLE) != 0) {
-		temp &= ~PIPEACONF_ENABLE;
-		temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
-		REG_WRITE(map->conf, temp);
-		REG_READ(map->conf);
-
-		/* Wait for the pipe disable to take effect. */
-		mdfldWaitForPipeDisable(dev, pipe);
-	}
-
-	temp = REG_READ(map->dpll);
-	if (temp & DPLL_VCO_ENABLE) {
-		if ((pipe != 1 &&
-			!((REG_READ(PIPEACONF) | REG_READ(PIPECCONF))
-				& PIPEACONF_ENABLE)) || pipe == 1) {
-			temp &= ~(DPLL_VCO_ENABLE);
-			REG_WRITE(map->dpll, temp);
-			REG_READ(map->dpll);
-			/* Wait for the clocks to turn off. */
-			/* FIXME_MDFLD PO may need more delay */
-			udelay(500);
-
-			if (!(temp & MDFLD_PWR_GATE_EN)) {
-				/* gating power of DPLL */
-				REG_WRITE(map->dpll, temp | MDFLD_PWR_GATE_EN);
-				/* FIXME_MDFLD PO - change 500 to 1 after PO */
-				udelay(5000);
-			}
-		}
-	}
-
-}
-
-/**
- * Sets the power management mode of the pipe and plane.
- *
- * This code should probably grow support for turning the cursor off and back
- * on appropriately at the same time as we're turning the pipe off/on.
- */
-static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
-	int pipe = gma_crtc->pipe;
-	const struct psb_offset *map = &dev_priv->regmap[pipe];
-	u32 pipeconf = dev_priv->pipeconf[pipe];
-	u32 temp;
-	int timeout = 0;
-
-	dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe);
-
-	/* Note: Old code uses pipe A stat for pipe B, but that appears
-	   to be a bug */
-
-	if (!gma_power_begin(dev, true))
-		return;
-
-	/* XXX: When our outputs are all unaware of DPMS modes other than off
-	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
-	 */
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-		/* Enable the DPLL */
-		temp = REG_READ(map->dpll);
-
-		if ((temp & DPLL_VCO_ENABLE) == 0) {
-			/* When ungating the DPLL power we need to wait
-			   0.5us before enabling the VCO */
-			if (temp & MDFLD_PWR_GATE_EN) {
-				temp &= ~MDFLD_PWR_GATE_EN;
-				REG_WRITE(map->dpll, temp);
-				/* FIXME_MDFLD PO - change 500 to 1 after PO */
-				udelay(500);
-			}
-
-			REG_WRITE(map->dpll, temp);
-			REG_READ(map->dpll);
-			/* FIXME_MDFLD PO - change 500 to 1 after PO */
-			udelay(500);
-
-			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
-			REG_READ(map->dpll);
-
-			/**
-			 * wait for DSI PLL to lock
-			 * NOTE: only need to poll status of pipe 0 and pipe 1,
-			 * since both MIPI pipes share the same PLL.
-			 */
-			while ((pipe != 2) && (timeout < 20000) &&
-			  !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
-				udelay(150);
-				timeout++;
-			}
-		}
-
-		/* Enable the plane */
-		temp = REG_READ(map->cntr);
-		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
-			REG_WRITE(map->cntr,
-				temp | DISPLAY_PLANE_ENABLE);
-			/* Flush the plane changes */
-			REG_WRITE(map->base, REG_READ(map->base));
-		}
-
-		/* Enable the pipe */
-		temp = REG_READ(map->conf);
-		if ((temp & PIPEACONF_ENABLE) == 0) {
-			REG_WRITE(map->conf, pipeconf);
-
-			/* Wait for the pipe enable to take effect. */
-			mdfldWaitForPipeEnable(dev, pipe);
-		}
-
-		/* workaround for sighting 3741701: random X blank display */
-		/* perform w/a in video mode only on pipe A or C */
-		if (pipe == 0 || pipe == 2) {
-			REG_WRITE(map->status, REG_READ(map->status));
-			msleep(100);
-			if (PIPE_VBLANK_STATUS & REG_READ(map->status))
-				dev_dbg(dev->dev, "OK");
-			else {
-				dev_dbg(dev->dev, "STUCK!!!!");
-				/*shutdown controller*/
-				temp = REG_READ(map->cntr);
-				REG_WRITE(map->cntr,
-						temp & ~DISPLAY_PLANE_ENABLE);
-				REG_WRITE(map->base, REG_READ(map->base));
-				/*mdfld_dsi_dpi_shut_down(dev, pipe);*/
-				REG_WRITE(0xb048, 1);
-				msleep(100);
-				temp = REG_READ(map->conf);
-				temp &= ~PIPEACONF_ENABLE;
-				REG_WRITE(map->conf, temp);
-				msleep(100); /*wait for pipe disable*/
-				REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0);
-				msleep(100);
-				REG_WRITE(0xb004, REG_READ(0xb004));
-				/* try to bring the controller back up again*/
-				REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1);
-				temp = REG_READ(map->cntr);
-				REG_WRITE(map->cntr,
-						temp | DISPLAY_PLANE_ENABLE);
-				REG_WRITE(map->base, REG_READ(map->base));
-				/*mdfld_dsi_dpi_turn_on(dev, pipe);*/
-				REG_WRITE(0xb048, 2);
-				msleep(100);
-				temp = REG_READ(map->conf);
-				temp |= PIPEACONF_ENABLE;
-				REG_WRITE(map->conf, temp);
-			}
-		}
-
-		gma_crtc_load_lut(crtc);
-
-		/* Give the overlay scaler a chance to enable
-		   if it's on this pipe */
-		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
-
-		break;
-	case DRM_MODE_DPMS_OFF:
-		/* Give the overlay scaler a chance to disable
-		 * if it's on this pipe */
-		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
-		if (pipe != 1)
-			mdfld_dsi_gen_fifo_ready(dev,
-				MIPI_GEN_FIFO_STAT_REG(pipe),
-				HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
-
-		/* Disable the VGA plane that we never use */
-		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-
-		/* Disable display plane */
-		temp = REG_READ(map->cntr);
-		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
-			REG_WRITE(map->cntr,
-				  temp & ~DISPLAY_PLANE_ENABLE);
-			/* Flush the plane changes */
-			REG_WRITE(map->base, REG_READ(map->base));
-			REG_READ(map->base);
-		}
-
-		/* Next, disable display pipes */
-		temp = REG_READ(map->conf);
-		if ((temp & PIPEACONF_ENABLE) != 0) {
-			temp &= ~PIPEACONF_ENABLE;
-			temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
-			REG_WRITE(map->conf, temp);
-			REG_READ(map->conf);
-
-			/* Wait for the pipe disable to take effect. */
-			mdfldWaitForPipeDisable(dev, pipe);
-		}
-
-		temp = REG_READ(map->dpll);
-		if (temp & DPLL_VCO_ENABLE) {
-			if ((pipe != 1 && !((REG_READ(PIPEACONF)
-				| REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
-					|| pipe == 1) {
-				temp &= ~(DPLL_VCO_ENABLE);
-				REG_WRITE(map->dpll, temp);
-				REG_READ(map->dpll);
-				/* Wait for the clocks to turn off. */
-				/* FIXME_MDFLD PO may need more delay */
-				udelay(500);
-			}
-		}
-		break;
-	}
-	gma_power_end(dev);
-}
-
-
-#define MDFLD_LIMT_DPLL_19	    0
-#define MDFLD_LIMT_DPLL_25	    1
-#define MDFLD_LIMT_DPLL_83	    2
-#define MDFLD_LIMT_DPLL_100	    3
-#define MDFLD_LIMT_DSIPLL_19	    4
-#define MDFLD_LIMT_DSIPLL_25	    5
-#define MDFLD_LIMT_DSIPLL_83	    6
-#define MDFLD_LIMT_DSIPLL_100	    7
-
-#define MDFLD_DOT_MIN		  19750
-#define MDFLD_DOT_MAX		  120000
-#define MDFLD_DPLL_M_MIN_19	    113
-#define MDFLD_DPLL_M_MAX_19	    155
-#define MDFLD_DPLL_P1_MIN_19	    2
-#define MDFLD_DPLL_P1_MAX_19	    10
-#define MDFLD_DPLL_M_MIN_25	    101
-#define MDFLD_DPLL_M_MAX_25	    130
-#define MDFLD_DPLL_P1_MIN_25	    2
-#define MDFLD_DPLL_P1_MAX_25	    10
-#define MDFLD_DPLL_M_MIN_83	    64
-#define MDFLD_DPLL_M_MAX_83	    64
-#define MDFLD_DPLL_P1_MIN_83	    2
-#define MDFLD_DPLL_P1_MAX_83	    2
-#define MDFLD_DPLL_M_MIN_100	    64
-#define MDFLD_DPLL_M_MAX_100	    64
-#define MDFLD_DPLL_P1_MIN_100	    2
-#define MDFLD_DPLL_P1_MAX_100	    2
-#define MDFLD_DSIPLL_M_MIN_19	    131
-#define MDFLD_DSIPLL_M_MAX_19	    175
-#define MDFLD_DSIPLL_P1_MIN_19	    3
-#define MDFLD_DSIPLL_P1_MAX_19	    8
-#define MDFLD_DSIPLL_M_MIN_25	    97
-#define MDFLD_DSIPLL_M_MAX_25	    140
-#define MDFLD_DSIPLL_P1_MIN_25	    3
-#define MDFLD_DSIPLL_P1_MAX_25	    9
-#define MDFLD_DSIPLL_M_MIN_83	    33
-#define MDFLD_DSIPLL_M_MAX_83	    92
-#define MDFLD_DSIPLL_P1_MIN_83	    2
-#define MDFLD_DSIPLL_P1_MAX_83	    3
-#define MDFLD_DSIPLL_M_MIN_100	    97
-#define MDFLD_DSIPLL_M_MAX_100	    140
-#define MDFLD_DSIPLL_P1_MIN_100	    3
-#define MDFLD_DSIPLL_P1_MAX_100	    9
-
-static const struct mrst_limit_t mdfld_limits[] = {
-	{			/* MDFLD_LIMT_DPLL_19 */
-	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
-	 .m = {.min = MDFLD_DPLL_M_MIN_19, .max = MDFLD_DPLL_M_MAX_19},
-	 .p1 = {.min = MDFLD_DPLL_P1_MIN_19, .max = MDFLD_DPLL_P1_MAX_19},
-	 },
-	{			/* MDFLD_LIMT_DPLL_25 */
-	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
-	 .m = {.min = MDFLD_DPLL_M_MIN_25, .max = MDFLD_DPLL_M_MAX_25},
-	 .p1 = {.min = MDFLD_DPLL_P1_MIN_25, .max = MDFLD_DPLL_P1_MAX_25},
-	 },
-	{			/* MDFLD_LIMT_DPLL_83 */
-	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
-	 .m = {.min = MDFLD_DPLL_M_MIN_83, .max = MDFLD_DPLL_M_MAX_83},
-	 .p1 = {.min = MDFLD_DPLL_P1_MIN_83, .max = MDFLD_DPLL_P1_MAX_83},
-	 },
-	{			/* MDFLD_LIMT_DPLL_100 */
-	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
-	 .m = {.min = MDFLD_DPLL_M_MIN_100, .max = MDFLD_DPLL_M_MAX_100},
-	 .p1 = {.min = MDFLD_DPLL_P1_MIN_100, .max = MDFLD_DPLL_P1_MAX_100},
-	 },
-	{			/* MDFLD_LIMT_DSIPLL_19 */
-	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
-	 .m = {.min = MDFLD_DSIPLL_M_MIN_19, .max = MDFLD_DSIPLL_M_MAX_19},
-	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19, .max = MDFLD_DSIPLL_P1_MAX_19},
-	 },
-	{			/* MDFLD_LIMT_DSIPLL_25 */
-	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
-	 .m = {.min = MDFLD_DSIPLL_M_MIN_25, .max = MDFLD_DSIPLL_M_MAX_25},
-	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25, .max = MDFLD_DSIPLL_P1_MAX_25},
-	 },
-	{			/* MDFLD_LIMT_DSIPLL_83 */
-	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
-	 .m = {.min = MDFLD_DSIPLL_M_MIN_83, .max = MDFLD_DSIPLL_M_MAX_83},
-	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83, .max = MDFLD_DSIPLL_P1_MAX_83},
-	 },
-	{			/* MDFLD_LIMT_DSIPLL_100 */
-	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
-	 .m = {.min = MDFLD_DSIPLL_M_MIN_100, .max = MDFLD_DSIPLL_M_MAX_100},
-	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100, .max = MDFLD_DSIPLL_P1_MAX_100},
-	 },
-};
-
-#define MDFLD_M_MIN	    21
-#define MDFLD_M_MAX	    180
-static const u32 mdfld_m_converts[] = {
-/*
- * M configuration table from the 9-bit LFSR table; index 0 corresponds
- * to m = MDFLD_M_MIN (21).
- */
-	224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */
-	173, 342, 171, 85, 298, 149, 74, 37, 18, 265,   /* 31 - 40 */
-	388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */
-	83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */
-	341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */
-	461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
-	106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
-	71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */
-	253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */
-	478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */
-	477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */
-	210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */
-	145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */
-	380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */
-	103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */
-	396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */
-};
-
-static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
-{
-	const struct mrst_limit_t *limit = NULL;
-	struct drm_device *dev = crtc->dev;
-	struct drm_psb_private *dev_priv = dev->dev_private;
-
-	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
-	    || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
-		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
-			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
-		else if (ksel == KSEL_BYPASS_25)
-			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25];
-		else if ((ksel == KSEL_BYPASS_83_100) &&
-				(dev_priv->core_freq == 166))
-			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83];
-		else if ((ksel == KSEL_BYPASS_83_100) &&
-			 (dev_priv->core_freq == 100 ||
-				dev_priv->core_freq == 200))
-			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
-	} else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
-		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
-			limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
-		else if (ksel == KSEL_BYPASS_25)
-			limit = &mdfld_limits[MDFLD_LIMT_DPLL_25];
-		else if ((ksel == KSEL_BYPASS_83_100) &&
-				(dev_priv->core_freq == 166))
-			limit = &mdfld_limits[MDFLD_LIMT_DPLL_83];
-		else if ((ksel == KSEL_BYPASS_83_100) &&
-				 (dev_priv->core_freq == 100 ||
-				 dev_priv->core_freq == 200))
-			limit = &mdfld_limits[MDFLD_LIMT_DPLL_100];
-	} else {
-		limit = NULL;
-		dev_dbg(dev->dev, "mdfld_limit: wrong display type.\n");
-	}
-
-	return limit;
-}
-
-/* Derive the pixel clock for the given refclk and divisors for 8xx chips. */
-static void mdfld_clock(int refclk, struct mrst_clock_t *clock)
-{
-	clock->dot = (refclk * clock->m) / clock->p1;
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or false. The divisor values returned are the actual divisors,
- * found by exhaustively searching the m and p1 ranges of the current
- * limits for the smallest dot-clock error.
- */
-static bool
-mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
-		struct mrst_clock_t *best_clock)
-{
-	struct mrst_clock_t clock;
-	const struct mrst_limit_t *limit = mdfld_limit(crtc);
-	int err = target;
-
-	memset(best_clock, 0, sizeof(*best_clock));
-
-	for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
-		for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
-		     clock.p1++) {
-			int this_err;
-
-			mdfld_clock(refclk, &clock);
-
-			this_err = abs(clock.dot - target);
-			if (this_err < err) {
-				*best_clock = clock;
-				err = this_err;
-			}
-		}
-	}
-	return err != target;
-}
-
-static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
-			      struct drm_display_mode *mode,
-			      struct drm_display_mode *adjusted_mode,
-			      int x, int y,
-			      struct drm_framebuffer *old_fb)
-{
-	struct drm_device *dev = crtc->dev;
-	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
-	struct drm_psb_private *dev_priv = dev->dev_private;
-	int pipe = gma_crtc->pipe;
-	const struct psb_offset *map = &dev_priv->regmap[pipe];
-	int refclk = 0;
-	int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
-								clk_tmp = 0;
-	struct mrst_clock_t clock;
-	bool ok;
-	u32 dpll = 0, fp = 0;
-	bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct gma_encoder *gma_encoder = NULL;
-	uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
-	struct drm_encoder *encoder;
-	struct drm_connector *connector;
-	int timeout = 0;
-	int ret;
-
-	dev_dbg(dev->dev, "pipe = 0x%x\n", pipe);
-
-	ret = check_fb(crtc->primary->fb);
-	if (ret)
-		return ret;
-
-	dev_dbg(dev->dev, "adjusted_hdisplay = %d\n",
-		 adjusted_mode->hdisplay);
-	dev_dbg(dev->dev, "adjusted_vdisplay = %d\n",
-		 adjusted_mode->vdisplay);
-	dev_dbg(dev->dev, "adjusted_hsync_start = %d\n",
-		 adjusted_mode->hsync_start);
-	dev_dbg(dev->dev, "adjusted_hsync_end = %d\n",
-		 adjusted_mode->hsync_end);
-	dev_dbg(dev->dev, "adjusted_htotal = %d\n",
-		 adjusted_mode->htotal);
-	dev_dbg(dev->dev, "adjusted_vsync_start = %d\n",
-		 adjusted_mode->vsync_start);
-	dev_dbg(dev->dev, "adjusted_vsync_end = %d\n",
-		 adjusted_mode->vsync_end);
-	dev_dbg(dev->dev, "adjusted_vtotal = %d\n",
-		 adjusted_mode->vtotal);
-	dev_dbg(dev->dev, "adjusted_clock = %d\n",
-		 adjusted_mode->clock);
-	dev_dbg(dev->dev, "hdisplay = %d\n",
-		 mode->hdisplay);
-	dev_dbg(dev->dev, "vdisplay = %d\n",
-		 mode->vdisplay);
-
-	if (!gma_power_begin(dev, true))
-		return 0;
-
-	memcpy(&gma_crtc->saved_mode, mode,
-					sizeof(struct drm_display_mode));
-	memcpy(&gma_crtc->saved_adjusted_mode, adjusted_mode,
-					sizeof(struct drm_display_mode));
-
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		encoder = connector->encoder;
-		if (!encoder)
-			continue;
-
-		if (encoder->crtc != crtc)
-			continue;
-
-		gma_encoder = gma_attached_encoder(connector);
-
-		switch (gma_encoder->type) {
-		case INTEL_OUTPUT_MIPI:
-			is_mipi = true;
-			break;
-		case INTEL_OUTPUT_MIPI2:
-			is_mipi2 = true;
-			break;
-		case INTEL_OUTPUT_HDMI:
-			is_hdmi = true;
-			break;
-		}
-	}
-
-	/* Disable the VGA plane that we never use */
-	REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-
-	/* Disable the panel fitter if it was on our pipe */
-	if (psb_intel_panel_fitter_pipe(dev) == pipe)
-		REG_WRITE(PFIT_CONTROL, 0);
-
-	/* pipesrc and dspsize control the size that is scaled from,
-	 * which should always be the user's requested size.
-	 */
-	if (pipe == 1) {
-		/* FIXME: To make HDMI display with 864x480 (TPO), 480x864
-		 * (PYR) or 480x854 (TMD), set the sprite width/height and
-		 * source image size registers with the adjusted mode for
-		 * pipe B.
-		 */
-
-		/*
-		 * The defined sprite rectangle must always be completely
-		 * contained within the displayable area of the screen image
-		 * (frame buffer).
-		 */
-		REG_WRITE(map->size, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
-				| (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
-		/* Set the CRTC with encoder mode. */
-		REG_WRITE(map->src, ((mode->crtc_hdisplay - 1) << 16)
-				 | (mode->crtc_vdisplay - 1));
-	} else {
-		REG_WRITE(map->size,
-				((mode->crtc_vdisplay - 1) << 16) |
-						(mode->crtc_hdisplay - 1));
-		REG_WRITE(map->src,
-				((mode->crtc_hdisplay - 1) << 16) |
-						(mode->crtc_vdisplay - 1));
-	}
-
-	REG_WRITE(map->pos, 0);
-
-	if (gma_encoder)
-		drm_object_property_get_value(&connector->base,
-			dev->mode_config.scaling_mode_property, &scalingType);
-
-	if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
-		/* Medfield doesn't have register support for centering so we
-		 * need to mess with the h/vblank and h/vsync start and ends
-		 * to get centering
-		 */
-		int offsetX = 0, offsetY = 0;
-
-		offsetX = (adjusted_mode->crtc_hdisplay -
-					mode->crtc_hdisplay) / 2;
-		offsetY = (adjusted_mode->crtc_vdisplay -
-					mode->crtc_vdisplay) / 2;
-
-		REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
-			((adjusted_mode->crtc_htotal - 1) << 16));
-		REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
-			((adjusted_mode->crtc_vtotal - 1) << 16));
-		REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start -
-								offsetX - 1) |
-			((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
-		REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start -
-								offsetX - 1) |
-			((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
-		REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start -
-								offsetY - 1) |
-			((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
-		REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start -
-								offsetY - 1) |
-			((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
-	} else {
-		REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
-			((adjusted_mode->crtc_htotal - 1) << 16));
-		REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
-			((adjusted_mode->crtc_vtotal - 1) << 16));
-		REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
-			((adjusted_mode->crtc_hblank_end - 1) << 16));
-		REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
-			((adjusted_mode->crtc_hsync_end - 1) << 16));
-		REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
-			((adjusted_mode->crtc_vblank_end - 1) << 16));
-		REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
-			((adjusted_mode->crtc_vsync_end - 1) << 16));
-	}
-
-	/* Flush the plane changes */
-	{
-		const struct drm_crtc_helper_funcs *crtc_funcs =
-		    crtc->helper_private;
-		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
-	}
-
-	/* setup pipeconf */
-	dev_priv->pipeconf[pipe] = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
-
-	/* Set up the display plane register */
-	dev_priv->dspcntr[pipe] = REG_READ(map->cntr);
-	dev_priv->dspcntr[pipe] |= pipe << DISPPLANE_SEL_PIPE_POS;
-	dev_priv->dspcntr[pipe] |= DISPLAY_PLANE_ENABLE;
-
-	if (is_mipi2)
-		goto mrst_crtc_mode_set_exit;
-	clk = adjusted_mode->clock;
-
-	if (is_hdmi) {
-		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) {
-			refclk = 19200;
-
-			if (is_mipi || is_mipi2)
-				clk_n = 1, clk_p2 = 8;
-			else if (is_hdmi)
-				clk_n = 1, clk_p2 = 10;
-		} else if (ksel == KSEL_BYPASS_25) {
-			refclk = 25000;
-
-			if (is_mipi || is_mipi2)
-				clk_n = 1, clk_p2 = 8;
-			else if (is_hdmi)
-				clk_n = 1, clk_p2 = 10;
-		} else if ((ksel == KSEL_BYPASS_83_100) &&
-					dev_priv->core_freq == 166) {
-			refclk = 83000;
-
-			if (is_mipi || is_mipi2)
-				clk_n = 4, clk_p2 = 8;
-			else if (is_hdmi)
-				clk_n = 4, clk_p2 = 10;
-		} else if ((ksel == KSEL_BYPASS_83_100) &&
-					(dev_priv->core_freq == 100 ||
-					dev_priv->core_freq == 200)) {
-			refclk = 100000;
-			if (is_mipi || is_mipi2)
-				clk_n = 4, clk_p2 = 8;
-			else if (is_hdmi)
-				clk_n = 4, clk_p2 = 10;
-		}
-
-		if (is_mipi)
-			clk_byte = dev_priv->bpp / 8;
-		else if (is_mipi2)
-			clk_byte = dev_priv->bpp2 / 8;
-
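-		/*
-		 * The PLL search target is presumably the VCO clock: the
-		 * pixel clock scaled by the N pre-divider, the P2 post-divider
-		 * and, for MIPI, the bytes-per-pixel ratio (bpp / 8).
-		 */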
-		clk_tmp = clk * clk_n * clk_p2 * clk_byte;
-
-		dev_dbg(dev->dev, "clk = %d, clk_n = %d, clk_p2 = %d.\n",
-					clk, clk_n, clk_p2);
-		dev_dbg(dev->dev, "adjusted_mode->clock = %d, clk_tmp = %d.\n",
-					adjusted_mode->clock, clk_tmp);
-
-		ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock);
-
-		if (!ok) {
-			DRM_ERROR
-			    ("mdfldFindBestPLL failed in mdfld_crtc_mode_set.\n");
-		} else {
-			m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)];
-
-			dev_dbg(dev->dev, "dot clock = %d, "
-				 "m = %d, p1 = %d, m_conv = %d.\n",
-					clock.dot, clock.m,
-					clock.p1, m_conv);
-		}
-
-		dpll = REG_READ(map->dpll);
-
-		if (dpll & DPLL_VCO_ENABLE) {
-			dpll &= ~DPLL_VCO_ENABLE;
-			REG_WRITE(map->dpll, dpll);
-			REG_READ(map->dpll);
-
-			/* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
-			/* FIXME_MDFLD PO - change 500 to 1 after PO */
-			udelay(500);
-
-			/* reset M1, N1 & P1 */
-			REG_WRITE(map->fp0, 0);
-			dpll &= ~MDFLD_P1_MASK;
-			REG_WRITE(map->dpll, dpll);
-			/* FIXME_MDFLD PO - change 500 to 1 after PO */
-			udelay(500);
-		}
-
-		/* When ungating the DPLL power we need to wait 0.5us before
-		 * enabling the VCO */
-		if (dpll & MDFLD_PWR_GATE_EN) {
-			dpll &= ~MDFLD_PWR_GATE_EN;
-			REG_WRITE(map->dpll, dpll);
-			/* FIXME_MDFLD PO - change 500 to 1 after PO */
-			udelay(500);
-		}
-		dpll = 0;
-
-		if (is_hdmi)
-			dpll |= MDFLD_VCO_SEL;
-
-		fp = (clk_n / 2) << 16;
-		fp |= m_conv;
-
-		/* compute bitmask from p1 value */
-		dpll |= (1 << (clock.p1 - 2)) << 17;
-
-	} else {
-		dpll = 0x00800000;
-		fp = 0x000000c1;
-	}
-
-	REG_WRITE(map->fp0, fp);
-	REG_WRITE(map->dpll, dpll);
-	/* FIXME_MDFLD PO - change 500 to 1 after PO */
-	udelay(500);
-
-	dpll |= DPLL_VCO_ENABLE;
-	REG_WRITE(map->dpll, dpll);
-	REG_READ(map->dpll);
-
-	/* wait for DSI PLL to lock */
-	while (timeout < 20000 &&
-			!(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
-		udelay(150);
-		timeout++;
-	}
-
-	if (is_mipi)
-		goto mrst_crtc_mode_set_exit;
-
-	dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi);
-
-	REG_WRITE(map->conf, dev_priv->pipeconf[pipe]);
-	REG_READ(map->conf);
-
-	/* Wait for the pipe enable to take effect. */
-	REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
-	gma_wait_for_vblank(dev);
-
-mrst_crtc_mode_set_exit:
-
-	gma_power_end(dev);
-
-	return 0;
-}
-
-const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
-	.dpms = mdfld_crtc_dpms,
-	.mode_set = mdfld_crtc_mode_set,
-	.mode_set_base = mdfld__intel_pipe_set_base,
-	.prepare = gma_crtc_prepare,
-	.commit = gma_crtc_commit,
-};
diff --git a/drivers/gpu/drm/gma500/mdfld_output.c b/drivers/gpu/drm/gma500/mdfld_output.c
deleted file mode 100644
index c95966bb0c96e9859fc0204f8875e71faa29c228..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/gma500/mdfld_output.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c)  2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
-*/
-
-#include "mdfld_output.h"
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_output.h"
-
-#include "tc35876x-dsi-lvds.h"
-
-int mdfld_get_panel_type(struct drm_device *dev, int pipe)
-{
-	struct drm_psb_private *dev_priv = dev->dev_private;
-	return dev_priv->mdfld_panel_id;
-}
-
-static void mdfld_init_panel(struct drm_device *dev, int mipi_pipe,
-								int p_type)
-{
-	switch (p_type) {
-	case TPO_VID:
-		mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tpo_vid_funcs);
-		break;
-	case TC35876X:
-		tc35876x_init(dev);
-		mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tc35876x_funcs);
-		break;
-	case TMD_VID:
-		mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tmd_vid_funcs);
-		break;
-	case HDMI:
-/*		if (dev_priv->mdfld_hdmi_present)
-			mdfld_hdmi_init(dev, &dev_priv->mode_dev); */
-		break;
-	}
-}
-
-
-int mdfld_output_init(struct drm_device *dev)
-{
-	struct drm_psb_private *dev_priv = dev->dev_private;
-
-	/* FIXME: hardcoded for now */
-	dev_priv->mdfld_panel_id = TC35876X;
-	/* MIPI panel 1 */
-	mdfld_init_panel(dev, 0, dev_priv->mdfld_panel_id);
-	/* HDMI panel */
-	mdfld_init_panel(dev, 1, HDMI);
-	return 0;
-}
-
diff --git a/drivers/gpu/drm/gma500/mdfld_output.h b/drivers/gpu/drm/gma500/mdfld_output.h
deleted file mode 100644
index 37a516cc56beac1aaaf9b9b0ce4e05018663c0c9..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/gma500/mdfld_output.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c)  2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
-*/
-
-#ifndef MDFLD_OUTPUT_H
-#define MDFLD_OUTPUT_H
-
-#include "psb_drv.h"
-
-#define TPO_PANEL_WIDTH		84
-#define TPO_PANEL_HEIGHT	46
-#define TMD_PANEL_WIDTH		39
-#define TMD_PANEL_HEIGHT	71
-
-struct mdfld_dsi_config;
-
-enum panel_type {
-	TPO_VID,
-	TMD_VID,
-	HDMI,
-	TC35876X,
-};
-
-struct panel_info {
-	u32 width_mm;
-	u32 height_mm;
-	/* Other info */
-};
-
-struct panel_funcs {
-	const struct drm_encoder_helper_funcs *encoder_helper_funcs;
-	struct drm_display_mode * (*get_config_mode)(struct drm_device *);
-	int (*get_panel_info)(struct drm_device *, int, struct panel_info *);
-	int (*reset)(struct drm_device *, int);
-	void (*drv_ic_init)(struct mdfld_dsi_config *dsi_config, int pipe);
-};
-
-int mdfld_output_init(struct drm_device *dev);
-
-struct backlight_device *mdfld_get_backlight_device(void);
-int mdfld_set_brightness(struct backlight_device *bd);
-
-int mdfld_get_panel_type(struct drm_device *dev, int pipe);
-
-extern const struct drm_crtc_helper_funcs mdfld_helper_funcs;
-
-extern const struct panel_funcs mdfld_tmd_vid_funcs;
-extern const struct panel_funcs mdfld_tpo_vid_funcs;
-
-extern void mdfld_disable_crtc(struct drm_device *dev, int pipe);
-extern void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe);
-extern void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe);
-#endif
diff --git a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
deleted file mode 100644
index 25e897b98f8621587b3d482aac668e490047ace4..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jim Liu <jim.liu@intel.com>
- * Jackie Li <yaodong.li@intel.com>
- * Gideon Eaton <eaton.
- * Scott Rowe <scott.m.rowe@intel.com>
- */
-
-#include <linux/delay.h>
-
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_pkg_sender.h"
-
-static struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev)
-{
-	struct drm_display_mode *mode;
-	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
-	bool use_gct = false; /*Disable GCT for now*/
-
-	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
-	if (!mode)
-		return NULL;
-
-	if (use_gct) {
-		mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
-		mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
-		mode->hsync_start = mode->hdisplay +
-				((ti->hsync_offset_hi << 8) |
-				ti->hsync_offset_lo);
-		mode->hsync_end = mode->hsync_start +
-				((ti->hsync_pulse_width_hi << 8) |
-				ti->hsync_pulse_width_lo);
-		mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) |
-								ti->hblank_lo);
-		mode->vsync_start =
-			mode->vdisplay + ((ti->vsync_offset_hi << 8) |
-						ti->vsync_offset_lo);
-		mode->vsync_end =
-			mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) |
-						ti->vsync_pulse_width_lo);
-		mode->vtotal = mode->vdisplay +
-				((ti->vblank_hi << 8) | ti->vblank_lo);
-		mode->clock = ti->pixel_clock * 10;
-
-		dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
-		dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
-		dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
-		dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
-		dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
-		dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
-		dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
-		dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
-		dev_dbg(dev->dev, "clock is %d\n", mode->clock);
-	} else {
-		mode->hdisplay = 480;
-		mode->vdisplay = 854;
-		mode->hsync_start = 487;
-		mode->hsync_end = 490;
-		mode->htotal = 499;
-		mode->vsync_start = 861;
-		mode->vsync_end = 865;
-		mode->vtotal = 873;
-		mode->clock = 33264;
-	}
-
-	drm_mode_set_name(mode);
-	drm_mode_set_crtcinfo(mode, 0);
-
-	mode->type |= DRM_MODE_TYPE_PREFERRED;
-
-	return mode;
-}
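
The GCT branch above rebuilds every timing value from a pair of bytes stored in the VBT's DTD block. A worked example, purely illustrative: with hactive_hi = 0x01 and hactive_lo = 0xe0, (0x01 << 8) | 0xe0 = 0x1e0 = 480, matching the hardcoded hdisplay in the fallback branch. A hypothetical helper expressing the idiom:

	static inline u16 gct_timing(u8 hi, u8 lo)
	{
		return ((u16)hi << 8) | lo;	/* DTD fields are split hi/lo bytes */
	}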
-
-static int tmd_vid_get_panel_info(struct drm_device *dev,
-				int pipe,
-				struct panel_info *pi)
-{
-	if (!dev || !pi)
-		return -EINVAL;
-
-	pi->width_mm = TMD_PANEL_WIDTH;
-	pi->height_mm = TMD_PANEL_HEIGHT;
-
-	return 0;
-}
-
-/* ************************************************************************* *\
- * FUNCTION: mdfld_init_TMD_MIPI
- *
- * DESCRIPTION:  This function is called only by mrst_dsi_mode_set and
- *               restore_display_registers.  Since this function does not
- *               acquire the mutex, it is important that the calling function
- *               does!
-\* ************************************************************************* */
-
-/* FIXME: make the below data u8 instead of u32; note byte order! */
-static u32 tmd_cmd_mcap_off[] = {0x000000b2};
-static u32 tmd_cmd_enable_lane_switch[] = {0x000101ef};
-static u32 tmd_cmd_set_lane_num[] = {0x006360ef};
-static u32 tmd_cmd_pushing_clock0[] = {0x00cc2fef};
-static u32 tmd_cmd_pushing_clock1[] = {0x00dd6eef};
-static u32 tmd_cmd_set_mode[] = {0x000000b3};
-static u32 tmd_cmd_set_sync_pulse_mode[] = {0x000961ef};
-static u32 tmd_cmd_set_column[] = {0x0100002a, 0x000000df};
-static u32 tmd_cmd_set_page[] = {0x0300002b, 0x00000055};
-static u32 tmd_cmd_set_video_mode[] = {0x00000153};
-/* no auto_bl, need to add in future */
-static u32 tmd_cmd_enable_backlight[] = {0x00005ab4};
-static u32 tmd_cmd_set_backlight_dimming[] = {0x00000ebd};
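
The FIXME above is an endianness note: these u32 arrays are cast to u8 * before being handed to the packet sender, so the bytes that actually reach the panel depend on host byte order. On the little-endian x86 hardware this driver targeted, 0x000000b2 sits in memory as b2 00 00 00. A sketch of the byte form the FIXME asks for (array name hypothetical):

	static const u8 tmd_cmd_mcap_off_u8[] = { 0xb2, 0x00, 0x00, 0x00 };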
-
-static void mdfld_dsi_tmd_drv_ic_init(struct mdfld_dsi_config *dsi_config,
-				      int pipe)
-{
-	struct mdfld_dsi_pkg_sender *sender
-			= mdfld_dsi_get_pkg_sender(dsi_config);
-
-	DRM_INFO("Enter mdfld init TMD MIPI display.\n");
-
-	if (!sender) {
-		DRM_ERROR("Cannot get sender\n");
-		return;
-	}
-
-	if (dsi_config->dvr_ic_inited)
-		return;
-
-	msleep(3);
-
-	/* FIXME: make the below data u8 instead of u32; note byte order! */
-
-	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_mcap_off,
-				sizeof(tmd_cmd_mcap_off), false);
-	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_lane_switch,
-				sizeof(tmd_cmd_enable_lane_switch), false);
-	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_lane_num,
-				sizeof(tmd_cmd_set_lane_num), false);
-	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock0,
-				sizeof(tmd_cmd_pushing_clock0), false);
-	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock1,
-				sizeof(tmd_cmd_pushing_clock1), false);
-	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_mode,
-				sizeof(tmd_cmd_set_mode), false);
-	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_sync_pulse_mode,
-				sizeof(tmd_cmd_set_sync_pulse_mode), false);
-	mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_column,
-				sizeof(tmd_cmd_set_column), false);
-	mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_page,
-				sizeof(tmd_cmd_set_page), false);
-	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_video_mode,
-				sizeof(tmd_cmd_set_video_mode), false);
-	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_backlight,
-				sizeof(tmd_cmd_enable_backlight), false);
-	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_backlight_dimming,
-				sizeof(tmd_cmd_set_backlight_dimming), false);
-
-	dsi_config->dvr_ic_inited = 1;
-}
-
-/*TPO DPI encoder helper funcs*/
-static const struct drm_encoder_helper_funcs
-				mdfld_tpo_dpi_encoder_helper_funcs = {
-	.dpms = mdfld_dsi_dpi_dpms,
-	.mode_fixup = mdfld_dsi_dpi_mode_fixup,
-	.prepare = mdfld_dsi_dpi_prepare,
-	.mode_set = mdfld_dsi_dpi_mode_set,
-	.commit = mdfld_dsi_dpi_commit,
-};
-
-const struct panel_funcs mdfld_tmd_vid_funcs = {
-	.encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
-	.get_config_mode = &tmd_vid_get_config_mode,
-	.get_panel_info = tmd_vid_get_panel_info,
-	.reset = mdfld_dsi_panel_reset,
-	.drv_ic_init = mdfld_dsi_tmd_drv_ic_init,
-};
diff --git a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
deleted file mode 100644
index 11845978fb0a68efa7679332aa7f534b5e5f845a..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include "mdfld_dsi_dpi.h"
-
-static struct drm_display_mode *tpo_vid_get_config_mode(struct drm_device *dev)
-{
-	struct drm_display_mode *mode;
-
-	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
-	if (!mode)
-		return NULL;
-
-	mode->hdisplay = 864;
-	mode->vdisplay = 480;
-	mode->hsync_start = 873;
-	mode->hsync_end = 876;
-	mode->htotal = 887;
-	mode->vsync_start = 487;
-	mode->vsync_end = 490;
-	mode->vtotal = 499;
-	mode->clock = 33264;
-
-	drm_mode_set_name(mode);
-	drm_mode_set_crtcinfo(mode, 0);
-
-	mode->type |= DRM_MODE_TYPE_PREFERRED;
-
-	return mode;
-}
-
-static int tpo_vid_get_panel_info(struct drm_device *dev,
-				int pipe,
-				struct panel_info *pi)
-{
-	if (!dev || !pi)
-		return -EINVAL;
-
-	pi->width_mm = TPO_PANEL_WIDTH;
-	pi->height_mm = TPO_PANEL_HEIGHT;
-
-	return 0;
-}
-
-/*TPO DPI encoder helper funcs*/
-static const struct drm_encoder_helper_funcs
-				mdfld_tpo_dpi_encoder_helper_funcs = {
-	.dpms = mdfld_dsi_dpi_dpms,
-	.mode_fixup = mdfld_dsi_dpi_mode_fixup,
-	.prepare = mdfld_dsi_dpi_prepare,
-	.mode_set = mdfld_dsi_dpi_mode_set,
-	.commit = mdfld_dsi_dpi_commit,
-};
-
-const struct panel_funcs mdfld_tpo_vid_funcs = {
-	.encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
-	.get_config_mode = &tpo_vid_get_config_mode,
-	.get_panel_info = tpo_vid_get_panel_info,
-};
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 8ab44fec4bfa4990bf747b4610894b961eeb8da9..68e787924ed04e018f0bb05683973b6a92a03e0e 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -19,8 +19,9 @@
 static void mid_get_fuse_settings(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct pci_dev *pci_root =
-		pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+		pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
 					    0, 0);
 	uint32_t fuse_value = 0;
 	uint32_t fuse_value_tmp = 0;
@@ -93,7 +94,8 @@ static void mid_get_fuse_settings(struct drm_device *dev)
 static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
 {
 	uint32_t platform_rev_id = 0;
-	int domain = pci_domain_nr(dev_priv->dev->pdev->bus);
+	struct pci_dev *pdev = to_pci_dev(dev_priv->dev->dev);
+	int domain = pci_domain_nr(pdev->bus);
 	struct pci_dev *pci_gfx_root =
 		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(2, 0));
 
@@ -269,11 +271,12 @@ static int mid_get_vbt_data_r10(struct drm_psb_private *dev_priv, u32 addr)
 static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	u32 addr;
 	u8 __iomem *vbt_virtual;
 	struct mid_vbt_header vbt_header;
 	struct pci_dev *pci_gfx_root =
-		pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+		pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
 					    0, PCI_DEVFN(2, 0));
 	int ret = -1;
 
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index 505044c9a6732483411d4d03a8afe9fab7cb7db4..d856580b8111e8cb70f347716f063606acd49a9a 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -48,7 +48,6 @@ static inline uint32_t psb_mmu_pd_index(uint32_t offset)
 	return offset >> PSB_PDE_SHIFT;
 }
 
-#if defined(CONFIG_X86)
 static inline void psb_clflush(void *addr)
 {
 	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
@@ -63,13 +62,6 @@ static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
 	psb_clflush(addr);
 	mb();
 }
-#else
-
-static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
-{;
-}
-
-#endif
 
 static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
 {
@@ -293,7 +285,6 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
 	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
 		*ptes++ = pd->invalid_pte;
 
-#if defined(CONFIG_X86)
 	if (pd->driver->has_clflush && pd->hw_context != -1) {
 		mb();
 		for (i = 0; i < clflush_count; ++i) {
@@ -302,7 +293,6 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
 		}
 		mb();
 	}
-#endif
 	kunmap_atomic(v);
 	spin_unlock(lock);
 
@@ -313,8 +303,8 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
 	return pt;
 }
 
-struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
-					     unsigned long addr)
+static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+						    unsigned long addr)
 {
 	uint32_t index = psb_mmu_pd_index(addr);
 	struct psb_mmu_pt *pt;
@@ -416,15 +406,6 @@ struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
 	return pd;
 }
 
-/* Returns the physical address of the PD shared by sgx/msvdx */
-uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
-{
-	struct psb_mmu_pd *pd;
-
-	pd = psb_mmu_get_default_pd(driver);
-	return page_to_pfn(pd->p) << PAGE_SHIFT;
-}
-
 void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
 {
 	struct drm_device *dev = driver->dev;
@@ -468,7 +449,6 @@ struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
 
 	driver->has_clflush = 0;
 
-#if defined(CONFIG_X86)
 	if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
 		uint32_t tfms, misc, cap0, cap4, clflush_size;
 
@@ -485,7 +465,6 @@ struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
 		driver->clflush_mask = driver->clflush_add - 1;
 		driver->clflush_mask = ~driver->clflush_mask;
 	}
-#endif
 
 	up_write(&driver->sem);
 	return driver;
@@ -495,7 +474,6 @@ struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
 	return NULL;
 }
 
-#if defined(CONFIG_X86)
 static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
 			       uint32_t num_pages, uint32_t desired_tile_stride,
 			       uint32_t hw_tile_stride)
@@ -543,14 +521,6 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
 	}
 	mb();
 }
-#else
-static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
-			       uint32_t num_pages, uint32_t desired_tile_stride,
-			       uint32_t hw_tile_stride)
-{
-	drm_ttm_cache_flush();
-}
-#endif
 
 void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
 				 unsigned long address, uint32_t num_pages)
@@ -690,7 +660,7 @@ int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
 	if (pd->hw_context != -1)
 		psb_mmu_flush(pd->driver);
 
-	return 0;
+	return ret;
 }
 
 int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
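
Two separate things happen in this file: the !CONFIG_X86 stubs go away because gma500 only builds on x86, and the tail of psb_mmu_insert_pfn_sequence() now returns ret instead of a hardcoded 0, so a page-table allocation failure inside the mapping loop is reported to the caller rather than swallowed. The surviving flush path walks a range one cacheline at a time using the stride probed from CPUID; a hedged sketch of that idiom (not driver code):

	static void flush_range(void *addr, unsigned long len, unsigned long clflush_add)
	{
		u8 *p = addr, *end = p + len;

		mb();				/* order prior stores before the flushes */
		for (; p < end; p += clflush_add)
			psb_clflush(p);		/* the inline clflush defined above */
		mb();				/* wait for the flushes to complete */
	}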
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 900e5499249d5b852103e3e148761301d2649a7b..129f8797100220aa0c1186031878d7f151abe856 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -174,7 +174,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
 	return min_error == 0;
 }
 
-/**
+/*
  * Returns a set of divisors for the desired target clock with the given refclk,
  * or FALSE.  Divisor values are the actual divisors for
  */
@@ -205,7 +205,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
 	return err != target;
 }
 
-/**
+/*
  * Sets the power management mode of the pipe and plane.
  *
  * This code should probably grow support for turning the cursor off and back
@@ -337,7 +337,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
 	gma_power_end(dev);
 }
 
-/**
+/*
  * Return the pipe currently connected to the panel fitter,
  * or -1 if the panel fitter is not present or not in use
  */
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 8754290b0e2343ebf75895832e5cacd9df0d0309..08cd5f73c8685ee615ed5e5adfb8198b5ae06881 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -504,9 +504,10 @@ static const struct psb_offset oaktrail_regmap[2] = {
 static int oaktrail_chip_setup(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int ret;
-	
-	if (pci_enable_msi(dev->pdev))
+
+	if (pci_enable_msi(pdev))
 		dev_warn(dev->dev, "Enabling MSI failed!\n");
 
 	dev_priv->regmap = oaktrail_regmap;
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index e281070611480694006a6fe3e2d629524dbb422f..fc9a34ed58bd136f298a1cffb23ec2f88b8f2d70 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -279,11 +279,8 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
 	hdmi_dev = pci_get_drvdata(dev);
 
 	i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
-	if (i2c_dev == NULL) {
-		DRM_ERROR("Can't allocate interface\n");
-		ret = -ENOMEM;
-		goto exit;
-	}
+	if (!i2c_dev)
+		return -ENOMEM;
 
 	i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
 	i2c_dev->status = I2C_STAT_INIT;
@@ -300,16 +297,23 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
 			  oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
 	if (ret) {
 		DRM_ERROR("Failed to request IRQ for I2C controller\n");
-		goto err;
+		goto free_dev;
 	}
 
 	/* Adapter registration */
 	ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
-	return ret;
+	if (ret) {
+		DRM_ERROR("Failed to add I2C adapter\n");
+		goto free_irq;
+	}
 
-err:
+	return 0;
+
+free_irq:
+	free_irq(dev->irq, hdmi_dev);
+free_dev:
 	kfree(i2c_dev);
-exit:
+
 	return ret;
 }
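
The rework above gives oaktrail_hdmi_i2c_init() a conventional unwind ladder: i2c_add_numbered_adapter() is finally checked, and each failure label undoes exactly the steps that succeeded, in reverse order. Distilled to its shape (handler and adapter names hypothetical):

	i2c_dev = kzalloc(sizeof(*i2c_dev), GFP_KERNEL);
	if (!i2c_dev)
		return -ENOMEM;			/* nothing to unwind yet */

	ret = request_irq(dev->irq, handler, 0, "example", hdmi_dev);
	if (ret)
		goto free_dev;

	ret = i2c_add_numbered_adapter(&adapter);
	if (ret)
		goto free_irq;

	return 0;

	free_irq:
		free_irq(dev->irq, hdmi_dev);
	free_dev:
		kfree(i2c_dev);
		return ret;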
 
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 2828360153d16e6e19eaf53841cd42e905c83c13..432bdcc57ac9e4e4438980a523ea805966feb440 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -29,7 +29,7 @@
 #define MRST_BLC_MAX_PWM_REG_FREQ	    0xFFFF
 #define BRIGHTNESS_MAX_LEVEL 100
 
-/**
+/*
  * Sets the power state for the panel.
  */
 static void oaktrail_lvds_set_power(struct drm_device *dev,
@@ -60,7 +60,7 @@ static void oaktrail_lvds_set_power(struct drm_device *dev,
 			pp_status = REG_READ(PP_STATUS);
 		} while (pp_status & PP_ON);
 		dev_priv->is_lvds_on = false;
-		pm_request_idle(&dev->pdev->dev);
+		pm_request_idle(dev->dev);
 	}
 	gma_power_end(dev);
 }
@@ -282,6 +282,7 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
 /**
  * oaktrail_lvds_init - setup LVDS connectors on this device
  * @dev: drm device
+ * @mode_dev: PSB mode device
  *
  * Create the connector, register the LVDS DDC bus, and try to figure out what
  * modes we can display on the LVDS panel (if present).
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
index baaf8212e01d1ead4cb9c437ce2a87a54a9ce034..1d2dd6ea1c717e60600e3ddb4d68e0e96891320c 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
@@ -66,12 +66,12 @@
 static int get_clock(void *data)
 {
 	struct psb_intel_i2c_chan *chan = data;
-	u32 val, tmp;
+	u32 val;
 
 	val = LPC_READ_REG(chan, RGIO);
 	val |= GPIO_CLOCK;
 	LPC_WRITE_REG(chan, RGIO, val);
-	tmp = LPC_READ_REG(chan, RGLVL);
+	LPC_READ_REG(chan, RGLVL);
 	val = (LPC_READ_REG(chan, RGLVL) & GPIO_CLOCK) ? 1 : 0;
 
 	return val;
@@ -80,12 +80,12 @@ static int get_clock(void *data)
 static int get_data(void *data)
 {
 	struct psb_intel_i2c_chan *chan = data;
-	u32 val, tmp;
+	u32 val;
 
 	val = LPC_READ_REG(chan, RGIO);
 	val |= GPIO_DATA;
 	LPC_WRITE_REG(chan, RGIO, val);
-	tmp = LPC_READ_REG(chan, RGLVL);
+	LPC_READ_REG(chan, RGLVL);
 	val = (LPC_READ_REG(chan, RGLVL) & GPIO_DATA) ? 1 : 0;
 
 	return val;
@@ -145,7 +145,7 @@ void oaktrail_lvds_i2c_init(struct drm_encoder *encoder)
 	strncpy(chan->adapter.name, "gma500 LPC",  I2C_NAME_SIZE - 1);
 	chan->adapter.owner = THIS_MODULE;
 	chan->adapter.algo_data = &chan->algo;
-	chan->adapter.dev.parent = &dev->pdev->dev;
+	chan->adapter.dev.parent = dev->dev;
 	chan->algo.setsda = set_data;
 	chan->algo.setscl = set_clock;
 	chan->algo.getsda = get_data;
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index eab6d889bde980e816da94eda27110fbae6e7da8..a1ffc6a1c2555a423947ac6c8e598eb22e7da175 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -305,12 +305,13 @@ void psb_intel_opregion_fini(struct drm_device *dev)
 int psb_intel_opregion_setup(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct psb_intel_opregion *opregion = &dev_priv->opregion;
 	u32 opregion_phy, mboxes;
 	void __iomem *base;
 	int err = 0;
 
-	pci_read_config_dword(dev->pdev, PCI_ASLS, &opregion_phy);
+	pci_read_config_dword(pdev, PCI_ASLS, &opregion_phy);
 	if (opregion_phy == 0) {
 		DRM_DEBUG_DRIVER("ACPI Opregion not supported\n");
 		return -ENOTSUPP;
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index bea8578846d1577feba1d812773d7f8548d8306e..56ef88237ef662e6b6436be5872e8b6d62d217ff 100644
--- a/drivers/gpu/drm/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -70,8 +70,8 @@ void gma_power_init(struct drm_device *dev)
  */
 void gma_power_uninit(struct drm_device *dev)
 {
-	pm_runtime_disable(&dev->pdev->dev);
-	pm_runtime_set_suspended(&dev->pdev->dev);
+	pm_runtime_disable(dev->dev);
+	pm_runtime_set_suspended(dev->dev);
 }
 
 /**
@@ -93,6 +93,7 @@ static void gma_suspend_display(struct drm_device *dev)
 
 /**
  *	gma_resume_display	-	resume display side logic
+ *	@pdev: PCI device
  *
  *	Resume the display hardware restoring state and enabling
  *	as necessary.
@@ -146,7 +147,7 @@ static void gma_suspend_pci(struct pci_dev *pdev)
 
 /**
  *	gma_resume_pci		-	resume helper
- *	@dev: our PCI device
+ *	@pdev: our PCI device
  *
  *	Perform the resume processing on our PCI device state - rewrite
  *	register state and re-enable the PCI device
@@ -178,8 +179,7 @@ static bool gma_resume_pci(struct pci_dev *pdev)
 
 /**
  *	gma_power_suspend		-	bus callback for suspend
- *	@pdev: our PCI device
- *	@state: suspend type
+ *	@_dev: our device
  *
  *	Called back by the PCI layer during a suspend of the system. We
  *	perform the necessary shut down steps and save enough state that
@@ -208,7 +208,7 @@ int gma_power_suspend(struct device *_dev)
 
 /**
  *	gma_power_resume		-	resume power
- *	@pdev: PCI device
+ *	@_dev: our device
  *
  *	Resume the PCI side of the graphics and then the displays
  */
@@ -249,6 +249,7 @@ bool gma_power_is_on(struct drm_device *dev)
 bool gma_power_begin(struct drm_device *dev, bool force_on)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int ret;
 	unsigned long flags;
 
@@ -256,7 +257,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
 	/* Power already on ? */
 	if (dev_priv->display_power) {
 		dev_priv->display_count++;
-		pm_runtime_get(&dev->pdev->dev);
+		pm_runtime_get(dev->dev);
 		spin_unlock_irqrestore(&power_ctrl_lock, flags);
 		return true;
 	}
@@ -264,11 +265,11 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
 		goto out_false;
 
 	/* Ok power up needed */
-	ret = gma_resume_pci(dev->pdev);
+	ret = gma_resume_pci(pdev);
 	if (ret == 0) {
 		psb_irq_preinstall(dev);
 		psb_irq_postinstall(dev);
-		pm_runtime_get(&dev->pdev->dev);
+		pm_runtime_get(dev->dev);
 		dev_priv->display_count++;
 		spin_unlock_irqrestore(&power_ctrl_lock, flags);
 		return true;
@@ -293,7 +294,7 @@ void gma_power_end(struct drm_device *dev)
 	dev_priv->display_count--;
 	WARN_ON(dev_priv->display_count < 0);
 	spin_unlock_irqrestore(&power_ctrl_lock, flags);
-	pm_runtime_put(&dev->pdev->dev);
+	pm_runtime_put(dev->dev);
 }
 
 int psb_runtime_suspend(struct device *dev)
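
gma_power_begin() now pins the device with a runtime-PM reference taken against the generic struct device (resuming the PCI side first if needed); every call that returns true must be paired with gma_power_end(), which drops that reference. A hedged usage sketch:

	if (gma_power_begin(dev, false)) {
		u32 pipeconf = REG_READ(PIPEACONF);	/* hardware guaranteed powered */

		/* ... use pipeconf ... */
		gma_power_end(dev);			/* drop the runtime-PM reference */
	}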
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index cc2d59e8471da5549a3979c6160147f87b1e8d5b..0bcab065242c29cd8e2223becbb147e5a6798e68 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -46,13 +46,12 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
  * PowerVR SGX535    - Poulsbo    - Intel GMA 500, Intel Atom Z5xx
  * PowerVR SGX535    - Moorestown - Intel GMA 600
  * PowerVR SGX535    - Oaktrail   - Intel GMA 600, Intel Atom Z6xx, E6xx
- * PowerVR SGX540    - Medfield   - Intel Atom Z2460
- * PowerVR SGX544MP2 - Medfield   -
  * PowerVR SGX545    - Cedartrail - Intel GMA 3600, Intel Atom D2500, N2600
  * PowerVR SGX545    - Cedartrail - Intel GMA 3650, Intel Atom D2550, D2700,
  *                                  N2800
  */
 static const struct pci_device_id pciidlist[] = {
+	/* Poulsbo */
 	{ 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
 	{ 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
 #if defined(CONFIG_DRM_GMA600)
@@ -66,17 +65,7 @@ static const struct pci_device_id pciidlist[] = {
 	{ 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
 	{ 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
 #endif
-#if defined(CONFIG_DRM_MEDFIELD)
-	{ 0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
-	{ 0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
-	{ 0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
-	{ 0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
-	{ 0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
-	{ 0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
-	{ 0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
-	{ 0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
-#endif
-#if defined(CONFIG_DRM_GMA3600)
+	/* Cedartrail */
 	{ 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
 	{ 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
 	{ 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
@@ -93,7 +82,6 @@ static const struct pci_device_id pciidlist[] = {
 	{ 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
 	{ 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
 	{ 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
-#endif
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -208,6 +196,7 @@ static void psb_driver_unload(struct drm_device *dev)
 
 static int psb_driver_load(struct drm_device *dev, unsigned long flags)
 {
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct drm_psb_private *dev_priv;
 	unsigned long resource_start, resource_len;
 	unsigned long irqflags;
@@ -227,11 +216,11 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
 
 	pg = &dev_priv->gtt;
 
-	pci_set_master(dev->pdev);
+	pci_set_master(pdev);
 
 	dev_priv->num_pipe = dev_priv->ops->pipes;
 
-	resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
+	resource_start = pci_resource_start(pdev, PSB_MMIO_RESOURCE);
 
 	dev_priv->vdc_reg =
 	    ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
@@ -244,7 +233,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_err;
 
 	if (IS_MRST(dev)) {
-		int domain = pci_domain_nr(dev->pdev->bus);
+		int domain = pci_domain_nr(pdev->bus);
 
 		dev_priv->aux_pdev =
 			pci_get_domain_bus_and_slot(domain, 0,
@@ -312,6 +301,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
 	if (ret)
 		goto out_err;
 
+	ret = -ENOMEM;
+
 	dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
 	if (!dev_priv->mmu)
 		goto out_err;
@@ -359,7 +350,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
 	PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
 	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
 
-	drm_irq_install(dev, dev->pdev->irq);
+	drm_irq_install(dev, pdev->irq);
 
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 
@@ -385,8 +376,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
 	psb_intel_opregion_enable_asle(dev);
 #if 0
 	/* Enable runtime pm at last */
-	pm_runtime_enable(&dev->pdev->dev);
-	pm_runtime_set_active(&dev->pdev->dev);
+	pm_runtime_enable(dev->dev);
+	pm_runtime_set_active(dev->dev);
 #endif
 	/* Intel drm driver load is done, continue doing pvr load */
 	return 0;
@@ -415,7 +406,7 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
 
 	if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
 		runtime_allowed++;
-		pm_runtime_allow(&dev->pdev->dev);
+		pm_runtime_allow(dev->dev);
 		dev_priv->rpm_enabled = 1;
 	}
 	return drm_ioctl(filp, cmd, arg);
@@ -437,7 +428,6 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_pci_disable_device;
 	}
 
-	dev->pdev = pdev;
 	pci_set_drvdata(pdev, dev);
 
 	ret = psb_driver_load(dev, ent->driver_data);
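
The new ret = -ENOMEM in psb_driver_load() is there because the initializers that follow, psb_mmu_driver_init() among them, signal failure by returning NULL rather than an errno, while the shared out_err label returns whatever ret holds. Preloading ret turns those NULL checks into a meaningful error return. The shape of the pattern (names hypothetical):

	ret = -ENOMEM;			/* errno for the NULL-returning calls below */
	mmu = init_returning_null(dev);
	if (!mmu)
		goto out_err;		/* fails with -ENOMEM, not a stale ret */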
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 5b7f7a312d53ffe84b6fb86c0ab60b199c89e23a..020a71b915774158d05adfd9628323a29c83dc69 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -40,19 +40,16 @@ enum {
 	CHIP_PSB_8108 = 0,		/* Poulsbo */
 	CHIP_PSB_8109 = 1,		/* Poulsbo */
 	CHIP_MRST_4100 = 2,		/* Moorestown/Oaktrail */
-	CHIP_MFLD_0130 = 3,		/* Medfield */
 };
 
-#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
-#define IS_MRST(dev) (((dev)->pdev->device & 0xfff0) == 0x4100)
-#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
-#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
+#define IS_PSB(drm) ((to_pci_dev((drm)->dev)->device & 0xfffe) == 0x8108)
+#define IS_MRST(drm) ((to_pci_dev((drm)->dev)->device & 0xfff0) == 0x4100)
+#define IS_CDV(drm) ((to_pci_dev((drm)->dev)->device & 0xfff0) == 0x0be0)
 
 /* Hardware offsets */
 #define PSB_VDC_OFFSET		 0x00000000
 #define PSB_VDC_SIZE		 0x000080000
 #define MRST_MMIO_SIZE		 0x0000C0000
-#define MDFLD_MMIO_SIZE          0x000100000
 #define PSB_SGX_SIZE		 0x8000
 #define PSB_SGX_OFFSET		 0x00040000
 #define MRST_SGX_OFFSET		 0x00080000
@@ -109,8 +106,6 @@ enum {
 #define _PSB_DPST_PIPEA_FLAG      (1<<6)
 #define _PSB_PIPEA_EVENT_FLAG     (1<<6)
 #define _PSB_VSYNC_PIPEA_FLAG	  (1<<7)
-#define _MDFLD_MIPIA_FLAG	  (1<<16)
-#define _MDFLD_MIPIC_FLAG	  (1<<17)
 #define _PSB_IRQ_DISP_HOTSYNC	  (1<<17)
 #define _PSB_IRQ_SGX_FLAG	  (1<<18)
 #define _PSB_IRQ_MSVDX_FLAG	  (1<<19)
@@ -119,13 +114,6 @@ enum {
 #define _PSB_PIPE_EVENT_FLAG	(_PSB_VSYNC_PIPEA_FLAG | \
 				 _PSB_VSYNC_PIPEB_FLAG)
 
-/* This flag includes all the display IRQ bits excepts the vblank irqs. */
-#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | \
-				  _MDFLD_PIPEB_EVENT_FLAG | \
-				  _PSB_PIPEA_EVENT_FLAG | \
-				  _PSB_VSYNC_PIPEA_FLAG | \
-				  _MDFLD_MIPIA_FLAG | \
-				  _MDFLD_MIPIC_FLAG)
 #define PSB_INT_IDENTITY_R	  0x20A4
 #define PSB_INT_MASK_R		  0x20A8
 #define PSB_INT_ENABLE_R	  0x20A0
@@ -191,25 +179,6 @@ enum {
 #define PSB_WATCHDOG_DELAY (HZ * 2)
 #define PSB_LID_DELAY (HZ / 10)
 
-#define MDFLD_PNW_B0 0x04
-#define MDFLD_PNW_C0 0x08
-
-#define MDFLD_DSR_2D_3D_0 	(1 << 0)
-#define MDFLD_DSR_2D_3D_2 	(1 << 1)
-#define MDFLD_DSR_CURSOR_0 	(1 << 2)
-#define MDFLD_DSR_CURSOR_2	(1 << 3)
-#define MDFLD_DSR_OVERLAY_0 	(1 << 4)
-#define MDFLD_DSR_OVERLAY_2 	(1 << 5)
-#define MDFLD_DSR_MIPI_CONTROL	(1 << 6)
-#define MDFLD_DSR_DAMAGE_MASK_0	((1 << 0) | (1 << 2) | (1 << 4))
-#define MDFLD_DSR_DAMAGE_MASK_2	((1 << 1) | (1 << 3) | (1 << 5))
-#define MDFLD_DSR_2D_3D 	(MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
-
-#define MDFLD_DSR_RR		45
-#define MDFLD_DPU_ENABLE 	(1 << 31)
-#define MDFLD_DSR_FULLSCREEN 	(1 << 30)
-#define MDFLD_DSR_DELAY		(HZ / MDFLD_DSR_RR)
-
 #define PSB_PWR_STATE_ON		1
 #define PSB_PWR_STATE_OFF		2
 
@@ -382,16 +351,6 @@ struct psb_state {
 	uint32_t savePWM_CONTROL_LOGIC;
 };
 
-struct medfield_state {
-	uint32_t saveMIPI;
-	uint32_t saveMIPI_C;
-
-	uint32_t savePFIT_CONTROL;
-	uint32_t savePFIT_PGM_RATIOS;
-	uint32_t saveHDMIPHYMISCCTL;
-	uint32_t saveHDMIB_CONTROL;
-};
-
 struct cdv_state {
 	uint32_t saveDSPCLK_GATE_D;
 	uint32_t saveRAMCLK_GATE_D;
@@ -417,7 +376,6 @@ struct psb_save_area {
 	uint32_t saveVBT;
 	union {
 	        struct psb_state psb;
-		struct medfield_state mdfld;
 		struct cdv_state cdv;
 	};
 	uint32_t saveBLC_PWM_CTL2;
@@ -590,8 +548,6 @@ struct drm_psb_private {
 	u32 pipeconf[3];
 	u32 dspcntr[3];
 
-	int mdfld_panel_id;
-
 	bool dplla_96mhz;	/* DPLL data from the VBT */
 
 	struct {
@@ -737,9 +693,6 @@ extern const struct psb_ops psb_chip_ops;
 /* oaktrail_device.c */
 extern const struct psb_ops oaktrail_chip_ops;
 
-/* mdlfd_device.c */
-extern const struct psb_ops mdfld_chip_ops;
-
 /* cdv_device.c */
 extern const struct psb_ops cdv_chip_ops;
 
@@ -779,25 +732,6 @@ static inline void MRST_MSG_WRITE32(int domain, uint port, uint offset,
 	pci_write_config_dword(pci_root, 0xD0, mcr);
 	pci_dev_put(pci_root);
 }
-static inline u32 MDFLD_MSG_READ32(int domain, uint port, uint offset)
-{
-	int mcr = (0x10<<24) | (port << 16) | (offset << 8);
-	uint32_t ret_val = 0;
-	struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
-	pci_write_config_dword(pci_root, 0xD0, mcr);
-	pci_read_config_dword(pci_root, 0xD4, &ret_val);
-	pci_dev_put(pci_root);
-	return ret_val;
-}
-static inline void MDFLD_MSG_WRITE32(int domain, uint port, uint offset,
-				     u32 value)
-{
-	int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
-	struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
-	pci_write_config_dword(pci_root, 0xD4, value);
-	pci_write_config_dword(pci_root, 0xD0, mcr);
-	pci_dev_put(pci_root);
-}
 
 static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
 {
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 531c5485be172adcf608f627c81f02ede272f633..9c3cb1b80bbdb19e5cb11a9259b95d1317218202 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -71,7 +71,7 @@ static void psb_intel_clock(int refclk, struct gma_clock_t *clock)
 	clock->dot = clock->vco / clock->p;
 }
 
-/**
+/*
  * Return the pipe currently connected to the panel fitter,
  * or -1 if the panel fitter is not present or not in use
  */
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 063c66bb946d0c21872e0ad72db962d60ed565d5..ace95d4bdb6f88f26294d5fa034380ff9d752b6b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -216,7 +216,7 @@ static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
 	        dev_err(dev->dev, "set power, chip off!\n");
 		return;
         }
-        
+
 	if (on) {
 		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
 			  POWER_TARGET_ON);
@@ -626,6 +626,7 @@ const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
 /**
  * psb_intel_lvds_init - setup LVDS connectors on this device
  * @dev: drm device
+ * @mode_dev: mode device
  *
  * Create the connector, register the LVDS DDC bus, and try to figure out what
  * modes we can display on the LVDS panel (if present).
@@ -700,7 +701,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
 	lvds_priv->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
 	if (!lvds_priv->i2c_bus) {
 		dev_printk(KERN_ERR,
-			&dev->pdev->dev, "I2C bus registration failed.\n");
+			dev->dev, "I2C bus registration failed.\n");
 		goto failed_blc_i2c;
 	}
 	lvds_priv->i2c_bus->slave_addr = 0x2C;
@@ -719,7 +720,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
 	/* Set up the DDC bus. */
 	lvds_priv->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
 	if (!lvds_priv->ddc_bus) {
-		dev_printk(KERN_ERR, &dev->pdev->dev,
+		dev_printk(KERN_ERR, dev->dev,
 			   "DDC bus registration " "failed.\n");
 		goto failed_ddc;
 	}
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index 88653a40aeb5cdfe208772d6eb710a79009b735f..60306780e16cd5aed7736dc9557e3dfa6b13869e 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -11,7 +11,7 @@
 
 /**
  * psb_intel_ddc_probe
- *
+ * @adapter:   Associated I2C adapter
  */
 bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
 {
@@ -43,6 +43,7 @@ bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
 /**
  * psb_intel_ddc_get_modes - get modelist from monitor
  * @connector: DRM connector device to use
+ * @adapter:   Associated I2C adapter
  *
  * Fetch the EDID information from @connector using the DDC bus.
  */
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index 835cc924c45ad20dcc97a0754448cbed94880e00..364ea8f06f9cad6e2b1e0429b52ca6efbfe98eb5 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -595,7 +595,7 @@ struct dpst_guardband {
 #define PIPE_PIXEL_MASK		0x00ffffff
 #define PIPE_PIXEL_SHIFT	0
 
-#define FW_BLC_SELF		0x20e0 
+#define FW_BLC_SELF		0x20e0
 #define FW_BLC_SELF_EN          (1<<15)
 
 #define DSPARB			0x70030
@@ -789,17 +789,9 @@ struct dpst_guardband {
  * MOORESTOWN delta registers
  */
 #define MRST_DPLL_A		0x0f014
-#define MDFLD_DPLL_B		0x0f018
-#define MDFLD_INPUT_REF_SEL		(1 << 14)
-#define MDFLD_VCO_SEL			(1 << 16)
 #define DPLLA_MODE_LVDS			(2 << 26)	/* mrst */
-#define MDFLD_PLL_LATCHEN		(1 << 28)
-#define MDFLD_PWR_GATE_EN		(1 << 30)
-#define MDFLD_P1_MASK			(0x1FF << 17)
 #define MRST_FPA0		0x0f040
 #define MRST_FPA1		0x0f044
-#define MDFLD_DPLL_DIV0		0x0f048
-#define MDFLD_DPLL_DIV1		0x0f04c
 #define MRST_PERF_MODE		0x020f4
 
 /*
@@ -848,7 +840,6 @@ struct dpst_guardband {
 
 #define MRST_DSPABASE		0x7019c
 #define MRST_DSPBBASE		0x7119c
-#define MDFLD_DSPCBASE		0x7219c
 
 /*
  * Moorestown registers.
@@ -930,7 +921,6 @@ struct dpst_guardband {
 #define DEVICE_RESET_REG		0xb01C
 #define DPI_RESOLUTION_REG		0xb020
 #define RES_V_POS				0x10
-#define DBI_RESOLUTION_REG		0xb024 /* Reserved for MDFLD */
 #define HORIZ_SYNC_PAD_COUNT_REG	0xb028
 #define HORIZ_BACK_PORCH_COUNT_REG	0xb02C
 #define HORIZ_FRONT_PORCH_COUNT_REG	0xb030
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 907f966d6f221a7923ae6702f6f2769e61a66bb2..355da28563891754e28ea60c96baa14b4b04eb10 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -221,7 +221,7 @@ static bool
 psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
 				   struct psb_intel_sdvo_connector *psb_intel_sdvo_connector);
 
-/**
+/*
  * Writes the SDVOB or SDVOC with the given value, but always writes both
  * SDVOB and SDVOC to work around apparent hardware issues (according to
  * comments in the BIOS).
@@ -588,7 +588,7 @@ static bool psb_intel_sdvo_set_target_input(struct psb_intel_sdvo *psb_intel_sdv
 				    &targets, sizeof(targets));
 }
 
-/**
+/*
  * Return whether each input is trained.
  *
  * This function is making an assumption about the layout of the response,
@@ -1818,7 +1818,7 @@ psb_intel_sdvo_guess_ddc_bus(struct psb_intel_sdvo *sdvo)
 #endif
 }
 
-/**
+/*
  * Choose the appropriate DDC bus for control bus switch command for this
  * SDVO output based on the controlled output.
  *
@@ -2406,7 +2406,7 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
 	sdvo->ddc.owner = THIS_MODULE;
 	sdvo->ddc.class = I2C_CLASS_DDC;
 	snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
-	sdvo->ddc.dev.parent = &dev->pdev->dev;
+	sdvo->ddc.dev.parent = dev->dev;
 	sdvo->ddc.algo_data = sdvo;
 	sdvo->ddc.algo = &psb_intel_sdvo_ddc_proxy;
 
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 361e3a0c5ab6b56b5405566d39fcefa338b3c8f0..ae9b100e640baa45ef1ec897c633f88f02d2bdb0 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -10,7 +10,6 @@
 
 #include <drm/drm_vblank.h>
 
-#include "mdfld_output.h"
 #include "power.h"
 #include "psb_drv.h"
 #include "psb_intel_reg.h"
@@ -126,9 +125,8 @@ static void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
 	}
 }
 
-/**
+/*
  * Display controller interrupt handler for pipe event.
- *
  */
 static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
 {
@@ -165,8 +163,7 @@ static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
 		"%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
 		__func__, pipe, PSB_RVDC32(pipe_stat_reg));
 
-	if (pipe_stat_val & PIPE_VBLANK_STATUS ||
-	    (IS_MFLD(dev) && pipe_stat_val & PIPE_TE_STATUS)) {
+	if (pipe_stat_val & PIPE_VBLANK_STATUS) {
 		struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
 		struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
 		unsigned long flags;
@@ -264,11 +261,6 @@ irqreturn_t psb_irq_handler(int irq, void *arg)
 	if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
 		dsp_int = 1;
 
-	/* FIXME: Handle Medfield
-	if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
-		dsp_int = 1;
-	*/
-
 	if (vdc_stat & _PSB_IRQ_SGX_FLAG)
 		sgx_int = 1;
 	if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
@@ -326,13 +318,6 @@ void psb_irq_preinstall(struct drm_device *dev)
 	if (dev->vblank[1].enabled)
 		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
 
-	/* FIXME: Handle Medfield irq mask
-	if (dev->vblank[1].enabled)
-		dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
-	if (dev->vblank[2].enabled)
-		dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
-	*/
-
 	/* Revisit this area - want per device masks ? */
 	if (dev_priv->ops->hotplug)
 		dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
@@ -505,11 +490,6 @@ int psb_enable_vblank(struct drm_crtc *crtc)
 	uint32_t reg_val = 0;
 	uint32_t pipeconf_reg = mid_pipeconf(pipe);
 
-	/* Medfield is different - we should perhaps extract out vblank
-	   and blacklight etc ops */
-	if (IS_MFLD(dev))
-		return mdfld_enable_te(dev, pipe);
-
 	if (gma_power_begin(dev, false)) {
 		reg_val = REG_READ(pipeconf_reg);
 		gma_power_end(dev);
@@ -544,8 +524,6 @@ void psb_disable_vblank(struct drm_crtc *crtc)
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
 
-	if (IS_MFLD(dev))
-		mdfld_disable_te(dev, pipe);
 	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
 
 	if (pipe == 0)
@@ -560,55 +538,6 @@ void psb_disable_vblank(struct drm_crtc *crtc)
 	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
 }
 
-/*
- * It is used to enable TE interrupt
- */
-int mdfld_enable_te(struct drm_device *dev, int pipe)
-{
-	struct drm_psb_private *dev_priv =
-		(struct drm_psb_private *) dev->dev_private;
-	unsigned long irqflags;
-	uint32_t reg_val = 0;
-	uint32_t pipeconf_reg = mid_pipeconf(pipe);
-
-	if (gma_power_begin(dev, false)) {
-		reg_val = REG_READ(pipeconf_reg);
-		gma_power_end(dev);
-	}
-
-	if (!(reg_val & PIPEACONF_ENABLE))
-		return -EINVAL;
-
-	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-
-	mid_enable_pipe_event(dev_priv, pipe);
-	psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
-
-	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-
-	return 0;
-}
-
-/*
- * It is used to disable TE interrupt
- */
-void mdfld_disable_te(struct drm_device *dev, int pipe)
-{
-	struct drm_psb_private *dev_priv =
-		(struct drm_psb_private *) dev->dev_private;
-	unsigned long irqflags;
-
-	if (!dev_priv->dsr_enable)
-		return;
-
-	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-
-	mid_disable_pipe_event(dev_priv, pipe);
-	psb_disable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
-
-	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-}
-
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index 4f73998848d15c135ef655414e510f091a003726..1b577fa7010a7268a584244073d5e71c7ab4b951 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -31,6 +31,4 @@ int  psb_enable_vblank(struct drm_crtc *crtc);
 void psb_disable_vblank(struct drm_crtc *crtc);
 u32  psb_get_vblank_counter(struct drm_crtc *crtc);
 
-int mdfld_enable_te(struct drm_device *dev, int pipe);
-void mdfld_disable_te(struct drm_device *dev, int pipe);
 #endif /* _PSB_IRQ_H_ */
diff --git a/drivers/gpu/drm/gma500/psb_reg.h b/drivers/gpu/drm/gma500/psb_reg.h
index fb22bac5bb748763684e8d1bebd5427c82c2cd5c..2a229a0ef36c0d76d9eea29ad75107b5784f4a08 100644
--- a/drivers/gpu/drm/gma500/psb_reg.h
+++ b/drivers/gpu/drm/gma500/psb_reg.h
@@ -550,21 +550,7 @@
 #define PSB_PM_SSC			0x20
 #define PSB_PM_SSS			0x30
 #define PSB_PWRGT_DISPLAY_MASK		0xc /*on a different BA than video/gfx*/
-#define MDFLD_PWRGT_DISPLAY_A_CNTR	0x0000000c
-#define MDFLD_PWRGT_DISPLAY_B_CNTR	0x0000c000
-#define MDFLD_PWRGT_DISPLAY_C_CNTR	0x00030000
-#define MDFLD_PWRGT_DISP_MIPI_CNTR	0x000c0000
-#define MDFLD_PWRGT_DISPLAY_CNTR    (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR) /* 0x000fc00c */
 /* Display SSS register bits are different in A0 vs. B0 */
 #define PSB_PWRGT_GFX_MASK		0x3
-#define MDFLD_PWRGT_DISPLAY_A_STS	0x000000c0
-#define MDFLD_PWRGT_DISPLAY_B_STS	0x00000300
-#define MDFLD_PWRGT_DISPLAY_C_STS	0x00000c00
 #define PSB_PWRGT_GFX_MASK_B0		0xc3
-#define MDFLD_PWRGT_DISPLAY_A_STS_B0	0x0000000c
-#define MDFLD_PWRGT_DISPLAY_B_STS_B0	0x0000c000
-#define MDFLD_PWRGT_DISPLAY_C_STS_B0	0x00030000
-#define MDFLD_PWRGT_DISP_MIPI_STS	0x000c0000
-#define MDFLD_PWRGT_DISPLAY_STS_A0    (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
-#define MDFLD_PWRGT_DISPLAY_STS_B0    (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
 #endif
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
deleted file mode 100644
index e5bdd99ad453f4611e98411f03e9f226499044d8..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ /dev/null
@@ -1,805 +0,0 @@
-/*
- * Copyright © 2011 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/gpio/consumer.h>
-
-#include <asm/intel_scu_ipc.h>
-
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_pkg_sender.h"
-#include "mdfld_output.h"
-#include "tc35876x-dsi-lvds.h"
-
-static struct i2c_client *tc35876x_client;
-static struct i2c_client *cmi_lcd_i2c_client;
-/* Panel GPIOs */
-static struct gpio_desc *bridge_reset;
-static struct gpio_desc *bridge_bl_enable;
-static struct gpio_desc *backlight_voltage;
-
-
-#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
-#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
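
These two macros do all of the register packing in this file. Worked example: FLD_MASK(24, 16) is ((1 << 9) - 1) << 16 = 0x01ff0000, so FLD_VAL(40, 24, 16) | FLD_VAL(40, 8, 0) packs 40 (0x28) into bits 24..16 and bits 8..0, yielding the 0x00280028 HTIM1 value quoted further down.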
-
-/* DSI D-PHY Layer Registers */
-#define D0W_DPHYCONTTX		0x0004
-#define CLW_DPHYCONTRX		0x0020
-#define D0W_DPHYCONTRX		0x0024
-#define D1W_DPHYCONTRX		0x0028
-#define D2W_DPHYCONTRX		0x002C
-#define D3W_DPHYCONTRX		0x0030
-#define COM_DPHYCONTRX		0x0038
-#define CLW_CNTRL		0x0040
-#define D0W_CNTRL		0x0044
-#define D1W_CNTRL		0x0048
-#define D2W_CNTRL		0x004C
-#define D3W_CNTRL		0x0050
-#define DFTMODE_CNTRL		0x0054
-
-/* DSI PPI Layer Registers */
-#define PPI_STARTPPI		0x0104
-#define PPI_BUSYPPI		0x0108
-#define PPI_LINEINITCNT		0x0110
-#define PPI_LPTXTIMECNT		0x0114
-#define PPI_LANEENABLE		0x0134
-#define PPI_TX_RX_TA		0x013C
-#define PPI_CLS_ATMR		0x0140
-#define PPI_D0S_ATMR		0x0144
-#define PPI_D1S_ATMR		0x0148
-#define PPI_D2S_ATMR		0x014C
-#define PPI_D3S_ATMR		0x0150
-#define PPI_D0S_CLRSIPOCOUNT	0x0164
-#define PPI_D1S_CLRSIPOCOUNT	0x0168
-#define PPI_D2S_CLRSIPOCOUNT	0x016C
-#define PPI_D3S_CLRSIPOCOUNT	0x0170
-#define CLS_PRE			0x0180
-#define D0S_PRE			0x0184
-#define D1S_PRE			0x0188
-#define D2S_PRE			0x018C
-#define D3S_PRE			0x0190
-#define CLS_PREP		0x01A0
-#define D0S_PREP		0x01A4
-#define D1S_PREP		0x01A8
-#define D2S_PREP		0x01AC
-#define D3S_PREP		0x01B0
-#define CLS_ZERO		0x01C0
-#define D0S_ZERO		0x01C4
-#define D1S_ZERO		0x01C8
-#define D2S_ZERO		0x01CC
-#define D3S_ZERO		0x01D0
-#define PPI_CLRFLG		0x01E0
-#define PPI_CLRSIPO		0x01E4
-#define HSTIMEOUT		0x01F0
-#define HSTIMEOUTENABLE		0x01F4
-
-/* DSI Protocol Layer Registers */
-#define DSI_STARTDSI		0x0204
-#define DSI_BUSYDSI		0x0208
-#define DSI_LANEENABLE		0x0210
-#define DSI_LANESTATUS0		0x0214
-#define DSI_LANESTATUS1		0x0218
-#define DSI_INTSTATUS		0x0220
-#define DSI_INTMASK		0x0224
-#define DSI_INTCLR		0x0228
-#define DSI_LPTXTO		0x0230
-
-/* DSI General Registers */
-#define DSIERRCNT		0x0300
-
-/* DSI Application Layer Registers */
-#define APLCTRL			0x0400
-#define RDPKTLN			0x0404
-
-/* Video Path Registers */
-#define VPCTRL			0x0450
-#define HTIM1			0x0454
-#define HTIM2			0x0458
-#define VTIM1			0x045C
-#define VTIM2			0x0460
-#define VFUEN			0x0464
-
-/* LVDS Registers */
-#define LVMX0003		0x0480
-#define LVMX0407		0x0484
-#define LVMX0811		0x0488
-#define LVMX1215		0x048C
-#define LVMX1619		0x0490
-#define LVMX2023		0x0494
-#define LVMX2427		0x0498
-#define LVCFG			0x049C
-#define LVPHY0			0x04A0
-#define LVPHY1			0x04A4
-
-/* System Registers */
-#define SYSSTAT			0x0500
-#define SYSRST			0x0504
-
-/* GPIO Registers */
-/*#define GPIOC			0x0520*/
-#define GPIOO			0x0524
-#define GPIOI			0x0528
-
-/* I2C Registers */
-#define I2CTIMCTRL		0x0540
-#define I2CMADDR		0x0544
-#define WDATAQ			0x0548
-#define RDATAQ			0x054C
-
-/* Chip/Rev Registers */
-#define IDREG			0x0580
-
-/* Debug Registers */
-#define DEBUG00			0x05A0
-#define DEBUG01			0x05A4
-
-/* Panel CABC registers */
-#define PANEL_PWM_CONTROL	0x90
-#define PANEL_FREQ_DIVIDER_HI	0x91
-#define PANEL_FREQ_DIVIDER_LO	0x92
-#define PANEL_DUTY_CONTROL	0x93
-#define PANEL_MODIFY_RGB	0x94
-#define PANEL_FRAMERATE_CONTROL	0x96
-#define PANEL_PWM_MIN		0x97
-#define PANEL_PWM_REF		0x98
-#define PANEL_PWM_MAX		0x99
-#define PANEL_ALLOW_DISTORT	0x9A
-#define PANEL_BYPASS_PWMI	0x9B
-
-/* Panel color management registers */
-#define PANEL_CM_ENABLE		0x700
-#define PANEL_CM_HUE		0x701
-#define PANEL_CM_SATURATION	0x702
-#define PANEL_CM_INTENSITY	0x703
-#define PANEL_CM_BRIGHTNESS	0x704
-#define PANEL_CM_CE_ENABLE	0x705
-#define PANEL_CM_PEAK_EN	0x710
-#define PANEL_CM_GAIN		0x711
-#define PANEL_CM_HUETABLE_START	0x730
-#define PANEL_CM_HUETABLE_END	0x747 /* inclusive */
-
-/* Input muxing for registers LVMX0003...LVMX2427 */
-enum {
-	INPUT_R0,	/* 0 */
-	INPUT_R1,
-	INPUT_R2,
-	INPUT_R3,
-	INPUT_R4,
-	INPUT_R5,
-	INPUT_R6,
-	INPUT_R7,
-	INPUT_G0,	/* 8 */
-	INPUT_G1,
-	INPUT_G2,
-	INPUT_G3,
-	INPUT_G4,
-	INPUT_G5,
-	INPUT_G6,
-	INPUT_G7,
-	INPUT_B0,	/* 16 */
-	INPUT_B1,
-	INPUT_B2,
-	INPUT_B3,
-	INPUT_B4,
-	INPUT_B5,
-	INPUT_B6,
-	INPUT_B7,
-	INPUT_HSYNC,	/* 24 */
-	INPUT_VSYNC,
-	INPUT_DE,
-	LOGIC_0,
-	/* 28...31 undefined */
-};
-
-#define INPUT_MUX(lvmx03, lvmx02, lvmx01, lvmx00)		\
-	(FLD_VAL(lvmx03, 29, 24) | FLD_VAL(lvmx02, 20, 16) |	\
-	FLD_VAL(lvmx01, 12, 8) | FLD_VAL(lvmx00, 4, 0))
-
-/**
- * tc35876x_regw - Write DSI-LVDS bridge register using I2C
- * @client: struct i2c_client to use
- * @reg: register address
- * @value: value to write
- *
- * Returns 0 on success, or a negative error value.
- */
-static int tc35876x_regw(struct i2c_client *client, u16 reg, u32 value)
-{
-	int r;
-	u8 tx_data[] = {
-		/* NOTE: Register address big-endian, data little-endian. */
-		(reg >> 8) & 0xff,
-		reg & 0xff,
-		value & 0xff,
-		(value >> 8) & 0xff,
-		(value >> 16) & 0xff,
-		(value >> 24) & 0xff,
-	};
-	struct i2c_msg msgs[] = {
-		{
-			.addr = client->addr,
-			.flags = 0,
-			.buf = tx_data,
-			.len = ARRAY_SIZE(tx_data),
-		},
-	};
-
-	r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
-	if (r < 0) {
-		dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x error %d\n",
-			__func__, reg, value, r);
-		return r;
-	}
-
-	if (r < ARRAY_SIZE(msgs)) {
-		dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x msgs %d\n",
-			__func__, reg, value, r);
-		return -EAGAIN;
-	}
-
-	dev_dbg(&client->dev, "%s: reg 0x%04x val 0x%08x\n",
-			__func__, reg, value);
-
-	return 0;
-}
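
As the NOTE in tc35876x_regw() says, the 6-byte payload is a big-endian 16-bit register address followed by the 32-bit value in little-endian order. Worked example: tc35876x_regw(i2c, HTIM1, 0x00280028) with HTIM1 = 0x0454 puts 04 54 28 00 28 00 on the wire.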
-
-/**
- * tc35876x_regr - Read DSI-LVDS bridge register using I2C
- * @client: struct i2c_client to use
- * @reg: register address
- * @value: pointer for storing the value
- *
- * Returns 0 on success, or a negative error value.
- */
-static int tc35876x_regr(struct i2c_client *client, u16 reg, u32 *value)
-{
-	int r;
-	u8 tx_data[] = {
-		(reg >> 8) & 0xff,
-		reg & 0xff,
-	};
-	u8 rx_data[4];
-	struct i2c_msg msgs[] = {
-		{
-			.addr = client->addr,
-			.flags = 0,
-			.buf = tx_data,
-			.len = ARRAY_SIZE(tx_data),
-		},
-		{
-			.addr = client->addr,
-			.flags = I2C_M_RD,
-			.buf = rx_data,
-			.len = ARRAY_SIZE(rx_data),
-		 },
-	};
-
-	r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
-	if (r < 0) {
-		dev_err(&client->dev, "%s: reg 0x%04x error %d\n", __func__,
-			reg, r);
-		return r;
-	}
-
-	if (r < ARRAY_SIZE(msgs)) {
-		dev_err(&client->dev, "%s: reg 0x%04x msgs %d\n", __func__,
-			reg, r);
-		return -EAGAIN;
-	}
-
-	*value = rx_data[0] << 24 | rx_data[1] << 16 |
-		rx_data[2] << 8 | rx_data[3];
-
-	dev_dbg(&client->dev, "%s: reg 0x%04x value 0x%08x\n", __func__,
-		reg, *value);
-
-	return 0;
-}
-
-void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state)
-{
-	if (WARN(!tc35876x_client, "%s called before probe", __func__))
-		return;
-
-	dev_dbg(&tc35876x_client->dev, "%s: state %d\n", __func__, state);
-
-	if (!bridge_reset)
-		return;
-
-	if (state) {
-		gpiod_set_value_cansleep(bridge_reset, 0);
-		mdelay(10);
-	} else {
-		/* Pull MIPI Bridge reset pin to Low */
-		gpiod_set_value_cansleep(bridge_reset, 0);
-		mdelay(20);
-		/* Pull MIPI Bridge reset pin to High */
-		gpiod_set_value_cansleep(bridge_reset, 1);
-		mdelay(40);
-	}
-}
-
-void tc35876x_configure_lvds_bridge(struct drm_device *dev)
-{
-	struct i2c_client *i2c = tc35876x_client;
-	u32 ppi_lptxtimecnt;
-	u32 txtagocnt;
-	u32 txtasurecnt;
-	u32 id;
-
-	if (WARN(!tc35876x_client, "%s called before probe", __func__))
-		return;
-
-	dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
-
-	if (!tc35876x_regr(i2c, IDREG, &id))
-		dev_info(&tc35876x_client->dev, "tc35876x ID 0x%08x\n", id);
-	else
-		dev_err(&tc35876x_client->dev, "Cannot read ID\n");
-
-	ppi_lptxtimecnt = 4;
-	txtagocnt = (5 * ppi_lptxtimecnt - 3) / 4;
-	txtasurecnt = 3 * ppi_lptxtimecnt / 2;
-	tc35876x_regw(i2c, PPI_TX_RX_TA, FLD_VAL(txtagocnt, 26, 16) |
-		FLD_VAL(txtasurecnt, 10, 0));
-	tc35876x_regw(i2c, PPI_LPTXTIMECNT, FLD_VAL(ppi_lptxtimecnt, 10, 0));
-
-	tc35876x_regw(i2c, PPI_D0S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
-	tc35876x_regw(i2c, PPI_D1S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
-	tc35876x_regw(i2c, PPI_D2S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
-	tc35876x_regw(i2c, PPI_D3S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
-
-	/* Enable MIPI & PPI lanes: clock lane plus four data lanes */
-	tc35876x_regw(i2c, PPI_LANEENABLE,
-		BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
-	tc35876x_regw(i2c, DSI_LANEENABLE,
-		BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
-	tc35876x_regw(i2c, PPI_STARTPPI, BIT(0));
-	tc35876x_regw(i2c, DSI_STARTDSI, BIT(0));
-
-	/* Setting LVDS output frequency */
-	tc35876x_regw(i2c, LVPHY0, FLD_VAL(1, 20, 16) |
-		FLD_VAL(2, 15, 14) | FLD_VAL(6, 4, 0)); /* 0x00048006 */
-
-	/* Setting video panel control register,0x00000120 VTGen=ON ?!?!? */
-	tc35876x_regw(i2c, VPCTRL, BIT(8) | BIT(5));
-
-	/* Horizontal back porch and horizontal pulse width. 0x00280028 */
-	tc35876x_regw(i2c, HTIM1, FLD_VAL(40, 24, 16) | FLD_VAL(40, 8, 0));
-
-	/* Horizontal front porch and horizontal active video size. 0x00500500 */
-	tc35876x_regw(i2c, HTIM2, FLD_VAL(80, 24, 16) | FLD_VAL(1280, 10, 0));
-
-	/* Vertical back porch and vertical sync pulse width. 0x000e000a */
-	tc35876x_regw(i2c, VTIM1, FLD_VAL(14, 23, 16) | FLD_VAL(10, 7, 0));
-
-	/* Vertical front porch and vertical display size. 0x000e0320 */
-	tc35876x_regw(i2c, VTIM2, FLD_VAL(14, 23, 16) | FLD_VAL(800, 10, 0));
-
-	/* Set above HTIM1, HTIM2, VTIM1, and VTIM2 at next VSYNC. */
-	tc35876x_regw(i2c, VFUEN, BIT(0));
-
-	/* Soft reset LCD controller. */
-	tc35876x_regw(i2c, SYSRST, BIT(2));
-
-	/* LVDS-TX input muxing */
-	tc35876x_regw(i2c, LVMX0003,
-		INPUT_MUX(INPUT_R5, INPUT_R4, INPUT_R3, INPUT_R2));
-	tc35876x_regw(i2c, LVMX0407,
-		INPUT_MUX(INPUT_G2, INPUT_R7, INPUT_R1, INPUT_R6));
-	tc35876x_regw(i2c, LVMX0811,
-		INPUT_MUX(INPUT_G1, INPUT_G0, INPUT_G4, INPUT_G3));
-	tc35876x_regw(i2c, LVMX1215,
-		INPUT_MUX(INPUT_B2, INPUT_G7, INPUT_G6, INPUT_G5));
-	tc35876x_regw(i2c, LVMX1619,
-		INPUT_MUX(INPUT_B4, INPUT_B3, INPUT_B1, INPUT_B0));
-	tc35876x_regw(i2c, LVMX2023,
-		INPUT_MUX(LOGIC_0,  INPUT_B7, INPUT_B6, INPUT_B5));
-	tc35876x_regw(i2c, LVMX2427,
-		INPUT_MUX(INPUT_R0, INPUT_DE, INPUT_VSYNC, INPUT_HSYNC));
-
-	/* Enable LVDS transmitter. */
-	tc35876x_regw(i2c, LVCFG, BIT(0));
-
-	/* Clear notifications. Don't write reserved bits. The old code wrote
-	 * 0xffffffff to 0x0288, which must have been an error. */
-	tc35876x_regw(i2c, DSI_INTCLR, FLD_MASK(31, 30) | FLD_MASK(22, 0));
-}
-
-#define GPIOPWMCTRL	0x38F
-#define PWM0CLKDIV0	0x62 /* low byte */
-#define PWM0CLKDIV1	0x61 /* high byte */
-
-#define SYSTEMCLK	19200000UL /* 19.2 MHz */
-#define PWM_FREQUENCY	9600 /* Hz */
-
-/* f = baseclk / (clkdiv + 1) => clkdiv = (baseclk - f) / f */
-static inline u16 calc_clkdiv(unsigned long baseclk, unsigned int f)
-{
-	return (baseclk - f) / f;
-}
-
-static void tc35876x_brightness_init(struct drm_device *dev)
-{
-	int ret;
-	u8 pwmctrl;
-	u16 clkdiv;
-
-	/* Make sure the PWM reference is the 19.2 MHz system clock. Read first
-	 * instead of setting directly to catch potential conflicts between PWM
-	 * users. */
-	ret = intel_scu_ipc_ioread8(GPIOPWMCTRL, &pwmctrl);
-	if (ret || pwmctrl != 0x01) {
-		if (ret)
-			dev_err(&dev->pdev->dev, "GPIOPWMCTRL read failed\n");
-		else
-			dev_warn(&dev->pdev->dev, "GPIOPWMCTRL was not set to system clock (pwmctrl = 0x%02x)\n", pwmctrl);
-
-		ret = intel_scu_ipc_iowrite8(GPIOPWMCTRL, 0x01);
-		if (ret)
-			dev_err(&dev->pdev->dev, "GPIOPWMCTRL set failed\n");
-	}
-
-	clkdiv = calc_clkdiv(SYSTEMCLK, PWM_FREQUENCY);
-
-	ret = intel_scu_ipc_iowrite8(PWM0CLKDIV1, (clkdiv >> 8) & 0xff);
-	if (!ret)
-		ret = intel_scu_ipc_iowrite8(PWM0CLKDIV0, clkdiv & 0xff);
-
-	if (ret)
-		dev_err(&dev->pdev->dev, "PWM0CLKDIV set failed\n");
-	else
-		dev_dbg(&dev->pdev->dev, "PWM0CLKDIV set to 0x%04x (%d Hz)\n",
-			clkdiv, PWM_FREQUENCY);
-}
-
-#define PWM0DUTYCYCLE			0x67
-
-void tc35876x_brightness_control(struct drm_device *dev, int level)
-{
-	int ret;
-	u8 duty_val;
-	u8 panel_duty_val;
-
-	level = clamp(level, 0, MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
-
-	/* PWM duty cycle 0x00...0x63 corresponds to 0...99% */
-	duty_val = level * 0x63 / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL;
-
-	/* This formula comes from the panel spec, which is poorly
-	 * translated; its derivation is unclear.
-	 */
-	panel_duty_val = (2 * level - 100) * 0xA9 /
-			 MDFLD_DSI_BRIGHTNESS_MAX_LEVEL + 0x56;
-
-	ret = intel_scu_ipc_iowrite8(PWM0DUTYCYCLE, duty_val);
-	if (ret)
-		dev_err(&tc35876x_client->dev, "%s: ipc write fail\n",
-			__func__);
-
-	if (cmi_lcd_i2c_client) {
-		ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
-						PANEL_PWM_MAX, panel_duty_val);
-		if (ret < 0)
-			dev_err(&cmi_lcd_i2c_client->dev, "%s: i2c write failed\n",
-				__func__);
-	}
-}
-
-void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev)
-{
-	if (WARN(!tc35876x_client, "%s called before probe", __func__))
-		return;
-
-	dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
-
-	if (bridge_bl_enable)
-		gpiod_set_value_cansleep(bridge_bl_enable, 0);
-
-	if (backlight_voltage)
-		gpiod_set_value_cansleep(backlight_voltage, 0);
-}
-
-void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev)
-{
-	struct drm_psb_private *dev_priv = dev->dev_private;
-
-	if (WARN(!tc35876x_client, "%s called before probe", __func__))
-		return;
-
-	dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
-
-	if (backlight_voltage) {
-		gpiod_set_value_cansleep(backlight_voltage, 1);
-		msleep(260);
-	}
-
-	if (cmi_lcd_i2c_client) {
-		int ret;
-		dev_dbg(&cmi_lcd_i2c_client->dev, "setting TCON\n");
-		/* Bit 4 is average_saving. When set to 1, brightness is
-		 * referenced to the average of the frame content; when 0, to
-		 * the maximum of the frame content. Bits 3:0 are
-		 * allow_distort: when nonzero, all color values between
-		 * 255 - allow_distort * 2 and 255 are mapped to
-		 * 255 - allow_distort * 2. The 0x10 written below sets
-		 * average_saving and leaves allow_distort at 0.
-		 */
-		ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
-						PANEL_ALLOW_DISTORT, 0x10);
-		if (ret < 0)
-			dev_err(&cmi_lcd_i2c_client->dev,
-				"i2c write failed (%d)\n", ret);
-		ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
-						PANEL_BYPASS_PWMI, 0);
-		if (ret < 0)
-			dev_err(&cmi_lcd_i2c_client->dev,
-				"i2c write failed (%d)\n", ret);
-		/* Set minimum brightness value - this is tunable */
-		ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
-						PANEL_PWM_MIN, 0x35);
-		if (ret < 0)
-			dev_err(&cmi_lcd_i2c_client->dev,
-				"i2c write failed (%d)\n", ret);
-	}
-
-	if (bridge_bl_enable)
-		gpiod_set_value_cansleep(bridge_bl_enable, 1);
-
-	tc35876x_brightness_control(dev, dev_priv->brightness_adjusted);
-}
-
-static struct drm_display_mode *tc35876x_get_config_mode(struct drm_device *dev)
-{
-	struct drm_display_mode *mode;
-
-	dev_dbg(&dev->pdev->dev, "%s\n", __func__);
-
-	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
-	if (!mode)
-		return NULL;
-
-	/* FIXME: do this properly. */
-	mode->hdisplay = 1280;
-	mode->vdisplay = 800;
-	mode->hsync_start = 1360;
-	mode->hsync_end = 1400;
-	mode->htotal = 1440;
-	mode->vsync_start = 814;
-	mode->vsync_end = 824;
-	mode->vtotal = 838;
-	mode->clock = 33324 << 1;
-
-	dev_info(&dev->pdev->dev, "hdisplay(w) = %d\n", mode->hdisplay);
-	dev_info(&dev->pdev->dev, "vdisplay(h) = %d\n", mode->vdisplay);
-	dev_info(&dev->pdev->dev, "HSS = %d\n", mode->hsync_start);
-	dev_info(&dev->pdev->dev, "HSE = %d\n", mode->hsync_end);
-	dev_info(&dev->pdev->dev, "htotal = %d\n", mode->htotal);
-	dev_info(&dev->pdev->dev, "VSS = %d\n", mode->vsync_start);
-	dev_info(&dev->pdev->dev, "VSE = %d\n", mode->vsync_end);
-	dev_info(&dev->pdev->dev, "vtotal = %d\n", mode->vtotal);
-	dev_info(&dev->pdev->dev, "clock = %d\n", mode->clock);
-
-	drm_mode_set_name(mode);
-	drm_mode_set_crtcinfo(mode, 0);
-
-	mode->type |= DRM_MODE_TYPE_PREFERRED;
-
-	return mode;
-}
-
-/* DV1 Active area 216.96 x 135.6 mm */
-#define DV1_PANEL_WIDTH 217
-#define DV1_PANEL_HEIGHT 136
-
-static int tc35876x_get_panel_info(struct drm_device *dev, int pipe,
-				struct panel_info *pi)
-{
-	if (!dev || !pi)
-		return -EINVAL;
-
-	pi->width_mm = DV1_PANEL_WIDTH;
-	pi->height_mm = DV1_PANEL_HEIGHT;
-
-	return 0;
-}
-
-static int tc35876x_bridge_probe(struct i2c_client *client,
-				const struct i2c_device_id *id)
-{
-	dev_info(&client->dev, "%s\n", __func__);
-
-	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
-		dev_err(&client->dev, "%s: i2c_check_functionality() failed\n",
-			__func__);
-		return -ENODEV;
-	}
-
-	bridge_reset = devm_gpiod_get_optional(&client->dev, "bridge-reset", GPIOD_OUT_LOW);
-	if (IS_ERR(bridge_reset))
-		return PTR_ERR(bridge_reset);
-	if (bridge_reset)
-		gpiod_set_consumer_name(bridge_reset, "tc35876x bridge reset");
-
-	bridge_bl_enable = devm_gpiod_get_optional(&client->dev, "bl-en", GPIOD_OUT_LOW);
-	if (IS_ERR(bridge_bl_enable))
-		return PTR_ERR(bridge_bl_enable);
-	if (bridge_bl_enable)
-		gpiod_set_consumer_name(bridge_bl_enable, "tc35876x panel bl en");
-
-	backlight_voltage = devm_gpiod_get_optional(&client->dev, "vadd", GPIOD_OUT_LOW);
-	if (IS_ERR(backlight_voltage))
-		return PTR_ERR(backlight_voltage);
-	if (backlight_voltage)
-		gpiod_set_consumer_name(backlight_voltage, "tc35876x panel vadd");
-
-	tc35876x_client = client;
-
-	return 0;
-}
-
-static int tc35876x_bridge_remove(struct i2c_client *client)
-{
-	dev_dbg(&client->dev, "%s\n", __func__);
-
-	tc35876x_client = NULL;
-
-	return 0;
-}
-
-static const struct i2c_device_id tc35876x_bridge_id[] = {
-	{ "i2c_disp_brig", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(i2c, tc35876x_bridge_id);
-
-static struct i2c_driver tc35876x_bridge_i2c_driver = {
-	.driver = {
-		.name = "i2c_disp_brig",
-	},
-	.id_table = tc35876x_bridge_id,
-	.probe = tc35876x_bridge_probe,
-	.remove = tc35876x_bridge_remove,
-};
-
-/* LCD panel I2C */
-static int cmi_lcd_i2c_probe(struct i2c_client *client,
-			     const struct i2c_device_id *id)
-{
-	dev_info(&client->dev, "%s\n", __func__);
-
-	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
-		dev_err(&client->dev, "%s: i2c_check_functionality() failed\n",
-			__func__);
-		return -ENODEV;
-	}
-
-	cmi_lcd_i2c_client = client;
-
-	return 0;
-}
-
-static int cmi_lcd_i2c_remove(struct i2c_client *client)
-{
-	dev_dbg(&client->dev, "%s\n", __func__);
-
-	cmi_lcd_i2c_client = NULL;
-
-	return 0;
-}
-
-static const struct i2c_device_id cmi_lcd_i2c_id[] = {
-	{ "cmi-lcd", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(i2c, cmi_lcd_i2c_id);
-
-static struct i2c_driver cmi_lcd_i2c_driver = {
-	.driver = {
-		.name = "cmi-lcd",
-	},
-	.id_table = cmi_lcd_i2c_id,
-	.probe = cmi_lcd_i2c_probe,
-	.remove = cmi_lcd_i2c_remove,
-};
-
-/* HACK: create the I2C device ourselves, since platform code does not create it */
-#define CMI_LCD_I2C_ADAPTER	2
-#define CMI_LCD_I2C_ADDR	0x60
-
-static int cmi_lcd_hack_create_device(void)
-{
-	struct i2c_adapter *adapter;
-	struct i2c_client *client;
-	struct i2c_board_info info = {
-		.type = "cmi-lcd",
-		.addr = CMI_LCD_I2C_ADDR,
-	};
-
-	pr_debug("%s\n", __func__);
-
-	adapter = i2c_get_adapter(CMI_LCD_I2C_ADAPTER);
-	if (!adapter) {
-		pr_err("%s: i2c_get_adapter(%d) failed\n", __func__,
-			CMI_LCD_I2C_ADAPTER);
-		return -EINVAL;
-	}
-
-	client = i2c_new_client_device(adapter, &info);
-	if (IS_ERR(client)) {
-		pr_err("%s: creating I2C device failed\n", __func__);
-		i2c_put_adapter(adapter);
-		return PTR_ERR(client);
-	}
-
-	return 0;
-}
-
-static const struct drm_encoder_helper_funcs tc35876x_encoder_helper_funcs = {
-	.dpms = mdfld_dsi_dpi_dpms,
-	.mode_fixup = mdfld_dsi_dpi_mode_fixup,
-	.prepare = mdfld_dsi_dpi_prepare,
-	.mode_set = mdfld_dsi_dpi_mode_set,
-	.commit = mdfld_dsi_dpi_commit,
-};
-
-const struct panel_funcs mdfld_tc35876x_funcs = {
-	.encoder_helper_funcs = &tc35876x_encoder_helper_funcs,
-	.get_config_mode = tc35876x_get_config_mode,
-	.get_panel_info = tc35876x_get_panel_info,
-};
-
-void tc35876x_init(struct drm_device *dev)
-{
-	int r;
-
-	dev_dbg(&dev->pdev->dev, "%s\n", __func__);
-
-	cmi_lcd_hack_create_device();
-
-	r = i2c_add_driver(&cmi_lcd_i2c_driver);
-	if (r < 0)
-		dev_err(&dev->pdev->dev,
-			"%s: i2c_add_driver() for %s failed (%d)\n",
-			__func__, cmi_lcd_i2c_driver.driver.name, r);
-
-	r = i2c_add_driver(&tc35876x_bridge_i2c_driver);
-	if (r < 0)
-		dev_err(&dev->pdev->dev,
-			"%s: i2c_add_driver() for %s failed (%d)\n",
-			__func__, tc35876x_bridge_i2c_driver.driver.name, r);
-
-	tc35876x_brightness_init(dev);
-}
-
-void tc35876x_exit(void)
-{
-	pr_debug("%s\n", __func__);
-
-	i2c_del_driver(&tc35876x_bridge_i2c_driver);
-
-	if (cmi_lcd_i2c_client)
-		i2c_del_driver(&cmi_lcd_i2c_driver);
-}
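
For reference, the PWM divider math from tc35876x_brightness_init() above
works out as follows. This standalone sketch (not part of the patch) uses the
19.2 MHz SYSTEMCLK and 9600 Hz PWM_FREQUENCY constants from the deleted file:

	#include <stdio.h>

	/* f = baseclk / (clkdiv + 1)  =>  clkdiv = (baseclk - f) / f */
	static unsigned short calc_clkdiv(unsigned long baseclk, unsigned int f)
	{
		return (baseclk - f) / f;
	}

	int main(void)
	{
		unsigned short clkdiv = calc_clkdiv(19200000UL, 9600);

		/* 19200000 / 9600 - 1 = 1999 = 0x07cf; written as high byte
		 * 0x07 (PWM0CLKDIV1) and low byte 0xcf (PWM0CLKDIV0). */
		printf("clkdiv = 0x%04x (high 0x%02x, low 0x%02x)\n",
		       clkdiv, clkdiv >> 8, clkdiv & 0xff);
		return 0;
	}
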
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h
deleted file mode 100644
index b14b7f9e7d1e0892813e559d8a3b428ebd942abc..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright © 2011 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __MDFLD_DSI_LVDS_BRIDGE_H__
-#define __MDFLD_DSI_LVDS_BRIDGE_H__
-
-void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state);
-void tc35876x_configure_lvds_bridge(struct drm_device *dev);
-void tc35876x_brightness_control(struct drm_device *dev, int level);
-void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev);
-void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev);
-void tc35876x_init(struct drm_device *dev);
-void tc35876x_exit(void);
-
-extern const struct panel_funcs mdfld_tc35876x_funcs;
-
-#endif /*__MDFLD_DSI_LVDS_BRIDGE_H__*/
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index 684ef794eb7cbe57cfed783013e91c86d2b8d0c8..d25c75e60d3d4c3d67b10139d479306161288889 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o hibmc_drm_i2c.o
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o
 
 obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index ea962acfeae0897f7b0b61ecdd2814a463cc2afd..096eea985b6f9ee68675759449774086bf259847 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -499,7 +499,7 @@ static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
 
 int hibmc_de_init(struct hibmc_drm_private *priv)
 {
-	struct drm_device *dev = priv->dev;
+	struct drm_device *dev = &priv->dev;
 	struct drm_crtc *crtc = &priv->crtc;
 	struct drm_plane *plane = &priv->primary_plane;
 	int ret;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index d845657fd99cc4f15034e3ed768e69335462e229..abd6250d5a149b6fd2bcabc8aa3369ff3039bdf3 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -16,6 +16,7 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_gem_vram_helper.h>
 #include <drm/drm_irq.h>
 #include <drm/drm_managed.h>
@@ -43,6 +44,12 @@ static irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
+static int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
+			     struct drm_mode_create_dumb *args)
+{
+	return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args);
+}
+
 static const struct drm_driver hibmc_driver = {
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 	.fops			= &hibmc_fops,
@@ -77,47 +84,48 @@ static const struct dev_pm_ops hibmc_pm_ops = {
 				hibmc_pm_resume)
 };
 
+static const struct drm_mode_config_funcs hibmc_mode_funcs = {
+	.mode_valid = drm_vram_helper_mode_valid,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+	.fb_create = drm_gem_fb_create,
+};
+
 static int hibmc_kms_init(struct hibmc_drm_private *priv)
 {
+	struct drm_device *dev = &priv->dev;
 	int ret;
 
-	drm_mode_config_init(priv->dev);
-	priv->mode_config_initialized = true;
+	ret = drmm_mode_config_init(dev);
+	if (ret)
+		return ret;
 
-	priv->dev->mode_config.min_width = 0;
-	priv->dev->mode_config.min_height = 0;
-	priv->dev->mode_config.max_width = 1920;
-	priv->dev->mode_config.max_height = 1200;
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.max_width = 1920;
+	dev->mode_config.max_height = 1200;
 
-	priv->dev->mode_config.fb_base = priv->fb_base;
-	priv->dev->mode_config.preferred_depth = 32;
-	priv->dev->mode_config.prefer_shadow = 1;
+	dev->mode_config.fb_base = priv->fb_base;
+	dev->mode_config.preferred_depth = 32;
+	dev->mode_config.prefer_shadow = 1;
 
-	priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
+	dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
 
 	ret = hibmc_de_init(priv);
 	if (ret) {
-		drm_err(priv->dev, "failed to init de: %d\n", ret);
+		drm_err(dev, "failed to init de: %d\n", ret);
 		return ret;
 	}
 
 	ret = hibmc_vdac_init(priv);
 	if (ret) {
-		drm_err(priv->dev, "failed to init vdac: %d\n", ret);
+		drm_err(dev, "failed to init vdac: %d\n", ret);
 		return ret;
 	}
 
 	return 0;
 }
 
-static void hibmc_kms_fini(struct hibmc_drm_private *priv)
-{
-	if (priv->mode_config_initialized) {
-		drm_mode_config_cleanup(priv->dev);
-		priv->mode_config_initialized = false;
-	}
-}
-
 /*
  * The controller can operate in one of three power modes: 0, 1 or Sleep.
  */
@@ -202,8 +210,8 @@ static void hibmc_hw_config(struct hibmc_drm_private *priv)
 
 static int hibmc_hw_map(struct hibmc_drm_private *priv)
 {
-	struct drm_device *dev = priv->dev;
-	struct pci_dev *pdev = dev->pdev;
+	struct drm_device *dev = &priv->dev;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	resource_size_t addr, size, ioaddr, iosize;
 
 	ioaddr = pci_resource_start(pdev, 1);
@@ -242,40 +250,31 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv)
 
 static int hibmc_unload(struct drm_device *dev)
 {
-	struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
-
 	drm_atomic_helper_shutdown(dev);
 
 	if (dev->irq_enabled)
 		drm_irq_uninstall(dev);
 
-	pci_disable_msi(dev->pdev);
-	hibmc_kms_fini(priv);
-	hibmc_mm_fini(priv);
-	dev->dev_private = NULL;
+	pci_disable_msi(to_pci_dev(dev->dev));
+
 	return 0;
 }
 
 static int hibmc_load(struct drm_device *dev)
 {
-	struct hibmc_drm_private *priv;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+	struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
 	int ret;
 
-	priv = drmm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		drm_err(dev, "no memory to allocate for hibmc_drm_private\n");
-		return -ENOMEM;
-	}
-	dev->dev_private = priv;
-	priv->dev = dev;
-
 	ret = hibmc_hw_init(priv);
 	if (ret)
 		goto err;
 
-	ret = hibmc_mm_init(priv);
-	if (ret)
+	ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), priv->fb_size);
+	if (ret) {
+		drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
 		goto err;
+	}
 
 	ret = hibmc_kms_init(priv);
 	if (ret)
@@ -287,11 +286,11 @@ static int hibmc_load(struct drm_device *dev)
 		goto err;
 	}
 
-	ret = pci_enable_msi(dev->pdev);
+	ret = pci_enable_msi(pdev);
 	if (ret) {
 		drm_warn(dev, "enabling MSI failed: %d\n", ret);
 	} else {
-		ret = drm_irq_install(dev, dev->pdev->irq);
+		ret = drm_irq_install(dev, pdev->irq);
 		if (ret)
 			drm_warn(dev, "install irq failed: %d\n", ret);
 	}
@@ -310,6 +309,7 @@ static int hibmc_load(struct drm_device *dev)
 static int hibmc_pci_probe(struct pci_dev *pdev,
 			   const struct pci_device_id *ent)
 {
+	struct hibmc_drm_private *priv;
 	struct drm_device *dev;
 	int ret;
 
@@ -318,25 +318,26 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
 	if (ret)
 		return ret;
 
-	dev = drm_dev_alloc(&hibmc_driver, &pdev->dev);
-	if (IS_ERR(dev)) {
+	priv = devm_drm_dev_alloc(&pdev->dev, &hibmc_driver,
+				  struct hibmc_drm_private, dev);
+	if (IS_ERR(priv)) {
 		DRM_ERROR("failed to allocate drm_device\n");
-		return PTR_ERR(dev);
+		return PTR_ERR(priv);
 	}
 
-	dev->pdev = pdev;
+	dev = &priv->dev;
 	pci_set_drvdata(pdev, dev);
 
-	ret = pci_enable_device(pdev);
+	ret = pcim_enable_device(pdev);
 	if (ret) {
 		drm_err(dev, "failed to enable pci device: %d\n", ret);
-		goto err_free;
+		goto err_return;
 	}
 
 	ret = hibmc_load(dev);
 	if (ret) {
 		drm_err(dev, "failed to load hibmc: %d\n", ret);
-		goto err_disable;
+		goto err_return;
 	}
 
 	ret = drm_dev_register(dev, 0);
@@ -352,11 +353,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
 
 err_unload:
 	hibmc_unload(dev);
-err_disable:
-	pci_disable_device(pdev);
-err_free:
-	drm_dev_put(dev);
-
+err_return:
 	return ret;
 }
 
@@ -366,7 +363,6 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
 
 	drm_dev_unregister(dev);
 	hibmc_unload(dev);
-	drm_dev_put(dev);
 }
 
 static const struct pci_device_id hibmc_pci_table[] = {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index f310a83d9c48d89c9d1160130f03c6e5bbbe4ff1..7d263f4d707845858a7f9f6253efe8a6d1c974be 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -37,12 +37,11 @@ struct hibmc_drm_private {
 	resource_size_t  fb_size;
 
 	/* drm */
-	struct drm_device  *dev;
+	struct drm_device dev;
 	struct drm_plane primary_plane;
 	struct drm_crtc crtc;
 	struct drm_encoder encoder;
 	struct hibmc_connector connector;
-	bool mode_config_initialized;
 };
 
 static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector)
@@ -52,7 +51,7 @@ static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *c
 
 static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
 {
-	return dev->dev_private;
+	return container_of(dev, struct hibmc_drm_private, dev);
 }
 
 void hibmc_set_power_mode(struct hibmc_drm_private *priv,
@@ -64,11 +63,6 @@ int hibmc_de_init(struct hibmc_drm_private *priv);
 int hibmc_vdac_init(struct hibmc_drm_private *priv);
 
 int hibmc_mm_init(struct hibmc_drm_private *hibmc);
-void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
-int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
-		      struct drm_mode_create_dumb *args);
 int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
 
-extern const struct drm_mode_config_funcs hibmc_mode_funcs;
-
 #endif
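
The header change above replaces the dev_private pointer with an embedded
drm_device and a container_of() upcast. A minimal sketch of that pattern;
my_private, to_my_private and my_driver are illustrative names, not from
the patch:

	struct my_private {
		struct drm_device dev;	/* embedded; allocated together with my_private */
	};

	/* Upcast from the embedded drm_device to the containing private
	 * struct; valid because both live in one allocation. */
	static inline struct my_private *to_my_private(struct drm_device *dev)
	{
		return container_of(dev, struct my_private, dev);
	}

	/* In probe, the pair is allocated as one managed object:
	 *
	 *	priv = devm_drm_dev_alloc(&pdev->dev, &my_driver,
	 *				  struct my_private, dev);
	 */
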
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
index 86d712090d873b184d6103785130d2b26492ec1f..410bd019bb357257054b92ec14b2df1d7b0aa82a 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
@@ -83,7 +83,7 @@ int hibmc_ddc_create(struct drm_device *drm_dev,
 	connector->adapter.owner = THIS_MODULE;
 	connector->adapter.class = I2C_CLASS_DDC;
 	snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
-	connector->adapter.dev.parent = &drm_dev->pdev->dev;
+	connector->adapter.dev.parent = drm_dev->dev;
 	i2c_set_adapdata(&connector->adapter, connector);
 	connector->adapter.algo_data = &connector->bit_data;
 
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 74e26c27d8785de74377d3572a625c914245589e..c228091fb0e6a43e872cb8a298b4b327e4d52073 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -14,6 +14,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_print.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "hibmc_drm_drv.h"
 #include "hibmc_drm_regs.h"
@@ -42,12 +43,6 @@ static int hibmc_connector_get_modes(struct drm_connector *connector)
 	return count;
 }
 
-static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector,
-						       struct drm_display_mode *mode)
-{
-	return MODE_OK;
-}
-
 static void hibmc_connector_destroy(struct drm_connector *connector)
 {
 	struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
@@ -59,7 +54,6 @@ static void hibmc_connector_destroy(struct drm_connector *connector)
 static const struct drm_connector_helper_funcs
 	hibmc_connector_helper_funcs = {
 	.get_modes = hibmc_connector_get_modes,
-	.mode_valid = hibmc_connector_mode_valid,
 };
 
 static const struct drm_connector_funcs hibmc_connector_funcs = {
@@ -90,15 +84,12 @@ static const struct drm_encoder_helper_funcs hibmc_encoder_helper_funcs = {
 	.mode_set = hibmc_encoder_mode_set,
 };
 
-static const struct drm_encoder_funcs hibmc_encoder_funcs = {
-	.destroy = drm_encoder_cleanup,
-};
-
 int hibmc_vdac_init(struct hibmc_drm_private *priv)
 {
-	struct drm_device *dev = priv->dev;
+	struct drm_device *dev = &priv->dev;
 	struct hibmc_connector *hibmc_connector = &priv->connector;
 	struct drm_encoder *encoder = &priv->encoder;
+	struct drm_crtc *crtc = &priv->crtc;
 	struct drm_connector *connector = &hibmc_connector->base;
 	int ret;
 
@@ -108,9 +99,8 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
 		return ret;
 	}
 
-	encoder->possible_crtcs = 0x1;
-	ret = drm_encoder_init(dev, encoder, &hibmc_encoder_funcs,
-			       DRM_MODE_ENCODER_DAC, NULL);
+	encoder->possible_crtcs = drm_crtc_mask(crtc);
+	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
 	if (ret) {
 		drm_err(dev, "failed to init encoder: %d\n", ret);
 		return ret;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
deleted file mode 100644
index 602ece11bb4a4b01bd5269fb823c877d7751b3ce..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Hisilicon Hibmc SoC drm driver
- *
- * Based on the bochs drm driver.
- *
- * Copyright (c) 2016 Huawei Limited.
- *
- * Author:
- *	Rongrong Zou <zourongrong@huawei.com>
- *	Rongrong Zou <zourongrong@gmail.com>
- *	Jianhua Li <lijianhua@huawei.com>
- */
-
-#include <linux/pci.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_gem.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_print.h>
-
-#include "hibmc_drm_drv.h"
-
-int hibmc_mm_init(struct hibmc_drm_private *hibmc)
-{
-	struct drm_vram_mm *vmm;
-	int ret;
-	struct drm_device *dev = hibmc->dev;
-
-	vmm = drm_vram_helper_alloc_mm(dev,
-				       pci_resource_start(dev->pdev, 0),
-				       hibmc->fb_size);
-	if (IS_ERR(vmm)) {
-		ret = PTR_ERR(vmm);
-		drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
-{
-	if (!hibmc->dev->vram_mm)
-		return;
-
-	drm_vram_helper_release_mm(hibmc->dev);
-}
-
-int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
-		      struct drm_mode_create_dumb *args)
-{
-	return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args);
-}
-
-const struct drm_mode_config_funcs hibmc_mode_funcs = {
-	.mode_valid = drm_vram_helper_mode_valid,
-	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = drm_atomic_helper_commit,
-	.fb_create = drm_gem_fb_create,
-};
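
With hibmc_ttm.c deleted, the VRAM MM setup moves into hibmc_load() as a
managed call. A sketch of the replacement (names mirror the patch; error
handling trimmed):

	/* drmm_vram_helper_init() registers its own cleanup action, so the
	 * deleted hibmc_mm_fini()/drm_vram_helper_release_mm() teardown is
	 * no longer needed; it runs when the drm_device is released. */
	static int hibmc_vram_init_sketch(struct drm_device *dev,
					  struct pci_dev *pdev,
					  resource_size_t vram_size)
	{
		return drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
					     vram_size);
	}
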
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 25cd9788a4d54b13b341de260ac27fe2c315450e..72a38f28393fc2bc36fa4d238e6d7406055508b4 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -19,6 +19,8 @@ config DRM_I915_WERROR
 config DRM_I915_DEBUG
 	bool "Enable additional driver debugging"
 	depends on DRM_I915
+	depends on EXPERT # only for developers
+	depends on !COMPILE_TEST # never built by robots
 	select DEBUG_FS
 	select PREEMPT_COUNT
 	select I2C_CHARDEV
@@ -31,10 +33,13 @@ config DRM_I915_DEBUG
 	select DRM_DEBUG_SELFTEST
 	select DMABUF_SELFTESTS
 	select SW_SYNC # signaling validation framework (igt/syncobj*)
+	select DRM_I915_WERROR
+	select DRM_I915_DEBUG_GEM
+	select DRM_I915_DEBUG_GEM_ONCE
+	select DRM_I915_DEBUG_MMIO
+	select DRM_I915_DEBUG_RUNTIME_PM
 	select DRM_I915_SW_FENCE_DEBUG_OBJECTS
 	select DRM_I915_SELFTEST
-	select DRM_I915_DEBUG_RUNTIME_PM
-	select DRM_I915_DEBUG_MMIO
 	default n
 	help
 	  Choose this option to turn on extra driver debugging that may affect
@@ -69,6 +74,21 @@ config DRM_I915_DEBUG_GEM
 
 	  If in doubt, say "N".
 
+config DRM_I915_DEBUG_GEM_ONCE
+	bool "Make a GEM debug failure fatal"
+	default n
+	depends on DRM_I915_DEBUG_GEM
+	help
+	  During development, we often only want the very first failure,
+	  as it would otherwise be lost in the deluge of subsequent
+	  failures. More casual testers, however, may not want to trigger
+	  a hard BUG_ON and may instead hope that the system remains
+	  sufficiently usable to capture a bug report in situ.
+
+	  Recommended for driver developers only.
+
+	  If in doubt, say "N".
+
 config DRM_I915_ERRLOG_GEM
 	bool "Insert extra logging (very verbose) for common GEM errors"
 	default n
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 6d9e81ea67f4b742d52afaf420a8ccfc9a4ef329..32bd1fdffffeca6f7b4a884e347778648fd3add2 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -59,6 +59,7 @@ i915-y += i915_drv.o \
 
 # core library code
 i915-y += \
+	dma_resv_utils.o \
 	i915_memcpy.o \
 	i915_mm.o \
 	i915_sw_fence.o \
@@ -83,6 +84,7 @@ gt-y += \
 	gt/gen6_engine_cs.o \
 	gt/gen6_ppgtt.o \
 	gt/gen7_renderclear.o \
+	gt/gen8_engine_cs.o \
 	gt/gen8_ppgtt.o \
 	gt/intel_breadcrumbs.o \
 	gt/intel_context.o \
@@ -92,6 +94,7 @@ gt-y += \
 	gt/intel_engine_heartbeat.o \
 	gt/intel_engine_pm.o \
 	gt/intel_engine_user.o \
+	gt/intel_execlists_submission.o \
 	gt/intel_ggtt.o \
 	gt/intel_ggtt_fencing.o \
 	gt/intel_gt.o \
@@ -107,6 +110,7 @@ gt-y += \
 	gt/intel_mocs.o \
 	gt/intel_ppgtt.o \
 	gt/intel_rc6.o \
+	gt/intel_region_lmem.o \
 	gt/intel_renderstate.o \
 	gt/intel_reset.o \
 	gt/intel_ring.o \
@@ -132,6 +136,7 @@ gem-y += \
 	gem/i915_gem_clflush.o \
 	gem/i915_gem_client_blt.o \
 	gem/i915_gem_context.o \
+	gem/i915_gem_create.o \
 	gem/i915_gem_dmabuf.o \
 	gem/i915_gem_domain.o \
 	gem/i915_gem_execbuffer.o \
@@ -167,7 +172,6 @@ i915-y += \
 	  i915_scheduler.o \
 	  i915_trace_points.o \
 	  i915_vma.o \
-	  intel_region_lmem.o \
 	  intel_wopcm.o
 
 # general-purpose microcontroller (GuC) support
@@ -197,13 +201,17 @@ i915-y += \
 	display/intel_color.o \
 	display/intel_combo_phy.o \
 	display/intel_connector.o \
+	display/intel_crtc.o \
 	display/intel_csr.o \
+	display/intel_cursor.o \
 	display/intel_display.o \
 	display/intel_display_power.o \
 	display/intel_dpio_phy.o \
+	display/intel_dpll.o \
 	display/intel_dpll_mgr.o \
 	display/intel_dsb.o \
 	display/intel_fbc.o \
+	display/intel_fdi.o \
 	display/intel_fifo_underrun.o \
 	display/intel_frontbuffer.o \
 	display/intel_global_state.o \
@@ -215,7 +223,8 @@ i915-y += \
 	display/intel_quirks.o \
 	display/intel_sprite.o \
 	display/intel_tc.o \
-	display/intel_vga.o
+	display/intel_vga.o \
+	display/i9xx_plane.o
 i915-$(CONFIG_ACPI) += \
 	display/intel_acpi.o \
 	display/intel_opregion.o
@@ -234,6 +243,7 @@ i915-y += \
 	display/intel_crt.o \
 	display/intel_ddi.o \
 	display/intel_dp.o \
+	display/intel_dp_aux.o \
 	display/intel_dp_aux_backlight.o \
 	display/intel_dp_hdcp.o \
 	display/intel_dp_link_training.o \
@@ -247,9 +257,11 @@ i915-y += \
 	display/intel_lspcon.o \
 	display/intel_lvds.o \
 	display/intel_panel.o \
+	display/intel_pps.o \
 	display/intel_sdvo.o \
 	display/intel_tv.o \
 	display/intel_vdsc.o \
+	display/intel_vrr.o \
 	display/vlv_dsi.o \
 	display/vlv_dsi_pll.o
 
@@ -284,15 +296,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
 
 # exclude some broken headers from the test coverage
 no-header-test := \
-	display/intel_vbt_defs.h \
-	gvt/execlist.h \
-	gvt/fb_decoder.h \
-	gvt/gtt.h \
-	gvt/gvt.h \
-	gvt/interrupt.h \
-	gvt/mmio_context.h \
-	gvt/mpt.h \
-	gvt/scheduler.h
+	display/intel_vbt_defs.h
 
 extra-$(CONFIG_DRM_I915_WERROR) += \
 	$(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
new file mode 100644
index 0000000000000000000000000000000000000000..e3e69e6cef650c1ec85ed63b90a3548fda0f1b5e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -0,0 +1,926 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_display_types.h"
+#include "intel_sprite.h"
+#include "i9xx_plane.h"
+
+/* Primary plane formats for gen <= 3 */
+static const u32 i8xx_primary_formats[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+};
+
+/* Primary plane formats for ivb (no fp16 due to hw issue) */
+static const u32 ivb_primary_formats[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XBGR2101010,
+};
+
+/* Primary plane formats for gen >= 4, except ivb */
+static const u32 i965_primary_formats[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_XBGR16161616F,
+};
+
+/* Primary plane formats for vlv/chv */
+static const u32 vlv_primary_formats[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ARGB2101010,
+	DRM_FORMAT_ABGR2101010,
+	DRM_FORMAT_XBGR16161616F,
+};
+
+static const u64 i9xx_format_modifiers[] = {
+	I915_FORMAT_MOD_X_TILED,
+	DRM_FORMAT_MOD_LINEAR,
+	DRM_FORMAT_MOD_INVALID
+};
+
+static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
+					    u32 format, u64 modifier)
+{
+	switch (modifier) {
+	case DRM_FORMAT_MOD_LINEAR:
+	case I915_FORMAT_MOD_X_TILED:
+		break;
+	default:
+		return false;
+	}
+
+	switch (format) {
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_XRGB8888:
+		return modifier == DRM_FORMAT_MOD_LINEAR ||
+			modifier == I915_FORMAT_MOD_X_TILED;
+	default:
+		return false;
+	}
+}
+
+static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
+					    u32 format, u64 modifier)
+{
+	switch (modifier) {
+	case DRM_FORMAT_MOD_LINEAR:
+	case I915_FORMAT_MOD_X_TILED:
+		break;
+	default:
+		return false;
+	}
+
+	switch (format) {
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_XBGR16161616F:
+		return modifier == DRM_FORMAT_MOD_LINEAR ||
+			modifier == I915_FORMAT_MOD_X_TILED;
+	default:
+		return false;
+	}
+}
+
+static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
+			       enum i9xx_plane_id i9xx_plane)
+{
+	if (!HAS_FBC(dev_priv))
+		return false;
+
+	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+		return i9xx_plane == PLANE_A; /* tied to pipe A */
+	else if (IS_IVYBRIDGE(dev_priv))
+		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
+			i9xx_plane == PLANE_C;
+	else if (INTEL_GEN(dev_priv) >= 4)
+		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
+	else
+		return i9xx_plane == PLANE_A;
+}
+
+static bool i9xx_plane_has_windowing(struct intel_plane *plane)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+	if (IS_CHERRYVIEW(dev_priv))
+		return i9xx_plane == PLANE_B;
+	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+		return false;
+	else if (IS_GEN(dev_priv, 4))
+		return i9xx_plane == PLANE_C;
+	else
+		return i9xx_plane == PLANE_B ||
+			i9xx_plane == PLANE_C;
+}
+
+static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
+			  const struct intel_plane_state *plane_state)
+{
+	struct drm_i915_private *dev_priv =
+		to_i915(plane_state->uapi.plane->dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	unsigned int rotation = plane_state->hw.rotation;
+	u32 dspcntr;
+
+	dspcntr = DISPLAY_PLANE_ENABLE;
+
+	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
+	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+	switch (fb->format->format) {
+	case DRM_FORMAT_C8:
+		dspcntr |= DISPPLANE_8BPP;
+		break;
+	case DRM_FORMAT_XRGB1555:
+		dspcntr |= DISPPLANE_BGRX555;
+		break;
+	case DRM_FORMAT_ARGB1555:
+		dspcntr |= DISPPLANE_BGRA555;
+		break;
+	case DRM_FORMAT_RGB565:
+		dspcntr |= DISPPLANE_BGRX565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		dspcntr |= DISPPLANE_BGRX888;
+		break;
+	case DRM_FORMAT_XBGR8888:
+		dspcntr |= DISPPLANE_RGBX888;
+		break;
+	case DRM_FORMAT_ARGB8888:
+		dspcntr |= DISPPLANE_BGRA888;
+		break;
+	case DRM_FORMAT_ABGR8888:
+		dspcntr |= DISPPLANE_RGBA888;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+		dspcntr |= DISPPLANE_BGRX101010;
+		break;
+	case DRM_FORMAT_XBGR2101010:
+		dspcntr |= DISPPLANE_RGBX101010;
+		break;
+	case DRM_FORMAT_ARGB2101010:
+		dspcntr |= DISPPLANE_BGRA101010;
+		break;
+	case DRM_FORMAT_ABGR2101010:
+		dspcntr |= DISPPLANE_RGBA101010;
+		break;
+	case DRM_FORMAT_XBGR16161616F:
+		dspcntr |= DISPPLANE_RGBX161616;
+		break;
+	default:
+		MISSING_CASE(fb->format->format);
+		return 0;
+	}
+
+	if (INTEL_GEN(dev_priv) >= 4 &&
+	    fb->modifier == I915_FORMAT_MOD_X_TILED)
+		dspcntr |= DISPPLANE_TILED;
+
+	if (rotation & DRM_MODE_ROTATE_180)
+		dspcntr |= DISPPLANE_ROTATE_180;
+
+	if (rotation & DRM_MODE_REFLECT_X)
+		dspcntr |= DISPPLANE_MIRROR;
+
+	return dspcntr;
+}
+
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
+{
+	struct drm_i915_private *dev_priv =
+		to_i915(plane_state->uapi.plane->dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	int src_x, src_y, src_w;
+	u32 offset;
+	int ret;
+
+	ret = intel_plane_compute_gtt(plane_state);
+	if (ret)
+		return ret;
+
+	if (!plane_state->uapi.visible)
+		return 0;
+
+	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+	src_x = plane_state->uapi.src.x1 >> 16;
+	src_y = plane_state->uapi.src.y1 >> 16;
+
+	/* Undocumented hardware limit on i965/g4x/vlv/chv */
+	if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
+		return -EINVAL;
+
+	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
+
+	if (INTEL_GEN(dev_priv) >= 4)
+		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
+							    plane_state, 0);
+	else
+		offset = 0;
+
+	/*
+	 * When using an X-tiled surface the plane starts to
+	 * misbehave if the x offset + width exceeds the stride.
+	 * hsw/bdw: underrun galore
+	 * ilk/snb/ivb: wrap to the next tile row mid scanout
+	 * i965/g4x: so far appear immune to this
+	 * vlv/chv: TODO check
+	 *
+	 * Linear surfaces seem to work just fine, even on hsw/bdw
+	 * despite them not using the linear offset anymore.
+	 */
+	if (INTEL_GEN(dev_priv) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) {
+		u32 alignment = intel_surf_alignment(fb, 0);
+		int cpp = fb->format->cpp[0];
+
+		while ((src_x + src_w) * cpp > plane_state->color_plane[0].stride) {
+			if (offset == 0) {
+				drm_dbg_kms(&dev_priv->drm,
+					    "Unable to find suitable display surface offset due to X-tiling\n");
+				return -EINVAL;
+			}
+
+			offset = intel_plane_adjust_aligned_offset(&src_x, &src_y, plane_state, 0,
+								   offset, offset - alignment);
+		}
+	}
+
+	/*
+	 * Put the final coordinates back so that the src
+	 * coordinate checks will see the right values.
+	 */
+	drm_rect_translate_to(&plane_state->uapi.src,
+			      src_x << 16, src_y << 16);
+
+	/* HSW/BDW do this automagically in hardware */
+	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
+		unsigned int rotation = plane_state->hw.rotation;
+		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+		if (rotation & DRM_MODE_ROTATE_180) {
+			src_x += src_w - 1;
+			src_y += src_h - 1;
+		} else if (rotation & DRM_MODE_REFLECT_X) {
+			src_x += src_w - 1;
+		}
+	}
+
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+		drm_WARN_ON(&dev_priv->drm, src_x > 8191 || src_y > 4095);
+	} else if (INTEL_GEN(dev_priv) >= 4 &&
+		   fb->modifier == I915_FORMAT_MOD_X_TILED) {
+		drm_WARN_ON(&dev_priv->drm, src_x > 4095 || src_y > 4095);
+	}
+
+	plane_state->color_plane[0].offset = offset;
+	plane_state->color_plane[0].x = src_x;
+	plane_state->color_plane[0].y = src_y;
+
+	return 0;
+}
+
+static int
+i9xx_plane_check(struct intel_crtc_state *crtc_state,
+		 struct intel_plane_state *plane_state)
+{
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+	int ret;
+
+	ret = chv_plane_check_rotation(plane_state);
+	if (ret)
+		return ret;
+
+	ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+						DRM_PLANE_HELPER_NO_SCALING,
+						DRM_PLANE_HELPER_NO_SCALING,
+						i9xx_plane_has_windowing(plane));
+	if (ret)
+		return ret;
+
+	ret = i9xx_check_plane_surface(plane_state);
+	if (ret)
+		return ret;
+
+	if (!plane_state->uapi.visible)
+		return 0;
+
+	ret = intel_plane_check_src_coordinates(plane_state);
+	if (ret)
+		return ret;
+
+	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
+
+	return 0;
+}
+
+static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	u32 dspcntr = 0;
+
+	if (crtc_state->gamma_enable)
+		dspcntr |= DISPPLANE_GAMMA_ENABLE;
+
+	if (crtc_state->csc_enable)
+		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
+
+	if (INTEL_GEN(dev_priv) < 5)
+		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
+
+	return dspcntr;
+}
+
+static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
+			     const struct intel_plane_state *plane_state,
+			     unsigned int *num, unsigned int *den)
+{
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	unsigned int cpp = fb->format->cpp[0];
+
+	/*
+	 * g4x bspec says 64bpp pixel rate can't exceed 80%
+	 * of cdclk when the sprite plane is enabled on the
+	 * same pipe. ilk/snb bspec says 64bpp pixel rate is
+	 * never allowed to exceed 80% of cdclk. Let's just go
+	 * with the ilk/snb limit always.
+	 */
+	if (cpp == 8) {
+		*num = 10;
+		*den = 8;
+	} else {
+		*num = 1;
+		*den = 1;
+	}
+}
+
+static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+				const struct intel_plane_state *plane_state)
+{
+	unsigned int pixel_rate;
+	unsigned int num, den;
+
+	/*
+	 * Note that crtc_state->pixel_rate accounts for both
+	 * horizontal and vertical panel fitter downscaling factors.
+	 * Pre-HSW bspec tells us to only consider the horizontal
+	 * downscaling factor here. We ignore that and just consider
+	 * both for simplicity.
+	 */
+	pixel_rate = crtc_state->pixel_rate;
+
+	i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
+
+	/* two pixels per clock with double wide pipe */
+	if (crtc_state->double_wide)
+		den *= 2;
+
+	return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
+static void i9xx_update_plane(struct intel_plane *plane,
+			      const struct intel_crtc_state *crtc_state,
+			      const struct intel_plane_state *plane_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+	u32 linear_offset;
+	int x = plane_state->color_plane[0].x;
+	int y = plane_state->color_plane[0].y;
+	int crtc_x = plane_state->uapi.dst.x1;
+	int crtc_y = plane_state->uapi.dst.y1;
+	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
+	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
+	unsigned long irqflags;
+	u32 dspaddr_offset;
+	u32 dspcntr;
+
+	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
+
+	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+	if (INTEL_GEN(dev_priv) >= 4)
+		dspaddr_offset = plane_state->color_plane[0].offset;
+	else
+		dspaddr_offset = linear_offset;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
+			  plane_state->color_plane[0].stride);
+
+	if (INTEL_GEN(dev_priv) < 4) {
+		/*
+		 * PLANE_A doesn't actually have a full window
+		 * generator but let's assume we still need to
+		 * program whatever is there.
+		 */
+		intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
+				  (crtc_y << 16) | crtc_x);
+		intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
+				  ((crtc_h - 1) << 16) | (crtc_w - 1));
+	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+		intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
+				  (crtc_y << 16) | crtc_x);
+		intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
+				  ((crtc_h - 1) << 16) | (crtc_w - 1));
+		intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
+	}
+
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+		intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
+				  (y << 16) | x);
+	} else if (INTEL_GEN(dev_priv) >= 4) {
+		intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
+				  linear_offset);
+		intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
+				  (y << 16) | x);
+	}
+
+	/*
+	 * The control register self-arms if the plane was previously
+	 * disabled. Try to make the plane enable atomic by writing
+	 * the control register just before the surface register.
+	 */
+	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+	if (INTEL_GEN(dev_priv) >= 4)
+		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+	else
+		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
+				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i9xx_disable_plane(struct intel_plane *plane,
+			       const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+	unsigned long irqflags;
+	u32 dspcntr;
+
+	/*
+	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
+	 * enable on ilk+ affect the pipe bottom color as
+	 * well, so we must configure them even if the plane
+	 * is disabled.
+	 *
+	 * On pre-g4x there is no way to gamma correct the
+	 * pipe bottom color but we'll keep on doing this
+	 * anyway so that the crtc state readout works correctly.
+	 */
+	dspcntr = i9xx_plane_ctl_crtc(crtc_state);
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+	if (INTEL_GEN(dev_priv) >= 4)
+		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
+	else
+		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+g4x_primary_async_flip(struct intel_plane *plane,
+		       const struct intel_crtc_state *crtc_state,
+		       const struct intel_plane_state *plane_state,
+		       bool async_flip)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	u32 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
+	u32 dspaddr_offset = plane_state->color_plane[0].offset;
+	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+	unsigned long irqflags;
+
+	if (async_flip)
+		dspcntr |= DISPPLANE_ASYNC_FLIP;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+	intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+			  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+vlv_primary_async_flip(struct intel_plane *plane,
+		       const struct intel_crtc_state *crtc_state,
+		       const struct intel_plane_state *plane_state,
+		       bool async_flip)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	u32 dspaddr_offset = plane_state->color_plane[0].offset;
+	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	intel_de_write_fw(dev_priv, DSPADDR_VLV(i9xx_plane),
+			  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+bdw_primary_enable_flip_done(struct intel_plane *plane)
+{
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
+	enum pipe pipe = plane->pipe;
+
+	spin_lock_irq(&i915->irq_lock);
+	bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+	spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+bdw_primary_disable_flip_done(struct intel_plane *plane)
+{
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
+	enum pipe pipe = plane->pipe;
+
+	spin_lock_irq(&i915->irq_lock);
+	bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+	spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ivb_primary_enable_flip_done(struct intel_plane *plane)
+{
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+	spin_lock_irq(&i915->irq_lock);
+	ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+	spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ivb_primary_disable_flip_done(struct intel_plane *plane)
+{
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+	spin_lock_irq(&i915->irq_lock);
+	ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+	spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ilk_primary_enable_flip_done(struct intel_plane *plane)
+{
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+	spin_lock_irq(&i915->irq_lock);
+	ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+	spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ilk_primary_disable_flip_done(struct intel_plane *plane)
+{
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+	spin_lock_irq(&i915->irq_lock);
+	ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+	spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+vlv_primary_enable_flip_done(struct intel_plane *plane)
+{
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
+	enum pipe pipe = plane->pipe;
+
+	spin_lock_irq(&i915->irq_lock);
+	i915_enable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+	spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+vlv_primary_disable_flip_done(struct intel_plane *plane)
+{
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
+	enum pipe pipe = plane->pipe;
+
+	spin_lock_irq(&i915->irq_lock);
+	i915_disable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+	spin_unlock_irq(&i915->irq_lock);
+}
+
+static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
+				    enum pipe *pipe)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	enum intel_display_power_domain power_domain;
+	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+	intel_wakeref_t wakeref;
+	bool ret;
+	u32 val;
+
+	/*
+	 * Not 100% correct for planes that can move between pipes,
+	 * but that's only the case for gen2-4 which don't have any
+	 * display power wells.
+	 */
+	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	if (!wakeref)
+		return false;
+
+	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
+
+	ret = val & DISPLAY_PLANE_ENABLE;
+
+	if (INTEL_GEN(dev_priv) >= 5)
+		*pipe = plane->pipe;
+	else
+		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+			DISPPLANE_SEL_PIPE_SHIFT;
+
+	intel_display_power_put(dev_priv, power_domain, wakeref);
+
+	return ret;
+}
+
+static unsigned int
+hsw_primary_max_stride(struct intel_plane *plane,
+		       u32 pixel_format, u64 modifier,
+		       unsigned int rotation)
+{
+	const struct drm_format_info *info = drm_format_info(pixel_format);
+	int cpp = info->cpp[0];
+
+	/* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
+	return min(8192 * cpp, 32 * 1024);
+}
+
+static unsigned int
+ilk_primary_max_stride(struct intel_plane *plane,
+		       u32 pixel_format, u64 modifier,
+		       unsigned int rotation)
+{
+	const struct drm_format_info *info = drm_format_info(pixel_format);
+	int cpp = info->cpp[0];
+
+	/* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+	if (modifier == I915_FORMAT_MOD_X_TILED)
+		return min(4096 * cpp, 32 * 1024);
+	else
+		return 32 * 1024;
+}
+
+unsigned int
+i965_plane_max_stride(struct intel_plane *plane,
+		      u32 pixel_format, u64 modifier,
+		      unsigned int rotation)
+{
+	const struct drm_format_info *info = drm_format_info(pixel_format);
+	int cpp = info->cpp[0];
+
+	/* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+	if (modifier == I915_FORMAT_MOD_X_TILED)
+		return min(4096 * cpp, 16 * 1024);
+	else
+		return 32 * 1024;
+}
+
+static unsigned int
+i9xx_plane_max_stride(struct intel_plane *plane,
+		      u32 pixel_format, u64 modifier,
+		      unsigned int rotation)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+	if (INTEL_GEN(dev_priv) >= 3) {
+		if (modifier == I915_FORMAT_MOD_X_TILED)
+			return 8*1024;
+		else
+			return 16*1024;
+	} else {
+		if (plane->i9xx_plane == PLANE_C)
+			return 4*1024;
+		else
+			return 8*1024;
+	}
+}
+
+static const struct drm_plane_funcs i965_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = intel_plane_destroy,
+	.atomic_duplicate_state = intel_plane_duplicate_state,
+	.atomic_destroy_state = intel_plane_destroy_state,
+	.format_mod_supported = i965_plane_format_mod_supported,
+};
+
+static const struct drm_plane_funcs i8xx_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = intel_plane_destroy,
+	.atomic_duplicate_state = intel_plane_duplicate_state,
+	.atomic_destroy_state = intel_plane_destroy_state,
+	.format_mod_supported = i8xx_plane_format_mod_supported,
+};
+
+struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	struct intel_plane *plane;
+	const struct drm_plane_funcs *plane_funcs;
+	unsigned int supported_rotations;
+	const u32 *formats;
+	int num_formats;
+	int ret, zpos;
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		return skl_universal_plane_create(dev_priv, pipe,
+						  PLANE_PRIMARY);
+
+	plane = intel_plane_alloc();
+	if (IS_ERR(plane))
+		return plane;
+
+	plane->pipe = pipe;
+	/*
+	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
+	 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
+	 */
+	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
+	    INTEL_NUM_PIPES(dev_priv) == 2)
+		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
+	else
+		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
+	plane->id = PLANE_PRIMARY;
+	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
+
+	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
+	if (plane->has_fbc) {
+		struct intel_fbc *fbc = &dev_priv->fbc;
+
+		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
+	}
+
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		formats = vlv_primary_formats;
+		num_formats = ARRAY_SIZE(vlv_primary_formats);
+	} else if (INTEL_GEN(dev_priv) >= 4) {
+		/*
+		 * WaFP16GammaEnabling:ivb
+		 * "Workaround : When using the 64-bit format, the plane
+		 *  output on each color channel has one quarter amplitude.
+		 *  It can be brought up to full amplitude by using pipe
+		 *  gamma correction or pipe color space conversion to
+		 *  multiply the plane output by four."
+		 *
+		 * There is no dedicated plane gamma for the primary plane,
+		 * and using the pipe gamma/csc could conflict with other
+		 * planes, so we choose not to expose fp16 on IVB primary
+		 * planes. HSW primary planes no longer have this problem.
+		 */
+		if (IS_IVYBRIDGE(dev_priv)) {
+			formats = ivb_primary_formats;
+			num_formats = ARRAY_SIZE(ivb_primary_formats);
+		} else {
+			formats = i965_primary_formats;
+			num_formats = ARRAY_SIZE(i965_primary_formats);
+		}
+	} else {
+		formats = i8xx_primary_formats;
+		num_formats = ARRAY_SIZE(i8xx_primary_formats);
+	}
+
+	if (INTEL_GEN(dev_priv) >= 4)
+		plane_funcs = &i965_plane_funcs;
+	else
+		plane_funcs = &i8xx_plane_funcs;
+
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		plane->min_cdclk = vlv_plane_min_cdclk;
+	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+		plane->min_cdclk = hsw_plane_min_cdclk;
+	else if (IS_IVYBRIDGE(dev_priv))
+		plane->min_cdclk = ivb_plane_min_cdclk;
+	else
+		plane->min_cdclk = i9xx_plane_min_cdclk;
+
+	if (HAS_GMCH(dev_priv)) {
+		if (INTEL_GEN(dev_priv) >= 4)
+			plane->max_stride = i965_plane_max_stride;
+		else
+			plane->max_stride = i9xx_plane_max_stride;
+	} else {
+		if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+			plane->max_stride = hsw_primary_max_stride;
+		else
+			plane->max_stride = ilk_primary_max_stride;
+	}
+
+	plane->update_plane = i9xx_update_plane;
+	plane->disable_plane = i9xx_disable_plane;
+	plane->get_hw_state = i9xx_plane_get_hw_state;
+	plane->check_plane = i9xx_plane_check;
+
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		plane->async_flip = vlv_primary_async_flip;
+		plane->enable_flip_done = vlv_primary_enable_flip_done;
+		plane->disable_flip_done = vlv_primary_disable_flip_done;
+	} else if (IS_BROADWELL(dev_priv)) {
+		plane->need_async_flip_disable_wa = true;
+		plane->async_flip = g4x_primary_async_flip;
+		plane->enable_flip_done = bdw_primary_enable_flip_done;
+		plane->disable_flip_done = bdw_primary_disable_flip_done;
+	} else if (INTEL_GEN(dev_priv) >= 7) {
+		plane->async_flip = g4x_primary_async_flip;
+		plane->enable_flip_done = ivb_primary_enable_flip_done;
+		plane->disable_flip_done = ivb_primary_disable_flip_done;
+	} else if (INTEL_GEN(dev_priv) >= 5) {
+		plane->async_flip = g4x_primary_async_flip;
+		plane->enable_flip_done = ilk_primary_enable_flip_done;
+		plane->disable_flip_done = ilk_primary_disable_flip_done;
+	}
+
+	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+					       0, plane_funcs,
+					       formats, num_formats,
+					       i9xx_format_modifiers,
+					       DRM_PLANE_TYPE_PRIMARY,
+					       "primary %c", pipe_name(pipe));
+	else
+		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+					       0, plane_funcs,
+					       formats, num_formats,
+					       i9xx_format_modifiers,
+					       DRM_PLANE_TYPE_PRIMARY,
+					       "plane %c",
+					       plane_name(plane->i9xx_plane));
+	if (ret)
+		goto fail;
+
+	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+		supported_rotations =
+			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+			DRM_MODE_REFLECT_X;
+	} else if (INTEL_GEN(dev_priv) >= 4) {
+		supported_rotations =
+			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
+	} else {
+		supported_rotations = DRM_MODE_ROTATE_0;
+	}
+
+	if (INTEL_GEN(dev_priv) >= 4)
+		drm_plane_create_rotation_property(&plane->base,
+						   DRM_MODE_ROTATE_0,
+						   supported_rotations);
+
+	zpos = 0;
+	drm_plane_create_zpos_immutable_property(&plane->base, zpos);
+
+	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+
+	return plane;
+
+fail:
+	intel_plane_free(plane);
+
+	return ERR_PTR(ret);
+}
+
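A note on the hunk above: the (enum i9xx_plane_id)!pipe cast is the whole FBC swap. On a two-pipe gen2/3 part it flips the plane/pipe mapping so the FBC-capable plane A scans out pipe B. A standalone sketch of the mapping, assuming a two-pipe layout (the EX_* names are illustrative, not the driver's):

enum example_pipe { EX_PIPE_A, EX_PIPE_B };
enum example_plane { EX_PLANE_A, EX_PLANE_B };

/* Two pipes only: pipe A gets plane B, pipe B gets plane A. */
static enum example_plane example_primary_plane(enum example_pipe pipe)
{
	return (enum example_plane)!pipe;	/* EX_PIPE_B -> EX_PLANE_A */
}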
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca963c2a845738eef85362d7ba8a4664e5190c63
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _I9XX_PLANE_H_
+#define _I9XX_PLANE_H_
+
+#include <linux/types.h>
+
+enum pipe;
+struct drm_i915_private;
+struct intel_plane;
+struct intel_plane_state;
+
+unsigned int i965_plane_max_stride(struct intel_plane *plane,
+				   u32 pixel_format, u64 modifier,
+				   unsigned int rotation);
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+
+struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index b3533a32f8ba2e4d015532123111c7ee989381a8..9d245a689323f62703ef8780a12871ce369ecdb6 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1535,6 +1535,9 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
 
 	vdsc_cfg->convert_rgb = true;
 
+	/* FIXME: initialize from VBT */
+	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+
 	ret = intel_dsc_compute_params(encoder, crtc_state);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 7e9f84b008596be42c1dc8020096903a75e09b13..4683f98f7e543ab79a5699c1cbe2055693727ee9 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -312,10 +312,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
 	int ret;
 
 	intel_plane_set_invisible(new_crtc_state, new_plane_state);
+	new_crtc_state->enabled_planes &= ~BIT(plane->id);
 
 	if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
 		return 0;
 
+	new_crtc_state->enabled_planes |= BIT(plane->id);
+
 	ret = plane->check_plane(new_crtc_state, new_plane_state);
 	if (ret)
 		return ret;
@@ -449,7 +452,7 @@ void intel_update_plane(struct intel_plane *plane,
 	trace_intel_update_plane(&plane->base, crtc);
 
 	if (crtc_state->uapi.async_flip && plane->async_flip)
-		plane->async_flip(plane, crtc_state, plane_state);
+		plane->async_flip(plane, crtc_state, plane_state, true);
 	else
 		plane->update_plane(plane, crtc_state, plane_state);
 }
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 4cc949b228f2dd5aa3f6c1383b5d69e8ef71bbdc..987cf509337f232546777f0847f8e73950e44a8c 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1623,6 +1623,13 @@ static const u8 icp_ddc_pin_map[] = {
 	[TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP,
 };
 
+static const u8 rkl_pch_tgp_ddc_pin_map[] = {
+	[ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+	[ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+	[RKL_DDC_BUS_DDI_D] = GMBUS_PIN_9_TC1_ICP,
+	[RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP,
+};
+
 static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
 {
 	const u8 *ddc_pin_map;
@@ -1630,6 +1637,9 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
 
 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) {
 		return vbt_pin;
+	} else if (IS_ROCKETLAKE(dev_priv) && INTEL_PCH_TYPE(dev_priv) == PCH_TGP) {
+		ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
+		n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
 	} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
 		ddc_pin_map = icp_ddc_pin_map;
 		n_entries = ARRAY_SIZE(icp_ddc_pin_map);
@@ -2555,16 +2565,11 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
 			      crtc_state->dsc.slice_count);
 
 	/*
-	 * FIXME: Use VBT rc_buffer_block_size and rc_buffer_size for the
-	 * implementation specific physical rate buffer size. Currently we use
-	 * the required rate buffer model size calculated in
-	 * drm_dsc_compute_rc_parameters() according to VESA DSC Annex E.
-	 *
 	 * The VBT rc_buffer_block_size and rc_buffer_size definitions
-	 * correspond to DP 1.4 DPCD offsets 0x62 and 0x63. The DP DSC
-	 * implementation should also use the DPCD (or perhaps VBT for eDP)
-	 * provided value for the buffer size.
+	 * correspond to DP 1.4 DPCD offsets 0x62 and 0x63.
 	 */
+	vdsc_cfg->rc_model_size = drm_dsc_dp_rc_buffer_size(dsc->rc_buffer_block_size,
+							    dsc->rc_buffer_size);
 
 	/* FIXME: DSI spec says bpc + 1 for this one */
 	vdsc_cfg->line_buf_depth = VBT_DSC_LINE_BUFFER_DEPTH(dsc->line_buffer_depth);
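For context on the drm_dsc_dp_rc_buffer_size() call above: a minimal sketch of how the two fields are believed to combine into a rate buffer size in bytes. The 1/4/16/64 KiB block-size code mapping and the example_* name are assumptions for illustration, not taken from this patch:

/* DPCD 0x62 selects the block size; 0x63 is the block count minus one. */
static unsigned int example_dp_rc_buffer_size(unsigned char block_size_code,
					      unsigned char rc_buffer_size)
{
	static const unsigned int block_kib[] = { 1, 4, 16, 64 };

	if (block_size_code > 3)
		return 0;

	return block_kib[block_size_code] * 1024 * (rc_buffer_size + 1);
}

Under that reading, block code 1 (4 KiB) with rc_buffer_size 5 would give 4 KiB * 6 = 24 KiB.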
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index bd060404d24951af769138a7357075f12c94ffd6..4b5a30ac84bc36283b1e8ca4fb4181a425ad7bd4 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -20,76 +20,9 @@ struct intel_qgv_point {
 struct intel_qgv_info {
 	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
 	u8 num_points;
-	u8 num_channels;
 	u8 t_bl;
-	enum intel_dram_type dram_type;
 };
 
-static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
-					  struct intel_qgv_info *qi)
-{
-	u32 val = 0;
-	int ret;
-
-	ret = sandybridge_pcode_read(dev_priv,
-				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
-				     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
-				     &val, NULL);
-	if (ret)
-		return ret;
-
-	if (IS_GEN(dev_priv, 12)) {
-		switch (val & 0xf) {
-		case 0:
-			qi->dram_type = INTEL_DRAM_DDR4;
-			break;
-		case 3:
-			qi->dram_type = INTEL_DRAM_LPDDR4;
-			break;
-		case 4:
-			qi->dram_type = INTEL_DRAM_DDR3;
-			break;
-		case 5:
-			qi->dram_type = INTEL_DRAM_LPDDR3;
-			break;
-		default:
-			MISSING_CASE(val & 0xf);
-			break;
-		}
-	} else if (IS_GEN(dev_priv, 11)) {
-		switch (val & 0xf) {
-		case 0:
-			qi->dram_type = INTEL_DRAM_DDR4;
-			break;
-		case 1:
-			qi->dram_type = INTEL_DRAM_DDR3;
-			break;
-		case 2:
-			qi->dram_type = INTEL_DRAM_LPDDR3;
-			break;
-		case 3:
-			qi->dram_type = INTEL_DRAM_LPDDR4;
-			break;
-		default:
-			MISSING_CASE(val & 0xf);
-			break;
-		}
-	} else {
-		MISSING_CASE(INTEL_GEN(dev_priv));
-		qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */
-	}
-
-	qi->num_channels = (val & 0xf0) >> 4;
-	qi->num_points = (val & 0xf00) >> 8;
-
-	if (IS_GEN(dev_priv, 12))
-		qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16;
-	else if (IS_GEN(dev_priv, 11))
-		qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
-
-	return 0;
-}
-
 static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
 					 struct intel_qgv_point *sp,
 					 int point)
@@ -139,11 +72,15 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
 static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
 			      struct intel_qgv_info *qi)
 {
+	const struct dram_info *dram_info = &dev_priv->dram_info;
 	int i, ret;
 
-	ret = icl_pcode_read_mem_global_info(dev_priv, qi);
-	if (ret)
-		return ret;
+	qi->num_points = dram_info->num_qgv_points;
+
+	if (IS_GEN(dev_priv, 12))
+		qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 16;
+	else if (IS_GEN(dev_priv, 11))
+		qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
 
 	if (drm_WARN_ON(&dev_priv->drm,
 			qi->num_points > ARRAY_SIZE(qi->points)))
@@ -209,7 +146,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
 {
 	struct intel_qgv_info qi = {};
 	bool is_y_tile = true; /* assume y tile may be used */
-	int num_channels;
+	int num_channels = dev_priv->dram_info.num_channels;
 	int deinterleave;
 	int ipqdepth, ipqdepthpch;
 	int dclk_max;
@@ -222,7 +159,6 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
 			    "Failed to get memory subsystem information, ignoring bandwidth limits");
 		return ret;
 	}
-	num_channels = qi.num_channels;
 
 	deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
 	dclk_max = icl_sagv_max_dclk(&qi);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index c449d28d0560a6f3c88f941aa82744e126ea25a2..2e878cc274b7bb6d14b9498589cbce9e95051e5d 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2415,8 +2415,7 @@ static int intel_modeset_all_pipes(struct intel_atomic_state *state)
 		if (ret)
 			return ret;
 
-		ret = drm_atomic_add_affected_planes(&state->base,
-						     &crtc->base);
+		ret = intel_atomic_add_affected_planes(state, crtc);
 		if (ret)
 			return ret;
 
@@ -2710,8 +2709,8 @@ static int dg1_rawclk(struct drm_i915_private *dev_priv)
 	 * DG1 always uses a 38.4 MHz rawclk.  The bspec tells us
 	 * "Program Numerator=2, Denominator=4, Divider=37 decimal."
 	 */
-	I915_WRITE(PCH_RAWCLK_FREQ,
-		   CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2));
+	intel_de_write(dev_priv, PCH_RAWCLK_FREQ,
+		       CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2));
 
 	return 38400;
 }
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 172d398081eee994c6839d3edd9f0bf08b7b808b..ff7dcb7088bf4a48f4f3df0b5f312dd3ac255e66 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -1485,6 +1485,7 @@ static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
 
 static int ivb_color_check(struct intel_crtc_state *crtc_state)
 {
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	bool limited_color_range = ilk_csc_limited_range(crtc_state);
 	int ret;
 
@@ -1492,6 +1493,13 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
 	if (ret)
 		return ret;
 
+	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
+	    crtc_state->hw.ctm) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "YCBCR and CTM together are not possible\n");
+		return -EINVAL;
+	}
+
 	crtc_state->gamma_enable =
 		(crtc_state->hw.gamma_lut ||
 		 crtc_state->hw.degamma_lut) &&
@@ -1525,12 +1533,20 @@ static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state)
 
 static int glk_color_check(struct intel_crtc_state *crtc_state)
 {
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	int ret;
 
 	ret = check_luts(crtc_state);
 	if (ret)
 		return ret;
 
+	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
+	    crtc_state->hw.ctm) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "YCBCR and CTM together are not possible\n");
+		return -EINVAL;
+	}
+
 	crtc_state->gamma_enable =
 		crtc_state->hw.gamma_lut &&
 		!crtc_state->c8_planes;
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index d5ad61e4083e55a7224ba477a015ccec7cfcd151..996ae0608a62178e4fc8ea0dbbb91865bef124f7 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -427,10 +427,22 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
 		u32 val;
 
 		if (phy == PHY_A &&
-		    !icl_combo_phy_verify_state(dev_priv, phy))
-			drm_warn(&dev_priv->drm,
-				 "Combo PHY %c HW state changed unexpectedly\n",
-				 phy_name(phy));
+		    !icl_combo_phy_verify_state(dev_priv, phy)) {
+			if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) {
+				/*
+				 * A known problem with old ifwi:
+				 * https://gitlab.freedesktop.org/drm/intel/-/issues/2411
+				 * Suppress the warning for CI. Remove ASAP!
+				 */
+				drm_dbg_kms(&dev_priv->drm,
+					    "Combo PHY %c HW state changed unexpectedly\n",
+					    phy_name(phy));
+			} else {
+				drm_warn(&dev_priv->drm,
+					 "Combo PHY %c HW state changed unexpectedly\n",
+					 phy_name(phy));
+			}
+		}
 
 		if (!has_phy_misc(dev_priv, phy))
 			goto skip_phy_misc;
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 406e96785c763760cf422f9cbcb72cf5c8a721e2..d5ceb7bdc14b396b75c36fad562910d7bc47e798 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -279,24 +279,17 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector)
 }
 
 void
-intel_attach_colorspace_property(struct drm_connector *connector)
+intel_attach_hdmi_colorspace_property(struct drm_connector *connector)
 {
-	switch (connector->connector_type) {
-	case DRM_MODE_CONNECTOR_HDMIA:
-	case DRM_MODE_CONNECTOR_HDMIB:
-		if (drm_mode_create_hdmi_colorspace_property(connector))
-			return;
-		break;
-	case DRM_MODE_CONNECTOR_DisplayPort:
-	case DRM_MODE_CONNECTOR_eDP:
-		if (drm_mode_create_dp_colorspace_property(connector))
-			return;
-		break;
-	default:
-		MISSING_CASE(connector->connector_type);
-		return;
-	}
+	if (!drm_mode_create_hdmi_colorspace_property(connector))
+		drm_object_attach_property(&connector->base,
+					   connector->colorspace_property, 0);
+}
 
-	drm_object_attach_property(&connector->base,
-				   connector->colorspace_property, 0);
+void
+intel_attach_dp_colorspace_property(struct drm_connector *connector)
+{
+	if (!drm_mode_create_dp_colorspace_property(connector))
+		drm_object_attach_property(&connector->base,
+					   connector->colorspace_property, 0);
 }
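The split above trades one type-switching helper for two explicit ones, so each connector init path names its variant directly and an unexpected connector type can no longer fall through to the old MISSING_CASE at runtime. A hypothetical caller sketch (the example_* names are not from this patch):

static void example_hdmi_connector_init(struct drm_connector *connector)
{
	intel_attach_hdmi_colorspace_property(connector);
}

static void example_dp_connector_init(struct drm_connector *connector)
{
	intel_attach_dp_colorspace_property(connector);
}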
diff --git a/drivers/gpu/drm/i915/display/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h
index 93a7375c8196dab2faa358c33506ee0228b6a944..661a37a3c6d834893c094e0b5cb66c0593ee0436 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.h
+++ b/drivers/gpu/drm/i915/display/intel_connector.h
@@ -30,6 +30,7 @@ int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
 void intel_attach_force_audio_property(struct drm_connector *connector);
 void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
 void intel_attach_aspect_ratio_property(struct drm_connector *connector);
-void intel_attach_colorspace_property(struct drm_connector *connector);
+void intel_attach_hdmi_colorspace_property(struct drm_connector *connector);
+void intel_attach_dp_colorspace_property(struct drm_connector *connector);
 
 #endif /* __INTEL_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
new file mode 100644
index 0000000000000000000000000000000000000000..57b0a3ebe908513748fb9b5c9010bfc97b55cc4b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_plane_helper.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_color.h"
+#include "intel_crtc.h"
+#include "intel_cursor.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_types.h"
+#include "intel_pipe_crc.h"
+#include "intel_sprite.h"
+#include "i9xx_plane.h"
+
+static void assert_vblank_disabled(struct drm_crtc *crtc)
+{
+	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
+		drm_crtc_vblank_put(crtc);
+}
+
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
+
+	if (!vblank->max_vblank_count)
+		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
+
+	return crtc->base.funcs->get_vblank_counter(&crtc->base);
+}
+
+u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	u32 mode_flags = crtc->mode_flags;
+
+	/*
+	 * From gen 11, in DSI command mode the frame counter won't have
+	 * updated at the beginning of TE; if we used the hw counter we
+	 * would only see it update at the next TE. Hence we switch
+	 * to the sw counter.
+	 */
+	if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
+		return 0;
+
+	/*
+	 * On i965gm the hardware frame counter reads
+	 * zero when the TV encoder is enabled :(
+	 */
+	if (IS_I965GM(dev_priv) &&
+	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
+		return 0;
+
+	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+		return 0xffffffff; /* full 32 bit counter */
+	else if (INTEL_GEN(dev_priv) >= 3)
+		return 0xffffff; /* only 24 bits of frame count */
+	else
+		return 0; /* Gen2 doesn't have a hardware frame counter */
+}
+
+void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+	assert_vblank_disabled(&crtc->base);
+	drm_crtc_set_max_vblank_count(&crtc->base,
+				      intel_crtc_max_vblank_count(crtc_state));
+	drm_crtc_vblank_on(&crtc->base);
+}
+
+void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+	drm_crtc_vblank_off(&crtc->base);
+	assert_vblank_disabled(&crtc->base);
+}
+
+struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
+{
+	struct intel_crtc_state *crtc_state;
+
+	crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
+
+	if (crtc_state)
+		intel_crtc_state_reset(crtc_state, crtc);
+
+	return crtc_state;
+}
+
+void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+			    struct intel_crtc *crtc)
+{
+	memset(crtc_state, 0, sizeof(*crtc_state));
+
+	__drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
+
+	crtc_state->cpu_transcoder = INVALID_TRANSCODER;
+	crtc_state->master_transcoder = INVALID_TRANSCODER;
+	crtc_state->hsw_workaround_pipe = INVALID_PIPE;
+	crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
+	crtc_state->scaler_state.scaler_id = -1;
+	crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
+}
+
+static struct intel_crtc *intel_crtc_alloc(void)
+{
+	struct intel_crtc_state *crtc_state;
+	struct intel_crtc *crtc;
+
+	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+	if (!crtc)
+		return ERR_PTR(-ENOMEM);
+
+	crtc_state = intel_crtc_state_alloc(crtc);
+	if (!crtc_state) {
+		kfree(crtc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	crtc->base.state = &crtc_state->uapi;
+	crtc->config = crtc_state;
+
+	return crtc;
+}
+
+static void intel_crtc_free(struct intel_crtc *crtc)
+{
+	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
+	kfree(crtc);
+}
+
+static void intel_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	drm_crtc_cleanup(crtc);
+	kfree(intel_crtc);
+}
+
+static int intel_crtc_late_register(struct drm_crtc *crtc)
+{
+	intel_crtc_debugfs_add(crtc);
+	return 0;
+}
+
+#define INTEL_CRTC_FUNCS \
+	.set_config = drm_atomic_helper_set_config, \
+	.destroy = intel_crtc_destroy, \
+	.page_flip = drm_atomic_helper_page_flip, \
+	.atomic_duplicate_state = intel_crtc_duplicate_state, \
+	.atomic_destroy_state = intel_crtc_destroy_state, \
+	.set_crc_source = intel_crtc_set_crc_source, \
+	.verify_crc_source = intel_crtc_verify_crc_source, \
+	.get_crc_sources = intel_crtc_get_crc_sources, \
+	.late_register = intel_crtc_late_register
+
+static const struct drm_crtc_funcs bdw_crtc_funcs = {
+	INTEL_CRTC_FUNCS,
+
+	.get_vblank_counter = g4x_get_vblank_counter,
+	.enable_vblank = bdw_enable_vblank,
+	.disable_vblank = bdw_disable_vblank,
+	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs ilk_crtc_funcs = {
+	INTEL_CRTC_FUNCS,
+
+	.get_vblank_counter = g4x_get_vblank_counter,
+	.enable_vblank = ilk_enable_vblank,
+	.disable_vblank = ilk_disable_vblank,
+	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs g4x_crtc_funcs = {
+	INTEL_CRTC_FUNCS,
+
+	.get_vblank_counter = g4x_get_vblank_counter,
+	.enable_vblank = i965_enable_vblank,
+	.disable_vblank = i965_disable_vblank,
+	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i965_crtc_funcs = {
+	INTEL_CRTC_FUNCS,
+
+	.get_vblank_counter = i915_get_vblank_counter,
+	.enable_vblank = i965_enable_vblank,
+	.disable_vblank = i965_disable_vblank,
+	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i915gm_crtc_funcs = {
+	INTEL_CRTC_FUNCS,
+
+	.get_vblank_counter = i915_get_vblank_counter,
+	.enable_vblank = i915gm_enable_vblank,
+	.disable_vblank = i915gm_disable_vblank,
+	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i915_crtc_funcs = {
+	INTEL_CRTC_FUNCS,
+
+	.get_vblank_counter = i915_get_vblank_counter,
+	.enable_vblank = i8xx_enable_vblank,
+	.disable_vblank = i8xx_disable_vblank,
+	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i8xx_crtc_funcs = {
+	INTEL_CRTC_FUNCS,
+
+	/* no hw vblank counter */
+	.enable_vblank = i8xx_enable_vblank,
+	.disable_vblank = i8xx_disable_vblank,
+	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	struct intel_plane *primary, *cursor;
+	const struct drm_crtc_funcs *funcs;
+	struct intel_crtc *crtc;
+	int sprite, ret;
+
+	crtc = intel_crtc_alloc();
+	if (IS_ERR(crtc))
+		return PTR_ERR(crtc);
+
+	crtc->pipe = pipe;
+	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
+
+	primary = intel_primary_plane_create(dev_priv, pipe);
+	if (IS_ERR(primary)) {
+		ret = PTR_ERR(primary);
+		goto fail;
+	}
+	crtc->plane_ids_mask |= BIT(primary->id);
+
+	for_each_sprite(dev_priv, pipe, sprite) {
+		struct intel_plane *plane;
+
+		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+		if (IS_ERR(plane)) {
+			ret = PTR_ERR(plane);
+			goto fail;
+		}
+		crtc->plane_ids_mask |= BIT(plane->id);
+	}
+
+	cursor = intel_cursor_plane_create(dev_priv, pipe);
+	if (IS_ERR(cursor)) {
+		ret = PTR_ERR(cursor);
+		goto fail;
+	}
+	crtc->plane_ids_mask |= BIT(cursor->id);
+
+	if (HAS_GMCH(dev_priv)) {
+		if (IS_CHERRYVIEW(dev_priv) ||
+		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
+			funcs = &g4x_crtc_funcs;
+		else if (IS_GEN(dev_priv, 4))
+			funcs = &i965_crtc_funcs;
+		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
+			funcs = &i915gm_crtc_funcs;
+		else if (IS_GEN(dev_priv, 3))
+			funcs = &i915_crtc_funcs;
+		else
+			funcs = &i8xx_crtc_funcs;
+	} else {
+		if (INTEL_GEN(dev_priv) >= 8)
+			funcs = &bdw_crtc_funcs;
+		else
+			funcs = &ilk_crtc_funcs;
+	}
+
+	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
+					&primary->base, &cursor->base,
+					funcs, "pipe %c", pipe_name(pipe));
+	if (ret)
+		goto fail;
+
+	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
+	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
+	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
+
+	if (INTEL_GEN(dev_priv) < 9) {
+		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
+
+		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
+		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
+	}
+
+	if (INTEL_GEN(dev_priv) >= 10)
+		drm_crtc_create_scaling_filter_property(&crtc->base,
+						BIT(DRM_SCALING_FILTER_DEFAULT) |
+						BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
+
+	intel_color_init(crtc);
+
+	intel_crtc_crc_init(crtc);
+
+	drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
+
+	return 0;
+
+fail:
+	intel_crtc_free(crtc);
+
+	return ret;
+}
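The INTEL_CRTC_FUNCS macro above is a designated-initializer pattern for sharing the common drm_crtc_funcs members across the seven per-platform tables, leaving only the vblank hooks to vary per generation. A generic, self-contained sketch of the same pattern (all names hypothetical):

struct ops {
	int (*common_a)(void);
	int (*common_b)(void);
	int (*platform_hook)(void);
};

static int do_a(void) { return 0; }
static int do_b(void) { return 1; }
static int new_hw_hook(void) { return 2; }
static int old_hw_hook(void) { return 3; }

#define COMMON_OPS \
	.common_a = do_a, \
	.common_b = do_b

static const struct ops new_hw_ops = {
	COMMON_OPS,
	.platform_hook = new_hw_hook,
};

static const struct ops old_hw_ops = {
	COMMON_OPS,
	.platform_hook = old_hw_hook,
};

Because these are designated initializers, a table could also override one of the common hooks after COMMON_OPS if a platform ever needed to.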
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
new file mode 100644
index 0000000000000000000000000000000000000000..08112d55741107c44f15be8b29c89311a181a291
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_CRTC_H_
+#define _INTEL_CRTC_H_
+
+#include <linux/types.h>
+
+enum pipe;
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+
+u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state);
+int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe);
+struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
+void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+			    struct intel_crtc *crtc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
new file mode 100644
index 0000000000000000000000000000000000000000..21fe4d2753e920f3b111a9854d70b916ccca14bd
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -0,0 +1,806 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_cursor.h"
+#include "intel_display_types.h"
+#include "intel_display.h"
+
+#include "intel_frontbuffer.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+
+/* Cursor formats */
+static const u32 intel_cursor_formats[] = {
+	DRM_FORMAT_ARGB8888,
+};
+
+static const u64 cursor_format_modifiers[] = {
+	DRM_FORMAT_MOD_LINEAR,
+	DRM_FORMAT_MOD_INVALID
+};
+
+static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+{
+	struct drm_i915_private *dev_priv =
+		to_i915(plane_state->uapi.plane->dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	u32 base;
+
+	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
+		base = sg_dma_address(obj->mm.pages->sgl);
+	else
+		base = intel_plane_ggtt_offset(plane_state);
+
+	return base + plane_state->color_plane[0].offset;
+}
+
+static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
+{
+	int x = plane_state->uapi.dst.x1;
+	int y = plane_state->uapi.dst.y1;
+	u32 pos = 0;
+
+	if (x < 0) {
+		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+		x = -x;
+	}
+	pos |= x << CURSOR_X_SHIFT;
+
+	if (y < 0) {
+		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+		y = -y;
+	}
+	pos |= y << CURSOR_Y_SHIFT;
+
+	return pos;
+}
+
+static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+	const struct drm_mode_config *config =
+		&plane_state->uapi.plane->dev->mode_config;
+	int width = drm_rect_width(&plane_state->uapi.dst);
+	int height = drm_rect_height(&plane_state->uapi.dst);
+
+	return width > 0 && width <= config->cursor_width &&
+		height > 0 && height <= config->cursor_height;
+}
+
+static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
+{
+	struct drm_i915_private *dev_priv =
+		to_i915(plane_state->uapi.plane->dev);
+	unsigned int rotation = plane_state->hw.rotation;
+	int src_x, src_y;
+	u32 offset;
+	int ret;
+
+	ret = intel_plane_compute_gtt(plane_state);
+	if (ret)
+		return ret;
+
+	if (!plane_state->uapi.visible)
+		return 0;
+
+	src_x = plane_state->uapi.src.x1 >> 16;
+	src_y = plane_state->uapi.src.y1 >> 16;
+
+	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
+	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
+						    plane_state, 0);
+
+	if (src_x != 0 || src_y != 0) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "Arbitrary cursor panning not supported\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Put the final coordinates back so that the src
+	 * coordinate checks will see the right values.
+	 */
+	drm_rect_translate_to(&plane_state->uapi.src,
+			      src_x << 16, src_y << 16);
+
+	/* ILK+ do this automagically in hardware */
+	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
+		const struct drm_framebuffer *fb = plane_state->hw.fb;
+		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+		offset += (src_h * src_w - 1) * fb->format->cpp[0];
+	}
+
+	plane_state->color_plane[0].offset = offset;
+	plane_state->color_plane[0].x = src_x;
+	plane_state->color_plane[0].y = src_y;
+
+	return 0;
+}
+
+static int intel_check_cursor(struct intel_crtc_state *crtc_state,
+			      struct intel_plane_state *plane_state)
+{
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+	const struct drm_rect src = plane_state->uapi.src;
+	const struct drm_rect dst = plane_state->uapi.dst;
+	int ret;
+
+	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+		drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
+		return -EINVAL;
+	}
+
+	ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+						DRM_PLANE_HELPER_NO_SCALING,
+						DRM_PLANE_HELPER_NO_SCALING,
+						true);
+	if (ret)
+		return ret;
+
+	/* Use the unclipped src/dst rectangles, which we program to hw */
+	plane_state->uapi.src = src;
+	plane_state->uapi.dst = dst;
+
+	ret = intel_cursor_check_surface(plane_state);
+	if (ret)
+		return ret;
+
+	if (!plane_state->uapi.visible)
+		return 0;
+
+	ret = intel_plane_check_src_coordinates(plane_state);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static unsigned int
+i845_cursor_max_stride(struct intel_plane *plane,
+		       u32 pixel_format, u64 modifier,
+		       unsigned int rotation)
+{
+	return 2048;
+}
+
+static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+	u32 cntl = 0;
+
+	if (crtc_state->gamma_enable)
+		cntl |= CURSOR_GAMMA_ENABLE;
+
+	return cntl;
+}
+
+static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
+			   const struct intel_plane_state *plane_state)
+{
+	return CURSOR_ENABLE |
+		CURSOR_FORMAT_ARGB |
+		CURSOR_STRIDE(plane_state->color_plane[0].stride);
+}
+
+static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+	int width = drm_rect_width(&plane_state->uapi.dst);
+
+	/*
+	 * 845g/865g are only limited by the width of their cursors;
+	 * the height is arbitrary up to the precision of the register.
+	 */
+	return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
+}
+
+static int i845_check_cursor(struct intel_crtc_state *crtc_state,
+			     struct intel_plane_state *plane_state)
+{
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+	int ret;
+
+	ret = intel_check_cursor(crtc_state, plane_state);
+	if (ret)
+		return ret;
+
+	/* if we want to turn off the cursor ignore width and height */
+	if (!fb)
+		return 0;
+
+	/* Check for which cursor types we support */
+	if (!i845_cursor_size_ok(plane_state)) {
+		drm_dbg_kms(&i915->drm,
+			    "Cursor dimension %dx%d not supported\n",
+			    drm_rect_width(&plane_state->uapi.dst),
+			    drm_rect_height(&plane_state->uapi.dst));
+		return -EINVAL;
+	}
+
+	drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
+		    plane_state->color_plane[0].stride != fb->pitches[0]);
+
+	switch (fb->pitches[0]) {
+	case 256:
+	case 512:
+	case 1024:
+	case 2048:
+		break;
+	default:
+		 drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
+			     fb->pitches[0]);
+		return -EINVAL;
+	}
+
+	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
+
+	return 0;
+}
+
+static void i845_update_cursor(struct intel_plane *plane,
+			       const struct intel_crtc_state *crtc_state,
+			       const struct intel_plane_state *plane_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	u32 cntl = 0, base = 0, pos = 0, size = 0;
+	unsigned long irqflags;
+
+	if (plane_state && plane_state->uapi.visible) {
+		unsigned int width = drm_rect_width(&plane_state->uapi.dst);
+		unsigned int height = drm_rect_height(&plane_state->uapi.dst);
+
+		cntl = plane_state->ctl |
+			i845_cursor_ctl_crtc(crtc_state);
+
+		size = (height << 12) | width;
+
+		base = intel_cursor_base(plane_state);
+		pos = intel_cursor_position(plane_state);
+	}
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	/* On these chipsets we can only modify the base/size/stride
+	 * whilst the cursor is disabled.
+	 */
+	if (plane->cursor.base != base ||
+	    plane->cursor.size != size ||
+	    plane->cursor.cntl != cntl) {
+		intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
+		intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
+		intel_de_write_fw(dev_priv, CURSIZE, size);
+		intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+		intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);
+
+		plane->cursor.base = base;
+		plane->cursor.size = size;
+		plane->cursor.cntl = cntl;
+	} else {
+		intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+	}
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i845_disable_cursor(struct intel_plane *plane,
+				const struct intel_crtc_state *crtc_state)
+{
+	i845_update_cursor(plane, crtc_state, NULL);
+}
+
+static bool i845_cursor_get_hw_state(struct intel_plane *plane,
+				     enum pipe *pipe)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	enum intel_display_power_domain power_domain;
+	intel_wakeref_t wakeref;
+	bool ret;
+
+	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
+	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	if (!wakeref)
+		return false;
+
+	ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+
+	*pipe = PIPE_A;
+
+	intel_display_power_put(dev_priv, power_domain, wakeref);
+
+	return ret;
+}
+
+static unsigned int
+i9xx_cursor_max_stride(struct intel_plane *plane,
+		       u32 pixel_format, u64 modifier,
+		       unsigned int rotation)
+{
+	return plane->base.dev->mode_config.cursor_width * 4;
+}
+
+static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	u32 cntl = 0;
+
+	if (INTEL_GEN(dev_priv) >= 11)
+		return cntl;
+
+	if (crtc_state->gamma_enable)
+		cntl = MCURSOR_GAMMA_ENABLE;
+
+	if (crtc_state->csc_enable)
+		cntl |= MCURSOR_PIPE_CSC_ENABLE;
+
+	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+		cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
+
+	return cntl;
+}
+
+static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
+			   const struct intel_plane_state *plane_state)
+{
+	struct drm_i915_private *dev_priv =
+		to_i915(plane_state->uapi.plane->dev);
+	u32 cntl = 0;
+
+	if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
+
+	switch (drm_rect_width(&plane_state->uapi.dst)) {
+	case 64:
+		cntl |= MCURSOR_MODE_64_ARGB_AX;
+		break;
+	case 128:
+		cntl |= MCURSOR_MODE_128_ARGB_AX;
+		break;
+	case 256:
+		cntl |= MCURSOR_MODE_256_ARGB_AX;
+		break;
+	default:
+		MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
+		return 0;
+	}
+
+	if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
+		cntl |= MCURSOR_ROTATE_180;
+
+	return cntl;
+}
+
+static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+	struct drm_i915_private *dev_priv =
+		to_i915(plane_state->uapi.plane->dev);
+	int width = drm_rect_width(&plane_state->uapi.dst);
+	int height = drm_rect_height(&plane_state->uapi.dst);
+
+	if (!intel_cursor_size_ok(plane_state))
+		return false;
+
+	/* Cursor width is limited to a few power-of-two sizes */
+	switch (width) {
+	case 256:
+	case 128:
+	case 64:
+		break;
+	default:
+		return false;
+	}
+
+	/*
+	 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
+	 * height from 8 lines up to the cursor width, when the
+	 * cursor is not rotated. Everything else requires square
+	 * cursors.
+	 */
+	if (HAS_CUR_FBC(dev_priv) &&
+	    plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
+		if (height < 8 || height > width)
+			return false;
+	} else {
+		if (height != width)
+			return false;
+	}
+
+	return true;
+}
+
+static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
+			     struct intel_plane_state *plane_state)
+{
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	enum pipe pipe = plane->pipe;
+	int ret;
+
+	ret = intel_check_cursor(crtc_state, plane_state);
+	if (ret)
+		return ret;
+
+	/* if we want to turn off the cursor ignore width and height */
+	if (!fb)
+		return 0;
+
+	/* Check for which cursor types we support */
+	if (!i9xx_cursor_size_ok(plane_state)) {
+		drm_dbg(&dev_priv->drm,
+			"Cursor dimension %dx%d not supported\n",
+			drm_rect_width(&plane_state->uapi.dst),
+			drm_rect_height(&plane_state->uapi.dst));
+		return -EINVAL;
+	}
+
+	drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
+		    plane_state->color_plane[0].stride != fb->pitches[0]);
+
+	if (fb->pitches[0] !=
+	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "Invalid cursor stride (%u) (cursor width %d)\n",
+			    fb->pitches[0],
+			    drm_rect_width(&plane_state->uapi.dst));
+		return -EINVAL;
+	}
+
+	/*
+	 * There's something wrong with the cursor on CHV pipe C.
+	 * If it straddles the left edge of the screen then
+	 * moving it away from the edge or disabling it often
+	 * results in a pipe underrun, and often that can lead to
+	 * a dead pipe (constant underrun reported, and it scans
+	 * out just a solid color). To recover from that, the
+	 * display power well must be turned off and on again.
+	 * Refuse to put the cursor into that compromised position.
+	 */
+	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
+	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "CHV cursor C not allowed to straddle the left screen edge\n");
+		return -EINVAL;
+	}
+
+	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
+
+	return 0;
+}
+
+static void i9xx_update_cursor(struct intel_plane *plane,
+			       const struct intel_crtc_state *crtc_state,
+			       const struct intel_plane_state *plane_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	enum pipe pipe = plane->pipe;
+	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
+	unsigned long irqflags;
+
+	if (plane_state && plane_state->uapi.visible) {
+		int width = drm_rect_width(&plane_state->uapi.dst);
+		int height = drm_rect_height(&plane_state->uapi.dst);
+
+		cntl = plane_state->ctl |
+			i9xx_cursor_ctl_crtc(crtc_state);
+
+		if (width != height)
+			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
+
+		base = intel_cursor_base(plane_state);
+		pos = intel_cursor_position(plane_state);
+	}
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	/*
+	 * On some platforms writing CURCNTR first will also
+	 * cause CURPOS to be armed by the CURBASE write.
+	 * Without the CURCNTR write the CURPOS write would
+	 * arm itself. Thus we always update CURCNTR before
+	 * CURPOS.
+	 *
+	 * On other platforms CURPOS always requires the
+	 * CURBASE write to arm the update. Additionally,
+	 * a write to any of the cursor registers will cancel
+	 * an already armed cursor update. Thus leaving out
+	 * the CURBASE write after CURPOS could lead to a
+	 * cursor that doesn't appear to move, or even change
+	 * shape. Thus we always write CURBASE.
+	 *
+	 * The other registers are armed by the CURBASE write
+	 * except when the plane is getting enabled at which time
+	 * the CURCNTR write arms the update.
+	 */
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		skl_write_cursor_wm(plane, crtc_state);
+
+	if (!intel_crtc_needs_modeset(crtc_state))
+		intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
+
+	if (plane->cursor.base != base ||
+	    plane->cursor.size != fbc_ctl ||
+	    plane->cursor.cntl != cntl) {
+		if (HAS_CUR_FBC(dev_priv))
+			intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
+					  fbc_ctl);
+		intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
+		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+		intel_de_write_fw(dev_priv, CURBASE(pipe), base);
+
+		plane->cursor.base = base;
+		plane->cursor.size = fbc_ctl;
+		plane->cursor.cntl = cntl;
+	} else {
+		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+		intel_de_write_fw(dev_priv, CURBASE(pipe), base);
+	}
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i9xx_disable_cursor(struct intel_plane *plane,
+				const struct intel_crtc_state *crtc_state)
+{
+	i9xx_update_cursor(plane, crtc_state, NULL);
+}
+
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
+				     enum pipe *pipe)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	enum intel_display_power_domain power_domain;
+	intel_wakeref_t wakeref;
+	bool ret;
+	u32 val;
+
+	/*
+	 * Not 100% correct for planes that can move between pipes,
+	 * but that's only the case for gen2-3 which don't have any
+	 * display power wells.
+	 */
+	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	if (!wakeref)
+		return false;
+
+	val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
+
+	ret = val & MCURSOR_MODE;
+
+	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+		*pipe = plane->pipe;
+	else
+		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
+			MCURSOR_PIPE_SELECT_SHIFT;
+
+	intel_display_power_put(dev_priv, power_domain, wakeref);
+
+	return ret;
+}
+
+static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
+					      u32 format, u64 modifier)
+{
+	return modifier == DRM_FORMAT_MOD_LINEAR &&
+		format == DRM_FORMAT_ARGB8888;
+}
+
+static int
+intel_legacy_cursor_update(struct drm_plane *_plane,
+			   struct drm_crtc *_crtc,
+			   struct drm_framebuffer *fb,
+			   int crtc_x, int crtc_y,
+			   unsigned int crtc_w, unsigned int crtc_h,
+			   u32 src_x, u32 src_y,
+			   u32 src_w, u32 src_h,
+			   struct drm_modeset_acquire_ctx *ctx)
+{
+	struct intel_plane *plane = to_intel_plane(_plane);
+	struct intel_crtc *crtc = to_intel_crtc(_crtc);
+	struct intel_plane_state *old_plane_state =
+		to_intel_plane_state(plane->base.state);
+	struct intel_plane_state *new_plane_state;
+	struct intel_crtc_state *crtc_state =
+		to_intel_crtc_state(crtc->base.state);
+	struct intel_crtc_state *new_crtc_state;
+	int ret;
+
+	/*
+	 * When crtc is inactive or there is a modeset pending,
+	 * wait for it to complete in the slowpath.
+	 *
+	 * FIXME bigjoiner fastpath would be good
+	 */
+	if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state) ||
+	    crtc_state->update_pipe || crtc_state->bigjoiner)
+		goto slow;
+
+	/*
+	 * Don't do an async update if there is an outstanding commit modifying
+	 * the plane.  This prevents our async update's changes from getting
+	 * overridden by a previous synchronous update's state.
+	 */
+	if (old_plane_state->uapi.commit &&
+	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
+		goto slow;
+
+	/*
+	 * If any parameters change that may affect watermarks,
+	 * take the slowpath. Only changing fb or position should be
+	 * in the fastpath.
+	 */
+	if (old_plane_state->uapi.crtc != &crtc->base ||
+	    old_plane_state->uapi.src_w != src_w ||
+	    old_plane_state->uapi.src_h != src_h ||
+	    old_plane_state->uapi.crtc_w != crtc_w ||
+	    old_plane_state->uapi.crtc_h != crtc_h ||
+	    !old_plane_state->uapi.fb != !fb)
+		goto slow;
+
+	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
+	if (!new_plane_state)
+		return -ENOMEM;
+
+	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
+	if (!new_crtc_state) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
+
+	new_plane_state->uapi.src_x = src_x;
+	new_plane_state->uapi.src_y = src_y;
+	new_plane_state->uapi.src_w = src_w;
+	new_plane_state->uapi.src_h = src_h;
+	new_plane_state->uapi.crtc_x = crtc_x;
+	new_plane_state->uapi.crtc_y = crtc_y;
+	new_plane_state->uapi.crtc_w = crtc_w;
+	new_plane_state->uapi.crtc_h = crtc_h;
+
+	intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc);
+
+	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
+						  old_plane_state, new_plane_state);
+	if (ret)
+		goto out_free;
+
+	ret = intel_plane_pin_fb(new_plane_state);
+	if (ret)
+		goto out_free;
+
+	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
+				ORIGIN_FLIP);
+	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
+				to_intel_frontbuffer(new_plane_state->hw.fb),
+				plane->frontbuffer_bit);
+
+	/* Swap plane state */
+	plane->base.state = &new_plane_state->uapi;
+
+	/*
+	 * We cannot swap crtc_state as it may be in use by an atomic commit or
+	 * page flip that's running simultaneously. If we swap crtc_state and
+	 * destroy the old state, we will cause a use-after-free there.
+	 *
+	 * Only update active_planes, which is needed for our internal
+	 * bookkeeping. Either value will do the right thing when updating
+	 * planes atomically. If the cursor was part of the atomic update then
+	 * we would have taken the slowpath.
+	 */
+	crtc_state->active_planes = new_crtc_state->active_planes;
+
+	if (new_plane_state->uapi.visible)
+		intel_update_plane(plane, crtc_state, new_plane_state);
+	else
+		intel_disable_plane(plane, crtc_state);
+
+	intel_plane_unpin_fb(old_plane_state);
+
+out_free:
+	if (new_crtc_state)
+		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
+	if (ret)
+		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
+	else
+		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
+	return ret;
+
+slow:
+	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
+					      crtc_x, crtc_y, crtc_w, crtc_h,
+					      src_x, src_y, src_w, src_h, ctx);
+}
+
+static const struct drm_plane_funcs intel_cursor_plane_funcs = {
+	.update_plane = intel_legacy_cursor_update,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = intel_plane_destroy,
+	.atomic_duplicate_state = intel_plane_duplicate_state,
+	.atomic_destroy_state = intel_plane_destroy_state,
+	.format_mod_supported = intel_cursor_format_mod_supported,
+};
+
+struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+			  enum pipe pipe)
+{
+	struct intel_plane *cursor;
+	int ret, zpos;
+
+	cursor = intel_plane_alloc();
+	if (IS_ERR(cursor))
+		return cursor;
+
+	cursor->pipe = pipe;
+	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
+	cursor->id = PLANE_CURSOR;
+	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
+
+	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
+		cursor->max_stride = i845_cursor_max_stride;
+		cursor->update_plane = i845_update_cursor;
+		cursor->disable_plane = i845_disable_cursor;
+		cursor->get_hw_state = i845_cursor_get_hw_state;
+		cursor->check_plane = i845_check_cursor;
+	} else {
+		cursor->max_stride = i9xx_cursor_max_stride;
+		cursor->update_plane = i9xx_update_cursor;
+		cursor->disable_plane = i9xx_disable_cursor;
+		cursor->get_hw_state = i9xx_cursor_get_hw_state;
+		cursor->check_plane = i9xx_check_cursor;
+	}
+
+	cursor->cursor.base = ~0;
+	cursor->cursor.cntl = ~0;
+
+	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
+		cursor->cursor.size = ~0;
+
+	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
+				       0, &intel_cursor_plane_funcs,
+				       intel_cursor_formats,
+				       ARRAY_SIZE(intel_cursor_formats),
+				       cursor_format_modifiers,
+				       DRM_PLANE_TYPE_CURSOR,
+				       "cursor %c", pipe_name(pipe));
+	if (ret)
+		goto fail;
+
+	if (INTEL_GEN(dev_priv) >= 4)
+		drm_plane_create_rotation_property(&cursor->base,
+						   DRM_MODE_ROTATE_0,
+						   DRM_MODE_ROTATE_0 |
+						   DRM_MODE_ROTATE_180);
+
+	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
+	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
+
+	if (INTEL_GEN(dev_priv) >= 12)
+		drm_plane_enable_fb_damage_clips(&cursor->base);
+
+	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
+
+	return cursor;
+
+fail:
+	intel_plane_free(cursor);
+
+	return ERR_PTR(ret);
+}
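intel_cursor_position() above packs the cursor origin in sign-magnitude form: a sign bit plus the absolute value per axis. A self-contained re-derivation, assuming the x field at bit 0, the y field at bit 16, and a per-axis sign bit at bit 15 of each field (the EX_* values are assumptions, not the driver's #defines):

#include <stdint.h>

#define EX_SIGN		(1u << 15)	/* assumed per-axis sign bit */
#define EX_X_SHIFT	0		/* assumed x field offset */
#define EX_Y_SHIFT	16		/* assumed y field offset */

static uint32_t example_cursor_pos(int x, int y)
{
	uint32_t pos = 0;

	if (x < 0) {
		pos |= EX_SIGN << EX_X_SHIFT;
		x = -x;
	}
	pos |= (uint32_t)x << EX_X_SHIFT;

	if (y < 0) {
		pos |= EX_SIGN << EX_Y_SHIFT;
		y = -y;
	}
	pos |= (uint32_t)y << EX_Y_SHIFT;

	return pos;	/* e.g. (-12, 34) -> 0x0022800c */
}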
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.h b/drivers/gpu/drm/i915/display/intel_cursor.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce333bf4c2d5596742f11dd2623693b23050a790
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cursor.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_CURSOR_H_
+#define _INTEL_CURSOR_H_
+
+enum pipe;
+struct drm_i915_private;
+struct intel_plane;
+
+struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+			  enum pipe pipe);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index dc13d1814d95d6c2074709754df8f71ee463655f..1bb40ec5fe5dbfd7525d72806fa3f61f9eb79835 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -46,10 +46,12 @@
 #include "intel_hotplug.h"
 #include "intel_lspcon.h"
 #include "intel_panel.h"
+#include "intel_pps.h"
 #include "intel_psr.h"
 #include "intel_sprite.h"
 #include "intel_tc.h"
 #include "intel_vdsc.h"
+#include "intel_vrr.h"
 
 struct ddi_buf_trans {
 	u32 trans1;	/* balance leg enable, de-emph level */
@@ -611,6 +613,34 @@ static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2[]
 	{ 0xA, 0x35, 0x3F, 0x00, 0x00 },        /* 350   350      0.0   */
 };
 
+static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = {
+						/* NT mV Trans mV db    */
+	{ 0xA, 0x32, 0x3F, 0x00, 0x00 },	/* 350   350      0.0   */
+	{ 0xA, 0x48, 0x35, 0x00, 0x0A },	/* 350   500      3.1   */
+	{ 0xC, 0x63, 0x2F, 0x00, 0x10 },	/* 350   700      6.0   */
+	{ 0x6, 0x7F, 0x2C, 0x00, 0x13 },	/* 350   900      8.2   */
+	{ 0xA, 0x43, 0x3F, 0x00, 0x00 },	/* 500   500      0.0   */
+	{ 0xC, 0x60, 0x36, 0x00, 0x09 },	/* 500   700      2.9   */
+	{ 0x6, 0x7F, 0x30, 0x00, 0x0F },	/* 500   900      5.1   */
+	{ 0xC, 0x60, 0x3F, 0x00, 0x00 },	/* 650   700      0.6   */
+	{ 0x6, 0x7F, 0x37, 0x00, 0x08 },	/* 600   900      3.5   */
+	{ 0x6, 0x7F, 0x3F, 0x00, 0x00 },	/* 900   900      0.0   */
+};
+
+static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+						/* NT mV Trans mV db    */
+	{ 0xA, 0x32, 0x3F, 0x00, 0x00 },	/* 350   350      0.0   */
+	{ 0xA, 0x48, 0x35, 0x00, 0x0A },	/* 350   500      3.1   */
+	{ 0xC, 0x63, 0x2F, 0x00, 0x10 },	/* 350   700      6.0   */
+	{ 0x6, 0x7F, 0x2C, 0x00, 0x13 },	/* 350   900      8.2   */
+	{ 0xA, 0x43, 0x3F, 0x00, 0x00 },	/* 500   500      0.0   */
+	{ 0xC, 0x60, 0x36, 0x00, 0x09 },	/* 500   700      2.9   */
+	{ 0x6, 0x7F, 0x30, 0x00, 0x0F },	/* 500   900      5.1   */
+	{ 0xC, 0x58, 0x3F, 0x00, 0x00 },	/* 650   700      0.6   */
+	{ 0x6, 0x7F, 0x35, 0x00, 0x0A },	/* 600   900      3.5   */
+	{ 0x6, 0x7F, 0x3F, 0x00, 0x00 },	/* 900   900      0.0   */
+};
+
 struct icl_mg_phy_ddi_buf_trans {
 	u32 cri_txdeemph_override_11_6;
 	u32 cri_txdeemph_override_5_0;
@@ -766,6 +796,34 @@ static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_ho
 	{ 0x6, 0x7F, 0x3F, 0x00, 0x00 },	/* 2	1	*/
 };
 
+static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr[] = {
+						/* NT mV Trans mV db    */
+	{ 0xA, 0x2F, 0x3F, 0x00, 0x00 },	/* 350   350      0.0   */
+	{ 0xA, 0x4F, 0x37, 0x00, 0x08 },	/* 350   500      3.1   */
+	{ 0xC, 0x63, 0x2F, 0x00, 0x10 },	/* 350   700      6.0   */
+	{ 0x6, 0x7D, 0x2A, 0x00, 0x15 },	/* 350   900      8.2   */
+	{ 0xA, 0x4C, 0x3F, 0x00, 0x00 },	/* 500   500      0.0   */
+	{ 0xC, 0x73, 0x34, 0x00, 0x0B },	/* 500   700      2.9   */
+	{ 0x6, 0x7F, 0x2F, 0x00, 0x10 },	/* 500   900      5.1   */
+	{ 0xC, 0x6E, 0x3E, 0x00, 0x01 },	/* 650   700      0.6   */
+	{ 0x6, 0x7F, 0x35, 0x00, 0x0A },	/* 600   900      3.5   */
+	{ 0x6, 0x7F, 0x3F, 0x00, 0x00 },	/* 900   900      0.0   */
+};
+
+static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+						/* NT mV Trans mV db    */
+	{ 0xA, 0x35, 0x3F, 0x00, 0x00 },	/* 350   350      0.0   */
+	{ 0xA, 0x50, 0x38, 0x00, 0x07 },	/* 350   500      3.1   */
+	{ 0xC, 0x61, 0x33, 0x00, 0x0C },	/* 350   700      6.0   */
+	{ 0x6, 0x7F, 0x2E, 0x00, 0x11 },	/* 350   900      8.2   */
+	{ 0xA, 0x47, 0x3F, 0x00, 0x00 },	/* 500   500      0.0   */
+	{ 0xC, 0x5F, 0x38, 0x00, 0x07 },	/* 500   700      2.9   */
+	{ 0x6, 0x7F, 0x2F, 0x00, 0x10 },	/* 500   900      5.1   */
+	{ 0xC, 0x5F, 0x3F, 0x00, 0x00 },	/* 650   700      0.6   */
+	{ 0x6, 0x7E, 0x36, 0x00, 0x09 },	/* 600   900      3.5   */
+	{ 0x6, 0x7F, 0x3F, 0x00, 0x00 },	/* 900   900      0.0   */
+};
+
 static bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table)
 {
 	return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
@@ -1093,6 +1151,12 @@ icl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
 	} else if (dev_priv->vbt.edp.low_vswing) {
 		*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
 		return icl_combo_phy_ddi_translations_edp_hbr2;
+	} else if (IS_DG1(dev_priv) && crtc_state->port_clock > 270000) {
+		*n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_hbr2_hbr3);
+		return dg1_combo_phy_ddi_translations_dp_hbr2_hbr3;
+	} else if (IS_DG1(dev_priv)) {
+		*n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_rbr_hbr);
+		return dg1_combo_phy_ddi_translations_dp_rbr_hbr;
 	}
 
 	return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
@@ -1259,7 +1323,10 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
 	if (crtc_state->port_clock > 270000) {
-		if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
+		if (IS_ROCKETLAKE(dev_priv)) {
+			*n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr2_hbr3);
+			return rkl_combo_phy_ddi_translations_dp_hbr2_hbr3;
+		} else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
 			*n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2);
 			return tgl_uy_combo_phy_ddi_translations_dp_hbr2;
 		} else {
@@ -1267,8 +1334,13 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
 			return tgl_combo_phy_ddi_translations_dp_hbr2;
 		}
 	} else {
-		*n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
-		return tgl_combo_phy_ddi_translations_dp_hbr;
+		if (IS_ROCKETLAKE(dev_priv)) {
+			*n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr);
+			return rkl_combo_phy_ddi_translations_dp_hbr;
+		} else {
+			*n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
+			return tgl_combo_phy_ddi_translations_dp_hbr;
+		}
 	}
 }
 
@@ -2029,9 +2101,9 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
 	}
 }
 
-int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
-				     enum transcoder cpu_transcoder,
-				     bool enable)
+int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
+			       enum transcoder cpu_transcoder,
+			       bool enable, u32 hdcp_mask)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2046,9 +2118,9 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
 
 	tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
 	if (enable)
-		tmp |= TRANS_DDI_HDCP_SIGNALLING;
+		tmp |= hdcp_mask;
 	else
-		tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
+		tmp &= ~hdcp_mask;
 	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp);
 	intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
 	return ret;
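Renaming intel_ddi_toggle_hdcp_signalling() to intel_ddi_toggle_hdcp_bits() turns the hardwired TRANS_DDI_HDCP_SIGNALLING toggle into a caller-supplied mask, so HDCP 1.4 and HDCP 2.2 can flip different TRANS_DDI_FUNC_CTL bits through one helper. The old behaviour is expressible as a thin wrapper; a sketch, not part of this patch:

	static int toggle_hdcp14_signalling(struct intel_encoder *encoder,
					    enum transcoder cpu_transcoder,
					    bool enable)
	{
		/* HDCP 1.4 only ever toggles the signalling bit. */
		return intel_ddi_toggle_hdcp_bits(encoder, cpu_transcoder,
						  enable,
						  TRANS_DDI_HDCP_SIGNALLING);
	}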
@@ -2285,18 +2357,23 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
 	dig_port = enc_to_dig_port(encoder);
 
 	if (!intel_phy_is_tc(dev_priv, phy) ||
-	    dig_port->tc_mode != TC_PORT_TBT_ALT)
-		intel_display_power_get(dev_priv,
-					dig_port->ddi_io_power_domain);
+	    dig_port->tc_mode != TC_PORT_TBT_ALT) {
+		drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+		dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+								   dig_port->ddi_io_power_domain);
+	}
 
 	/*
 	 * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
 	 * ports.
 	 */
 	if (intel_crtc_has_dp_encoder(crtc_state) ||
-	    intel_phy_is_tc(dev_priv, phy))
-		intel_display_power_get(dev_priv,
-					intel_ddi_main_link_aux_domain(dig_port));
+	    intel_phy_is_tc(dev_priv, phy)) {
+		drm_WARN_ON(&dev_priv->drm, dig_port->aux_wakeref);
+		dig_port->aux_wakeref =
+			intel_display_power_get(dev_priv,
+						intel_ddi_main_link_aux_domain(dig_port));
+	}
 }
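The power-domain hunks in this patch all follow one pattern: the intel_wakeref_t cookie returned by intel_display_power_get() is stored in the digital port, the drm_WARN_ON() catches an unbalanced second get, and the disable path releases exactly that cookie via fetch_and_zero(), which reads the slot and clears it in one step. A condensed sketch of the pattern:

	static void example_aux_power_get(struct drm_i915_private *i915,
					  struct intel_digital_port *dig_port)
	{
		/* Acquire: store the cookie so the release is verifiable. */
		drm_WARN_ON(&i915->drm, dig_port->aux_wakeref); /* double get? */
		dig_port->aux_wakeref =
			intel_display_power_get(i915,
						intel_ddi_main_link_aux_domain(dig_port));
	}

	static void example_aux_power_put(struct drm_i915_private *i915,
					  struct intel_digital_port *dig_port)
	{
		/* Release: hand back the stored cookie and zero the slot. */
		intel_display_power_put(i915,
					intel_ddi_main_link_aux_domain(dig_port),
					fetch_and_zero(&dig_port->aux_wakeref));
	}

Compared to intel_display_power_put_unchecked(), this lets the power-domain debug tracking attribute a leaked reference to the exact get that took it.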
 
 void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
@@ -2626,15 +2703,11 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
 		ddi_translations = ehl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
 	else
 		ddi_translations = icl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
-	if (!ddi_translations)
-		return;
 
-	if (level >= n_entries) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "DDI translation not found for level %d. Using %d instead.",
-			    level, n_entries - 1);
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+		return;
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
 		level = n_entries - 1;
-	}
 
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2758,12 +2831,11 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 		return;
 
 	ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
-	if (level >= n_entries) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "DDI translation not found for level %d. Using %d instead.",
-			    level, n_entries - 1);
+
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+		return;
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
 		level = n_entries - 1;
-	}
 
 	/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
 	for (ln = 0; ln < 2; ln++) {
@@ -2898,7 +2970,9 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 
 	ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
 
-	if (level >= n_entries)
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+		return;
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
 		level = n_entries - 1;
 
 	dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
@@ -3485,6 +3559,22 @@ i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
 		return DP_TP_STATUS(encoder->port);
 }
 
+static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel_dp,
+							  const struct intel_crtc_state *crtc_state,
+							  bool enable)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+	if (!crtc_state->vrr.enable)
+		return;
+
+	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_DOWNSPREAD_CTRL,
+			       enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0) <= 0)
+		drm_dbg_kms(&i915->drm,
+			    "Failed to set MSA_TIMING_PAR_IGNORE %s in the sink\n",
+			    enable ? "enable" : "disable");
+}
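Variable refresh rate only works if the sink stops validating the transmitted timings against the MSA, so this helper sets or clears DP_MSA_TIMING_PAR_IGNORE_EN in DP_DOWNSPREAD_CTRL over AUX whenever the state being programmed has VRR enabled; the matching disable call is added to intel_disable_ddi_dp() later in this patch. Whether a sink can honour the bit is advertised in its DPCD; a sketch of the capability check (drm core carries an equivalent helper, if memory serves):

	/* DPCD 0x007, bit 6: sink can ignore MSA timing parameters. */
	static bool sink_can_ignore_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
	{
		return dpcd[DP_DOWN_STREAM_PORT_COUNT] &
		       DP_MSA_TIMING_PAR_IGNORED;
	}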
+
 static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
 					const struct intel_crtc_state *crtc_state)
 {
@@ -3512,12 +3602,6 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
 	val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
 	val |= DP_TP_CTL_FEC_ENABLE;
 	intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
-
-	if (intel_de_wait_for_set(dev_priv,
-				  dp_tp_status_reg(encoder, crtc_state),
-				  DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
-		drm_err(&dev_priv->drm,
-			"Timed out waiting for FEC Enable Status\n");
 }
 
 static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
@@ -3578,7 +3662,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
 	 */
 
 	/* 2. Enable Panel Power if PPS is required */
-	intel_edp_panel_on(intel_dp);
+	intel_pps_on(intel_dp);
 
 	/*
 	 * 3. For non-TBT Type-C ports, set FIA lane count
@@ -3599,9 +3683,11 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
 
 	/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
 	if (!intel_phy_is_tc(dev_priv, phy) ||
-	    dig_port->tc_mode != TC_PORT_TBT_ALT)
-		intel_display_power_get(dev_priv,
-					dig_port->ddi_io_power_domain);
+	    dig_port->tc_mode != TC_PORT_TBT_ALT) {
+		drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+		dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+								   dig_port->ddi_io_power_domain);
+	}
 
 	/* 6. Program DP_MODE */
 	icl_program_mg_dp_mode(dig_port, crtc_state);
@@ -3658,6 +3744,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
 	if (!is_mst)
 		intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
 
+	intel_dp_configure_protocol_converter(intel_dp, crtc_state);
 	intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
 	/*
 	 * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
@@ -3666,6 +3753,9 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
 	 */
 	intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
 
+	intel_dp_check_frl_training(intel_dp);
+	intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
+
 	/*
 	 * 7.i Follow DisplayPort specification training sequence (see notes for
 	 *     failure handling)
@@ -3708,14 +3798,16 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
 				 crtc_state->port_clock,
 				 crtc_state->lane_count);
 
-	intel_edp_panel_on(intel_dp);
+	intel_pps_on(intel_dp);
 
 	intel_ddi_clk_select(encoder, crtc_state);
 
 	if (!intel_phy_is_tc(dev_priv, phy) ||
-	    dig_port->tc_mode != TC_PORT_TBT_ALT)
-		intel_display_power_get(dev_priv,
-					dig_port->ddi_io_power_domain);
+	    dig_port->tc_mode != TC_PORT_TBT_ALT) {
+		drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+		dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+								   dig_port->ddi_io_power_domain);
+	}
 
 	icl_program_mg_dp_mode(dig_port, crtc_state);
 
@@ -3786,7 +3878,9 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
 	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
 	intel_ddi_clk_select(encoder, crtc_state);
 
-	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+	drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+	dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+							   dig_port->ddi_io_power_domain);
 
 	icl_program_mg_dp_mode(dig_port, crtc_state);
 
@@ -3939,13 +4033,14 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
 	if (INTEL_GEN(dev_priv) >= 12)
 		intel_ddi_disable_pipe_clock(old_crtc_state);
 
-	intel_edp_panel_vdd_on(intel_dp);
-	intel_edp_panel_off(intel_dp);
+	intel_pps_vdd_on(intel_dp);
+	intel_pps_off(intel_dp);
 
 	if (!intel_phy_is_tc(dev_priv, phy) ||
 	    dig_port->tc_mode != TC_PORT_TBT_ALT)
-		intel_display_power_put_unchecked(dev_priv,
-						  dig_port->ddi_io_power_domain);
+		intel_display_power_put(dev_priv,
+					dig_port->ddi_io_power_domain,
+					fetch_and_zero(&dig_port->ddi_io_wakeref));
 
 	intel_ddi_clk_disable(encoder);
 }
@@ -3966,8 +4061,9 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
 
 	intel_disable_ddi_buf(encoder, old_crtc_state);
 
-	intel_display_power_put_unchecked(dev_priv,
-					  dig_port->ddi_io_power_domain);
+	intel_display_power_put(dev_priv,
+				dig_port->ddi_io_power_domain,
+				fetch_and_zero(&dig_port->ddi_io_wakeref));
 
 	intel_ddi_clk_disable(encoder);
 
@@ -3989,6 +4085,8 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
 
 		intel_disable_pipe(old_crtc_state);
 
+		intel_vrr_disable(old_crtc_state);
+
 		intel_ddi_disable_transcoder_func(old_crtc_state);
 
 		intel_dsc_disable(old_crtc_state);
@@ -4040,8 +4138,9 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
 		icl_unmap_plls_to_ports(encoder);
 
 	if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port)
-		intel_display_power_put_unchecked(dev_priv,
-						  intel_ddi_main_link_aux_domain(dig_port));
+		intel_display_power_put(dev_priv,
+					intel_ddi_main_link_aux_domain(dig_port),
+					fetch_and_zero(&dig_port->aux_wakeref));
 
 	if (is_tc_port)
 		intel_tc_port_put_link(dig_port);
@@ -4126,6 +4225,7 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	enum port port = encoder->port;
 
 	if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
@@ -4133,7 +4233,10 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
 
 	intel_edp_backlight_on(crtc_state, conn_state);
 	intel_psr_enable(intel_dp, crtc_state, conn_state);
-	intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
+
+	if (!dig_port->lspcon.active || dig_port->dp.has_hdmi_sink)
+		intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
+
 	intel_edp_drrs_enable(intel_dp, crtc_state);
 
 	if (crtc_state->has_audio)
@@ -4237,6 +4340,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
 	if (!crtc_state->bigjoiner_slave)
 		intel_ddi_enable_transcoder_func(encoder, crtc_state);
 
+	intel_vrr_enable(encoder, crtc_state);
+
 	intel_enable_pipe(crtc_state);
 
 	intel_crtc_vblank_on(crtc_state);
@@ -4250,7 +4355,7 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
 	if (conn_state->content_protection ==
 	    DRM_MODE_CONTENT_PROTECTION_DESIRED)
 		intel_hdcp_enable(to_intel_connector(conn_state->connector),
-				  crtc_state->cpu_transcoder,
+				  crtc_state,
 				  (u8)conn_state->hdcp_content_type);
 }
 
@@ -4273,6 +4378,9 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state,
 	/* Disable the decompression in DP Sink */
 	intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
 					      false);
+	/* Disable Ignore_MSA bit in DP Sink */
+	intel_dp_sink_set_msa_timing_par_ignore_state(intel_dp, old_crtc_state,
+						      false);
 }
 
 static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
@@ -4378,9 +4486,12 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
 	if (is_tc_port)
 		intel_tc_port_get_link(dig_port, crtc_state->lane_count);
 
-	if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
-		intel_display_power_get(dev_priv,
-					intel_ddi_main_link_aux_domain(dig_port));
+	if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port) {
+		drm_WARN_ON(&dev_priv->drm, dig_port->aux_wakeref);
+		dig_port->aux_wakeref =
+			intel_display_power_get(dev_priv,
+						intel_ddi_main_link_aux_domain(dig_port));
+	}
 
 	if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT)
 		/*
@@ -4593,6 +4704,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	u32 temp, flags = 0;
 
 	temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -4667,9 +4779,12 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
 				    pipe_config->fec_enable);
 		}
 
-		pipe_config->infoframes.enable |=
-			intel_hdmi_infoframes_enabled(encoder, pipe_config);
-
+		if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink)
+			pipe_config->infoframes.enable |=
+				intel_lspcon_infoframes_enabled(encoder, pipe_config);
+		else
+			pipe_config->infoframes.enable |=
+				intel_hdmi_infoframes_enabled(encoder, pipe_config);
 		break;
 	case TRANS_DDI_MODE_SELECT_DP_MST:
 		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
@@ -4956,6 +5071,8 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
 	intel_dp_encoder_flush_work(encoder);
 
 	drm_encoder_cleanup(encoder);
+	if (dig_port)
+		kfree(dig_port->hdcp_port_data.streams);
 	kfree(dig_port);
 }
 
@@ -5109,12 +5226,20 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+	struct intel_dp *intel_dp = &dig_port->dp;
 	enum phy phy = intel_port_to_phy(i915, encoder->port);
 	bool is_tc = intel_phy_is_tc(i915, phy);
 	struct drm_modeset_acquire_ctx ctx;
 	enum intel_hotplug_state state;
 	int ret;
 
+	if (intel_dp->compliance.test_active &&
+	    intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
+		intel_dp_phy_test(encoder);
+		/* just do the PHY test and nothing else */
+		return INTEL_HOTPLUG_UNCHANGED;
+	}
+
 	state = intel_encoder_hotplug(encoder, connector);
 
 	drm_modeset_acquire_init(&ctx, 0);
@@ -5272,8 +5397,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
 static bool hti_uses_phy(struct drm_i915_private *i915, enum phy phy)
 {
 	return i915->hti_state & HDPORT_ENABLED &&
-		(i915->hti_state & HDPORT_PHY_USED_DP(phy) ||
-		 i915->hti_state & HDPORT_PHY_USED_HDMI(phy));
+	       i915->hti_state & HDPORT_DDI_USED(phy);
 }
 
 static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index dcc711cfe4fe71f9057608c4dbb9bc3084b090b4..a4dd815c000012ef7c300c5be676127a5f2633bb 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -50,9 +50,9 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp,
 		      const struct intel_crtc_state *crtc_state);
 u32 ddi_signal_levels(struct intel_dp *intel_dp,
 		      const struct intel_crtc_state *crtc_state);
-int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
-				     enum transcoder cpu_transcoder,
-				     bool enable);
+int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
+			       enum transcoder cpu_transcoder,
+			       bool enable, u32 hdcp_mask);
 void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
 
 #endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 61be6bed916291d614245b10a8cbe1299ff7eda1..7ea1e5b487b61cf1c44badfba3df507c820a84a1 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -45,8 +45,10 @@
 
 #include "display/intel_crt.h"
 #include "display/intel_ddi.h"
+#include "display/intel_display_debugfs.h"
 #include "display/intel_dp.h"
 #include "display/intel_dp_mst.h"
+#include "display/intel_dpll.h"
 #include "display/intel_dpll_mgr.h"
 #include "display/intel_dsi.h"
 #include "display/intel_dvo.h"
@@ -56,6 +58,9 @@
 #include "display/intel_sdvo.h"
 #include "display/intel_tv.h"
 #include "display/intel_vdsc.h"
+#include "display/intel_vrr.h"
+
+#include "gem/i915_gem_object.h"
 
 #include "gt/intel_rps.h"
 
@@ -67,10 +72,12 @@
 #include "intel_bw.h"
 #include "intel_cdclk.h"
 #include "intel_color.h"
+#include "intel_crtc.h"
 #include "intel_csr.h"
 #include "intel_display_types.h"
 #include "intel_dp_link_training.h"
 #include "intel_fbc.h"
+#include "intel_fdi.h"
 #include "intel_fbdev.h"
 #include "intel_fifo_underrun.h"
 #include "intel_frontbuffer.h"
@@ -79,72 +86,14 @@
 #include "intel_overlay.h"
 #include "intel_pipe_crc.h"
 #include "intel_pm.h"
+#include "intel_pps.h"
 #include "intel_psr.h"
 #include "intel_quirks.h"
 #include "intel_sideband.h"
 #include "intel_sprite.h"
 #include "intel_tc.h"
 #include "intel_vga.h"
-
-/* Primary plane formats for gen <= 3 */
-static const u32 i8xx_primary_formats[] = {
-	DRM_FORMAT_C8,
-	DRM_FORMAT_XRGB1555,
-	DRM_FORMAT_RGB565,
-	DRM_FORMAT_XRGB8888,
-};
-
-/* Primary plane formats for ivb (no fp16 due to hw issue) */
-static const u32 ivb_primary_formats[] = {
-	DRM_FORMAT_C8,
-	DRM_FORMAT_RGB565,
-	DRM_FORMAT_XRGB8888,
-	DRM_FORMAT_XBGR8888,
-	DRM_FORMAT_XRGB2101010,
-	DRM_FORMAT_XBGR2101010,
-};
-
-/* Primary plane formats for gen >= 4, except ivb */
-static const u32 i965_primary_formats[] = {
-	DRM_FORMAT_C8,
-	DRM_FORMAT_RGB565,
-	DRM_FORMAT_XRGB8888,
-	DRM_FORMAT_XBGR8888,
-	DRM_FORMAT_XRGB2101010,
-	DRM_FORMAT_XBGR2101010,
-	DRM_FORMAT_XBGR16161616F,
-};
-
-/* Primary plane formats for vlv/chv */
-static const u32 vlv_primary_formats[] = {
-	DRM_FORMAT_C8,
-	DRM_FORMAT_RGB565,
-	DRM_FORMAT_XRGB8888,
-	DRM_FORMAT_XBGR8888,
-	DRM_FORMAT_ARGB8888,
-	DRM_FORMAT_ABGR8888,
-	DRM_FORMAT_XRGB2101010,
-	DRM_FORMAT_XBGR2101010,
-	DRM_FORMAT_ARGB2101010,
-	DRM_FORMAT_ABGR2101010,
-	DRM_FORMAT_XBGR16161616F,
-};
-
-static const u64 i9xx_format_modifiers[] = {
-	I915_FORMAT_MOD_X_TILED,
-	DRM_FORMAT_MOD_LINEAR,
-	DRM_FORMAT_MOD_INVALID
-};
-
-/* Cursor formats */
-static const u32 intel_cursor_formats[] = {
-	DRM_FORMAT_ARGB8888,
-};
-
-static const u64 cursor_format_modifiers[] = {
-	DRM_FORMAT_MOD_LINEAR,
-	DRM_FORMAT_MOD_INVALID
-};
+#include "i9xx_plane.h"
 
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 				struct intel_crtc_state *pipe_config);
@@ -171,18 +120,6 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
 static void intel_modeset_setup_hw_state(struct drm_device *dev,
 					 struct drm_modeset_acquire_ctx *ctx);
-static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
-
-struct intel_limit {
-	struct {
-		int min, max;
-	} dot, vco, n, m, m1, m2, p, p1;
-
-	struct {
-		int dot_limit;
-		int p2_slow, p2_fast;
-	} p2;
-};
 
 /* returns HPLL frequency in kHz */
 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
@@ -241,281 +178,6 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
 		dev_priv->czclk_freq);
 }
 
-/* units of 100MHz */
-static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
-			       const struct intel_crtc_state *pipe_config)
-{
-	if (HAS_DDI(dev_priv))
-		return pipe_config->port_clock; /* SPLL */
-	else
-		return dev_priv->fdi_pll_freq;
-}
-
-static const struct intel_limit intel_limits_i8xx_dac = {
-	.dot = { .min = 25000, .max = 350000 },
-	.vco = { .min = 908000, .max = 1512000 },
-	.n = { .min = 2, .max = 16 },
-	.m = { .min = 96, .max = 140 },
-	.m1 = { .min = 18, .max = 26 },
-	.m2 = { .min = 6, .max = 16 },
-	.p = { .min = 4, .max = 128 },
-	.p1 = { .min = 2, .max = 33 },
-	.p2 = { .dot_limit = 165000,
-		.p2_slow = 4, .p2_fast = 2 },
-};
-
-static const struct intel_limit intel_limits_i8xx_dvo = {
-	.dot = { .min = 25000, .max = 350000 },
-	.vco = { .min = 908000, .max = 1512000 },
-	.n = { .min = 2, .max = 16 },
-	.m = { .min = 96, .max = 140 },
-	.m1 = { .min = 18, .max = 26 },
-	.m2 = { .min = 6, .max = 16 },
-	.p = { .min = 4, .max = 128 },
-	.p1 = { .min = 2, .max = 33 },
-	.p2 = { .dot_limit = 165000,
-		.p2_slow = 4, .p2_fast = 4 },
-};
-
-static const struct intel_limit intel_limits_i8xx_lvds = {
-	.dot = { .min = 25000, .max = 350000 },
-	.vco = { .min = 908000, .max = 1512000 },
-	.n = { .min = 2, .max = 16 },
-	.m = { .min = 96, .max = 140 },
-	.m1 = { .min = 18, .max = 26 },
-	.m2 = { .min = 6, .max = 16 },
-	.p = { .min = 4, .max = 128 },
-	.p1 = { .min = 1, .max = 6 },
-	.p2 = { .dot_limit = 165000,
-		.p2_slow = 14, .p2_fast = 7 },
-};
-
-static const struct intel_limit intel_limits_i9xx_sdvo = {
-	.dot = { .min = 20000, .max = 400000 },
-	.vco = { .min = 1400000, .max = 2800000 },
-	.n = { .min = 1, .max = 6 },
-	.m = { .min = 70, .max = 120 },
-	.m1 = { .min = 8, .max = 18 },
-	.m2 = { .min = 3, .max = 7 },
-	.p = { .min = 5, .max = 80 },
-	.p1 = { .min = 1, .max = 8 },
-	.p2 = { .dot_limit = 200000,
-		.p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit intel_limits_i9xx_lvds = {
-	.dot = { .min = 20000, .max = 400000 },
-	.vco = { .min = 1400000, .max = 2800000 },
-	.n = { .min = 1, .max = 6 },
-	.m = { .min = 70, .max = 120 },
-	.m1 = { .min = 8, .max = 18 },
-	.m2 = { .min = 3, .max = 7 },
-	.p = { .min = 7, .max = 98 },
-	.p1 = { .min = 1, .max = 8 },
-	.p2 = { .dot_limit = 112000,
-		.p2_slow = 14, .p2_fast = 7 },
-};
-
-
-static const struct intel_limit intel_limits_g4x_sdvo = {
-	.dot = { .min = 25000, .max = 270000 },
-	.vco = { .min = 1750000, .max = 3500000},
-	.n = { .min = 1, .max = 4 },
-	.m = { .min = 104, .max = 138 },
-	.m1 = { .min = 17, .max = 23 },
-	.m2 = { .min = 5, .max = 11 },
-	.p = { .min = 10, .max = 30 },
-	.p1 = { .min = 1, .max = 3},
-	.p2 = { .dot_limit = 270000,
-		.p2_slow = 10,
-		.p2_fast = 10
-	},
-};
-
-static const struct intel_limit intel_limits_g4x_hdmi = {
-	.dot = { .min = 22000, .max = 400000 },
-	.vco = { .min = 1750000, .max = 3500000},
-	.n = { .min = 1, .max = 4 },
-	.m = { .min = 104, .max = 138 },
-	.m1 = { .min = 16, .max = 23 },
-	.m2 = { .min = 5, .max = 11 },
-	.p = { .min = 5, .max = 80 },
-	.p1 = { .min = 1, .max = 8},
-	.p2 = { .dot_limit = 165000,
-		.p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
-	.dot = { .min = 20000, .max = 115000 },
-	.vco = { .min = 1750000, .max = 3500000 },
-	.n = { .min = 1, .max = 3 },
-	.m = { .min = 104, .max = 138 },
-	.m1 = { .min = 17, .max = 23 },
-	.m2 = { .min = 5, .max = 11 },
-	.p = { .min = 28, .max = 112 },
-	.p1 = { .min = 2, .max = 8 },
-	.p2 = { .dot_limit = 0,
-		.p2_slow = 14, .p2_fast = 14
-	},
-};
-
-static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
-	.dot = { .min = 80000, .max = 224000 },
-	.vco = { .min = 1750000, .max = 3500000 },
-	.n = { .min = 1, .max = 3 },
-	.m = { .min = 104, .max = 138 },
-	.m1 = { .min = 17, .max = 23 },
-	.m2 = { .min = 5, .max = 11 },
-	.p = { .min = 14, .max = 42 },
-	.p1 = { .min = 2, .max = 6 },
-	.p2 = { .dot_limit = 0,
-		.p2_slow = 7, .p2_fast = 7
-	},
-};
-
-static const struct intel_limit pnv_limits_sdvo = {
-	.dot = { .min = 20000, .max = 400000},
-	.vco = { .min = 1700000, .max = 3500000 },
-	/* Pineview's Ncounter is a ring counter */
-	.n = { .min = 3, .max = 6 },
-	.m = { .min = 2, .max = 256 },
-	/* Pineview only has one combined m divider, which we treat as m2. */
-	.m1 = { .min = 0, .max = 0 },
-	.m2 = { .min = 0, .max = 254 },
-	.p = { .min = 5, .max = 80 },
-	.p1 = { .min = 1, .max = 8 },
-	.p2 = { .dot_limit = 200000,
-		.p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit pnv_limits_lvds = {
-	.dot = { .min = 20000, .max = 400000 },
-	.vco = { .min = 1700000, .max = 3500000 },
-	.n = { .min = 3, .max = 6 },
-	.m = { .min = 2, .max = 256 },
-	.m1 = { .min = 0, .max = 0 },
-	.m2 = { .min = 0, .max = 254 },
-	.p = { .min = 7, .max = 112 },
-	.p1 = { .min = 1, .max = 8 },
-	.p2 = { .dot_limit = 112000,
-		.p2_slow = 14, .p2_fast = 14 },
-};
-
-/* Ironlake / Sandybridge
- *
- * We calculate clock using (register_value + 2) for N/M1/M2, so here
- * the range value for them is (actual_value - 2).
- */
-static const struct intel_limit ilk_limits_dac = {
-	.dot = { .min = 25000, .max = 350000 },
-	.vco = { .min = 1760000, .max = 3510000 },
-	.n = { .min = 1, .max = 5 },
-	.m = { .min = 79, .max = 127 },
-	.m1 = { .min = 12, .max = 22 },
-	.m2 = { .min = 5, .max = 9 },
-	.p = { .min = 5, .max = 80 },
-	.p1 = { .min = 1, .max = 8 },
-	.p2 = { .dot_limit = 225000,
-		.p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit ilk_limits_single_lvds = {
-	.dot = { .min = 25000, .max = 350000 },
-	.vco = { .min = 1760000, .max = 3510000 },
-	.n = { .min = 1, .max = 3 },
-	.m = { .min = 79, .max = 118 },
-	.m1 = { .min = 12, .max = 22 },
-	.m2 = { .min = 5, .max = 9 },
-	.p = { .min = 28, .max = 112 },
-	.p1 = { .min = 2, .max = 8 },
-	.p2 = { .dot_limit = 225000,
-		.p2_slow = 14, .p2_fast = 14 },
-};
-
-static const struct intel_limit ilk_limits_dual_lvds = {
-	.dot = { .min = 25000, .max = 350000 },
-	.vco = { .min = 1760000, .max = 3510000 },
-	.n = { .min = 1, .max = 3 },
-	.m = { .min = 79, .max = 127 },
-	.m1 = { .min = 12, .max = 22 },
-	.m2 = { .min = 5, .max = 9 },
-	.p = { .min = 14, .max = 56 },
-	.p1 = { .min = 2, .max = 8 },
-	.p2 = { .dot_limit = 225000,
-		.p2_slow = 7, .p2_fast = 7 },
-};
-
-/* LVDS 100mhz refclk limits. */
-static const struct intel_limit ilk_limits_single_lvds_100m = {
-	.dot = { .min = 25000, .max = 350000 },
-	.vco = { .min = 1760000, .max = 3510000 },
-	.n = { .min = 1, .max = 2 },
-	.m = { .min = 79, .max = 126 },
-	.m1 = { .min = 12, .max = 22 },
-	.m2 = { .min = 5, .max = 9 },
-	.p = { .min = 28, .max = 112 },
-	.p1 = { .min = 2, .max = 8 },
-	.p2 = { .dot_limit = 225000,
-		.p2_slow = 14, .p2_fast = 14 },
-};
-
-static const struct intel_limit ilk_limits_dual_lvds_100m = {
-	.dot = { .min = 25000, .max = 350000 },
-	.vco = { .min = 1760000, .max = 3510000 },
-	.n = { .min = 1, .max = 3 },
-	.m = { .min = 79, .max = 126 },
-	.m1 = { .min = 12, .max = 22 },
-	.m2 = { .min = 5, .max = 9 },
-	.p = { .min = 14, .max = 42 },
-	.p1 = { .min = 2, .max = 6 },
-	.p2 = { .dot_limit = 225000,
-		.p2_slow = 7, .p2_fast = 7 },
-};
-
-static const struct intel_limit intel_limits_vlv = {
-	 /*
-	  * These are the data rate limits (measured in fast clocks)
-	  * since those are the strictest limits we have. The fast
-	  * clock and actual rate limits are more relaxed, so checking
-	  * them would make no difference.
-	  */
-	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
-	.vco = { .min = 4000000, .max = 6000000 },
-	.n = { .min = 1, .max = 7 },
-	.m1 = { .min = 2, .max = 3 },
-	.m2 = { .min = 11, .max = 156 },
-	.p1 = { .min = 2, .max = 3 },
-	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
-};
-
-static const struct intel_limit intel_limits_chv = {
-	/*
-	 * These are the data rate limits (measured in fast clocks)
-	 * since those are the strictest limits we have.  The fast
-	 * clock and actual rate limits are more relaxed, so checking
-	 * them would make no difference.
-	 */
-	.dot = { .min = 25000 * 5, .max = 540000 * 5},
-	.vco = { .min = 4800000, .max = 6480000 },
-	.n = { .min = 1, .max = 1 },
-	.m1 = { .min = 2, .max = 2 },
-	.m2 = { .min = 24 << 22, .max = 175 << 22 },
-	.p1 = { .min = 2, .max = 4 },
-	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
-};
-
-static const struct intel_limit intel_limits_bxt = {
-	/* FIXME: find real dot limits */
-	.dot = { .min = 0, .max = INT_MAX },
-	.vco = { .min = 4800000, .max = 6700000 },
-	.n = { .min = 1, .max = 1 },
-	.m1 = { .min = 2, .max = 2 },
-	/* FIXME: find real m2 limits */
-	.m2 = { .min = 2 << 22, .max = 255 << 22 },
-	.p1 = { .min = 2, .max = 4 },
-	.p2 = { .p2_slow = 1, .p2_fast = 20 },
-};
-
 /* WA Display #0827: Gen9:all */
 static void
 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
@@ -541,12 +203,6 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
 }
 
-static bool
-needs_modeset(const struct intel_crtc_state *state)
-{
-	return drm_atomic_crtc_needs_modeset(&state->uapi);
-}
-
 static bool
 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
 {
@@ -566,482 +222,6 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
 		is_trans_port_sync_slave(crtc_state);
 }
 
-/*
- * Platform specific helpers to calculate the port PLL loopback- (clock.m),
- * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
- * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
- * The helpers' return value is the rate of the clock that is fed to the
- * display engine's pipe which can be the above fast dot clock rate or a
- * divided-down version of it.
- */
-/* m1 is reserved as 0 in Pineview, n is a ring counter */
-static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
-{
-	clock->m = clock->m2 + 2;
-	clock->p = clock->p1 * clock->p2;
-	if (WARN_ON(clock->n == 0 || clock->p == 0))
-		return 0;
-	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
-	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
-	return clock->dot;
-}
-
-static u32 i9xx_dpll_compute_m(struct dpll *dpll)
-{
-	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
-}
-
-static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
-{
-	clock->m = i9xx_dpll_compute_m(clock);
-	clock->p = clock->p1 * clock->p2;
-	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
-		return 0;
-	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
-	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
-	return clock->dot;
-}
-
-static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
-{
-	clock->m = clock->m1 * clock->m2;
-	clock->p = clock->p1 * clock->p2;
-	if (WARN_ON(clock->n == 0 || clock->p == 0))
-		return 0;
-	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
-	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
-	return clock->dot / 5;
-}
-
-int chv_calc_dpll_params(int refclk, struct dpll *clock)
-{
-	clock->m = clock->m1 * clock->m2;
-	clock->p = clock->p1 * clock->p2;
-	if (WARN_ON(clock->n == 0 || clock->p == 0))
-		return 0;
-	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
-					   clock->n << 22);
-	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
-	return clock->dot / 5;
-}
-
-/*
- * Returns whether the given set of divisors are valid for a given refclk with
- * the given connectors.
- */
-static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
-			       const struct intel_limit *limit,
-			       const struct dpll *clock)
-{
-	if (clock->n < limit->n.min || limit->n.max < clock->n)
-		return false;
-	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
-		return false;
-	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
-		return false;
-	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
-		return false;
-
-	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
-	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
-		if (clock->m1 <= clock->m2)
-			return false;
-
-	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
-	    !IS_GEN9_LP(dev_priv)) {
-		if (clock->p < limit->p.min || limit->p.max < clock->p)
-			return false;
-		if (clock->m < limit->m.min || limit->m.max < clock->m)
-			return false;
-	}
-
-	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
-		return false;
-	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
-	 * connector, etc., rather than just a single range.
-	 */
-	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
-		return false;
-
-	return true;
-}
-
-static int
-i9xx_select_p2_div(const struct intel_limit *limit,
-		   const struct intel_crtc_state *crtc_state,
-		   int target)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		/*
-		 * For LVDS just rely on its current settings for dual-channel.
-		 * We haven't figured out how to reliably set up different
-		 * single/dual channel state, if we even can.
-		 */
-		if (intel_is_dual_link_lvds(dev_priv))
-			return limit->p2.p2_fast;
-		else
-			return limit->p2.p2_slow;
-	} else {
-		if (target < limit->p2.dot_limit)
-			return limit->p2.p2_slow;
-		else
-			return limit->p2.p2_fast;
-	}
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE.  The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-i9xx_find_best_dpll(const struct intel_limit *limit,
-		    struct intel_crtc_state *crtc_state,
-		    int target, int refclk, struct dpll *match_clock,
-		    struct dpll *best_clock)
-{
-	struct drm_device *dev = crtc_state->uapi.crtc->dev;
-	struct dpll clock;
-	int err = target;
-
-	memset(best_clock, 0, sizeof(*best_clock));
-
-	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
-	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
-	     clock.m1++) {
-		for (clock.m2 = limit->m2.min;
-		     clock.m2 <= limit->m2.max; clock.m2++) {
-			if (clock.m2 >= clock.m1)
-				break;
-			for (clock.n = limit->n.min;
-			     clock.n <= limit->n.max; clock.n++) {
-				for (clock.p1 = limit->p1.min;
-					clock.p1 <= limit->p1.max; clock.p1++) {
-					int this_err;
-
-					i9xx_calc_dpll_params(refclk, &clock);
-					if (!intel_pll_is_valid(to_i915(dev),
-								limit,
-								&clock))
-						continue;
-					if (match_clock &&
-					    clock.p != match_clock->p)
-						continue;
-
-					this_err = abs(clock.dot - target);
-					if (this_err < err) {
-						*best_clock = clock;
-						err = this_err;
-					}
-				}
-			}
-		}
-	}
-
-	return (err != target);
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE.  The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-pnv_find_best_dpll(const struct intel_limit *limit,
-		   struct intel_crtc_state *crtc_state,
-		   int target, int refclk, struct dpll *match_clock,
-		   struct dpll *best_clock)
-{
-	struct drm_device *dev = crtc_state->uapi.crtc->dev;
-	struct dpll clock;
-	int err = target;
-
-	memset(best_clock, 0, sizeof(*best_clock));
-
-	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
-	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
-	     clock.m1++) {
-		for (clock.m2 = limit->m2.min;
-		     clock.m2 <= limit->m2.max; clock.m2++) {
-			for (clock.n = limit->n.min;
-			     clock.n <= limit->n.max; clock.n++) {
-				for (clock.p1 = limit->p1.min;
-					clock.p1 <= limit->p1.max; clock.p1++) {
-					int this_err;
-
-					pnv_calc_dpll_params(refclk, &clock);
-					if (!intel_pll_is_valid(to_i915(dev),
-								limit,
-								&clock))
-						continue;
-					if (match_clock &&
-					    clock.p != match_clock->p)
-						continue;
-
-					this_err = abs(clock.dot - target);
-					if (this_err < err) {
-						*best_clock = clock;
-						err = this_err;
-					}
-				}
-			}
-		}
-	}
-
-	return (err != target);
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE.  The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-g4x_find_best_dpll(const struct intel_limit *limit,
-		   struct intel_crtc_state *crtc_state,
-		   int target, int refclk, struct dpll *match_clock,
-		   struct dpll *best_clock)
-{
-	struct drm_device *dev = crtc_state->uapi.crtc->dev;
-	struct dpll clock;
-	int max_n;
-	bool found = false;
-	/* approximately equals target * 0.00585 */
-	int err_most = (target >> 8) + (target >> 9);
-
-	memset(best_clock, 0, sizeof(*best_clock));
-
-	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
-	max_n = limit->n.max;
-	/* based on hardware requirement, prefer smaller n to precision */
-	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
-		/* based on hardware requirement, prefere larger m1,m2 */
-		for (clock.m1 = limit->m1.max;
-		     clock.m1 >= limit->m1.min; clock.m1--) {
-			for (clock.m2 = limit->m2.max;
-			     clock.m2 >= limit->m2.min; clock.m2--) {
-				for (clock.p1 = limit->p1.max;
-				     clock.p1 >= limit->p1.min; clock.p1--) {
-					int this_err;
-
-					i9xx_calc_dpll_params(refclk, &clock);
-					if (!intel_pll_is_valid(to_i915(dev),
-								limit,
-								&clock))
-						continue;
-
-					this_err = abs(clock.dot - target);
-					if (this_err < err_most) {
-						*best_clock = clock;
-						err_most = this_err;
-						max_n = clock.n;
-						found = true;
-					}
-				}
-			}
-		}
-	}
-	return found;
-}
-
-/*
- * Check if the calculated PLL configuration is more optimal compared to the
- * best configuration and error found so far. Return the calculated error.
- */
-static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
-			       const struct dpll *calculated_clock,
-			       const struct dpll *best_clock,
-			       unsigned int best_error_ppm,
-			       unsigned int *error_ppm)
-{
-	/*
-	 * For CHV ignore the error and consider only the P value.
-	 * Prefer a bigger P value based on HW requirements.
-	 */
-	if (IS_CHERRYVIEW(to_i915(dev))) {
-		*error_ppm = 0;
-
-		return calculated_clock->p > best_clock->p;
-	}
-
-	if (drm_WARN_ON_ONCE(dev, !target_freq))
-		return false;
-
-	*error_ppm = div_u64(1000000ULL *
-				abs(target_freq - calculated_clock->dot),
-			     target_freq);
-	/*
-	 * Prefer a better P value over a better (smaller) error if the error
-	 * is small. Ensure this preference for future configurations too by
-	 * setting the error to 0.
-	 */
-	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
-		*error_ppm = 0;
-
-		return true;
-	}
-
-	return *error_ppm + 10 < best_error_ppm;
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE.  The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool
-vlv_find_best_dpll(const struct intel_limit *limit,
-		   struct intel_crtc_state *crtc_state,
-		   int target, int refclk, struct dpll *match_clock,
-		   struct dpll *best_clock)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_device *dev = crtc->base.dev;
-	struct dpll clock;
-	unsigned int bestppm = 1000000;
-	/* min update 19.2 MHz */
-	int max_n = min(limit->n.max, refclk / 19200);
-	bool found = false;
-
-	target *= 5; /* fast clock */
-
-	memset(best_clock, 0, sizeof(*best_clock));
-
-	/* based on hardware requirement, prefer smaller n to precision */
-	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
-		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
-			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
-			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
-				clock.p = clock.p1 * clock.p2;
-				/* based on hardware requirement, prefer bigger m1,m2 values */
-				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
-					unsigned int ppm;
-
-					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
-								     refclk * clock.m1);
-
-					vlv_calc_dpll_params(refclk, &clock);
-
-					if (!intel_pll_is_valid(to_i915(dev),
-								limit,
-								&clock))
-						continue;
-
-					if (!vlv_PLL_is_optimal(dev, target,
-								&clock,
-								best_clock,
-								bestppm, &ppm))
-						continue;
-
-					*best_clock = clock;
-					bestppm = ppm;
-					found = true;
-				}
-			}
-		}
-	}
-
-	return found;
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE.  The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool
-chv_find_best_dpll(const struct intel_limit *limit,
-		   struct intel_crtc_state *crtc_state,
-		   int target, int refclk, struct dpll *match_clock,
-		   struct dpll *best_clock)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_device *dev = crtc->base.dev;
-	unsigned int best_error_ppm;
-	struct dpll clock;
-	u64 m2;
-	int found = false;
-
-	memset(best_clock, 0, sizeof(*best_clock));
-	best_error_ppm = 1000000;
-
-	/*
-	 * Based on hardware doc, the n always set to 1, and m1 always
-	 * set to 2.  If requires to support 200Mhz refclk, we need to
-	 * revisit this because n may not 1 anymore.
-	 */
-	clock.n = 1, clock.m1 = 2;
-	target *= 5;	/* fast clock */
-
-	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
-		for (clock.p2 = limit->p2.p2_fast;
-				clock.p2 >= limit->p2.p2_slow;
-				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
-			unsigned int error_ppm;
-
-			clock.p = clock.p1 * clock.p2;
-
-			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
-						   refclk * clock.m1);
-
-			if (m2 > INT_MAX/clock.m1)
-				continue;
-
-			clock.m2 = m2;
-
-			chv_calc_dpll_params(refclk, &clock);
-
-			if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
-				continue;
-
-			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
-						best_error_ppm, &error_ppm))
-				continue;
-
-			*best_clock = clock;
-			best_error_ppm = error_ppm;
-			found = true;
-		}
-	}
-
-	return found;
-}
-
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
-			struct dpll *best_clock)
-{
-	int refclk = 100000;
-	const struct intel_limit *limit = &intel_limits_bxt;
-
-	return chv_find_best_dpll(limit, crtc_state,
-				  crtc_state->port_clock, refclk,
-				  NULL, best_clock);
-}
-
 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
 				    enum pipe pipe)
 {
@@ -1315,12 +495,6 @@ static void assert_planes_disabled(struct intel_crtc *crtc)
 		assert_plane_disabled(plane);
 }
 
-static void assert_vblank_disabled(struct drm_crtc *crtc)
-{
-	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
-		drm_crtc_vblank_put(crtc);
-}
-
 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
 				    enum pipe pipe)
 {
@@ -1672,7 +846,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
 		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
 		/* Configure frame start delay to match the CPU */
 		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
-		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
 		intel_de_write(dev_priv, reg, val);
 	}
 
@@ -1683,7 +857,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
 	if (HAS_PCH_IBX(dev_priv)) {
 		/* Configure frame start delay to match the CPU */
 		val &= ~TRANS_FRAME_START_DELAY_MASK;
-		val |= TRANS_FRAME_START_DELAY(0);
+		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
 
 		/*
 		 * Make the BPC in transcoder be consistent with
@@ -1728,7 +902,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
 	/* Configure frame start delay to match the CPU */
 	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
-	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+	val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
 	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
 
 	val = TRANS_ENABLE;
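All three frame-start-delay hunks above replace the hard-coded 0 with dev_priv->framestart_delay - 1: the new field keeps the delay 1-based (in lines), while the hardware fields are 0-based, hence the subtraction. A hedged sketch of programming it defensively, assuming the 1..4 range this series uses:

	static u32 pch_frame_start_delay(const struct drm_i915_private *i915)
	{
		int delay = i915->framestart_delay;	/* 1-based, 1..4 */

		if (WARN_ON(delay < 1 || delay > 4))
			delay = 1;

		return TRANS_CHICKEN2_FRAME_START_DELAY(delay - 1);
	}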
@@ -1805,67 +979,18 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
 		return crtc->pipe;
 }
 
-static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
+void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	u32 mode_flags = crtc->mode_flags;
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
+	enum pipe pipe = crtc->pipe;
+	i915_reg_t reg;
+	u32 val;
 
-	/*
-	 * From Gen 11, In case of dsi cmd mode, frame counter wouldnt
-	 * have updated at the beginning of TE, if we want to use
-	 * the hw counter, then we would find it updated in only
-	 * the next TE, hence switching to sw counter.
-	 */
-	if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
-		return 0;
-
-	/*
-	 * On i965gm the hardware frame counter reads
-	 * zero when the TV encoder is enabled :(
-	 */
-	if (IS_I965GM(dev_priv) &&
-	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
-		return 0;
-
-	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
-		return 0xffffffff; /* full 32 bit counter */
-	else if (INTEL_GEN(dev_priv) >= 3)
-		return 0xffffff; /* only 24 bits of frame count */
-	else
-		return 0; /* Gen2 doesn't have a hardware frame counter */
-}
-
-void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
-	assert_vblank_disabled(&crtc->base);
-	drm_crtc_set_max_vblank_count(&crtc->base,
-				      intel_crtc_max_vblank_count(crtc_state));
-	drm_crtc_vblank_on(&crtc->base);
-}
-
-void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
-	drm_crtc_vblank_off(&crtc->base);
-	assert_vblank_disabled(&crtc->base);
-}
-
-void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
-	enum pipe pipe = crtc->pipe;
-	i915_reg_t reg;
-	u32 val;
-
-	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
-
-	assert_planes_disabled(crtc);
+	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
+
+	assert_planes_disabled(crtc);
 
 	/*
 	 * A pipe without a PLL won't actually be able to drive bits from
@@ -1968,8 +1093,8 @@ static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
 static bool is_gen12_ccs_modifier(u64 modifier)
 {
 	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
 	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
-
 }
 
 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
@@ -1977,6 +1102,12 @@ static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
 	return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
 }
 
+static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane)
+{
+	return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC &&
+	       plane == 2;
+}
+
 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
 {
 	if (is_ccs_modifier(fb->modifier))
@@ -1998,6 +1129,9 @@ static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
 	drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
 		    ccs_plane < fb->format->num_planes / 2);
 
+	if (is_gen12_ccs_cc_plane(fb, ccs_plane))
+		return 0;
+
 	return ccs_plane - fb->format->num_planes / 2;
 }
 
@@ -2016,7 +1150,7 @@ int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
 
 bool
 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
-				    uint64_t modifier)
+				    u64 modifier)
 {
 	return info->is_yuv &&
 	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
@@ -2048,6 +1182,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
 			return 128;
 		fallthrough;
 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
 		if (is_ccs_plane(fb, color_plane))
 			return 64;
@@ -2182,8 +1317,13 @@ static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_pr
 		return 0;
 }
 
-static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
-					 int color_plane)
+static bool has_async_flips(struct drm_i915_private *i915)
+{
+	return INTEL_GEN(i915) >= 5;
+}
+
+unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+				  int color_plane)
 {
 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
 
@@ -2196,7 +1336,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
 	case DRM_FORMAT_MOD_LINEAR:
 		return intel_linear_alignment(dev_priv);
 	case I915_FORMAT_MOD_X_TILED:
-		if (INTEL_GEN(dev_priv) >= 9)
+		if (has_async_flips(dev_priv))
 			return 256 * 1024;
 		return 0;
 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
@@ -2204,6 +1344,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
 			return intel_tile_row_size(fb, color_plane);
 		fallthrough;
 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
 		return 16 * 1024;
 	case I915_FORMAT_MOD_Y_TILED_CCS:
 	case I915_FORMAT_MOD_Yf_TILED_CCS:
@@ -2449,10 +1590,10 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
  * Adjust the tile offset by moving the difference into
  * the x/y offsets.
  */
-static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
-					     const struct intel_plane_state *state,
-					     int color_plane,
-					     u32 old_offset, u32 new_offset)
+u32 intel_plane_adjust_aligned_offset(int *x, int *y,
+				      const struct intel_plane_state *state,
+				      int color_plane,
+				      u32 old_offset, u32 new_offset)
 {
 	return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
 					   state->hw.rotation,
@@ -2529,9 +1670,9 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
 	return offset_aligned;
 }
 
-static u32 intel_plane_compute_aligned_offset(int *x, int *y,
-					      const struct intel_plane_state *state,
-					      int color_plane)
+u32 intel_plane_compute_aligned_offset(int *x, int *y,
+				       const struct intel_plane_state *state,
+				       int color_plane)
 {
 	struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
@@ -2605,6 +1746,7 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
 	case I915_FORMAT_MOD_Y_TILED:
 	case I915_FORMAT_MOD_Y_TILED_CCS:
 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
 		return I915_TILING_Y;
 	default:
@@ -2683,6 +1825,25 @@ static const struct drm_format_info gen12_ccs_formats[] = {
 	  .hsub = 2, .vsub = 2, .is_yuv = true },
 };
 
+/*
+ * Same as gen12_ccs_formats[] above, but with an additional surface in
+ * plane 2 used to pass 64 bits of Clear Color data.
+ */
+static const struct drm_format_info gen12_ccs_cc_formats[] = {
+	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
+	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+	  .hsub = 1, .vsub = 1, },
+	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
+	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+	  .hsub = 1, .vsub = 1, },
+	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
+	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+	  .hsub = 1, .vsub = 1, .has_alpha = true },
+	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
+	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+	  .hsub = 1, .vsub = 1, .has_alpha = true },
+};
+
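Unlike the other planes, plane 2 of the CC modifier holds no pixels at all: it carries the 64-bit clear color value itself, which is why its char_per_block is 0 above and why intel_fill_fb_info() below skips the usual offset arithmetic for it and only demands page alignment. A layout sketch, assuming a CPU mapping of the buffer (the driver's real read path goes through the GEM object):

	/* Plane 0: main surface, plane 1: CCS for plane 0,
	 * plane 2: one little-endian u64 of clear color at fb->offsets[2].
	 */
	static u64 read_clear_color(const void *vaddr,
				    const struct drm_framebuffer *fb)
	{
		return le64_to_cpup((const __le64 *)(vaddr + fb->offsets[2]));
	}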
 static const struct drm_format_info *
 lookup_format_info(const struct drm_format_info formats[],
 		   int num_formats, u32 format)
@@ -2711,6 +1872,10 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
 		return lookup_format_info(gen12_ccs_formats,
 					  ARRAY_SIZE(gen12_ccs_formats),
 					  cmd->pixel_format);
+	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+		return lookup_format_info(gen12_ccs_cc_formats,
+					  ARRAY_SIZE(gen12_ccs_cc_formats),
+					  cmd->pixel_format);
 	default:
 		return NULL;
 	}
@@ -2719,6 +1884,7 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
 bool is_ccs_modifier(u64 modifier)
 {
 	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
 	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
 	       modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
 	       modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
@@ -2937,7 +2103,7 @@ intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
 	int ccs_x, ccs_y;
 	int main_x, main_y;
 
-	if (!is_ccs_plane(fb, ccs_plane))
+	if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
 		return 0;
 
 	intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
@@ -3064,6 +2230,18 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 		int x, y;
 		int ret;
 
+		/*
+		 * Plane 2 of the Render Compression with Clear Color fb
+		 * modifier is consumed by the driver and not passed to DE.
+		 * Skip the arithmetic related to alignment and offset
+		 * calculation.
+		 */
+		if (is_gen12_ccs_cc_plane(fb, i)) {
+			if (IS_ALIGNED(fb->offsets[i], PAGE_SIZE))
+				continue;
+			else
+				return -EINVAL;
+		}
+
 		cpp = fb->format->cpp[i];
 		intel_fb_plane_dims(&width, &height, fb, i);
 
@@ -3268,7 +2446,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
 	}
 }
 
-static int
+int
 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
 {
 	const struct intel_framebuffer *fb =
@@ -3548,7 +2726,7 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
 		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
 }
 
-static void fixup_active_planes(struct intel_crtc_state *crtc_state)
+static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	struct drm_plane *plane;
@@ -3558,11 +2736,14 @@ static void fixup_active_planes(struct intel_crtc_state *crtc_state)
 	 * have been used on the same (or wrong) pipe. plane_mask uses
 	 * unique ids, hence we can use that to reconstruct active_planes.
 	 */
+	crtc_state->enabled_planes = 0;
 	crtc_state->active_planes = 0;
 
 	drm_for_each_plane_mask(plane, &dev_priv->drm,
-				crtc_state->uapi.plane_mask)
+				crtc_state->uapi.plane_mask) {
+		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
 		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
+	}
 }
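The rename from fixup_active_planes() reflects that the readout fixup now reconstructs two bitmasks: enabled_planes for planes that are logically enabled, and active_planes for planes that are actually visible. During normal atomic updates the two can differ, e.g. for a plane that is enabled but fully clipped; a sketch of that distinction (helper name is mine):

	static bool plane_enabled_but_invisible(const struct intel_crtc_state *state,
						enum plane_id plane_id)
	{
		return (state->enabled_planes & BIT(plane_id)) &&
		       !(state->active_planes & BIT(plane_id));
	}

In the noatomic path above both masks are rebuilt from the same uapi plane_mask, since no finer-grained information survives takeover from the BIOS.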
 
 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
@@ -3580,7 +2761,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
 		    crtc->base.base.id, crtc->base.name);
 
 	intel_set_plane_visible(crtc_state, plane_state, false);
-	fixup_active_planes(crtc_state);
+	fixup_plane_bitmasks(crtc_state);
 	crtc_state->data_rate[plane->id] = 0;
 	crtc_state->min_cdclk[plane->id] = 0;
 
@@ -3610,12 +2791,6 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
 	intel_disable_plane(plane, crtc_state);
 }
 
-static struct intel_frontbuffer *
-to_intel_frontbuffer(struct drm_framebuffer *fb)
-{
-	return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
-}
-
 static void
 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 			     struct intel_initial_plane_config *plane_config)
@@ -3814,33 +2989,19 @@ static int intel_plane_max_height(struct intel_plane *plane,
 		return INT_MAX;
 }
 
-static int skl_check_main_surface(struct intel_plane_state *plane_state)
+int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
+				 int *x, int *y, u32 *offset)
 {
 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	const struct drm_framebuffer *fb = plane_state->hw.fb;
-	unsigned int rotation = plane_state->hw.rotation;
-	int x = plane_state->uapi.src.x1 >> 16;
-	int y = plane_state->uapi.src.y1 >> 16;
-	int w = drm_rect_width(&plane_state->uapi.src) >> 16;
-	int h = drm_rect_height(&plane_state->uapi.src) >> 16;
-	int min_width = intel_plane_min_width(plane, fb, 0, rotation);
-	int max_width = intel_plane_max_width(plane, fb, 0, rotation);
-	int max_height = intel_plane_max_height(plane, fb, 0, rotation);
-	int aux_plane = intel_main_to_aux_plane(fb, 0);
-	u32 aux_offset = plane_state->color_plane[aux_plane].offset;
-	u32 alignment, offset;
-
-	if (w > max_width || w < min_width || h > max_height) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
-			    w, h, min_width, max_width, max_height);
-		return -EINVAL;
-	}
+	const int aux_plane = intel_main_to_aux_plane(fb, 0);
+	const u32 aux_offset = plane_state->color_plane[aux_plane].offset;
+	const u32 alignment = intel_surf_alignment(fb, 0);
+	const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
 
-	intel_add_fb_offsets(&x, &y, plane_state, 0);
-	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
-	alignment = intel_surf_alignment(fb, 0);
+	intel_add_fb_offsets(x, y, plane_state, 0);
+	*offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0);
 	if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
 		return -EINVAL;
 
@@ -3849,9 +3010,10 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
 	 * main surface offset, and it must be non-negative. Make
 	 * sure that is what we will get.
 	 */
-	if (aux_plane && offset > aux_offset)
-		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
-							   offset, aux_offset & ~(alignment - 1));
+	if (aux_plane && *offset > aux_offset)
+		*offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
+							    *offset,
+							    aux_offset & ~(alignment - 1));
 
 	/*
 	 * When using an X-tiled surface, the plane blows up
@@ -3862,18 +3024,51 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
 	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
 		int cpp = fb->format->cpp[0];
 
-		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
-			if (offset == 0) {
+		while ((*x + w) * cpp > plane_state->color_plane[0].stride) {
+			if (*offset == 0) {
 				drm_dbg_kms(&dev_priv->drm,
 					    "Unable to find suitable display surface offset due to X-tiling\n");
 				return -EINVAL;
 			}
 
-			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
-								   offset, offset - alignment);
+			*offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
+								    *offset,
+								    *offset - alignment);
 		}
 	}
 
+	return 0;
+}
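
To make the X-tiled walk-back above concrete, here is a hedged standalone model (simplified to linear addressing: in the real code intel_plane_adjust_aligned_offset() folds the byte delta into x/y via the tile layout; pitch and tile-height handling are collapsed into a plain modulo here):

	#include <stdint.h>

	/* returns 0 once x + w fits inside the pitch, -1 if we run out of room */
	static int walk_back_offset(uint32_t *offset, int *x, int *y, int w,
				    uint32_t stride_bytes, uint32_t alignment, int cpp)
	{
		const int pitch = stride_bytes / cpp;	/* pitch in pixels */

		while (*x + w > pitch) {
			if (*offset == 0)
				return -1;	/* -EINVAL in the driver */

			*offset -= alignment;		/* step the base back */
			*x += alignment / cpp;		/* delta folds into x ... */
			*y += *x / pitch;		/* ... and wraps into y */
			*x %= pitch;
		}
		return 0;
	}
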
+
+static int skl_check_main_surface(struct intel_plane_state *plane_state)
+{
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	const unsigned int rotation = plane_state->hw.rotation;
+	int x = plane_state->uapi.src.x1 >> 16;
+	int y = plane_state->uapi.src.y1 >> 16;
+	const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
+	const int h = drm_rect_height(&plane_state->uapi.src) >> 16;
+	const int min_width = intel_plane_min_width(plane, fb, 0, rotation);
+	const int max_width = intel_plane_max_width(plane, fb, 0, rotation);
+	const int max_height = intel_plane_max_height(plane, fb, 0, rotation);
+	const int aux_plane = intel_main_to_aux_plane(fb, 0);
+	const u32 alignment = intel_surf_alignment(fb, 0);
+	u32 offset;
+	int ret;
+
+	if (w > max_width || w < min_width || h > max_height) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
+			    w, h, min_width, max_width, max_height);
+		return -EINVAL;
+	}
+
+	ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
+	if (ret)
+		return ret;
+
 	/*
 	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
 	 * they match with the main surface x/y offsets.
@@ -3896,6 +3091,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
 		}
 	}
 
+	drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191);
+
 	plane_state->color_plane[0].offset = offset;
 	plane_state->color_plane[0].x = x;
 	plane_state->color_plane[0].y = y;
@@ -3968,6 +3165,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
 		}
 	}
 
+	drm_WARN_ON(&i915->drm, x > 8191 || y > 8191);
+
 	plane_state->color_plane[uv_plane].offset = offset;
 	plane_state->color_plane[uv_plane].x = x;
 	plane_state->color_plane[uv_plane].y = y;
@@ -3988,7 +3187,8 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
 		int hsub, vsub;
 		int x, y;
 
-		if (!is_ccs_plane(fb, ccs_plane))
+		if (!is_ccs_plane(fb, ccs_plane) ||
+		    is_gen12_ccs_cc_plane(fb, ccs_plane))
 			continue;
 
 		intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
@@ -4060,548 +3260,132 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
 	return 0;
 }
 
-static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
-			     const struct intel_plane_state *plane_state,
-			     unsigned int *num, unsigned int *den)
-{
-	const struct drm_framebuffer *fb = plane_state->hw.fb;
-	unsigned int cpp = fb->format->cpp[0];
-
-	/*
-	 * g4x bspec says 64bpp pixel rate can't exceed 80%
-	 * of cdclk when the sprite plane is enabled on the
-	 * same pipe. ilk/snb bspec says 64bpp pixel rate is
-	 * never allowed to exceed 80% of cdclk. Let's just go
-	 * with the ilk/snb limit always.
-	 */
-	if (cpp == 8) {
-		*num = 10;
-		*den = 8;
-	} else {
-		*num = 1;
-		*den = 1;
-	}
-}
-
-static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
-				const struct intel_plane_state *plane_state)
+static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
 {
-	unsigned int pixel_rate;
-	unsigned int num, den;
-
-	/*
-	 * Note that crtc_state->pixel_rate accounts for both
-	 * horizontal and vertical panel fitter downscaling factors.
-	 * Pre-HSW bspec tells us to only consider the horizontal
-	 * downscaling factor here. We ignore that and just consider
-	 * both for simplicity.
-	 */
-	pixel_rate = crtc_state->pixel_rate;
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	unsigned long irqflags;
 
-	i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	/* two pixels per clock with double wide pipe */
-	if (crtc_state->double_wide)
-		den *= 2;
+	intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
+	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
+	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
 
-	return DIV_ROUND_UP(pixel_rate * num, den);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
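
The 10/8 ratio above is the 80% rule stated directly: min_cdclk = DIV_ROUND_UP(pixel_rate * 10, 8), i.e. the 64bpp pixel rate may be at most 0.8 * cdclk. A quick standalone check of the arithmetic (example value only):

	#include <stdio.h>

	int main(void)
	{
		unsigned int pixel_rate = 200000;	/* kHz, example value */
		unsigned int num = 10, den = 8;		/* 64bpp (cpp == 8) ratio */

		/* DIV_ROUND_UP(pixel_rate * num, den) -> 250000 kHz min cdclk */
		printf("%u\n", (pixel_rate * num + den - 1) / den);
		return 0;
	}
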
 
-unsigned int
-i9xx_plane_max_stride(struct intel_plane *plane,
-		      u32 pixel_format, u64 modifier,
-		      unsigned int rotation)
+/*
+ * This function detaches (i.e. unbinds) unused scalers in hardware
+ */
+static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	const struct intel_crtc_scaler_state *scaler_state =
+		&crtc_state->scaler_state;
+	int i;
 
-	if (!HAS_GMCH(dev_priv)) {
-		return 32*1024;
-	} else if (INTEL_GEN(dev_priv) >= 4) {
-		if (modifier == I915_FORMAT_MOD_X_TILED)
-			return 16*1024;
-		else
-			return 32*1024;
-	} else if (INTEL_GEN(dev_priv) >= 3) {
-		if (modifier == I915_FORMAT_MOD_X_TILED)
-			return 8*1024;
-		else
-			return 16*1024;
-	} else {
-		if (plane->i9xx_plane == PLANE_C)
-			return 4*1024;
-		else
-			return 8*1024;
+	/* loop through and disable scalers that aren't in use */
+	for (i = 0; i < intel_crtc->num_scalers; i++) {
+		if (!scaler_state->scalers[i].in_use)
+			skl_detach_scaler(intel_crtc, i);
 	}
 }
 
-static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
+static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
+					  int color_plane, unsigned int rotation)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	u32 dspcntr = 0;
-
-	if (crtc_state->gamma_enable)
-		dspcntr |= DISPPLANE_GAMMA_ENABLE;
-
-	if (crtc_state->csc_enable)
-		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
-
-	if (INTEL_GEN(dev_priv) < 5)
-		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
-
-	return dspcntr;
+	/*
+	 * The stride is expressed either as a multiple of 64-byte chunks
+	 * for linear buffers or as a number of tiles for tiled buffers.
+	 */
+	if (is_surface_linear(fb, color_plane))
+		return 64;
+	else if (drm_rotation_90_or_270(rotation))
+		return intel_tile_height(fb, color_plane);
+	else
+		return intel_tile_width_bytes(fb, color_plane);
 }
 
-static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
-			  const struct intel_plane_state *plane_state)
+u32 skl_plane_stride(const struct intel_plane_state *plane_state,
+		     int color_plane)
 {
-	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->uapi.plane->dev);
 	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	unsigned int rotation = plane_state->hw.rotation;
-	u32 dspcntr;
+	u32 stride = plane_state->color_plane[color_plane].stride;
 
-	dspcntr = DISPLAY_PLANE_ENABLE;
+	if (color_plane >= fb->format->num_planes)
+		return 0;
 
-	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
-	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
-		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+	return stride / skl_plane_stride_mult(fb, color_plane, rotation);
+}
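
As a worked example of the unit conversion above (numbers illustrative): a linear XRGB8888 fb with a 5120-byte stride programs 5120 / 64 = 80, while an unrotated X-tiled one (512-byte-wide tiles) programs 5120 / 512 = 10 tiles:

	#include <stdint.h>
	#include <stdio.h>

	/* linear strides use 64-byte units, unrotated tiled strides use the
	 * tile width in bytes (512 for X-tile) -- mirrors the helpers above */
	static uint32_t stride_reg_val(uint32_t stride_bytes, int linear,
				       uint32_t tile_width_bytes)
	{
		return stride_bytes / (linear ? 64 : tile_width_bytes);
	}

	int main(void)
	{
		printf("%u\n", stride_reg_val(5120, 1, 0));   /* 80 */
		printf("%u\n", stride_reg_val(5120, 0, 512)); /* 10 */
		return 0;
	}
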
 
-	switch (fb->format->format) {
+static u32 skl_plane_ctl_format(u32 pixel_format)
+{
+	switch (pixel_format) {
 	case DRM_FORMAT_C8:
-		dspcntr |= DISPPLANE_8BPP;
-		break;
-	case DRM_FORMAT_XRGB1555:
-		dspcntr |= DISPPLANE_BGRX555;
-		break;
-	case DRM_FORMAT_ARGB1555:
-		dspcntr |= DISPPLANE_BGRA555;
-		break;
+		return PLANE_CTL_FORMAT_INDEXED;
 	case DRM_FORMAT_RGB565:
-		dspcntr |= DISPPLANE_BGRX565;
-		break;
-	case DRM_FORMAT_XRGB8888:
-		dspcntr |= DISPPLANE_BGRX888;
-		break;
+		return PLANE_CTL_FORMAT_RGB_565;
 	case DRM_FORMAT_XBGR8888:
-		dspcntr |= DISPPLANE_RGBX888;
-		break;
-	case DRM_FORMAT_ARGB8888:
-		dspcntr |= DISPPLANE_BGRA888;
-		break;
 	case DRM_FORMAT_ABGR8888:
-		dspcntr |= DISPPLANE_RGBA888;
-		break;
-	case DRM_FORMAT_XRGB2101010:
-		dspcntr |= DISPPLANE_BGRX101010;
-		break;
+		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		return PLANE_CTL_FORMAT_XRGB_8888;
 	case DRM_FORMAT_XBGR2101010:
-		dspcntr |= DISPPLANE_RGBX101010;
-		break;
-	case DRM_FORMAT_ARGB2101010:
-		dspcntr |= DISPPLANE_BGRA101010;
-		break;
 	case DRM_FORMAT_ABGR2101010:
-		dspcntr |= DISPPLANE_RGBA101010;
-		break;
+		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010:
+		return PLANE_CTL_FORMAT_XRGB_2101010;
 	case DRM_FORMAT_XBGR16161616F:
-		dspcntr |= DISPPLANE_RGBX161616;
-		break;
+	case DRM_FORMAT_ABGR16161616F:
+		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
+	case DRM_FORMAT_XRGB16161616F:
+	case DRM_FORMAT_ARGB16161616F:
+		return PLANE_CTL_FORMAT_XRGB_16161616F;
+	case DRM_FORMAT_XYUV8888:
+		return PLANE_CTL_FORMAT_XYUV;
+	case DRM_FORMAT_YUYV:
+		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
+	case DRM_FORMAT_YVYU:
+		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
+	case DRM_FORMAT_UYVY:
+		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
+	case DRM_FORMAT_VYUY:
+		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
+	case DRM_FORMAT_NV12:
+		return PLANE_CTL_FORMAT_NV12;
+	case DRM_FORMAT_P010:
+		return PLANE_CTL_FORMAT_P010;
+	case DRM_FORMAT_P012:
+		return PLANE_CTL_FORMAT_P012;
+	case DRM_FORMAT_P016:
+		return PLANE_CTL_FORMAT_P016;
+	case DRM_FORMAT_Y210:
+		return PLANE_CTL_FORMAT_Y210;
+	case DRM_FORMAT_Y212:
+		return PLANE_CTL_FORMAT_Y212;
+	case DRM_FORMAT_Y216:
+		return PLANE_CTL_FORMAT_Y216;
+	case DRM_FORMAT_XVYU2101010:
+		return PLANE_CTL_FORMAT_Y410;
+	case DRM_FORMAT_XVYU12_16161616:
+		return PLANE_CTL_FORMAT_Y412;
+	case DRM_FORMAT_XVYU16161616:
+		return PLANE_CTL_FORMAT_Y416;
 	default:
-		MISSING_CASE(fb->format->format);
-		return 0;
+		MISSING_CASE(pixel_format);
 	}
 
-	if (INTEL_GEN(dev_priv) >= 4 &&
-	    fb->modifier == I915_FORMAT_MOD_X_TILED)
-		dspcntr |= DISPPLANE_TILED;
-
-	if (rotation & DRM_MODE_ROTATE_180)
-		dspcntr |= DISPPLANE_ROTATE_180;
-
-	if (rotation & DRM_MODE_REFLECT_X)
-		dspcntr |= DISPPLANE_MIRROR;
-
-	return dspcntr;
+	return 0;
 }
 
-int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
+static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
 {
-	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->uapi.plane->dev);
-	const struct drm_framebuffer *fb = plane_state->hw.fb;
-	int src_x, src_y, src_w;
-	u32 offset;
-	int ret;
-
-	ret = intel_plane_compute_gtt(plane_state);
-	if (ret)
-		return ret;
-
-	if (!plane_state->uapi.visible)
-		return 0;
-
-	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
-	src_x = plane_state->uapi.src.x1 >> 16;
-	src_y = plane_state->uapi.src.y1 >> 16;
-
-	/* Undocumented hardware limit on i965/g4x/vlv/chv */
-	if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
-		return -EINVAL;
-
-	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
-
-	if (INTEL_GEN(dev_priv) >= 4)
-		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
-							    plane_state, 0);
-	else
-		offset = 0;
-
-	/*
-	 * Put the final coordinates back so that the src
-	 * coordinate checks will see the right values.
-	 */
-	drm_rect_translate_to(&plane_state->uapi.src,
-			      src_x << 16, src_y << 16);
-
-	/* HSW/BDW do this automagically in hardware */
-	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
-		unsigned int rotation = plane_state->hw.rotation;
-		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
-		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
-
-		if (rotation & DRM_MODE_ROTATE_180) {
-			src_x += src_w - 1;
-			src_y += src_h - 1;
-		} else if (rotation & DRM_MODE_REFLECT_X) {
-			src_x += src_w - 1;
-		}
-	}
-
-	plane_state->color_plane[0].offset = offset;
-	plane_state->color_plane[0].x = src_x;
-	plane_state->color_plane[0].y = src_y;
-
-	return 0;
-}
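
The removed 180° fix-up is last-pixel arithmetic: when the hardware scans from the opposite corner, the programmed x/y must address the final pixel of the source rect (HSW/BDW derive this themselves). A standalone illustration:

	#include <stdio.h>

	int main(void)
	{
		int src_x = 16, src_y = 8, src_w = 640, src_h = 480;

		/* DRM_MODE_ROTATE_180: start from the bottom-right pixel */
		src_x += src_w - 1;	/* 655 */
		src_y += src_h - 1;	/* 487 */

		printf("%d,%d\n", src_x, src_y);
		return 0;
	}
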
-
-static bool i9xx_plane_has_windowing(struct intel_plane *plane)
-{
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-
-	if (IS_CHERRYVIEW(dev_priv))
-		return i9xx_plane == PLANE_B;
-	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
-		return false;
-	else if (IS_GEN(dev_priv, 4))
-		return i9xx_plane == PLANE_C;
-	else
-		return i9xx_plane == PLANE_B ||
-			i9xx_plane == PLANE_C;
-}
-
-static int
-i9xx_plane_check(struct intel_crtc_state *crtc_state,
-		 struct intel_plane_state *plane_state)
-{
-	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
-	int ret;
-
-	ret = chv_plane_check_rotation(plane_state);
-	if (ret)
-		return ret;
-
-	ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
-						DRM_PLANE_HELPER_NO_SCALING,
-						DRM_PLANE_HELPER_NO_SCALING,
-						i9xx_plane_has_windowing(plane));
-	if (ret)
-		return ret;
-
-	ret = i9xx_check_plane_surface(plane_state);
-	if (ret)
-		return ret;
-
-	if (!plane_state->uapi.visible)
-		return 0;
-
-	ret = intel_plane_check_src_coordinates(plane_state);
-	if (ret)
-		return ret;
-
-	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
-
-	return 0;
-}
-
-static void i9xx_update_plane(struct intel_plane *plane,
-			      const struct intel_crtc_state *crtc_state,
-			      const struct intel_plane_state *plane_state)
-{
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-	u32 linear_offset;
-	int x = plane_state->color_plane[0].x;
-	int y = plane_state->color_plane[0].y;
-	int crtc_x = plane_state->uapi.dst.x1;
-	int crtc_y = plane_state->uapi.dst.y1;
-	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
-	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
-	unsigned long irqflags;
-	u32 dspaddr_offset;
-	u32 dspcntr;
-
-	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
-
-	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
-	if (INTEL_GEN(dev_priv) >= 4)
-		dspaddr_offset = plane_state->color_plane[0].offset;
-	else
-		dspaddr_offset = linear_offset;
-
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-	intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
-			  plane_state->color_plane[0].stride);
-
-	if (INTEL_GEN(dev_priv) < 4) {
-		/*
-		 * PLANE_A doesn't actually have a full window
-		 * generator but let's assume we still need to
-		 * program whatever is there.
-		 */
-		intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
-				  (crtc_y << 16) | crtc_x);
-		intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
-				  ((crtc_h - 1) << 16) | (crtc_w - 1));
-	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
-		intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
-				  (crtc_y << 16) | crtc_x);
-		intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
-				  ((crtc_h - 1) << 16) | (crtc_w - 1));
-		intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
-	}
-
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
-				  (y << 16) | x);
-	} else if (INTEL_GEN(dev_priv) >= 4) {
-		intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
-				  linear_offset);
-		intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
-				  (y << 16) | x);
-	}
-
-	/*
-	 * The control register self-arms if the plane was previously
-	 * disabled. Try to make the plane enable atomic by writing
-	 * the control register just before the surface register.
-	 */
-	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
-	if (INTEL_GEN(dev_priv) >= 4)
-		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
-				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
-	else
-		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
-				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
-
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void i9xx_disable_plane(struct intel_plane *plane,
-			       const struct intel_crtc_state *crtc_state)
-{
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-	unsigned long irqflags;
-	u32 dspcntr;
-
-	/*
-	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
-	 * enable on ilk+ affect the pipe bottom color as
-	 * well, so we must configure them even if the plane
-	 * is disabled.
-	 *
-	 * On pre-g4x there is no way to gamma correct the
-	 * pipe bottom color but we'll keep on doing this
-	 * anyway so that the crtc state readout works correctly.
-	 */
-	dspcntr = i9xx_plane_ctl_crtc(crtc_state);
-
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
-	if (INTEL_GEN(dev_priv) >= 4)
-		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
-	else
-		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
-
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
-				    enum pipe *pipe)
-{
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	enum intel_display_power_domain power_domain;
-	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-	intel_wakeref_t wakeref;
-	bool ret;
-	u32 val;
-
-	/*
-	 * Not 100% correct for planes that can move between pipes,
-	 * but that's only the case for gen2-4 which don't have any
-	 * display power wells.
-	 */
-	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
-	if (!wakeref)
-		return false;
-
-	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
-
-	ret = val & DISPLAY_PLANE_ENABLE;
-
-	if (INTEL_GEN(dev_priv) >= 5)
-		*pipe = plane->pipe;
-	else
-		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
-			DISPPLANE_SEL_PIPE_SHIFT;
-
-	intel_display_power_put(dev_priv, power_domain, wakeref);
-
-	return ret;
-}
-
-static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
-{
-	struct drm_device *dev = intel_crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-	intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
-	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
-	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
-
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-/*
- * This function detaches (aka. unbinds) unused scalers in hardware
- */
-static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
-{
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	const struct intel_crtc_scaler_state *scaler_state =
-		&crtc_state->scaler_state;
-	int i;
-
-	/* loop through and disable scalers that aren't in use */
-	for (i = 0; i < intel_crtc->num_scalers; i++) {
-		if (!scaler_state->scalers[i].in_use)
-			skl_detach_scaler(intel_crtc, i);
-	}
-}
-
-static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
-					  int color_plane, unsigned int rotation)
-{
-	/*
-	 * The stride is either expressed as a multiple of 64 bytes chunks for
-	 * linear buffers or in number of tiles for tiled buffers.
-	 */
-	if (is_surface_linear(fb, color_plane))
-		return 64;
-	else if (drm_rotation_90_or_270(rotation))
-		return intel_tile_height(fb, color_plane);
-	else
-		return intel_tile_width_bytes(fb, color_plane);
-}
-
-u32 skl_plane_stride(const struct intel_plane_state *plane_state,
-		     int color_plane)
-{
-	const struct drm_framebuffer *fb = plane_state->hw.fb;
-	unsigned int rotation = plane_state->hw.rotation;
-	u32 stride = plane_state->color_plane[color_plane].stride;
-
-	if (color_plane >= fb->format->num_planes)
-		return 0;
-
-	return stride / skl_plane_stride_mult(fb, color_plane, rotation);
-}
-
-static u32 skl_plane_ctl_format(u32 pixel_format)
-{
-	switch (pixel_format) {
-	case DRM_FORMAT_C8:
-		return PLANE_CTL_FORMAT_INDEXED;
-	case DRM_FORMAT_RGB565:
-		return PLANE_CTL_FORMAT_RGB_565;
-	case DRM_FORMAT_XBGR8888:
-	case DRM_FORMAT_ABGR8888:
-		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
-	case DRM_FORMAT_XRGB8888:
-	case DRM_FORMAT_ARGB8888:
-		return PLANE_CTL_FORMAT_XRGB_8888;
-	case DRM_FORMAT_XBGR2101010:
-	case DRM_FORMAT_ABGR2101010:
-		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
-	case DRM_FORMAT_XRGB2101010:
-	case DRM_FORMAT_ARGB2101010:
-		return PLANE_CTL_FORMAT_XRGB_2101010;
-	case DRM_FORMAT_XBGR16161616F:
-	case DRM_FORMAT_ABGR16161616F:
-		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
-	case DRM_FORMAT_XRGB16161616F:
-	case DRM_FORMAT_ARGB16161616F:
-		return PLANE_CTL_FORMAT_XRGB_16161616F;
-	case DRM_FORMAT_XYUV8888:
-		return PLANE_CTL_FORMAT_XYUV;
-	case DRM_FORMAT_YUYV:
-		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
-	case DRM_FORMAT_YVYU:
-		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
-	case DRM_FORMAT_UYVY:
-		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
-	case DRM_FORMAT_VYUY:
-		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
-	case DRM_FORMAT_NV12:
-		return PLANE_CTL_FORMAT_NV12;
-	case DRM_FORMAT_P010:
-		return PLANE_CTL_FORMAT_P010;
-	case DRM_FORMAT_P012:
-		return PLANE_CTL_FORMAT_P012;
-	case DRM_FORMAT_P016:
-		return PLANE_CTL_FORMAT_P016;
-	case DRM_FORMAT_Y210:
-		return PLANE_CTL_FORMAT_Y210;
-	case DRM_FORMAT_Y212:
-		return PLANE_CTL_FORMAT_Y212;
-	case DRM_FORMAT_Y216:
-		return PLANE_CTL_FORMAT_Y216;
-	case DRM_FORMAT_XVYU2101010:
-		return PLANE_CTL_FORMAT_Y410;
-	case DRM_FORMAT_XVYU12_16161616:
-		return PLANE_CTL_FORMAT_Y412;
-	case DRM_FORMAT_XVYU16161616:
-		return PLANE_CTL_FORMAT_Y416;
-	default:
-		MISSING_CASE(pixel_format);
-	}
-
-	return 0;
-}
-
-static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
-{
-	if (!plane_state->hw.fb->format->has_alpha)
-		return PLANE_CTL_ALPHA_DISABLE;
+	if (!plane_state->hw.fb->format->has_alpha)
+		return PLANE_CTL_ALPHA_DISABLE;
 
 	switch (plane_state->hw.pixel_blend_mode) {
 	case DRM_MODE_BLEND_PIXEL_NONE:
@@ -4644,6 +3428,7 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier)
 	case I915_FORMAT_MOD_Y_TILED:
 		return PLANE_CTL_TILED_Y;
 	case I915_FORMAT_MOD_Y_TILED_CCS:
+	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
 		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
 		return PLANE_CTL_TILED_Y |
@@ -4704,9 +3489,6 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	u32 plane_ctl = 0;
 
-	if (crtc_state->uapi.async_flip)
-		plane_ctl |= PLANE_CTL_ASYNC_FLIP;
-
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		return plane_ctl;
 
@@ -4995,532 +3777,6 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
 	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
 }
 
-static void intel_fdi_normal_train(struct intel_crtc *crtc)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	enum pipe pipe = crtc->pipe;
-	i915_reg_t reg;
-	u32 temp;
-
-	/* enable normal train */
-	reg = FDI_TX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	if (IS_IVYBRIDGE(dev_priv)) {
-		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
-		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
-	} else {
-		temp &= ~FDI_LINK_TRAIN_NONE;
-		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
-	}
-	intel_de_write(dev_priv, reg, temp);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	if (HAS_PCH_CPT(dev_priv)) {
-		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-	} else {
-		temp &= ~FDI_LINK_TRAIN_NONE;
-		temp |= FDI_LINK_TRAIN_NONE;
-	}
-	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
-	/* wait one idle pattern time */
-	intel_de_posting_read(dev_priv, reg);
-	udelay(1000);
-
-	/* IVB wants error correction enabled */
-	if (IS_IVYBRIDGE(dev_priv))
-		intel_de_write(dev_priv, reg,
-		               intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
-}
-
-/* The FDI link training functions for ILK/Ibexpeak. */
-static void ilk_fdi_link_train(struct intel_crtc *crtc,
-			       const struct intel_crtc_state *crtc_state)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	enum pipe pipe = crtc->pipe;
-	i915_reg_t reg;
-	u32 temp, tries;
-
-	/* FDI needs bits from pipe first */
-	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
-
-	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
-	   for train result */
-	reg = FDI_RX_IMR(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	temp &= ~FDI_RX_SYMBOL_LOCK;
-	temp &= ~FDI_RX_BIT_LOCK;
-	intel_de_write(dev_priv, reg, temp);
-	intel_de_read(dev_priv, reg);
-	udelay(150);
-
-	/* enable CPU FDI TX and PCH FDI RX */
-	reg = FDI_TX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	temp &= ~FDI_DP_PORT_WIDTH_MASK;
-	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_PATTERN_1;
-	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_PATTERN_1;
-	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
-
-	intel_de_posting_read(dev_priv, reg);
-	udelay(150);
-
-	/* Ironlake workaround, enable clock pointer after FDI enable*/
-	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
-		       FDI_RX_PHASE_SYNC_POINTER_OVR);
-	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
-		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
-
-	reg = FDI_RX_IIR(pipe);
-	for (tries = 0; tries < 5; tries++) {
-		temp = intel_de_read(dev_priv, reg);
-		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
-		if ((temp & FDI_RX_BIT_LOCK)) {
-			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
-			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
-			break;
-		}
-	}
-	if (tries == 5)
-		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
-
-	/* Train 2 */
-	reg = FDI_TX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_PATTERN_2;
-	intel_de_write(dev_priv, reg, temp);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_PATTERN_2;
-	intel_de_write(dev_priv, reg, temp);
-
-	intel_de_posting_read(dev_priv, reg);
-	udelay(150);
-
-	reg = FDI_RX_IIR(pipe);
-	for (tries = 0; tries < 5; tries++) {
-		temp = intel_de_read(dev_priv, reg);
-		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
-		if (temp & FDI_RX_SYMBOL_LOCK) {
-			intel_de_write(dev_priv, reg,
-				       temp | FDI_RX_SYMBOL_LOCK);
-			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
-			break;
-		}
-	}
-	if (tries == 5)
-		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
-
-	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
-
-}
-
-static const int snb_b_fdi_train_param[] = {
-	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
-	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
-	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
-	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
-};
-
-/* The FDI link training functions for SNB/Cougarpoint. */
-static void gen6_fdi_link_train(struct intel_crtc *crtc,
-				const struct intel_crtc_state *crtc_state)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	enum pipe pipe = crtc->pipe;
-	i915_reg_t reg;
-	u32 temp, i, retry;
-
-	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
-	   for train result */
-	reg = FDI_RX_IMR(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	temp &= ~FDI_RX_SYMBOL_LOCK;
-	temp &= ~FDI_RX_BIT_LOCK;
-	intel_de_write(dev_priv, reg, temp);
-
-	intel_de_posting_read(dev_priv, reg);
-	udelay(150);
-
-	/* enable CPU FDI TX and PCH FDI RX */
-	reg = FDI_TX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	temp &= ~FDI_DP_PORT_WIDTH_MASK;
-	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_PATTERN_1;
-	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-	/* SNB-B */
-	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
-	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
-
-	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
-		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	if (HAS_PCH_CPT(dev_priv)) {
-		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
-	} else {
-		temp &= ~FDI_LINK_TRAIN_NONE;
-		temp |= FDI_LINK_TRAIN_PATTERN_1;
-	}
-	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
-
-	intel_de_posting_read(dev_priv, reg);
-	udelay(150);
-
-	for (i = 0; i < 4; i++) {
-		reg = FDI_TX_CTL(pipe);
-		temp = intel_de_read(dev_priv, reg);
-		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-		temp |= snb_b_fdi_train_param[i];
-		intel_de_write(dev_priv, reg, temp);
-
-		intel_de_posting_read(dev_priv, reg);
-		udelay(500);
-
-		for (retry = 0; retry < 5; retry++) {
-			reg = FDI_RX_IIR(pipe);
-			temp = intel_de_read(dev_priv, reg);
-			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-			if (temp & FDI_RX_BIT_LOCK) {
-				intel_de_write(dev_priv, reg,
-					       temp | FDI_RX_BIT_LOCK);
-				drm_dbg_kms(&dev_priv->drm,
-					    "FDI train 1 done.\n");
-				break;
-			}
-			udelay(50);
-		}
-		if (retry < 5)
-			break;
-	}
-	if (i == 4)
-		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
-
-	/* Train 2 */
-	reg = FDI_TX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_PATTERN_2;
-	if (IS_GEN(dev_priv, 6)) {
-		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-		/* SNB-B */
-		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
-	}
-	intel_de_write(dev_priv, reg, temp);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	if (HAS_PCH_CPT(dev_priv)) {
-		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
-	} else {
-		temp &= ~FDI_LINK_TRAIN_NONE;
-		temp |= FDI_LINK_TRAIN_PATTERN_2;
-	}
-	intel_de_write(dev_priv, reg, temp);
-
-	intel_de_posting_read(dev_priv, reg);
-	udelay(150);
-
-	for (i = 0; i < 4; i++) {
-		reg = FDI_TX_CTL(pipe);
-		temp = intel_de_read(dev_priv, reg);
-		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-		temp |= snb_b_fdi_train_param[i];
-		intel_de_write(dev_priv, reg, temp);
-
-		intel_de_posting_read(dev_priv, reg);
-		udelay(500);
-
-		for (retry = 0; retry < 5; retry++) {
-			reg = FDI_RX_IIR(pipe);
-			temp = intel_de_read(dev_priv, reg);
-			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-			if (temp & FDI_RX_SYMBOL_LOCK) {
-				intel_de_write(dev_priv, reg,
-					       temp | FDI_RX_SYMBOL_LOCK);
-				drm_dbg_kms(&dev_priv->drm,
-					    "FDI train 2 done.\n");
-				break;
-			}
-			udelay(50);
-		}
-		if (retry < 5)
-			break;
-	}
-	if (i == 4)
-		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
-
-	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
-}
-
-/* Manual link training for Ivy Bridge A0 parts */
-static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
-				      const struct intel_crtc_state *crtc_state)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	enum pipe pipe = crtc->pipe;
-	i915_reg_t reg;
-	u32 temp, i, j;
-
-	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
-	   for train result */
-	reg = FDI_RX_IMR(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	temp &= ~FDI_RX_SYMBOL_LOCK;
-	temp &= ~FDI_RX_BIT_LOCK;
-	intel_de_write(dev_priv, reg, temp);
-
-	intel_de_posting_read(dev_priv, reg);
-	udelay(150);
-
-	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
-		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
-
-	/* Try each vswing and preemphasis setting twice before moving on */
-	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
-		/* disable first in case we need to retry */
-		reg = FDI_TX_CTL(pipe);
-		temp = intel_de_read(dev_priv, reg);
-		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
-		temp &= ~FDI_TX_ENABLE;
-		intel_de_write(dev_priv, reg, temp);
-
-		reg = FDI_RX_CTL(pipe);
-		temp = intel_de_read(dev_priv, reg);
-		temp &= ~FDI_LINK_TRAIN_AUTO;
-		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-		temp &= ~FDI_RX_ENABLE;
-		intel_de_write(dev_priv, reg, temp);
-
-		/* enable CPU FDI TX and PCH FDI RX */
-		reg = FDI_TX_CTL(pipe);
-		temp = intel_de_read(dev_priv, reg);
-		temp &= ~FDI_DP_PORT_WIDTH_MASK;
-		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
-		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
-		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-		temp |= snb_b_fdi_train_param[j/2];
-		temp |= FDI_COMPOSITE_SYNC;
-		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
-
-		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
-			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
-		reg = FDI_RX_CTL(pipe);
-		temp = intel_de_read(dev_priv, reg);
-		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
-		temp |= FDI_COMPOSITE_SYNC;
-		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
-
-		intel_de_posting_read(dev_priv, reg);
-		udelay(1); /* should be 0.5us */
-
-		for (i = 0; i < 4; i++) {
-			reg = FDI_RX_IIR(pipe);
-			temp = intel_de_read(dev_priv, reg);
-			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
-			if (temp & FDI_RX_BIT_LOCK ||
-			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
-				intel_de_write(dev_priv, reg,
-					       temp | FDI_RX_BIT_LOCK);
-				drm_dbg_kms(&dev_priv->drm,
-					    "FDI train 1 done, level %i.\n",
-					    i);
-				break;
-			}
-			udelay(1); /* should be 0.5us */
-		}
-		if (i == 4) {
-			drm_dbg_kms(&dev_priv->drm,
-				    "FDI train 1 fail on vswing %d\n", j / 2);
-			continue;
-		}
-
-		/* Train 2 */
-		reg = FDI_TX_CTL(pipe);
-		temp = intel_de_read(dev_priv, reg);
-		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
-		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
-		intel_de_write(dev_priv, reg, temp);
-
-		reg = FDI_RX_CTL(pipe);
-		temp = intel_de_read(dev_priv, reg);
-		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
-		intel_de_write(dev_priv, reg, temp);
-
-		intel_de_posting_read(dev_priv, reg);
-		udelay(2); /* should be 1.5us */
-
-		for (i = 0; i < 4; i++) {
-			reg = FDI_RX_IIR(pipe);
-			temp = intel_de_read(dev_priv, reg);
-			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
-			if (temp & FDI_RX_SYMBOL_LOCK ||
-			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
-				intel_de_write(dev_priv, reg,
-					       temp | FDI_RX_SYMBOL_LOCK);
-				drm_dbg_kms(&dev_priv->drm,
-					    "FDI train 2 done, level %i.\n",
-					    i);
-				goto train_done;
-			}
-			udelay(2); /* should be 1.5us */
-		}
-		if (i == 4)
-			drm_dbg_kms(&dev_priv->drm,
-				    "FDI train 2 fail on vswing %d\n", j / 2);
-	}
-
-train_done:
-	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
-}
-
-static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
-{
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
-	enum pipe pipe = intel_crtc->pipe;
-	i915_reg_t reg;
-	u32 temp;
-
-	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
-	reg = FDI_RX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
-	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
-	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
-	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
-
-	intel_de_posting_read(dev_priv, reg);
-	udelay(200);
-
-	/* Switch from Rawclk to PCDclk */
-	temp = intel_de_read(dev_priv, reg);
-	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
-
-	intel_de_posting_read(dev_priv, reg);
-	udelay(200);
-
-	/* Enable CPU FDI TX PLL, always on for Ironlake */
-	reg = FDI_TX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
-		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
-
-		intel_de_posting_read(dev_priv, reg);
-		udelay(100);
-	}
-}
-
-static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
-{
-	struct drm_device *dev = intel_crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	enum pipe pipe = intel_crtc->pipe;
-	i915_reg_t reg;
-	u32 temp;
-
-	/* Switch from PCDclk to Rawclk */
-	reg = FDI_RX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
-
-	/* Disable CPU FDI TX PLL */
-	reg = FDI_TX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
-
-	intel_de_posting_read(dev_priv, reg);
-	udelay(100);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
-
-	/* Wait for the clocks to turn off. */
-	intel_de_posting_read(dev_priv, reg);
-	udelay(100);
-}
-
-static void ilk_fdi_disable(struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	enum pipe pipe = crtc->pipe;
-	i915_reg_t reg;
-	u32 temp;
-
-	/* disable CPU FDI tx and PCH FDI rx */
-	reg = FDI_TX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
-	intel_de_posting_read(dev_priv, reg);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	temp &= ~(0x7 << 16);
-	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
-	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
-
-	intel_de_posting_read(dev_priv, reg);
-	udelay(100);
-
-	/* Ironlake workaround, disable clock pointer after downing FDI */
-	if (HAS_PCH_IBX(dev_priv))
-		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
-			       FDI_RX_PHASE_SYNC_POINTER_OVR);
-
-	/* still set train pattern 1 */
-	reg = FDI_TX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_PATTERN_1;
-	intel_de_write(dev_priv, reg, temp);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = intel_de_read(dev_priv, reg);
-	if (HAS_PCH_CPT(dev_priv)) {
-		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
-	} else {
-		temp &= ~FDI_LINK_TRAIN_NONE;
-		temp |= FDI_LINK_TRAIN_PATTERN_1;
-	}
-	/* BPC in FDI rx is consistent with that in PIPECONF */
-	temp &= ~(0x07 << 16);
-	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
-	intel_de_write(dev_priv, reg, temp);
-
-	intel_de_posting_read(dev_priv, reg);
-	udelay(100);
-}
-
 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
 {
 	struct drm_crtc *crtc;
@@ -5751,7 +4007,7 @@ static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_st
  * Finds the encoder associated with the given CRTC. This can only be
  * used when we know that the CRTC isn't feeding multiple encoders!
  */
-static struct intel_encoder *
+struct intel_encoder *
 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
 			   const struct intel_crtc_state *crtc_state)
 {
@@ -6270,7 +4526,7 @@ static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
 	intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
 }
 
-inline u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
+u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
 {
 	if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
 		return (PS_FILTER_PROGRAMMED |
@@ -6469,7 +4725,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
 	if (!old_crtc_state->ips_enabled)
 		return false;
 
-	if (needs_modeset(new_crtc_state))
+	if (intel_crtc_needs_modeset(new_crtc_state))
 		return true;
 
 	/*
@@ -6496,7 +4752,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
 	if (!new_crtc_state->ips_enabled)
 		return false;
 
-	if (needs_modeset(new_crtc_state))
+	if (intel_crtc_needs_modeset(new_crtc_state))
 		return true;
 
 	/*
@@ -6549,7 +4805,7 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
 			    const struct intel_crtc_state *new_crtc_state)
 {
-	return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
+	return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
 		new_crtc_state->active_planes;
 }
 
@@ -6557,7 +4813,7 @@ static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
 			     const struct intel_crtc_state *new_crtc_state)
 {
 	return old_crtc_state->active_planes &&
-		(!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
+		(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
 }
 
 static void intel_post_plane_update(struct intel_atomic_state *state,
@@ -6589,41 +4845,72 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
 		icl_wa_scalerclkgating(dev_priv, pipe, false);
 }
 
-static void skl_disable_async_flip_wa(struct intel_atomic_state *state,
-				      struct intel_crtc *crtc,
-				      const struct intel_crtc_state *new_crtc_state)
+static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
+					struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	u8 update_planes = crtc_state->update_planes;
+	const struct intel_plane_state *plane_state;
 	struct intel_plane *plane;
-	struct intel_plane_state *new_plane_state;
 	int i;
 
-	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
-		u32 update_mask = new_crtc_state->update_planes;
-		u32 plane_ctl, surf_addr;
-		enum plane_id plane_id;
-		unsigned long irqflags;
-		enum pipe pipe;
-
-		if (crtc->pipe != plane->pipe ||
-		    !(update_mask & BIT(plane->id)))
-			continue;
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		if (plane->enable_flip_done &&
+		    plane->pipe == crtc->pipe &&
+		    update_planes & BIT(plane->id))
+			plane->enable_flip_done(plane);
+	}
+}
 
-		plane_id = plane->id;
-		pipe = plane->pipe;
+static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
+					 struct intel_crtc *crtc)
+{
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	u8 update_planes = crtc_state->update_planes;
+	const struct intel_plane_state *plane_state;
+	struct intel_plane *plane;
+	int i;
 
-		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-		plane_ctl = intel_de_read_fw(dev_priv, PLANE_CTL(pipe, plane_id));
-		surf_addr = intel_de_read_fw(dev_priv, PLANE_SURF(pipe, plane_id));
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		if (plane->disable_flip_done &&
+		    plane->pipe == crtc->pipe &&
+		    update_planes & BIT(plane->id))
+			plane->disable_flip_done(plane);
+	}
+}
 
-		plane_ctl &= ~PLANE_CTL_ASYNC_FLIP;
+static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
+					     struct intel_crtc *crtc)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	const struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	u8 update_planes = new_crtc_state->update_planes;
+	const struct intel_plane_state *old_plane_state;
+	struct intel_plane *plane;
+	bool need_vbl_wait = false;
+	int i;
 
-		intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
-		intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), surf_addr);
-		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
+		if (plane->need_async_flip_disable_wa &&
+		    plane->pipe == crtc->pipe &&
+		    update_planes & BIT(plane->id)) {
+			/*
+			 * Apart from the async flip bit, we want to
+			 * preserve the old state for the plane.
+			 */
+			plane->async_flip(plane, old_crtc_state,
+					  old_plane_state, false);
+			need_vbl_wait = true;
+		}
 	}
 
-	intel_wait_for_vblank(dev_priv, crtc->pipe);
+	if (need_vbl_wait)
+		intel_wait_for_vblank(i915, crtc->pipe);
 }
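
With the WA now routed through a per-plane hook, each plane's async_flip(plane, crtc_state, plane_state, false) is expected to do what the removed skl_disable_async_flip_wa() did inline: rewrite PLANE_CTL with the async bit cleared and re-arm PLANE_SURF so the change latches. A hedged sketch of such a hook, using only registers and helpers visible in the removed code (the real hook also takes the crtc/plane state to preserve, elided here):

	static void skl_plane_async_flip_off(struct intel_plane *plane)
	{
		struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
		enum plane_id plane_id = plane->id;
		enum pipe pipe = plane->pipe;
		u32 plane_ctl, surf_addr;
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		plane_ctl = intel_de_read_fw(dev_priv, PLANE_CTL(pipe, plane_id));
		surf_addr = intel_de_read_fw(dev_priv, PLANE_SURF(pipe, plane_id));

		/* clear only the async bit, keep the rest of the old state */
		plane_ctl &= ~PLANE_CTL_ASYNC_FLIP;

		intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
		intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), surf_addr);

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	}
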
 
 static void intel_pre_plane_update(struct intel_atomic_state *state,
@@ -6680,7 +4967,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
 	 * If we're doing a modeset we don't need to do any
 	 * pre-vblank watermark programming here.
 	 */
-	if (!needs_modeset(new_crtc_state)) {
+	if (!intel_crtc_needs_modeset(new_crtc_state)) {
 		/*
 		 * For platforms that support atomic watermarks, program the
 		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
@@ -6716,10 +5003,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
 	 * WA for platforms where async address update enable bit
 	 * is double buffered and only latched at start of vblank.
 	 */
-	if (old_crtc_state->uapi.async_flip &&
-	    !new_crtc_state->uapi.async_flip &&
-	    IS_GEN_RANGE(dev_priv, 9, 10))
-		skl_disable_async_flip_wa(state, crtc, new_crtc_state);
+	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
+		intel_crtc_async_flip_disable_wa(state, crtc);
 }
 
 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
@@ -7139,7 +5424,7 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
 
 	val = intel_de_read(dev_priv, reg);
 	val &= ~HSW_FRAME_START_DELAY_MASK;
-	val |= HSW_FRAME_START_DELAY(0);
+	val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
 	intel_de_write(dev_priv, reg, val);
 }
 
@@ -7574,25 +5859,25 @@ modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
 	enum intel_display_power_domain domain;
 	u64 domains, new_domains, old_domains;
 
-	old_domains = crtc->enabled_power_domains;
-	crtc->enabled_power_domains = new_domains =
-		get_crtc_power_domains(crtc_state);
+	domains = get_crtc_power_domains(crtc_state);
 
-	domains = new_domains & ~old_domains;
+	new_domains = domains & ~crtc->enabled_power_domains.mask;
+	old_domains = crtc->enabled_power_domains.mask & ~domains;
 
-	for_each_power_domain(domain, domains)
-		intel_display_power_get(dev_priv, domain);
+	for_each_power_domain(domain, new_domains)
+		intel_display_power_get_in_set(dev_priv,
+					       &crtc->enabled_power_domains,
+					       domain);
 
-	return old_domains & ~new_domains;
+	return old_domains;
 }
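
The two set differences above are plain bitmask arithmetic: grab what is wanted but not yet held, drop what is held but no longer wanted. A standalone illustration with small masks:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t wanted = 0xb;	/* domains the new crtc state needs */
		uint64_t held   = 0x6;	/* domains currently enabled */

		uint64_t to_get = wanted & ~held;	/* 0x9 */
		uint64_t to_put = held & ~wanted;	/* 0x4 */

		printf("get %#llx put %#llx\n",
		       (unsigned long long)to_get, (unsigned long long)to_put);
		return 0;
	}
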
 
-static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
-				      u64 domains)
+static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
+					   u64 domains)
 {
-	enum intel_display_power_domain domain;
-
-	for_each_power_domain(domain, domains)
-		intel_display_power_put_unchecked(dev_priv, domain);
+	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
+					    &crtc->enabled_power_domains,
+					    domains);
 }
 
 static void valleyview_crtc_enable(struct intel_atomic_state *state,
@@ -7788,12 +6073,10 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
 		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
 	struct intel_crtc_state *crtc_state =
 		to_intel_crtc_state(crtc->base.state);
-	enum intel_display_power_domain domain;
 	struct intel_plane *plane;
 	struct drm_atomic_state *state;
 	struct intel_crtc_state *temp_crtc_state;
 	enum pipe pipe = crtc->pipe;
-	u64 domains;
 	int ret;
 
 	if (!crtc_state->hw.active)
@@ -7849,10 +6132,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
 	intel_update_watermarks(crtc);
 	intel_disable_shared_dpll(crtc_state);
 
-	domains = crtc->enabled_power_domains;
-	for_each_power_domain(domain, domains)
-		intel_display_power_put_unchecked(dev_priv, domain);
-	crtc->enabled_power_domains = 0;
+	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
 
 	dev_priv->active_pipes &= ~BIT(pipe);
 	cdclk_state->min_cdclk[pipe] = 0;
@@ -7932,151 +6212,14 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
 	}
 }
 
-static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
+bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
 {
-	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
-		return crtc_state->fdi_lanes;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	return 0;
-}
-
-static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
-			       struct intel_crtc_state *pipe_config)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_atomic_state *state = pipe_config->uapi.state;
-	struct intel_crtc *other_crtc;
-	struct intel_crtc_state *other_crtc_state;
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "checking fdi config on pipe %c, lanes %i\n",
-		    pipe_name(pipe), pipe_config->fdi_lanes);
-	if (pipe_config->fdi_lanes > 4) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "invalid fdi lane config on pipe %c: %i lanes\n",
-			    pipe_name(pipe), pipe_config->fdi_lanes);
-		return -EINVAL;
-	}
-
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		if (pipe_config->fdi_lanes > 2) {
-			drm_dbg_kms(&dev_priv->drm,
-				    "only 2 lanes on haswell, required: %i lanes\n",
-				    pipe_config->fdi_lanes);
-			return -EINVAL;
-		} else {
-			return 0;
-		}
-	}
-
-	if (INTEL_NUM_PIPES(dev_priv) == 2)
-		return 0;
-
-	/* Ivybridge 3 pipe is really complicated */
-	switch (pipe) {
-	case PIPE_A:
-		return 0;
-	case PIPE_B:
-		if (pipe_config->fdi_lanes <= 2)
-			return 0;
-
-		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
-		other_crtc_state =
-			intel_atomic_get_crtc_state(state, other_crtc);
-		if (IS_ERR(other_crtc_state))
-			return PTR_ERR(other_crtc_state);
-
-		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
-			drm_dbg_kms(&dev_priv->drm,
-				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
-				    pipe_name(pipe), pipe_config->fdi_lanes);
-			return -EINVAL;
-		}
-		return 0;
-	case PIPE_C:
-		if (pipe_config->fdi_lanes > 2) {
-			drm_dbg_kms(&dev_priv->drm,
-				    "only 2 lanes on pipe %c: required %i lanes\n",
-				    pipe_name(pipe), pipe_config->fdi_lanes);
-			return -EINVAL;
-		}
-
-		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
-		other_crtc_state =
-			intel_atomic_get_crtc_state(state, other_crtc);
-		if (IS_ERR(other_crtc_state))
-			return PTR_ERR(other_crtc_state);
-
-		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
-			drm_dbg_kms(&dev_priv->drm,
-				    "fdi link B uses too many lanes to enable link C\n");
-			return -EINVAL;
-		}
-		return 0;
-	default:
-		BUG();
-	}
-}
-
-#define RETRY 1
-static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
-				  struct intel_crtc_state *pipe_config)
-{
-	struct drm_device *dev = intel_crtc->base.dev;
-	struct drm_i915_private *i915 = to_i915(dev);
-	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
-	int lane, link_bw, fdi_dotclock, ret;
-	bool needs_recompute = false;
-
-retry:
-	/* FDI is a binary signal running at ~2.7GHz, encoding
-	 * each output octet as 10 bits. The actual frequency
-	 * is stored as a divider into a 100MHz clock, and the
-	 * mode pixel clock is stored in units of 1KHz.
-	 * Hence the bw of each lane in terms of the mode signal
-	 * is:
-	 */
-	link_bw = intel_fdi_link_freq(i915, pipe_config);
-
-	fdi_dotclock = adjusted_mode->crtc_clock;
-
-	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
-				      pipe_config->pipe_bpp);
-
-	pipe_config->fdi_lanes = lane;
-
-	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
-			       link_bw, &pipe_config->fdi_m_n, false, false);
-
-	ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
-	if (ret == -EDEADLK)
-		return ret;
-
-	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
-		pipe_config->pipe_bpp -= 2*3;
-		drm_dbg_kms(&i915->drm,
-			    "fdi link bw constraint, reducing pipe bpp to %i\n",
-			    pipe_config->pipe_bpp);
-		needs_recompute = true;
-		pipe_config->bw_constrained = true;
-
-		goto retry;
-	}
-
-	if (needs_recompute)
-		return RETRY;
-
-	return ret;
-}
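
The removed comment's derivation in numbers: each FDI lane runs at 2.7 GHz, and 8b/10b encoding leaves 2.7 * 8/10 = 2.16 Gbit/s of payload per lane, so a 148.5 MHz mode at 24 bpp (148.5e6 * 24 ≈ 3.56 Gbit/s) needs two lanes. A standalone sketch of the arithmetic (any safety margin the driver applies on top is omitted here):

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		const double lane_payload = 2.7e9 * 8 / 10;	/* 8b/10b payload */
		const double pixel_bw = 148.5e6 * 24;		/* 148.5 MHz @ 24bpp */
		int lanes = (int)ceil(pixel_bw / lane_payload);	/* -> 2 */

		printf("need %d lanes (%.2f Gbit/s over %.2f per lane)\n",
		       lanes, pixel_bw / 1e9, lane_payload / 1e9);
		return 0;
	}
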
-
-bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-	/* IPS only exists on ULT machines and is tied to pipe A. */
-	if (!hsw_crtc_supports_ips(crtc))
-		return false;
+	/* IPS only exists on ULT machines and is tied to pipe A. */
+	if (!hsw_crtc_supports_ips(crtc))
+		return false;
 
 	if (!dev_priv->params.enable_ips)
 		return false;
@@ -8300,19 +6443,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 		return -EINVAL;
 	}
 
-	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
-	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
-	     pipe_config->hw.ctm) {
-		/*
-		 * There is only one pipe CSC unit per pipe, and we need that
-		 * for output conversion from RGB->YCBCR. So if CTM is already
-		 * applied we can't support YCBCR420 output.
-		 */
-		drm_dbg_kms(&dev_priv->drm,
-			    "YCBCR420 and CTM together are not possible\n");
-		return -EINVAL;
-	}
-
 	/*
 	 * Pipe horizontal size must be even in:
 	 * - DVO ganged mode
@@ -8424,51 +6554,6 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
 	}
 }
 
-static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
-{
-	if (dev_priv->params.panel_use_ssc >= 0)
-		return dev_priv->params.panel_use_ssc != 0;
-	return dev_priv->vbt.lvds_use_ssc
-		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
-}
-
-static u32 pnv_dpll_compute_fp(struct dpll *dpll)
-{
-	return (1 << dpll->n) << 16 | dpll->m2;
-}
-
-static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
-{
-	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
-}
-
-static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
-				     struct intel_crtc_state *crtc_state,
-				     struct dpll *reduced_clock)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	u32 fp, fp2 = 0;
-
-	if (IS_PINEVIEW(dev_priv)) {
-		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
-		if (reduced_clock)
-			fp2 = pnv_dpll_compute_fp(reduced_clock);
-	} else {
-		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
-		if (reduced_clock)
-			fp2 = i9xx_dpll_compute_fp(reduced_clock);
-	}
-
-	crtc_state->dpll_hw_state.fp0 = fp;
-
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
-	    reduced_clock) {
-		crtc_state->dpll_hw_state.fp1 = fp2;
-	} else {
-		crtc_state->dpll_hw_state.fp1 = fp;
-	}
-}
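
For reference, the FP register layout the two compute helpers above produce
(bit positions inferred from the shifts in those helpers; a sketch, not the
real register macros):

static u32 fp_pack_example(void)
{
	int n = 3, m1 = 12, m2 = 9;

	/* i9xx layout: [23:16] = N, [15:8] = M1, [7:0] = M2 */
	return n << 16 | m1 << 8 | m2;	/* 0x00030c09 */
}
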
-
 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
 		pipe)
 {
@@ -8593,39 +6678,6 @@ void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_s
 		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
 }
 
-static void vlv_compute_dpll(struct intel_crtc *crtc,
-			     struct intel_crtc_state *pipe_config)
-{
-	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
-		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
-	if (crtc->pipe != PIPE_A)
-		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
-	/* DPLL not used with DSI, but still need the rest set up */
-	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
-		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
-			DPLL_EXT_BUFFER_ENABLE_VLV;
-
-	pipe_config->dpll_hw_state.dpll_md =
-		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-}
-
-static void chv_compute_dpll(struct intel_crtc *crtc,
-			     struct intel_crtc_state *pipe_config)
-{
-	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
-		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
-	if (crtc->pipe != PIPE_A)
-		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
-	/* DPLL not used with DSI, but still need the rest set up */
-	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
-		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
-
-	pipe_config->dpll_hw_state.dpll_md =
-		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-}
-
 static void vlv_prepare_pll(struct intel_crtc *crtc,
 			    const struct intel_crtc_state *pipe_config)
 {
@@ -8885,128 +6937,7 @@ void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
 		vlv_disable_pll(dev_priv, pipe);
 }
 
-static void i9xx_compute_dpll(struct intel_crtc *crtc,
-			      struct intel_crtc_state *crtc_state,
-			      struct dpll *reduced_clock)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	u32 dpll;
-	struct dpll *clock = &crtc_state->dpll;
-
-	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
-
-	dpll = DPLL_VGA_MODE_DIS;
-
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
-		dpll |= DPLLB_MODE_LVDS;
-	else
-		dpll |= DPLLB_MODE_DAC_SERIAL;
-
-	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
-	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
-		dpll |= (crtc_state->pixel_multiplier - 1)
-			<< SDVO_MULTIPLIER_SHIFT_HIRES;
-	}
-
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
-	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
-		dpll |= DPLL_SDVO_HIGH_SPEED;
-
-	if (intel_crtc_has_dp_encoder(crtc_state))
-		dpll |= DPLL_SDVO_HIGH_SPEED;
-
-	/* compute bitmask from p1 value */
-	if (IS_PINEVIEW(dev_priv))
-		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
-	else {
-		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
-		if (IS_G4X(dev_priv) && reduced_clock)
-			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
-	}
-	switch (clock->p2) {
-	case 5:
-		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
-		break;
-	case 7:
-		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
-		break;
-	case 10:
-		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
-		break;
-	case 14:
-		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
-		break;
-	}
-	if (INTEL_GEN(dev_priv) >= 4)
-		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
-
-	if (crtc_state->sdvo_tv_clock)
-		dpll |= PLL_REF_INPUT_TVCLKINBC;
-	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
-		 intel_panel_use_ssc(dev_priv))
-		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
-	else
-		dpll |= PLL_REF_INPUT_DREFCLK;
-
-	dpll |= DPLL_VCO_ENABLE;
-	crtc_state->dpll_hw_state.dpll = dpll;
-
-	if (INTEL_GEN(dev_priv) >= 4) {
-		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
-			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
-		crtc_state->dpll_hw_state.dpll_md = dpll_md;
-	}
-}
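
The "compute bitmask from p1 value" step above uses a one-hot encoding: a
divider value p1 sets bit (p1 - 1) within the field. A sketch with an
illustrative shift (not the real DPLL_FPA01_P1_POST_DIV_SHIFT):

static u32 p1_bitmask_example(int p1)
{
	const int shift = 16;	/* placeholder field offset */

	return (1u << (p1 - 1)) << shift;	/* p1 == 3 -> 0x00040000 */
}
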
-
-static void i8xx_compute_dpll(struct intel_crtc *crtc,
-			      struct intel_crtc_state *crtc_state,
-			      struct dpll *reduced_clock)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	u32 dpll;
-	struct dpll *clock = &crtc_state->dpll;
-
-	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
-
-	dpll = DPLL_VGA_MODE_DIS;
-
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
-	} else {
-		if (clock->p1 == 2)
-			dpll |= PLL_P1_DIVIDE_BY_TWO;
-		else
-			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
-		if (clock->p2 == 4)
-			dpll |= PLL_P2_DIVIDE_BY_4;
-	}
-
-	/*
-	 * Bspec:
-	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
-	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
-	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
-	 *  Enable) must be set to “1” in both the DPLL A Control Register
-	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
-	 *
-	 * For simplicity, we keep both bits always enabled in
-	 * both DPLLs. The spec says we should disable the DVO 2X clock
-	 * when not needed, but this seems to work fine in practice.
-	 */
-	if (IS_I830(dev_priv) ||
-	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
-		dpll |= DPLL_DVO_2X_MODE;
-
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
-	    intel_panel_use_ssc(dev_priv))
-		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
-	else
-		dpll |= PLL_REF_INPUT_DREFCLK;
 
-	dpll |= DPLL_VCO_ENABLE;
-	crtc_state->dpll_hw_state.dpll = dpll;
-}
 
 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
 {
@@ -9206,301 +7137,99 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
 
 	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
 
-	pipeconf |= PIPECONF_FRAME_START_DELAY(0);
+	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
 
 	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
 	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
 }
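
The PIPECONF_FRAME_START_DELAY() change above programs framestart_delay minus
one because the hardware field appears to be zero-based; assuming a delay of
1..4 scanlines mapping to field values 0..3:

static u32 frame_start_delay_field_example(int framestart_delay)
{
	/* the previously hardcoded 0 corresponds to a delay of 1 scanline */
	return (u32)(framestart_delay - 1);
}
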
 
-static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
-				   struct intel_crtc_state *crtc_state)
+static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	const struct intel_limit *limit;
-	int refclk = 48000;
-
-	memset(&crtc_state->dpll_hw_state, 0,
-	       sizeof(crtc_state->dpll_hw_state));
-
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if (intel_panel_use_ssc(dev_priv)) {
-			refclk = dev_priv->vbt.lvds_ssc_freq;
-			drm_dbg_kms(&dev_priv->drm,
-				    "using SSC reference clock of %d kHz\n",
-				    refclk);
-		}
-
-		limit = &intel_limits_i8xx_lvds;
-	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
-		limit = &intel_limits_i8xx_dvo;
-	} else {
-		limit = &intel_limits_i8xx_dac;
-	}
-
-	if (!crtc_state->clock_set &&
-	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
-				 refclk, NULL, &crtc_state->dpll)) {
-		drm_err(&dev_priv->drm,
-			"Couldn't find PLL settings for mode!\n");
-		return -EINVAL;
-	}
-
-	i8xx_compute_dpll(crtc, crtc_state, NULL);
+	if (IS_I830(dev_priv))
+		return false;
 
-	return 0;
+	return INTEL_GEN(dev_priv) >= 4 ||
+		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
 }
 
-static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
-				  struct intel_crtc_state *crtc_state)
+static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct intel_limit *limit;
-	int refclk = 96000;
+	u32 tmp;
 
-	memset(&crtc_state->dpll_hw_state, 0,
-	       sizeof(crtc_state->dpll_hw_state));
+	if (!i9xx_has_pfit(dev_priv))
+		return;
 
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if (intel_panel_use_ssc(dev_priv)) {
-			refclk = dev_priv->vbt.lvds_ssc_freq;
-			drm_dbg_kms(&dev_priv->drm,
-				    "using SSC reference clock of %d kHz\n",
-				    refclk);
-		}
+	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
+	if (!(tmp & PFIT_ENABLE))
+		return;
 
-		if (intel_is_dual_link_lvds(dev_priv))
-			limit = &intel_limits_g4x_dual_channel_lvds;
-		else
-			limit = &intel_limits_g4x_single_channel_lvds;
-	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
-		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
-		limit = &intel_limits_g4x_hdmi;
-	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
-		limit = &intel_limits_g4x_sdvo;
+	/* Check whether the pfit is attached to our pipe. */
+	if (INTEL_GEN(dev_priv) < 4) {
+		if (crtc->pipe != PIPE_B)
+			return;
 	} else {
-		/* These limits cover all other output types */
-		limit = &intel_limits_i9xx_sdvo;
-	}
-
-	if (!crtc_state->clock_set &&
-	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
-				refclk, NULL, &crtc_state->dpll)) {
-		drm_err(&dev_priv->drm,
-			"Couldn't find PLL settings for mode!\n");
-		return -EINVAL;
+		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
+			return;
 	}
 
-	i9xx_compute_dpll(crtc, crtc_state, NULL);
-
-	return 0;
+	crtc_state->gmch_pfit.control = tmp;
+	crtc_state->gmch_pfit.pgm_ratios =
+		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
 }
 
-static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
-				  struct intel_crtc_state *crtc_state)
+static void vlv_crtc_clock_get(struct intel_crtc *crtc,
+			       struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	const struct intel_limit *limit;
-	int refclk = 96000;
-
-	memset(&crtc_state->dpll_hw_state, 0,
-	       sizeof(crtc_state->dpll_hw_state));
-
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if (intel_panel_use_ssc(dev_priv)) {
-			refclk = dev_priv->vbt.lvds_ssc_freq;
-			drm_dbg_kms(&dev_priv->drm,
-				    "using SSC reference clock of %d kHz\n",
-				    refclk);
-		}
+	enum pipe pipe = crtc->pipe;
+	struct dpll clock;
+	u32 mdiv;
+	int refclk = 100000;
 
-		limit = &pnv_limits_lvds;
-	} else {
-		limit = &pnv_limits_sdvo;
-	}
+	/* In case of DSI, DPLL will not be used */
+	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+		return;
 
-	if (!crtc_state->clock_set &&
-	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
-				refclk, NULL, &crtc_state->dpll)) {
-		drm_err(&dev_priv->drm,
-			"Couldn't find PLL settings for mode!\n");
-		return -EINVAL;
-	}
+	vlv_dpio_get(dev_priv);
+	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
+	vlv_dpio_put(dev_priv);
 
-	i9xx_compute_dpll(crtc, crtc_state, NULL);
+	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
+	clock.m2 = mdiv & DPIO_M2DIV_MASK;
+	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
+	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
+	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
 
-	return 0;
+	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
 }
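
For readers without the helper at hand, vlv_calc_dpll_params() is expected to
turn the decoded dividers into a port clock roughly as follows (a hedged
reconstruction from the i915 DPLL helpers; the final /5 fast-clock factor is
an assumption):

static int vlv_port_clock_example(int refclk, int m1, int m2, int n,
				  int p1, int p2)
{
	int m = m1 * m2;
	int p = p1 * p2;
	int vco = (refclk * m + n / 2) / n;	/* DIV_ROUND_CLOSEST */
	int dot = (vco + p / 2) / p;

	return dot / 5;		/* VLV runs the PLL at 5x the port clock */
}
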
 
-static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
-				   struct intel_crtc_state *crtc_state)
+static void
+i9xx_get_initial_plane_config(struct intel_crtc *crtc,
+			      struct intel_initial_plane_config *plane_config)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	const struct intel_limit *limit;
-	int refclk = 96000;
+	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+	enum pipe pipe;
+	u32 val, base, offset;
+	int fourcc, pixel_format;
+	unsigned int aligned_height;
+	struct drm_framebuffer *fb;
+	struct intel_framebuffer *intel_fb;
 
-	memset(&crtc_state->dpll_hw_state, 0,
-	       sizeof(crtc_state->dpll_hw_state));
+	if (!plane->get_hw_state(plane, &pipe))
+		return;
 
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if (intel_panel_use_ssc(dev_priv)) {
-			refclk = dev_priv->vbt.lvds_ssc_freq;
-			drm_dbg_kms(&dev_priv->drm,
-				    "using SSC reference clock of %d kHz\n",
-				    refclk);
-		}
+	drm_WARN_ON(dev, pipe != crtc->pipe);
 
-		limit = &intel_limits_i9xx_lvds;
-	} else {
-		limit = &intel_limits_i9xx_sdvo;
-	}
-
-	if (!crtc_state->clock_set &&
-	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
-				 refclk, NULL, &crtc_state->dpll)) {
-		drm_err(&dev_priv->drm,
-			"Couldn't find PLL settings for mode!\n");
-		return -EINVAL;
-	}
-
-	i9xx_compute_dpll(crtc, crtc_state, NULL);
-
-	return 0;
-}
-
-static int chv_crtc_compute_clock(struct intel_crtc *crtc,
-				  struct intel_crtc_state *crtc_state)
-{
-	int refclk = 100000;
-	const struct intel_limit *limit = &intel_limits_chv;
-	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
-	memset(&crtc_state->dpll_hw_state, 0,
-	       sizeof(crtc_state->dpll_hw_state));
-
-	if (!crtc_state->clock_set &&
-	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
-				refclk, NULL, &crtc_state->dpll)) {
-		drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
-		return -EINVAL;
-	}
-
-	chv_compute_dpll(crtc, crtc_state);
-
-	return 0;
-}
-
-static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
-				  struct intel_crtc_state *crtc_state)
-{
-	int refclk = 100000;
-	const struct intel_limit *limit = &intel_limits_vlv;
-	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
-	memset(&crtc_state->dpll_hw_state, 0,
-	       sizeof(crtc_state->dpll_hw_state));
-
-	if (!crtc_state->clock_set &&
-	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
-				refclk, NULL, &crtc_state->dpll)) {
-		drm_err(&i915->drm,  "Couldn't find PLL settings for mode!\n");
-		return -EINVAL;
-	}
-
-	vlv_compute_dpll(crtc, crtc_state);
-
-	return 0;
-}
-
-static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
-{
-	if (IS_I830(dev_priv))
-		return false;
-
-	return INTEL_GEN(dev_priv) >= 4 ||
-		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
-}
-
-static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	u32 tmp;
-
-	if (!i9xx_has_pfit(dev_priv))
-		return;
-
-	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
-	if (!(tmp & PFIT_ENABLE))
-		return;
-
-	/* Check whether the pfit is attached to our pipe. */
-	if (INTEL_GEN(dev_priv) < 4) {
-		if (crtc->pipe != PIPE_B)
-			return;
-	} else {
-		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
-			return;
-	}
-
-	crtc_state->gmch_pfit.control = tmp;
-	crtc_state->gmch_pfit.pgm_ratios =
-		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
-}
-
-static void vlv_crtc_clock_get(struct intel_crtc *crtc,
-			       struct intel_crtc_state *pipe_config)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	enum pipe pipe = crtc->pipe;
-	struct dpll clock;
-	u32 mdiv;
-	int refclk = 100000;
-
-	/* In case of DSI, DPLL will not be used */
-	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
-		return;
-
-	vlv_dpio_get(dev_priv);
-	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
-	vlv_dpio_put(dev_priv);
-
-	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
-	clock.m2 = mdiv & DPIO_M2DIV_MASK;
-	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
-	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
-	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
-
-	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
-}
-
-static void
-i9xx_get_initial_plane_config(struct intel_crtc *crtc,
-			      struct intel_initial_plane_config *plane_config)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
-	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-	enum pipe pipe;
-	u32 val, base, offset;
-	int fourcc, pixel_format;
-	unsigned int aligned_height;
-	struct drm_framebuffer *fb;
-	struct intel_framebuffer *intel_fb;
-
-	if (!plane->get_hw_state(plane, &pipe))
-		return;
-
-	drm_WARN_ON(dev, pipe != crtc->pipe);
-
-	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
-	if (!intel_fb) {
-		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
-		return;
+	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+	if (!intel_fb) {
+		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
+		return;
 	}
 
 	fb = &intel_fb->base;
@@ -10315,7 +8044,7 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
 
 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
 
-	val |= PIPECONF_FRAME_START_DELAY(0);
+	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
 
 	intel_de_write(dev_priv, PIPECONF(pipe), val);
 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
@@ -10423,172 +8152,6 @@ int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
 	return DIV_ROUND_UP(bps, link_bw * 8);
 }
 
-static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
-{
-	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
-}
-
-static void ilk_compute_dpll(struct intel_crtc *crtc,
-			     struct intel_crtc_state *crtc_state,
-			     struct dpll *reduced_clock)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	u32 dpll, fp, fp2;
-	int factor;
-
-	/* Enable autotuning of the PLL clock (if permissible) */
-	factor = 21;
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if ((intel_panel_use_ssc(dev_priv) &&
-		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
-		    (HAS_PCH_IBX(dev_priv) &&
-		     intel_is_dual_link_lvds(dev_priv)))
-			factor = 25;
-	} else if (crtc_state->sdvo_tv_clock) {
-		factor = 20;
-	}
-
-	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
-
-	if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
-		fp |= FP_CB_TUNE;
-
-	if (reduced_clock) {
-		fp2 = i9xx_dpll_compute_fp(reduced_clock);
-
-		if (reduced_clock->m < factor * reduced_clock->n)
-			fp2 |= FP_CB_TUNE;
-	} else {
-		fp2 = fp;
-	}
-
-	dpll = 0;
-
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
-		dpll |= DPLLB_MODE_LVDS;
-	else
-		dpll |= DPLLB_MODE_DAC_SERIAL;
-
-	dpll |= (crtc_state->pixel_multiplier - 1)
-		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
-
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
-	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
-		dpll |= DPLL_SDVO_HIGH_SPEED;
-
-	if (intel_crtc_has_dp_encoder(crtc_state))
-		dpll |= DPLL_SDVO_HIGH_SPEED;
-
-	/*
-	 * The high speed IO clock is only really required for
-	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
-	 * possible to share the DPLL between CRT and HDMI. Enabling
-	 * the clock needlessly does no real harm, except use up a
-	 * bit of power potentially.
-	 *
-	 * We'll limit this to IVB with 3 pipes, since it has only two
-	 * DPLLs and so DPLL sharing is the only way to get three pipes
-	 * driving PCH ports at the same time. On SNB we could do this,
-	 * and potentially avoid enabling the second DPLL, but it's not
-	 * clear if it's a win or loss power-wise. No point in doing
-	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
-	 */
-	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
-	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
-		dpll |= DPLL_SDVO_HIGH_SPEED;
-
-	/* compute bitmask from p1 value */
-	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
-	/* also FPA1 */
-	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
-
-	switch (crtc_state->dpll.p2) {
-	case 5:
-		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
-		break;
-	case 7:
-		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
-		break;
-	case 10:
-		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
-		break;
-	case 14:
-		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
-		break;
-	}
-
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
-	    intel_panel_use_ssc(dev_priv))
-		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
-	else
-		dpll |= PLL_REF_INPUT_DREFCLK;
-
-	dpll |= DPLL_VCO_ENABLE;
-
-	crtc_state->dpll_hw_state.dpll = dpll;
-	crtc_state->dpll_hw_state.fp0 = fp;
-	crtc_state->dpll_hw_state.fp1 = fp2;
-}
-
-static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
-				  struct intel_crtc_state *crtc_state)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_atomic_state *state =
-		to_intel_atomic_state(crtc_state->uapi.state);
-	const struct intel_limit *limit;
-	int refclk = 120000;
-
-	memset(&crtc_state->dpll_hw_state, 0,
-	       sizeof(crtc_state->dpll_hw_state));
-
-	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
-	if (!crtc_state->has_pch_encoder)
-		return 0;
-
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if (intel_panel_use_ssc(dev_priv)) {
-			drm_dbg_kms(&dev_priv->drm,
-				    "using SSC reference clock of %d kHz\n",
-				    dev_priv->vbt.lvds_ssc_freq);
-			refclk = dev_priv->vbt.lvds_ssc_freq;
-		}
-
-		if (intel_is_dual_link_lvds(dev_priv)) {
-			if (refclk == 100000)
-				limit = &ilk_limits_dual_lvds_100m;
-			else
-				limit = &ilk_limits_dual_lvds;
-		} else {
-			if (refclk == 100000)
-				limit = &ilk_limits_single_lvds_100m;
-			else
-				limit = &ilk_limits_single_lvds;
-		}
-	} else {
-		limit = &ilk_limits_dac;
-	}
-
-	if (!crtc_state->clock_set &&
-	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
-				refclk, NULL, &crtc_state->dpll)) {
-		drm_err(&dev_priv->drm,
-			"Couldn't find PLL settings for mode!\n");
-		return -EINVAL;
-	}
-
-	ilk_compute_dpll(crtc, crtc_state, NULL);
-
-	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "failed to find PLL for pipe %c\n",
-			    pipe_name(crtc->pipe));
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
 					 struct intel_link_m_n *m_n)
 {
@@ -11001,29 +8564,6 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
 	return ret;
 }
 
-static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
-				  struct intel_crtc_state *crtc_state)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_atomic_state *state =
-		to_intel_atomic_state(crtc_state->uapi.state);
-
-	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
-	    INTEL_GEN(dev_priv) >= 11) {
-		struct intel_encoder *encoder =
-			intel_get_crtc_new_encoder(state, crtc_state);
-
-		if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
-			drm_dbg_kms(&dev_priv->drm,
-				    "failed to find PLL for pipe %c\n",
-				    pipe_name(crtc->pipe));
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
 static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
 			    struct intel_crtc_state *pipe_config)
 {
@@ -11225,16 +8765,13 @@ static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
 
 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
 				     struct intel_crtc_state *pipe_config,
-				     u64 *power_domain_mask,
-				     intel_wakeref_t *wakerefs)
+				     struct intel_display_power_domain_set *power_domain_set)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	enum intel_display_power_domain power_domain;
 	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
 	unsigned long enabled_panel_transcoders = 0;
 	enum transcoder panel_transcoder;
-	intel_wakeref_t wf;
 	u32 tmp;
 
 	if (INTEL_GEN(dev_priv) >= 11)
@@ -11305,16 +8842,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
 	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
 		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));
 
-	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
-	drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
-
-	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
-	if (!wf)
+	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
+						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
 		return false;
 
-	wakerefs[power_domain] = wf;
-	*power_domain_mask |= BIT_ULL(power_domain);
-
 	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
 
 	return tmp & PIPECONF_ENABLE;
@@ -11322,14 +8853,11 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
 
 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
 					 struct intel_crtc_state *pipe_config,
-					 u64 *power_domain_mask,
-					 intel_wakeref_t *wakerefs)
+					 struct intel_display_power_domain_set *power_domain_set)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	enum intel_display_power_domain power_domain;
 	enum transcoder cpu_transcoder;
-	intel_wakeref_t wf;
 	enum port port;
 	u32 tmp;
 
@@ -11339,16 +8867,10 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
 		else
 			cpu_transcoder = TRANSCODER_DSI_C;
 
-		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
-		drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
-
-		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
-		if (!wf)
+		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
+							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
 			continue;
 
-		wakerefs[power_domain] = wf;
-		*power_domain_mask |= BIT_ULL(power_domain);
-
 		/*
 		 * The PLL needs to be enabled with a valid divider
 		 * configuration, otherwise accessing DSI registers will hang
@@ -11431,30 +8953,20 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
 				struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
-	enum intel_display_power_domain power_domain;
-	u64 power_domain_mask;
+	struct intel_display_power_domain_set power_domain_set = { };
 	bool active;
 	u32 tmp;
 
-	pipe_config->master_transcoder = INVALID_TRANSCODER;
-
-	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
-	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
-	if (!wf)
+	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
+						       POWER_DOMAIN_PIPE(crtc->pipe)))
 		return false;
 
-	wakerefs[power_domain] = wf;
-	power_domain_mask = BIT_ULL(power_domain);
-
 	pipe_config->shared_dpll = NULL;
 
-	active = hsw_get_transcoder_state(crtc, pipe_config,
-					  &power_domain_mask, wakerefs);
+	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
 
 	if (IS_GEN9_LP(dev_priv) &&
-	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
-					 &power_domain_mask, wakerefs)) {
+	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
 		drm_WARN_ON(&dev_priv->drm, active);
 		active = true;
 	}
@@ -11477,6 +8989,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
 		intel_get_transcoder_timings(crtc, pipe_config);
 	}
 
+	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
+		intel_vrr_get_config(crtc, pipe_config);
+
 	intel_get_pipe_src_size(crtc, pipe_config);
 
 	if (IS_HASWELL(dev_priv)) {
@@ -11518,14 +9033,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
 		pipe_config->ips_linetime =
 			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
 
-	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
-	drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain));
-
-	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
-	if (wf) {
-		wakerefs[power_domain] = wf;
-		power_domain_mask |= BIT_ULL(power_domain);
-
+	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
+						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
 		if (INTEL_GEN(dev_priv) >= 9)
 			skl_get_pfit_config(pipe_config);
 		else
@@ -11559,9 +9068,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
 	}
 
 out:
-	for_each_power_domain(power_domain, power_domain_mask)
-		intel_display_power_put(dev_priv,
-					power_domain, wakerefs[power_domain]);
+	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
 
 	return active;
 }
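
The refactor in the hunks above replaces the open-coded wakeref array and
domain mask with intel_display_power_domain_set. A minimal usage sketch of
that pattern, using only the calls visible in this patch:

static bool read_pipe_state_example(struct drm_i915_private *i915,
				    struct intel_crtc *crtc)
{
	struct intel_display_power_domain_set power_domain_set = { };
	bool active = false;

	/* each successful get is tracked inside the set */
	if (intel_display_power_get_in_set_if_enabled(i915, &power_domain_set,
						      POWER_DOMAIN_PIPE(crtc->pipe)))
		active = true;	/* ... register reads would go here ... */

	/* one call drops every reference acquired through the set */
	intel_display_power_put_all_in_set(i915, &power_domain_set);

	return active;
}
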
@@ -11581,1828 +9088,1232 @@ static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
 	return true;
 }
 
-static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
-{
-	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->uapi.plane->dev);
-	const struct drm_framebuffer *fb = plane_state->hw.fb;
-	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	u32 base;
-
-	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
-		base = sg_dma_address(obj->mm.pages->sgl);
-	else
-		base = intel_plane_ggtt_offset(plane_state);
-
-	return base + plane_state->color_plane[0].offset;
-}
+/* VESA 640x480x72Hz mode to set on the pipe */
+static const struct drm_display_mode load_detect_mode = {
+	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
+		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+};
 
-static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
+struct drm_framebuffer *
+intel_framebuffer_create(struct drm_i915_gem_object *obj,
+			 struct drm_mode_fb_cmd2 *mode_cmd)
 {
-	int x = plane_state->uapi.dst.x1;
-	int y = plane_state->uapi.dst.y1;
-	u32 pos = 0;
-
-	if (x < 0) {
-		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
-		x = -x;
-	}
-	pos |= x << CURSOR_X_SHIFT;
+	struct intel_framebuffer *intel_fb;
+	int ret;
 
-	if (y < 0) {
-		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
-		y = -y;
-	}
-	pos |= y << CURSOR_Y_SHIFT;
+	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+	if (!intel_fb)
+		return ERR_PTR(-ENOMEM);
 
-	return pos;
-}
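
The helper deleted above packs signed coordinates in sign/magnitude form. A
standalone sketch (bit positions are illustrative stand-ins for the real
CURSOR_* macros):

static u32 cursor_pos_example(int x, int y)
{
	u32 pos = 0;

	if (x < 0) {
		pos |= 1u << 15;	/* X sign bit */
		x = -x;
	}
	pos |= (u32)x;			/* X magnitude in the low half */

	if (y < 0) {
		pos |= 1u << 31;	/* Y sign bit */
		y = -y;
	}
	pos |= (u32)y << 16;		/* Y magnitude in the high half */

	return pos;			/* (-8, 4) -> 0x00048008 */
}
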
+	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
+	if (ret)
+		goto err;
 
-static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
-	const struct drm_mode_config *config =
-		&plane_state->uapi.plane->dev->mode_config;
-	int width = drm_rect_width(&plane_state->uapi.dst);
-	int height = drm_rect_height(&plane_state->uapi.dst);
+	return &intel_fb->base;
 
-	return width > 0 && width <= config->cursor_width &&
-		height > 0 && height <= config->cursor_height;
+err:
+	kfree(intel_fb);
+	return ERR_PTR(ret);
 }
 
-static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
+static int intel_modeset_disable_planes(struct drm_atomic_state *state,
+					struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->uapi.plane->dev);
-	unsigned int rotation = plane_state->hw.rotation;
-	int src_x, src_y;
-	u32 offset;
-	int ret;
+	struct drm_plane *plane;
+	struct drm_plane_state *plane_state;
+	int ret, i;
 
-	ret = intel_plane_compute_gtt(plane_state);
+	ret = drm_atomic_add_affected_planes(state, crtc);
 	if (ret)
 		return ret;
 
-	if (!plane_state->uapi.visible)
-		return 0;
-
-	src_x = plane_state->uapi.src.x1 >> 16;
-	src_y = plane_state->uapi.src.y1 >> 16;
-
-	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
-	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
-						    plane_state, 0);
-
-	if (src_x != 0 || src_y != 0) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "Arbitrary cursor panning not supported\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * Put the final coordinates back so that the src
-	 * coordinate checks will see the right values.
-	 */
-	drm_rect_translate_to(&plane_state->uapi.src,
-			      src_x << 16, src_y << 16);
+	for_each_new_plane_in_state(state, plane, plane_state, i) {
+		if (plane_state->crtc != crtc)
+			continue;
 
-	/* ILK+ do this automagically in hardware */
-	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
-		const struct drm_framebuffer *fb = plane_state->hw.fb;
-		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
-		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+		if (ret)
+			return ret;
 
-		offset += (src_h * src_w - 1) * fb->format->cpp[0];
+		drm_atomic_set_fb_for_plane(plane_state, NULL);
 	}
 
-	plane_state->color_plane[0].offset = offset;
-	plane_state->color_plane[0].x = src_x;
-	plane_state->color_plane[0].y = src_y;
-
 	return 0;
 }
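
The GMCH 180-degree rotation handling deleted above moves the surface offset
to the last pixel, since the hardware then scans the cursor out backwards. A
sketch of that arithmetic (cpp is bytes per pixel; values illustrative):

static u32 rotated_cursor_offset_example(u32 offset, int src_w, int src_h,
					 int cpp)
{
	/* 64x64 ARGB cursor: offset += (64 * 64 - 1) * 4 = 16380 bytes */
	return offset + (src_h * src_w - 1) * cpp;
}
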
 
-static int intel_check_cursor(struct intel_crtc_state *crtc_state,
-			      struct intel_plane_state *plane_state)
+int intel_get_load_detect_pipe(struct drm_connector *connector,
+			       struct intel_load_detect_pipe *old,
+			       struct drm_modeset_acquire_ctx *ctx)
 {
-	const struct drm_framebuffer *fb = plane_state->hw.fb;
-	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
-	const struct drm_rect src = plane_state->uapi.src;
-	const struct drm_rect dst = plane_state->uapi.dst;
-	int ret;
-
-	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
-		drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
-		return -EINVAL;
-	}
-
-	ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
-						DRM_PLANE_HELPER_NO_SCALING,
-						DRM_PLANE_HELPER_NO_SCALING,
-						true);
-	if (ret)
-		return ret;
+	struct intel_crtc *intel_crtc;
+	struct intel_encoder *intel_encoder =
+		intel_attached_encoder(to_intel_connector(connector));
+	struct drm_crtc *possible_crtc;
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_crtc *crtc = NULL;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_atomic_state *state = NULL, *restore_state = NULL;
+	struct drm_connector_state *connector_state;
+	struct intel_crtc_state *crtc_state;
+	int ret, i = -1;
 
-	/* Use the unclipped src/dst rectangles, which we program to hw */
-	plane_state->uapi.src = src;
-	plane_state->uapi.dst = dst;
+	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+		    connector->base.id, connector->name,
+		    encoder->base.id, encoder->name);
 
-	ret = intel_cursor_check_surface(plane_state);
-	if (ret)
-		return ret;
+	old->restore_state = NULL;
 
-	if (!plane_state->uapi.visible)
-		return 0;
+	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
 
-	ret = intel_plane_check_src_coordinates(plane_state);
-	if (ret)
-		return ret;
+	/*
+	 * Algorithm gets a little messy:
+	 *
+	 *   - if the connector already has an assigned crtc, use it (but make
+	 *     sure it's on first)
+	 *
+	 *   - try to find the first unused crtc that can drive this connector,
+	 *     and use that if we find one
+	 */
 
-	return 0;
-}
+	/* See if we already have a CRTC for this connector */
+	if (connector->state->crtc) {
+		crtc = connector->state->crtc;
 
-static unsigned int
-i845_cursor_max_stride(struct intel_plane *plane,
-		       u32 pixel_format, u64 modifier,
-		       unsigned int rotation)
-{
-	return 2048;
-}
+		ret = drm_modeset_lock(&crtc->mutex, ctx);
+		if (ret)
+			goto fail;
 
-static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
-	u32 cntl = 0;
+		/* Make sure the crtc and connector are running */
+		goto found;
+	}
 
-	if (crtc_state->gamma_enable)
-		cntl |= CURSOR_GAMMA_ENABLE;
+	/* Find an unused one (if possible) */
+	for_each_crtc(dev, possible_crtc) {
+		i++;
+		if (!(encoder->possible_crtcs & (1 << i)))
+			continue;
 
-	return cntl;
-}
+		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
+		if (ret)
+			goto fail;
 
-static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
-			   const struct intel_plane_state *plane_state)
-{
-	return CURSOR_ENABLE |
-		CURSOR_FORMAT_ARGB |
-		CURSOR_STRIDE(plane_state->color_plane[0].stride);
-}
+		if (possible_crtc->state->enable) {
+			drm_modeset_unlock(&possible_crtc->mutex);
+			continue;
+		}
 
-static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
-	int width = drm_rect_width(&plane_state->uapi.dst);
+		crtc = possible_crtc;
+		break;
+	}
 
 	/*
-	 * 845g/865g are only limited by the width of their cursors;
-	 * the height is arbitrary up to the precision of the register.
+	 * If we didn't find an unused CRTC, don't use any.
 	 */
-	return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
-}
-
-static int i845_check_cursor(struct intel_crtc_state *crtc_state,
-			     struct intel_plane_state *plane_state)
-{
-	const struct drm_framebuffer *fb = plane_state->hw.fb;
-	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
-	int ret;
-
-	ret = intel_check_cursor(crtc_state, plane_state);
-	if (ret)
-		return ret;
+	if (!crtc) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "no pipe available for load-detect\n");
+		ret = -ENODEV;
+		goto fail;
+	}
 
-	/* if we want to turn off the cursor ignore width and height */
-	if (!fb)
-		return 0;
+found:
+	intel_crtc = to_intel_crtc(crtc);
 
-	/* Check for which cursor types we support */
-	if (!i845_cursor_size_ok(plane_state)) {
-		drm_dbg_kms(&i915->drm,
-			    "Cursor dimension %dx%d not supported\n",
-			    drm_rect_width(&plane_state->uapi.dst),
-			    drm_rect_height(&plane_state->uapi.dst));
-		return -EINVAL;
+	state = drm_atomic_state_alloc(dev);
+	restore_state = drm_atomic_state_alloc(dev);
+	if (!state || !restore_state) {
+		ret = -ENOMEM;
+		goto fail;
 	}
 
-	drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
-		    plane_state->color_plane[0].stride != fb->pitches[0]);
+	state->acquire_ctx = ctx;
+	restore_state->acquire_ctx = ctx;
 
-	switch (fb->pitches[0]) {
-	case 256:
-	case 512:
-	case 1024:
-	case 2048:
-		break;
-	default:
-		 drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
-			     fb->pitches[0]);
-		return -EINVAL;
+	connector_state = drm_atomic_get_connector_state(state, connector);
+	if (IS_ERR(connector_state)) {
+		ret = PTR_ERR(connector_state);
+		goto fail;
 	}
 
-	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
+	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
+	if (ret)
+		goto fail;
 
-	return 0;
-}
+	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+	if (IS_ERR(crtc_state)) {
+		ret = PTR_ERR(crtc_state);
+		goto fail;
+	}
 
-static void i845_update_cursor(struct intel_plane *plane,
-			       const struct intel_crtc_state *crtc_state,
-			       const struct intel_plane_state *plane_state)
-{
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	u32 cntl = 0, base = 0, pos = 0, size = 0;
-	unsigned long irqflags;
+	crtc_state->uapi.active = true;
 
-	if (plane_state && plane_state->uapi.visible) {
-		unsigned int width = drm_rect_width(&plane_state->uapi.dst);
-		unsigned int height = drm_rect_height(&plane_state->uapi.dst);
+	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
+					   &load_detect_mode);
+	if (ret)
+		goto fail;
 
-		cntl = plane_state->ctl |
-			i845_cursor_ctl_crtc(crtc_state);
+	ret = intel_modeset_disable_planes(state, crtc);
+	if (ret)
+		goto fail;
 
-		size = (height << 12) | width;
+	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
+	if (!ret)
+		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
+	if (!ret)
+		ret = drm_atomic_add_affected_planes(restore_state, crtc);
+	if (ret) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "Failed to create a copy of old state to restore: %i\n",
+			    ret);
+		goto fail;
+	}
 
-		base = intel_cursor_base(plane_state);
-		pos = intel_cursor_position(plane_state);
+	ret = drm_atomic_commit(state);
+	if (ret) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "failed to set mode on load-detect pipe\n");
+		goto fail;
 	}
 
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	old->restore_state = restore_state;
+	drm_atomic_state_put(state);
 
-	/* On these chipsets we can only modify the base/size/stride
-	 * whilst the cursor is disabled.
-	 */
-	if (plane->cursor.base != base ||
-	    plane->cursor.size != size ||
-	    plane->cursor.cntl != cntl) {
-		intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
-		intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
-		intel_de_write_fw(dev_priv, CURSIZE, size);
-		intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
-		intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);
-
-		plane->cursor.base = base;
-		plane->cursor.size = size;
-		plane->cursor.cntl = cntl;
-	} else {
-		intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+	/* let the connector get through one full cycle before testing */
+	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
+	return true;
+
+fail:
+	if (state) {
+		drm_atomic_state_put(state);
+		state = NULL;
+	}
+	if (restore_state) {
+		drm_atomic_state_put(restore_state);
+		restore_state = NULL;
 	}
 
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
+	if (ret == -EDEADLK)
+		return ret;
 
-static void i845_disable_cursor(struct intel_plane *plane,
-				const struct intel_crtc_state *crtc_state)
-{
-	i845_update_cursor(plane, crtc_state, NULL);
+	return false;
 }
 
-static bool i845_cursor_get_hw_state(struct intel_plane *plane,
-				     enum pipe *pipe)
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+				    struct intel_load_detect_pipe *old,
+				    struct drm_modeset_acquire_ctx *ctx)
 {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	enum intel_display_power_domain power_domain;
-	intel_wakeref_t wakeref;
-	bool ret;
-
-	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
-	if (!wakeref)
-		return false;
-
-	ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+	struct intel_encoder *intel_encoder =
+		intel_attached_encoder(to_intel_connector(connector));
+	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_atomic_state *state = old->restore_state;
+	int ret;
 
-	*pipe = PIPE_A;
+	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+		    connector->base.id, connector->name,
+		    encoder->base.id, encoder->name);
 
-	intel_display_power_put(dev_priv, power_domain, wakeref);
+	if (!state)
+		return;
 
-	return ret;
+	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
+	if (ret)
+		drm_dbg_kms(&i915->drm,
+			    "Couldn't release load detect pipe: %i\n", ret);
+	drm_atomic_state_put(state);
 }
 
-static unsigned int
-i9xx_cursor_max_stride(struct intel_plane *plane,
-		       u32 pixel_format, u64 modifier,
-		       unsigned int rotation)
+static int i9xx_pll_refclk(struct drm_device *dev,
+			   const struct intel_crtc_state *pipe_config)
 {
-	return plane->base.dev->mode_config.cursor_width * 4;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	u32 dpll = pipe_config->dpll_hw_state.dpll;
+
+	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
+		return dev_priv->vbt.lvds_ssc_freq;
+	else if (HAS_PCH_SPLIT(dev_priv))
+		return 120000;
+	else if (!IS_GEN(dev_priv, 2))
+		return 96000;
+	else
+		return 48000;
 }
 
-static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+				struct intel_crtc_state *pipe_config)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	u32 cntl = 0;
-
-	if (INTEL_GEN(dev_priv) >= 11)
-		return cntl;
-
-	if (crtc_state->gamma_enable)
-		cntl = MCURSOR_GAMMA_ENABLE;
-
-	if (crtc_state->csc_enable)
-		cntl |= MCURSOR_PIPE_CSC_ENABLE;
-
-	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
-		cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
-
-	return cntl;
-}
-
-static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
-			   const struct intel_plane_state *plane_state)
-{
-	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->uapi.plane->dev);
-	u32 cntl = 0;
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	enum pipe pipe = crtc->pipe;
+	u32 dpll = pipe_config->dpll_hw_state.dpll;
+	u32 fp;
+	struct dpll clock;
+	int port_clock;
+	int refclk = i9xx_pll_refclk(dev, pipe_config);
 
-	if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
-		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
+	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+		fp = pipe_config->dpll_hw_state.fp0;
+	else
+		fp = pipe_config->dpll_hw_state.fp1;
 
-	switch (drm_rect_width(&plane_state->uapi.dst)) {
-	case 64:
-		cntl |= MCURSOR_MODE_64_ARGB_AX;
-		break;
-	case 128:
-		cntl |= MCURSOR_MODE_128_ARGB_AX;
-		break;
-	case 256:
-		cntl |= MCURSOR_MODE_256_ARGB_AX;
-		break;
-	default:
-		MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
-		return 0;
+	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+	if (IS_PINEVIEW(dev_priv)) {
+		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
+	} else {
+		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
 	}
 
-	if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
-		cntl |= MCURSOR_ROTATE_180;
+	if (!IS_GEN(dev_priv, 2)) {
+		if (IS_PINEVIEW(dev_priv))
+			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
+		else
+			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+			       DPLL_FPA01_P1_POST_DIV_SHIFT);
 
-	return cntl;
-}
+		switch (dpll & DPLL_MODE_MASK) {
+		case DPLLB_MODE_DAC_SERIAL:
+			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
+				5 : 10;
+			break;
+		case DPLLB_MODE_LVDS:
+			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
+				7 : 14;
+			break;
+		default:
+			drm_dbg_kms(&dev_priv->drm,
+				    "Unknown DPLL mode %08x in programmed "
+				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
+			return;
+		}
 
-static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
-	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->uapi.plane->dev);
-	int width = drm_rect_width(&plane_state->uapi.dst);
-	int height = drm_rect_height(&plane_state->uapi.dst);
+		if (IS_PINEVIEW(dev_priv))
+			port_clock = pnv_calc_dpll_params(refclk, &clock);
+		else
+			port_clock = i9xx_calc_dpll_params(refclk, &clock);
+	} else {
+		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
+								 LVDS);
+		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
 
-	if (!intel_cursor_size_ok(plane_state))
-		return false;
+		if (is_lvds) {
+			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+				       DPLL_FPA01_P1_POST_DIV_SHIFT);
 
-	/* Cursor width is limited to a few power-of-two sizes */
-	switch (width) {
-	case 256:
-	case 128:
-	case 64:
-		break;
-	default:
-		return false;
+			if (lvds & LVDS_CLKB_POWER_UP)
+				clock.p2 = 7;
+			else
+				clock.p2 = 14;
+		} else {
+			if (dpll & PLL_P1_DIVIDE_BY_TWO)
+				clock.p1 = 2;
+			else {
+				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+			}
+			if (dpll & PLL_P2_DIVIDE_BY_4)
+				clock.p2 = 4;
+			else
+				clock.p2 = 2;
+		}
+
+		port_clock = i9xx_calc_dpll_params(refclk, &clock);
 	}
 
 	/*
-	 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
-	 * height from 8 lines up to the cursor width, when the
-	 * cursor is not rotated. Everything else requires square
-	 * cursors.
+	 * This value includes pixel_multiplier. We will use
+	 * port_clock to compute adjusted_mode.crtc_clock in the
+	 * encoder's get_config() function.
 	 */
-	if (HAS_CUR_FBC(dev_priv) &&
-	    plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
-		if (height < 8 || height > width)
-			return false;
-	} else {
-		if (height != width)
-			return false;
-	}
-
-	return true;
+	pipe_config->port_clock = port_clock;
 }
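
The readout above hands the decoded dividers to pnv/i9xx_calc_dpll_params().
Roughly, the i9xx variant computes the following (a hedged reconstruction; the
+2 offsets and the 5x m1 weighting are assumptions taken from those helpers):

static int i9xx_dot_clock_example(int refclk, int n, int m1, int m2,
				  int p1, int p2)
{
	int m = 5 * (m1 + 2) + (m2 + 2);
	int vco = refclk * m / (n + 2);

	return vco / (p1 * p2);
}
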
 
-static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
-			     struct intel_plane_state *plane_state)
+int intel_dotclock_calculate(int link_freq,
+			     const struct intel_link_m_n *m_n)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->hw.fb;
-	enum pipe pipe = plane->pipe;
-	int ret;
-
-	ret = intel_check_cursor(crtc_state, plane_state);
-	if (ret)
-		return ret;
+	/*
+	 * The calculation for the data clock is:
+	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
+	 * But we want to avoid losing precision if possible, so:
+	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
+	 *
+	 * and the dot clock recovered from the link M/N pair is simpler:
+	 * pixel_clock = (link_m * link_clock) / link_n
+	 */
 
-	/* if we want to turn off the cursor ignore width and height */
-	if (!fb)
+	if (!m_n->link_n)
 		return 0;
 
-	/* Check for which cursor types we support */
-	if (!i9xx_cursor_size_ok(plane_state)) {
-		drm_dbg(&dev_priv->drm,
-			"Cursor dimension %dx%d not supported\n",
-			drm_rect_width(&plane_state->uapi.dst),
-			drm_rect_height(&plane_state->uapi.dst));
-		return -EINVAL;
-	}
+	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
+}
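
A worked example of the division above (illustrative numbers): with a 2.7 GHz
DP link and a link M/N of 11/20, the recovered dotclock comes out at 1080p60:

static int dotclock_example(void)
{
	u32 link_m = 11, link_n = 20;
	int link_freq = 270000;	/* kHz */

	return (int)((u64)link_m * link_freq / link_n);	/* 148500 kHz */
}
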
 
-	drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
-		    plane_state->color_plane[0].stride != fb->pitches[0]);
+static void ilk_pch_clock_get(struct intel_crtc *crtc,
+			      struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	if (fb->pitches[0] !=
-	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "Invalid cursor stride (%u) (cursor width %d)\n",
-			    fb->pitches[0],
-			    drm_rect_width(&plane_state->uapi.dst));
-		return -EINVAL;
-	}
+	/* read out port_clock from the DPLL */
+	i9xx_crtc_clock_get(crtc, pipe_config);
 
 	/*
-	 * There's something wrong with the cursor on CHV pipe C.
-	 * If it straddles the left edge of the screen then
-	 * moving it away from the edge or disabling it often
-	 * results in a pipe underrun, and often that can lead to
-	 * a dead pipe (constant underrun reported, and it scans
-	 * out just a solid color). To recover from that, the
-	 * display power well must be turned off and on again.
-	 * Refuse to put the cursor into that compromised position.
+	 * In case there is an active pipe without active ports,
+	 * we may need some idea for the dotclock anyway.
+	 * Calculate one based on the FDI configuration.
 	 */
-	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
-	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "CHV cursor C not allowed to straddle the left screen edge\n");
-		return -EINVAL;
-	}
-
-	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
-
-	return 0;
+	pipe_config->hw.adjusted_mode.crtc_clock =
+		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
+					 &pipe_config->fdi_m_n);
 }
 
-static void i9xx_update_cursor(struct intel_plane *plane,
-			       const struct intel_crtc_state *crtc_state,
-			       const struct intel_plane_state *plane_state)
+/* Returns the currently programmed mode of the given encoder. */
+struct drm_display_mode *
+intel_encoder_current_mode(struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	enum pipe pipe = plane->pipe;
-	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
-	unsigned long irqflags;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_crtc_state *crtc_state;
+	struct drm_display_mode *mode;
+	struct intel_crtc *crtc;
+	enum pipe pipe;
 
-	if (plane_state && plane_state->uapi.visible) {
-		unsigned width = drm_rect_width(&plane_state->uapi.dst);
-		unsigned height = drm_rect_height(&plane_state->uapi.dst);
+	if (!encoder->get_hw_state(encoder, &pipe))
+		return NULL;
 
-		cntl = plane_state->ctl |
-			i9xx_cursor_ctl_crtc(crtc_state);
+	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 
-		if (width != height)
-			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
 
-		base = intel_cursor_base(plane_state);
-		pos = intel_cursor_position(plane_state);
+	crtc_state = intel_crtc_state_alloc(crtc);
+	if (!crtc_state) {
+		kfree(mode);
+		return NULL;
 	}
 
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	if (!intel_crtc_get_pipe_config(crtc_state)) {
+		kfree(crtc_state);
+		kfree(mode);
+		return NULL;
+	}
 
-	/*
-	 * On some platforms writing CURCNTR first will also
-	 * cause CURPOS to be armed by the CURBASE write.
-	 * Without the CURCNTR write the CURPOS write would
-	 * arm itself. Thus we always update CURCNTR before
-	 * CURPOS.
-	 *
-	 * On other platforms CURPOS always requires the
-	 * CURBASE write to arm the update. Additionally,
-	 * a write to any of the cursor registers will cancel
-	 * an already armed cursor update. Thus leaving out
-	 * the CURBASE write after CURPOS could lead to a
-	 * cursor that doesn't appear to move, or even change
-	 * shape. Thus we always write CURBASE.
-	 *
-	 * The other registers are armed by the CURBASE write
-	 * except when the plane is getting enabled at which time
-	 * the CURCNTR write arms the update.
-	 */
+	intel_encoder_get_config(encoder, crtc_state);
 
-	if (INTEL_GEN(dev_priv) >= 9)
-		skl_write_cursor_wm(plane, crtc_state);
-
-	if (!needs_modeset(crtc_state))
-		intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
-
-	if (plane->cursor.base != base ||
-	    plane->cursor.size != fbc_ctl ||
-	    plane->cursor.cntl != cntl) {
-		if (HAS_CUR_FBC(dev_priv))
-			intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
-					  fbc_ctl);
-		intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
-		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
-		intel_de_write_fw(dev_priv, CURBASE(pipe), base);
-
-		plane->cursor.base = base;
-		plane->cursor.size = fbc_ctl;
-		plane->cursor.cntl = cntl;
-	} else {
-		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
-		intel_de_write_fw(dev_priv, CURBASE(pipe), base);
-	}
+	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
 
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
+	kfree(crtc_state);
 
-static void i9xx_disable_cursor(struct intel_plane *plane,
-				const struct intel_crtc_state *crtc_state)
-{
-	i9xx_update_cursor(plane, crtc_state, NULL);
+	return mode;
 }
 
-static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
-				     enum pipe *pipe)
+/**
+ * intel_wm_need_update - Check whether watermarks need updating
+ * @cur: current plane state
+ * @new: new plane state
+ *
+ * Check current plane state versus the new one to determine whether
+ * watermarks need to be recalculated.
+ *
+ * Returns: true if the watermarks need to be recalculated, false otherwise.
+ */
+static bool intel_wm_need_update(const struct intel_plane_state *cur,
+				 struct intel_plane_state *new)
 {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	enum intel_display_power_domain power_domain;
-	intel_wakeref_t wakeref;
-	bool ret;
-	u32 val;
+	/* Update watermarks on tiling or size changes. */
+	if (new->uapi.visible != cur->uapi.visible)
+		return true;
 
-	/*
-	 * Not 100% correct for planes that can move between pipes,
-	 * but that's only the case for gen2-3 which don't have any
-	 * display power wells.
-	 */
-	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
-	if (!wakeref)
+	if (!cur->hw.fb || !new->hw.fb)
 		return false;
 
-	val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
-
-	ret = val & MCURSOR_MODE;
-
-	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
-		*pipe = plane->pipe;
-	else
-		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
-			MCURSOR_PIPE_SELECT_SHIFT;
-
-	intel_display_power_put(dev_priv, power_domain, wakeref);
+	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
+	    cur->hw.rotation != new->hw.rotation ||
+	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
+	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
+	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
+	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
+		return true;
 
-	return ret;
+	return false;
 }
 
-/* VESA 640x480x72Hz mode to set on the pipe */
-static const struct drm_display_mode load_detect_mode = {
-	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
-		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-};
-
-struct drm_framebuffer *
-intel_framebuffer_create(struct drm_i915_gem_object *obj,
-			 struct drm_mode_fb_cmd2 *mode_cmd)
+static bool needs_scaling(const struct intel_plane_state *state)
 {
-	struct intel_framebuffer *intel_fb;
-	int ret;
-
-	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
-	if (!intel_fb)
-		return ERR_PTR(-ENOMEM);
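+	/* uapi.src is in 16.16 fixed point, uapi.dst in whole pixels */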
+	int src_w = drm_rect_width(&state->uapi.src) >> 16;
+	int src_h = drm_rect_height(&state->uapi.src) >> 16;
+	int dst_w = drm_rect_width(&state->uapi.dst);
+	int dst_h = drm_rect_height(&state->uapi.dst);
 
-	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
-	if (ret)
-		goto err;
+	return (src_w != dst_w || src_h != dst_h);
+}
 
-	return &intel_fb->base;
-
-err:
-	kfree(intel_fb);
-	return ERR_PTR(ret);
-}
-
-static int intel_modeset_disable_planes(struct drm_atomic_state *state,
-					struct drm_crtc *crtc)
+int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
+				    struct intel_crtc_state *crtc_state,
+				    const struct intel_plane_state *old_plane_state,
+				    struct intel_plane_state *plane_state)
 {
-	struct drm_plane *plane;
-	struct drm_plane_state *plane_state;
-	int ret, i;
-
-	ret = drm_atomic_add_affected_planes(state, crtc);
-	if (ret)
-		return ret;
-
-	for_each_new_plane_in_state(state, plane, plane_state, i) {
-		if (plane_state->crtc != crtc)
-			continue;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
+	bool was_crtc_enabled = old_crtc_state->hw.active;
+	bool is_crtc_enabled = crtc_state->hw.active;
+	bool turn_off, turn_on, visible, was_visible;
+	int ret;
 
-		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
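+	/* Gen9+ planes scale via the pipe scalers; the cursor plane never scales */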
+	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
+		ret = skl_update_scaler_plane(crtc_state, plane_state);
 		if (ret)
 			return ret;
-
-		drm_atomic_set_fb_for_plane(plane_state, NULL);
 	}
 
-	return 0;
-}
-
-int intel_get_load_detect_pipe(struct drm_connector *connector,
-			       struct intel_load_detect_pipe *old,
-			       struct drm_modeset_acquire_ctx *ctx)
-{
-	struct intel_crtc *intel_crtc;
-	struct intel_encoder *intel_encoder =
-		intel_attached_encoder(to_intel_connector(connector));
-	struct drm_crtc *possible_crtc;
-	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_crtc *crtc = NULL;
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_mode_config *config = &dev->mode_config;
-	struct drm_atomic_state *state = NULL, *restore_state = NULL;
-	struct drm_connector_state *connector_state;
-	struct intel_crtc_state *crtc_state;
-	int ret, i = -1;
-
-	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
-		    connector->base.id, connector->name,
-		    encoder->base.id, encoder->name);
-
-	old->restore_state = NULL;
+	was_visible = old_plane_state->uapi.visible;
+	visible = plane_state->uapi.visible;
 
-	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
+	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
+		was_visible = false;
 
 	/*
-	 * Algorithm gets a little messy:
-	 *
-	 *   - if the connector already has an assigned crtc, use it (but make
-	 *     sure it's on first)
+	 * Visibility is calculated as if the crtc were on, but
+	 * after scaler setup everything depends on it being off
+	 * when the crtc isn't active.
 	 *
-	 *   - try to find the first unused crtc that can drive this connector,
-	 *     and use that if we find one
+	 * FIXME this is wrong for watermarks. Watermarks should also
+	 * be computed as if the pipe were active. Perhaps move
+	 * per-plane wm computation to the .check_plane() hook, and
+	 * only combine the results from all planes in the current place?
 	 */
+	if (!is_crtc_enabled) {
+		intel_plane_set_invisible(crtc_state, plane_state);
+		visible = false;
+	}
 
-	/* See if we already have a CRTC for this connector */
-	if (connector->state->crtc) {
-		crtc = connector->state->crtc;
+	if (!was_visible && !visible)
+		return 0;
 
-		ret = drm_modeset_lock(&crtc->mutex, ctx);
-		if (ret)
-			goto fail;
+	turn_off = was_visible && (!visible || mode_changed);
+	turn_on = visible && (!was_visible || mode_changed);
 
-		/* Make sure the crtc and connector are running */
-		goto found;
-	}
+	drm_dbg_atomic(&dev_priv->drm,
+		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
+		       crtc->base.base.id, crtc->base.name,
+		       plane->base.base.id, plane->base.name,
+		       was_visible, visible,
+		       turn_off, turn_on, mode_changed);
 
-	/* Find an unused one (if possible) */
-	for_each_crtc(dev, possible_crtc) {
-		i++;
-		if (!(encoder->possible_crtcs & (1 << i)))
-			continue;
+	if (turn_on) {
+		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+			crtc_state->update_wm_pre = true;
 
-		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
-		if (ret)
-			goto fail;
+		/* must disable cxsr around plane enable/disable */
+		if (plane->id != PLANE_CURSOR)
+			crtc_state->disable_cxsr = true;
+	} else if (turn_off) {
+		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+			crtc_state->update_wm_post = true;
 
-		if (possible_crtc->state->enable) {
-			drm_modeset_unlock(&possible_crtc->mutex);
-			continue;
+		/* must disable cxsr around plane enable/disable */
+		if (plane->id != PLANE_CURSOR)
+			crtc_state->disable_cxsr = true;
+	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
+		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
+			/* FIXME bollocks */
+			crtc_state->update_wm_pre = true;
+			crtc_state->update_wm_post = true;
 		}
-
-		crtc = possible_crtc;
-		break;
 	}
 
+	if (visible || was_visible)
+		crtc_state->fb_bits |= plane->frontbuffer_bit;
+
 	/*
-	 * If we didn't find an unused CRTC, don't use any.
+	 * ILK/SNB DVSACNTR/Sprite Enable
+	 * IVB SPR_CTL/Sprite Enable
+	 * "When in Self Refresh Big FIFO mode, a write to enable the
+	 *  plane will be internally buffered and delayed while Big FIFO
+	 *  mode is exiting."
+	 *
+	 * Which means that enabling the sprite can take an extra frame
+	 * when we start in big FIFO mode (LP1+). Thus we need to drop
+	 * down to LP0 and wait for vblank in order to make sure the
+	 * sprite gets enabled on the next vblank after the register write.
+	 * Doing otherwise would risk enabling the sprite one frame after
+	 * we've already signalled flip completion. We can resume LP1+
+	 * once the sprite has been enabled.
+	 *
+	 * WaCxSRDisabledForSpriteScaling:ivb
+	 * IVB SPR_SCALE/Scaling Enable
+	 * "Low Power watermarks must be disabled for at least one
+	 *  frame before enabling sprite scaling, and kept disabled
+	 *  until sprite scaling is disabled."
+	 *
+	 * ILK/SNB DVSASCALE/Scaling Enable
+	 * "When in Self Refresh Big FIFO mode, scaling enable will be
+	 *  masked off while Big FIFO mode is exiting."
+	 *
+	 * Despite the w/a only being listed for IVB we assume that
+	 * the ILK/SNB note has similar ramifications, hence we apply
+	 * the w/a on all three platforms.
+	 *
+	 * Experimental results suggest this is also needed for the primary
+	 * plane, not only the sprite plane.
 	 */
-	if (!crtc) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "no pipe available for load-detect\n");
-		ret = -ENODEV;
-		goto fail;
-	}
+	if (plane->id != PLANE_CURSOR &&
+	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
+	     IS_IVYBRIDGE(dev_priv)) &&
+	    (turn_on || (!needs_scaling(old_plane_state) &&
+			 needs_scaling(plane_state))))
+		crtc_state->disable_lp_wm = true;
 
-found:
-	intel_crtc = to_intel_crtc(crtc);
+	return 0;
+}
 
-	state = drm_atomic_state_alloc(dev);
-	restore_state = drm_atomic_state_alloc(dev);
-	if (!state || !restore_state) {
-		ret = -ENOMEM;
-		goto fail;
-	}
+static bool encoders_cloneable(const struct intel_encoder *a,
+			       const struct intel_encoder *b)
+{
+	/* masks could be asymmetric, so check both ways */
+	return a == b || (a->cloneable & (1 << b->type) &&
+			  b->cloneable & (1 << a->type));
+}
 
-	state->acquire_ctx = ctx;
-	restore_state->acquire_ctx = ctx;
+static bool check_single_encoder_cloning(struct intel_atomic_state *state,
+					 struct intel_crtc *crtc,
+					 struct intel_encoder *encoder)
+{
+	struct intel_encoder *source_encoder;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	int i;
 
-	connector_state = drm_atomic_get_connector_state(state, connector);
-	if (IS_ERR(connector_state)) {
-		ret = PTR_ERR(connector_state);
-		goto fail;
+	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
+		if (connector_state->crtc != &crtc->base)
+			continue;
+
+		source_encoder =
+			to_intel_encoder(connector_state->best_encoder);
+		if (!encoders_cloneable(encoder, source_encoder))
+			return false;
 	}
 
-	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
-	if (ret)
-		goto fail;
+	return true;
+}
 
-	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
-	if (IS_ERR(crtc_state)) {
-		ret = PTR_ERR(crtc_state);
-		goto fail;
-	}
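+/*
+ * Pull the planar slave (Y) planes into the atomic state so they are
+ * checked and reprogrammed together with their master plane.
+ */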
+static int icl_add_linked_planes(struct intel_atomic_state *state)
+{
+	struct intel_plane *plane, *linked;
+	struct intel_plane_state *plane_state, *linked_plane_state;
+	int i;
 
-	crtc_state->uapi.active = true;
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		linked = plane_state->planar_linked_plane;
 
-	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
-					   &load_detect_mode);
-	if (ret)
-		goto fail;
+		if (!linked)
+			continue;
 
-	ret = intel_modeset_disable_planes(state, crtc);
-	if (ret)
-		goto fail;
+		linked_plane_state = intel_atomic_get_plane_state(state, linked);
+		if (IS_ERR(linked_plane_state))
+			return PTR_ERR(linked_plane_state);
 
-	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
-	if (!ret)
-		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
-	if (!ret)
-		ret = drm_atomic_add_affected_planes(restore_state, crtc);
-	if (ret) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "Failed to create a copy of old state to restore: %i\n",
-			    ret);
-		goto fail;
-	}
-
-	ret = drm_atomic_commit(state);
-	if (ret) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "failed to set mode on load-detect pipe\n");
-		goto fail;
-	}
-
-	old->restore_state = restore_state;
-	drm_atomic_state_put(state);
-
-	/* let the connector get through one full cycle before testing */
-	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-	return true;
-
-fail:
-	if (state) {
-		drm_atomic_state_put(state);
-		state = NULL;
-	}
-	if (restore_state) {
-		drm_atomic_state_put(restore_state);
-		restore_state = NULL;
+		drm_WARN_ON(state->base.dev,
+			    linked_plane_state->planar_linked_plane != plane);
+		drm_WARN_ON(state->base.dev,
+			    linked_plane_state->planar_slave == plane_state->planar_slave);
 	}
 
-	if (ret == -EDEADLK)
-		return ret;
-
-	return false;
+	return 0;
 }
 
-void intel_release_load_detect_pipe(struct drm_connector *connector,
-				    struct intel_load_detect_pipe *old,
-				    struct drm_modeset_acquire_ctx *ctx)
+static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
 {
-	struct intel_encoder *intel_encoder =
-		intel_attached_encoder(to_intel_connector(connector));
-	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
-	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_atomic_state *state = old->restore_state;
-	int ret;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+	struct intel_plane *plane, *linked;
+	struct intel_plane_state *plane_state;
+	int i;
 
-	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
-		    connector->base.id, connector->name,
-		    encoder->base.id, encoder->name);
+	if (INTEL_GEN(dev_priv) < 11)
+		return 0;
 
-	if (!state)
-		return;
+	/*
+	 * Destroy all old plane links and make the slave plane invisible
+	 * in the crtc_state->active_planes mask.
+	 */
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
+			continue;
 
-	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
-	if (ret)
-		drm_dbg_kms(&i915->drm,
-			    "Couldn't release load detect pipe: %i\n", ret);
-	drm_atomic_state_put(state);
-}
+		plane_state->planar_linked_plane = NULL;
+		if (plane_state->planar_slave && !plane_state->uapi.visible) {
+			crtc_state->enabled_planes &= ~BIT(plane->id);
+			crtc_state->active_planes &= ~BIT(plane->id);
+			crtc_state->update_planes |= BIT(plane->id);
+		}
 
-static int i9xx_pll_refclk(struct drm_device *dev,
-			   const struct intel_crtc_state *pipe_config)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	u32 dpll = pipe_config->dpll_hw_state.dpll;
+		plane_state->planar_slave = false;
+	}
 
-	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
-		return dev_priv->vbt.lvds_ssc_freq;
-	else if (HAS_PCH_SPLIT(dev_priv))
-		return 120000;
-	else if (!IS_GEN(dev_priv, 2))
-		return 96000;
-	else
-		return 48000;
-}
+	if (!crtc_state->nv12_planes)
+		return 0;
 
-/* Returns the clock of the currently programmed mode of the given pipe. */
-static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
-				struct intel_crtc_state *pipe_config)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	enum pipe pipe = crtc->pipe;
-	u32 dpll = pipe_config->dpll_hw_state.dpll;
-	u32 fp;
-	struct dpll clock;
-	int port_clock;
-	int refclk = i9xx_pll_refclk(dev, pipe_config);
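+	/* Pair each NV12 (planar YUV) plane with a free Y plane on this pipe */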
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		struct intel_plane_state *linked_state = NULL;
 
-	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
-		fp = pipe_config->dpll_hw_state.fp0;
-	else
-		fp = pipe_config->dpll_hw_state.fp1;
+		if (plane->pipe != crtc->pipe ||
+		    !(crtc_state->nv12_planes & BIT(plane->id)))
+			continue;
 
-	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
-	if (IS_PINEVIEW(dev_priv)) {
-		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
-		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
-	} else {
-		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
-		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
-	}
+		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
+			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
+				continue;
 
-	if (!IS_GEN(dev_priv, 2)) {
-		if (IS_PINEVIEW(dev_priv))
-			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
-				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
-		else
-			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
-			       DPLL_FPA01_P1_POST_DIV_SHIFT);
+			if (crtc_state->active_planes & BIT(linked->id))
+				continue;
+
+			linked_state = intel_atomic_get_plane_state(state, linked);
+			if (IS_ERR(linked_state))
+				return PTR_ERR(linked_state);
 
-		switch (dpll & DPLL_MODE_MASK) {
-		case DPLLB_MODE_DAC_SERIAL:
-			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
-				5 : 10;
-			break;
-		case DPLLB_MODE_LVDS:
-			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
-				7 : 14;
 			break;
-		default:
+		}
+
+		if (!linked_state) {
 			drm_dbg_kms(&dev_priv->drm,
-				    "Unknown DPLL mode %08x in programmed "
-				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
-			return;
+				    "Need %d free Y planes for planar YUV\n",
+				    hweight8(crtc_state->nv12_planes));
+
+			return -EINVAL;
 		}
 
-		if (IS_PINEVIEW(dev_priv))
-			port_clock = pnv_calc_dpll_params(refclk, &clock);
-		else
-			port_clock = i9xx_calc_dpll_params(refclk, &clock);
-	} else {
-		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
-								 LVDS);
-		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
+		plane_state->planar_linked_plane = linked;
 
-		if (is_lvds) {
-			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
-				       DPLL_FPA01_P1_POST_DIV_SHIFT);
+		linked_state->planar_slave = true;
+		linked_state->planar_linked_plane = plane;
+		crtc_state->enabled_planes |= BIT(linked->id);
+		crtc_state->active_planes |= BIT(linked->id);
+		crtc_state->update_planes |= BIT(linked->id);
+		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
+			    linked->base.name, plane->base.name);
 
-			if (lvds & LVDS_CLKB_POWER_UP)
-				clock.p2 = 7;
-			else
-				clock.p2 = 14;
-		} else {
-			if (dpll & PLL_P1_DIVIDE_BY_TWO)
-				clock.p1 = 2;
-			else {
-				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
-					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
-			}
-			if (dpll & PLL_P2_DIVIDE_BY_4)
-				clock.p2 = 4;
+		/* Copy parameters to slave plane */
+		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
+		linked_state->color_ctl = plane_state->color_ctl;
+		linked_state->view = plane_state->view;
+		memcpy(linked_state->color_plane, plane_state->color_plane,
+		       sizeof(linked_state->color_plane));
+
+		intel_plane_copy_hw_state(linked_state, plane_state);
+		linked_state->uapi.src = plane_state->uapi.src;
+		linked_state->uapi.dst = plane_state->uapi.dst;
+
+		if (icl_is_hdr_plane(dev_priv, plane->id)) {
+			if (linked->id == PLANE_SPRITE5)
+				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
+			else if (linked->id == PLANE_SPRITE4)
+				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
+			else if (linked->id == PLANE_SPRITE3)
+				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
+			else if (linked->id == PLANE_SPRITE2)
+				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
 			else
-				clock.p2 = 2;
+				MISSING_CASE(linked->id);
 		}
-
-		port_clock = i9xx_calc_dpll_params(refclk, &clock);
 	}
 
-	/*
-	 * This value includes pixel_multiplier. We will use
-	 * port_clock to compute adjusted_mode.crtc_clock in the
-	 * encoder's get_config() function.
-	 */
-	pipe_config->port_clock = port_clock;
+	return 0;
 }
 
-int intel_dotclock_calculate(int link_freq,
-			     const struct intel_link_m_n *m_n)
+static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
 {
-	/*
-	 * The calculation for the data clock is:
-	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
-	 * But we want to avoid losing precison if possible, so:
-	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
-	 *
-	 * and the link clock is simpler:
-	 * link_clock = (m * link_clock) / n
-	 */
-
-	if (!m_n->link_n)
-		return 0;
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(new_crtc_state->uapi.state);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
 
-	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
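+	/* !x != !y catches empty <-> non-empty transitions of the C8 plane mask */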
+	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
 }
 
-static void ilk_pch_clock_get(struct intel_crtc *crtc,
-			      struct intel_crtc_state *pipe_config)
+static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct drm_display_mode *pipe_mode =
+		&crtc_state->hw.pipe_mode;
+	int linetime_wm;
 
-	/* read out port_clock from the DPLL */
-	i9xx_crtc_clock_get(crtc, pipe_config);
+	if (!crtc_state->hw.enable)
+		return 0;
 
-	/*
-	 * In case there is an active pipe without active ports,
-	 * we may need some idea for the dotclock anyway.
-	 * Calculate one based on the FDI configuration.
-	 */
-	pipe_config->hw.adjusted_mode.crtc_clock =
-		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
-					 &pipe_config->fdi_m_n);
-}
-
-static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
-				   struct intel_crtc *crtc)
-{
-	memset(crtc_state, 0, sizeof(*crtc_state));
-
-	__drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
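+	/*
+	 * Line time in 0.125 us units: htotal * 1000 / crtc_clock (kHz)
+	 * is the line duration in us, and the extra * 8 scales to 0.125 us.
+	 */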
+	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
+					pipe_mode->crtc_clock);
 
-	crtc_state->cpu_transcoder = INVALID_TRANSCODER;
-	crtc_state->master_transcoder = INVALID_TRANSCODER;
-	crtc_state->hsw_workaround_pipe = INVALID_PIPE;
-	crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
-	crtc_state->scaler_state.scaler_id = -1;
-	crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
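+	/* Clamp to the 9-bit linetime register field */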
+	return min(linetime_wm, 0x1ff);
 }
 
-static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
+static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
+			       const struct intel_cdclk_state *cdclk_state)
 {
-	struct intel_crtc_state *crtc_state;
+	const struct drm_display_mode *pipe_mode =
+		&crtc_state->hw.pipe_mode;
+	int linetime_wm;
 
-	crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
+	if (!crtc_state->hw.enable)
+		return 0;
 
-	if (crtc_state)
-		intel_crtc_state_reset(crtc_state, crtc);
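+	/* IPS linetime is based on cdclk rather than the pixel clock */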
+	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
+					cdclk_state->logical.cdclk);
 
-	return crtc_state;
+	return min(linetime_wm, 0x1ff);
 }
 
-/* Returns the currently programmed mode of the given encoder. */
-struct drm_display_mode *
-intel_encoder_current_mode(struct intel_encoder *encoder)
+static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc_state *crtc_state;
-	struct drm_display_mode *mode;
-	struct intel_crtc *crtc;
-	enum pipe pipe;
-
-	if (!encoder->get_hw_state(encoder, &pipe))
-		return NULL;
-
-	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-
-	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
-	if (!mode)
-		return NULL;
-
-	crtc_state = intel_crtc_state_alloc(crtc);
-	if (!crtc_state) {
-		kfree(mode);
-		return NULL;
-	}
-
-	if (!intel_crtc_get_pipe_config(crtc_state)) {
-		kfree(crtc_state);
-		kfree(mode);
-		return NULL;
-	}
-
-	intel_encoder_get_config(encoder, crtc_state);
-
-	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct drm_display_mode *pipe_mode =
+		&crtc_state->hw.pipe_mode;
+	int linetime_wm;
 
-	kfree(crtc_state);
+	if (!crtc_state->hw.enable)
+		return 0;
 
-	return mode;
-}
+	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
+				   crtc_state->pixel_rate);
 
-static void intel_crtc_destroy(struct drm_crtc *crtc)
-{
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	/* Display WA #1135: BXT:ALL GLK:ALL */
+	if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
+		linetime_wm /= 2;
 
-	drm_crtc_cleanup(crtc);
-	kfree(intel_crtc);
+	return min(linetime_wm, 0x1ff);
 }
 
-/**
- * intel_wm_need_update - Check whether watermarks need updating
- * @cur: current plane state
- * @new: new plane state
- *
- * Check current plane state versus the new one to determine whether
- * watermarks need to be recalculated.
- *
- * Returns true or false.
- */
-static bool intel_wm_need_update(const struct intel_plane_state *cur,
-				 struct intel_plane_state *new)
+static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
+				   struct intel_crtc *crtc)
 {
-	/* Update watermarks on tiling or size changes. */
-	if (new->uapi.visible != cur->uapi.visible)
-		return true;
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	const struct intel_cdclk_state *cdclk_state;
 
-	if (!cur->hw.fb || !new->hw.fb)
-		return false;
+	if (INTEL_GEN(dev_priv) >= 9)
+		crtc_state->linetime = skl_linetime_wm(crtc_state);
+	else
+		crtc_state->linetime = hsw_linetime_wm(crtc_state);
 
-	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
-	    cur->hw.rotation != new->hw.rotation ||
-	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
-	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
-	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
-	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
-		return true;
+	if (!hsw_crtc_supports_ips(crtc))
+		return 0;
 
-	return false;
-}
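+	/* ips_linetime depends on cdclk, so grab the cdclk state as well */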
+	cdclk_state = intel_atomic_get_cdclk_state(state);
+	if (IS_ERR(cdclk_state))
+		return PTR_ERR(cdclk_state);
 
-static bool needs_scaling(const struct intel_plane_state *state)
-{
-	int src_w = drm_rect_width(&state->uapi.src) >> 16;
-	int src_h = drm_rect_height(&state->uapi.src) >> 16;
-	int dst_w = drm_rect_width(&state->uapi.dst);
-	int dst_h = drm_rect_height(&state->uapi.dst);
+	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
+						       cdclk_state);
 
-	return (src_w != dst_w || src_h != dst_h);
+	return 0;
 }
 
-int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
-				    struct intel_crtc_state *crtc_state,
-				    const struct intel_plane_state *old_plane_state,
-				    struct intel_plane_state *plane_state)
+static int intel_crtc_atomic_check(struct intel_atomic_state *state,
+				   struct intel_crtc *crtc)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	bool mode_changed = needs_modeset(crtc_state);
-	bool was_crtc_enabled = old_crtc_state->hw.active;
-	bool is_crtc_enabled = crtc_state->hw.active;
-	bool turn_off, turn_on, visible, was_visible;
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
 	int ret;
 
-	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
-		ret = skl_update_scaler_plane(crtc_state, plane_state);
+	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
+	    mode_changed && !crtc_state->hw.active)
+		crtc_state->update_wm_post = true;
+
+	if (mode_changed && crtc_state->hw.enable &&
+	    dev_priv->display.crtc_compute_clock &&
+	    !crtc_state->bigjoiner_slave &&
+	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
+		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
 		if (ret)
 			return ret;
 	}
 
-	was_visible = old_plane_state->uapi.visible;
-	visible = plane_state->uapi.visible;
-
-	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
-		was_visible = false;
-
 	/*
-	 * Visibility is calculated as if the crtc was on, but
-	 * after scaler setup everything depends on it being off
-	 * when the crtc isn't active.
-	 *
-	 * FIXME this is wrong for watermarks. Watermarks should also
-	 * be computed as if the pipe would be active. Perhaps move
-	 * per-plane wm computation to the .check_plane() hook, and
-	 * only combine the results from all planes in the current place?
+	 * May need to update pipe gamma enable bits
+	 * when C8 planes are getting enabled/disabled.
 	 */
-	if (!is_crtc_enabled) {
-		intel_plane_set_invisible(crtc_state, plane_state);
-		visible = false;
+	if (c8_planes_changed(crtc_state))
+		crtc_state->uapi.color_mgmt_changed = true;
+
+	if (mode_changed || crtc_state->update_pipe ||
+	    crtc_state->uapi.color_mgmt_changed) {
+		ret = intel_color_check(crtc_state);
+		if (ret)
+			return ret;
 	}
 
-	if (!was_visible && !visible)
-		return 0;
+	if (dev_priv->display.compute_pipe_wm) {
+		ret = dev_priv->display.compute_pipe_wm(crtc_state);
+		if (ret) {
+			drm_dbg_kms(&dev_priv->drm,
+				    "Target pipe watermarks are invalid\n");
+			return ret;
+		}
+	}
 
-	turn_off = was_visible && (!visible || mode_changed);
-	turn_on = visible && (!was_visible || mode_changed);
+	if (dev_priv->display.compute_intermediate_wm) {
+		if (drm_WARN_ON(&dev_priv->drm,
+				!dev_priv->display.compute_pipe_wm))
+			return 0;
 
-	drm_dbg_atomic(&dev_priv->drm,
-		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
-		       crtc->base.base.id, crtc->base.name,
-		       plane->base.base.id, plane->base.name,
-		       was_visible, visible,
-		       turn_off, turn_on, mode_changed);
+		/*
+		 * Calculate 'intermediate' watermarks that satisfy both the
+		 * old state and the new state.  We can program these
+		 * immediately.
+		 */
+		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
+		if (ret) {
+			drm_dbg_kms(&dev_priv->drm,
+				    "No valid intermediate pipe watermarks are possible\n");
+			return ret;
+		}
+	}
 
-	if (turn_on) {
-		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
-			crtc_state->update_wm_pre = true;
+	if (INTEL_GEN(dev_priv) >= 9) {
+		if (mode_changed || crtc_state->update_pipe) {
+			ret = skl_update_scaler_crtc(crtc_state);
+			if (ret)
+				return ret;
+		}
 
-		/* must disable cxsr around plane enable/disable */
-		if (plane->id != PLANE_CURSOR)
-			crtc_state->disable_cxsr = true;
-	} else if (turn_off) {
-		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
-			crtc_state->update_wm_post = true;
+		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
+		if (ret)
+			return ret;
+	}
 
-		/* must disable cxsr around plane enable/disable */
-		if (plane->id != PLANE_CURSOR)
-			crtc_state->disable_cxsr = true;
-	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
-		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
-			/* FIXME bollocks */
-			crtc_state->update_wm_pre = true;
-			crtc_state->update_wm_post = true;
-		}
+	if (HAS_IPS(dev_priv)) {
+		ret = hsw_compute_ips_config(crtc_state);
+		if (ret)
+			return ret;
 	}
 
-	if (visible || was_visible)
-		crtc_state->fb_bits |= plane->frontbuffer_bit;
+	if (INTEL_GEN(dev_priv) >= 9 ||
+	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+		ret = hsw_compute_linetime_wm(state, crtc);
+		if (ret)
+			return ret;
 
-	/*
-	 * ILK/SNB DVSACNTR/Sprite Enable
-	 * IVB SPR_CTL/Sprite Enable
-	 * "When in Self Refresh Big FIFO mode, a write to enable the
-	 *  plane will be internally buffered and delayed while Big FIFO
-	 *  mode is exiting."
-	 *
-	 * Which means that enabling the sprite can take an extra frame
-	 * when we start in big FIFO mode (LP1+). Thus we need to drop
-	 * down to LP0 and wait for vblank in order to make sure the
-	 * sprite gets enabled on the next vblank after the register write.
-	 * Doing otherwise would risk enabling the sprite one frame after
-	 * we've already signalled flip completion. We can resume LP1+
-	 * once the sprite has been enabled.
-	 *
-	 *
-	 * WaCxSRDisabledForSpriteScaling:ivb
-	 * IVB SPR_SCALE/Scaling Enable
-	 * "Low Power watermarks must be disabled for at least one
-	 *  frame before enabling sprite scaling, and kept disabled
-	 *  until sprite scaling is disabled."
-	 *
-	 * ILK/SNB DVSASCALE/Scaling Enable
-	 * "When in Self Refresh Big FIFO mode, scaling enable will be
-	 *  masked off while Big FIFO mode is exiting."
-	 *
-	 * Despite the w/a only being listed for IVB we assume that
-	 * the ILK/SNB note has similar ramifications, hence we apply
-	 * the w/a on all three platforms.
-	 *
-	 * With experimental results seems this is needed also for primary
-	 * plane, not only sprite plane.
-	 */
-	if (plane->id != PLANE_CURSOR &&
-	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
-	     IS_IVYBRIDGE(dev_priv)) &&
-	    (turn_on || (!needs_scaling(old_plane_state) &&
-			 needs_scaling(plane_state))))
-		crtc_state->disable_lp_wm = true;
+	}
+
+	if (!mode_changed) {
+		ret = intel_psr2_sel_fetch_update(state, crtc);
+		if (ret)
+			return ret;
+	}
 
 	return 0;
 }
 
-static bool encoders_cloneable(const struct intel_encoder *a,
-			       const struct intel_encoder *b)
+static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
 {
-	/* masks could be asymmetric, so check both ways */
-	return a == b || (a->cloneable & (1 << b->type) &&
-			  b->cloneable & (1 << a->type));
-}
+	struct intel_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 
-static bool check_single_encoder_cloning(struct intel_atomic_state *state,
-					 struct intel_crtc *crtc,
-					 struct intel_encoder *encoder)
-{
-	struct intel_encoder *source_encoder;
-	struct drm_connector *connector;
-	struct drm_connector_state *connector_state;
-	int i;
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	for_each_intel_connector_iter(connector, &conn_iter) {
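+		/* Drop the reference held while the connector state had a crtc */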
+		if (connector->base.state->crtc)
+			drm_connector_put(&connector->base);
 
-	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
-		if (connector_state->crtc != &crtc->base)
-			continue;
+		if (connector->base.encoder) {
+			connector->base.state->best_encoder =
+				connector->base.encoder;
+			connector->base.state->crtc =
+				connector->base.encoder->crtc;
 
-		source_encoder =
-			to_intel_encoder(connector_state->best_encoder);
-		if (!encoders_cloneable(encoder, source_encoder))
-			return false;
+			drm_connector_get(&connector->base);
+		} else {
+			connector->base.state->best_encoder = NULL;
+			connector->base.state->crtc = NULL;
+		}
 	}
-
-	return true;
+	drm_connector_list_iter_end(&conn_iter);
 }
 
-static int icl_add_linked_planes(struct intel_atomic_state *state)
+static int
+compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
+		      struct intel_crtc_state *pipe_config)
 {
-	struct intel_plane *plane, *linked;
-	struct intel_plane_state *plane_state, *linked_plane_state;
-	int i;
-
-	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
-		linked = plane_state->planar_linked_plane;
+	struct drm_connector *connector = conn_state->connector;
+	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
+	const struct drm_display_info *info = &connector->display_info;
+	int bpp;
 
-		if (!linked)
-			continue;
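+	/* Round max_bpc down to a supported component depth: 6, 8, 10 or 12 */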
+	switch (conn_state->max_bpc) {
+	case 6 ... 7:
+		bpp = 6 * 3;
+		break;
+	case 8 ... 9:
+		bpp = 8 * 3;
+		break;
+	case 10 ... 11:
+		bpp = 10 * 3;
+		break;
+	case 12 ... 16:
+		bpp = 12 * 3;
+		break;
+	default:
+		MISSING_CASE(conn_state->max_bpc);
+		return -EINVAL;
+	}
 
-		linked_plane_state = intel_atomic_get_plane_state(state, linked);
-		if (IS_ERR(linked_plane_state))
-			return PTR_ERR(linked_plane_state);
+	if (bpp < pipe_config->pipe_bpp) {
+		drm_dbg_kms(&i915->drm,
+			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
+			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
+			    connector->base.id, connector->name,
+			    bpp, 3 * info->bpc,
+			    3 * conn_state->max_requested_bpc,
+			    pipe_config->pipe_bpp);
 
-		drm_WARN_ON(state->base.dev,
-			    linked_plane_state->planar_linked_plane != plane);
-		drm_WARN_ON(state->base.dev,
-			    linked_plane_state->planar_slave == plane_state->planar_slave);
+		pipe_config->pipe_bpp = bpp;
 	}
 
 	return 0;
 }
 
-static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
+static int
+compute_baseline_pipe_bpp(struct intel_crtc *crtc,
+			  struct intel_crtc_state *pipe_config)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
-	struct intel_plane *plane, *linked;
-	struct intel_plane_state *plane_state;
-	int i;
-
-	if (INTEL_GEN(dev_priv) < 11)
-		return 0;
+	struct drm_atomic_state *state = pipe_config->uapi.state;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	int bpp, i;
 
-	/*
-	 * Destroy all old plane links and make the slave plane invisible
-	 * in the crtc_state->active_planes mask.
-	 */
-	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
-		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
-			continue;
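+	/* Start from the platform's max pipe bpp (3 components x max bpc) */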
+	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+	    IS_CHERRYVIEW(dev_priv)))
+		bpp = 10*3;
+	else if (INTEL_GEN(dev_priv) >= 5)
+		bpp = 12*3;
+	else
+		bpp = 8*3;
 
-		plane_state->planar_linked_plane = NULL;
-		if (plane_state->planar_slave && !plane_state->uapi.visible) {
-			crtc_state->active_planes &= ~BIT(plane->id);
-			crtc_state->update_planes |= BIT(plane->id);
-		}
+	pipe_config->pipe_bpp = bpp;
 
-		plane_state->planar_slave = false;
-	}
+	/* Clamp display bpp to connector max bpp */
+	for_each_new_connector_in_state(state, connector, connector_state, i) {
+		int ret;
 
-	if (!crtc_state->nv12_planes)
-		return 0;
+		if (connector_state->crtc != &crtc->base)
+			continue;
 
-	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
-		struct intel_plane_state *linked_state = NULL;
+		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
+		if (ret)
+			return ret;
+	}
 
-		if (plane->pipe != crtc->pipe ||
-		    !(crtc_state->nv12_planes & BIT(plane->id)))
-			continue;
+	return 0;
+}
 
-		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
-			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
-				continue;
+static void intel_dump_crtc_timings(struct drm_i915_private *i915,
+				    const struct drm_display_mode *mode)
+{
+	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
+		    "type: 0x%x flags: 0x%x\n",
+		    mode->crtc_clock,
+		    mode->crtc_hdisplay, mode->crtc_hsync_start,
+		    mode->crtc_hsync_end, mode->crtc_htotal,
+		    mode->crtc_vdisplay, mode->crtc_vsync_start,
+		    mode->crtc_vsync_end, mode->crtc_vtotal,
+		    mode->type, mode->flags);
+}
 
-			if (crtc_state->active_planes & BIT(linked->id))
-				continue;
+static void
+intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
+		      const char *id, unsigned int lane_count,
+		      const struct intel_link_m_n *m_n)
+{
+	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
 
-			linked_state = intel_atomic_get_plane_state(state, linked);
-			if (IS_ERR(linked_state))
-				return PTR_ERR(linked_state);
+	drm_dbg_kms(&i915->drm,
+		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+		    id, lane_count,
+		    m_n->gmch_m, m_n->gmch_n,
+		    m_n->link_m, m_n->link_n, m_n->tu);
+}
 
-			break;
-		}
+static void
+intel_dump_infoframe(struct drm_i915_private *dev_priv,
+		     const union hdmi_infoframe *frame)
+{
+	if (!drm_debug_enabled(DRM_UT_KMS))
+		return;
 
-		if (!linked_state) {
-			drm_dbg_kms(&dev_priv->drm,
-				    "Need %d free Y planes for planar YUV\n",
-				    hweight8(crtc_state->nv12_planes));
+	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
+}
 
-			return -EINVAL;
-		}
+static void
+intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
+		      const struct drm_dp_vsc_sdp *vsc)
+{
+	if (!drm_debug_enabled(DRM_UT_KMS))
+		return;
 
-		plane_state->planar_linked_plane = linked;
+	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
+}
 
-		linked_state->planar_slave = true;
-		linked_state->planar_linked_plane = plane;
-		crtc_state->active_planes |= BIT(linked->id);
-		crtc_state->update_planes |= BIT(linked->id);
-		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
-			    linked->base.name, plane->base.name);
+#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
 
-		/* Copy parameters to slave plane */
-		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
-		linked_state->color_ctl = plane_state->color_ctl;
-		linked_state->view = plane_state->view;
-		memcpy(linked_state->color_plane, plane_state->color_plane,
-		       sizeof(linked_state->color_plane));
-
-		intel_plane_copy_hw_state(linked_state, plane_state);
-		linked_state->uapi.src = plane_state->uapi.src;
-		linked_state->uapi.dst = plane_state->uapi.dst;
-
-		if (icl_is_hdr_plane(dev_priv, plane->id)) {
-			if (linked->id == PLANE_SPRITE5)
-				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
-			else if (linked->id == PLANE_SPRITE4)
-				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
-			else if (linked->id == PLANE_SPRITE3)
-				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
-			else if (linked->id == PLANE_SPRITE2)
-				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
-			else
-				MISSING_CASE(linked->id);
-		}
-	}
+static const char * const output_type_str[] = {
+	OUTPUT_TYPE(UNUSED),
+	OUTPUT_TYPE(ANALOG),
+	OUTPUT_TYPE(DVO),
+	OUTPUT_TYPE(SDVO),
+	OUTPUT_TYPE(LVDS),
+	OUTPUT_TYPE(TVOUT),
+	OUTPUT_TYPE(HDMI),
+	OUTPUT_TYPE(DP),
+	OUTPUT_TYPE(EDP),
+	OUTPUT_TYPE(DSI),
+	OUTPUT_TYPE(DDI),
+	OUTPUT_TYPE(DP_MST),
+};
 
-	return 0;
-}
+#undef OUTPUT_TYPE
 
-static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
+static void snprintf_output_types(char *buf, size_t len,
+				  unsigned int output_types)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
-	struct intel_atomic_state *state =
-		to_intel_atomic_state(new_crtc_state->uapi.state);
-	const struct intel_crtc_state *old_crtc_state =
-		intel_atomic_get_old_crtc_state(state, crtc);
-
-	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
-}
+	char *str = buf;
+	int i;
 
-static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
-{
-	const struct drm_display_mode *pipe_mode =
-		&crtc_state->hw.pipe_mode;
-	int linetime_wm;
+	str[0] = '\0';
 
-	if (!crtc_state->hw.enable)
-		return 0;
+	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
+		int r;
 
-	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
-					pipe_mode->crtc_clock);
+		if ((output_types & BIT(i)) == 0)
+			continue;
 
-	return min(linetime_wm, 0x1ff);
-}
+		r = snprintf(str, len, "%s%s",
+			     str != buf ? "," : "", output_type_str[i]);
+		if (r >= len)
+			break;
+		str += r;
+		len -= r;
 
-static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
-			       const struct intel_cdclk_state *cdclk_state)
-{
-	const struct drm_display_mode *pipe_mode =
-		&crtc_state->hw.pipe_mode;
-	int linetime_wm;
+		output_types &= ~BIT(i);
+	}
 
-	if (!crtc_state->hw.enable)
-		return 0;
+	WARN_ON_ONCE(output_types != 0);
+}
 
-	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
-					cdclk_state->logical.cdclk);
+static const char * const output_format_str[] = {
+	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
+	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
+	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
+	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
+};
 
-	return min(linetime_wm, 0x1ff);
+static const char *output_formats(enum intel_output_format format)
+{
+	if (format >= ARRAY_SIZE(output_format_str))
+		format = INTEL_OUTPUT_FORMAT_INVALID;
+	return output_format_str[format];
 }
 
-static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
+static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct drm_display_mode *pipe_mode =
-		&crtc_state->hw.pipe_mode;
-	int linetime_wm;
-
-	if (!crtc_state->hw.enable)
-		return 0;
-
-	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
-				   crtc_state->pixel_rate);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	struct drm_format_name_buf format_name;
 
-	/* Display WA #1135: BXT:ALL GLK:ALL */
-	if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
-		linetime_wm /= 2;
+	if (!fb) {
+		drm_dbg_kms(&i915->drm,
+			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
+			    plane->base.base.id, plane->base.name,
+			    yesno(plane_state->uapi.visible));
+		return;
+	}
 
-	return min(linetime_wm, 0x1ff);
+	drm_dbg_kms(&i915->drm,
+		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n",
+		    plane->base.base.id, plane->base.name,
+		    fb->base.id, fb->width, fb->height,
+		    drm_get_format_name(fb->format->format, &format_name),
+		    fb->modifier, yesno(plane_state->uapi.visible));
+	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
+		    plane_state->hw.rotation, plane_state->scaler_id);
+	if (plane_state->uapi.visible)
+		drm_dbg_kms(&i915->drm,
+			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
+			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
+			    DRM_RECT_ARG(&plane_state->uapi.dst));
 }
 
-static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
-				   struct intel_crtc *crtc)
+static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
+				   struct intel_atomic_state *state,
+				   const char *context)
 {
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_crtc_state *crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-	const struct intel_cdclk_state *cdclk_state;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		crtc_state->linetime = skl_linetime_wm(crtc_state);
-	else
-		crtc_state->linetime = hsw_linetime_wm(crtc_state);
-
-	if (!hsw_crtc_supports_ips(crtc))
-		return 0;
+	const struct intel_plane_state *plane_state;
+	struct intel_plane *plane;
+	char buf[64];
+	int i;
 
-	cdclk_state = intel_atomic_get_cdclk_state(state);
-	if (IS_ERR(cdclk_state))
-		return PTR_ERR(cdclk_state);
+	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
+		    crtc->base.base.id, crtc->base.name,
+		    yesno(pipe_config->hw.enable), context);
 
-	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
-						       cdclk_state);
+	if (!pipe_config->hw.enable)
+		goto dump_planes;
 
-	return 0;
-}
+	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
+	drm_dbg_kms(&dev_priv->drm,
+		    "active: %s, output_types: %s (0x%x), output format: %s\n",
+		    yesno(pipe_config->hw.active),
+		    buf, pipe_config->output_types,
+		    output_formats(pipe_config->output_format));
 
-static int intel_crtc_atomic_check(struct intel_atomic_state *state,
-				   struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_crtc_state *crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-	bool mode_changed = needs_modeset(crtc_state);
-	int ret;
+	drm_dbg_kms(&dev_priv->drm,
+		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+		    transcoder_name(pipe_config->cpu_transcoder),
+		    pipe_config->pipe_bpp, pipe_config->dither);
 
-	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
-	    mode_changed && !crtc_state->hw.active)
-		crtc_state->update_wm_post = true;
+	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
+		    transcoder_name(pipe_config->mst_master_transcoder));
 
-	if (mode_changed && crtc_state->hw.enable &&
-	    dev_priv->display.crtc_compute_clock &&
-	    !crtc_state->bigjoiner_slave &&
-	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
-		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
-		if (ret)
-			return ret;
-	}
+	drm_dbg_kms(&dev_priv->drm,
+		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
+		    transcoder_name(pipe_config->master_transcoder),
+		    pipe_config->sync_mode_slaves_mask);
 
-	/*
-	 * May need to update pipe gamma enable bits
-	 * when C8 planes are getting enabled/disabled.
-	 */
-	if (c8_planes_changed(crtc_state))
-		crtc_state->uapi.color_mgmt_changed = true;
+	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
+		    pipe_config->bigjoiner_slave ? "slave" :
+		    pipe_config->bigjoiner ? "master" : "no");
 
-	if (mode_changed || crtc_state->update_pipe ||
-	    crtc_state->uapi.color_mgmt_changed) {
-		ret = intel_color_check(crtc_state);
-		if (ret)
-			return ret;
-	}
+	if (pipe_config->has_pch_encoder)
+		intel_dump_m_n_config(pipe_config, "fdi",
+				      pipe_config->fdi_lanes,
+				      &pipe_config->fdi_m_n);
 
-	if (dev_priv->display.compute_pipe_wm) {
-		ret = dev_priv->display.compute_pipe_wm(crtc_state);
-		if (ret) {
-			drm_dbg_kms(&dev_priv->drm,
-				    "Target pipe watermarks are invalid\n");
-			return ret;
-		}
+	if (intel_crtc_has_dp_encoder(pipe_config)) {
+		intel_dump_m_n_config(pipe_config, "dp m_n",
+				pipe_config->lane_count, &pipe_config->dp_m_n);
+		if (pipe_config->has_drrs)
+			intel_dump_m_n_config(pipe_config, "dp m2_n2",
+					      pipe_config->lane_count,
+					      &pipe_config->dp_m2_n2);
 	}
 
-	if (dev_priv->display.compute_intermediate_wm) {
-		if (drm_WARN_ON(&dev_priv->drm,
-				!dev_priv->display.compute_pipe_wm))
-			return 0;
-
-		/*
-		 * Calculate 'intermediate' watermarks that satisfy both the
-		 * old state and the new state.  We can program these
-		 * immediately.
-		 */
-		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
-		if (ret) {
-			drm_dbg_kms(&dev_priv->drm,
-				    "No valid intermediate pipe watermarks are possible\n");
-			return ret;
-		}
-	}
-
-	if (INTEL_GEN(dev_priv) >= 9) {
-		if (mode_changed || crtc_state->update_pipe) {
-			ret = skl_update_scaler_crtc(crtc_state);
-			if (ret)
-				return ret;
-		}
-
-		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
-		if (ret)
-			return ret;
-	}
-
-	if (HAS_IPS(dev_priv)) {
-		ret = hsw_compute_ips_config(crtc_state);
-		if (ret)
-			return ret;
-	}
-
-	if (INTEL_GEN(dev_priv) >= 9 ||
-	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
-		ret = hsw_compute_linetime_wm(state, crtc);
-		if (ret)
-			return ret;
-
-	}
-
-	if (!mode_changed) {
-		ret = intel_psr2_sel_fetch_update(state, crtc);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
-{
-	struct intel_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	for_each_intel_connector_iter(connector, &conn_iter) {
-		if (connector->base.state->crtc)
-			drm_connector_put(&connector->base);
-
-		if (connector->base.encoder) {
-			connector->base.state->best_encoder =
-				connector->base.encoder;
-			connector->base.state->crtc =
-				connector->base.encoder->crtc;
-
-			drm_connector_get(&connector->base);
-		} else {
-			connector->base.state->best_encoder = NULL;
-			connector->base.state->crtc = NULL;
-		}
-	}
-	drm_connector_list_iter_end(&conn_iter);
-}
-
-static int
-compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
-		      struct intel_crtc_state *pipe_config)
-{
-	struct drm_connector *connector = conn_state->connector;
-	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
-	const struct drm_display_info *info = &connector->display_info;
-	int bpp;
-
-	switch (conn_state->max_bpc) {
-	case 6 ... 7:
-		bpp = 6 * 3;
-		break;
-	case 8 ... 9:
-		bpp = 8 * 3;
-		break;
-	case 10 ... 11:
-		bpp = 10 * 3;
-		break;
-	case 12 ... 16:
-		bpp = 12 * 3;
-		break;
-	default:
-		MISSING_CASE(conn_state->max_bpc);
-		return -EINVAL;
-	}
-
-	if (bpp < pipe_config->pipe_bpp) {
-		drm_dbg_kms(&i915->drm,
-			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
-			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
-			    connector->base.id, connector->name,
-			    bpp, 3 * info->bpc,
-			    3 * conn_state->max_requested_bpc,
-			    pipe_config->pipe_bpp);
-
-		pipe_config->pipe_bpp = bpp;
-	}
-
-	return 0;
-}
-
-static int
-compute_baseline_pipe_bpp(struct intel_crtc *crtc,
-			  struct intel_crtc_state *pipe_config)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct drm_atomic_state *state = pipe_config->uapi.state;
-	struct drm_connector *connector;
-	struct drm_connector_state *connector_state;
-	int bpp, i;
-
-	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
-	    IS_CHERRYVIEW(dev_priv)))
-		bpp = 10*3;
-	else if (INTEL_GEN(dev_priv) >= 5)
-		bpp = 12*3;
-	else
-		bpp = 8*3;
-
-	pipe_config->pipe_bpp = bpp;
-
-	/* Clamp display bpp to connector max bpp */
-	for_each_new_connector_in_state(state, connector, connector_state, i) {
-		int ret;
-
-		if (connector_state->crtc != &crtc->base)
-			continue;
-
-		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static void intel_dump_crtc_timings(struct drm_i915_private *i915,
-				    const struct drm_display_mode *mode)
-{
-	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
-		    "type: 0x%x flags: 0x%x\n",
-		    mode->crtc_clock,
-		    mode->crtc_hdisplay, mode->crtc_hsync_start,
-		    mode->crtc_hsync_end, mode->crtc_htotal,
-		    mode->crtc_vdisplay, mode->crtc_vsync_start,
-		    mode->crtc_vsync_end, mode->crtc_vtotal,
-		    mode->type, mode->flags);
-}
-
-static void
-intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
-		      const char *id, unsigned int lane_count,
-		      const struct intel_link_m_n *m_n)
-{
-	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
-
-	drm_dbg_kms(&i915->drm,
-		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
-		    id, lane_count,
-		    m_n->gmch_m, m_n->gmch_n,
-		    m_n->link_m, m_n->link_n, m_n->tu);
-}
-
-static void
-intel_dump_infoframe(struct drm_i915_private *dev_priv,
-		     const union hdmi_infoframe *frame)
-{
-	if (!drm_debug_enabled(DRM_UT_KMS))
-		return;
-
-	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
-}
-
-static void
-intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
-		      const struct drm_dp_vsc_sdp *vsc)
-{
-	if (!drm_debug_enabled(DRM_UT_KMS))
-		return;
-
-	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
-}
-
-#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
-
-static const char * const output_type_str[] = {
-	OUTPUT_TYPE(UNUSED),
-	OUTPUT_TYPE(ANALOG),
-	OUTPUT_TYPE(DVO),
-	OUTPUT_TYPE(SDVO),
-	OUTPUT_TYPE(LVDS),
-	OUTPUT_TYPE(TVOUT),
-	OUTPUT_TYPE(HDMI),
-	OUTPUT_TYPE(DP),
-	OUTPUT_TYPE(EDP),
-	OUTPUT_TYPE(DSI),
-	OUTPUT_TYPE(DDI),
-	OUTPUT_TYPE(DP_MST),
-};
-
-#undef OUTPUT_TYPE
-
-static void snprintf_output_types(char *buf, size_t len,
-				  unsigned int output_types)
-{
-	char *str = buf;
-	int i;
-
-	str[0] = '\0';
-
-	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
-		int r;
-
-		if ((output_types & BIT(i)) == 0)
-			continue;
-
-		r = snprintf(str, len, "%s%s",
-			     str != buf ? "," : "", output_type_str[i]);
-		if (r >= len)
-			break;
-		str += r;
-		len -= r;
-
-		output_types &= ~BIT(i);
-	}
-
-	WARN_ON_ONCE(output_types != 0);
-}
-
-static const char * const output_format_str[] = {
-	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
-	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
-	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
-	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
-};
-
-static const char *output_formats(enum intel_output_format format)
-{
-	if (format >= ARRAY_SIZE(output_format_str))
-		format = INTEL_OUTPUT_FORMAT_INVALID;
-	return output_format_str[format];
-}
-
-static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
-{
-	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
-	struct drm_i915_private *i915 = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->hw.fb;
-	struct drm_format_name_buf format_name;
-
-	if (!fb) {
-		drm_dbg_kms(&i915->drm,
-			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
-			    plane->base.base.id, plane->base.name,
-			    yesno(plane_state->uapi.visible));
-		return;
-	}
-
-	drm_dbg_kms(&i915->drm,
-		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n",
-		    plane->base.base.id, plane->base.name,
-		    fb->base.id, fb->width, fb->height,
-		    drm_get_format_name(fb->format->format, &format_name),
-		    fb->modifier, yesno(plane_state->uapi.visible));
-	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
-		    plane_state->hw.rotation, plane_state->scaler_id);
-	if (plane_state->uapi.visible)
-		drm_dbg_kms(&i915->drm,
-			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
-			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
-			    DRM_RECT_ARG(&plane_state->uapi.dst));
-}
-
-static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
-				   struct intel_atomic_state *state,
-				   const char *context)
-{
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct intel_plane_state *plane_state;
-	struct intel_plane *plane;
-	char buf[64];
-	int i;
-
-	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
-		    crtc->base.base.id, crtc->base.name,
-		    yesno(pipe_config->hw.enable), context);
-
-	if (!pipe_config->hw.enable)
-		goto dump_planes;
-
-	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
-	drm_dbg_kms(&dev_priv->drm,
-		    "active: %s, output_types: %s (0x%x), output format: %s\n",
-		    yesno(pipe_config->hw.active),
-		    buf, pipe_config->output_types,
-		    output_formats(pipe_config->output_format));
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
-		    transcoder_name(pipe_config->cpu_transcoder),
-		    pipe_config->pipe_bpp, pipe_config->dither);
-
-	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
-		    transcoder_name(pipe_config->mst_master_transcoder));
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
-		    transcoder_name(pipe_config->master_transcoder),
-		    pipe_config->sync_mode_slaves_mask);
-
-	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
-		    pipe_config->bigjoiner_slave ? "slave" :
-		    pipe_config->bigjoiner ? "master" : "no");
-
-	if (pipe_config->has_pch_encoder)
-		intel_dump_m_n_config(pipe_config, "fdi",
-				      pipe_config->fdi_lanes,
-				      &pipe_config->fdi_m_n);
-
-	if (intel_crtc_has_dp_encoder(pipe_config)) {
-		intel_dump_m_n_config(pipe_config, "dp m_n",
-				pipe_config->lane_count, &pipe_config->dp_m_n);
-		if (pipe_config->has_drrs)
-			intel_dump_m_n_config(pipe_config, "dp m2_n2",
-					      pipe_config->lane_count,
-					      &pipe_config->dp_m2_n2);
-	}
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
-		    pipe_config->has_audio, pipe_config->has_infoframe,
-		    pipe_config->infoframes.enable);
+	drm_dbg_kms(&dev_priv->drm,
+		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
+		    pipe_config->has_audio, pipe_config->has_infoframe,
+		    pipe_config->infoframes.enable);
 
 	if (pipe_config->infoframes.enable &
 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
@@ -13427,6 +10338,13 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
 	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
 		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
 
+	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
+		    yesno(pipe_config->vrr.enable),
+		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
+		    pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
+		    intel_vrr_vmin_vblank_start(pipe_config),
+		    intel_vrr_vmax_vblank_start(pipe_config));
+
 	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
 	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
 	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
@@ -13814,7 +10732,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
 		return ret;
 	}
 
-	if (ret == RETRY) {
+	if (ret == I915_DISPLAY_CONFIG_RETRY) {
 		if (drm_WARN(&i915->drm, !retry,
 			     "loop in pipe configuration computation\n"))
 			return -EINVAL;
@@ -14420,6 +11338,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 
 	PIPE_CONF_CHECK_I(mst_master_transcoder);
 
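+	/* VRR timings are part of the pipe config and must match exactly. */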
+	PIPE_CONF_CHECK_BOOL(vrr.enable);
+	PIPE_CONF_CHECK_I(vrr.vmin);
+	PIPE_CONF_CHECK_I(vrr.vmax);
+	PIPE_CONF_CHECK_I(vrr.flipline);
+	PIPE_CONF_CHECK_I(vrr.pipeline_full);
+
 #undef PIPE_CONF_CHECK_X
 #undef PIPE_CONF_CHECK_I
 #undef PIPE_CONF_CHECK_BOOL
@@ -14844,7 +11768,7 @@ intel_modeset_verify_crtc(struct intel_crtc *crtc,
 			  struct intel_crtc_state *old_crtc_state,
 			  struct intel_crtc_state *new_crtc_state)
 {
-	if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
+	if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
 		return;
 
 	verify_wm_state(crtc, new_crtc_state);
@@ -14878,10 +11802,17 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->hw.adjusted_mode;
+	struct drm_display_mode adjusted_mode =
+		crtc_state->hw.adjusted_mode;
+
+	if (crtc_state->vrr.enable) {
+		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
+		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
+		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
+		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+	}
 
-	drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
+	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
 
 	crtc->mode_flags = crtc_state->mode_flags;
 
@@ -14915,8 +11846,8 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
 	if (IS_GEN(dev_priv, 2)) {
 		int vtotal;
 
-		vtotal = adjusted_mode->crtc_vtotal;
-		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+		vtotal = adjusted_mode.crtc_vtotal;
+		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 			vtotal /= 2;
 
 		crtc->scanline_offset = vtotal - 1;
@@ -14939,7 +11870,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state)
 		return;
 
 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
-		if (!needs_modeset(new_crtc_state))
+		if (!intel_crtc_needs_modeset(new_crtc_state))
 			continue;
 
 		intel_release_shared_dplls(state, crtc);
@@ -14964,7 +11895,7 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
 	/* look at all crtc's that are going to be enabled during the modeset */
 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
 		if (!crtc_state->hw.active ||
-		    !needs_modeset(crtc_state))
+		    !intel_crtc_needs_modeset(crtc_state))
 			continue;
 
 		if (first_crtc_state) {
@@ -14989,7 +11920,7 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
 		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
 
 		if (!crtc_state->hw.active ||
-		    needs_modeset(crtc_state))
+		    intel_crtc_needs_modeset(crtc_state))
 			continue;
 
 		/* 2 or more enabled crtcs means no need for w/a */
@@ -15101,6 +12032,19 @@ static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
 	return 0;
 }
 
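+/*
+ * Add every plane that is enabled in either the old or the new CRTC state
+ * to the atomic state, so the planes take part in this commit.
+ */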
+int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
+				     struct intel_crtc *crtc)
+{
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	const struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+
+	return intel_crtc_add_planes_to_state(state, crtc,
+					      old_crtc_state->enabled_planes |
+					      new_crtc_state->enabled_planes);
+}
+
 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
 {
 	/* See {hsw,vlv,ivb}_plane_ratio() */
@@ -15295,7 +12239,7 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
 		if (new_crtc_state->hw.enable &&
 		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
-		    needs_modeset(new_crtc_state))
+		    intel_crtc_needs_modeset(new_crtc_state))
 			return true;
 	}
 
@@ -15316,7 +12260,7 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
 		slave = crtc;
 		master = old_crtc_state->bigjoiner_linked_crtc;
 		master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
-		if (!master_crtc_state || !needs_modeset(master_crtc_state))
+		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
 			goto claimed;
 	}
 
@@ -15354,21 +12298,16 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
 	return -EINVAL;
 }
 
-static int kill_bigjoiner_slave(struct intel_atomic_state *state,
-				struct intel_crtc_state *master_crtc_state)
+static void kill_bigjoiner_slave(struct intel_atomic_state *state,
+				 struct intel_crtc_state *master_crtc_state)
 {
 	struct intel_crtc_state *slave_crtc_state =
-		intel_atomic_get_crtc_state(&state->base,
-					    master_crtc_state->bigjoiner_linked_crtc);
-
-	if (IS_ERR(slave_crtc_state))
-		return PTR_ERR(slave_crtc_state);
+		intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
 
 	slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
 	slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
 	slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
 	intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
-	return 0;
 }
 
 /**
@@ -15381,7 +12320,7 @@ static int kill_bigjoiner_slave(struct intel_atomic_state *state,
  * Async flip can only change the plane surface address, so anything else
  * changing is rejected by the intel_atomic_check_async() function.
  * Once this check is cleared, flip done interrupt is enabled using
- * the skl_enable_flip_done() function.
+ * the intel_crtc_enable_flip_done() function.
  *
  * As soon as the surface address register is written, flip done interrupt is
  * generated and the requested events are sent to userspace in the interrupt
@@ -15400,7 +12339,7 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		if (needs_modeset(new_crtc_state)) {
+		if (intel_crtc_needs_modeset(new_crtc_state)) {
 			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
 			return -EINVAL;
 		}
@@ -15425,7 +12364,7 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
 		 * this (vlv/chv and icl+) should be added when async flip is
 		 * enabled in the atomic IOCTL path.
 		 */
-		if (plane->id != PLANE_PRIMARY)
+		if (!plane->async_flip)
 			return -EINVAL;
 
 		/*
@@ -15506,20 +12445,43 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
 
 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
 {
-	const struct intel_crtc_state *crtc_state;
+	struct intel_crtc_state *crtc_state;
 	struct intel_crtc *crtc;
 	int i;
 
 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
 		struct intel_crtc_state *linked_crtc_state;
+		struct intel_crtc *linked_crtc;
+		int ret;
 
 		if (!crtc_state->bigjoiner)
 			continue;
 
-		linked_crtc_state = intel_atomic_get_crtc_state(&state->base,
-								crtc_state->bigjoiner_linked_crtc);
+		linked_crtc = crtc_state->bigjoiner_linked_crtc;
+		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
 		if (IS_ERR(linked_crtc_state))
 			return PTR_ERR(linked_crtc_state);
+
+		if (!intel_crtc_needs_modeset(crtc_state))
+			continue;
+
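+		/* A modeset on the master must be propagated to the linked CRTC. */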
+		linked_crtc_state->uapi.mode_changed = true;
+
+		ret = drm_atomic_add_affected_connectors(&state->base,
+							 &linked_crtc->base);
+		if (ret)
+			return ret;
+
+		ret = intel_atomic_add_affected_planes(state, linked_crtc);
+		if (ret)
+			return ret;
+	}
+
+	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+		/* Kill the old bigjoiner link; we may re-establish it afterwards */
+		if (intel_crtc_needs_modeset(crtc_state) &&
+		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
+			kill_bigjoiner_slave(state, crtc_state);
 	}
 
 	return 0;
@@ -15546,6 +12508,8 @@ static int intel_atomic_check(struct drm_device *dev,
 			new_crtc_state->uapi.mode_changed = true;
 	}
 
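+	/* Let the VRR code flag any CRTCs that need a full modeset. */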
+	intel_vrr_check_modeset(state);
+
 	ret = drm_atomic_helper_check_modeset(dev, &state->base);
 	if (ret)
 		goto fail;
@@ -15556,20 +12520,13 @@ static int intel_atomic_check(struct drm_device *dev,
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		if (!needs_modeset(new_crtc_state)) {
+		if (!intel_crtc_needs_modeset(new_crtc_state)) {
 			/* Light copy */
 			intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
 
 			continue;
 		}
 
-		/* Kill old bigjoiner link, we may re-establish afterwards */
-		if (old_crtc_state->bigjoiner && !old_crtc_state->bigjoiner_slave) {
-			ret = kill_bigjoiner_slave(state, new_crtc_state);
-			if (ret)
-				goto fail;
-		}
-
 		if (!new_crtc_state->uapi.enable) {
 			if (!new_crtc_state->bigjoiner_slave) {
 				intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
@@ -15594,7 +12551,7 @@ static int intel_atomic_check(struct drm_device *dev,
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		if (!needs_modeset(new_crtc_state))
+		if (!intel_crtc_needs_modeset(new_crtc_state))
 			continue;
 
 		ret = intel_modeset_pipe_config_late(new_crtc_state);
@@ -15616,7 +12573,7 @@ static int intel_atomic_check(struct drm_device *dev,
 	 * forced a full modeset.
 	 */
 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
-		if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
+		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
 			continue;
 
 		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
@@ -15639,11 +12596,21 @@ static int intel_atomic_check(struct drm_device *dev,
 				new_crtc_state->update_pipe = false;
 			}
 		}
+
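+		/* A modeset on the linked bigjoiner CRTC forces one here as well. */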
+		if (new_crtc_state->bigjoiner) {
+			struct intel_crtc_state *linked_crtc_state =
+				intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
+
+			if (intel_crtc_needs_modeset(linked_crtc_state)) {
+				new_crtc_state->uapi.mode_changed = true;
+				new_crtc_state->update_pipe = false;
+			}
+		}
 	}
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		if (needs_modeset(new_crtc_state)) {
+		if (intel_crtc_needs_modeset(new_crtc_state)) {
 			any_ms = true;
 			continue;
 		}
@@ -15669,20 +12636,6 @@ static int intel_atomic_check(struct drm_device *dev,
 	if (ret)
 		goto fail;
 
-	/*
-	 * distrust_bios_wm will force a full dbuf recomputation
-	 * but the hardware state will only get updated accordingly
-	 * if state->modeset==true. Hence distrust_bios_wm==true &&
-	 * state->modeset==false is an invalid combination which
-	 * would cause the hardware and software dbuf state to get
-	 * out of sync. We must prevent that.
-	 *
-	 * FIXME clean up this mess and introduce better
-	 * state tracking for dbuf.
-	 */
-	if (dev_priv->wm.distrust_bios_wm)
-		any_ms = true;
-
 	intel_fbc_choose_crtc(dev_priv, state);
 	ret = calc_watermark_data(state);
 	if (ret)
@@ -15720,12 +12673,12 @@ static int intel_atomic_check(struct drm_device *dev,
 				goto fail;
 		}
 
-		if (!needs_modeset(new_crtc_state) &&
+		if (!intel_crtc_needs_modeset(new_crtc_state) &&
 		    !new_crtc_state->update_pipe)
 			continue;
 
 		intel_dump_pipe_config(new_crtc_state, state,
-				       needs_modeset(new_crtc_state) ?
+				       intel_crtc_needs_modeset(new_crtc_state) ?
 				       "[modeset]" : "[fastset]");
 	}
 
@@ -15757,7 +12710,7 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
 		return ret;
 
 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
-		bool mode_changed = needs_modeset(crtc_state);
+		bool mode_changed = intel_crtc_needs_modeset(crtc_state);
 
 		if (mode_changed || crtc_state->update_pipe ||
 		    crtc_state->uapi.color_mgmt_changed) {
@@ -15768,17 +12721,6 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
 	return 0;
 }
 
-u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
-
-	if (!vblank->max_vblank_count)
-		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
-
-	return crtc->base.funcs->get_vblank_counter(&crtc->base);
-}
-
 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
 				  struct intel_crtc_state *crtc_state)
 {
@@ -15848,7 +12790,7 @@ static void commit_pipe_config(struct intel_atomic_state *state,
 		intel_atomic_get_old_crtc_state(state, crtc);
 	const struct intel_crtc_state *new_crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
-	bool modeset = needs_modeset(new_crtc_state);
+	bool modeset = intel_crtc_needs_modeset(new_crtc_state);
 
 	/*
 	 * During modesets the pipe configuration was programmed as the
@@ -15882,7 +12824,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
 	const struct intel_crtc_state *new_crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
 
-	if (!needs_modeset(new_crtc_state))
+	if (!intel_crtc_needs_modeset(new_crtc_state))
 		return;
 
 	intel_crtc_update_active_timings(new_crtc_state);
@@ -15904,7 +12846,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
 		intel_atomic_get_old_crtc_state(state, crtc);
 	struct intel_crtc_state *new_crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
-	bool modeset = needs_modeset(new_crtc_state);
+	bool modeset = intel_crtc_needs_modeset(new_crtc_state);
 
 	if (!modeset) {
 		if (new_crtc_state->preload_luts &&
@@ -15996,7 +12938,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
 	/* Only disable port sync and MST slaves */
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		if (!needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
+		if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
 			continue;
 
 		if (!old_crtc_state->hw.active)
@@ -16020,7 +12962,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
 	/* Disable everything else left on */
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		if (!needs_modeset(new_crtc_state) ||
+		if (!intel_crtc_needs_modeset(new_crtc_state) ||
 		    (handled & BIT(crtc->pipe)) ||
 		    old_crtc_state->bigjoiner_slave)
 			continue;
@@ -16070,7 +13012,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
 			continue;
 
 		/* ignore allocations for crtc's that have been turned off. */
-		if (!needs_modeset(new_crtc_state)) {
+		if (!intel_crtc_needs_modeset(new_crtc_state)) {
 			entries[pipe] = old_crtc_state->wm.skl.ddb;
 			update_pipes |= BIT(pipe);
 		} else {
@@ -16238,12 +13180,49 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
 		container_of(work, struct intel_atomic_state, base.commit_work);
 	struct drm_i915_private *i915 = to_i915(state->base.dev);
 
-	intel_cleanup_dsbs(state);
-	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
-	drm_atomic_helper_commit_cleanup_done(&state->base);
-	drm_atomic_state_put(&state->base);
+	intel_cleanup_dsbs(state);
+	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
+	drm_atomic_helper_commit_cleanup_done(&state->base);
+	drm_atomic_state_put(&state->base);
+
+	intel_atomic_helper_free_state(i915);
+}
+
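+/*
+ * Read back the clear color values for planes using a gen12 RC CCS
+ * clear-color framebuffer, for later use when programming the planes.
+ */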
+static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	struct intel_plane *plane;
+	struct intel_plane_state *plane_state;
+	int i;
+
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		struct drm_framebuffer *fb = plane_state->hw.fb;
+		int ret;
 
-	intel_atomic_helper_free_state(i915);
+		if (!fb ||
+		    fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
+			continue;
+
+		/*
+		 * The layout of the fast clear color value expected by HW
+		 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
+		 * - 4 x 4 bytes per-channel value
+		 *   (in surface type specific float/int format provided by the fb user)
+		 * - 8 bytes native color value used by the display
+		 *   (converted/written by GPU during a fast clear operation using the
+		 *    above per-channel values)
+		 *
+		 * The commit's FB prepare hook already ensured that the FB obj is
+		 * pinned, and the caller made sure that the object is synced with
+		 * respect to the GPU's clear color value write to it.
+		 */
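+		/* Read the 8 byte native color value that follows the 16 bytes of per-channel values. */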
+		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
+						     fb->offsets[2] + 16,
+						     &plane_state->ccval,
+						     sizeof(plane_state->ccval));
+		/* The above could only fail if the FB obj has an unexpected backing store type. */
+		drm_WARN_ON(&i915->drm, ret);
+	}
 }
 
 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
@@ -16263,9 +13242,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 	if (state->modeset)
 		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
 
+	intel_atomic_prepare_plane_clear_colors(state);
+
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		if (needs_modeset(new_crtc_state) ||
+		if (intel_crtc_needs_modeset(new_crtc_state) ||
 		    new_crtc_state->update_pipe) {
 
 			put_domains[crtc->pipe] =
@@ -16291,7 +13272,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 
 	/* Complete the events for pipes that have now been disabled */
 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
-		bool modeset = needs_modeset(new_crtc_state);
+		bool modeset = intel_crtc_needs_modeset(new_crtc_state);
 
 		/* Complete events for now disabled pipes here. */
 		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
@@ -16311,7 +13292,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 
 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
 		if (new_crtc_state->uapi.async_flip)
-			skl_enable_flip_done(crtc);
+			intel_crtc_enable_flip_done(state, crtc);
 	}
 
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -16336,10 +13317,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 
 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
 		if (new_crtc_state->uapi.async_flip)
-			skl_disable_flip_done(crtc);
+			intel_crtc_disable_flip_done(state, crtc);
 
 		if (new_crtc_state->hw.active &&
-		    !needs_modeset(new_crtc_state) &&
+		    !intel_crtc_needs_modeset(new_crtc_state) &&
 		    !new_crtc_state->preload_luts &&
 		    (new_crtc_state->uapi.color_mgmt_changed ||
 		     new_crtc_state->update_pipe))
@@ -16359,1011 +13340,471 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 		 * Gen2 reports pipe underruns whenever all planes are disabled.
 		 * So re-enable underrun reporting after some planes get enabled.
 		 *
-		 * We do this before .optimize_watermarks() so that we have a
-		 * chance of catching underruns with the intermediate watermarks
-		 * vs. the new plane configuration.
-		 */
-		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
-			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
-
-		if (dev_priv->display.optimize_watermarks)
-			dev_priv->display.optimize_watermarks(state, crtc);
-	}
-
-	intel_dbuf_post_plane_update(state);
-
-	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-		intel_post_plane_update(state, crtc);
-
-		if (put_domains[i])
-			modeset_put_power_domains(dev_priv, put_domains[i]);
-
-		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
-
-		/*
-		 * DSB cleanup is done in cleanup_work aligning with framebuffer
-		 * cleanup. So copy and reset the dsb structure to sync with
-		 * commit_done and later do dsb cleanup in cleanup_work.
-		 */
-		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
-	}
-
-	/* Underruns don't always raise interrupts, so check manually */
-	intel_check_cpu_fifo_underruns(dev_priv);
-	intel_check_pch_fifo_underruns(dev_priv);
-
-	if (state->modeset)
-		intel_verify_planes(state);
-
-	intel_sagv_post_plane_update(state);
-
-	drm_atomic_helper_commit_hw_done(&state->base);
-
-	if (state->modeset) {
-		/* As one of the primary mmio accessors, KMS has a high
-		 * likelihood of triggering bugs in unclaimed access. After we
-		 * finish modesetting, see if an error has been flagged, and if
-		 * so enable debugging for the next modeset - and hope we catch
-		 * the culprit.
-		 */
-		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
-		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
-	}
-	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
-
-	/*
-	 * Defer the cleanup of the old state to a separate worker to not
-	 * impede the current task (userspace for blocking modesets) that
-	 * are executed inline. For out-of-line asynchronous modesets/flips,
-	 * deferring to a new worker seems overkill, but we would place a
-	 * schedule point (cond_resched()) here anyway to keep latencies
-	 * down.
-	 */
-	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
-	queue_work(system_highpri_wq, &state->base.commit_work);
-}
-
-static void intel_atomic_commit_work(struct work_struct *work)
-{
-	struct intel_atomic_state *state =
-		container_of(work, struct intel_atomic_state, base.commit_work);
-
-	intel_atomic_commit_tail(state);
-}
-
-static int __i915_sw_fence_call
-intel_atomic_commit_ready(struct i915_sw_fence *fence,
-			  enum i915_sw_fence_notify notify)
-{
-	struct intel_atomic_state *state =
-		container_of(fence, struct intel_atomic_state, commit_ready);
-
-	switch (notify) {
-	case FENCE_COMPLETE:
-		/* we do blocking waits in the worker, nothing to do here */
-		break;
-	case FENCE_FREE:
-		{
-			struct intel_atomic_helper *helper =
-				&to_i915(state->base.dev)->atomic_helper;
-
-			if (llist_add(&state->freed, &helper->free_list))
-				schedule_work(&helper->free_work);
-			break;
-		}
-	}
-
-	return NOTIFY_DONE;
-}
-
-static void intel_atomic_track_fbs(struct intel_atomic_state *state)
-{
-	struct intel_plane_state *old_plane_state, *new_plane_state;
-	struct intel_plane *plane;
-	int i;
-
-	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
-					     new_plane_state, i)
-		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
-					to_intel_frontbuffer(new_plane_state->hw.fb),
-					plane->frontbuffer_bit);
-}
-
-static int intel_atomic_commit(struct drm_device *dev,
-			       struct drm_atomic_state *_state,
-			       bool nonblock)
-{
-	struct intel_atomic_state *state = to_intel_atomic_state(_state);
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int ret = 0;
-
-	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
-	drm_atomic_state_get(&state->base);
-	i915_sw_fence_init(&state->commit_ready,
-			   intel_atomic_commit_ready);
-
-	/*
-	 * The intel_legacy_cursor_update() fast path takes care
-	 * of avoiding the vblank waits for simple cursor
-	 * movement and flips. For cursor on/off and size changes,
-	 * we want to perform the vblank waits so that watermark
-	 * updates happen during the correct frames. Gen9+ have
-	 * double buffered watermarks and so shouldn't need this.
-	 *
-	 * Unset state->legacy_cursor_update before the call to
-	 * drm_atomic_helper_setup_commit() because otherwise
-	 * drm_atomic_helper_wait_for_flip_done() is a noop and
-	 * we get FIFO underruns because we didn't wait
-	 * for vblank.
-	 *
-	 * FIXME doing watermarks and fb cleanup from a vblank worker
-	 * (assuming we had any) would solve these problems.
-	 */
-	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
-		struct intel_crtc_state *new_crtc_state;
-		struct intel_crtc *crtc;
-		int i;
-
-		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
-			if (new_crtc_state->wm.need_postvbl_update ||
-			    new_crtc_state->update_wm_post)
-				state->base.legacy_cursor_update = false;
-	}
-
-	ret = intel_atomic_prepare_commit(state);
-	if (ret) {
-		drm_dbg_atomic(&dev_priv->drm,
-			       "Preparing state failed with %i\n", ret);
-		i915_sw_fence_commit(&state->commit_ready);
-		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
-		return ret;
-	}
-
-	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
-	if (!ret)
-		ret = drm_atomic_helper_swap_state(&state->base, true);
-	if (!ret)
-		intel_atomic_swap_global_state(state);
-
-	if (ret) {
-		struct intel_crtc_state *new_crtc_state;
-		struct intel_crtc *crtc;
-		int i;
-
-		i915_sw_fence_commit(&state->commit_ready);
-
-		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
-			intel_dsb_cleanup(new_crtc_state);
-
-		drm_atomic_helper_cleanup_planes(dev, &state->base);
-		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
-		return ret;
-	}
-	dev_priv->wm.distrust_bios_wm = false;
-	intel_shared_dpll_swap_state(state);
-	intel_atomic_track_fbs(state);
-
-	drm_atomic_state_get(&state->base);
-	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
-
-	i915_sw_fence_commit(&state->commit_ready);
-	if (nonblock && state->modeset) {
-		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
-	} else if (nonblock) {
-		queue_work(dev_priv->flip_wq, &state->base.commit_work);
-	} else {
-		if (state->modeset)
-			flush_workqueue(dev_priv->modeset_wq);
-		intel_atomic_commit_tail(state);
-	}
-
-	return 0;
-}
-
-struct wait_rps_boost {
-	struct wait_queue_entry wait;
-
-	struct drm_crtc *crtc;
-	struct i915_request *request;
-};
-
-static int do_rps_boost(struct wait_queue_entry *_wait,
-			unsigned mode, int sync, void *key)
-{
-	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
-	struct i915_request *rq = wait->request;
-
-	/*
-	 * If we missed the vblank, but the request is already running it
-	 * is reasonable to assume that it will complete before the next
-	 * vblank without our intervention, so leave RPS alone.
-	 */
-	if (!i915_request_started(rq))
-		intel_rps_boost(rq);
-	i915_request_put(rq);
-
-	drm_crtc_vblank_put(wait->crtc);
-
-	list_del(&wait->wait.entry);
-	kfree(wait);
-	return 1;
-}
-
-static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
-				       struct dma_fence *fence)
-{
-	struct wait_rps_boost *wait;
-
-	if (!dma_fence_is_i915(fence))
-		return;
-
-	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
-		return;
-
-	if (drm_crtc_vblank_get(crtc))
-		return;
+		 * We do this before .optimize_watermarks() so that we have a
+		 * chance of catching underruns with the intermediate watermarks
+		 * vs. the new plane configuration.
+		 */
+		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
+			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
 
-	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
-	if (!wait) {
-		drm_crtc_vblank_put(crtc);
-		return;
+		if (dev_priv->display.optimize_watermarks)
+			dev_priv->display.optimize_watermarks(state, crtc);
 	}
 
-	wait->request = to_request(dma_fence_get(fence));
-	wait->crtc = crtc;
-
-	wait->wait.func = do_rps_boost;
-	wait->wait.flags = 0;
+	intel_dbuf_post_plane_update(state);
 
-	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
-}
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		intel_post_plane_update(state, crtc);
 
-static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
-{
-	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	struct drm_framebuffer *fb = plane_state->hw.fb;
-	struct i915_vma *vma;
+		modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
 
-	if (plane->id == PLANE_CURSOR &&
-	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
-		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-		const int align = intel_cursor_alignment(dev_priv);
-		int err;
+		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
 
-		err = i915_gem_object_attach_phys(obj, align);
-		if (err)
-			return err;
+		/*
+		 * DSB cleanup is done in cleanup_work, aligned with framebuffer
+		 * cleanup. So copy and reset the dsb structure to sync with
+		 * commit_done, and do the dsb cleanup later in cleanup_work.
+		 */
+		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
 	}
 
-	vma = intel_pin_and_fence_fb_obj(fb,
-					 &plane_state->view,
-					 intel_plane_uses_fence(plane_state),
-					 &plane_state->flags);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+	/* Underruns don't always raise interrupts, so check manually */
+	intel_check_cpu_fifo_underruns(dev_priv);
+	intel_check_pch_fifo_underruns(dev_priv);
 
-	plane_state->vma = vma;
+	if (state->modeset)
+		intel_verify_planes(state);
 
-	return 0;
-}
+	intel_sagv_post_plane_update(state);
 
-static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
-{
-	struct i915_vma *vma;
+	drm_atomic_helper_commit_hw_done(&state->base);
 
-	vma = fetch_and_zero(&old_plane_state->vma);
-	if (vma)
-		intel_unpin_fb_vma(vma, old_plane_state->flags);
+	if (state->modeset) {
+		/* As one of the primary mmio accessors, KMS has a high
+		 * likelihood of triggering bugs in unclaimed access. After we
+		 * finish modesetting, see if an error has been flagged, and if
+		 * so enable debugging for the next modeset - and hope we catch
+		 * the culprit.
+		 */
+		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
+		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
+	}
+	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+
+	/*
+	 * Defer the cleanup of the old state to a separate worker to not
+	 * impede the current task (userspace for blocking modesets) that
+	 * is executed inline. For out-of-line asynchronous modesets/flips,
+	 * deferring to a new worker seems overkill, but we would place a
+	 * schedule point (cond_resched()) here anyway to keep latencies
+	 * down.
+	 */
+	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
+	queue_work(system_highpri_wq, &state->base.commit_work);
 }
 
-static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
+static void intel_atomic_commit_work(struct work_struct *work)
 {
-	struct i915_sched_attr attr = {
-		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
-	};
+	struct intel_atomic_state *state =
+		container_of(work, struct intel_atomic_state, base.commit_work);
 
-	i915_gem_object_wait_priority(obj, 0, &attr);
+	intel_atomic_commit_tail(state);
 }
 
-/**
- * intel_prepare_plane_fb - Prepare fb for usage on plane
- * @_plane: drm plane to prepare for
- * @_new_plane_state: the plane state being prepared
- *
- * Prepares a framebuffer for usage on a display plane.  Generally this
- * involves pinning the underlying object and updating the frontbuffer tracking
- * bits.  Some older platforms need special physical address handling for
- * cursor planes.
- *
- * Returns 0 on success, negative error code on failure.
- */
-int
-intel_prepare_plane_fb(struct drm_plane *_plane,
-		       struct drm_plane_state *_new_plane_state)
+static int __i915_sw_fence_call
+intel_atomic_commit_ready(struct i915_sw_fence *fence,
+			  enum i915_sw_fence_notify notify)
 {
-	struct intel_plane *plane = to_intel_plane(_plane);
-	struct intel_plane_state *new_plane_state =
-		to_intel_plane_state(_new_plane_state);
 	struct intel_atomic_state *state =
-		to_intel_atomic_state(new_plane_state->uapi.state);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct intel_plane_state *old_plane_state =
-		intel_atomic_get_old_plane_state(state, plane);
-	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
-	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
-	int ret;
-
-	if (old_obj) {
-		const struct intel_crtc_state *crtc_state =
-			intel_atomic_get_new_crtc_state(state,
-							to_intel_crtc(old_plane_state->hw.crtc));
-
-		/* Big Hammer, we also need to ensure that any pending
-		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
-		 * current scanout is retired before unpinning the old
-		 * framebuffer. Note that we rely on userspace rendering
-		 * into the buffer attached to the pipe they are waiting
-		 * on. If not, userspace generates a GPU hang with IPEHR
-		 * point to the MI_WAIT_FOR_EVENT.
-		 *
-		 * This should only fail upon a hung GPU, in which case we
-		 * can safely continue.
-		 */
-		if (needs_modeset(crtc_state)) {
-			ret = i915_sw_fence_await_reservation(&state->commit_ready,
-							      old_obj->base.resv, NULL,
-							      false, 0,
-							      GFP_KERNEL);
-			if (ret < 0)
-				return ret;
-		}
-	}
-
-	if (new_plane_state->uapi.fence) { /* explicit fencing */
-		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
-						    new_plane_state->uapi.fence,
-						    i915_fence_timeout(dev_priv),
-						    GFP_KERNEL);
-		if (ret < 0)
-			return ret;
-	}
-
-	if (!obj)
-		return 0;
-
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		return ret;
-
-	ret = intel_plane_pin_fb(new_plane_state);
-
-	i915_gem_object_unpin_pages(obj);
-	if (ret)
-		return ret;
-
-	fb_obj_bump_render_priority(obj);
-	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
-
-	if (!new_plane_state->uapi.fence) { /* implicit fencing */
-		struct dma_fence *fence;
+		container_of(fence, struct intel_atomic_state, commit_ready);
 
-		ret = i915_sw_fence_await_reservation(&state->commit_ready,
-						      obj->base.resv, NULL,
-						      false,
-						      i915_fence_timeout(dev_priv),
-						      GFP_KERNEL);
-		if (ret < 0)
-			goto unpin_fb;
+	switch (notify) {
+	case FENCE_COMPLETE:
+		/* we do blocking waits in the worker, nothing to do here */
+		break;
+	case FENCE_FREE:
+		{
+			struct intel_atomic_helper *helper =
+				&to_i915(state->base.dev)->atomic_helper;
 
-		fence = dma_resv_get_excl_rcu(obj->base.resv);
-		if (fence) {
-			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
-						   fence);
-			dma_fence_put(fence);
+			if (llist_add(&state->freed, &helper->free_list))
+				schedule_work(&helper->free_work);
+			break;
 		}
-	} else {
-		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
-					   new_plane_state->uapi.fence);
-	}
-
-	/*
-	 * We declare pageflips to be interactive and so merit a small bias
-	 * towards upclocking to deliver the frame on time. By only changing
-	 * the RPS thresholds to sample more regularly and aim for higher
-	 * clocks we can hopefully deliver low power workloads (like kodi)
-	 * that are not quite steady state without resorting to forcing
-	 * maximum clocks following a vblank miss (see do_rps_boost()).
-	 */
-	if (!state->rps_interactive) {
-		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
-		state->rps_interactive = true;
 	}
 
-	return 0;
+	return NOTIFY_DONE;
+}
 
-unpin_fb:
-	intel_plane_unpin_fb(new_plane_state);
+static void intel_atomic_track_fbs(struct intel_atomic_state *state)
+{
+	struct intel_plane_state *old_plane_state, *new_plane_state;
+	struct intel_plane *plane;
+	int i;
 
-	return ret;
+	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+					     new_plane_state, i)
+		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
+					to_intel_frontbuffer(new_plane_state->hw.fb),
+					plane->frontbuffer_bit);
 }
 
-/**
- * intel_cleanup_plane_fb - Cleans up an fb after plane use
- * @plane: drm plane to clean up for
- * @_old_plane_state: the state from the previous modeset
- *
- * Cleans up a framebuffer that has just been removed from a plane.
- */
-void
-intel_cleanup_plane_fb(struct drm_plane *plane,
-		       struct drm_plane_state *_old_plane_state)
+static int intel_atomic_commit(struct drm_device *dev,
+			       struct drm_atomic_state *_state,
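+	/*
+	 * With VRR the timings are not fixed, so derive the vblank
+	 * timestamping constants from the vmin/vmax based timings
+	 * rather than from the nominal mode.
+	 */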
+			       bool nonblock)
 {
-	struct intel_plane_state *old_plane_state =
-		to_intel_plane_state(_old_plane_state);
-	struct intel_atomic_state *state =
-		to_intel_atomic_state(old_plane_state->uapi.state);
-	struct drm_i915_private *dev_priv = to_i915(plane->dev);
-	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
+	struct intel_atomic_state *state = to_intel_atomic_state(_state);
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	int ret = 0;
+
+	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+	drm_atomic_state_get(&state->base);
+	i915_sw_fence_init(&state->commit_ready,
+			   intel_atomic_commit_ready);
 
-	if (!obj)
-		return;
+	/*
+	 * The intel_legacy_cursor_update() fast path takes care
+	 * of avoiding the vblank waits for simple cursor
+	 * movement and flips. For cursor on/off and size changes,
+	 * we want to perform the vblank waits so that watermark
+	 * updates happen during the correct frames. Gen9+ have
+	 * double buffered watermarks and so shouldn't need this.
+	 *
+	 * Unset state->legacy_cursor_update before the call to
+	 * drm_atomic_helper_setup_commit() because otherwise
+	 * drm_atomic_helper_wait_for_flip_done() is a noop and
+	 * we get FIFO underruns because we didn't wait
+	 * for vblank.
+	 *
+	 * FIXME doing watermarks and fb cleanup from a vblank worker
+	 * (assuming we had any) would solve these problems.
+	 */
+	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
+		struct intel_crtc_state *new_crtc_state;
+		struct intel_crtc *crtc;
+		int i;
 
-	if (state->rps_interactive) {
-		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
-		state->rps_interactive = false;
+		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+			if (new_crtc_state->wm.need_postvbl_update ||
+			    new_crtc_state->update_wm_post)
+				state->base.legacy_cursor_update = false;
 	}
 
-	/* Should only be called after a successful intel_prepare_plane_fb()! */
-	intel_plane_unpin_fb(old_plane_state);
-}
+	ret = intel_atomic_prepare_commit(state);
+	if (ret) {
+		drm_dbg_atomic(&dev_priv->drm,
+			       "Preparing state failed with %i\n", ret);
+		i915_sw_fence_commit(&state->commit_ready);
+		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+		return ret;
+	}
 
-/**
- * intel_plane_destroy - destroy a plane
- * @plane: plane to destroy
- *
- * Common destruction function for all types of planes (primary, cursor,
- * sprite).
- */
-void intel_plane_destroy(struct drm_plane *plane)
-{
-	drm_plane_cleanup(plane);
-	kfree(to_intel_plane(plane));
-}
+	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
+	if (!ret)
+		ret = drm_atomic_helper_swap_state(&state->base, true);
+	if (!ret)
+		intel_atomic_swap_global_state(state);
 
-static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
-					    u32 format, u64 modifier)
-{
-	switch (modifier) {
-	case DRM_FORMAT_MOD_LINEAR:
-	case I915_FORMAT_MOD_X_TILED:
-		break;
-	default:
-		return false;
-	}
+	if (ret) {
+		struct intel_crtc_state *new_crtc_state;
+		struct intel_crtc *crtc;
+		int i;
 
-	switch (format) {
-	case DRM_FORMAT_C8:
-	case DRM_FORMAT_RGB565:
-	case DRM_FORMAT_XRGB1555:
-	case DRM_FORMAT_XRGB8888:
-		return modifier == DRM_FORMAT_MOD_LINEAR ||
-			modifier == I915_FORMAT_MOD_X_TILED;
-	default:
-		return false;
-	}
-}
+		i915_sw_fence_commit(&state->commit_ready);
 
-static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
-					    u32 format, u64 modifier)
-{
-	switch (modifier) {
-	case DRM_FORMAT_MOD_LINEAR:
-	case I915_FORMAT_MOD_X_TILED:
-		break;
-	default:
-		return false;
+		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+			intel_dsb_cleanup(new_crtc_state);
+
+		drm_atomic_helper_cleanup_planes(dev, &state->base);
+		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+		return ret;
 	}
+	intel_shared_dpll_swap_state(state);
+	intel_atomic_track_fbs(state);
 
-	switch (format) {
-	case DRM_FORMAT_C8:
-	case DRM_FORMAT_RGB565:
-	case DRM_FORMAT_XRGB8888:
-	case DRM_FORMAT_XBGR8888:
-	case DRM_FORMAT_ARGB8888:
-	case DRM_FORMAT_ABGR8888:
-	case DRM_FORMAT_XRGB2101010:
-	case DRM_FORMAT_XBGR2101010:
-	case DRM_FORMAT_ARGB2101010:
-	case DRM_FORMAT_ABGR2101010:
-	case DRM_FORMAT_XBGR16161616F:
-		return modifier == DRM_FORMAT_MOD_LINEAR ||
-			modifier == I915_FORMAT_MOD_X_TILED;
-	default:
-		return false;
+	drm_atomic_state_get(&state->base);
+	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
+
+	i915_sw_fence_commit(&state->commit_ready);
+	if (nonblock && state->modeset) {
+		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
+	} else if (nonblock) {
+		queue_work(dev_priv->flip_wq, &state->base.commit_work);
+	} else {
+		if (state->modeset)
+			flush_workqueue(dev_priv->modeset_wq);
+		intel_atomic_commit_tail(state);
 	}
-}
 
-static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
-					      u32 format, u64 modifier)
-{
-	return modifier == DRM_FORMAT_MOD_LINEAR &&
-		format == DRM_FORMAT_ARGB8888;
+	return 0;
 }
 
-static const struct drm_plane_funcs i965_plane_funcs = {
-	.update_plane = drm_atomic_helper_update_plane,
-	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = intel_plane_destroy,
-	.atomic_duplicate_state = intel_plane_duplicate_state,
-	.atomic_destroy_state = intel_plane_destroy_state,
-	.format_mod_supported = i965_plane_format_mod_supported,
-};
+struct wait_rps_boost {
+	struct wait_queue_entry wait;
 
-static const struct drm_plane_funcs i8xx_plane_funcs = {
-	.update_plane = drm_atomic_helper_update_plane,
-	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = intel_plane_destroy,
-	.atomic_duplicate_state = intel_plane_duplicate_state,
-	.atomic_destroy_state = intel_plane_destroy_state,
-	.format_mod_supported = i8xx_plane_format_mod_supported,
+	struct drm_crtc *crtc;
+	struct i915_request *request;
 };
 
-static int
-intel_legacy_cursor_update(struct drm_plane *_plane,
-			   struct drm_crtc *_crtc,
-			   struct drm_framebuffer *fb,
-			   int crtc_x, int crtc_y,
-			   unsigned int crtc_w, unsigned int crtc_h,
-			   u32 src_x, u32 src_y,
-			   u32 src_w, u32 src_h,
-			   struct drm_modeset_acquire_ctx *ctx)
+static int do_rps_boost(struct wait_queue_entry *_wait,
+			unsigned mode, int sync, void *key)
 {
-	struct intel_plane *plane = to_intel_plane(_plane);
-	struct intel_crtc *crtc = to_intel_crtc(_crtc);
-	struct intel_plane_state *old_plane_state =
-		to_intel_plane_state(plane->base.state);
-	struct intel_plane_state *new_plane_state;
-	struct intel_crtc_state *crtc_state =
-		to_intel_crtc_state(crtc->base.state);
-	struct intel_crtc_state *new_crtc_state;
-	int ret;
+	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
+	struct i915_request *rq = wait->request;
 
 	/*
-	 * When crtc is inactive or there is a modeset pending,
-	 * wait for it to complete in the slowpath
-	 *
-	 * FIXME bigjoiner fastpath would be good
+	 * If we missed the vblank, but the request is already running it
+	 * is reasonable to assume that it will complete before the next
+	 * vblank without our intervention, so leave RPS alone.
 	 */
-	if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
-	    crtc_state->update_pipe || crtc_state->bigjoiner)
-		goto slow;
+	if (!i915_request_started(rq))
+		intel_rps_boost(rq);
+	i915_request_put(rq);
 
-	/*
-	 * Don't do an async update if there is an outstanding commit modifying
-	 * the plane.  This prevents our async update's changes from getting
-	 * overridden by a previous synchronous update's state.
-	 */
-	if (old_plane_state->uapi.commit &&
-	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
-		goto slow;
+	drm_crtc_vblank_put(wait->crtc);
 
-	/*
-	 * If any parameters change that may affect watermarks,
-	 * take the slowpath. Only changing fb or position should be
-	 * in the fastpath.
-	 */
-	if (old_plane_state->uapi.crtc != &crtc->base ||
-	    old_plane_state->uapi.src_w != src_w ||
-	    old_plane_state->uapi.src_h != src_h ||
-	    old_plane_state->uapi.crtc_w != crtc_w ||
-	    old_plane_state->uapi.crtc_h != crtc_h ||
-	    !old_plane_state->uapi.fb != !fb)
-		goto slow;
-
-	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
-	if (!new_plane_state)
-		return -ENOMEM;
+	list_del(&wait->wait.entry);
+	kfree(wait);
+	return 1;
+}
 
-	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
-	if (!new_crtc_state) {
-		ret = -ENOMEM;
-		goto out_free;
-	}
+static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
+				       struct dma_fence *fence)
+{
+	struct wait_rps_boost *wait;
 
-	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
+	if (!dma_fence_is_i915(fence))
+		return;
 
-	new_plane_state->uapi.src_x = src_x;
-	new_plane_state->uapi.src_y = src_y;
-	new_plane_state->uapi.src_w = src_w;
-	new_plane_state->uapi.src_h = src_h;
-	new_plane_state->uapi.crtc_x = crtc_x;
-	new_plane_state->uapi.crtc_y = crtc_y;
-	new_plane_state->uapi.crtc_w = crtc_w;
-	new_plane_state->uapi.crtc_h = crtc_h;
+	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
+		return;
 
-	intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc);
+	if (drm_crtc_vblank_get(crtc))
+		return;
 
-	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
-						  old_plane_state, new_plane_state);
-	if (ret)
-		goto out_free;
+	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+	if (!wait) {
+		drm_crtc_vblank_put(crtc);
+		return;
+	}
 
-	ret = intel_plane_pin_fb(new_plane_state);
-	if (ret)
-		goto out_free;
+	wait->request = to_request(dma_fence_get(fence));
+	wait->crtc = crtc;
 
-	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
-				ORIGIN_FLIP);
-	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
-				to_intel_frontbuffer(new_plane_state->hw.fb),
-				plane->frontbuffer_bit);
+	wait->wait.func = do_rps_boost;
+	wait->wait.flags = 0;
 
-	/* Swap plane state */
-	plane->base.state = &new_plane_state->uapi;
+	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
+}
 
-	/*
-	 * We cannot swap crtc_state as it may be in use by an atomic commit or
-	 * page flip that's running simultaneously. If we swap crtc_state and
-	 * destroy the old state, we will cause a use-after-free there.
-	 *
-	 * Only update active_planes, which is needed for our internal
-	 * bookkeeping. Either value will do the right thing when updating
-	 * planes atomically. If the cursor was part of the atomic update then
-	 * we would have taken the slowpath.
-	 */
-	crtc_state->active_planes = new_crtc_state->active_planes;
+int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+{
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_framebuffer *fb = plane_state->hw.fb;
+	struct i915_vma *vma;
 
-	if (new_plane_state->uapi.visible)
-		intel_update_plane(plane, crtc_state, new_plane_state);
-	else
-		intel_disable_plane(plane, crtc_state);
+	if (plane->id == PLANE_CURSOR &&
+	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
+		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+		const int align = intel_cursor_alignment(dev_priv);
+		int err;
 
-	intel_plane_unpin_fb(old_plane_state);
+		err = i915_gem_object_attach_phys(obj, align);
+		if (err)
+			return err;
+	}
 
-out_free:
-	if (new_crtc_state)
-		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
-	if (ret)
-		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
-	else
-		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
-	return ret;
+	vma = intel_pin_and_fence_fb_obj(fb,
+					 &plane_state->view,
+					 intel_plane_uses_fence(plane_state),
+					 &plane_state->flags);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
 
-slow:
-	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
-					      crtc_x, crtc_y, crtc_w, crtc_h,
-					      src_x, src_y, src_w, src_h, ctx);
-}
+	plane_state->vma = vma;
 
-static const struct drm_plane_funcs intel_cursor_plane_funcs = {
-	.update_plane = intel_legacy_cursor_update,
-	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = intel_plane_destroy,
-	.atomic_duplicate_state = intel_plane_duplicate_state,
-	.atomic_destroy_state = intel_plane_destroy_state,
-	.format_mod_supported = intel_cursor_format_mod_supported,
-};
+	return 0;
+}
 
-static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
-			       enum i9xx_plane_id i9xx_plane)
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
 {
-	if (!HAS_FBC(dev_priv))
-		return false;
+	struct i915_vma *vma;
 
-	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
-		return i9xx_plane == PLANE_A; /* tied to pipe A */
-	else if (IS_IVYBRIDGE(dev_priv))
-		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
-			i9xx_plane == PLANE_C;
-	else if (INTEL_GEN(dev_priv) >= 4)
-		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
-	else
-		return i9xx_plane == PLANE_A;
+	vma = fetch_and_zero(&old_plane_state->vma);
+	if (vma)
+		intel_unpin_fb_vma(vma, old_plane_state->flags);
 }
 
-static struct intel_plane *
-intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
+/**
+ * intel_prepare_plane_fb - Prepare fb for usage on plane
+ * @_plane: drm plane to prepare for
+ * @_new_plane_state: the plane state being prepared
+ *
+ * Prepares a framebuffer for usage on a display plane.  Generally this
+ * involves pinning the underlying object and updating the frontbuffer tracking
+ * bits.  Some older platforms need special physical address handling for
+ * cursor planes.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int
+intel_prepare_plane_fb(struct drm_plane *_plane,
+		       struct drm_plane_state *_new_plane_state)
 {
-	struct intel_plane *plane;
-	const struct drm_plane_funcs *plane_funcs;
-	unsigned int supported_rotations;
-	const u32 *formats;
-	int num_formats;
-	int ret, zpos;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		return skl_universal_plane_create(dev_priv, pipe,
-						  PLANE_PRIMARY);
-
-	plane = intel_plane_alloc();
-	if (IS_ERR(plane))
-		return plane;
-
-	plane->pipe = pipe;
-	/*
-	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
-	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
-	 */
-	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
-	    INTEL_NUM_PIPES(dev_priv) == 2)
-		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
-	else
-		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
-	plane->id = PLANE_PRIMARY;
-	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
-
-	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
-	if (plane->has_fbc) {
-		struct intel_fbc *fbc = &dev_priv->fbc;
+	struct i915_sched_attr attr = {
+		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
+	};
+	struct intel_plane *plane = to_intel_plane(_plane);
+	struct intel_plane_state *new_plane_state =
+		to_intel_plane_state(_new_plane_state);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(new_plane_state->uapi.state);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	const struct intel_plane_state *old_plane_state =
+		intel_atomic_get_old_plane_state(state, plane);
+	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
+	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
+	int ret;
 
-		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
-	}
+	if (old_obj) {
+		const struct intel_crtc_state *crtc_state =
+			intel_atomic_get_new_crtc_state(state,
+							to_intel_crtc(old_plane_state->hw.crtc));
 
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		formats = vlv_primary_formats;
-		num_formats = ARRAY_SIZE(vlv_primary_formats);
-	} else if (INTEL_GEN(dev_priv) >= 4) {
-		/*
-		 * WaFP16GammaEnabling:ivb
-		 * "Workaround : When using the 64-bit format, the plane
-		 *  output on each color channel has one quarter amplitude.
-		 *  It can be brought up to full amplitude by using pipe
-		 *  gamma correction or pipe color space conversion to
-		 *  multiply the plane output by four."
+		/* Big Hammer, we also need to ensure that any pending
+		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+		 * current scanout is retired before unpinning the old
+		 * framebuffer. Note that we rely on userspace rendering
+		 * into the buffer attached to the pipe they are waiting
+		 * on. If not, userspace generates a GPU hang with IPEHR
+		 * pointing to the MI_WAIT_FOR_EVENT.
 		 *
-		 * There is no dedicated plane gamma for the primary plane,
-		 * and using the pipe gamma/csc could conflict with other
-		 * planes, so we choose not to expose fp16 on IVB primary
-		 * planes. HSW primary planes no longer have this problem.
+		 * This should only fail upon a hung GPU, in which case we
+		 * can safely continue.
 		 */
-		if (IS_IVYBRIDGE(dev_priv)) {
-			formats = ivb_primary_formats;
-			num_formats = ARRAY_SIZE(ivb_primary_formats);
-		} else {
-			formats = i965_primary_formats;
-			num_formats = ARRAY_SIZE(i965_primary_formats);
+		if (intel_crtc_needs_modeset(crtc_state)) {
+			ret = i915_sw_fence_await_reservation(&state->commit_ready,
+							      old_obj->base.resv, NULL,
+							      false, 0,
+							      GFP_KERNEL);
+			if (ret < 0)
+				return ret;
 		}
-	} else {
-		formats = i8xx_primary_formats;
-		num_formats = ARRAY_SIZE(i8xx_primary_formats);
 	}
 
-	if (INTEL_GEN(dev_priv) >= 4)
-		plane_funcs = &i965_plane_funcs;
-	else
-		plane_funcs = &i8xx_plane_funcs;
-
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		plane->min_cdclk = vlv_plane_min_cdclk;
-	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
-		plane->min_cdclk = hsw_plane_min_cdclk;
-	else if (IS_IVYBRIDGE(dev_priv))
-		plane->min_cdclk = ivb_plane_min_cdclk;
-	else
-		plane->min_cdclk = i9xx_plane_min_cdclk;
-
-	plane->max_stride = i9xx_plane_max_stride;
-	plane->update_plane = i9xx_update_plane;
-	plane->disable_plane = i9xx_disable_plane;
-	plane->get_hw_state = i9xx_plane_get_hw_state;
-	plane->check_plane = i9xx_plane_check;
-
-	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
-		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
-					       0, plane_funcs,
-					       formats, num_formats,
-					       i9xx_format_modifiers,
-					       DRM_PLANE_TYPE_PRIMARY,
-					       "primary %c", pipe_name(pipe));
-	else
-		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
-					       0, plane_funcs,
-					       formats, num_formats,
-					       i9xx_format_modifiers,
-					       DRM_PLANE_TYPE_PRIMARY,
-					       "plane %c",
-					       plane_name(plane->i9xx_plane));
-	if (ret)
-		goto fail;
-
-	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
-		supported_rotations =
-			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
-			DRM_MODE_REFLECT_X;
-	} else if (INTEL_GEN(dev_priv) >= 4) {
-		supported_rotations =
-			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
-	} else {
-		supported_rotations = DRM_MODE_ROTATE_0;
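+	/*
+	 * Bump the explicit fence to display priority and gate the commit
+	 * on its completion.
+	 */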
+	if (new_plane_state->uapi.fence) { /* explicit fencing */
+		i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
+					     &attr);
+		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
+						    new_plane_state->uapi.fence,
+						    i915_fence_timeout(dev_priv),
+						    GFP_KERNEL);
+		if (ret < 0)
+			return ret;
 	}
 
-	if (INTEL_GEN(dev_priv) >= 4)
-		drm_plane_create_rotation_property(&plane->base,
-						   DRM_MODE_ROTATE_0,
-						   supported_rotations);
-
-	zpos = 0;
-	drm_plane_create_zpos_immutable_property(&plane->base, zpos);
-
-	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+	if (!obj)
+		return 0;
 
-	return plane;
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		return ret;
 
-fail:
-	intel_plane_free(plane);
+	ret = intel_plane_pin_fb(new_plane_state);
 
-	return ERR_PTR(ret);
-}
+	i915_gem_object_unpin_pages(obj);
+	if (ret)
+		return ret;
 
-static struct intel_plane *
-intel_cursor_plane_create(struct drm_i915_private *dev_priv,
-			  enum pipe pipe)
-{
-	struct intel_plane *cursor;
-	int ret, zpos;
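+	/*
+	 * Raise the priority of any rendering still pending on the new fb
+	 * and flush frontbuffer tracking so consumers (FBC, PSR) are notified.
+	 */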
+	i915_gem_object_wait_priority(obj, 0, &attr);
+	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
 
-	cursor = intel_plane_alloc();
-	if (IS_ERR(cursor))
-		return cursor;
+	if (!new_plane_state->uapi.fence) { /* implicit fencing */
+		struct dma_fence *fence;
 
-	cursor->pipe = pipe;
-	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
-	cursor->id = PLANE_CURSOR;
-	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
+		ret = i915_sw_fence_await_reservation(&state->commit_ready,
+						      obj->base.resv, NULL,
+						      false,
+						      i915_fence_timeout(dev_priv),
+						      GFP_KERNEL);
+		if (ret < 0)
+			goto unpin_fb;
 
-	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
-		cursor->max_stride = i845_cursor_max_stride;
-		cursor->update_plane = i845_update_cursor;
-		cursor->disable_plane = i845_disable_cursor;
-		cursor->get_hw_state = i845_cursor_get_hw_state;
-		cursor->check_plane = i845_check_cursor;
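+		/*
+		 * If the implicit (exclusive) fence is still unsignaled at
+		 * vblank, boost the GPU clocks so the flip isn't missed.
+		 */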
+		fence = dma_resv_get_excl_rcu(obj->base.resv);
+		if (fence) {
+			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
+						   fence);
+			dma_fence_put(fence);
+		}
 	} else {
-		cursor->max_stride = i9xx_cursor_max_stride;
-		cursor->update_plane = i9xx_update_cursor;
-		cursor->disable_plane = i9xx_disable_cursor;
-		cursor->get_hw_state = i9xx_cursor_get_hw_state;
-		cursor->check_plane = i9xx_check_cursor;
-	}
-
-	cursor->cursor.base = ~0;
-	cursor->cursor.cntl = ~0;
-
-	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
-		cursor->cursor.size = ~0;
-
-	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
-				       0, &intel_cursor_plane_funcs,
-				       intel_cursor_formats,
-				       ARRAY_SIZE(intel_cursor_formats),
-				       cursor_format_modifiers,
-				       DRM_PLANE_TYPE_CURSOR,
-				       "cursor %c", pipe_name(pipe));
-	if (ret)
-		goto fail;
-
-	if (INTEL_GEN(dev_priv) >= 4)
-		drm_plane_create_rotation_property(&cursor->base,
-						   DRM_MODE_ROTATE_0,
-						   DRM_MODE_ROTATE_0 |
-						   DRM_MODE_ROTATE_180);
-
-	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
-	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
-
-	if (INTEL_GEN(dev_priv) >= 12)
-		drm_plane_enable_fb_damage_clips(&cursor->base);
+		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
+					   new_plane_state->uapi.fence);
+	}
 
-	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
+	/*
+	 * We declare pageflips to be interactive and so merit a small bias
+	 * towards upclocking to deliver the frame on time. By only changing
+	 * the RPS thresholds to sample more regularly and aim for higher
+	 * clocks we can hopefully deliver low power workloads (like kodi)
+	 * that are not quite steady state without resorting to forcing
+	 * maximum clocks following a vblank miss (see do_rps_boost()).
+	 */
+	if (!state->rps_interactive) {
+		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
+		state->rps_interactive = true;
+	}
 
-	return cursor;
+	return 0;
 
-fail:
-	intel_plane_free(cursor);
+unpin_fb:
+	intel_plane_unpin_fb(new_plane_state);
 
-	return ERR_PTR(ret);
+	return ret;
 }
 
-#define INTEL_CRTC_FUNCS \
-	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
-	.set_config = drm_atomic_helper_set_config, \
-	.destroy = intel_crtc_destroy, \
-	.page_flip = drm_atomic_helper_page_flip, \
-	.atomic_duplicate_state = intel_crtc_duplicate_state, \
-	.atomic_destroy_state = intel_crtc_destroy_state, \
-	.set_crc_source = intel_crtc_set_crc_source, \
-	.verify_crc_source = intel_crtc_verify_crc_source, \
-	.get_crc_sources = intel_crtc_get_crc_sources
-
-static const struct drm_crtc_funcs bdw_crtc_funcs = {
-	INTEL_CRTC_FUNCS,
-
-	.get_vblank_counter = g4x_get_vblank_counter,
-	.enable_vblank = bdw_enable_vblank,
-	.disable_vblank = bdw_disable_vblank,
-	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs ilk_crtc_funcs = {
-	INTEL_CRTC_FUNCS,
-
-	.get_vblank_counter = g4x_get_vblank_counter,
-	.enable_vblank = ilk_enable_vblank,
-	.disable_vblank = ilk_disable_vblank,
-	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs g4x_crtc_funcs = {
-	INTEL_CRTC_FUNCS,
-
-	.get_vblank_counter = g4x_get_vblank_counter,
-	.enable_vblank = i965_enable_vblank,
-	.disable_vblank = i965_disable_vblank,
-	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i965_crtc_funcs = {
-	INTEL_CRTC_FUNCS,
-
-	.get_vblank_counter = i915_get_vblank_counter,
-	.enable_vblank = i965_enable_vblank,
-	.disable_vblank = i965_disable_vblank,
-	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i915gm_crtc_funcs = {
-	INTEL_CRTC_FUNCS,
-
-	.get_vblank_counter = i915_get_vblank_counter,
-	.enable_vblank = i915gm_enable_vblank,
-	.disable_vblank = i915gm_disable_vblank,
-	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i915_crtc_funcs = {
-	INTEL_CRTC_FUNCS,
-
-	.get_vblank_counter = i915_get_vblank_counter,
-	.enable_vblank = i8xx_enable_vblank,
-	.disable_vblank = i8xx_disable_vblank,
-	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i8xx_crtc_funcs = {
-	INTEL_CRTC_FUNCS,
-
-	/* no hw vblank counter */
-	.enable_vblank = i8xx_enable_vblank,
-	.disable_vblank = i8xx_disable_vblank,
-	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static struct intel_crtc *intel_crtc_alloc(void)
+/**
+ * intel_cleanup_plane_fb - Cleans up an fb after plane use
+ * @plane: drm plane to clean up for
+ * @_old_plane_state: the state from the previous modeset
+ *
+ * Cleans up a framebuffer that has just been removed from a plane.
+ */
+void
+intel_cleanup_plane_fb(struct drm_plane *plane,
+		       struct drm_plane_state *_old_plane_state)
 {
-	struct intel_crtc_state *crtc_state;
-	struct intel_crtc *crtc;
+	struct intel_plane_state *old_plane_state =
+		to_intel_plane_state(_old_plane_state);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(old_plane_state->uapi.state);
+	struct drm_i915_private *dev_priv = to_i915(plane->dev);
+	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
 
-	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
-	if (!crtc)
-		return ERR_PTR(-ENOMEM);
+	if (!obj)
+		return;
 
-	crtc_state = intel_crtc_state_alloc(crtc);
-	if (!crtc_state) {
-		kfree(crtc);
-		return ERR_PTR(-ENOMEM);
+	if (state->rps_interactive) {
+		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
+		state->rps_interactive = false;
 	}
 
-	crtc->base.state = &crtc_state->uapi;
-	crtc->config = crtc_state;
-
-	return crtc;
+	/* Should only be called after a successful intel_prepare_plane_fb()! */
+	intel_plane_unpin_fb(old_plane_state);
 }
 
-static void intel_crtc_free(struct intel_crtc *crtc)
+/**
+ * intel_plane_destroy - destroy a plane
+ * @plane: plane to destroy
+ *
+ * Common destruction function for all types of planes (primary, cursor,
+ * sprite).
+ */
+void intel_plane_destroy(struct drm_plane *plane)
 {
-	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
-	kfree(crtc);
+	drm_plane_cleanup(plane);
+	kfree(to_intel_plane(plane));
 }
 
 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
@@ -17378,100 +13819,6 @@ static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
 	}
 }
 
-static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
-	struct intel_plane *primary, *cursor;
-	const struct drm_crtc_funcs *funcs;
-	struct intel_crtc *crtc;
-	int sprite, ret;
-
-	crtc = intel_crtc_alloc();
-	if (IS_ERR(crtc))
-		return PTR_ERR(crtc);
-
-	crtc->pipe = pipe;
-	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
-
-	primary = intel_primary_plane_create(dev_priv, pipe);
-	if (IS_ERR(primary)) {
-		ret = PTR_ERR(primary);
-		goto fail;
-	}
-	crtc->plane_ids_mask |= BIT(primary->id);
-
-	for_each_sprite(dev_priv, pipe, sprite) {
-		struct intel_plane *plane;
-
-		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
-		if (IS_ERR(plane)) {
-			ret = PTR_ERR(plane);
-			goto fail;
-		}
-		crtc->plane_ids_mask |= BIT(plane->id);
-	}
-
-	cursor = intel_cursor_plane_create(dev_priv, pipe);
-	if (IS_ERR(cursor)) {
-		ret = PTR_ERR(cursor);
-		goto fail;
-	}
-	crtc->plane_ids_mask |= BIT(cursor->id);
-
-	if (HAS_GMCH(dev_priv)) {
-		if (IS_CHERRYVIEW(dev_priv) ||
-		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
-			funcs = &g4x_crtc_funcs;
-		else if (IS_GEN(dev_priv, 4))
-			funcs = &i965_crtc_funcs;
-		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
-			funcs = &i915gm_crtc_funcs;
-		else if (IS_GEN(dev_priv, 3))
-			funcs = &i915_crtc_funcs;
-		else
-			funcs = &i8xx_crtc_funcs;
-	} else {
-		if (INTEL_GEN(dev_priv) >= 8)
-			funcs = &bdw_crtc_funcs;
-		else
-			funcs = &ilk_crtc_funcs;
-	}
-
-	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
-					&primary->base, &cursor->base,
-					funcs, "pipe %c", pipe_name(pipe));
-	if (ret)
-		goto fail;
-
-	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
-	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
-	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
-
-	if (INTEL_GEN(dev_priv) < 9) {
-		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
-
-		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
-		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
-		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
-	}
-
-	if (INTEL_GEN(dev_priv) >= 10)
-		drm_crtc_create_scaling_filter_property(&crtc->base,
-						BIT(DRM_SCALING_FILTER_DEFAULT) |
-						BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
-
-	intel_color_init(crtc);
-
-	intel_crtc_crc_init(crtc);
-
-	drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
-
-	return 0;
-
-fail:
-	intel_crtc_free(crtc);
-
-	return ret;
-}
 
 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
 				      struct drm_file *file)
@@ -17554,48 +13901,12 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
 	return true;
 }
 
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
-{
-	int pps_num;
-	int pps_idx;
-
-	if (HAS_DDI(dev_priv))
-		return;
-	/*
-	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
-	 * everywhere where registers can be write protected.
-	 */
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		pps_num = 2;
-	else
-		pps_num = 1;
-
-	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
-		u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
-
-		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
-		intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
-	}
-}
-
-static void intel_pps_init(struct drm_i915_private *dev_priv)
-{
-	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
-		dev_priv->pps_mmio_base = PCH_PPS_BASE;
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		dev_priv->pps_mmio_base = VLV_PPS_BASE;
-	else
-		dev_priv->pps_mmio_base = PPS_BASE;
-
-	intel_pps_unlock_regs_wa(dev_priv);
-}
-
 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 {
 	struct intel_encoder *encoder;
 	bool dpd_is_edp = false;
 
-	intel_pps_init(dev_priv);
+	intel_pps_unlock_regs_wa(dev_priv);
 
 	if (!HAS_DISPLAY(dev_priv))
 		return;
@@ -17996,7 +14307,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 			goto err;
 		}
 
-		if (is_gen12_ccs_plane(fb, i)) {
+		if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
 			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
 
 			if (fb->pitches[i] != ccs_aux_stride) {
@@ -18194,86 +14505,40 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
 {
 	intel_init_cdclk_hooks(dev_priv);
 
+	intel_dpll_init_clock_hook(dev_priv);
+
 	if (INTEL_GEN(dev_priv) >= 9) {
 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
-			skl_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
 		dev_priv->display.crtc_enable = hsw_crtc_enable;
 		dev_priv->display.crtc_disable = hsw_crtc_disable;
 	} else if (HAS_DDI(dev_priv)) {
 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
-			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock =
-			hsw_crtc_compute_clock;
 		dev_priv->display.crtc_enable = hsw_crtc_enable;
 		dev_priv->display.crtc_disable = hsw_crtc_disable;
 	} else if (HAS_PCH_SPLIT(dev_priv)) {
 		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
-			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock =
-			ilk_crtc_compute_clock;
 		dev_priv->display.crtc_enable = ilk_crtc_enable;
 		dev_priv->display.crtc_disable = ilk_crtc_disable;
-	} else if (IS_CHERRYVIEW(dev_priv)) {
+	} else if (IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv)) {
 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
-			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-	} else if (IS_VALLEYVIEW(dev_priv)) {
-		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
-			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
-		dev_priv->display.crtc_enable = valleyview_crtc_enable;
-		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-	} else if (IS_G4X(dev_priv)) {
-		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
-			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
-		dev_priv->display.crtc_enable = i9xx_crtc_enable;
-		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-	} else if (IS_PINEVIEW(dev_priv)) {
-		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
-			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
-		dev_priv->display.crtc_enable = i9xx_crtc_enable;
-		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-	} else if (!IS_GEN(dev_priv, 2)) {
-		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
-			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
-		dev_priv->display.crtc_enable = i9xx_crtc_enable;
-		dev_priv->display.crtc_disable = i9xx_crtc_disable;
 	} else {
 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
-			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
 	}
 
-	if (IS_GEN(dev_priv, 5)) {
-		dev_priv->display.fdi_link_train = ilk_fdi_link_train;
-	} else if (IS_GEN(dev_priv, 6)) {
-		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
-	} else if (IS_IVYBRIDGE(dev_priv)) {
-		/* FIXME: detect B0+ stepping and use auto training */
-		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
-	}
+	intel_fdi_init_hook(dev_priv);
 
-	if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(dev_priv) >= 9) {
 		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
-	else
+		dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
+	} else {
 		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
+		dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
+	}
 
 }
 
@@ -18281,14 +14546,10 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
 {
 	struct intel_cdclk_state *cdclk_state =
 		to_intel_cdclk_state(i915->cdclk.obj.state);
-	struct intel_dbuf_state *dbuf_state =
-		to_intel_dbuf_state(i915->dbuf.obj.state);
 
 	intel_update_cdclk(i915);
 	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
 	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
-
-	dbuf_state->enabled_slices = i915->dbuf.enabled_slices;
 }
 
 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
@@ -18521,8 +14782,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
 
 	mode_config->funcs = &intel_mode_funcs;
 
-	if (INTEL_GEN(i915) >= 9)
-		mode_config->async_page_flip = true;
+	mode_config->async_page_flip = has_async_flips(i915);
 
 	/*
 	 * Maximum framebuffer dimensions, chosen to match
@@ -18607,6 +14867,8 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
 	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
 					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
 
+	i915->framestart_delay = 1; /* 1-4 */
+
 	intel_mode_config_init(i915);
 
 	ret = intel_cdclk_init(i915);
@@ -18653,6 +14915,8 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
 
 	intel_panel_sanitize_ssc(i915);
 
+	intel_pps_setup(i915);
+
 	intel_gmbus_setup(i915);
 
 	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
@@ -18941,7 +15205,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
 
 		val = intel_de_read(dev_priv, reg);
 		val &= ~HSW_FRAME_START_DELAY_MASK;
-		val |= HSW_FRAME_START_DELAY(0);
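+		/* framestart_delay is 1-4, the hw field counts from 0 */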
+		val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
 		intel_de_write(dev_priv, reg, val);
 	} else {
 		i915_reg_t reg = PIPECONF(cpu_transcoder);
@@ -18949,7 +15213,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
 
 		val = intel_de_read(dev_priv, reg);
 		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
-		val |= PIPECONF_FRAME_START_DELAY(0);
+		val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
 		intel_de_write(dev_priv, reg, val);
 	}
 
@@ -18962,7 +15226,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
 
 		val = intel_de_read(dev_priv, reg);
 		val &= ~TRANS_FRAME_START_DELAY_MASK;
-		val |= TRANS_FRAME_START_DELAY(0);
+		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
 		intel_de_write(dev_priv, reg, val);
 	} else {
 		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
@@ -18971,7 +15235,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
 
 		val = intel_de_read(dev_priv, reg);
 		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
-		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
 		intel_de_write(dev_priv, reg, val);
 	}
 }
@@ -19164,7 +15428,7 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
 		struct intel_crtc_state *crtc_state =
 			to_intel_crtc_state(crtc->base.state);
 
-		fixup_active_planes(crtc_state);
+		fixup_plane_bitmasks(crtc_state);
 	}
 }
 
@@ -19587,7 +15851,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 
 		put_domains = modeset_get_crtc_power_domains(crtc_state);
 		if (drm_WARN_ON(dev, put_domains))
-			modeset_put_power_domains(dev_priv, put_domains);
+			modeset_put_crtc_power_domains(crtc, put_domains);
 	}
 
 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 5e0d42d82c110aa27ed8ce981e9defee16cf6708..76f8a805b0a34a290b503b7395dc1a87b45d4a15 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -499,6 +499,8 @@ enum phy_fia {
 			     ((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
 			     (new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
 
+int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
+				     struct intel_crtc *crtc);
 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
 			   u8 active_pipes);
 void intel_link_compute_m_n(u16 bpp, int nlanes,
@@ -544,7 +546,6 @@ unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info
 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
 int intel_display_suspend(struct drm_device *dev);
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
 void intel_encoder_destroy(struct drm_encoder *encoder);
 struct drm_display_mode *
 intel_encoder_current_mode(struct intel_encoder *encoder);
@@ -628,11 +629,9 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
 		     int plane);
 int skl_check_plane_surface(struct intel_plane_state *plane_state);
-int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
+				 int *x, int *y, u32 *offset);
 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
-unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
-				   u32 pixel_format, u64 modifier,
-				   unsigned int rotation);
 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
 unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
 
@@ -643,7 +642,23 @@ void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 
 bool
 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
-				    uint64_t modifier);
+				    u64 modifier);
+
+int intel_plane_compute_gtt(struct intel_plane_state *plane_state);
+u32 intel_plane_compute_aligned_offset(int *x, int *y,
+				       const struct intel_plane_state *state,
+				       int color_plane);
+int intel_plane_pin_fb(struct intel_plane_state *plane_state);
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
+struct intel_encoder *
+intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
+			   const struct intel_crtc_state *crtc_state);
+unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+				  int color_plane);
+u32 intel_plane_adjust_aligned_offset(int *x, int *y,
+				      const struct intel_plane_state *state,
+				      int color_plane,
+				      u32 old_offset, u32 new_offset);
 
 /* modesetting */
 void intel_modeset_init_hw(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index ca41e8c00ad7b7c3f28c57fd738fa860a91ad173..d62b18d5ecd80004df891f7d4e6d9c283840c85b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -18,6 +18,7 @@
 #include "intel_pm.h"
 #include "intel_psr.h"
 #include "intel_sideband.h"
+#include "intel_sprite.h"
 
 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
 {
@@ -865,6 +866,110 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
 	}
 }
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
+static void crtc_updates_info(struct seq_file *m,
+			      struct intel_crtc *crtc,
+			      const char *hdr)
+{
+	u64 count;
+	int row;
+
+	count = 0;
+	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++)
+		count += crtc->debug.vbl.times[row];
+	seq_printf(m, "%sUpdates: %llu\n", hdr, count);
+	if (!count)
+		return;
+
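+	/*
+	 * Plot a log2 histogram: bucket n counts updates that took on the
+	 * order of 2^(n+9) ns, spanning roughly 1us..16ms.
+	 */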
+	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) {
+		char columns[80] = "       |";
+		unsigned int x;
+
+		if (row & 1) {
+			const char *units;
+
+			if (row > 10) {
+				x = 1000000;
+				units = "ms";
+			} else {
+				x = 1000;
+				units = "us";
+			}
+
+			snprintf(columns, sizeof(columns), "%4ld%s |",
+				 DIV_ROUND_CLOSEST(BIT(row + 9), x), units);
+		}
+
+		if (crtc->debug.vbl.times[row]) {
+			x = ilog2(crtc->debug.vbl.times[row]);
+			memset(columns + 8, '*', x);
+			columns[8 + x] = '\0';
+		}
+
+		seq_printf(m, "%s%s\n", hdr, columns);
+	}
+
+	seq_printf(m, "%sMin update: %lluns\n",
+		   hdr, crtc->debug.vbl.min);
+	seq_printf(m, "%sMax update: %lluns\n",
+		   hdr, crtc->debug.vbl.max);
+	seq_printf(m, "%sAverage update: %lluns\n",
+		   hdr, div64_u64(crtc->debug.vbl.sum,  count));
+	seq_printf(m, "%sOverruns > %uus: %u\n",
+		   hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
+}
+
+static int crtc_updates_show(struct seq_file *m, void *data)
+{
+	crtc_updates_info(m, m->private, "");
+	return 0;
+}
+
+static int crtc_updates_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, crtc_updates_show, inode->i_private);
+}
+
+static ssize_t crtc_updates_write(struct file *file,
+				  const char __user *ubuf,
+				  size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct intel_crtc *crtc = m->private;
+
+	/* May race with an update. Meh. */
+	memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));
+
+	return len;
+}
+
+static const struct file_operations crtc_updates_fops = {
+	.owner = THIS_MODULE,
+	.open = crtc_updates_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = crtc_updates_write
+};
+
+static void crtc_updates_add(struct drm_crtc *crtc)
+{
+	debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
+			    to_intel_crtc(crtc), &crtc_updates_fops);
+}
+
+#else
+static void crtc_updates_info(struct seq_file *m,
+			      struct intel_crtc *crtc,
+			      const char *hdr)
+{
+}
+
+static void crtc_updates_add(struct drm_crtc *crtc)
+{
+}
+#endif
+
 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -907,6 +1012,8 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
 	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
 		   yesno(!crtc->cpu_fifo_underrun_disabled),
 		   yesno(!crtc->pch_fifo_underrun_disabled));
+
+	crtc_updates_info(m, crtc, "\t");
 }
 
 static int i915_display_info(struct seq_file *m, void *unused)
@@ -1032,7 +1139,6 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
 		if (!dev_priv->ipc_enabled && enable)
 			drm_info(&dev_priv->drm,
 				 "Enabling IPC: WM will be proper only after next commit\n");
-		dev_priv->wm.distrust_bios_wm = true;
 		dev_priv->ipc_enabled = enable;
 		intel_enable_ipc(dev_priv);
 	}
@@ -2048,13 +2154,13 @@ static int i915_panel_show(struct seq_file *m, void *data)
 		return -ENODEV;
 
 	seq_printf(m, "Panel power up delay: %d\n",
-		   intel_dp->panel_power_up_delay);
+		   intel_dp->pps.panel_power_up_delay);
 	seq_printf(m, "Panel power down delay: %d\n",
-		   intel_dp->panel_power_down_delay);
+		   intel_dp->pps.panel_power_down_delay);
 	seq_printf(m, "Backlight on delay: %d\n",
-		   intel_dp->backlight_on_delay);
+		   intel_dp->pps.backlight_on_delay);
 	seq_printf(m, "Backlight off delay: %d\n",
-		   intel_dp->backlight_off_delay);
+		   intel_dp->pps.backlight_off_delay);
 
 	return 0;
 }
@@ -2278,3 +2384,20 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
 
 	return 0;
 }
+
+/**
+ * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
+ * @crtc: pointer to a drm_crtc
+ *
+ * Returns 0 on success, negative error codes on error.
+ *
+ * Failure to add debugfs entries should generally be ignored.
+ */
+int intel_crtc_debugfs_add(struct drm_crtc *crtc)
+{
+	if (!crtc->debugfs_entry)
+		return -ENODEV;
+
+	crtc_updates_add(crtc);
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
index c922c1745bfe170d91af1f857d74db23306e9401..557901f3eb908c0f0a19ca11a4d168c7b7c4b5a4 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -7,14 +7,17 @@
 #define __INTEL_DISPLAY_DEBUGFS_H__
 
 struct drm_connector;
+struct drm_crtc;
 struct drm_i915_private;
 
 #ifdef CONFIG_DEBUG_FS
 void intel_display_debugfs_register(struct drm_i915_private *i915);
 int intel_connector_debugfs_add(struct drm_connector *connector);
+int intel_crtc_debugfs_add(struct drm_crtc *crtc);
 #else
 static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
 static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; }
+static inline int intel_crtc_debugfs_add(struct drm_crtc *crtc) { return 0; }
 #endif
 
 #endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index fe2d90bba53685c723e5e54395ca79d6d1a74445..c11c37c65d861306a58e8ba67e729fce41b6749f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -4,7 +4,6 @@
  */
 
 #include "display/intel_crt.h"
-#include "display/intel_dp.h"
 
 #include "i915_drv.h"
 #include "i915_irq.h"
@@ -16,6 +15,7 @@
 #include "intel_dpio_phy.h"
 #include "intel_hotplug.h"
 #include "intel_pm.h"
+#include "intel_pps.h"
 #include "intel_sideband.h"
 #include "intel_tc.h"
 #include "intel_vga.h"
@@ -936,7 +936,7 @@ static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
 	 * because PPS registers are always on.
 	 */
 	if (!HAS_PCH_SPLIT(dev_priv))
-		intel_power_sequencer_reset(dev_priv);
+		intel_pps_reset_all(dev_priv);
 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
 }
 
@@ -1446,7 +1446,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
 	/* make sure we're done processing display irqs */
 	intel_synchronize_irq(dev_priv);
 
-	intel_power_sequencer_reset(dev_priv);
+	intel_pps_reset_all(dev_priv);
 
 	/* Prevent us from re-enabling polling on accident in late suspend */
 	if (!dev_priv->drm.dev->power.is_suspended)
@@ -2184,26 +2184,6 @@ static void __intel_display_power_put(struct drm_i915_private *dev_priv,
 	mutex_unlock(&power_domains->lock);
 }
 
-/**
- * intel_display_power_put_unchecked - release an unchecked power domain reference
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function drops the power domain reference obtained by
- * intel_display_power_get() and might power down the corresponding hardware
- * block right away if this is the last reference.
- *
- * This function exists only for historical reasons and should be avoided in
- * new code, as the correctness of its use cannot be checked. Always use
- * intel_display_power_put() instead.
- */
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
-				       enum intel_display_power_domain domain)
-{
-	__intel_display_power_put(dev_priv, domain);
-	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
-}
-
 static void
 queue_async_put_domains_work(struct i915_power_domains *power_domains,
 			     intel_wakeref_t wakeref)
@@ -2410,8 +2390,85 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 	__intel_display_power_put(dev_priv, domain);
 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 }
+#else
+/**
+ * intel_display_power_put_unchecked - release an unchecked power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ *
+ * This function is only for the power domain code's internal use to suppress
+ * wakeref tracking when the corresponding debug kconfig option is disabled;
+ * it should not be used otherwise.
+ */
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+				       enum intel_display_power_domain domain)
+{
+	__intel_display_power_put(dev_priv, domain);
+	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+}
 #endif
 
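+/*
+ * Track several power domain references as one set: the mask records which
+ * domains are held, and the per-domain wakerefs are stored only when
+ * runtime-PM debugging is enabled.
+ */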
+void
+intel_display_power_get_in_set(struct drm_i915_private *i915,
+			       struct intel_display_power_domain_set *power_domain_set,
+			       enum intel_display_power_domain domain)
+{
+	intel_wakeref_t __maybe_unused wf;
+
+	drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
+
+	wf = intel_display_power_get(i915, domain);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+	power_domain_set->wakerefs[domain] = wf;
+#endif
+	power_domain_set->mask |= BIT_ULL(domain);
+}
+
+bool
+intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
+					  struct intel_display_power_domain_set *power_domain_set,
+					  enum intel_display_power_domain domain)
+{
+	intel_wakeref_t wf;
+
+	drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
+
+	wf = intel_display_power_get_if_enabled(i915, domain);
+	if (!wf)
+		return false;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+	power_domain_set->wakerefs[domain] = wf;
+#endif
+	power_domain_set->mask |= BIT_ULL(domain);
+
+	return true;
+}
+
+void
+intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
+				    struct intel_display_power_domain_set *power_domain_set,
+				    u64 mask)
+{
+	enum intel_display_power_domain domain;
+
+	drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask);
+
+	for_each_power_domain(domain, mask) {
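+		/* wf == -1 denotes an untracked wakeref when debugging is off */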
+		intel_wakeref_t __maybe_unused wf = -1;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+		wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
+#endif
+		intel_display_power_put(i915, domain, wf);
+		power_domain_set->mask &= ~BIT_ULL(domain);
+	}
+}
+
 #define I830_PIPES_POWER_DOMAINS (		\
 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
@@ -5601,12 +5658,16 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
 	 * resources powered until display HW readout is complete. We drop
 	 * this reference in intel_power_domains_enable().
 	 */
-	power_domains->wakeref =
+	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+	power_domains->init_wakeref =
 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
 
 	/* Disable power support if the user asked so. */
-	if (!i915->params.disable_power_well)
-		intel_display_power_get(i915, POWER_DOMAIN_INIT);
+	if (!i915->params.disable_power_well) {
+		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
+		i915->power_domains.disable_wakeref = intel_display_power_get(i915,
+									      POWER_DOMAIN_INIT);
+	}
 	intel_power_domains_sync_hw(i915);
 
 	power_domains->initializing = false;
@@ -5626,11 +5687,12 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
 void intel_power_domains_driver_remove(struct drm_i915_private *i915)
 {
 	intel_wakeref_t wakeref __maybe_unused =
-		fetch_and_zero(&i915->power_domains.wakeref);
+		fetch_and_zero(&i915->power_domains.init_wakeref);
 
 	/* Remove the refcount we took to keep power well support disabled. */
 	if (!i915->params.disable_power_well)
-		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+		intel_display_power_put(i915, POWER_DOMAIN_INIT,
+					fetch_and_zero(&i915->power_domains.disable_wakeref));
 
 	intel_display_power_flush_work_sync(i915);
 
@@ -5655,7 +5717,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
 void intel_power_domains_enable(struct drm_i915_private *i915)
 {
 	intel_wakeref_t wakeref __maybe_unused =
-		fetch_and_zero(&i915->power_domains.wakeref);
+		fetch_and_zero(&i915->power_domains.init_wakeref);
 
 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
 	intel_power_domains_verify_state(i915);
@@ -5672,8 +5734,8 @@ void intel_power_domains_disable(struct drm_i915_private *i915)
 {
 	struct i915_power_domains *power_domains = &i915->power_domains;
 
-	drm_WARN_ON(&i915->drm, power_domains->wakeref);
-	power_domains->wakeref =
+	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+	power_domains->init_wakeref =
 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
 
 	intel_power_domains_verify_state(i915);
@@ -5695,7 +5757,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
 {
 	struct i915_power_domains *power_domains = &i915->power_domains;
 	intel_wakeref_t wakeref __maybe_unused =
-		fetch_and_zero(&power_domains->wakeref);
+		fetch_and_zero(&power_domains->init_wakeref);
 
 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
 
@@ -5719,7 +5781,8 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
 	 * power wells if power domains must be deinitialized for suspend.
 	 */
 	if (!i915->params.disable_power_well)
-		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+		intel_display_power_put(i915, POWER_DOMAIN_INIT,
+					fetch_and_zero(&i915->power_domains.disable_wakeref));
 
 	intel_display_power_flush_work(i915);
 	intel_power_domains_verify_state(i915);
@@ -5754,8 +5817,8 @@ void intel_power_domains_resume(struct drm_i915_private *i915)
 		intel_power_domains_init_hw(i915, true);
 		power_domains->display_core_suspended = false;
 	} else {
-		drm_WARN_ON(&i915->drm, power_domains->wakeref);
-		power_domains->wakeref =
+		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+		power_domains->init_wakeref =
 			intel_display_power_get(i915, POWER_DOMAIN_INIT);
 	}
 
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 4aa0a09cf14fb78ecf28ba581f02d2a21651c6a1..bc30c479be5343c43195eb80969150ae94f19528 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -212,7 +212,8 @@ struct i915_power_domains {
 	bool display_core_suspended;
 	int power_well_count;
 
-	intel_wakeref_t wakeref;
+	intel_wakeref_t init_wakeref;
+	intel_wakeref_t disable_wakeref;
 
 	struct mutex lock;
 	int domain_use_count[POWER_DOMAIN_NUM];
@@ -224,6 +225,13 @@ struct i915_power_domains {
 	struct i915_power_well *power_wells;
 };
 
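+/* A set of power domain references acquired and released as a unit. */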
+struct intel_display_power_domain_set {
+	u64 mask;
+#ifdef CONFIG_DRM_I915_DEBUG_RUNTIME_PM
+	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM];
+#endif
+};
+
 #define for_each_power_domain(domain, mask)				\
 	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
 		for_each_if(BIT_ULL(domain) & (mask))
@@ -279,8 +287,6 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
 intel_wakeref_t
 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
 				   enum intel_display_power_domain domain);
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
-				       enum intel_display_power_domain domain);
 void __intel_display_power_put_async(struct drm_i915_private *i915,
 				     enum intel_display_power_domain domain,
 				     intel_wakeref_t wakeref);
@@ -297,6 +303,9 @@ intel_display_power_put_async(struct drm_i915_private *i915,
 	__intel_display_power_put_async(i915, domain, wakeref);
 }
 #else
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+				       enum intel_display_power_domain domain);
+
 static inline void
 intel_display_power_put(struct drm_i915_private *i915,
 			enum intel_display_power_domain domain,
@@ -314,6 +323,28 @@ intel_display_power_put_async(struct drm_i915_private *i915,
 }
 #endif
 
+void
+intel_display_power_get_in_set(struct drm_i915_private *i915,
+			       struct intel_display_power_domain_set *power_domain_set,
+			       enum intel_display_power_domain domain);
+
+bool
+intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
+					  struct intel_display_power_domain_set *power_domain_set,
+					  enum intel_display_power_domain domain);
+
+void
+intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
+				    struct intel_display_power_domain_set *power_domain_set,
+				    u64 mask);
+
+static inline void
+intel_display_power_put_all_in_set(struct drm_i915_private *i915,
+				   struct intel_display_power_domain_set *power_domain_set)
+{
+	intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask);
+}
+
 enum dbuf_slice {
 	DBUF_S1,
 	DBUF_S2,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 34d78c654df3b72dab3b208086ddbbe1cd8984be..39397748b4b0c2ec650febfa2118f8ca18e367af 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -225,6 +225,17 @@ struct intel_encoder {
 	const struct drm_connector *audio_connector;
 };
 
+struct intel_panel_bl_funcs {
+	/* Connector and platform specific backlight functions */
+	int (*setup)(struct intel_connector *connector, enum pipe pipe);
+	u32 (*get)(struct intel_connector *connector, enum pipe pipe);
+	void (*set)(const struct drm_connector_state *conn_state, u32 level);
+	void (*disable)(const struct drm_connector_state *conn_state, u32 level);
+	void (*enable)(const struct intel_crtc_state *crtc_state,
+		       const struct drm_connector_state *conn_state, u32 level);
+	u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
+};
+
 struct intel_panel {
 	struct drm_display_mode *fixed_mode;
 	struct drm_display_mode *downclock_mode;
@@ -241,24 +252,28 @@ struct intel_panel {
 		bool alternate_pwm_increment;	/* lpt+ */
 
 		/* PWM chip */
+		u32 pwm_level_min;
+		u32 pwm_level_max;
+		bool pwm_enabled;
 		bool util_pin_active_low;	/* bxt+ */
 		u8 controller;		/* bxt+ only */
 		struct pwm_device *pwm;
 		struct pwm_state pwm_state;
 
 		/* DPCD backlight */
-		u8 pwmgen_bit_count;
+		union {
+			struct {
+				u8 pwmgen_bit_count;
+			} vesa;
+			struct {
+				bool sdr_uses_aux;
+			} intel;
+		} edp;
 
 		struct backlight_device *device;
 
-		/* Connector and platform specific backlight functions */
-		int (*setup)(struct intel_connector *connector, enum pipe pipe);
-		u32 (*get)(struct intel_connector *connector);
-		void (*set)(const struct drm_connector_state *conn_state, u32 level);
-		void (*disable)(const struct drm_connector_state *conn_state);
-		void (*enable)(const struct intel_crtc_state *crtc_state,
-			       const struct drm_connector_state *conn_state);
-		u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
+		const struct intel_panel_bl_funcs *funcs;
+		const struct intel_panel_bl_funcs *pwm_funcs;
 		void (*power)(struct intel_connector *, bool enable);
 	} backlight;
 };
@@ -339,6 +354,10 @@ struct intel_hdcp_shim {
 				 enum transcoder cpu_transcoder,
 				 bool enable);
 
+	/* Enable/Disable stream encryption on DP MST Transport Link */
+	int (*stream_encryption)(struct intel_connector *connector,
+				 bool enable);
+
 	/* Ensures the link is still protected */
 	bool (*check_link)(struct intel_digital_port *dig_port,
 			   struct intel_connector *connector);
@@ -370,8 +389,13 @@ struct intel_hdcp_shim {
 	int (*config_stream_type)(struct intel_digital_port *dig_port,
 				  bool is_repeater, u8 type);
 
+	/* Enable/Disable HDCP 2.2 stream encryption on DP MST Transport Link */
+	int (*stream_2_2_encryption)(struct intel_connector *connector,
+				     bool enable);
+
 	/* HDCP2.2 Link Integrity Check */
-	int (*check_2_2_link)(struct intel_digital_port *dig_port);
+	int (*check_2_2_link)(struct intel_digital_port *dig_port,
+			      struct intel_connector *connector);
 };
 
 struct intel_hdcp {
@@ -398,7 +422,6 @@ struct intel_hdcp {
 	 * content can flow only through a link protected by HDCP2.2.
 	 */
 	u8 content_type;
-	struct hdcp_port_data port_data;
 
 	bool is_paired;
 	bool is_repeater;
@@ -432,6 +455,8 @@ struct intel_hdcp {
 	 * Hence caching the transcoder here.
 	 */
 	enum transcoder cpu_transcoder;
+	/* Only used for DP MST stream encryption */
+	enum transcoder stream_transcoder;
 };
 
 struct intel_connector {
@@ -531,7 +556,7 @@ struct intel_plane_state {
 		struct drm_framebuffer *fb;
 
 		u16 alpha;
-		uint16_t pixel_blend_mode;
+		u16 pixel_blend_mode;
 		unsigned int rotation;
 		enum drm_color_encoding color_encoding;
 		enum drm_color_range color_range;
@@ -604,6 +629,11 @@ struct intel_plane_state {
 	u32 planar_slave;
 
 	struct drm_intel_sprite_colorkey ckey;
+
+	struct drm_rect psr2_sel_fetch_area;
+
+	/* Clear Color Value */
+	u64 ccval;
 };
 
 struct intel_initial_plane_config {
@@ -663,6 +693,8 @@ struct intel_crtc_scaler_state {
 #define I915_MODE_FLAG_DSI_USE_TE1 (1<<4)
 /* Flag to indicate mipi dsi periodic command mode where we do not get TE */
 #define I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE (1<<5)
+/* Do tricks to make vblank timestamps sane with VRR? */
+#define I915_MODE_FLAG_VRR (1<<6)
 
 struct intel_wm_level {
 	bool enable;
@@ -1047,7 +1079,10 @@ struct intel_crtc_state {
 		u32 cgm_mode;
 	};
 
-	/* bitmask of visible planes (enum plane_id) */
+	/* bitmask of logically enabled planes (enum plane_id) */
+	u8 enabled_planes;
+
+	/* bitmask of actually visible planes (enum plane_id) */
 	u8 active_planes;
 	u8 nv12_planes;
 	u8 c8_planes;
@@ -1118,6 +1153,13 @@ struct intel_crtc_state {
 	struct intel_dsb *dsb;
 
 	u32 psr2_man_track_ctl;
+
+	/* Variable Refresh Rate state */
+	struct {
+		bool enable;
+		u8 pipeline_full;
+		u16 flipline, vmin, vmax;
+	} vrr;
 };
 
 enum intel_pipe_crc_source {
@@ -1160,7 +1202,9 @@ struct intel_crtc {
 	/* I915_MODE_FLAG_* */
 	u8 mode_flags;
 
-	unsigned long long enabled_power_domains;
+	u16 vmax_vblank_start;
+
+	struct intel_display_power_domain_set enabled_power_domains;
 	struct intel_overlay *overlay;
 
 	struct intel_crtc_state *config;
@@ -1186,6 +1230,15 @@ struct intel_crtc {
 		ktime_t start_vbl_time;
 		int min_vbl, max_vbl;
 		int scanline_start;
+#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
+		struct {
+			u64 min;
+			u64 max;
+			u64 sum;
+			unsigned int over;
+			unsigned int times[17]; /* [1us, 16ms] */
+		} vbl;
+#endif
 	} debug;
 
 	/* scalers available on this crtc */
@@ -1203,6 +1256,7 @@ struct intel_plane {
 	enum pipe pipe;
 	bool has_fbc;
 	bool has_ccs;
+	bool need_async_flip_disable_wa;
 	u32 frontbuffer_bit;
 
 	struct {
@@ -1239,7 +1293,10 @@ struct intel_plane {
 			 const struct intel_plane_state *plane_state);
 	void (*async_flip)(struct intel_plane *plane,
 			   const struct intel_crtc_state *crtc_state,
-			   const struct intel_plane_state *plane_state);
+			   const struct intel_plane_state *plane_state,
+			   bool async_flip);
+	void (*enable_flip_done)(struct intel_plane *plane);
+	void (*disable_flip_done)(struct intel_plane *plane);
 };
 
 struct intel_watermark_params {
@@ -1321,6 +1378,43 @@ struct intel_dp_compliance {
 	u8 test_lane_count;
 };
 
+struct intel_dp_pcon_frl {
+	bool is_trained;
+	int trained_rate_gbps;
+};
+
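+/* Panel power sequencing (PPS) state, split out from struct intel_dp. */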
+struct intel_pps {
+	int panel_power_up_delay;
+	int panel_power_down_delay;
+	int panel_power_cycle_delay;
+	int backlight_on_delay;
+	int backlight_off_delay;
+	struct delayed_work panel_vdd_work;
+	bool want_panel_vdd;
+	unsigned long last_power_on;
+	unsigned long last_backlight_off;
+	ktime_t panel_power_off_time;
+	intel_wakeref_t vdd_wakeref;
+
+	/*
+	 * Pipe whose power sequencer is currently locked into
+	 * this port. Only relevant on VLV/CHV.
+	 */
+	enum pipe pps_pipe;
+	/*
+	 * Pipe currently driving the port. Used for preventing
+	 * the use of the PPS for any pipe currentrly driving
+	 * external DP as that will mess things up on VLV.
+	 */
+	enum pipe active_pipe;
+	/*
+	 * Set if the sequencer may be reset due to a power transition,
+	 * requiring a reinitialization. Only relevant on BXT.
+	 */
+	bool pps_reset;
+	struct edp_power_seq pps_delays;
+};
+
 struct intel_dp {
 	i915_reg_t output_reg;
 	u32 DP;
@@ -1331,6 +1425,7 @@ struct intel_dp {
 	bool has_hdmi_sink;
 	bool has_audio;
 	bool reset_link_params;
+	bool use_max_params;
 	u8 dpcd[DP_RECEIVER_CAP_SIZE];
 	u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
 	u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
@@ -1339,6 +1434,7 @@ struct intel_dp {
 	u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE];
 	u8 lttpr_phy_caps[DP_MAX_LTTPR_COUNT][DP_LTTPR_PHY_CAP_SIZE];
 	u8 fec_capable;
+	u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE];
 	/* source rates */
 	int num_source_rates;
 	const int *source_rates;
@@ -1355,38 +1451,11 @@ struct intel_dp {
 	int max_link_rate;
 	/* sink or branch descriptor */
 	struct drm_dp_desc desc;
-	u32 edid_quirks;
 	struct drm_dp_aux aux;
 	u32 aux_busy_last_status;
 	u8 train_set[4];
-	int panel_power_up_delay;
-	int panel_power_down_delay;
-	int panel_power_cycle_delay;
-	int backlight_on_delay;
-	int backlight_off_delay;
-	struct delayed_work panel_vdd_work;
-	bool want_panel_vdd;
-	unsigned long last_power_on;
-	unsigned long last_backlight_off;
-	ktime_t panel_power_off_time;
 
-	/*
-	 * Pipe whose power sequencer is currently locked into
-	 * this port. Only relevant on VLV/CHV.
-	 */
-	enum pipe pps_pipe;
-	/*
-	 * Pipe currently driving the port. Used for preventing
-	 * the use of the PPS for any pipe currentrly driving
-	 * external DP as that will mess things up on VLV.
-	 */
-	enum pipe active_pipe;
-	/*
-	 * Set if the sequencer may be reset due to a power transition,
-	 * requiring a reinitialization. Only relevant on BXT.
-	 */
-	bool pps_reset;
-	struct edp_power_seq pps_delays;
+	struct intel_pps pps;
 
 	bool can_mst; /* this port supports mst */
 	bool is_mst;
@@ -1432,8 +1501,10 @@ struct intel_dp {
 	struct {
 		int min_tmds_clock, max_tmds_clock;
 		int max_dotclock;
+		int pcon_max_frl_bw;
 		u8 max_bpc;
 		bool ycbcr_444_to_420;
+		bool rgb_to_ycbcr;
 	} dfp;
 
 	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
@@ -1444,6 +1515,8 @@ struct intel_dp {
 
 	bool hobl_failed;
 	bool hobl_active;
+
+	struct intel_dp_pcon_frl frl;
 };
 
 enum lspcon_vendor {
@@ -1453,6 +1526,7 @@ enum lspcon_vendor {
 
 struct intel_lspcon {
 	bool active;
+	bool hdr_supported;
 	enum drm_lspcon_mode mode;
 	enum lspcon_vendor vendor;
 };
@@ -1469,6 +1543,8 @@ struct intel_digital_port {
 	/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
 	enum aux_ch aux_ch;
 	enum intel_display_power_domain ddi_io_power_domain;
+	intel_wakeref_t ddi_io_wakeref;
+	intel_wakeref_t aux_wakeref;
 	struct mutex tc_lock;	/* protects the TypeC port mode */
 	intel_wakeref_t tc_lock_wakeref;
 	int tc_link_refcount;
@@ -1478,10 +1554,14 @@ struct intel_digital_port {
 	enum phy_fia tc_phy_fia;
 	u8 tc_phy_fia_idx;
 
-	/* protects num_hdcp_streams reference count */
+	/* protects num_hdcp_streams reference count, hdcp_port_data and hdcp_auth_status */
 	struct mutex hdcp_mutex;
 	/* the number of pipes using HDCP signalling out of this port */
 	unsigned int num_hdcp_streams;
+	/* port HDCP auth status */
+	bool hdcp_auth_status;
+	/* HDCP port data that needs to be passed to the security f/w */
+	struct hdcp_port_data hdcp_port_data;
 
 	void (*write_infoframe)(struct intel_encoder *encoder,
 				const struct intel_crtc_state *crtc_state,
@@ -1758,6 +1838,12 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
 		 (1 << INTEL_OUTPUT_EDP));
 }
 
+static inline bool
+intel_crtc_needs_modeset(const struct intel_crtc_state *crtc_state)
+{
+	return drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
+}
+
 static inline void
 intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
@@ -1780,4 +1866,32 @@ static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
 	return i915_ggtt_offset(state->vma);
 }
 
+static inline struct intel_frontbuffer *
+to_intel_frontbuffer(struct drm_framebuffer *fb)
+{
+	return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
+}
+
+static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->params.panel_use_ssc >= 0)
+		return dev_priv->params.panel_use_ssc != 0;
+	return dev_priv->vbt.lvds_use_ssc &&
+		!(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
+}
+
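+/* Pack the N/M1/M2 dividers into the i9xx DPLL FP register layout. */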
+static inline u32 i9xx_dpll_compute_fp(struct dpll *dpll)
+{
+	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
+}
+
+static inline u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
+				      const struct intel_crtc_state *pipe_config)
+{
+	if (HAS_DDI(dev_priv))
+		return pipe_config->port_clock; /* SPLL */
+	else
+		return dev_priv->fdi_pll_freq;
+}
+
 #endif /*  __INTEL_DISPLAY_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 8a26307c48960110b678af7075db41ad3e65de6f..8c12d5375607a57cd2755dade7c18b7381bfa82f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -41,13 +41,13 @@
 
 #include "i915_debugfs.h"
 #include "i915_drv.h"
-#include "i915_trace.h"
 #include "intel_atomic.h"
 #include "intel_audio.h"
 #include "intel_connector.h"
 #include "intel_ddi.h"
 #include "intel_display_types.h"
 #include "intel_dp.h"
+#include "intel_dp_aux.h"
 #include "intel_dp_link_training.h"
 #include "intel_dp_mst.h"
 #include "intel_dpio_phy.h"
@@ -58,10 +58,12 @@
 #include "intel_lspcon.h"
 #include "intel_lvds.h"
 #include "intel_panel.h"
+#include "intel_pps.h"
 #include "intel_psr.h"
 #include "intel_sideband.h"
 #include "intel_tc.h"
 #include "intel_vdsc.h"
+#include "intel_vrr.h"
 
 #define DP_DPRX_ESI_LEN 14
 
@@ -121,6 +123,11 @@ static const struct dp_link_dpll chv_dpll[] = {
 		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
 };
 
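+/* VLV and CHV each use a single fixed DPLL configuration for DP. */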
+const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
+{
+	return IS_CHERRYVIEW(i915) ? &chv_dpll[0].dpll : &vlv_dpll[0].dpll;
+}
+
 /* Constants for DP DSC configurations */
 static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
 
@@ -145,12 +152,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
 
 static void intel_dp_link_down(struct intel_encoder *encoder,
 			       const struct intel_crtc_state *old_crtc_state);
-static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
-static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
-					   const struct intel_crtc_state *crtc_state);
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
-				      enum pipe pipe);
 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
 
 /* update sink rates from dpcd */
@@ -162,8 +163,7 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
 	int i, max_rate;
 	int max_lttpr_rate;
 
-	if (drm_dp_has_quirk(&intel_dp->desc, 0,
-			     DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
+	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
 		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
 		static const int quirk_rates[] = { 162000, 270000, 324000 };
 
@@ -480,6 +480,13 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
 		return -1;
 	}
 
+	if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
+		drm_dbg_kms(&i915->drm,
+			    "Retrying link training for eDP with max parameters\n");
+		intel_dp->use_max_params = true;
+		return 0;
+	}
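+	/*
+	 * Returning 0 above (rather than reducing the rate or lane count)
+	 * retries link training with the same mode; use_max_params is then
+	 * consulted by intel_dp_compute_link_config() to replace the fast,
+	 * narrow eDP link parameters with the panel's maximum advertised ones.
+	 */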
+
 	index = intel_dp_rate_index(intel_dp->common_rates,
 				    intel_dp->num_common_rates,
 				    link_rate);
@@ -651,6 +658,10 @@ intel_dp_output_format(struct drm_connector *connector,
 	    !drm_mode_is_420_only(info, mode))
 		return INTEL_OUTPUT_FORMAT_RGB;
 
+	if (intel_dp->dfp.rgb_to_ycbcr &&
+	    intel_dp->dfp.ycbcr_444_to_420)
+		return INTEL_OUTPUT_FORMAT_RGB;
+
 	if (intel_dp->dfp.ycbcr_444_to_420)
 		return INTEL_OUTPUT_FORMAT_YCBCR444;
 	else
@@ -716,6 +727,25 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
 	const struct drm_display_info *info = &connector->base.display_info;
 	int tmds_clock;
 
+	/* If PCON supports FRL MODE, check FRL bandwidth constraints */
+	if (intel_dp->dfp.pcon_max_frl_bw) {
+		int target_bw;
+		int max_frl_bw;
+		int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);
+
+		target_bw = bpp * target_clock;
+
+		max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
+
+		/* converting bw from Gbps to Kbps */
+		max_frl_bw = max_frl_bw * 1000000;
+
+		if (target_bw > max_frl_bw)
+			return MODE_CLOCK_HIGH;
+
+		return MODE_OK;
+	}
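+	/*
+	 * Worked example for the check above (illustrative numbers): a
+	 * 3840x2160@60 mode has target_clock ~594000 kHz; at a minimum
+	 * output bpp of 24 that gives 594000 * 24 = 14256000 Kbps
+	 * (~14.3 Gbps), which fits a PCON advertising 24 Gbps of FRL
+	 * bandwidth (24 * 1000000 = 24000000 Kbps).
+	 */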
+
 	if (intel_dp->dfp.max_dotclock &&
 	    target_clock > intel_dp->dfp.max_dotclock)
 		return MODE_CLOCK_HIGH;
@@ -833,1329 +863,206 @@ intel_dp_mode_valid(struct drm_connector *connector,
 	return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
 }
 
-u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
-{
-	int i;
-	u32 v = 0;
-
-	if (src_bytes > 4)
-		src_bytes = 4;
-	for (i = 0; i < src_bytes; i++)
-		v |= ((u32)src[i]) << ((3 - i) * 8);
-	return v;
-}
-
-static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
-{
-	int i;
-	if (dst_bytes > 4)
-		dst_bytes = 4;
-	for (i = 0; i < dst_bytes; i++)
-		dst[i] = src >> ((3-i) * 8);
-}
-
-static void
-intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
-static void
-intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
-					      bool force_disable_vdd);
-static void
-intel_dp_pps_init(struct intel_dp *intel_dp);
-
-static intel_wakeref_t
-pps_lock(struct intel_dp *intel_dp)
+bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
 {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	intel_wakeref_t wakeref;
-
-	/*
-	 * See intel_power_sequencer_reset() why we need
-	 * a power domain reference here.
-	 */
-	wakeref = intel_display_power_get(dev_priv,
-					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));
-
-	mutex_lock(&dev_priv->pps_mutex);
+	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
 
-	return wakeref;
+	return max_rate >= 540000;
 }
 
-static intel_wakeref_t
-pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
+bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
 {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
 
-	mutex_unlock(&dev_priv->pps_mutex);
-	intel_display_power_put(dev_priv,
-				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
-				wakeref);
-	return 0;
+	return max_rate >= 810000;
 }
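+
+/*
+ * Rates follow the drm convention of kHz of link symbol clock: 540000
+ * corresponds to HBR2 (5.4 Gbps/lane) and 810000 to HBR3 (8.1 Gbps/lane).
+ * source_rates[] is kept sorted ascending, hence the last entry is the max.
+ */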
 
-#define with_pps_lock(dp, wf) \
-	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
-
 static void
-vlv_power_sequencer_kick(struct intel_dp *intel_dp)
+intel_dp_set_clock(struct intel_encoder *encoder,
+		   struct intel_crtc_state *pipe_config)
 {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	enum pipe pipe = intel_dp->pps_pipe;
-	bool pll_enabled, release_cl_override = false;
-	enum dpio_phy phy = DPIO_PHY(pipe);
-	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
-	u32 DP;
-
-	if (drm_WARN(&dev_priv->drm,
-		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
-		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
-		     pipe_name(pipe), dig_port->base.base.base.id,
-		     dig_port->base.base.name))
-		return;
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
-		    pipe_name(pipe), dig_port->base.base.base.id,
-		    dig_port->base.base.name);
-
-	/* Preserve the BIOS-computed detected bit. This is
-	 * supposed to be read-only.
-	 */
-	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
-	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
-	DP |= DP_PORT_WIDTH(1);
-	DP |= DP_LINK_TRAIN_PAT_1;
-
-	if (IS_CHERRYVIEW(dev_priv))
-		DP |= DP_PIPE_SEL_CHV(pipe);
-	else
-		DP |= DP_PIPE_SEL(pipe);
-
-	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
-
-	/*
-	 * The DPLL for the pipe must be enabled for this to work.
-	 * So enable temporarily it if it's not already enabled.
-	 */
-	if (!pll_enabled) {
-		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
-			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	const struct dp_link_dpll *divisor = NULL;
+	int i, count = 0;
 
-		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
-				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
-			drm_err(&dev_priv->drm,
-				"Failed to force on pll for pipe %c!\n",
-				pipe_name(pipe));
-			return;
-		}
+	if (IS_G4X(dev_priv)) {
+		divisor = g4x_dpll;
+		count = ARRAY_SIZE(g4x_dpll);
+	} else if (HAS_PCH_SPLIT(dev_priv)) {
+		divisor = pch_dpll;
+		count = ARRAY_SIZE(pch_dpll);
+	} else if (IS_CHERRYVIEW(dev_priv)) {
+		divisor = chv_dpll;
+		count = ARRAY_SIZE(chv_dpll);
+	} else if (IS_VALLEYVIEW(dev_priv)) {
+		divisor = vlv_dpll;
+		count = ARRAY_SIZE(vlv_dpll);
 	}
 
-	/*
-	 * Similar magic as in intel_dp_enable_port().
-	 * We _must_ do this port enable + disable trick
-	 * to make this power sequencer lock onto the port.
-	 * Otherwise even VDD force bit won't work.
-	 */
-	intel_de_write(dev_priv, intel_dp->output_reg, DP);
-	intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
-	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
-	intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
-	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
-	intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
-	if (!pll_enabled) {
-		vlv_force_pll_off(dev_priv, pipe);
-
-		if (release_cl_override)
-			chv_phy_powergate_ch(dev_priv, phy, ch, false);
+	if (divisor && count) {
+		for (i = 0; i < count; i++) {
+			if (pipe_config->port_clock == divisor[i].clock) {
+				pipe_config->dpll = divisor[i].dpll;
+				pipe_config->clock_set = true;
+				break;
+			}
+		}
 	}
 }
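+
+/*
+ * The *_dpll[] tables above pair each supported DP link rate with fixed,
+ * pre-validated divider values; setting clock_set signals the CRTC code
+ * to take pipe_config->dpll as-is instead of computing dividers itself.
+ * E.g. a g4x port_clock of 270000 (HBR) would select the matching
+ * g4x_dpll[] entry (rates here are illustrative).
+ */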
 
-static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
+static void snprintf_int_array(char *str, size_t len,
+			       const int *array, int nelem)
 {
-	struct intel_encoder *encoder;
-	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
-
-	/*
-	 * We don't have power sequencer currently.
-	 * Pick one that's not used by other ports.
-	 */
-	for_each_intel_dp(&dev_priv->drm, encoder) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-		if (encoder->type == INTEL_OUTPUT_EDP) {
-			drm_WARN_ON(&dev_priv->drm,
-				    intel_dp->active_pipe != INVALID_PIPE &&
-				    intel_dp->active_pipe !=
-				    intel_dp->pps_pipe);
+	int i;
 
-			if (intel_dp->pps_pipe != INVALID_PIPE)
-				pipes &= ~(1 << intel_dp->pps_pipe);
-		} else {
-			drm_WARN_ON(&dev_priv->drm,
-				    intel_dp->pps_pipe != INVALID_PIPE);
+	str[0] = '\0';
 
-			if (intel_dp->active_pipe != INVALID_PIPE)
-				pipes &= ~(1 << intel_dp->active_pipe);
-		}
+	for (i = 0; i < nelem; i++) {
+		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
+		if (r >= len)
+			return;
+		str += r;
+		len -= r;
 	}
-
-	if (pipes == 0)
-		return INVALID_PIPE;
-
-	return ffs(pipes) - 1;
 }
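+
+/*
+ * snprintf() returns the length the string would have had, so r >= len
+ * detects truncation and stops the loop; the buffer stays NUL-terminated
+ * either way. E.g. {162000, 270000} formats as "162000, 270000".
+ */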
 
-static enum pipe
-vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
+static void intel_dp_print_rates(struct intel_dp *intel_dp)
 {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	enum pipe pipe;
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	char str[128]; /* FIXME: too big for stack? */
 
-	lockdep_assert_held(&dev_priv->pps_mutex);
+	if (!drm_debug_enabled(DRM_UT_KMS))
+		return;
 
-	/* We should never land here with regular DP ports */
-	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+	snprintf_int_array(str, sizeof(str),
+			   intel_dp->source_rates, intel_dp->num_source_rates);
+	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);
 
-	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
-		    intel_dp->active_pipe != intel_dp->pps_pipe);
+	snprintf_int_array(str, sizeof(str),
+			   intel_dp->sink_rates, intel_dp->num_sink_rates);
+	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);
 
-	if (intel_dp->pps_pipe != INVALID_PIPE)
-		return intel_dp->pps_pipe;
+	snprintf_int_array(str, sizeof(str),
+			   intel_dp->common_rates, intel_dp->num_common_rates);
+	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
+}
 
-	pipe = vlv_find_free_pps(dev_priv);
+int
+intel_dp_max_link_rate(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	int len;
 
-	/*
-	 * Didn't find one. This should not happen since there
-	 * are two power sequencers and up to two eDP ports.
-	 */
-	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
-		pipe = PIPE_A;
+	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
+	if (drm_WARN_ON(&i915->drm, len <= 0))
+		return 162000;
 
-	vlv_steal_power_sequencer(dev_priv, pipe);
-	intel_dp->pps_pipe = pipe;
+	return intel_dp->common_rates[len - 1];
+}
 
-	drm_dbg_kms(&dev_priv->drm,
-		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
-		    pipe_name(intel_dp->pps_pipe),
-		    dig_port->base.base.base.id,
-		    dig_port->base.base.name);
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	int i = intel_dp_rate_index(intel_dp->sink_rates,
+				    intel_dp->num_sink_rates, rate);
 
-	/* init power sequencer on this pipe and port */
-	intel_dp_init_panel_power_sequencer(intel_dp);
-	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
+	if (drm_WARN_ON(&i915->drm, i < 0))
+		i = 0;
 
-	/*
-	 * Even vdd force doesn't work until we've made
-	 * the power sequencer lock in on the port.
-	 */
-	vlv_power_sequencer_kick(intel_dp);
+	return i;
+}
 
-	return intel_dp->pps_pipe;
+void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
+			   u8 *link_bw, u8 *rate_select)
+{
+	/* eDP 1.4 rate select method. */
+	if (intel_dp->use_rate_select) {
+		*link_bw = 0;
+		*rate_select =
+			intel_dp_rate_select(intel_dp, port_clock);
+	} else {
+		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
+		*rate_select = 0;
+	}
 }
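+
+/*
+ * For the link-BW-code path the mapping is rate / 27000: 162000 -> 0x06,
+ * 270000 -> 0x0a, 540000 -> 0x14 (the DP_LINK_BW_* values). With the
+ * eDP 1.4 rate-select method, link_bw stays 0 and rate_select is instead
+ * an index into the sink's SUPPORTED_LINK_RATES table.
+ */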
 
-static int
-bxt_power_sequencer_idx(struct intel_dp *intel_dp)
+static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
+					 const struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	int backlight_controller = dev_priv->vbt.backlight.controller;
-
-	lockdep_assert_held(&dev_priv->pps_mutex);
 
-	/* We should never land here with regular DP ports */
-	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
-
-	if (!intel_dp->pps_reset)
-		return backlight_controller;
-
-	intel_dp->pps_reset = false;
+	/* On TGL, FEC is supported on all Pipes */
+	if (INTEL_GEN(dev_priv) >= 12)
+		return true;
 
-	/*
-	 * Only the HW needs to be reprogrammed, the SW state is fixed and
-	 * has been setup during connector init.
-	 */
-	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
+	if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
+		return true;
 
-	return backlight_controller;
+	return false;
 }
 
-typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
-			       enum pipe pipe);
-
-static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
-			       enum pipe pipe)
+static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+				  const struct intel_crtc_state *pipe_config)
 {
-	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
+	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
+		drm_dp_sink_supports_fec(intel_dp->fec_capable);
 }
 
-static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
-				enum pipe pipe)
+static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
+				  const struct intel_crtc_state *crtc_state)
 {
-	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
+		return false;
+
+	return intel_dsc_source_support(crtc_state) &&
+		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
 }
 
-static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
-			 enum pipe pipe)
+static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
+				   const struct intel_crtc_state *crtc_state)
 {
-	return true;
+	return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+		(crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
+		 intel_dp->dfp.ycbcr_444_to_420);
 }
 
-static enum pipe
-vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
-		     enum port port,
-		     vlv_pipe_check pipe_check)
+static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
+				    const struct intel_crtc_state *crtc_state, int bpc)
 {
-	enum pipe pipe;
-
-	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
-		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
-			PANEL_PORT_SELECT_MASK;
-
-		if (port_sel != PANEL_PORT_SELECT_VLV(port))
-			continue;
-
-		if (!pipe_check(dev_priv, pipe))
-			continue;
+	int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;
 
-		return pipe;
-	}
+	if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
+		clock /= 2;
 
-	return INVALID_PIPE;
+	return clock;
 }
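+
+/*
+ * E.g. a 148500 kHz (1080p60) mode at 12 bpc needs a TMDS clock of
+ * 148500 * 12 / 8 = 222750 kHz, while 4:2:0 output halves the TMDS
+ * clock since each pixel carries half the data.
+ */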
 
-static void
-vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
+static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
+					   const struct intel_crtc_state *crtc_state, int bpc)
 {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	enum port port = dig_port->base.port;
+	int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);
 
-	lockdep_assert_held(&dev_priv->pps_mutex);
-
-	/* try to find a pipe with this port selected */
-	/* first pick one where the panel is on */
-	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
-						  vlv_pipe_has_pp_on);
-	/* didn't find one? pick one where vdd is on */
-	if (intel_dp->pps_pipe == INVALID_PIPE)
-		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
-							  vlv_pipe_has_vdd_on);
-	/* didn't find one? pick one with just the correct port */
-	if (intel_dp->pps_pipe == INVALID_PIPE)
-		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
-							  vlv_pipe_any);
-
-	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
-	if (intel_dp->pps_pipe == INVALID_PIPE) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "no initial power sequencer for [ENCODER:%d:%s]\n",
-			    dig_port->base.base.base.id,
-			    dig_port->base.base.name);
-		return;
-	}
+	if (intel_dp->dfp.min_tmds_clock &&
+	    tmds_clock < intel_dp->dfp.min_tmds_clock)
+		return false;
 
-	drm_dbg_kms(&dev_priv->drm,
-		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
-		    dig_port->base.base.base.id,
-		    dig_port->base.base.name,
-		    pipe_name(intel_dp->pps_pipe));
+	if (intel_dp->dfp.max_tmds_clock &&
+	    tmds_clock > intel_dp->dfp.max_tmds_clock)
+		return false;
 
-	intel_dp_init_panel_power_sequencer(intel_dp);
-	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
+	return true;
 }
 
-void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
+static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
+					      const struct intel_crtc_state *crtc_state,
+					      int bpc)
 {
-	struct intel_encoder *encoder;
-
-	if (drm_WARN_ON(&dev_priv->drm,
-			!(IS_VALLEYVIEW(dev_priv) ||
-			  IS_CHERRYVIEW(dev_priv) ||
-			  IS_GEN9_LP(dev_priv))))
-		return;
 
-	/*
-	 * We can't grab pps_mutex here due to deadlock with power_domain
-	 * mutex when power_domain functions are called while holding pps_mutex.
-	 * That also means that in order to use pps_pipe the code needs to
-	 * hold both a power domain reference and pps_mutex, and the power domain
-	 * reference get/put must be done while _not_ holding pps_mutex.
-	 * pps_{lock,unlock}() do these steps in the correct order, so one
-	 * should use them always.
-	 */
-
-	for_each_intel_dp(&dev_priv->drm, encoder) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-		drm_WARN_ON(&dev_priv->drm,
-			    intel_dp->active_pipe != INVALID_PIPE);
-
-		if (encoder->type != INTEL_OUTPUT_EDP)
-			continue;
-
-		if (IS_GEN9_LP(dev_priv))
-			intel_dp->pps_reset = true;
-		else
-			intel_dp->pps_pipe = INVALID_PIPE;
-	}
-}
-
-struct pps_registers {
-	i915_reg_t pp_ctrl;
-	i915_reg_t pp_stat;
-	i915_reg_t pp_on;
-	i915_reg_t pp_off;
-	i915_reg_t pp_div;
-};
-
-static void intel_pps_get_registers(struct intel_dp *intel_dp,
-				    struct pps_registers *regs)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	int pps_idx = 0;
-
-	memset(regs, 0, sizeof(*regs));
-
-	if (IS_GEN9_LP(dev_priv))
-		pps_idx = bxt_power_sequencer_idx(intel_dp);
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		pps_idx = vlv_power_sequencer_pipe(intel_dp);
-
-	regs->pp_ctrl = PP_CONTROL(pps_idx);
-	regs->pp_stat = PP_STATUS(pps_idx);
-	regs->pp_on = PP_ON_DELAYS(pps_idx);
-	regs->pp_off = PP_OFF_DELAYS(pps_idx);
-
-	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
-	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
-		regs->pp_div = INVALID_MMIO_REG;
-	else
-		regs->pp_div = PP_DIVISOR(pps_idx);
-}
-
-static i915_reg_t
-_pp_ctrl_reg(struct intel_dp *intel_dp)
-{
-	struct pps_registers regs;
-
-	intel_pps_get_registers(intel_dp, &regs);
-
-	return regs.pp_ctrl;
-}
-
-static i915_reg_t
-_pp_stat_reg(struct intel_dp *intel_dp)
-{
-	struct pps_registers regs;
-
-	intel_pps_get_registers(intel_dp, &regs);
-
-	return regs.pp_stat;
-}
-
-static bool edp_have_panel_power(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
-	lockdep_assert_held(&dev_priv->pps_mutex);
-
-	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-	    intel_dp->pps_pipe == INVALID_PIPE)
-		return false;
-
-	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
-}
-
-static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
-	lockdep_assert_held(&dev_priv->pps_mutex);
-
-	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-	    intel_dp->pps_pipe == INVALID_PIPE)
-		return false;
-
-	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
-}
-
-static void
-intel_dp_check_edp(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
-	if (!intel_dp_is_edp(intel_dp))
-		return;
-
-	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
-		drm_WARN(&dev_priv->drm, 1,
-			 "eDP powered off while attempting aux channel communication.\n");
-		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
-			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
-			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
-	}
-}
-
-static u32
-intel_dp_aux_wait_done(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
-	const unsigned int timeout_ms = 10;
-	u32 status;
-	bool done;
-
-#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
-	done = wait_event_timeout(i915->gmbus_wait_queue, C,
-				  msecs_to_jiffies_timeout(timeout_ms));
-
-	/* just trace the final value */
-	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
-
-	if (!done)
-		drm_err(&i915->drm,
-			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
-			intel_dp->aux.name, timeout_ms, status);
-#undef C
-
-	return status;
-}
-
-static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
-	if (index)
-		return 0;
-
-	/*
-	 * The clock divider is based off the hrawclk, and would like to run at
-	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
-	 */
-	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
-}
-
-static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	u32 freq;
-
-	if (index)
-		return 0;
-
-	/*
-	 * The clock divider is based off the cdclk or PCH rawclk, and would
-	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
-	 * divide by 2000 and use that
-	 */
-	if (dig_port->aux_ch == AUX_CH_A)
-		freq = dev_priv->cdclk.hw.cdclk;
-	else
-		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
-	return DIV_ROUND_CLOSEST(freq, 2000);
-}
-
-static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-
-	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
-		/* Workaround for non-ULT HSW */
-		switch (index) {
-		case 0: return 63;
-		case 1: return 72;
-		default: return 0;
-		}
-	}
-
-	return ilk_get_aux_clock_divider(intel_dp, index);
-}
-
-static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
-	/*
-	 * SKL doesn't need us to program the AUX clock divider (Hardware will
-	 * derive the clock from CDCLK automatically). We still implement the
-	 * get_aux_clock_divider vfunc to plug-in into the existing code.
-	 */
-	return index ? 0 : 1;
-}
-
-static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
-				int send_bytes,
-				u32 aux_clock_divider)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv =
-			to_i915(dig_port->base.base.dev);
-	u32 precharge, timeout;
-
-	if (IS_GEN(dev_priv, 6))
-		precharge = 3;
-	else
-		precharge = 5;
-
-	if (IS_BROADWELL(dev_priv))
-		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
-	else
-		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
-
-	return DP_AUX_CH_CTL_SEND_BUSY |
-	       DP_AUX_CH_CTL_DONE |
-	       DP_AUX_CH_CTL_INTERRUPT |
-	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
-	       timeout |
-	       DP_AUX_CH_CTL_RECEIVE_ERROR |
-	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
-	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
-}
-
-static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
-				int send_bytes,
-				u32 unused)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *i915 =
-			to_i915(dig_port->base.base.dev);
-	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
-	u32 ret;
-
-	ret = DP_AUX_CH_CTL_SEND_BUSY |
-	      DP_AUX_CH_CTL_DONE |
-	      DP_AUX_CH_CTL_INTERRUPT |
-	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
-	      DP_AUX_CH_CTL_TIME_OUT_MAX |
-	      DP_AUX_CH_CTL_RECEIVE_ERROR |
-	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
-	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
-
-	if (intel_phy_is_tc(i915, phy) &&
-	    dig_port->tc_mode == TC_PORT_TBT_ALT)
-		ret |= DP_AUX_CH_CTL_TBT_IO;
-
-	return ret;
-}
-
-static int
-intel_dp_aux_xfer(struct intel_dp *intel_dp,
-		  const u8 *send, int send_bytes,
-		  u8 *recv, int recv_size,
-		  u32 aux_send_ctl_flags)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *i915 =
-			to_i915(dig_port->base.base.dev);
-	struct intel_uncore *uncore = &i915->uncore;
-	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
-	bool is_tc_port = intel_phy_is_tc(i915, phy);
-	i915_reg_t ch_ctl, ch_data[5];
-	u32 aux_clock_divider;
-	enum intel_display_power_domain aux_domain;
-	intel_wakeref_t aux_wakeref;
-	intel_wakeref_t pps_wakeref;
-	int i, ret, recv_bytes;
-	int try, clock = 0;
-	u32 status;
-	bool vdd;
-
-	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
-	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
-		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
-
-	if (is_tc_port)
-		intel_tc_port_lock(dig_port);
-
-	aux_domain = intel_aux_power_domain(dig_port);
-
-	aux_wakeref = intel_display_power_get(i915, aux_domain);
-	pps_wakeref = pps_lock(intel_dp);
-
-	/*
-	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
-	 * In such cases we want to leave VDD enabled and it's up to upper layers
-	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
-	 * ourselves.
-	 */
-	vdd = edp_panel_vdd_on(intel_dp);
-
-	/* dp aux is extremely sensitive to irq latency, hence request the
-	 * lowest possible wakeup latency and so prevent the cpu from going into
-	 * deep sleep states.
-	 */
-	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
-
-	intel_dp_check_edp(intel_dp);
-
-	/* Try to wait for any previous AUX channel activity */
-	for (try = 0; try < 3; try++) {
-		status = intel_uncore_read_notrace(uncore, ch_ctl);
-		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
-			break;
-		msleep(1);
-	}
-	/* just trace the final value */
-	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
-
-	if (try == 3) {
-		const u32 status = intel_uncore_read(uncore, ch_ctl);
-
-		if (status != intel_dp->aux_busy_last_status) {
-			drm_WARN(&i915->drm, 1,
-				 "%s: not started (status 0x%08x)\n",
-				 intel_dp->aux.name, status);
-			intel_dp->aux_busy_last_status = status;
-		}
-
-		ret = -EBUSY;
-		goto out;
-	}
-
-	/* Only 5 data registers! */
-	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
-		ret = -E2BIG;
-		goto out;
-	}
-
-	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
-		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
-							  send_bytes,
-							  aux_clock_divider);
-
-		send_ctl |= aux_send_ctl_flags;
-
-		/* Must try at least 3 times according to DP spec */
-		for (try = 0; try < 5; try++) {
-			/* Load the send data into the aux channel data registers */
-			for (i = 0; i < send_bytes; i += 4)
-				intel_uncore_write(uncore,
-						   ch_data[i >> 2],
-						   intel_dp_pack_aux(send + i,
-								     send_bytes - i));
-
-			/* Send the command and wait for it to complete */
-			intel_uncore_write(uncore, ch_ctl, send_ctl);
-
-			status = intel_dp_aux_wait_done(intel_dp);
-
-			/* Clear done status and any errors */
-			intel_uncore_write(uncore,
-					   ch_ctl,
-					   status |
-					   DP_AUX_CH_CTL_DONE |
-					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
-					   DP_AUX_CH_CTL_RECEIVE_ERROR);
-
-			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
-			 *   400us delay required for errors and timeouts
-			 *   Timeout errors from the HW already meet this
-			 *   requirement so skip to next iteration
-			 */
-			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
-				continue;
-
-			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
-				usleep_range(400, 500);
-				continue;
-			}
-			if (status & DP_AUX_CH_CTL_DONE)
-				goto done;
-		}
-	}
-
-	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
-		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
-			intel_dp->aux.name, status);
-		ret = -EBUSY;
-		goto out;
-	}
-
-done:
-	/* Check for timeout or receive error.
-	 * Timeouts occur when the sink is not connected
-	 */
-	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
-		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
-			intel_dp->aux.name, status);
-		ret = -EIO;
-		goto out;
-	}
-
-	/* Timeouts occur when the device isn't connected, so they're
-	 * "normal" -- don't fill the kernel log with these */
-	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
-		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
-			    intel_dp->aux.name, status);
-		ret = -ETIMEDOUT;
-		goto out;
-	}
-
-	/* Unload any bytes sent back from the other side */
-	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
-		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
-
-	/*
-	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
-	 * We have no idea of what happened so we return -EBUSY so
-	 * drm layer takes care for the necessary retries.
-	 */
-	if (recv_bytes == 0 || recv_bytes > 20) {
-		drm_dbg_kms(&i915->drm,
-			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
-			    intel_dp->aux.name, recv_bytes);
-		ret = -EBUSY;
-		goto out;
-	}
-
-	if (recv_bytes > recv_size)
-		recv_bytes = recv_size;
-
-	for (i = 0; i < recv_bytes; i += 4)
-		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
-				    recv + i, recv_bytes - i);
-
-	ret = recv_bytes;
-out:
-	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
-
-	if (vdd)
-		edp_panel_vdd_off(intel_dp, false);
-
-	pps_unlock(intel_dp, pps_wakeref);
-	intel_display_power_put_async(i915, aux_domain, aux_wakeref);
-
-	if (is_tc_port)
-		intel_tc_port_unlock(dig_port);
-
-	return ret;
-}
-
-#define BARE_ADDRESS_SIZE	3
-#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
-
-static void
-intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
-		    const struct drm_dp_aux_msg *msg)
-{
-	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
-	txbuf[1] = (msg->address >> 8) & 0xff;
-	txbuf[2] = msg->address & 0xff;
-	txbuf[3] = msg->size - 1;
-}
-
-static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
-{
-	/*
-	 * If we're trying to send the HDCP Aksv, we need to set a the Aksv
-	 * select bit to inform the hardware to send the Aksv after our header
-	 * since we can't access that data from software.
-	 */
-	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
-	    msg->address == DP_AUX_HDCP_AKSV)
-		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
-
-	return 0;
-}
-
-static ssize_t
-intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
-{
-	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
-	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-	u8 txbuf[20], rxbuf[20];
-	size_t txsize, rxsize;
-	u32 flags = intel_dp_aux_xfer_flags(msg);
-	int ret;
-
-	intel_dp_aux_header(txbuf, msg);
-
-	switch (msg->request & ~DP_AUX_I2C_MOT) {
-	case DP_AUX_NATIVE_WRITE:
-	case DP_AUX_I2C_WRITE:
-	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
-		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
-		rxsize = 2; /* 0 or 1 data bytes */
-
-		if (drm_WARN_ON(&i915->drm, txsize > 20))
-			return -E2BIG;
-
-		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
-
-		if (msg->buffer)
-			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
-
-		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
-					rxbuf, rxsize, flags);
-		if (ret > 0) {
-			msg->reply = rxbuf[0] >> 4;
-
-			if (ret > 1) {
-				/* Number of bytes written in a short write. */
-				ret = clamp_t(int, rxbuf[1], 0, msg->size);
-			} else {
-				/* Return payload size. */
-				ret = msg->size;
-			}
-		}
-		break;
-
-	case DP_AUX_NATIVE_READ:
-	case DP_AUX_I2C_READ:
-		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
-		rxsize = msg->size + 1;
-
-		if (drm_WARN_ON(&i915->drm, rxsize > 20))
-			return -E2BIG;
-
-		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
-					rxbuf, rxsize, flags);
-		if (ret > 0) {
-			msg->reply = rxbuf[0] >> 4;
-			/*
-			 * Assume happy day, and copy the data. The caller is
-			 * expected to check msg->reply before touching it.
-			 *
-			 * Return payload size.
-			 */
-			ret--;
-			memcpy(msg->buffer, rxbuf + 1, ret);
-		}
-		break;
-
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	return ret;
-}
-
-
-static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	enum aux_ch aux_ch = dig_port->aux_ch;
-
-	switch (aux_ch) {
-	case AUX_CH_B:
-	case AUX_CH_C:
-	case AUX_CH_D:
-		return DP_AUX_CH_CTL(aux_ch);
-	default:
-		MISSING_CASE(aux_ch);
-		return DP_AUX_CH_CTL(AUX_CH_B);
-	}
-}
-
-static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	enum aux_ch aux_ch = dig_port->aux_ch;
-
-	switch (aux_ch) {
-	case AUX_CH_B:
-	case AUX_CH_C:
-	case AUX_CH_D:
-		return DP_AUX_CH_DATA(aux_ch, index);
-	default:
-		MISSING_CASE(aux_ch);
-		return DP_AUX_CH_DATA(AUX_CH_B, index);
-	}
-}
-
-static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	enum aux_ch aux_ch = dig_port->aux_ch;
-
-	switch (aux_ch) {
-	case AUX_CH_A:
-		return DP_AUX_CH_CTL(aux_ch);
-	case AUX_CH_B:
-	case AUX_CH_C:
-	case AUX_CH_D:
-		return PCH_DP_AUX_CH_CTL(aux_ch);
-	default:
-		MISSING_CASE(aux_ch);
-		return DP_AUX_CH_CTL(AUX_CH_A);
-	}
-}
-
-static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	enum aux_ch aux_ch = dig_port->aux_ch;
-
-	switch (aux_ch) {
-	case AUX_CH_A:
-		return DP_AUX_CH_DATA(aux_ch, index);
-	case AUX_CH_B:
-	case AUX_CH_C:
-	case AUX_CH_D:
-		return PCH_DP_AUX_CH_DATA(aux_ch, index);
-	default:
-		MISSING_CASE(aux_ch);
-		return DP_AUX_CH_DATA(AUX_CH_A, index);
-	}
-}
-
-static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	enum aux_ch aux_ch = dig_port->aux_ch;
-
-	switch (aux_ch) {
-	case AUX_CH_A:
-	case AUX_CH_B:
-	case AUX_CH_C:
-	case AUX_CH_D:
-	case AUX_CH_E:
-	case AUX_CH_F:
-		return DP_AUX_CH_CTL(aux_ch);
-	default:
-		MISSING_CASE(aux_ch);
-		return DP_AUX_CH_CTL(AUX_CH_A);
-	}
-}
-
-static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	enum aux_ch aux_ch = dig_port->aux_ch;
-
-	switch (aux_ch) {
-	case AUX_CH_A:
-	case AUX_CH_B:
-	case AUX_CH_C:
-	case AUX_CH_D:
-	case AUX_CH_E:
-	case AUX_CH_F:
-		return DP_AUX_CH_DATA(aux_ch, index);
-	default:
-		MISSING_CASE(aux_ch);
-		return DP_AUX_CH_DATA(AUX_CH_A, index);
-	}
-}
-
-static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	enum aux_ch aux_ch = dig_port->aux_ch;
-
-	switch (aux_ch) {
-	case AUX_CH_A:
-	case AUX_CH_B:
-	case AUX_CH_C:
-	case AUX_CH_USBC1:
-	case AUX_CH_USBC2:
-	case AUX_CH_USBC3:
-	case AUX_CH_USBC4:
-	case AUX_CH_USBC5:
-	case AUX_CH_USBC6:
-		return DP_AUX_CH_CTL(aux_ch);
-	default:
-		MISSING_CASE(aux_ch);
-		return DP_AUX_CH_CTL(AUX_CH_A);
-	}
-}
-
-static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	enum aux_ch aux_ch = dig_port->aux_ch;
-
-	switch (aux_ch) {
-	case AUX_CH_A:
-	case AUX_CH_B:
-	case AUX_CH_C:
-	case AUX_CH_USBC1:
-	case AUX_CH_USBC2:
-	case AUX_CH_USBC3:
-	case AUX_CH_USBC4:
-	case AUX_CH_USBC5:
-	case AUX_CH_USBC6:
-		return DP_AUX_CH_DATA(aux_ch, index);
-	default:
-		MISSING_CASE(aux_ch);
-		return DP_AUX_CH_DATA(AUX_CH_A, index);
-	}
-}
-
-static void
-intel_dp_aux_fini(struct intel_dp *intel_dp)
-{
-	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
-		cpu_latency_qos_remove_request(&intel_dp->pm_qos);
-
-	kfree(intel_dp->aux.name);
-}
-
-static void
-intel_dp_aux_init(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct intel_encoder *encoder = &dig_port->base;
-	enum aux_ch aux_ch = dig_port->aux_ch;
-
-	if (INTEL_GEN(dev_priv) >= 12) {
-		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
-		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
-	} else if (INTEL_GEN(dev_priv) >= 9) {
-		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
-		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
-	} else if (HAS_PCH_SPLIT(dev_priv)) {
-		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
-		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
-	} else {
-		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
-		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
-	}
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
-	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
-		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
-	else if (HAS_PCH_SPLIT(dev_priv))
-		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
-	else
-		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
-	else
-		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
-
-	drm_dp_aux_init(&intel_dp->aux);
-
-	/* Failure to allocate our preferred name is not critical */
-	if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
-		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
-					       aux_ch - AUX_CH_USBC1 + '1',
-					       encoder->base.name);
-	else
-		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
-					       aux_ch_name(aux_ch),
-					       encoder->base.name);
-
-	intel_dp->aux.transfer = intel_dp_aux_transfer;
-	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
-}
-
-bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
-{
-	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
-
-	return max_rate >= 540000;
-}
-
-bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
-{
-	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
-
-	return max_rate >= 810000;
-}
-
-static void
-intel_dp_set_clock(struct intel_encoder *encoder,
-		   struct intel_crtc_state *pipe_config)
-{
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	const struct dp_link_dpll *divisor = NULL;
-	int i, count = 0;
-
-	if (IS_G4X(dev_priv)) {
-		divisor = g4x_dpll;
-		count = ARRAY_SIZE(g4x_dpll);
-	} else if (HAS_PCH_SPLIT(dev_priv)) {
-		divisor = pch_dpll;
-		count = ARRAY_SIZE(pch_dpll);
-	} else if (IS_CHERRYVIEW(dev_priv)) {
-		divisor = chv_dpll;
-		count = ARRAY_SIZE(chv_dpll);
-	} else if (IS_VALLEYVIEW(dev_priv)) {
-		divisor = vlv_dpll;
-		count = ARRAY_SIZE(vlv_dpll);
-	}
-
-	if (divisor && count) {
-		for (i = 0; i < count; i++) {
-			if (pipe_config->port_clock == divisor[i].clock) {
-				pipe_config->dpll = divisor[i].dpll;
-				pipe_config->clock_set = true;
-				break;
-			}
-		}
-	}
-}
-
-static void snprintf_int_array(char *str, size_t len,
-			       const int *array, int nelem)
-{
-	int i;
-
-	str[0] = '\0';
-
-	for (i = 0; i < nelem; i++) {
-		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
-		if (r >= len)
-			return;
-		str += r;
-		len -= r;
-	}
-}
-
-static void intel_dp_print_rates(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-	char str[128]; /* FIXME: too big for stack? */
-
-	if (!drm_debug_enabled(DRM_UT_KMS))
-		return;
-
-	snprintf_int_array(str, sizeof(str),
-			   intel_dp->source_rates, intel_dp->num_source_rates);
-	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);
-
-	snprintf_int_array(str, sizeof(str),
-			   intel_dp->sink_rates, intel_dp->num_sink_rates);
-	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);
-
-	snprintf_int_array(str, sizeof(str),
-			   intel_dp->common_rates, intel_dp->num_common_rates);
-	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
-}
-
-int
-intel_dp_max_link_rate(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-	int len;
-
-	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
-	if (drm_WARN_ON(&i915->drm, len <= 0))
-		return 162000;
-
-	return intel_dp->common_rates[len - 1];
-}
-
-int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
-{
-	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-	int i = intel_dp_rate_index(intel_dp->sink_rates,
-				    intel_dp->num_sink_rates, rate);
-
-	if (drm_WARN_ON(&i915->drm, i < 0))
-		i = 0;
-
-	return i;
-}
-
-void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
-			   u8 *link_bw, u8 *rate_select)
-{
-	/* eDP 1.4 rate select method. */
-	if (intel_dp->use_rate_select) {
-		*link_bw = 0;
-		*rate_select =
-			intel_dp_rate_select(intel_dp, port_clock);
-	} else {
-		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
-		*rate_select = 0;
-	}
-}
-
-static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
-					 const struct intel_crtc_state *pipe_config)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
-	/* On TGL, FEC is supported on all Pipes */
-	if (INTEL_GEN(dev_priv) >= 12)
-		return true;
-
-	if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
-		return true;
-
-	return false;
-}
-
-static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
-				  const struct intel_crtc_state *pipe_config)
-{
-	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
-		drm_dp_sink_supports_fec(intel_dp->fec_capable);
-}
-
-static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
-				  const struct intel_crtc_state *crtc_state)
-{
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
-		return false;
-
-	return intel_dsc_source_support(crtc_state) &&
-		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
-}
-
-static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
-				   const struct intel_crtc_state *crtc_state)
-{
-	return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
-		(crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
-		 intel_dp->dfp.ycbcr_444_to_420);
-}
-
-static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
-				    const struct intel_crtc_state *crtc_state, int bpc)
-{
-	int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;
-
-	if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
-		clock /= 2;
-
-	return clock;
-}
-
-static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
-					   const struct intel_crtc_state *crtc_state, int bpc)
-{
-	int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);
-
-	if (intel_dp->dfp.min_tmds_clock &&
-	    tmds_clock < intel_dp->dfp.min_tmds_clock)
-		return false;
-
-	if (intel_dp->dfp.max_tmds_clock &&
-	    tmds_clock > intel_dp->dfp.max_tmds_clock)
-		return false;
-
-	return true;
-}
-
-static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
-					      const struct intel_crtc_state *crtc_state,
-					      int bpc)
-{
-
-	return intel_hdmi_deep_color_possible(crtc_state, bpc,
-					      intel_dp->has_hdmi_sink,
-					      intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
-		intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
-}
+	return intel_hdmi_deep_color_possible(crtc_state, bpc,
+					      intel_dp->has_hdmi_sink,
+					      intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
+		intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
+}
 
 static int intel_dp_max_bpp(struct intel_dp *intel_dp,
 			    const struct intel_crtc_state *crtc_state)
@@ -2267,6 +1174,44 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
 	return -EINVAL;
 }
 
+/* Optimize link config in order: max bpp, min lanes, min clock */
+static int
+intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
+				  struct intel_crtc_state *pipe_config,
+				  const struct link_config_limits *limits)
+{
+	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+	int bpp, clock, lane_count;
+	int mode_rate, link_clock, link_avail;
+
+	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+		int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
+
+		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+						   output_bpp);
+
+		for (lane_count = limits->min_lane_count;
+		     lane_count <= limits->max_lane_count;
+		     lane_count <<= 1) {
+			for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
+				link_clock = intel_dp->common_rates[clock];
+				link_avail = intel_dp_max_data_rate(link_clock,
+								    lane_count);
+
+				if (mode_rate <= link_avail) {
+					pipe_config->lane_count = lane_count;
+					pipe_config->pipe_bpp = bpp;
+					pipe_config->port_clock = link_clock;
+
+					return 0;
+				}
+			}
+		}
+	}
+
+	return -EINVAL;
+}
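+
+/*
+ * Note the loop order above: bpp descends in the outer loop, lane count
+ * ascends next and link rate ascends innermost -- i.e. keep the highest
+ * bpp and prefer the narrowest link, taking a higher clock before more
+ * lanes; intel_dp_compute_link_config_wide() makes the opposite
+ * lane/clock trade-off for external DP.
+ */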
+
 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
 {
 	int i, num_bpc;
@@ -2293,6 +1238,14 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
 	u8 line_buf_depth;
 	int ret;
 
+	/*
+	 * RC_MODEL_SIZE is currently a constant across all configurations.
+	 *
+	 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
+	 * DP_DSC_RC_BUF_SIZE for this.
+	 */
+	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+
 	ret = intel_dsc_compute_params(encoder, crtc_state);
 	if (ret)
 		return ret;
@@ -2482,13 +1435,14 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 	limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
 	limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);
 
-	if (intel_dp_is_edp(intel_dp)) {
+	if (intel_dp->use_max_params) {
 		/*
 		 * Use the maximum clock and number of lanes the eDP panel
-		 * advertizes being capable of. The panels are generally
+		 * advertises being capable of in case the initial fast
+		 * optimal params failed us. The panels are generally
 		 * designed to support only a single clock and lane
-		 * configuration, and typically these values correspond to the
-		 * native resolution of the panel.
+		 * configuration, and typically on older panels these
+		 * values correspond to the native resolution of the panel.
 		 */
 		limits.min_lane_count = limits.max_lane_count;
 		limits.min_clock = limits.max_clock;
@@ -2507,11 +1461,22 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 	    intel_dp_can_bigjoiner(intel_dp))
 		pipe_config->bigjoiner = true;
 
-	/*
-	 * Optimize for slow and wide. This is the place to add alternative
-	 * optimization policy.
-	 */
-	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
+	if (intel_dp_is_edp(intel_dp))
+		/*
+		 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
+		 * section A.1: "It is recommended that the minimum number of
+		 * lanes be used, using the minimum link rate allowed for that
+		 * lane configuration."
+		 *
+		 * Note that we fall back to the max clock and lane count for eDP
+		 * panels that fail with the fast optimal settings (see
+		 * intel_dp->use_max_params), in which case the fast vs. wide
+		 * choice doesn't matter.
+		 */
+		ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
+	else
+		/* Optimize for slow and wide. */
+		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
 
 	/* enable compression if the mode doesn't fit available BW */
 	drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
@@ -2760,6 +1725,9 @@ intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
 	struct intel_connector *intel_connector = intel_dp->attached_connector;
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
+	if (pipe_config->vrr.enable)
+		return;
+
 	/*
 	 * DRRS and PSR can't be enabled together, so giving preference to PSR
 	 * as it allows more power savings by completely shutting down the display,
@@ -2792,8 +1760,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	struct intel_connector *intel_connector = intel_dp->attached_connector;
 	struct intel_digital_connector_state *intel_conn_state =
 		to_intel_digital_connector_state(conn_state);
-	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
-					   DP_DPCD_QUIRK_CONSTANT_N);
+	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
 	int ret = 0, output_bpp;
 
 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
@@ -2863,6 +1830,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	if (!HAS_DDI(dev_priv))
 		intel_dp_set_clock(encoder, pipe_config);
 
+	intel_vrr_compute_config(pipe_config, conn_state);
 	intel_psr_compute_config(intel_dp, pipe_config);
 	intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
 				     constant_n);
@@ -2956,435 +1924,14 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
 			intel_dp->DP |= DP_ENHANCED_FRAMING;
 
-		if (IS_CHERRYVIEW(dev_priv))
-			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
-		else
-			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
-	}
-}
-
-#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
-#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
-
-#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
-#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
-
-#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
-#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
-
-static void intel_pps_verify_state(struct intel_dp *intel_dp);
-
-static void wait_panel_status(struct intel_dp *intel_dp,
-				       u32 mask,
-				       u32 value)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	i915_reg_t pp_stat_reg, pp_ctrl_reg;
-
-	lockdep_assert_held(&dev_priv->pps_mutex);
-
-	intel_pps_verify_state(intel_dp);
-
-	pp_stat_reg = _pp_stat_reg(intel_dp);
-	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "mask %08x value %08x status %08x control %08x\n",
-		    mask, value,
-		    intel_de_read(dev_priv, pp_stat_reg),
-		    intel_de_read(dev_priv, pp_ctrl_reg));
-
-	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
-				       mask, value, 5000))
-		drm_err(&dev_priv->drm,
-			"Panel status timeout: status %08x control %08x\n",
-			intel_de_read(dev_priv, pp_stat_reg),
-			intel_de_read(dev_priv, pp_ctrl_reg));
-
-	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
-}
-
-static void wait_panel_on(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-
-	drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
-	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
-}
-
-static void wait_panel_off(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-
-	drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
-	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
-}
-
-static void wait_panel_power_cycle(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-	ktime_t panel_power_on_time;
-	s64 panel_power_off_duration;
-
-	drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
-
-	/* take the difference of currrent time and panel power off time
-	 * and then make panel wait for t11_t12 if needed. */
-	panel_power_on_time = ktime_get_boottime();
-	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
-
-	/* When we disable the VDD override bit last we have to do the manual
-	 * wait. */
-	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
-		wait_remaining_ms_from_jiffies(jiffies,
-				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);
-
-	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
-}
-
-static void wait_backlight_on(struct intel_dp *intel_dp)
-{
-	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
-				       intel_dp->backlight_on_delay);
-}
-
-static void edp_wait_backlight_off(struct intel_dp *intel_dp)
-{
-	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
-				       intel_dp->backlight_off_delay);
-}
-
-/* Read the current pp_control value, unlocking the register if it
- * is locked
- */
-
-static  u32 ilk_get_pp_control(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	u32 control;
-
-	lockdep_assert_held(&dev_priv->pps_mutex);
-
-	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
-	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
-			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
-		control &= ~PANEL_UNLOCK_MASK;
-		control |= PANEL_UNLOCK_REGS;
-	}
-	return control;
-}
-
-/*
- * Must be paired with edp_panel_vdd_off().
- * Must hold pps_mutex around the whole on/off sequence.
- * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
- */
-static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	u32 pp;
-	i915_reg_t pp_stat_reg, pp_ctrl_reg;
-	bool need_to_disable = !intel_dp->want_panel_vdd;
-
-	lockdep_assert_held(&dev_priv->pps_mutex);
-
-	if (!intel_dp_is_edp(intel_dp))
-		return false;
-
-	cancel_delayed_work(&intel_dp->panel_vdd_work);
-	intel_dp->want_panel_vdd = true;
-
-	if (edp_have_panel_vdd(intel_dp))
-		return need_to_disable;
-
-	intel_display_power_get(dev_priv,
-				intel_aux_power_domain(dig_port));
-
-	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
-		    dig_port->base.base.base.id,
-		    dig_port->base.base.name);
-
-	if (!edp_have_panel_power(intel_dp))
-		wait_panel_power_cycle(intel_dp);
-
-	pp = ilk_get_pp_control(intel_dp);
-	pp |= EDP_FORCE_VDD;
-
-	pp_stat_reg = _pp_stat_reg(intel_dp);
-	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
-	intel_de_write(dev_priv, pp_ctrl_reg, pp);
-	intel_de_posting_read(dev_priv, pp_ctrl_reg);
-	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
-		    intel_de_read(dev_priv, pp_stat_reg),
-		    intel_de_read(dev_priv, pp_ctrl_reg));
-	/*
-	 * If the panel wasn't on, delay before accessing aux channel
-	 */
-	if (!edp_have_panel_power(intel_dp)) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "[ENCODER:%d:%s] panel power wasn't enabled\n",
-			    dig_port->base.base.base.id,
-			    dig_port->base.base.name);
-		msleep(intel_dp->panel_power_up_delay);
-	}
-
-	return need_to_disable;
-}
-
-/*
- * Must be paired with intel_edp_panel_vdd_off() or
- * intel_edp_panel_off().
- * Nested calls to these functions are not allowed since
- * we drop the lock. Caller must use some higher level
- * locking to prevent nested calls from other threads.
- */
-void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
-{
-	intel_wakeref_t wakeref;
-	bool vdd;
-
-	if (!intel_dp_is_edp(intel_dp))
-		return;
-
-	vdd = false;
-	with_pps_lock(intel_dp, wakeref)
-		vdd = edp_panel_vdd_on(intel_dp);
-	I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
-			dp_to_dig_port(intel_dp)->base.base.base.id,
-			dp_to_dig_port(intel_dp)->base.base.name);
-}
-
-static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port =
-		dp_to_dig_port(intel_dp);
-	u32 pp;
-	i915_reg_t pp_stat_reg, pp_ctrl_reg;
-
-	lockdep_assert_held(&dev_priv->pps_mutex);
-
-	drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);
-
-	if (!edp_have_panel_vdd(intel_dp))
-		return;
-
-	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
-		    dig_port->base.base.base.id,
-		    dig_port->base.base.name);
-
-	pp = ilk_get_pp_control(intel_dp);
-	pp &= ~EDP_FORCE_VDD;
-
-	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-	pp_stat_reg = _pp_stat_reg(intel_dp);
-
-	intel_de_write(dev_priv, pp_ctrl_reg, pp);
-	intel_de_posting_read(dev_priv, pp_ctrl_reg);
-
-	/* Make sure sequencer is idle before allowing subsequent activity */
-	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
-		    intel_de_read(dev_priv, pp_stat_reg),
-		    intel_de_read(dev_priv, pp_ctrl_reg));
-
-	if ((pp & PANEL_POWER_ON) == 0)
-		intel_dp->panel_power_off_time = ktime_get_boottime();
-
-	intel_display_power_put_unchecked(dev_priv,
-					  intel_aux_power_domain(dig_port));
-}
-
-static void edp_panel_vdd_work(struct work_struct *__work)
-{
-	struct intel_dp *intel_dp =
-		container_of(to_delayed_work(__work),
-			     struct intel_dp, panel_vdd_work);
-	intel_wakeref_t wakeref;
-
-	with_pps_lock(intel_dp, wakeref) {
-		if (!intel_dp->want_panel_vdd)
-			edp_panel_vdd_off_sync(intel_dp);
-	}
-}
-
-static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
-{
-	unsigned long delay;
-
-	/*
-	 * Queue the timer to fire a long time from now (relative to the power
-	 * down delay) to keep the panel power up across a sequence of
-	 * operations.
-	 */
-	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
-	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
-}
-
-/*
- * Must be paired with edp_panel_vdd_on().
- * Must hold pps_mutex around the whole on/off sequence.
- * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
- */
-static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
-	lockdep_assert_held(&dev_priv->pps_mutex);
-
-	if (!intel_dp_is_edp(intel_dp))
-		return;
-
-	I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
-			dp_to_dig_port(intel_dp)->base.base.base.id,
-			dp_to_dig_port(intel_dp)->base.base.name);
-
-	intel_dp->want_panel_vdd = false;
-
-	if (sync)
-		edp_panel_vdd_off_sync(intel_dp);
-	else
-		edp_panel_vdd_schedule_off(intel_dp);
-}
-
-static void edp_panel_on(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	u32 pp;
-	i915_reg_t pp_ctrl_reg;
-
-	lockdep_assert_held(&dev_priv->pps_mutex);
-
-	if (!intel_dp_is_edp(intel_dp))
-		return;
-
-	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
-		    dp_to_dig_port(intel_dp)->base.base.base.id,
-		    dp_to_dig_port(intel_dp)->base.base.name);
-
-	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
-		     "[ENCODER:%d:%s] panel power already on\n",
-		     dp_to_dig_port(intel_dp)->base.base.base.id,
-		     dp_to_dig_port(intel_dp)->base.base.name))
-		return;
-
-	wait_panel_power_cycle(intel_dp);
-
-	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-	pp = ilk_get_pp_control(intel_dp);
-	if (IS_GEN(dev_priv, 5)) {
-		/* ILK workaround: disable reset around power sequence */
-		pp &= ~PANEL_POWER_RESET;
-		intel_de_write(dev_priv, pp_ctrl_reg, pp);
-		intel_de_posting_read(dev_priv, pp_ctrl_reg);
-	}
-
-	pp |= PANEL_POWER_ON;
-	if (!IS_GEN(dev_priv, 5))
-		pp |= PANEL_POWER_RESET;
-
-	intel_de_write(dev_priv, pp_ctrl_reg, pp);
-	intel_de_posting_read(dev_priv, pp_ctrl_reg);
-
-	wait_panel_on(intel_dp);
-	intel_dp->last_power_on = jiffies;
-
-	if (IS_GEN(dev_priv, 5)) {
-		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
-		intel_de_write(dev_priv, pp_ctrl_reg, pp);
-		intel_de_posting_read(dev_priv, pp_ctrl_reg);
-	}
-}
-
-void intel_edp_panel_on(struct intel_dp *intel_dp)
-{
-	intel_wakeref_t wakeref;
-
-	if (!intel_dp_is_edp(intel_dp))
-		return;
-
-	with_pps_lock(intel_dp, wakeref)
-		edp_panel_on(intel_dp);
-}
-
-
-static void edp_panel_off(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	u32 pp;
-	i915_reg_t pp_ctrl_reg;
-
-	lockdep_assert_held(&dev_priv->pps_mutex);
-
-	if (!intel_dp_is_edp(intel_dp))
-		return;
-
-	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
-		    dig_port->base.base.base.id, dig_port->base.base.name);
-
-	drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
-		 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
-		 dig_port->base.base.base.id, dig_port->base.base.name);
-
-	pp = ilk_get_pp_control(intel_dp);
-	/* We need to switch off panel power _and_ force vdd, for otherwise some
-	 * panels get very unhappy and cease to work. */
-	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
-		EDP_BLC_ENABLE);
-
-	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
-	intel_dp->want_panel_vdd = false;
-
-	intel_de_write(dev_priv, pp_ctrl_reg, pp);
-	intel_de_posting_read(dev_priv, pp_ctrl_reg);
-
-	wait_panel_off(intel_dp);
-	intel_dp->panel_power_off_time = ktime_get_boottime();
-
-	/* We got a reference when we enabled the VDD. */
-	intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
-}
-
-void intel_edp_panel_off(struct intel_dp *intel_dp)
-{
-	intel_wakeref_t wakeref;
-
-	if (!intel_dp_is_edp(intel_dp))
-		return;
-
-	with_pps_lock(intel_dp, wakeref)
-		edp_panel_off(intel_dp);
-}
-
-/* Enable backlight in the panel power control. */
-static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	intel_wakeref_t wakeref;
-
-	/*
-	 * If we enable the backlight right away following a panel power
-	 * on, we may see slight flicker as the panel syncs with the eDP
-	 * link.  So delay a bit to make sure the image is solid before
-	 * allowing it to appear.
-	 */
-	wait_backlight_on(intel_dp);
-
-	with_pps_lock(intel_dp, wakeref) {
-		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-		u32 pp;
-
-		pp = ilk_get_pp_control(intel_dp);
-		pp |= EDP_BLC_ENABLE;
-
-		intel_de_write(dev_priv, pp_ctrl_reg, pp);
-		intel_de_posting_read(dev_priv, pp_ctrl_reg);
+		if (IS_CHERRYVIEW(dev_priv))
+			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
+		else
+			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
 	}
 }
 
 /* Enable backlight PWM and backlight PP control. */
 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
 			    const struct drm_connector_state *conn_state)
@@ -3398,31 +1945,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
 	drm_dbg_kms(&i915->drm, "\n");
 
 	intel_panel_enable_backlight(crtc_state, conn_state);
-	_intel_edp_backlight_on(intel_dp);
-}
-
-/* Disable backlight in the panel power control. */
-static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	intel_wakeref_t wakeref;
-
-	if (!intel_dp_is_edp(intel_dp))
-		return;
-
-	with_pps_lock(intel_dp, wakeref) {
-		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-		u32 pp;
-
-		pp = ilk_get_pp_control(intel_dp);
-		pp &= ~EDP_BLC_ENABLE;
-
-		intel_de_write(dev_priv, pp_ctrl_reg, pp);
-		intel_de_posting_read(dev_priv, pp_ctrl_reg);
-	}
-
-	intel_dp->last_backlight_off = jiffies;
-	edp_wait_backlight_off(intel_dp);
+	intel_pps_backlight_on(intel_dp);
 }
 
 /* Disable backlight PP control and backlight PWM. */
@@ -3436,37 +1959,10 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
 
 	drm_dbg_kms(&i915->drm, "\n");
 
-	_intel_edp_backlight_off(intel_dp);
+	intel_pps_backlight_off(intel_dp);
 	intel_panel_disable_backlight(old_conn_state);
 }
 
-/*
- * Hook for controlling the panel power control backlight through the bl_power
- * sysfs attribute. Take care to handle multiple calls.
- */
-static void intel_edp_backlight_power(struct intel_connector *connector,
-				      bool enable)
-{
-	struct drm_i915_private *i915 = to_i915(connector->base.dev);
-	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	intel_wakeref_t wakeref;
-	bool is_enabled;
-
-	is_enabled = false;
-	with_pps_lock(intel_dp, wakeref)
-		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
-	if (is_enabled == enable)
-		return;
-
-	drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
-		    enable ? "enable" : "disable");
-
-	if (enable)
-		_intel_edp_backlight_on(intel_dp);
-	else
-		_intel_edp_backlight_off(intel_dp);
-}
-
 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -3583,6 +2079,29 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
 			    enable ? "enable" : "disable");
 }
 
+static void
+intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	u8 oui[] = { 0x00, 0xaa, 0x01 };
+	u8 buf[3] = { 0 };
+
+	/*
+	 * During driver init, we want to be careful and avoid changing the source OUI if it's
+	 * already set to what we want, so as to avoid clearing any state by accident
+	 */
+	if (careful) {
+		if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
+			drm_err(&i915->drm, "Failed to read source OUI\n");
+
+		if (memcmp(oui, buf, sizeof(oui)) == 0)
+			return;
+	}
+
+	if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
+		drm_err(&i915->drm, "Failed to write source OUI\n");
+}
+
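The "careful" path above is a read-compare-write guard: the OUI is rewritten only when it differs from what we want, and a failed read leaves buf[] zeroed so the write still happens. A minimal sketch of the same idiom generalized for any small DPCD block — illustration only, not part of this patch, and the helper name is hypothetical:

static int dpcd_write_if_changed(struct drm_dp_aux *aux, unsigned int offset,
				 const u8 *val, size_t len)
{
	u8 cur[16] = {};

	if (len > sizeof(cur))
		return -EINVAL;

	/* A failed read leaves cur[] zeroed, which forces the write below */
	drm_dp_dpcd_read(aux, offset, cur, len);

	/* Already programmed: skip the write to avoid clobbering sink state */
	if (memcmp(cur, val, len) == 0)
		return 0;

	return drm_dp_dpcd_write(aux, offset, val, len) < 0 ? -EIO : 0;
}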
 /* If the device supports it, try to set the power state appropriately */
 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
 {
@@ -3604,6 +2123,10 @@ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
 
 		lspcon_resume(dp_to_dig_port(intel_dp));
 
+		/* Write the source OUI as early as possible */
+		if (intel_dp_is_edp(intel_dp))
+			intel_edp_init_source_oui(intel_dp, false);
+
 		/*
 		 * When turning on, we need to retry for 1ms to give the sink
 		 * time to wake up.
@@ -3860,10 +2383,12 @@ static void intel_disable_dp(struct intel_atomic_state *state,
 
 	/* Make sure the panel is off before trying to change the mode. But also
 	 * ensure that we have vdd while we switch off the panel. */
-	intel_edp_panel_vdd_on(intel_dp);
+	intel_pps_vdd_on(intel_dp);
 	intel_edp_backlight_off(old_conn_state);
 	intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
-	intel_edp_panel_off(intel_dp);
+	intel_pps_off(intel_dp);
+	intel_dp->frl.is_trained = false;
+	intel_dp->frl.trained_rate_gbps = 0;
 }
 
 static void g4x_disable_dp(struct intel_atomic_state *state,
@@ -3959,6 +2484,280 @@ cpt_set_link_train(struct intel_dp *intel_dp,
 	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 }
 
+static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+	/* Clear the cached register set to avoid using stale values */
+	memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
+
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
+			     intel_dp->pcon_dsc_dpcd,
+			     sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
+		drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
+			DP_PCON_DSC_ENCODER);
+
+	drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
+		    (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
+}
+
+static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
+{
+	int bw_gbps[] = {9, 18, 24, 32, 40, 48};
+	int i;
+
+	for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
+		if (frl_bw_mask & (1 << i))
+			return bw_gbps[i];
+	}
+	return 0;
+}
+
+static int intel_dp_pcon_set_frl_mask(int max_frl)
+{
+	switch (max_frl) {
+	case 48:
+		return DP_PCON_FRL_BW_MASK_48GBPS;
+	case 40:
+		return DP_PCON_FRL_BW_MASK_40GBPS;
+	case 32:
+		return DP_PCON_FRL_BW_MASK_32GBPS;
+	case 24:
+		return DP_PCON_FRL_BW_MASK_24GBPS;
+	case 18:
+		return DP_PCON_FRL_BW_MASK_18GBPS;
+	case 9:
+		return DP_PCON_FRL_BW_MASK_9GBPS;
+	}
+
+	return 0;
+}
+
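Since the DP_PCON_FRL_BW_MASK_* values are single-bit flags ordered by rate (matching the bw_gbps[] indexing above), the two helpers are inverses of each other. A hypothetical self-check, shown only to document that relationship:

/* set_frl_mask() picks the bit; get_frl_mask() recovers the rate */
static void frl_mask_selftest(void)
{
	static const int rates[] = { 9, 18, 24, 32, 40, 48 };
	int i;

	for (i = 0; i < ARRAY_SIZE(rates); i++)
		WARN_ON(intel_dp_pcon_get_frl_mask(intel_dp_pcon_set_frl_mask(rates[i])) !=
			rates[i]);
}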
+static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
+{
+	struct intel_connector *intel_connector = intel_dp->attached_connector;
+	struct drm_connector *connector = &intel_connector->base;
+	int max_frl_rate;
+	int max_lanes, rate_per_lane;
+	int max_dsc_lanes, dsc_rate_per_lane;
+
+	max_lanes = connector->display_info.hdmi.max_lanes;
+	rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
+	max_frl_rate = max_lanes * rate_per_lane;
+
+	if (connector->display_info.hdmi.dsc_cap.v_1p2) {
+		max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
+		dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
+		if (max_dsc_lanes && dsc_rate_per_lane)
+			max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
+	}
+
+	return max_frl_rate;
+}
+
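For example (hypothetical sink): an HDMI 2.1 sink advertising 4 FRL lanes at 12 Gbps per lane yields 48 Gbps, but if its DSC 1.2 block is limited to 4 lanes at 10 Gbps per lane, the usable maximum becomes min(48, 40) = 40 Gbps.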
+static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
+{
+#define PCON_EXTENDED_TRAIN_MODE true
+#define PCON_CONCURRENT_MODE true
+#define PCON_SEQUENTIAL_MODE (!PCON_CONCURRENT_MODE)
+#define PCON_NORMAL_TRAIN_MODE (!PCON_EXTENDED_TRAIN_MODE)
+#define TIMEOUT_FRL_READY_MS 500
+#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000
+
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
+	u8 max_frl_bw_mask = 0, frl_trained_mask;
+	bool is_active;
+
+	ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
+	if (ret < 0)
+		return ret;
+
+	max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
+	drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
+
+	max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
+	drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);
+
+	max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);
+
+	if (max_frl_bw <= 0)
+		return -EINVAL;
+
+	ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
+	if (ret < 0)
+		return ret;
+	/* Wait for PCON to be FRL Ready */
+	wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux), TIMEOUT_FRL_READY_MS);
+
+	if (!is_active)
+		return -ETIMEDOUT;
+
+	max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
+	ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, PCON_SEQUENTIAL_MODE);
+	if (ret < 0)
+		return ret;
+	ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, PCON_NORMAL_TRAIN_MODE);
+	if (ret < 0)
+		return ret;
+	ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
+	if (ret < 0)
+		return ret;
+	/*
+	 * Wait for FRL training to complete by checking that the HDMI
+	 * link is up and active.
+	 */
+	wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux),
+		 TIMEOUT_HDMI_LINK_ACTIVE_MS);
+
+	if (!is_active)
+		return -ETIMEDOUT;
+
+	/* Verify HDMI Link configuration shows FRL Mode */
+	if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
+	    DP_PCON_HDMI_MODE_FRL) {
+		drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
+		return -EINVAL;
+	}
+	drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);
+
+	intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
+	intel_dp->frl.is_trained = true;
+	drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);
+
+	return 0;
+}
+
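Both waits above use i915's wait_for() poll-with-timeout macro. A simplified open-coded equivalent of the first wait, illustrative only (the real macro also applies backoff between reads and re-checks the condition after the deadline):

static int poll_frl_ready(struct drm_dp_aux *aux, unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		if (drm_dp_pcon_is_frl_ready(aux))
			return 0;
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		/* Sleep briefly between AUX reads instead of busy-waiting */
		usleep_range(1000, 2000);
	}
}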
+static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
+{
+	if (drm_dp_is_branch(intel_dp->dpcd) &&
+	    intel_dp->has_hdmi_sink &&
+	    intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
+		return true;
+
+	return false;
+}
+
+void intel_dp_check_frl_training(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	/* Always go for FRL training if supported */
+	if (!intel_dp_is_hdmi_2_1_sink(intel_dp) ||
+	    intel_dp->frl.is_trained)
+		return;
+
+	if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
+		int ret, mode;
+
+		drm_dbg(&dev_priv->drm, "Couldnt set FRL mode, continuing with TMDS mode\n");
+		ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
+		mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
+
+		if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
+			drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
+	} else {
+		drm_dbg(&dev_priv->drm, "FRL training Completed\n");
+	}
+}
+
+static int
+intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
+{
+	int vactive = crtc_state->hw.adjusted_mode.vdisplay;
+
+	return intel_hdmi_dsc_get_slice_height(vactive);
+}
+
+static int
+intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
+			     const struct intel_crtc_state *crtc_state)
+{
+	struct intel_connector *intel_connector = intel_dp->attached_connector;
+	struct drm_connector *connector = &intel_connector->base;
+	int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
+	int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
+	int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
+	int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
+
+	return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
+					     pcon_max_slice_width,
+					     hdmi_max_slices, hdmi_throughput);
+}
+
+static int
+intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
+			  const struct intel_crtc_state *crtc_state,
+			  int num_slices, int slice_width)
+{
+	struct intel_connector *intel_connector = intel_dp->attached_connector;
+	struct drm_connector *connector = &intel_connector->base;
+	int output_format = crtc_state->output_format;
+	bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
+	int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
+	int hdmi_max_chunk_bytes =
+		connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
+
+	return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
+				      num_slices, output_format, hdmi_all_bpp,
+				      hdmi_max_chunk_bytes);
+}
+
+void
+intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
+			    const struct intel_crtc_state *crtc_state)
+{
+	u8 pps_param[6];
+	int slice_height;
+	int slice_width;
+	int num_slices;
+	int bits_per_pixel;
+	int ret;
+	struct intel_connector *intel_connector = intel_dp->attached_connector;
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	struct drm_connector *connector;
+	bool hdmi_is_dsc_1_2;
+
+	if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
+		return;
+
+	if (!intel_connector)
+		return;
+	connector = &intel_connector->base;
+	hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;
+
+	if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
+	    !hdmi_is_dsc_1_2)
+		return;
+
+	slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
+	if (!slice_height)
+		return;
+
+	num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
+	if (!num_slices)
+		return;
+
+	slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
+				   num_slices);
+
+	bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
+						   num_slices, slice_width);
+	if (!bits_per_pixel)
+		return;
+
+	pps_param[0] = slice_height & 0xFF;
+	pps_param[1] = slice_height >> 8;
+	pps_param[2] = slice_width & 0xFF;
+	pps_param[3] = slice_width >> 8;
+	pps_param[4] = bits_per_pixel & 0xFF;
+	pps_param[5] = (bits_per_pixel >> 8) & 0x3;
+
+	ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
+	if (ret < 0)
+		drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
+}
+
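The six pps_param bytes above pack slice height, slice width and bpp LSB-first, with bpp capped to 10 bits. A sketch of the inverse transform (hypothetical helper, shown only to document the layout implied by the shifts and masks above):

static void pps_param_unpack(const u8 p[6], int *slice_height,
			     int *slice_width, int *bits_per_pixel)
{
	/* Each field is little-endian; bpp carries only 10 significant bits */
	*slice_height = p[0] | (p[1] << 8);
	*slice_width = p[2] | (p[3] << 8);
	*bits_per_pixel = p[4] | ((p[5] & 0x3) << 8);
}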
 static void
 g4x_set_link_train(struct intel_dp *intel_dp,
 		   const struct intel_crtc_state *crtc_state,
@@ -4044,12 +2843,42 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
 			    enableddisabled(intel_dp->dfp.ycbcr_444_to_420));
 
 	tmp = 0;
+	if (intel_dp->dfp.rgb_to_ycbcr) {
+		bool bt2020, bt709;
 
-	if (drm_dp_dpcd_writeb(&intel_dp->aux,
-			       DP_PROTOCOL_CONVERTER_CONTROL_2, tmp) <= 0)
+		/*
+		 * FIXME: If userspace selects BT2020 or BT709 but the PCON
+		 * supports RGB->YCbCr conversion only for BT601, we currently
+		 * fall back to BT601 as the default.
+		 */
+		tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE;
+
+		bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+								   intel_dp->downstream_ports,
+								   DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
+		bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+								  intel_dp->downstream_ports,
+								  DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
+		switch (crtc_state->infoframes.vsc.colorimetry) {
+		case DP_COLORIMETRY_BT2020_RGB:
+		case DP_COLORIMETRY_BT2020_YCC:
+			if (bt2020)
+				tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE;
+			break;
+		case DP_COLORIMETRY_BT709_YCC:
+		case DP_COLORIMETRY_XVYCC_709:
+			if (bt709)
+				tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
 		drm_dbg_kms(&i915->drm,
-			    "Failed to set protocol converter YCbCr 4:2:2 conversion mode to %s\n",
-			    enableddisabled(false));
+			   "Failed to set protocol converter RGB->YCbCr conversion mode to %s\n",
+			   enableddisabled(tmp ? true : false));
 }
 
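Concretely: if userspace requests DP_COLORIMETRY_BT2020_YCC but the PCON only advertises DP_DS_HDMI_BT601_RGB_YCBCR_CONV, bt2020 evaluates to false, the switch leaves tmp untouched, and the converter is programmed with the BT601 default set before the switch.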
 static void intel_enable_dp(struct intel_atomic_state *state,
@@ -4067,15 +2896,15 @@ static void intel_enable_dp(struct intel_atomic_state *state,
 	if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
 		return;
 
-	with_pps_lock(intel_dp, wakeref) {
+	with_intel_pps_lock(intel_dp, wakeref) {
 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-			vlv_init_panel_power_sequencer(encoder, pipe_config);
+			vlv_pps_init(encoder, pipe_config);
 
 		intel_dp_enable_port(intel_dp, pipe_config);
 
-		edp_panel_vdd_on(intel_dp);
-		edp_panel_on(intel_dp);
-		edp_panel_vdd_off(intel_dp, true);
+		intel_pps_vdd_on_unlocked(intel_dp);
+		intel_pps_on_unlocked(intel_dp);
+		intel_pps_vdd_off_unlocked(intel_dp, true);
 	}
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -4090,152 +2919,48 @@ static void intel_enable_dp(struct intel_atomic_state *state,
 
 	intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
 	intel_dp_configure_protocol_converter(intel_dp, pipe_config);
-	intel_dp_start_link_train(intel_dp, pipe_config);
-	intel_dp_stop_link_train(intel_dp, pipe_config);
-
-	if (pipe_config->has_audio) {
-		drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
-			pipe_name(pipe));
-		intel_audio_codec_enable(encoder, pipe_config, conn_state);
-	}
-}
-
-static void g4x_enable_dp(struct intel_atomic_state *state,
-			  struct intel_encoder *encoder,
-			  const struct intel_crtc_state *pipe_config,
-			  const struct drm_connector_state *conn_state)
-{
-	intel_enable_dp(state, encoder, pipe_config, conn_state);
-	intel_edp_backlight_on(pipe_config, conn_state);
-}
-
-static void vlv_enable_dp(struct intel_atomic_state *state,
-			  struct intel_encoder *encoder,
-			  const struct intel_crtc_state *pipe_config,
-			  const struct drm_connector_state *conn_state)
-{
-	intel_edp_backlight_on(pipe_config, conn_state);
-}
-
-static void g4x_pre_enable_dp(struct intel_atomic_state *state,
-			      struct intel_encoder *encoder,
-			      const struct intel_crtc_state *pipe_config,
-			      const struct drm_connector_state *conn_state)
-{
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	enum port port = encoder->port;
-
-	intel_dp_prepare(encoder, pipe_config);
-
-	/* Only ilk+ has port A */
-	if (port == PORT_A)
-		ilk_edp_pll_on(intel_dp, pipe_config);
-}
-
-static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-	enum pipe pipe = intel_dp->pps_pipe;
-	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
-
-	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
-
-	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
-		return;
-
-	edp_panel_vdd_off_sync(intel_dp);
-
-	/*
-	 * VLV seems to get confused when multiple power sequencers
-	 * have the same port selected (even if only one has power/vdd
-	 * enabled). The failure manifests as vlv_wait_port_ready() failing
-	 * CHV on the other hand doesn't seem to mind having the same port
-	 * selected in multiple power sequencers, but let's clear the
-	 * port select always when logically disconnecting a power sequencer
-	 * from a port.
-	 */
-	drm_dbg_kms(&dev_priv->drm,
-		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
-		    pipe_name(pipe), dig_port->base.base.base.id,
-		    dig_port->base.base.name);
-	intel_de_write(dev_priv, pp_on_reg, 0);
-	intel_de_posting_read(dev_priv, pp_on_reg);
-
-	intel_dp->pps_pipe = INVALID_PIPE;
-}
-
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
-				      enum pipe pipe)
-{
-	struct intel_encoder *encoder;
-
-	lockdep_assert_held(&dev_priv->pps_mutex);
-
-	for_each_intel_dp(&dev_priv->drm, encoder) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-		drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
-			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
-			 pipe_name(pipe), encoder->base.base.id,
-			 encoder->base.name);
-
-		if (intel_dp->pps_pipe != pipe)
-			continue;
-
-		drm_dbg_kms(&dev_priv->drm,
-			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
-			    pipe_name(pipe), encoder->base.base.id,
-			    encoder->base.name);
-
-		/* make sure vdd is off before we steal it */
-		vlv_detach_power_sequencer(intel_dp);
-	}
-}
-
-static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
-					   const struct intel_crtc_state *crtc_state)
-{
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
-	lockdep_assert_held(&dev_priv->pps_mutex);
-
-	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
+	intel_dp_check_frl_training(intel_dp);
+	intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
+	intel_dp_start_link_train(intel_dp, pipe_config);
+	intel_dp_stop_link_train(intel_dp, pipe_config);
 
-	if (intel_dp->pps_pipe != INVALID_PIPE &&
-	    intel_dp->pps_pipe != crtc->pipe) {
-		/*
-		 * If another power sequencer was being used on this
-		 * port previously make sure to turn off vdd there while
-		 * we still have control of it.
-		 */
-		vlv_detach_power_sequencer(intel_dp);
+	if (pipe_config->has_audio) {
+		drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
+			pipe_name(pipe));
+		intel_audio_codec_enable(encoder, pipe_config, conn_state);
 	}
+}
 
-	/*
-	 * We may be stealing the power
-	 * sequencer from another port.
-	 */
-	vlv_steal_power_sequencer(dev_priv, crtc->pipe);
-
-	intel_dp->active_pipe = crtc->pipe;
+static void g4x_enable_dp(struct intel_atomic_state *state,
+			  struct intel_encoder *encoder,
+			  const struct intel_crtc_state *pipe_config,
+			  const struct drm_connector_state *conn_state)
+{
+	intel_enable_dp(state, encoder, pipe_config, conn_state);
+	intel_edp_backlight_on(pipe_config, conn_state);
+}
 
-	if (!intel_dp_is_edp(intel_dp))
-		return;
+static void vlv_enable_dp(struct intel_atomic_state *state,
+			  struct intel_encoder *encoder,
+			  const struct intel_crtc_state *pipe_config,
+			  const struct drm_connector_state *conn_state)
+{
+	intel_edp_backlight_on(pipe_config, conn_state);
+}
 
-	/* now it's all ours */
-	intel_dp->pps_pipe = crtc->pipe;
+static void g4x_pre_enable_dp(struct intel_atomic_state *state,
+			      struct intel_encoder *encoder,
+			      const struct intel_crtc_state *pipe_config,
+			      const struct drm_connector_state *conn_state)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	enum port port = encoder->port;
 
-	drm_dbg_kms(&dev_priv->drm,
-		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
-		    pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
-		    encoder->base.name);
+	intel_dp_prepare(encoder, pipe_config);
 
-	/* init power sequencer on this pipe and port */
-	intel_dp_init_panel_power_sequencer(intel_dp);
-	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
+	/* Only ilk+ has port A */
+	if (port == PORT_A)
+		ilk_edp_pll_on(intel_dp, pipe_config);
 }
 
 static void vlv_pre_enable_dp(struct intel_atomic_state *state,
@@ -4637,18 +3362,35 @@ ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
 	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 }
 
+static char dp_training_pattern_name(u8 train_pat)
+{
+	switch (train_pat) {
+	case DP_TRAINING_PATTERN_1:
+	case DP_TRAINING_PATTERN_2:
+	case DP_TRAINING_PATTERN_3:
+		return '0' + train_pat;
+	case DP_TRAINING_PATTERN_4:
+		return '4';
+	default:
+		MISSING_CASE(train_pat);
+		return '?';
+	}
+}
+
 void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
 				       const struct intel_crtc_state *crtc_state,
 				       u8 dp_train_pat)
 {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
 
-	if ((intel_dp_training_pattern_symbol(dp_train_pat)) !=
-	    DP_TRAINING_PATTERN_DISABLE)
+	if (train_pat != DP_TRAINING_PATTERN_DISABLE)
 		drm_dbg_kms(&dev_priv->drm,
-			    "Using DP training pattern TPS%d\n",
-			    intel_dp_training_pattern_symbol(dp_train_pat));
+			    "[ENCODER:%d:%s] Using DP training pattern TPS%c\n",
+			    encoder->base.base.id, encoder->base.name,
+			    dp_training_pattern_name(train_pat));
 
 	intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
 }
@@ -4714,15 +3456,15 @@ intel_dp_link_down(struct intel_encoder *encoder,
 		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 	}
 
-	msleep(intel_dp->panel_power_down_delay);
+	msleep(intel_dp->pps.panel_power_down_delay);
 
 	intel_dp->DP = DP;
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		intel_wakeref_t wakeref;
 
-		with_pps_lock(intel_dp, wakeref)
-			intel_dp->active_pipe = INVALID_PIPE;
+		with_intel_pps_lock(intel_dp, wakeref)
+			intel_dp->pps.active_pipe = INVALID_PIPE;
 	}
 }
 
@@ -4852,6 +3594,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		intel_dp_get_dsc_sink_cap(intel_dp);
 
+	/*
+	 * If needed, program our source OUI so we can make various Intel-specific AUX services
+	 * available (such as HDR backlight controls)
+	 */
+	intel_edp_init_source_oui(intel_dp, true);
+
 	return true;
 }
 
@@ -5758,6 +4506,17 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
 			    "Could not write test response to sink\n");
 }
 
+static void
+intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled)
+{
+	drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled);
+
+	if (esi[1] & DP_CP_IRQ) {
+		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
+		*handled = true;
+	}
+}
+
 /**
  * intel_dp_check_mst_status - service any pending MST interrupts, check link status
  * @intel_dp: Intel DP struct
@@ -5802,7 +4561,8 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
 
 		drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
 
-		drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+		intel_dp_mst_hpd_irq(intel_dp, esi, &handled);
+
 		if (!handled)
 			break;
 
@@ -5820,6 +4580,28 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
 	return link_ok;
 }
 
+static void
+intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
+{
+	bool is_active;
+	u8 buf = 0;
+
+	is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
+	if (intel_dp->frl.is_trained && !is_active) {
+		if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
+			return;
+
+		buf &= ~DP_PCON_ENABLE_HDMI_LINK;
+		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
+			return;
+
+		drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux,
+						      &intel_dp->attached_connector->base);
+
+		/* Restart FRL training or fall back to TMDS mode */
+		intel_dp_check_frl_training(intel_dp);
+	}
+}
+
 static bool
 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
 {
@@ -5993,6 +4775,8 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
 		    !intel_dp_mst_is_master_trans(crtc_state))
 			continue;
 
+		intel_dp_check_frl_training(intel_dp);
+		intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
 		intel_dp_start_link_train(intel_dp, crtc_state);
 		intel_dp_stop_link_train(intel_dp, crtc_state);
 		break;
@@ -6102,7 +4886,7 @@ static int intel_dp_do_phy_test(struct intel_encoder *encoder,
 	return 0;
 }
 
-static void intel_dp_phy_test(struct intel_encoder *encoder)
+void intel_dp_phy_test(struct intel_encoder *encoder)
 {
 	struct drm_modeset_acquire_ctx ctx;
 	int ret;
@@ -6184,7 +4968,7 @@ intel_dp_hotplug(struct intel_encoder *encoder,
 	return state;
 }
 
-static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
+static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 	u8 val;
@@ -6208,6 +4992,30 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
 		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
 }
 
+static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	u8 val;
+
+	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+		return;
+
+	if (drm_dp_dpcd_readb(&intel_dp->aux,
+			      DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
+		drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
+		return;
+	}
+
+	if (drm_dp_dpcd_writeb(&intel_dp->aux,
+			       DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
+		drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
+		return;
+	}
+
+	if (val & HDMI_LINK_STATUS_CHANGED)
+		intel_dp_handle_hdmi_link_status_change(intel_dp);
+}
+
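The read-then-write-back sequence above is the usual DPCD IRQ-vector acknowledge idiom: writing back exactly the bits that were read clears only the events we have already seen, so events that arrive between the read and the write are not lost. A hypothetical generalization, as a sketch rather than a proposed helper:

static u8 dpcd_read_ack_irq_vector(struct drm_dp_aux *aux, unsigned int reg)
{
	u8 val = 0;

	if (drm_dp_dpcd_readb(aux, reg, &val) != 1 || !val)
		return 0;

	/* Write back exactly what we read so newer events are preserved */
	if (drm_dp_dpcd_writeb(aux, reg, val) != 1)
		return 0;

	return val;
}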
 /*
  * According to DP spec
  * 5.1.2:
@@ -6247,7 +5055,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
 		return false;
 	}
 
-	intel_dp_check_service_irq(intel_dp);
+	intel_dp_check_device_service_irq(intel_dp);
+	intel_dp_check_link_service_irq(intel_dp);
 
 	/* Handle CEC interrupts, if any */
 	drm_dp_cec_irq(&intel_dp->aux);
@@ -6467,13 +5276,20 @@ intel_dp_update_dfp(struct intel_dp *intel_dp,
 						 intel_dp->downstream_ports,
 						 edid);
 
+	intel_dp->dfp.pcon_max_frl_bw =
+		drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
+					   intel_dp->downstream_ports);
+
 	drm_dbg_kms(&i915->drm,
-		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d\n",
+		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
 		    connector->base.base.id, connector->base.name,
 		    intel_dp->dfp.max_bpc,
 		    intel_dp->dfp.max_dotclock,
 		    intel_dp->dfp.min_tmds_clock,
-		    intel_dp->dfp.max_tmds_clock);
+		    intel_dp->dfp.max_tmds_clock,
+		    intel_dp->dfp.pcon_max_frl_bw);
+
+	intel_dp_get_pcon_dsc_cap(intel_dp);
 }
 
 static void
@@ -6481,7 +5297,7 @@ intel_dp_update_420(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 	struct intel_connector *connector = intel_dp->attached_connector;
-	bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420;
+	bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;
 
 	/* No YCbCr output support on gmch platforms */
 	if (HAS_GMCH(i915))
@@ -6503,14 +5319,26 @@ intel_dp_update_420(struct intel_dp *intel_dp)
 		dp_to_dig_port(intel_dp)->lspcon.active ||
 		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
 							intel_dp->downstream_ports);
+	rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+								 intel_dp->downstream_ports,
+								 DP_DS_HDMI_BT601_RGB_YCBCR_CONV |
+								 DP_DS_HDMI_BT709_RGB_YCBCR_CONV |
+								 DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
 
 	if (INTEL_GEN(i915) >= 11) {
+		/* Let PCON convert from RGB->YCbCr if possible */
+		if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
+			intel_dp->dfp.rgb_to_ycbcr = true;
+			intel_dp->dfp.ycbcr_444_to_420 = true;
+			connector->base.ycbcr_420_allowed = true;
+		} else {
 		/* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
-		intel_dp->dfp.ycbcr_444_to_420 =
-			ycbcr_444_to_420 && !ycbcr_420_passthrough;
+			intel_dp->dfp.ycbcr_444_to_420 =
+				ycbcr_444_to_420 && !ycbcr_420_passthrough;
 
-		connector->base.ycbcr_420_allowed =
-			!is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
+			connector->base.ycbcr_420_allowed =
+				!is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
+		}
 	} else {
 		/* 4:4:4->4:2:0 conversion is the only way */
 		intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;
@@ -6519,8 +5347,9 @@ intel_dp_update_420(struct intel_dp *intel_dp)
 	}
 
 	drm_dbg_kms(&i915->drm,
-		    "[CONNECTOR:%d:%s] YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
+		    "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
 		    connector->base.base.id, connector->base.name,
+		    yesno(intel_dp->dfp.rgb_to_ycbcr),
 		    yesno(connector->base.ycbcr_420_allowed),
 		    yesno(intel_dp->dfp.ycbcr_444_to_420));
 }
@@ -6544,7 +5373,6 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
 	}
 
 	drm_dp_cec_set_edid(&intel_dp->aux, edid);
-	intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
 }
 
 static void
@@ -6558,13 +5386,14 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 
 	intel_dp->has_hdmi_sink = false;
 	intel_dp->has_audio = false;
-	intel_dp->edid_quirks = 0;
 
 	intel_dp->dfp.max_bpc = 0;
 	intel_dp->dfp.max_dotclock = 0;
 	intel_dp->dfp.min_tmds_clock = 0;
 	intel_dp->dfp.max_tmds_clock = 0;
 
+	intel_dp->dfp.pcon_max_frl_bw = 0;
+
 	intel_dp->dfp.ycbcr_444_to_420 = false;
 	connector->base.ycbcr_420_allowed = false;
 }
@@ -6670,7 +5499,7 @@ intel_dp_detect(struct drm_connector *connector,
 	    to_intel_connector(connector)->detect_edid)
 		status = connector_status_connected;
 
-	intel_dp_check_service_irq(intel_dp);
+	intel_dp_check_device_service_irq(intel_dp);
 
 out:
 	if (status != connector_status_connected && !intel_dp->is_mst)
@@ -6723,6 +5552,10 @@ static int intel_dp_get_modes(struct drm_connector *connector)
 	edid = intel_connector->detect_edid;
 	if (edid) {
 		int ret = intel_connector_update_modes(connector, edid);
+
+		if (intel_vrr_is_capable(connector))
+			drm_connector_set_vrr_capable_property(connector,
+							       true);
 		if (ret)
 			return ret;
 	}
@@ -6761,6 +5594,8 @@ intel_dp_connector_register(struct drm_connector *connector)
 {
 	struct drm_i915_private *i915 = to_i915(connector->dev);
 	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_lspcon *lspcon = &dig_port->lspcon;
 	int ret;
 
 	ret = intel_connector_register(connector);
@@ -6774,6 +5609,22 @@ intel_dp_connector_register(struct drm_connector *connector)
 	ret = drm_dp_aux_register(&intel_dp->aux);
 	if (!ret)
 		drm_dp_cec_register_connector(&intel_dp->aux, connector);
+
+	if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
+		return ret;
+
+	/*
+	 * TODO: Clean this up to handle lspcon init and resume in a more
+	 * efficient and streamlined fashion.
+	 */
+	if (lspcon_init(dig_port)) {
+		lspcon_detect_hdr_capability(lspcon);
+		if (lspcon->hdr_supported)
+			drm_object_attach_property(&connector->base,
+						   connector->dev->mode_config.hdr_output_metadata_property,
+						   0);
+	}
+
 	return ret;
 }
 
@@ -6793,17 +5644,8 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
 	struct intel_dp *intel_dp = &dig_port->dp;
 
 	intel_dp_mst_encoder_cleanup(dig_port);
-	if (intel_dp_is_edp(intel_dp)) {
-		intel_wakeref_t wakeref;
 
-		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
-		/*
-		 * vdd might still be enabled do to the delayed vdd off.
-		 * Make sure vdd is actually turned off here.
-		 */
-		with_pps_lock(intel_dp, wakeref)
-			edp_panel_vdd_off_sync(intel_dp);
-	}
+	intel_pps_vdd_off_sync(intel_dp);
 
 	intel_dp_aux_fini(intel_dp);
 }
@@ -6819,53 +5661,15 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
-	intel_wakeref_t wakeref;
-
-	if (!intel_dp_is_edp(intel_dp))
-		return;
 
-	/*
-	 * vdd might still be enabled do to the delayed vdd off.
-	 * Make sure vdd is actually turned off here.
-	 */
-	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
-	with_pps_lock(intel_dp, wakeref)
-		edp_panel_vdd_off_sync(intel_dp);
+	intel_pps_vdd_off_sync(intel_dp);
 }
 
 void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
-	intel_wakeref_t wakeref;
-
-	if (!intel_dp_is_edp(intel_dp))
-		return;
-
-	with_pps_lock(intel_dp, wakeref)
-		wait_panel_power_cycle(intel_dp);
-}
-
-static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-
-	lockdep_assert_held(&dev_priv->pps_mutex);
-
-	if (!edp_have_panel_vdd(intel_dp))
-		return;
-
-	/*
-	 * The VDD bit needs a power domain reference, so if the bit is
-	 * already enabled when we boot or resume, grab this reference and
-	 * schedule a vdd off, so we don't hold on to the reference
-	 * indefinitely.
-	 */
-	drm_dbg_kms(&dev_priv->drm,
-		    "VDD left on by BIOS, adjusting state tracking\n");
-	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
 
-	edp_panel_vdd_schedule_off(intel_dp);
+	intel_pps_wait_power_cycle(intel_dp);
 }
 
 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
@@ -6885,30 +5689,20 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
-	intel_wakeref_t wakeref;
 
 	if (!HAS_DDI(dev_priv))
 		intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
 
 	intel_dp->reset_link_params = true;
 
-	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
-	    !intel_dp_is_edp(intel_dp))
-		return;
-
-	with_pps_lock(intel_dp, wakeref) {
-		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-			intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		intel_wakeref_t wakeref;
 
-		if (intel_dp_is_edp(intel_dp)) {
-			/*
-			 * Reinit the power sequencer, in case BIOS did
-			 * something nasty with it.
-			 */
-			intel_dp_pps_init(intel_dp);
-			intel_edp_panel_vdd_sanitize(intel_dp);
-		}
+		with_intel_pps_lock(intel_dp, wakeref)
+			intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
 	}
+
+	intel_pps_encoder_reset(intel_dp);
 }
 
 static int intel_modeset_tile_group(struct intel_atomic_state *state,
@@ -7073,19 +5867,6 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
 	.destroy = intel_dp_encoder_destroy,
 };
 
-static bool intel_edp_have_power(struct intel_dp *intel_dp)
-{
-	intel_wakeref_t wakeref;
-	bool have_power = false;
-
-	with_pps_lock(intel_dp, wakeref) {
-		have_power = edp_have_panel_power(intel_dp) &&
-						  edp_have_panel_vdd(intel_dp);
-	}
-
-	return have_power;
-}
-
 enum irqreturn
 intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
 {
@@ -7093,7 +5874,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
 	struct intel_dp *intel_dp = &dig_port->dp;
 
 	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
-	    (long_hpd || !intel_edp_have_power(intel_dp))) {
+	    (long_hpd || !intel_pps_have_power(intel_dp))) {
 		/*
 		 * vdd off can generate a long/short pulse on eDP which
 		 * would require vdd on to handle it, and thus we
@@ -7162,7 +5943,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
 	else if (INTEL_GEN(dev_priv) >= 5)
 		drm_connector_attach_max_bpc_property(connector, 6, 12);
 
-	intel_attach_colorspace_property(connector);
+	/* Register HDMI colorspace for case of lspcon */
+	if (intel_bios_is_lspcon_present(dev_priv, port)) {
+		drm_connector_attach_content_type_property(connector);
+		intel_attach_hdmi_colorspace_property(connector);
+	} else {
+		intel_attach_dp_colorspace_property(connector);
+	}
 
 	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
 		drm_object_attach_property(&connector->base,
@@ -7181,277 +5968,9 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
 		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
 
 	}
-}
-
-static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
-{
-	intel_dp->panel_power_off_time = ktime_get_boottime();
-	intel_dp->last_power_on = jiffies;
-	intel_dp->last_backlight_off = jiffies;
-}
-
-static void
-intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	u32 pp_on, pp_off, pp_ctl;
-	struct pps_registers regs;
-
-	intel_pps_get_registers(intel_dp, &regs);
-
-	pp_ctl = ilk_get_pp_control(intel_dp);
-
-	/* Ensure PPS is unlocked */
-	if (!HAS_DDI(dev_priv))
-		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
-
-	pp_on = intel_de_read(dev_priv, regs.pp_on);
-	pp_off = intel_de_read(dev_priv, regs.pp_off);
-
-	/* Pull timing values out of registers */
-	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
-	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
-	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
-	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
-
-	if (i915_mmio_reg_valid(regs.pp_div)) {
-		u32 pp_div;
-
-		pp_div = intel_de_read(dev_priv, regs.pp_div);
-
-		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
-	} else {
-		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
-	}
-}
-
-static void
-intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
-{
-	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
-		      state_name,
-		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
-}
-
-static void
-intel_pps_verify_state(struct intel_dp *intel_dp)
-{
-	struct edp_power_seq hw;
-	struct edp_power_seq *sw = &intel_dp->pps_delays;
-
-	intel_pps_readout_hw_state(intel_dp, &hw);
-
-	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
-	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
-		DRM_ERROR("PPS state mismatch\n");
-		intel_pps_dump_state("sw", sw);
-		intel_pps_dump_state("hw", &hw);
-	}
-}
-
-static void
-intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct edp_power_seq cur, vbt, spec,
-		*final = &intel_dp->pps_delays;
-
-	lockdep_assert_held(&dev_priv->pps_mutex);
-
-	/* already initialized? */
-	if (final->t11_t12 != 0)
-		return;
-
-	intel_pps_readout_hw_state(intel_dp, &cur);
-
-	intel_pps_dump_state("cur", &cur);
-
-	vbt = dev_priv->vbt.edp.pps;
-	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
-	 * of 500ms appears to be too short. Ocassionally the panel
-	 * just fails to power back on. Increasing the delay to 800ms
-	 * seems sufficient to avoid this problem.
-	 */
-	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
-		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
-		drm_dbg_kms(&dev_priv->drm,
-			    "Increasing T12 panel delay as per the quirk to %d\n",
-			    vbt.t11_t12);
-	}
-	/* T11_T12 delay is special and actually in units of 100ms, but zero
-	 * based in the hw (so we need to add 100 ms). But the sw vbt
-	 * table multiplies it with 1000 to make it in units of 100usec,
-	 * too. */
-	vbt.t11_t12 += 100 * 10;
-
-	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
-	 * our hw here, which are all in 100usec. */
-	spec.t1_t3 = 210 * 10;
-	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
-	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
-	spec.t10 = 500 * 10;
-	/* This one is special and actually in units of 100ms, but zero
-	 * based in the hw (so we need to add 100 ms). But the sw vbt
-	 * table multiplies it with 1000 to make it in units of 100usec,
-	 * too. */
-	spec.t11_t12 = (510 + 100) * 10;
-
-	intel_pps_dump_state("vbt", &vbt);
-
-	/* Use the max of the register settings and vbt. If both are
-	 * unset, fall back to the spec limits. */
-#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
-				       spec.field : \
-				       max(cur.field, vbt.field))
-	assign_final(t1_t3);
-	assign_final(t8);
-	assign_final(t9);
-	assign_final(t10);
-	assign_final(t11_t12);
-#undef assign_final
-
-#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
-	intel_dp->panel_power_up_delay = get_delay(t1_t3);
-	intel_dp->backlight_on_delay = get_delay(t8);
-	intel_dp->backlight_off_delay = get_delay(t9);
-	intel_dp->panel_power_down_delay = get_delay(t10);
-	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
-#undef get_delay
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
-		    intel_dp->panel_power_up_delay,
-		    intel_dp->panel_power_down_delay,
-		    intel_dp->panel_power_cycle_delay);
-
-	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
-		    intel_dp->backlight_on_delay,
-		    intel_dp->backlight_off_delay);
-
-	/*
-	 * We override the HW backlight delays to 1 because we do manual waits
-	 * on them. For T8, even BSpec recommends doing it. For T9, if we
-	 * don't do this, we'll end up waiting for the backlight off delay
-	 * twice: once when we do the manual sleep, and once when we disable
-	 * the panel and wait for the PP_STATUS bit to become zero.
-	 */
-	final->t8 = 1;
-	final->t9 = 1;
-
-	/*
-	 * HW has only a 100msec granularity for t11_t12 so round it up
-	 * accordingly.
-	 */
-	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
-}
-
-static void
-intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
-					      bool force_disable_vdd)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	u32 pp_on, pp_off, port_sel = 0;
-	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
-	struct pps_registers regs;
-	enum port port = dp_to_dig_port(intel_dp)->base.port;
-	const struct edp_power_seq *seq = &intel_dp->pps_delays;
-
-	lockdep_assert_held(&dev_priv->pps_mutex);
-
-	intel_pps_get_registers(intel_dp, &regs);
-
-	/*
-	 * On some VLV machines the BIOS can leave the VDD
-	 * enabled even on power sequencers which aren't
-	 * hooked up to any port. This would mess up the
-	 * power domain tracking the first time we pick
-	 * one of these power sequencers for use since
-	 * edp_panel_vdd_on() would notice that the VDD was
-	 * already on and therefore wouldn't grab the power
-	 * domain reference. Disable VDD first to avoid this.
-	 * This also avoids spuriously turning the VDD on as
-	 * soon as the new power sequencer gets initialized.
-	 */
-	if (force_disable_vdd) {
-		u32 pp = ilk_get_pp_control(intel_dp);
-
-		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
-			 "Panel power already on\n");
-
-		if (pp & EDP_FORCE_VDD)
-			drm_dbg_kms(&dev_priv->drm,
-				    "VDD already on, disabling first\n");
-
-		pp &= ~EDP_FORCE_VDD;
-
-		intel_de_write(dev_priv, regs.pp_ctrl, pp);
-	}
-
-	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
-		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
-	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
-		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
-
-	/* Haswell doesn't have any port selection bits for the panel
-	 * power sequencer any more. */
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		port_sel = PANEL_PORT_SELECT_VLV(port);
-	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
-		switch (port) {
-		case PORT_A:
-			port_sel = PANEL_PORT_SELECT_DPA;
-			break;
-		case PORT_C:
-			port_sel = PANEL_PORT_SELECT_DPC;
-			break;
-		case PORT_D:
-			port_sel = PANEL_PORT_SELECT_DPD;
-			break;
-		default:
-			MISSING_CASE(port);
-			break;
-		}
-	}
-
-	pp_on |= port_sel;
-
-	intel_de_write(dev_priv, regs.pp_on, pp_on);
-	intel_de_write(dev_priv, regs.pp_off, pp_off);
-
-	/*
-	 * Compute the divisor for the pp clock, simply match the Bspec formula.
-	 */
-	if (i915_mmio_reg_valid(regs.pp_div)) {
-		intel_de_write(dev_priv, regs.pp_div,
-			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
-	} else {
-		u32 pp_ctl;
-
-		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
-		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
-		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
-		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
-	}
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
-		    intel_de_read(dev_priv, regs.pp_on),
-		    intel_de_read(dev_priv, regs.pp_off),
-		    i915_mmio_reg_valid(regs.pp_div) ?
-		    intel_de_read(dev_priv, regs.pp_div) :
-		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
-}
 
-static void intel_dp_pps_init(struct intel_dp *intel_dp)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		vlv_initial_power_sequencer_setup(intel_dp);
-	} else {
-		intel_dp_init_panel_power_sequencer(intel_dp);
-		intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
-	}
+	if (HAS_VRR(dev_priv))
+		drm_connector_attach_vrr_capable_property(connector);
 }
 
 /**
@@ -7890,14 +6409,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	struct drm_display_mode *downclock_mode = NULL;
 	bool has_dpcd;
 	enum pipe pipe = INVALID_PIPE;
-	intel_wakeref_t wakeref;
 	struct edid *edid;
 
 	if (!intel_dp_is_edp(intel_dp))
 		return true;
 
-	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
-
 	/*
 	 * On IBX/CPT we may get here with LVDS already registered. Since the
 	 * driver uses the only internal power sequencer available for both
@@ -7913,11 +6429,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 		return false;
 	}
 
-	with_pps_lock(intel_dp, wakeref) {
-		intel_dp_init_panel_power_timestamps(intel_dp);
-		intel_dp_pps_init(intel_dp);
-		intel_edp_panel_vdd_sanitize(intel_dp);
-	}
+	intel_pps_init(intel_dp);
 
 	/* Cache DPCD and EDID for edp. */
 	has_dpcd = intel_edp_init_dpcd(intel_dp);
@@ -7934,7 +6446,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	if (edid) {
 		if (drm_add_edid_modes(connector, edid)) {
 			drm_connector_update_edid_property(connector, edid);
-			intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
 		} else {
 			kfree(edid);
 			edid = ERR_PTR(-EINVAL);
@@ -7962,7 +6473,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 		pipe = vlv_active_pipe(intel_dp);
 
 		if (pipe != PIPE_A && pipe != PIPE_B)
-			pipe = intel_dp->pps_pipe;
+			pipe = intel_dp->pps.pps_pipe;
 
 		if (pipe != PIPE_A && pipe != PIPE_B)
 			pipe = PIPE_A;
@@ -7973,7 +6484,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	}
 
 	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
-	intel_connector->panel.backlight.power = intel_edp_backlight_power;
+	intel_connector->panel.backlight.power = intel_pps_backlight_power;
 	intel_panel_setup_backlight(connector, pipe);
 
 	if (fixed_mode) {
@@ -7985,13 +6496,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	return true;
 
 out_vdd_off:
-	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
-	/*
-	 * vdd might still be enabled do to the delayed vdd off.
-	 * Make sure vdd is actually turned off here.
-	 */
-	with_pps_lock(intel_dp, wakeref)
-		edp_panel_vdd_off_sync(intel_dp);
+	intel_pps_vdd_off_sync(intel_dp);
 
 	return false;
 }
@@ -8045,8 +6550,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
 	intel_dp_set_source_rates(intel_dp);
 
 	intel_dp->reset_link_params = true;
-	intel_dp->pps_pipe = INVALID_PIPE;
-	intel_dp->active_pipe = INVALID_PIPE;
+	intel_dp->pps.pps_pipe = INVALID_PIPE;
+	intel_dp->pps.active_pipe = INVALID_PIPE;
 
 	/* Preserve the current hw state. */
 	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
@@ -8064,7 +6569,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
 	}
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+		intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
 
 	/*
 	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
@@ -8133,6 +6638,9 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
 			       (temp & ~0xf) | 0xd);
 	}
 
+	intel_dp->frl.is_trained = false;
+	intel_dp->frl.trained_rate_gbps = 0;
+
 	return true;
 
 fail:
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 6620f9efdcbbaebb525ee3425bfc58ce08fd7a91..d80839139bfb445cfc7f7f53498ef6d4a8418ce9 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -70,16 +70,11 @@ enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
 			    const struct drm_connector_state *conn_state);
 void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
-void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
-void intel_edp_panel_on(struct intel_dp *intel_dp);
-void intel_edp_panel_off(struct intel_dp *intel_dp);
 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
 void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
 int intel_dp_max_link_rate(struct intel_dp *intel_dp);
 int intel_dp_max_lane_count(struct intel_dp *intel_dp);
 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
-void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
-u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
 
 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
 			   const struct intel_crtc_state *crtc_state);
@@ -141,5 +136,11 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
 				    struct intel_crtc_state *crtc_state);
 void intel_dp_sync_state(struct intel_encoder *encoder,
 			 const struct intel_crtc_state *crtc_state);
+const struct dpll *vlv_get_dpll(struct drm_i915_private *i915);
+
+void intel_dp_check_frl_training(struct intel_dp *intel_dp);
+void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
+				 const struct intel_crtc_state *crtc_state);
+void intel_dp_phy_test(struct intel_encoder *encoder);
 
 #endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
new file mode 100644
index 0000000000000000000000000000000000000000..eaebf123310a437de066e0f6f6f66ec20d82452a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_display_types.h"
+#include "intel_dp_aux.h"
+#include "intel_pps.h"
+#include "intel_tc.h"
+
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
+{
+	int i;
+	u32 v = 0;
+
+	if (src_bytes > 4)
+		src_bytes = 4;
+	for (i = 0; i < src_bytes; i++)
+		v |= ((u32)src[i]) << ((3 - i) * 8);
+	return v;
+}
+
+static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
+{
+	int i;
+
+	if (dst_bytes > 4)
+		dst_bytes = 4;
+	for (i = 0; i < dst_bytes; i++)
+		dst[i] = src >> ((3 - i) * 8);
+}
+
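+/*
+ * Worked example (illustrative only, not driver code): the AUX payload is
+ * packed MSB-first, so packing the bytes { 0xaa, 0xbb, 0xcc } yields
+ * 0xaabbcc00, and unpacking 0xaabbcc00 into a 3-byte buffer returns the
+ * original bytes. Payloads longer than 4 bytes simply span multiple data
+ * registers, 4 bytes per register.
+ */
+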
+static u32
+intel_dp_aux_wait_done(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+	const unsigned int timeout_ms = 10;
+	u32 status;
+	bool done;
+
+#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+	done = wait_event_timeout(i915->gmbus_wait_queue, C,
+				  msecs_to_jiffies_timeout(timeout_ms));
+
+	/* just trace the final value */
+	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
+
+	if (!done)
+		drm_err(&i915->drm,
+			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
+			intel_dp->aux.name, timeout_ms, status);
+#undef C
+
+	return status;
+}
+
+static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	if (index)
+		return 0;
+
+	/*
+	 * The clock divider is based off the hrawclk, and should run at
+	 * 2MHz. So take the hrawclk value, divide by 2000, and use that.
+	 */
+	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
+}
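+
+/*
+ * For instance (illustrative numbers only): with a 200 MHz hrawclk,
+ * rawclk_freq is reported in kHz (200000), so the divider comes out as
+ * DIV_ROUND_CLOSEST(200000, 2000) == 100, giving the ~2MHz AUX clock the
+ * comment above asks for.
+ */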
+
+static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	u32 freq;
+
+	if (index)
+		return 0;
+
+	/*
+	 * The clock divider is based off the cdclk or PCH rawclk, and should
+	 * run at 2MHz. So take the cdclk or PCH rawclk value, divide by 2000,
+	 * and use that.
+	 */
+	if (dig_port->aux_ch == AUX_CH_A)
+		freq = dev_priv->cdclk.hw.cdclk;
+	else
+		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
+	return DIV_ROUND_CLOSEST(freq, 2000);
+}
+
+static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+
+	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
+		/* Workaround for non-ULT HSW */
+		switch (index) {
+		case 0: return 63;
+		case 1: return 72;
+		default: return 0;
+		}
+	}
+
+	return ilk_get_aux_clock_divider(intel_dp, index);
+}
+
+static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+	/*
+	 * SKL doesn't need us to program the AUX clock divider (Hardware will
+	 * derive the clock from CDCLK automatically). We still implement the
+	 * get_aux_clock_divider vfunc to plug into the existing code.
+	 */
+	return index ? 0 : 1;
+}
+
+static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
+				int send_bytes,
+				u32 aux_clock_divider)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *dev_priv =
+			to_i915(dig_port->base.base.dev);
+	u32 precharge, timeout;
+
+	if (IS_GEN(dev_priv, 6))
+		precharge = 3;
+	else
+		precharge = 5;
+
+	if (IS_BROADWELL(dev_priv))
+		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
+	else
+		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
+
+	return DP_AUX_CH_CTL_SEND_BUSY |
+	       DP_AUX_CH_CTL_DONE |
+	       DP_AUX_CH_CTL_INTERRUPT |
+	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
+	       timeout |
+	       DP_AUX_CH_CTL_RECEIVE_ERROR |
+	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
+}
+
+static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+				int send_bytes,
+				u32 unused)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *i915 =
+			to_i915(dig_port->base.base.dev);
+	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+	u32 ret;
+
+	ret = DP_AUX_CH_CTL_SEND_BUSY |
+	      DP_AUX_CH_CTL_DONE |
+	      DP_AUX_CH_CTL_INTERRUPT |
+	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
+	      DP_AUX_CH_CTL_TIME_OUT_MAX |
+	      DP_AUX_CH_CTL_RECEIVE_ERROR |
+	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
+	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+
+	if (intel_phy_is_tc(i915, phy) &&
+	    dig_port->tc_mode == TC_PORT_TBT_ALT)
+		ret |= DP_AUX_CH_CTL_TBT_IO;
+
+	return ret;
+}
+
+static int
+intel_dp_aux_xfer(struct intel_dp *intel_dp,
+		  const u8 *send, int send_bytes,
+		  u8 *recv, int recv_size,
+		  u32 aux_send_ctl_flags)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *i915 =
+			to_i915(dig_port->base.base.dev);
+	struct intel_uncore *uncore = &i915->uncore;
+	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+	bool is_tc_port = intel_phy_is_tc(i915, phy);
+	i915_reg_t ch_ctl, ch_data[5];
+	u32 aux_clock_divider;
+	enum intel_display_power_domain aux_domain;
+	intel_wakeref_t aux_wakeref;
+	intel_wakeref_t pps_wakeref;
+	int i, ret, recv_bytes;
+	int try, clock = 0;
+	u32 status;
+	bool vdd;
+
+	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
+		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
+
+	if (is_tc_port)
+		intel_tc_port_lock(dig_port);
+
+	aux_domain = intel_aux_power_domain(dig_port);
+
+	aux_wakeref = intel_display_power_get(i915, aux_domain);
+	pps_wakeref = intel_pps_lock(intel_dp);
+
+	/*
+	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
+	 * In such cases we want to leave VDD enabled and it's up to the upper
+	 * layers to turn it off. But for e.g. i2c-dev access we need to turn
+	 * it on/off ourselves.
+	 */
+	vdd = intel_pps_vdd_on_unlocked(intel_dp);
+
+	/*
+	 * dp aux is extremely sensitive to irq latency, hence request the
+	 * lowest possible wakeup latency and so prevent the cpu from going into
+	 * deep sleep states.
+	 */
+	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
+
+	intel_pps_check_power_unlocked(intel_dp);
+
+	/* Try to wait for any previous AUX channel activity */
+	for (try = 0; try < 3; try++) {
+		status = intel_uncore_read_notrace(uncore, ch_ctl);
+		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+			break;
+		msleep(1);
+	}
+	/* just trace the final value */
+	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
+
+	if (try == 3) {
+		const u32 status = intel_uncore_read(uncore, ch_ctl);
+
+		if (status != intel_dp->aux_busy_last_status) {
+			drm_WARN(&i915->drm, 1,
+				 "%s: not started (status 0x%08x)\n",
+				 intel_dp->aux.name, status);
+			intel_dp->aux_busy_last_status = status;
+		}
+
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* Only 5 data registers! */
+	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
+		ret = -E2BIG;
+		goto out;
+	}
+
+	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
+		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
+							  send_bytes,
+							  aux_clock_divider);
+
+		send_ctl |= aux_send_ctl_flags;
+
+		/* Must try at least 3 times according to DP spec */
+		for (try = 0; try < 5; try++) {
+			/* Load the send data into the aux channel data registers */
+			for (i = 0; i < send_bytes; i += 4)
+				intel_uncore_write(uncore,
+						   ch_data[i >> 2],
+						   intel_dp_pack_aux(send + i,
+								     send_bytes - i));
+
+			/* Send the command and wait for it to complete */
+			intel_uncore_write(uncore, ch_ctl, send_ctl);
+
+			status = intel_dp_aux_wait_done(intel_dp);
+
+			/* Clear done status and any errors */
+			intel_uncore_write(uncore,
+					   ch_ctl,
+					   status |
+					   DP_AUX_CH_CTL_DONE |
+					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
+					   DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+			/*
+			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
+			 *   400us delay required for errors and timeouts
+			 *   Timeout errors from the HW already meet this
+			 *   requirement so skip to next iteration
+			 */
+			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
+				continue;
+
+			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+				usleep_range(400, 500);
+				continue;
+			}
+			if (status & DP_AUX_CH_CTL_DONE)
+				goto done;
+		}
+	}
+
+	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
+			intel_dp->aux.name, status);
+		ret = -EBUSY;
+		goto out;
+	}
+
+done:
+	/*
+	 * Check for timeout or receive error. Timeouts occur when the sink is
+	 * not connected.
+	 */
+	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
+			intel_dp->aux.name, status);
+		ret = -EIO;
+		goto out;
+	}
+
+	/*
+	 * Timeouts occur when the device isn't connected, so they're "normal"
+	 * -- don't fill the kernel log with these
+	 */
+	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
+			    intel_dp->aux.name, status);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	/* Unload any bytes sent back from the other side */
+	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+
+	/*
+	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
+	 * We have no idea what happened, so we return -EBUSY so the
+	 * drm layer takes care of the necessary retries.
+	 */
+	if (recv_bytes == 0 || recv_bytes > 20) {
+		drm_dbg_kms(&i915->drm,
+			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
+			    intel_dp->aux.name, recv_bytes);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (recv_bytes > recv_size)
+		recv_bytes = recv_size;
+
+	for (i = 0; i < recv_bytes; i += 4)
+		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
+				    recv + i, recv_bytes - i);
+
+	ret = recv_bytes;
+out:
+	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+	if (vdd)
+		intel_pps_vdd_off_unlocked(intel_dp, false);
+
+	intel_pps_unlock(intel_dp, pps_wakeref);
+	intel_display_power_put_async(i915, aux_domain, aux_wakeref);
+
+	if (is_tc_port)
+		intel_tc_port_unlock(dig_port);
+
+	return ret;
+}
+
+#define BARE_ADDRESS_SIZE	3
+#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
+
+static void
+intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
+		    const struct drm_dp_aux_msg *msg)
+{
+	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
+	txbuf[1] = (msg->address >> 8) & 0xff;
+	txbuf[2] = msg->address & 0xff;
+	txbuf[3] = msg->size - 1;
+}
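+
+/*
+ * Header layout example (illustrative only): a native AUX read of one byte
+ * at DPCD address 0x600 (request 0x9) produces the header bytes
+ * { 0x90, 0x06, 0x00, 0x00 }: the request in the high nibble of byte 0,
+ * the 20-bit address across the following nibbles, and size - 1 last.
+ */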
+
+static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
+{
+	/*
+	 * If we're trying to send the HDCP Aksv, we need to set the Aksv
+	 * select bit to inform the hardware to send the Aksv after our header
+	 * since we can't access that data from software.
+	 */
+	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
+	    msg->address == DP_AUX_HDCP_AKSV)
+		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
+
+	return 0;
+}
+
+static ssize_t
+intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	u8 txbuf[20], rxbuf[20];
+	size_t txsize, rxsize;
+	u32 flags = intel_dp_aux_xfer_flags(msg);
+	int ret;
+
+	intel_dp_aux_header(txbuf, msg);
+
+	switch (msg->request & ~DP_AUX_I2C_MOT) {
+	case DP_AUX_NATIVE_WRITE:
+	case DP_AUX_I2C_WRITE:
+	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
+		rxsize = 2; /* 0 or 1 data bytes */
+
+		if (drm_WARN_ON(&i915->drm, txsize > 20))
+			return -E2BIG;
+
+		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
+
+		if (msg->buffer)
+			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
+
+		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
+					rxbuf, rxsize, flags);
+		if (ret > 0) {
+			msg->reply = rxbuf[0] >> 4;
+
+			if (ret > 1) {
+				/* Number of bytes written in a short write. */
+				ret = clamp_t(int, rxbuf[1], 0, msg->size);
+			} else {
+				/* Return payload size. */
+				ret = msg->size;
+			}
+		}
+		break;
+
+	case DP_AUX_NATIVE_READ:
+	case DP_AUX_I2C_READ:
+		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
+		rxsize = msg->size + 1;
+
+		if (drm_WARN_ON(&i915->drm, rxsize > 20))
+			return -E2BIG;
+
+		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
+					rxbuf, rxsize, flags);
+		if (ret > 0) {
+			msg->reply = rxbuf[0] >> 4;
+			/*
+			 * Assume happy day, and copy the data. The caller is
+			 * expected to check msg->reply before touching it.
+			 *
+			 * Return payload size.
+			 */
+			ret--;
+			memcpy(msg->buffer, rxbuf + 1, ret);
+		}
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
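+/*
+ * Sizing example (illustrative only): for an I2C read of 16 bytes the
+ * transmit buffer is just the 4-byte header (txsize == HEADER_SIZE) while
+ * the receive buffer must hold the reply byte plus the payload
+ * (rxsize == msg->size + 1 == 17), all of which must fit in the 20 bytes
+ * (five 32-bit data registers) the hardware provides.
+ */
+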
+static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum aux_ch aux_ch = dig_port->aux_ch;
+
+	switch (aux_ch) {
+	case AUX_CH_B:
+	case AUX_CH_C:
+	case AUX_CH_D:
+		return DP_AUX_CH_CTL(aux_ch);
+	default:
+		MISSING_CASE(aux_ch);
+		return DP_AUX_CH_CTL(AUX_CH_B);
+	}
+}
+
+static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum aux_ch aux_ch = dig_port->aux_ch;
+
+	switch (aux_ch) {
+	case AUX_CH_B:
+	case AUX_CH_C:
+	case AUX_CH_D:
+		return DP_AUX_CH_DATA(aux_ch, index);
+	default:
+		MISSING_CASE(aux_ch);
+		return DP_AUX_CH_DATA(AUX_CH_B, index);
+	}
+}
+
+static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum aux_ch aux_ch = dig_port->aux_ch;
+
+	switch (aux_ch) {
+	case AUX_CH_A:
+		return DP_AUX_CH_CTL(aux_ch);
+	case AUX_CH_B:
+	case AUX_CH_C:
+	case AUX_CH_D:
+		return PCH_DP_AUX_CH_CTL(aux_ch);
+	default:
+		MISSING_CASE(aux_ch);
+		return DP_AUX_CH_CTL(AUX_CH_A);
+	}
+}
+
+static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum aux_ch aux_ch = dig_port->aux_ch;
+
+	switch (aux_ch) {
+	case AUX_CH_A:
+		return DP_AUX_CH_DATA(aux_ch, index);
+	case AUX_CH_B:
+	case AUX_CH_C:
+	case AUX_CH_D:
+		return PCH_DP_AUX_CH_DATA(aux_ch, index);
+	default:
+		MISSING_CASE(aux_ch);
+		return DP_AUX_CH_DATA(AUX_CH_A, index);
+	}
+}
+
+static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum aux_ch aux_ch = dig_port->aux_ch;
+
+	switch (aux_ch) {
+	case AUX_CH_A:
+	case AUX_CH_B:
+	case AUX_CH_C:
+	case AUX_CH_D:
+	case AUX_CH_E:
+	case AUX_CH_F:
+		return DP_AUX_CH_CTL(aux_ch);
+	default:
+		MISSING_CASE(aux_ch);
+		return DP_AUX_CH_CTL(AUX_CH_A);
+	}
+}
+
+static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum aux_ch aux_ch = dig_port->aux_ch;
+
+	switch (aux_ch) {
+	case AUX_CH_A:
+	case AUX_CH_B:
+	case AUX_CH_C:
+	case AUX_CH_D:
+	case AUX_CH_E:
+	case AUX_CH_F:
+		return DP_AUX_CH_DATA(aux_ch, index);
+	default:
+		MISSING_CASE(aux_ch);
+		return DP_AUX_CH_DATA(AUX_CH_A, index);
+	}
+}
+
+static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum aux_ch aux_ch = dig_port->aux_ch;
+
+	switch (aux_ch) {
+	case AUX_CH_A:
+	case AUX_CH_B:
+	case AUX_CH_C:
+	case AUX_CH_USBC1:
+	case AUX_CH_USBC2:
+	case AUX_CH_USBC3:
+	case AUX_CH_USBC4:
+	case AUX_CH_USBC5:
+	case AUX_CH_USBC6:
+		return DP_AUX_CH_CTL(aux_ch);
+	default:
+		MISSING_CASE(aux_ch);
+		return DP_AUX_CH_CTL(AUX_CH_A);
+	}
+}
+
+static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum aux_ch aux_ch = dig_port->aux_ch;
+
+	switch (aux_ch) {
+	case AUX_CH_A:
+	case AUX_CH_B:
+	case AUX_CH_C:
+	case AUX_CH_USBC1:
+	case AUX_CH_USBC2:
+	case AUX_CH_USBC3:
+	case AUX_CH_USBC4:
+	case AUX_CH_USBC5:
+	case AUX_CH_USBC6:
+		return DP_AUX_CH_DATA(aux_ch, index);
+	default:
+		MISSING_CASE(aux_ch);
+		return DP_AUX_CH_DATA(AUX_CH_A, index);
+	}
+}
+
+void intel_dp_aux_fini(struct intel_dp *intel_dp)
+{
+	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
+		cpu_latency_qos_remove_request(&intel_dp->pm_qos);
+
+	kfree(intel_dp->aux.name);
+}
+
+void intel_dp_aux_init(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *encoder = &dig_port->base;
+	enum aux_ch aux_ch = dig_port->aux_ch;
+
+	if (INTEL_GEN(dev_priv) >= 12) {
+		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
+		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
+	} else if (INTEL_GEN(dev_priv) >= 9) {
+		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
+		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
+	} else if (HAS_PCH_SPLIT(dev_priv)) {
+		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
+		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
+	} else {
+		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
+		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
+	}
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
+	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
+	else if (HAS_PCH_SPLIT(dev_priv))
+		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
+	else
+		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
+	else
+		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
+
+	drm_dp_aux_init(&intel_dp->aux);
+
+	/* Failure to allocate our preferred name is not critical */
+	if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
+		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
+					       aux_ch - AUX_CH_USBC1 + '1',
+					       encoder->base.name);
+	else
+		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
+					       aux_ch_name(aux_ch),
+					       encoder->base.name);
+
+	intel_dp->aux.transfer = intel_dp_aux_transfer;
+	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
new file mode 100644
index 0000000000000000000000000000000000000000..4afbe76217b9f1ff74331437865ba0f9a172b7f9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_AUX_H__
+#define __INTEL_DP_AUX_H__
+
+#include <linux/types.h>
+
+struct intel_dp;
+
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
+
+void intel_dp_aux_fini(struct intel_dp *intel_dp);
+void intel_dp_aux_init(struct intel_dp *intel_dp);
+
+#endif /* __INTEL_DP_AUX_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 51d27fc98d483b9e9f2a27767a28e3016800da99..6518843901376bcff82a1b493a467968a57e9e72 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -22,10 +22,253 @@
  *
  */
 
+/*
+ * Laptops with Intel GPUs which have panels that support controlling the
+ * backlight through DP AUX can actually use two different interfaces: Intel's
+ * proprietary DP AUX backlight interface, and the standard VESA backlight
+ * interface. Unfortunately, at the time of writing, a lot of laptops
+ * advertise support for the standard VESA backlight interface without
+ * properly supporting it. However, on these systems the Intel backlight
+ * interface generally does work properly. Additionally, for some reason
+ * these systems usually just indicate in their VBIOS that they use PWM
+ * backlight controls.
+ */
+
 #include "intel_display_types.h"
 #include "intel_dp_aux_backlight.h"
+#include "intel_panel.h"
+
+/* TODO:
+ * Implement HDR; right now we just implement the bare minimum to bring us back into SDR mode so we
+ * can make people's backlights work in the meantime.
+ */
+
+/*
+ * DP AUX registers for Intel's proprietary HDR backlight interface. We define
+ * them here since we'll likely be the only driver to ever use these.
+ */
+#define INTEL_EDP_HDR_TCON_CAP0                                        0x340
+
+#define INTEL_EDP_HDR_TCON_CAP1                                        0x341
+# define INTEL_EDP_HDR_TCON_2084_DECODE_CAP                           BIT(0)
+# define INTEL_EDP_HDR_TCON_2020_GAMUT_CAP                            BIT(1)
+# define INTEL_EDP_HDR_TCON_TONE_MAPPING_CAP                          BIT(2)
+# define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_CAP                   BIT(3)
+# define INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP                       BIT(4)
+# define INTEL_EDP_HDR_TCON_OPTIMIZATION_CAP                          BIT(5)
+# define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_CAP                       BIT(6)
+# define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_CONVERSION_CAP        BIT(7)
+
+#define INTEL_EDP_HDR_TCON_CAP2                                        0x342
+# define INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP                        BIT(0)
+
+#define INTEL_EDP_HDR_TCON_CAP3                                        0x343
+
+#define INTEL_EDP_HDR_GETSET_CTRL_PARAMS                               0x344
+# define INTEL_EDP_HDR_TCON_2084_DECODE_ENABLE                        BIT(0)
+# define INTEL_EDP_HDR_TCON_2020_GAMUT_ENABLE                         BIT(1)
+# define INTEL_EDP_HDR_TCON_TONE_MAPPING_ENABLE                       BIT(2) /* Pre-TGL+ */
+# define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_ENABLE                BIT(3)
+# define INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE                     BIT(4)
+# define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_ENABLE                BIT(5)
+/* Bit 6 is reserved */
+# define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_ENABLE                    BIT(7)
+
+#define INTEL_EDP_HDR_CONTENT_LUMINANCE                                0x346 /* Pre-TGL+ */
+#define INTEL_EDP_HDR_PANEL_LUMINANCE_OVERRIDE                         0x34A
+#define INTEL_EDP_SDR_LUMINANCE_LEVEL                                  0x352
+#define INTEL_EDP_BRIGHTNESS_NITS_LSB                                  0x354
+#define INTEL_EDP_BRIGHTNESS_NITS_MSB                                  0x355
+#define INTEL_EDP_BRIGHTNESS_DELAY_FRAMES                              0x356
+#define INTEL_EDP_BRIGHTNESS_PER_FRAME_STEPS                           0x357
+
+#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_0                            0x358
+# define INTEL_EDP_TCON_USAGE_MASK                             GENMASK(3, 0)
+# define INTEL_EDP_TCON_USAGE_UNKNOWN                                    0x0
+# define INTEL_EDP_TCON_USAGE_DESKTOP                                    0x1
+# define INTEL_EDP_TCON_USAGE_FULL_SCREEN_MEDIA                          0x2
+# define INTEL_EDP_TCON_USAGE_FULL_SCREEN_GAMING                         0x3
+# define INTEL_EDP_TCON_POWER_MASK                                    BIT(4)
+# define INTEL_EDP_TCON_POWER_DC                                    (0 << 4)
+# define INTEL_EDP_TCON_POWER_AC                                    (1 << 4)
+# define INTEL_EDP_TCON_OPTIMIZATION_STRENGTH_MASK             GENMASK(7, 5)
+
+#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1                            0x359
+
+/* Intel EDP backlight callbacks */
+static bool
+intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
+{
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+	struct drm_dp_aux *aux = &intel_dp->aux;
+	struct intel_panel *panel = &connector->panel;
+	int ret;
+	u8 tcon_cap[4];
+
+	ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap));
+	if (ret < 0)
+		return false;
+
+	if (!(tcon_cap[1] & INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP))
+		return false;
+
+	if (tcon_cap[0] >= 1) {
+		drm_dbg_kms(&i915->drm, "Detected Intel HDR backlight interface version %d\n",
+			    tcon_cap[0]);
+	} else {
+		drm_dbg_kms(&i915->drm, "Detected unsupported HDR backlight interface version %d\n",
+			    tcon_cap[0]);
+		return false;
+	}
+
+	panel->backlight.edp.intel.sdr_uses_aux =
+		tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP;
+
+	return true;
+}
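+
+/*
+ * For example (illustrative values only): tcon_cap = { 0x01, 0x10, 0x01,
+ * 0x00 } reports interface version 1, the nits-based brightness cap
+ * (INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP) and SDR brightness control over
+ * AUX (INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP), so sdr_uses_aux ends up
+ * true.
+ */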
+
+static u32
+intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
+	struct intel_panel *panel = &connector->panel;
+	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+	u8 tmp;
+	u8 buf[2] = { 0 };
+
+	if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) < 0) {
+		drm_err(&i915->drm, "Failed to read current backlight mode from DPCD\n");
+		return 0;
+	}
+
+	if (!(tmp & INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE)) {
+		if (!panel->backlight.edp.intel.sdr_uses_aux) {
+			u32 pwm_level = panel->backlight.pwm_funcs->get(connector, pipe);
+
+			return intel_panel_backlight_level_from_pwm(connector, pwm_level);
+		}
+
+		/* Assume 100% brightness if backlight controls aren't enabled yet */
+		return panel->backlight.max;
+	}
+
+	if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, sizeof(buf)) < 0) {
+		drm_err(&i915->drm, "Failed to read brightness from DPCD\n");
+		return 0;
+	}
+
+	return (buf[1] << 8 | buf[0]);
+}
+
+static void
+intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+	struct intel_connector *connector = to_intel_connector(conn_state->connector);
+	struct drm_device *dev = connector->base.dev;
+	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+	u8 buf[4] = { 0 };
+
+	buf[0] = level & 0xFF;
+	buf[1] = (level & 0xFF00) >> 8;
+
+	if (drm_dp_dpcd_write(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, 4) < 0)
+		drm_err(dev, "Failed to write brightness level to DPCD\n");
+}
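+
+/*
+ * Encoding example (illustrative only): a level of 300 nits (0x012c) is
+ * written little-endian across the two brightness registers, so buf[0]
+ * (INTEL_EDP_BRIGHTNESS_NITS_LSB) gets 0x2c and buf[1] (the MSB) gets
+ * 0x01; the read path above reassembles it as buf[1] << 8 | buf[0].
+ */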
+
+static void
+intel_dp_aux_hdr_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+	struct intel_connector *connector = to_intel_connector(conn_state->connector);
+	struct intel_panel *panel = &connector->panel;
+
+	if (panel->backlight.edp.intel.sdr_uses_aux) {
+		intel_dp_aux_hdr_set_aux_backlight(conn_state, level);
+	} else {
+		const u32 pwm_level = intel_panel_backlight_level_to_pwm(connector, level);
+
+		intel_panel_set_pwm_level(conn_state, pwm_level);
+	}
+}
+
+static void
+intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
+				  const struct drm_connector_state *conn_state, u32 level)
+{
+	struct intel_connector *connector = to_intel_connector(conn_state->connector);
+	struct intel_panel *panel = &connector->panel;
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+	int ret;
+	u8 old_ctrl, ctrl;
+
+	ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
+	if (ret < 0) {
+		drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
+		return;
+	}
+
+	ctrl = old_ctrl;
+	if (panel->backlight.edp.intel.sdr_uses_aux) {
+		ctrl |= INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
+		intel_dp_aux_hdr_set_aux_backlight(conn_state, level);
+	} else {
+		u32 pwm_level = intel_panel_backlight_level_to_pwm(connector, level);
+
+		panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
+
+		ctrl &= ~INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
+	}
+
+	if (ctrl != old_ctrl)
+		if (drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) < 0)
+			drm_err(&i915->drm, "Failed to configure DPCD brightness controls\n");
+}
+
+static void
+intel_dp_aux_hdr_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+	struct intel_connector *connector = to_intel_connector(conn_state->connector);
+	struct intel_panel *panel = &connector->panel;
+
+	/* Nothing to do for AUX based backlight controls */
+	if (panel->backlight.edp.intel.sdr_uses_aux)
+		return;
+
+	/* Note we want the actual pwm_level to be 0, regardless of pwm_min */
+	panel->backlight.pwm_funcs->disable(conn_state, intel_panel_invert_pwm_level(connector, 0));
+}
+
+static int
+intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
+	struct intel_panel *panel = &connector->panel;
+	int ret;
+
+	if (panel->backlight.edp.intel.sdr_uses_aux) {
+		drm_dbg_kms(&i915->drm, "SDR backlight is controlled through DPCD\n");
+	} else {
+		drm_dbg_kms(&i915->drm, "SDR backlight is controlled through PWM\n");
+
+		ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+		if (ret < 0) {
+			drm_err(&i915->drm,
+				"Failed to setup SDR backlight controls through PWM: %d\n", ret);
+			return ret;
+		}
+	}
+
+	panel->backlight.max = 512;
+	panel->backlight.min = 0;
+	panel->backlight.level = intel_dp_aux_hdr_get_backlight(connector, pipe);
+	panel->backlight.enabled = panel->backlight.level != 0;
 
-static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
+	return 0;
+}
+
+/* VESA backlight callbacks */
+static void set_vesa_backlight_enable(struct intel_dp *intel_dp, bool enable)
 {
 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 	u8 reg_val = 0;
@@ -52,7 +295,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
 	}
 }
 
-static bool intel_dp_aux_backlight_dpcd_mode(struct intel_connector *connector)
+static bool intel_dp_aux_vesa_backlight_dpcd_mode(struct intel_connector *connector)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -75,7 +318,7 @@ static bool intel_dp_aux_backlight_dpcd_mode(struct intel_connector *connector)
  * Read the current backlight value from DPCD register(s) based
  * on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported
  */
-static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
+static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector, enum pipe unused)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -86,7 +329,7 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
 	 * If we're not in DPCD control mode yet, the programmed brightness
 	 * value is meaningless and we should assume max brightness
 	 */
-	if (!intel_dp_aux_backlight_dpcd_mode(connector))
+	if (!intel_dp_aux_vesa_backlight_dpcd_mode(connector))
 		return connector->panel.backlight.max;
 
 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
@@ -107,7 +350,8 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
  * 8-bit or 16 bit value (MSB and LSB)
  */
 static void
-intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state,
+				u32 level)
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
@@ -137,11 +381,11 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
  * - Where P = 2^Pn, where Pn is the value programmed by field 4:0 of the
  *             EDP_PWMGEN_BIT_COUNT register (DPCD Address 00724h)
  */
-static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
+static bool intel_dp_aux_vesa_set_pwm_freq(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	const u8 pn = connector->panel.backlight.pwmgen_bit_count;
+	const u8 pn = connector->panel.backlight.edp.vesa.pwmgen_bit_count;
 	int freq, fxp, f, fxp_actual, fxp_min, fxp_max;
 
 	freq = dev_priv->vbt.backlight.pwm_freq_hz;
@@ -173,14 +417,16 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
 	return true;
 }
 
-static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state,
-					  const struct drm_connector_state *conn_state)
+static void
+intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
+				   const struct drm_connector_state *conn_state, u32 level)
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 	struct intel_panel *panel = &connector->panel;
 	u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
+	u8 pwmgen_bit_count = panel->backlight.edp.vesa.pwmgen_bit_count;
 
 	if (drm_dp_dpcd_readb(&intel_dp->aux,
 			DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
@@ -201,7 +447,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
 
 		if (drm_dp_dpcd_writeb(&intel_dp->aux,
 				       DP_EDP_PWMGEN_BIT_COUNT,
-				       panel->backlight.pwmgen_bit_count) < 0)
+				       pwmgen_bit_count) < 0)
 			drm_dbg_kms(&i915->drm,
 				    "Failed to write aux pwmgen bit count\n");
 
@@ -214,7 +460,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
 	}
 
 	if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP)
-		if (intel_dp_aux_set_pwm_freq(connector))
+		if (intel_dp_aux_vesa_set_pwm_freq(connector))
 			new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
 
 	if (new_dpcd_buf != dpcd_buf) {
@@ -225,18 +471,18 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
 		}
 	}
 
-	intel_dp_aux_set_backlight(conn_state,
-				   connector->panel.backlight.level);
-	set_aux_backlight_enable(intel_dp, true);
+	intel_dp_aux_vesa_set_backlight(conn_state, level);
+	set_vesa_backlight_enable(intel_dp, true);
 }
 
-static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state *old_conn_state,
+						u32 level)
 {
-	set_aux_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)),
-				 false);
+	set_vesa_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)),
+				  false);
 }
 
-static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
+static u32 intel_dp_aux_vesa_calc_max_backlight(struct intel_connector *connector)
 {
 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
@@ -309,40 +555,45 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
 			    "Failed to write aux pwmgen bit count\n");
 		return max_backlight;
 	}
-	panel->backlight.pwmgen_bit_count = pn;
+	panel->backlight.edp.vesa.pwmgen_bit_count = pn;
 
 	max_backlight = (1 << pn) - 1;
 
 	return max_backlight;
 }
 
-static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
-					enum pipe pipe)
+static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
+					     enum pipe pipe)
 {
 	struct intel_panel *panel = &connector->panel;
 
-	panel->backlight.max = intel_dp_aux_calc_max_backlight(connector);
+	panel->backlight.max = intel_dp_aux_vesa_calc_max_backlight(connector);
 	if (!panel->backlight.max)
 		return -ENODEV;
 
 	panel->backlight.min = 0;
-	panel->backlight.level = intel_dp_aux_get_backlight(connector);
-	panel->backlight.enabled = intel_dp_aux_backlight_dpcd_mode(connector) &&
+	panel->backlight.level = intel_dp_aux_vesa_get_backlight(connector, pipe);
+	panel->backlight.enabled = intel_dp_aux_vesa_backlight_dpcd_mode(connector) &&
 				   panel->backlight.level != 0;
 
 	return 0;
 }
 
 static bool
-intel_dp_aux_display_control_capable(struct intel_connector *connector)
+intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
 	/* Check the eDP Display control capabilities registers to determine if
-	 * the panel can support backlight control over the aux channel
+	 * the panel can support backlight control over the aux channel.
+	 *
+	 * TODO: We currently only support AUX-only backlight configurations, not backlights which
+	 * require a mix of PWM and AUX controls to work. In the meantime, these machines typically
+	 * work just fine using normal PWM controls anyway.
 	 */
 	if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
+	    (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
 	    (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)) {
 		drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
 		return true;
@@ -350,40 +601,89 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
 	return false;
 }
 
-int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
+static const struct intel_panel_bl_funcs intel_dp_hdr_bl_funcs = {
+	.setup = intel_dp_aux_hdr_setup_backlight,
+	.enable = intel_dp_aux_hdr_enable_backlight,
+	.disable = intel_dp_aux_hdr_disable_backlight,
+	.set = intel_dp_aux_hdr_set_backlight,
+	.get = intel_dp_aux_hdr_get_backlight,
+};
+
+static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = {
+	.setup = intel_dp_aux_vesa_setup_backlight,
+	.enable = intel_dp_aux_vesa_enable_backlight,
+	.disable = intel_dp_aux_vesa_disable_backlight,
+	.set = intel_dp_aux_vesa_set_backlight,
+	.get = intel_dp_aux_vesa_get_backlight,
+};
+
+enum intel_dp_aux_backlight_modparam {
+	INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
+	INTEL_DP_AUX_BACKLIGHT_OFF = 0,
+	INTEL_DP_AUX_BACKLIGHT_ON = 1,
+	INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
+	INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
+};
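+
+/*
+ * These values correspond to the i915.enable_dpcd_backlight module
+ * parameter handled in the switch below; for example, booting with
+ * i915.enable_dpcd_backlight=3 skips the VESA probe entirely and only
+ * tries the Intel proprietary interface.
+ */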
+
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
 {
-	struct intel_panel *panel = &intel_connector->panel;
-	struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
+	struct drm_device *dev = connector->base.dev;
+	struct intel_panel *panel = &connector->panel;
+	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	bool try_intel_interface = false, try_vesa_interface = false;
 
-	if (i915->params.enable_dpcd_backlight == 0 ||
-	    !intel_dp_aux_display_control_capable(intel_connector))
+	/*
+	 * Check the VBT and the user's module parameters to figure out which
+	 * interfaces to probe.
+	 */
+	switch (i915->params.enable_dpcd_backlight) {
+	case INTEL_DP_AUX_BACKLIGHT_OFF:
 		return -ENODEV;
+	case INTEL_DP_AUX_BACKLIGHT_AUTO:
+		switch (i915->vbt.backlight.type) {
+		case INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE:
+			try_vesa_interface = true;
+			break;
+		case INTEL_BACKLIGHT_DISPLAY_DDI:
+			try_intel_interface = true;
+			try_vesa_interface = true;
+			break;
+		default:
+			return -ENODEV;
+		}
+		break;
+	case INTEL_DP_AUX_BACKLIGHT_ON:
+		if (i915->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)
+			try_intel_interface = true;
+
+		try_vesa_interface = true;
+		break;
+	case INTEL_DP_AUX_BACKLIGHT_FORCE_VESA:
+		try_vesa_interface = true;
+		break;
+	case INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL:
+		try_intel_interface = true;
+		break;
+	}
 
 	/*
-	 * There are a lot of machines that don't advertise the backlight
-	 * control interface to use properly in their VBIOS, :\
+	 * A lot of eDP panels in the wild will report supporting both the
+	 * Intel proprietary backlight control interface, and the VESA
+	 * backlight control interface. Many of these panels are liars though,
+	 * and will only work with the Intel interface. So, always probe for
+	 * that first.
 	 */
-	if (i915->vbt.backlight.type !=
-	    INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
-	    i915->params.enable_dpcd_backlight != 1 &&
-	    !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
-			      DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
-		drm_info(&i915->drm,
-			 "Panel advertises DPCD backlight support, but "
-			 "VBT disagrees. If your backlight controls "
-			 "don't work try booting with "
-			 "i915.enable_dpcd_backlight=1. If your machine "
-			 "needs this, please file a _new_ bug report on "
-			 "drm/i915, see " FDO_BUG_URL " for details.\n");
-		return -ENODEV;
+	if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) {
+		drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n");
+		panel->backlight.funcs = &intel_dp_hdr_bl_funcs;
+		return 0;
 	}
 
-	panel->backlight.setup = intel_dp_aux_setup_backlight;
-	panel->backlight.enable = intel_dp_aux_enable_backlight;
-	panel->backlight.disable = intel_dp_aux_disable_backlight;
-	panel->backlight.set = intel_dp_aux_set_backlight;
-	panel->backlight.get = intel_dp_aux_get_backlight;
+	if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector)) {
+		drm_dbg_kms(dev, "Using VESA eDP backlight controls\n");
+		panel->backlight.funcs = &intel_dp_vesa_bl_funcs;
+		return 0;
+	}
 
-	return 0;
+	return -ENODEV;
 }
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 03424d20e9f76c6cb448cc88e73a2399e8805f09..4dba5bb15af5a7e7b7418acba866572abeffce20 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -16,6 +16,30 @@
 #include "intel_dp.h"
 #include "intel_hdcp.h"
 
+static unsigned int transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
+{
+	u32 stream_enc_mask;
+
+	switch (cpu_transcoder) {
+	case TRANSCODER_A:
+		stream_enc_mask = HDCP_STATUS_STREAM_A_ENC;
+		break;
+	case TRANSCODER_B:
+		stream_enc_mask = HDCP_STATUS_STREAM_B_ENC;
+		break;
+	case TRANSCODER_C:
+		stream_enc_mask = HDCP_STATUS_STREAM_C_ENC;
+		break;
+	case TRANSCODER_D:
+		stream_enc_mask = HDCP_STATUS_STREAM_D_ENC;
+		break;
+	default:
+		stream_enc_mask = 0;
+	}
+
+	return stream_enc_mask;
+}
+
 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
 {
 	long ret;
@@ -561,7 +585,8 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port,
 }
 
 static
-int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port)
+int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port,
+			      struct intel_connector *connector)
 {
 	u8 rx_status;
 	int ret;
@@ -622,48 +647,143 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
 };
 
 static int
-intel_dp_mst_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
-				    enum transcoder cpu_transcoder,
-				    bool enable)
+intel_dp_mst_toggle_hdcp_stream_select(struct intel_connector *connector,
+				       bool enable)
 {
-	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
+	struct intel_hdcp *hdcp = &connector->hdcp;
 	int ret;
 
-	if (!enable)
-		usleep_range(6, 60); /* Bspec says >= 6us */
-
-	ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base,
-					       cpu_transcoder, enable);
+	ret = intel_ddi_toggle_hdcp_bits(&dig_port->base,
+					 hdcp->stream_transcoder, enable,
+					 TRANS_DDI_HDCP_SELECT);
 	if (ret)
-		drm_dbg_kms(&i915->drm, "%s HDCP signalling failed (%d)\n",
-			      enable ? "Enable" : "Disable", ret);
+		drm_err(&i915->drm, "%s HDCP stream select failed (%d)\n",
+			enable ? "Enable" : "Disable", ret);
 	return ret;
 }
 
-static
-bool intel_dp_mst_hdcp_check_link(struct intel_digital_port *dig_port,
-				  struct intel_connector *connector)
+static int
+intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
+				    bool enable)
+{
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	enum port port = dig_port->base.port;
+	enum transcoder cpu_transcoder = hdcp->stream_transcoder;
+	u32 stream_enc_status;
+	int ret;
+
+	ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable);
+	if (ret)
+		return ret;
+
+	stream_enc_status = transcoder_to_stream_enc_status(cpu_transcoder);
+	if (!stream_enc_status)
+		return -EINVAL;
+
+	/* Wait for encryption confirmation */
+	if (intel_de_wait_for_register(i915,
+				       HDCP_STATUS(i915, cpu_transcoder, port),
+				       stream_enc_status,
+				       enable ? stream_enc_status : 0,
+				       HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+		drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
+			transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static bool intel_dp_mst_get_qses_status(struct intel_digital_port *dig_port,
+					 struct intel_connector *connector)
 {
 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-	struct intel_dp *intel_dp = &dig_port->dp;
 	struct drm_dp_query_stream_enc_status_ack_reply reply;
+	struct intel_dp *intel_dp = &dig_port->dp;
 	int ret;
 
-	if (!intel_dp_hdcp_check_link(dig_port, connector))
-		return false;
-
 	ret = drm_dp_send_query_stream_enc_status(&intel_dp->mst_mgr,
 						  connector->port, &reply);
 	if (ret) {
 		drm_dbg_kms(&i915->drm,
-			    "[CONNECTOR:%d:%s] failed QSES ret=%d\n",
-			    connector->base.base.id, connector->base.name, ret);
+			    "[%s:%d] failed QSES ret=%d\n",
+			    connector->base.name, connector->base.base.id, ret);
 		return false;
 	}
 
+	drm_dbg_kms(&i915->drm, "[%s:%d] QSES stream auth: %d stream enc: %d\n",
+		    connector->base.name, connector->base.base.id,
+		    reply.auth_completed, reply.encryption_enabled);
+
 	return reply.auth_completed && reply.encryption_enabled;
 }
 
+static int
+intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
+				     bool enable)
+{
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
+	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	enum transcoder cpu_transcoder = hdcp->stream_transcoder;
+	enum pipe pipe = (enum pipe)cpu_transcoder;
+	enum port port = dig_port->base.port;
+	int ret;
+
+	drm_WARN_ON(&i915->drm, enable &&
+		    !!(intel_de_read(i915, HDCP2_AUTH_STREAM(i915, cpu_transcoder, port))
+		    & AUTH_STREAM_TYPE) != data->streams[0].stream_type);
+
+	ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable);
+	if (ret)
+		return ret;
+
+	/* Wait for encryption confirmation */
+	if (intel_de_wait_for_register(i915,
+				       HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe),
+				       STREAM_ENCRYPTION_STATUS,
+				       enable ? STREAM_ENCRYPTION_STATUS : 0,
+				       HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+		drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
+			transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * As per DP v2.0 I.3.3, ignore the stream signature L' in the QSES reply
+ * msg. As per I.3.5, an MST source device may use a QSES msg to query
+ * downstream status for a particular stream.
+ */
+static
+int intel_dp_mst_hdcp2_check_link(struct intel_digital_port *dig_port,
+				  struct intel_connector *connector)
+{
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	int ret;
+
+	/*
+	 * We need to do the link check only for the connector involved with
+	 * HDCP port authentication and encryption.
+	 * We can re-use the hdcp->is_repeater flag to know whether the
+	 * connector is involved with HDCP port authentication and encryption.
+	 */
+	if (hdcp->is_repeater) {
+		ret = intel_dp_hdcp2_check_link(dig_port, connector);
+		if (ret)
+			return ret;
+	}
+
+	return intel_dp_mst_get_qses_status(dig_port, connector) ? 0 : -EINVAL;
+}
+
 static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
 	.write_an_aksv = intel_dp_hdcp_write_an_aksv,
 	.read_bksv = intel_dp_hdcp_read_bksv,
@@ -673,10 +793,16 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
 	.read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
 	.read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
 	.read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
-	.toggle_signalling = intel_dp_mst_hdcp_toggle_signalling,
-	.check_link = intel_dp_mst_hdcp_check_link,
+	.toggle_signalling = intel_dp_hdcp_toggle_signalling,
+	.stream_encryption = intel_dp_mst_hdcp_stream_encryption,
+	.check_link = intel_dp_hdcp_check_link,
 	.hdcp_capable = intel_dp_hdcp_capable,
-
+	.write_2_2_msg = intel_dp_hdcp2_write_msg,
+	.read_2_2_msg = intel_dp_hdcp2_read_msg,
+	.config_stream_type = intel_dp_hdcp2_config_stream_type,
+	.stream_2_2_encryption = intel_dp_mst_hdcp2_stream_encryption,
+	.check_2_2_link = intel_dp_mst_hdcp2_check_link,
+	.hdcp_2_2_capable = intel_dp_hdcp2_capable,
 	.protocol = HDCP_PROTOCOL_DP,
 };
 
@@ -693,10 +819,10 @@ int intel_dp_init_hdcp(struct intel_digital_port *dig_port,
 		return 0;
 
 	if (intel_connector->mst_port)
-		return intel_hdcp_init(intel_connector, port,
+		return intel_hdcp_init(intel_connector, dig_port,
 				       &intel_dp_mst_hdcp_shim);
 	else if (!intel_dp_is_edp(intel_dp))
-		return intel_hdcp_init(intel_connector, port,
+		return intel_hdcp_init(intel_connector, dig_port,
 				       &intel_dp_hdcp_shim);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index d8c6d7054d11d929ff8d4170b10b96dcd2c6dcda..892d7db7d94f699e996d27bc3fc552818935a1e7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -34,18 +34,6 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
 		      link_status[3], link_status[4], link_status[5]);
 }
 
-static int intel_dp_lttpr_count(struct intel_dp *intel_dp)
-{
-	int count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
-
-	/*
-	 * Pretend no LTTPRs in case of LTTPR detection error, or
-	 * if too many (>8) LTTPRs are detected. This translates to link
-	 * training in transparent mode.
-	 */
-	return count <= 0 ? 0 : count;
-}
-
 static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
 {
 	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
@@ -142,6 +130,17 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp)
 		return 0;
 
 	ret = intel_dp_read_lttpr_common_caps(intel_dp);
+	if (!ret)
+		return 0;
+
+	lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
+	/*
+	 * Don't set LTTPR transparent mode explicitly if no LTTPRs are
+	 * detected, as this breaks link training at least on the Dell WD19TB
+	 * dock.
+	 */
+	if (lttpr_count == 0)
+		return 0;
 
 	/*
 	 * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
@@ -150,17 +149,12 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp)
 	 */
 	intel_dp_set_lttpr_transparent_mode(intel_dp, true);
 
-	if (!ret)
-		return 0;
-
-	lttpr_count = intel_dp_lttpr_count(intel_dp);
-
 	/*
 	 * In case of unsupported number of LTTPRs or failing to switch to
 	 * non-transparent mode fall-back to transparent link training mode,
 	 * still taking into account any LTTPR common lane- rate/count limits.
 	 */
-	if (lttpr_count == 0)
+	if (lttpr_count < 0)
 		return 0;
 
 	if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
@@ -222,11 +216,11 @@ intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
 				     enum drm_dp_phy dp_phy)
 {
 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-	int lttpr_count = intel_dp_lttpr_count(intel_dp);
+	int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
 
-	drm_WARN_ON_ONCE(&i915->drm, lttpr_count == 0 && dp_phy != DP_PHY_DPRX);
+	drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);
 
-	return lttpr_count == 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
+	return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
 }
 
 static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
@@ -434,7 +428,7 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
 		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
 				  &rate_select, 1);
 
-	link_config[0] = 0;
+	link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
 	link_config[1] = DP_SET_ANSI_8B10B;
 	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
 
@@ -697,9 +691,9 @@ static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
  * @intel_dp: DP struct
  * @crtc_state: state for CRTC attached to the encoder
  *
- * Stop the link training of the @intel_dp port, disabling the test pattern
- * symbol generation on the port and disabling the training pattern in
- * the sink's DPCD.
+ * Stop the link training of the @intel_dp port, disabling the training
+ * pattern in the sink's DPCD, and disabling the test pattern symbol
+ * generation on the port.
  *
  * What symbols are output on the port after this point is
  * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
@@ -713,10 +707,9 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
 {
 	intel_dp->link_trained = true;
 
-	intel_dp_program_link_training_pattern(intel_dp,
-					       crtc_state,
-					       DP_TRAINING_PATTERN_DISABLE);
 	intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
+	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
+					       DP_TRAINING_PATTERN_DISABLE);
 }
 
 static bool
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 3286b232be0b8f0d1dbb1340a9408fea82b1fc9c..b4621ed0127e7a7336117081ac079871c0d4841e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -53,8 +53,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->hw.adjusted_mode;
-	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
-					   DP_DPCD_QUIRK_CONSTANT_N);
+	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
 	int bpp, slots = -EINVAL;
 
 	crtc_state->lane_count = limits->max_lane_count;
@@ -571,7 +570,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
 	if (conn_state->content_protection ==
 	    DRM_MODE_CONTENT_PROTECTION_DESIRED)
 		intel_hdcp_enable(to_intel_connector(conn_state->connector),
-				  pipe_config->cpu_transcoder,
+				  pipe_config,
 				  (u8)conn_state->hdcp_content_type);
 }
 
@@ -831,12 +830,11 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 	intel_attach_force_audio_property(connector);
 	intel_attach_broadcast_rgb_property(connector);
 
-
-	/* TODO: Figure out how to make HDCP work on GEN12+ */
-	if (INTEL_GEN(dev_priv) < 12) {
+	if (INTEL_GEN(dev_priv) <= 12) {
 		ret = intel_dp_init_hdcp(dig_port, intel_connector);
 		if (ret)
-			DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+			drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
+				    connector->name, connector->base.id);
 	}
 
 	/*
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
new file mode 100644
index 0000000000000000000000000000000000000000..7ba7f315aaeedc34812225a41f740d34e86ad408
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -0,0 +1,1363 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+#include "intel_display_types.h"
+#include "intel_display.h"
+#include "intel_dpll.h"
+#include "intel_lvds.h"
+#include "intel_panel.h"
+
+struct intel_limit {
+	struct {
+		int min, max;
+	} dot, vco, n, m, m1, m2, p, p1;
+
+	struct {
+		int dot_limit;
+		int p2_slow, p2_fast;
+	} p2;
+};
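+
+/*
+ * Hard limits for the DPLL dividers, per platform and output type. All
+ * clock values are in kHz; intel_pll_is_valid() below checks candidate
+ * divider sets against these ranges.
+ */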
+static const struct intel_limit intel_limits_i8xx_dac = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 908000, .max = 1512000 },
+	.n = { .min = 2, .max = 16 },
+	.m = { .min = 96, .max = 140 },
+	.m1 = { .min = 18, .max = 26 },
+	.m2 = { .min = 6, .max = 16 },
+	.p = { .min = 4, .max = 128 },
+	.p1 = { .min = 2, .max = 33 },
+	.p2 = { .dot_limit = 165000,
+		.p2_slow = 4, .p2_fast = 2 },
+};
+
+static const struct intel_limit intel_limits_i8xx_dvo = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 908000, .max = 1512000 },
+	.n = { .min = 2, .max = 16 },
+	.m = { .min = 96, .max = 140 },
+	.m1 = { .min = 18, .max = 26 },
+	.m2 = { .min = 6, .max = 16 },
+	.p = { .min = 4, .max = 128 },
+	.p1 = { .min = 2, .max = 33 },
+	.p2 = { .dot_limit = 165000,
+		.p2_slow = 4, .p2_fast = 4 },
+};
+
+static const struct intel_limit intel_limits_i8xx_lvds = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 908000, .max = 1512000 },
+	.n = { .min = 2, .max = 16 },
+	.m = { .min = 96, .max = 140 },
+	.m1 = { .min = 18, .max = 26 },
+	.m2 = { .min = 6, .max = 16 },
+	.p = { .min = 4, .max = 128 },
+	.p1 = { .min = 1, .max = 6 },
+	.p2 = { .dot_limit = 165000,
+		.p2_slow = 14, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_i9xx_sdvo = {
+	.dot = { .min = 20000, .max = 400000 },
+	.vco = { .min = 1400000, .max = 2800000 },
+	.n = { .min = 1, .max = 6 },
+	.m = { .min = 70, .max = 120 },
+	.m1 = { .min = 8, .max = 18 },
+	.m2 = { .min = 3, .max = 7 },
+	.p = { .min = 5, .max = 80 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 200000,
+		.p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit intel_limits_i9xx_lvds = {
+	.dot = { .min = 20000, .max = 400000 },
+	.vco = { .min = 1400000, .max = 2800000 },
+	.n = { .min = 1, .max = 6 },
+	.m = { .min = 70, .max = 120 },
+	.m1 = { .min = 8, .max = 18 },
+	.m2 = { .min = 3, .max = 7 },
+	.p = { .min = 7, .max = 98 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 112000,
+		.p2_slow = 14, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_g4x_sdvo = {
+	.dot = { .min = 25000, .max = 270000 },
+	.vco = { .min = 1750000, .max = 3500000 },
+	.n = { .min = 1, .max = 4 },
+	.m = { .min = 104, .max = 138 },
+	.m1 = { .min = 17, .max = 23 },
+	.m2 = { .min = 5, .max = 11 },
+	.p = { .min = 10, .max = 30 },
+	.p1 = { .min = 1, .max = 3 },
+	.p2 = { .dot_limit = 270000,
+		.p2_slow = 10, .p2_fast = 10 },
+};
+
+static const struct intel_limit intel_limits_g4x_hdmi = {
+	.dot = { .min = 22000, .max = 400000 },
+	.vco = { .min = 1750000, .max = 3500000 },
+	.n = { .min = 1, .max = 4 },
+	.m = { .min = 104, .max = 138 },
+	.m1 = { .min = 16, .max = 23 },
+	.m2 = { .min = 5, .max = 11 },
+	.p = { .min = 5, .max = 80 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 165000,
+		.p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
+	.dot = { .min = 20000, .max = 115000 },
+	.vco = { .min = 1750000, .max = 3500000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 104, .max = 138 },
+	.m1 = { .min = 17, .max = 23 },
+	.m2 = { .min = 5, .max = 11 },
+	.p = { .min = 28, .max = 112 },
+	.p1 = { .min = 2, .max = 8 },
+	.p2 = { .dot_limit = 0,
+		.p2_slow = 14, .p2_fast = 14 },
+};
+
+static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
+	.dot = { .min = 80000, .max = 224000 },
+	.vco = { .min = 1750000, .max = 3500000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 104, .max = 138 },
+	.m1 = { .min = 17, .max = 23 },
+	.m2 = { .min = 5, .max = 11 },
+	.p = { .min = 14, .max = 42 },
+	.p1 = { .min = 2, .max = 6 },
+	.p2 = { .dot_limit = 0,
+		.p2_slow = 7, .p2_fast = 7 },
+};
+
+static const struct intel_limit pnv_limits_sdvo = {
+	.dot = { .min = 20000, .max = 400000 },
+	.vco = { .min = 1700000, .max = 3500000 },
+	/* Pineview's Ncounter is a ring counter */
+	.n = { .min = 3, .max = 6 },
+	.m = { .min = 2, .max = 256 },
+	/* Pineview only has one combined m divider, which we treat as m2. */
+	.m1 = { .min = 0, .max = 0 },
+	.m2 = { .min = 0, .max = 254 },
+	.p = { .min = 5, .max = 80 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 200000,
+		.p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit pnv_limits_lvds = {
+	.dot = { .min = 20000, .max = 400000 },
+	.vco = { .min = 1700000, .max = 3500000 },
+	.n = { .min = 3, .max = 6 },
+	.m = { .min = 2, .max = 256 },
+	.m1 = { .min = 0, .max = 0 },
+	.m2 = { .min = 0, .max = 254 },
+	.p = { .min = 7, .max = 112 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 112000,
+		.p2_slow = 14, .p2_fast = 14 },
+};
+
+/* Ironlake / Sandybridge
+ *
+ * We calculate clock using (register_value + 2) for N/M1/M2, so here
+ * the range value for them is (actual_value - 2).
+ */
+static const struct intel_limit ilk_limits_dac = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 5 },
+	.m = { .min = 79, .max = 127 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 5, .max = 80 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit ilk_limits_single_lvds = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 79, .max = 118 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 28, .max = 112 },
+	.p1 = { .min = 2, .max = 8 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 14, .p2_fast = 14 },
+};
+
+static const struct intel_limit ilk_limits_dual_lvds = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 79, .max = 127 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 14, .max = 56 },
+	.p1 = { .min = 2, .max = 8 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 7, .p2_fast = 7 },
+};
+
+/* LVDS 100 MHz refclk limits. */
+static const struct intel_limit ilk_limits_single_lvds_100m = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 2 },
+	.m = { .min = 79, .max = 126 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 28, .max = 112 },
+	.p1 = { .min = 2, .max = 8 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 14, .p2_fast = 14 },
+};
+
+static const struct intel_limit ilk_limits_dual_lvds_100m = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 79, .max = 126 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 14, .max = 42 },
+	.p1 = { .min = 2, .max = 6 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 7, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_vlv = {
+	/*
+	 * These are the data rate limits (measured in fast clocks)
+	 * since those are the strictest limits we have. The fast
+	 * clock and actual rate limits are more relaxed, so checking
+	 * them would make no difference.
+	 */
+	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
+	.vco = { .min = 4000000, .max = 6000000 },
+	.n = { .min = 1, .max = 7 },
+	.m1 = { .min = 2, .max = 3 },
+	.m2 = { .min = 11, .max = 156 },
+	.p1 = { .min = 2, .max = 3 },
+	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
+};
+
+static const struct intel_limit intel_limits_chv = {
+	/*
+	 * These are the data rate limits (measured in fast clocks)
+	 * since those are the strictest limits we have.  The fast
+	 * clock and actual rate limits are more relaxed, so checking
+	 * them would make no difference.
+	 */
+	.dot = { .min = 25000 * 5, .max = 540000 * 5 },
+	.vco = { .min = 4800000, .max = 6480000 },
+	.n = { .min = 1, .max = 1 },
+	.m1 = { .min = 2, .max = 2 },
+	.m2 = { .min = 24 << 22, .max = 175 << 22 },
+	.p1 = { .min = 2, .max = 4 },
+	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
+};
+
+static const struct intel_limit intel_limits_bxt = {
+	/* FIXME: find real dot limits */
+	.dot = { .min = 0, .max = INT_MAX },
+	.vco = { .min = 4800000, .max = 6700000 },
+	.n = { .min = 1, .max = 1 },
+	.m1 = { .min = 2, .max = 2 },
+	/* FIXME: find real m2 limits */
+	.m2 = { .min = 2 << 22, .max = 255 << 22 },
+	.p1 = { .min = 2, .max = 4 },
+	.p2 = { .p2_slow = 1, .p2_fast = 20 },
+};
+
+/*
+ * Platform specific helpers to calculate the port PLL loopback- (clock.m),
+ * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
+ * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
+ * The helpers' return value is the rate of the clock that is fed to the
+ * display engine's pipe, which can be the above fast dot clock rate or a
+ * divided-down version of it.
+ */
+/* m1 is reserved as 0 in Pineview, n is a ring counter */
+int pnv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+	clock->m = clock->m2 + 2;
+	clock->p = clock->p1 * clock->p2;
+	if (WARN_ON(clock->n == 0 || clock->p == 0))
+		return 0;
+	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot;
+}
+
+static u32 i9xx_dpll_compute_m(struct dpll *dpll)
+{
+	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
+}
+
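+/*
+ * Worked example (illustrative numbers only): refclk = 96000 kHz, n = 3,
+ * m1 = 12, m2 = 5, p1 = 2, p2 = 5 gives m = 5 * (12 + 2) + (5 + 2) = 77,
+ * vco = 96000 * 77 / (3 + 2) = 1478400 kHz and
+ * dot = 1478400 / (2 * 5) = 147840 kHz.
+ */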
+int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
+{
+	clock->m = i9xx_dpll_compute_m(clock);
+	clock->p = clock->p1 * clock->p2;
+	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
+		return 0;
+	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot;
+}
+
+int vlv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+	clock->m = clock->m1 * clock->m2;
+	clock->p = clock->p1 * clock->p2;
+	if (WARN_ON(clock->n == 0 || clock->p == 0))
+		return 0;
+	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
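+	/* the pipe runs at 1/5 of the 5x fast dot clock computed above */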
+	return clock->dot / 5;
+}
+
+int chv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+	clock->m = clock->m1 * clock->m2;
+	clock->p = clock->p1 * clock->p2;
+	if (WARN_ON(clock->n == 0 || clock->p == 0))
+		return 0;
+	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
+					   clock->n << 22);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
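+	/* as on VLV, the pipe runs at 1/5 of the fast dot clock */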
+	return clock->dot / 5;
+}
+
+/*
+ * Returns whether the given set of divisors is valid for a given refclk with
+ * the given connectors.
+ */
+static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
+			       const struct intel_limit *limit,
+			       const struct dpll *clock)
+{
+	if (clock->n < limit->n.min || limit->n.max < clock->n)
+		return false;
+	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+		return false;
+	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+		return false;
+	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+		return false;
+
+	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
+	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
+		if (clock->m1 <= clock->m2)
+			return false;
+
+	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+	    !IS_GEN9_LP(dev_priv)) {
+		if (clock->p < limit->p.min || limit->p.max < clock->p)
+			return false;
+		if (clock->m < limit->m.min || limit->m.max < clock->m)
+			return false;
+	}
+
+	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+		return false;
+	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
+	 * connector, etc., rather than just a single range.
+	 */
+	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+		return false;
+
+	return true;
+}
+
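+/*
+ * Select the slow vs. fast p2 post divider: for LVDS this is keyed off
+ * single vs. dual channel mode, for everything else off the target dot
+ * clock vs. the p2 dot_limit.
+ */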
+static int
+i9xx_select_p2_div(const struct intel_limit *limit,
+		   const struct intel_crtc_state *crtc_state,
+		   int target)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		/*
+		 * For LVDS just rely on its current settings for dual-channel.
+		 * We haven't figured out how to reliably set up different
+		 * single/dual channel state, if we even can.
+		 */
+		if (intel_is_dual_link_lvds(dev_priv))
+			return limit->p2.p2_fast;
+		else
+			return limit->p2.p2_slow;
+	} else {
+		if (target < limit->p2.dot_limit)
+			return limit->p2.p2_slow;
+		else
+			return limit->p2.p2_fast;
+	}
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+i9xx_find_best_dpll(const struct intel_limit *limit,
+		    struct intel_crtc_state *crtc_state,
+		    int target, int refclk, struct dpll *match_clock,
+		    struct dpll *best_clock)
+{
+	struct drm_device *dev = crtc_state->uapi.crtc->dev;
+	struct dpll clock;
+	int err = target;
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+	     clock.m1++) {
+		for (clock.m2 = limit->m2.min;
+		     clock.m2 <= limit->m2.max; clock.m2++) {
+			if (clock.m2 >= clock.m1)
+				break;
+			for (clock.n = limit->n.min;
+			     clock.n <= limit->n.max; clock.n++) {
+				for (clock.p1 = limit->p1.min;
+					clock.p1 <= limit->p1.max; clock.p1++) {
+					int this_err;
+
+					i9xx_calc_dpll_params(refclk, &clock);
+					if (!intel_pll_is_valid(to_i915(dev),
+								limit,
+								&clock))
+						continue;
+					if (match_clock &&
+					    clock.p != match_clock->p)
+						continue;
+
+					this_err = abs(clock.dot - target);
+					if (this_err < err) {
+						*best_clock = clock;
+						err = this_err;
+					}
+				}
+			}
+		}
+	}
+
+	return (err != target);
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (m2 + 2) / n / p1 / p2 (on Pineview m1 is unused and n has
+ * no +2 term).
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+pnv_find_best_dpll(const struct intel_limit *limit,
+		   struct intel_crtc_state *crtc_state,
+		   int target, int refclk, struct dpll *match_clock,
+		   struct dpll *best_clock)
+{
+	struct drm_device *dev = crtc_state->uapi.crtc->dev;
+	struct dpll clock;
+	int err = target;
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+	     clock.m1++) {
+		for (clock.m2 = limit->m2.min;
+		     clock.m2 <= limit->m2.max; clock.m2++) {
+			for (clock.n = limit->n.min;
+			     clock.n <= limit->n.max; clock.n++) {
+				for (clock.p1 = limit->p1.min;
+					clock.p1 <= limit->p1.max; clock.p1++) {
+					int this_err;
+
+					pnv_calc_dpll_params(refclk, &clock);
+					if (!intel_pll_is_valid(to_i915(dev),
+								limit,
+								&clock))
+						continue;
+					if (match_clock &&
+					    clock.p != match_clock->p)
+						continue;
+
+					this_err = abs(clock.dot - target);
+					if (this_err < err) {
+						*best_clock = clock;
+						err = this_err;
+					}
+				}
+			}
+		}
+	}
+
+	return (err != target);
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+g4x_find_best_dpll(const struct intel_limit *limit,
+		   struct intel_crtc_state *crtc_state,
+		   int target, int refclk, struct dpll *match_clock,
+		   struct dpll *best_clock)
+{
+	struct drm_device *dev = crtc_state->uapi.crtc->dev;
+	struct dpll clock;
+	int max_n;
+	bool found = false;
+	/* approximately equals target * 0.00585 (1/256 + 1/512) */
+	int err_most = (target >> 8) + (target >> 9);
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+	max_n = limit->n.max;
+	/* based on hardware requirement, prefer smaller n for better precision */
+	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+		/* based on hardware requirement, prefer larger m1,m2 */
+		for (clock.m1 = limit->m1.max;
+		     clock.m1 >= limit->m1.min; clock.m1--) {
+			for (clock.m2 = limit->m2.max;
+			     clock.m2 >= limit->m2.min; clock.m2--) {
+				for (clock.p1 = limit->p1.max;
+				     clock.p1 >= limit->p1.min; clock.p1--) {
+					int this_err;
+
+					i9xx_calc_dpll_params(refclk, &clock);
+					if (!intel_pll_is_valid(to_i915(dev),
+								limit,
+								&clock))
+						continue;
+
+					this_err = abs(clock.dot - target);
+					if (this_err < err_most) {
+						*best_clock = clock;
+						err_most = this_err;
+						max_n = clock.n;
+						found = true;
+					}
+				}
+			}
+		}
+	}
+	return found;
+}
+
+/*
+ * Check if the calculated PLL configuration is more optimal compared to the
+ * best configuration and error found so far. Return the calculated error.
+ */
+static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+			       const struct dpll *calculated_clock,
+			       const struct dpll *best_clock,
+			       unsigned int best_error_ppm,
+			       unsigned int *error_ppm)
+{
+	/*
+	 * For CHV ignore the error and consider only the P value.
+	 * Prefer a bigger P value based on HW requirements.
+	 */
+	if (IS_CHERRYVIEW(to_i915(dev))) {
+		*error_ppm = 0;
+
+		return calculated_clock->p > best_clock->p;
+	}
+
+	if (drm_WARN_ON_ONCE(dev, !target_freq))
+		return false;
+
+	*error_ppm = div_u64(1000000ULL *
+				abs(target_freq - calculated_clock->dot),
+			     target_freq);
+	/*
+	 * Prefer a better P value over a better (smaller) error if the error
+	 * is small. Ensure this preference for future configurations too by
+	 * setting the error to 0.
+	 */
+	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
+		*error_ppm = 0;
+
+		return true;
+	}
+
+	return *error_ppm + 10 < best_error_ppm;
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (m1 * m2) / n / (p1 * p2); this is the 5x fast clock, the
+ * pipe runs at 1/5 of it.
+ */
+static bool
+vlv_find_best_dpll(const struct intel_limit *limit,
+		   struct intel_crtc_state *crtc_state,
+		   int target, int refclk, struct dpll *match_clock,
+		   struct dpll *best_clock)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_device *dev = crtc->base.dev;
+	struct dpll clock;
+	unsigned int bestppm = 1000000;
+	/* min update 19.2 MHz */
+	int max_n = min(limit->n.max, refclk / 19200);
+	bool found = false;
+
+	target *= 5; /* fast clock */
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	/* based on hardware requirement, prefer smaller n for better precision */
+	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
+			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+				clock.p = clock.p1 * clock.p2;
+				/* based on hardware requirement, prefer bigger m1,m2 values */
+				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+					unsigned int ppm;
+
+					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
+								     refclk * clock.m1);
+
+					vlv_calc_dpll_params(refclk, &clock);
+
+					if (!intel_pll_is_valid(to_i915(dev),
+								limit,
+								&clock))
+						continue;
+
+					if (!vlv_PLL_is_optimal(dev, target,
+								&clock,
+								best_clock,
+								bestppm, &ppm))
+						continue;
+
+					*best_clock = clock;
+					bestppm = ppm;
+					found = true;
+				}
+			}
+		}
+	}
+
+	return found;
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (m1 * m2 / 2^22) / n / (p1 * p2), with m2 in .22 fixed point;
+ * this is again the 5x fast clock, the pipe runs at 1/5 of it.
+ */
+static bool
+chv_find_best_dpll(const struct intel_limit *limit,
+		   struct intel_crtc_state *crtc_state,
+		   int target, int refclk, struct dpll *match_clock,
+		   struct dpll *best_clock)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_device *dev = crtc->base.dev;
+	unsigned int best_error_ppm;
+	struct dpll clock;
+	u64 m2;
+	bool found = false;
+
+	memset(best_clock, 0, sizeof(*best_clock));
+	best_error_ppm = 1000000;
+
+	/*
+	 * Based on the hardware doc, n is always set to 1 and m1 is
+	 * always set to 2.  If we need to support a 200 MHz refclk, we
+	 * need to revisit this because n may no longer be 1.
+	 */
+	clock.n = 1;
+	clock.m1 = 2;
+	target *= 5;	/* fast clock */
+
+	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+		for (clock.p2 = limit->p2.p2_fast;
+				clock.p2 >= limit->p2.p2_slow;
+				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+			unsigned int error_ppm;
+
+			clock.p = clock.p1 * clock.p2;
+
+			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
+						   refclk * clock.m1);
+
+			if (m2 > INT_MAX/clock.m1)
+				continue;
+
+			clock.m2 = m2;
+
+			chv_calc_dpll_params(refclk, &clock);
+
+			if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
+				continue;
+
+			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+						best_error_ppm, &error_ppm))
+				continue;
+
+			*best_clock = clock;
+			best_error_ppm = error_ppm;
+			found = true;
+		}
+	}
+
+	return found;
+}
+
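+/*
+ * BXT has a CHV-style PLL (fixed n = 1 and m1 = 2, fractional m2), so
+ * the CHV divider search is reused with the BXT limits and a 100 MHz
+ * refclk.
+ */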
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
+			struct dpll *best_clock)
+{
+	int refclk = 100000;
+	const struct intel_limit *limit = &intel_limits_bxt;
+
+	return chv_find_best_dpll(limit, crtc_state,
+				  crtc_state->port_clock, refclk,
+				  NULL, best_clock);
+}
+
+static u32 pnv_dpll_compute_fp(struct dpll *dpll)
+{
+	return (1 << dpll->n) << 16 | dpll->m2;
+}
+
+static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
+				     struct intel_crtc_state *crtc_state,
+				     struct dpll *reduced_clock)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	u32 fp, fp2 = 0;
+
+	if (IS_PINEVIEW(dev_priv)) {
+		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
+		if (reduced_clock)
+			fp2 = pnv_dpll_compute_fp(reduced_clock);
+	} else {
+		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
+		if (reduced_clock)
+			fp2 = i9xx_dpll_compute_fp(reduced_clock);
+	}
+
+	crtc_state->dpll_hw_state.fp0 = fp;
+
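+	/* fp1 holds the reduced dividers used for LVDS downclocking, if any */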
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+	    reduced_clock) {
+		crtc_state->dpll_hw_state.fp1 = fp2;
+	} else {
+		crtc_state->dpll_hw_state.fp1 = fp;
+	}
+}
+
+static void i9xx_compute_dpll(struct intel_crtc *crtc,
+			      struct intel_crtc_state *crtc_state,
+			      struct dpll *reduced_clock)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	u32 dpll;
+	struct dpll *clock = &crtc_state->dpll;
+
+	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+
+	dpll = DPLL_VGA_MODE_DIS;
+
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
+		dpll |= DPLLB_MODE_LVDS;
+	else
+		dpll |= DPLLB_MODE_DAC_SERIAL;
+
+	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+		dpll |= (crtc_state->pixel_multiplier - 1)
+			<< SDVO_MULTIPLIER_SHIFT_HIRES;
+	}
+
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+		dpll |= DPLL_SDVO_HIGH_SPEED;
+
+	if (intel_crtc_has_dp_encoder(crtc_state))
+		dpll |= DPLL_SDVO_HIGH_SPEED;
+
+	/* compute bitmask from p1 value */
+	if (IS_PINEVIEW(dev_priv))
+		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+	else {
+		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+		if (IS_G4X(dev_priv) && reduced_clock)
+			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+	}
+	switch (clock->p2) {
+	case 5:
+		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+		break;
+	case 7:
+		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+		break;
+	case 10:
+		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+		break;
+	case 14:
+		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+		break;
+	}
+	if (INTEL_GEN(dev_priv) >= 4)
+		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+
+	if (crtc_state->sdvo_tv_clock)
+		dpll |= PLL_REF_INPUT_TVCLKINBC;
+	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+		 intel_panel_use_ssc(dev_priv))
+		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+	else
+		dpll |= PLL_REF_INPUT_DREFCLK;
+
+	dpll |= DPLL_VCO_ENABLE;
+	crtc_state->dpll_hw_state.dpll = dpll;
+
+	if (INTEL_GEN(dev_priv) >= 4) {
+		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
+			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
+		crtc_state->dpll_hw_state.dpll_md = dpll_md;
+	}
+}
+
+static void i8xx_compute_dpll(struct intel_crtc *crtc,
+			      struct intel_crtc_state *crtc_state,
+			      struct dpll *reduced_clock)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	u32 dpll;
+	struct dpll *clock = &crtc_state->dpll;
+
+	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+
+	dpll = DPLL_VGA_MODE_DIS;
+
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+	} else {
+		if (clock->p1 == 2)
+			dpll |= PLL_P1_DIVIDE_BY_TWO;
+		else
+			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+		if (clock->p2 == 4)
+			dpll |= PLL_P2_DIVIDE_BY_4;
+	}
+
+	/*
+	 * Bspec:
+	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
+	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
+	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
+	 *  Enable) must be set to “1” in both the DPLL A Control Register
+	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
+	 *
+	 * For simplicity we keep both bits always enabled in both
+	 * DPLLs. The spec says we should disable the DVO 2X clock
+	 * when not needed, but this seems to work fine in practice.
+	 */
+	if (IS_I830(dev_priv) ||
+	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
+		dpll |= DPLL_DVO_2X_MODE;
+
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+	    intel_panel_use_ssc(dev_priv))
+		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+	else
+		dpll |= PLL_REF_INPUT_DREFCLK;
+
+	dpll |= DPLL_VCO_ENABLE;
+	crtc_state->dpll_hw_state.dpll = dpll;
+}
+
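+/*
+ * On DDI platforms the divider values are computed by the shared DPLL
+ * code, so all that is left to do here is to reserve a DPLL for the
+ * CRTC (pre-gen11 DSI excepted, as it drives its own PLL).
+ */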
+static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
+				  struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(crtc_state->uapi.state);
+
+	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
+	    INTEL_GEN(dev_priv) >= 11) {
+		struct intel_encoder *encoder =
+			intel_get_crtc_new_encoder(state, crtc_state);
+
+		if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
+			drm_dbg_kms(&dev_priv->drm,
+				    "failed to find PLL for pipe %c\n",
+				    pipe_name(crtc->pipe));
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
+{
+	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
+}
+
+static void ilk_compute_dpll(struct intel_crtc *crtc,
+			     struct intel_crtc_state *crtc_state,
+			     struct dpll *reduced_clock)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	u32 dpll, fp, fp2;
+	int factor;
+
+	/* Enable autotuning of the PLL clock (if permissible) */
+	factor = 21;
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		if ((intel_panel_use_ssc(dev_priv) &&
+		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
+		    (HAS_PCH_IBX(dev_priv) &&
+		     intel_is_dual_link_lvds(dev_priv)))
+			factor = 25;
+	} else if (crtc_state->sdvo_tv_clock) {
+		factor = 20;
+	}
+
+	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
+
+	if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
+		fp |= FP_CB_TUNE;
+
+	if (reduced_clock) {
+		fp2 = i9xx_dpll_compute_fp(reduced_clock);
+
+		if (reduced_clock->m < factor * reduced_clock->n)
+			fp2 |= FP_CB_TUNE;
+	} else {
+		fp2 = fp;
+	}
+
+	dpll = 0;
+
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
+		dpll |= DPLLB_MODE_LVDS;
+	else
+		dpll |= DPLLB_MODE_DAC_SERIAL;
+
+	dpll |= (crtc_state->pixel_multiplier - 1)
+		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+		dpll |= DPLL_SDVO_HIGH_SPEED;
+
+	if (intel_crtc_has_dp_encoder(crtc_state))
+		dpll |= DPLL_SDVO_HIGH_SPEED;
+
+	/*
+	 * The high speed IO clock is only really required for
+	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
+	 * possible to share the DPLL between CRT and HDMI. Enabling
+	 * the clock needlessly does no real harm, except use up a
+	 * bit of power potentially.
+	 *
+	 * We'll limit this to IVB with 3 pipes, since it has only two
+	 * DPLLs and so DPLL sharing is the only way to get three pipes
+	 * driving PCH ports at the same time. On SNB we could do this,
+	 * and potentially avoid enabling the second DPLL, but it's not
+	 * clear if it's a win or a loss power-wise. No point in doing
+	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
+	 */
+	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
+	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+		dpll |= DPLL_SDVO_HIGH_SPEED;
+
+	/* compute bitmask from p1 value */
+	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+	/* also FPA1 */
+	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+
+	switch (crtc_state->dpll.p2) {
+	case 5:
+		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+		break;
+	case 7:
+		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+		break;
+	case 10:
+		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+		break;
+	case 14:
+		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+		break;
+	}
+
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+	    intel_panel_use_ssc(dev_priv))
+		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+	else
+		dpll |= PLL_REF_INPUT_DREFCLK;
+
+	dpll |= DPLL_VCO_ENABLE;
+
+	crtc_state->dpll_hw_state.dpll = dpll;
+	crtc_state->dpll_hw_state.fp0 = fp;
+	crtc_state->dpll_hw_state.fp1 = fp2;
+}
+
+static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
+				  struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(crtc_state->uapi.state);
+	const struct intel_limit *limit;
+	int refclk = 120000;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+	if (!crtc_state->has_pch_encoder)
+		return 0;
+
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		if (intel_panel_use_ssc(dev_priv)) {
+			drm_dbg_kms(&dev_priv->drm,
+				    "using SSC reference clock of %d kHz\n",
+				    dev_priv->vbt.lvds_ssc_freq);
+			refclk = dev_priv->vbt.lvds_ssc_freq;
+		}
+
+		if (intel_is_dual_link_lvds(dev_priv)) {
+			if (refclk == 100000)
+				limit = &ilk_limits_dual_lvds_100m;
+			else
+				limit = &ilk_limits_dual_lvds;
+		} else {
+			if (refclk == 100000)
+				limit = &ilk_limits_single_lvds_100m;
+			else
+				limit = &ilk_limits_single_lvds;
+		}
+	} else {
+		limit = &ilk_limits_dac;
+	}
+
+	if (!crtc_state->clock_set &&
+	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+				refclk, NULL, &crtc_state->dpll)) {
+		drm_err(&dev_priv->drm,
+			"Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+
+	ilk_compute_dpll(crtc, crtc_state, NULL);
+
+	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "failed to find PLL for pipe %c\n",
+			    pipe_name(crtc->pipe));
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void vlv_compute_dpll(struct intel_crtc *crtc,
+		      struct intel_crtc_state *pipe_config)
+{
+	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+	if (crtc->pipe != PIPE_A)
+		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+	/* DPLL not used with DSI, but still need the rest set up */
+	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
+		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
+			DPLL_EXT_BUFFER_ENABLE_VLV;
+
+	pipe_config->dpll_hw_state.dpll_md =
+		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+void chv_compute_dpll(struct intel_crtc *crtc,
+		      struct intel_crtc_state *pipe_config)
+{
+	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+	if (crtc->pipe != PIPE_A)
+		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+	/* DPLL not used with DSI, but still need the rest set up */
+	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
+		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
+
+	pipe_config->dpll_hw_state.dpll_md =
+		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+static int chv_crtc_compute_clock(struct intel_crtc *crtc,
+				  struct intel_crtc_state *crtc_state)
+{
+	int refclk = 100000;
+	const struct intel_limit *limit = &intel_limits_chv;
+	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	if (!crtc_state->clock_set &&
+	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+				refclk, NULL, &crtc_state->dpll)) {
+		drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+
+	chv_compute_dpll(crtc, crtc_state);
+
+	return 0;
+}
+
+static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
+				  struct intel_crtc_state *crtc_state)
+{
+	int refclk = 100000;
+	const struct intel_limit *limit = &intel_limits_vlv;
+	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	if (!crtc_state->clock_set &&
+	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+				refclk, NULL, &crtc_state->dpll)) {
+		drm_err(&i915->drm,  "Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+
+	vlv_compute_dpll(crtc, crtc_state);
+
+	return 0;
+}
+
+static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
+				  struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_limit *limit;
+	int refclk = 96000;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		if (intel_panel_use_ssc(dev_priv)) {
+			refclk = dev_priv->vbt.lvds_ssc_freq;
+			drm_dbg_kms(&dev_priv->drm,
+				    "using SSC reference clock of %d kHz\n",
+				    refclk);
+		}
+
+		if (intel_is_dual_link_lvds(dev_priv))
+			limit = &intel_limits_g4x_dual_channel_lvds;
+		else
+			limit = &intel_limits_g4x_single_channel_lvds;
+	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+		limit = &intel_limits_g4x_hdmi;
+	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
+		limit = &intel_limits_g4x_sdvo;
+	} else {
+		/* The i9xx SDVO limits also cover all other outputs */
+		limit = &intel_limits_i9xx_sdvo;
+	}
+
+	if (!crtc_state->clock_set &&
+	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+				refclk, NULL, &crtc_state->dpll)) {
+		drm_err(&dev_priv->drm,
+			"Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+
+	i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+	return 0;
+}
+
+static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
+				  struct intel_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	const struct intel_limit *limit;
+	int refclk = 96000;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		if (intel_panel_use_ssc(dev_priv)) {
+			refclk = dev_priv->vbt.lvds_ssc_freq;
+			drm_dbg_kms(&dev_priv->drm,
+				    "using SSC reference clock of %d kHz\n",
+				    refclk);
+		}
+
+		limit = &pnv_limits_lvds;
+	} else {
+		limit = &pnv_limits_sdvo;
+	}
+
+	if (!crtc_state->clock_set &&
+	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+				refclk, NULL, &crtc_state->dpll)) {
+		drm_err(&dev_priv->drm,
+			"Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+
+	i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+	return 0;
+}
+
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
+				   struct intel_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	const struct intel_limit *limit;
+	int refclk = 96000;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		if (intel_panel_use_ssc(dev_priv)) {
+			refclk = dev_priv->vbt.lvds_ssc_freq;
+			drm_dbg_kms(&dev_priv->drm,
+				    "using SSC reference clock of %d kHz\n",
+				    refclk);
+		}
+
+		limit = &intel_limits_i9xx_lvds;
+	} else {
+		limit = &intel_limits_i9xx_sdvo;
+	}
+
+	if (!crtc_state->clock_set &&
+	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+				 refclk, NULL, &crtc_state->dpll)) {
+		drm_err(&dev_priv->drm,
+			"Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+
+	i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+	return 0;
+}
+
+static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
+				   struct intel_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	const struct intel_limit *limit;
+	int refclk = 48000;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		if (intel_panel_use_ssc(dev_priv)) {
+			refclk = dev_priv->vbt.lvds_ssc_freq;
+			drm_dbg_kms(&dev_priv->drm,
+				    "using SSC reference clock of %d kHz\n",
+				    refclk);
+		}
+
+		limit = &intel_limits_i8xx_lvds;
+	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
+		limit = &intel_limits_i8xx_dvo;
+	} else {
+		limit = &intel_limits_i8xx_dac;
+	}
+
+	if (!crtc_state->clock_set &&
+	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+				 refclk, NULL, &crtc_state->dpll)) {
+		drm_err(&dev_priv->drm,
+			"Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+
+	i8xx_compute_dpll(crtc, crtc_state, NULL);
+
+	return 0;
+}
+
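+/*
+ * Select the platform specific .crtc_compute_clock() hook, checking the
+ * newest platforms first.
+ */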
+void
+intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
+{
+	if (INTEL_GEN(dev_priv) >= 9 || HAS_DDI(dev_priv))
+		dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
+	else if (HAS_PCH_SPLIT(dev_priv))
+		dev_priv->display.crtc_compute_clock = ilk_crtc_compute_clock;
+	else if (IS_CHERRYVIEW(dev_priv))
+		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
+	else if (IS_VALLEYVIEW(dev_priv))
+		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
+	else if (IS_G4X(dev_priv))
+		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
+	else if (IS_PINEVIEW(dev_priv))
+		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
+	else if (!IS_GEN(dev_priv, 2))
+		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
+	else
+		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
new file mode 100644
index 0000000000000000000000000000000000000000..caf4615092e1ef962e072a79ed9fc484ea44303e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_DPLL_H_
+#define _INTEL_DPLL_H_
+
+struct dpll;
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+
+void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
+int vlv_calc_dpll_params(int refclk, struct dpll *clock);
+int pnv_calc_dpll_params(int refclk, struct dpll *clock);
+int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
+void vlv_compute_dpll(struct intel_crtc *crtc,
+		      struct intel_crtc_state *pipe_config);
+void chv_compute_dpll(struct intel_crtc *crtc,
+		      struct intel_crtc_state *pipe_config);
+
+#endif /* _INTEL_DPLL_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index b53c50372918f442871ca4ecab51fce3ebe17114..584c14c4cbd0e17f2c56e90bb5f7b01cf0a53572 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -43,7 +43,7 @@
 
 #define PANEL_PWM_MAX_VALUE		0xFF
 
-static u32 dcs_get_backlight(struct intel_connector *connector)
+static u32 dcs_get_backlight(struct intel_connector *connector, enum pipe unused)
 {
 	struct intel_encoder *encoder = intel_attached_encoder(connector);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@@ -77,7 +77,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
 	}
 }
 
-static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
+static void dcs_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
 {
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
 	struct mipi_dsi_device *dsi_device;
@@ -111,10 +111,9 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
 }
 
 static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
-				 const struct drm_connector_state *conn_state)
+				 const struct drm_connector_state *conn_state, u32 level)
 {
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
-	struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
 	struct mipi_dsi_device *dsi_device;
 	enum port port;
 
@@ -142,7 +141,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
 				   &cabc, sizeof(cabc));
 	}
 
-	dcs_set_backlight(conn_state, panel->backlight.level);
+	dcs_set_backlight(conn_state, level);
 }
 
 static int dcs_setup_backlight(struct intel_connector *connector,
@@ -156,6 +155,14 @@ static int dcs_setup_backlight(struct intel_connector *connector,
 	return 0;
 }
 
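+/* DCS backlight hooks, gathered into the common panel backlight vtable. */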
+static const struct intel_panel_bl_funcs dcs_bl_funcs = {
+	.setup = dcs_setup_backlight,
+	.enable = dcs_enable_backlight,
+	.disable = dcs_disable_backlight,
+	.set = dcs_set_backlight,
+	.get = dcs_get_backlight,
+};
+
 int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
 {
 	struct drm_device *dev = intel_connector->base.dev;
@@ -169,11 +176,7 @@ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
 	if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI))
 		return -EINVAL;
 
-	panel->backlight.setup = dcs_setup_backlight;
-	panel->backlight.enable = dcs_enable_backlight;
-	panel->backlight.disable = dcs_disable_backlight;
-	panel->backlight.set = dcs_set_backlight;
-	panel->backlight.get = dcs_get_backlight;
+	panel->backlight.funcs = &dcs_bl_funcs;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 237dbb1ba0eeb6c38c95e81e4902ede184b318ab..090cd76266c6ead398ea15e2c78d442c292b55d0 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -301,12 +301,8 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
 		dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
 
-	/*I915_WRITE(DVOB_SRCDIM,
-	  (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
-	  (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
 	intel_de_write(dev_priv, dvo_srcdim_reg,
 		       (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
-	/*I915_WRITE(DVOB, dvo_val);*/
 	intel_de_write(dev_priv, dvo_reg, dvo_val);
 }
 
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index a5b072816a7b21626eb1ea174a0aa5da20331812..5fd4fa4805ef2ba34b2664367c3092810c534a43 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -676,7 +676,7 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
 }
 
 static bool tiling_is_valid(struct drm_i915_private *dev_priv,
-			    uint64_t modifier)
+			    u64 modifier)
 {
 	switch (modifier) {
 	case DRM_FORMAT_MOD_LINEAR:
@@ -742,6 +742,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 		cache->fence_id = plane_state->vma->fence->id;
 	else
 		cache->fence_id = -1;
+
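+	/* cached so intel_fbc_can_activate() can reject FBC while PSR2 is active */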
+	cache->psr2_active = crtc_state->has_psr2;
 }
 
 static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
@@ -914,6 +916,16 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 		return false;
 	}
 
+	/*
+	 * Tigerlake does not support FBC with PSR2.
+	 * The recommendation is to keep this combination disabled.
+	 * Bspec: 50422 HSD: 14010260002
+	 */
+	if (fbc->state_cache.psr2_active && IS_TIGERLAKE(dev_priv)) {
+		fbc->no_fbc_reason = "not supported with PSR2";
+		return false;
+	}
+
 	return true;
 }
 
@@ -1433,13 +1445,6 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
 	if (!HAS_FBC(dev_priv))
 		return 0;
 
-	/*
-	 * Fbc is causing random underruns in CI execution on TGL platforms.
-	 * Disabling the same while the problem is being debugged and analyzed.
-	 */
-	if (IS_TIGERLAKE(dev_priv))
-		return 0;
-
 	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
 		return 1;
 
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 842c04e6321472490a0e5b7f9a6af500764b039b..84f853f113b953abdf9c410fe9915ffaa8b6832c 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -256,7 +256,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	 * If the object is stolen however, it will be full of whatever
 	 * garbage was left in there.
 	 */
-	if (vma->obj->stolen && !prealloc)
+	if (!i915_gem_object_is_shmem(vma->obj) && !prealloc)
 		memset_io(info->screen_base, 0, info->screen_size);
 
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -595,7 +595,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
 	 * full of whatever garbage was left in there.
 	 */
 	if (state == FBINFO_STATE_RUNNING &&
-	    intel_fb_obj(&ifbdev->fb->base)->stolen)
+	    !i915_gem_object_is_shmem(intel_fb_obj(&ifbdev->fb->base)))
 		memset_io(info->screen_base, 0, info->screen_size);
 
 	drm_fb_helper_set_suspend(&ifbdev->helper, state);
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
new file mode 100644
index 0000000000000000000000000000000000000000..b2eb96ae10a237422530e2d3d23867f28ba8975c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -0,0 +1,683 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include "intel_atomic.h"
+#include "intel_display_types.h"
+#include "intel_fdi.h"
+
+/* units of 100MHz */
+static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
+{
+	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
+		return crtc_state->fdi_lanes;
+
+	return 0;
+}
+
+static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+			       struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_atomic_state *state = pipe_config->uapi.state;
+	struct intel_crtc *other_crtc;
+	struct intel_crtc_state *other_crtc_state;
+
+	drm_dbg_kms(&dev_priv->drm,
+		    "checking fdi config on pipe %c, lanes %i\n",
+		    pipe_name(pipe), pipe_config->fdi_lanes);
+	if (pipe_config->fdi_lanes > 4) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "invalid fdi lane config on pipe %c: %i lanes\n",
+			    pipe_name(pipe), pipe_config->fdi_lanes);
+		return -EINVAL;
+	}
+
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+		if (pipe_config->fdi_lanes > 2) {
+			drm_dbg_kms(&dev_priv->drm,
+				    "only 2 lanes on haswell, required: %i lanes\n",
+				    pipe_config->fdi_lanes);
+			return -EINVAL;
+		} else {
+			return 0;
+		}
+	}
+
+	if (INTEL_NUM_PIPES(dev_priv) == 2)
+		return 0;
+
+	/* Ivybridge 3 pipe is really complicated */
+	switch (pipe) {
+	case PIPE_A:
+		return 0;
+	case PIPE_B:
+		if (pipe_config->fdi_lanes <= 2)
+			return 0;
+
+		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
+		other_crtc_state =
+			intel_atomic_get_crtc_state(state, other_crtc);
+		if (IS_ERR(other_crtc_state))
+			return PTR_ERR(other_crtc_state);
+
+		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
+			drm_dbg_kms(&dev_priv->drm,
+				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
+				    pipe_name(pipe), pipe_config->fdi_lanes);
+			return -EINVAL;
+		}
+		return 0;
+	case PIPE_C:
+		if (pipe_config->fdi_lanes > 2) {
+			drm_dbg_kms(&dev_priv->drm,
+				    "only 2 lanes on pipe %c: required %i lanes\n",
+				    pipe_name(pipe), pipe_config->fdi_lanes);
+			return -EINVAL;
+		}
+
+		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
+		other_crtc_state =
+			intel_atomic_get_crtc_state(state, other_crtc);
+		if (IS_ERR(other_crtc_state))
+			return PTR_ERR(other_crtc_state);
+
+		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
+			drm_dbg_kms(&dev_priv->drm,
+				    "fdi link B uses too many lanes to enable link C\n");
+			return -EINVAL;
+		}
+		return 0;
+	default:
+		BUG();
+	}
+}
+
+int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
+			   struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *i915 = to_i915(dev);
+	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+	int lane, link_bw, fdi_dotclock, ret;
+	bool needs_recompute = false;
+
+retry:
+	/*
+	 * FDI is a binary signal running at ~2.7 GHz, encoding
+	 * each output octet as 10 bits. The actual frequency
+	 * is stored as a divider into a 100 MHz clock, and the
+	 * mode pixel clock is stored in units of 1 kHz.
+	 * Hence the bandwidth of each lane in terms of the mode
+	 * signal is:
+	 */
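+	/*
+	 * Example (illustrative numbers): at the nominal 2.7 GHz bit rate
+	 * with 8b/10b coding, each lane carries 2.7e9 / 10 = 270 M octets
+	 * per second, i.e. a link_bw of 270000 in the 1 kHz units used
+	 * here.
+	 */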
+	link_bw = intel_fdi_link_freq(i915, pipe_config);
+
+	fdi_dotclock = adjusted_mode->crtc_clock;
+
+	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
+				      pipe_config->pipe_bpp);
+
+	pipe_config->fdi_lanes = lane;
+
+	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
+			       link_bw, &pipe_config->fdi_m_n, false, false);
+
+	ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+	if (ret == -EDEADLK)
+		return ret;
+
+	if (ret == -EINVAL && pipe_config->pipe_bpp > 6 * 3) {
+		pipe_config->pipe_bpp -= 2 * 3;
+		drm_dbg_kms(&i915->drm,
+			    "fdi link bw constraint, reducing pipe bpp to %i\n",
+			    pipe_config->pipe_bpp);
+		needs_recompute = true;
+		pipe_config->bw_constrained = true;
+
+		goto retry;
+	}
+
+	if (needs_recompute)
+		return I915_DISPLAY_CONFIG_RETRY;
+
+	return ret;
+}
+
+void intel_fdi_normal_train(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	enum pipe pipe = crtc->pipe;
+	i915_reg_t reg;
+	u32 temp;
+
+	/* enable normal train */
+	reg = FDI_TX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	if (IS_IVYBRIDGE(dev_priv)) {
+		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+	}
+	intel_de_write(dev_priv, reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	if (HAS_PCH_CPT(dev_priv)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE;
+	}
+	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+	/* wait one idle pattern time */
+	intel_de_posting_read(dev_priv, reg);
+	udelay(1000);
+
+	/* IVB wants error correction enabled */
+	if (IS_IVYBRIDGE(dev_priv))
+		intel_de_write(dev_priv, reg,
+			       intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
+}
+
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ilk_fdi_link_train(struct intel_crtc *crtc,
+			       const struct intel_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	enum pipe pipe = crtc->pipe;
+	i915_reg_t reg;
+	u32 temp, tries;
+
+	/* FDI needs bits from pipe first */
+	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
+
+	/*
+	 * Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+	 * for the train result.
+	 */
+	reg = FDI_RX_IMR(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	temp &= ~FDI_RX_SYMBOL_LOCK;
+	temp &= ~FDI_RX_BIT_LOCK;
+	intel_de_write(dev_priv, reg, temp);
+	intel_de_read(dev_priv, reg);
+	udelay(150);
+
+	/* enable CPU FDI TX and PCH FDI RX */
+	reg = FDI_TX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	temp &= ~FDI_DP_PORT_WIDTH_MASK;
+	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_1;
+	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_1;
+	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+	intel_de_posting_read(dev_priv, reg);
+	udelay(150);
+
+	/* Ironlake workaround, enable clock pointer after FDI enable */
+	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+		       FDI_RX_PHASE_SYNC_POINTER_OVR);
+	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
+
+	reg = FDI_RX_IIR(pipe);
+	for (tries = 0; tries < 5; tries++) {
+		temp = intel_de_read(dev_priv, reg);
+		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+		if ((temp & FDI_RX_BIT_LOCK)) {
+			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
+			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
+			break;
+		}
+	}
+	if (tries == 5)
+		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
+
+	/* Train 2 */
+	reg = FDI_TX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_2;
+	intel_de_write(dev_priv, reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_2;
+	intel_de_write(dev_priv, reg, temp);
+
+	intel_de_posting_read(dev_priv, reg);
+	udelay(150);
+
+	reg = FDI_RX_IIR(pipe);
+	for (tries = 0; tries < 5; tries++) {
+		temp = intel_de_read(dev_priv, reg);
+		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+		if (temp & FDI_RX_SYMBOL_LOCK) {
+			intel_de_write(dev_priv, reg,
+				       temp | FDI_RX_SYMBOL_LOCK);
+			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
+			break;
+		}
+	}
+	if (tries == 5)
+		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
+
+	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
+
+}
+
+static const int snb_b_fdi_train_param[] = {
+	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct intel_crtc *crtc,
+				const struct intel_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	enum pipe pipe = crtc->pipe;
+	i915_reg_t reg;
+	u32 temp, i, retry;
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+	   for the train result */
+	reg = FDI_RX_IMR(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	temp &= ~FDI_RX_SYMBOL_LOCK;
+	temp &= ~FDI_RX_BIT_LOCK;
+	intel_de_write(dev_priv, reg, temp);
+
+	intel_de_posting_read(dev_priv, reg);
+	udelay(150);
+
+	/* enable CPU FDI TX and PCH FDI RX */
+	reg = FDI_TX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	temp &= ~FDI_DP_PORT_WIDTH_MASK;
+	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_1;
+	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+	/* SNB-B */
+	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	if (HAS_PCH_CPT(dev_priv)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_1;
+	}
+	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+	intel_de_posting_read(dev_priv, reg);
+	udelay(150);
+
+	for (i = 0; i < 4; i++) {
+		reg = FDI_TX_CTL(pipe);
+		temp = intel_de_read(dev_priv, reg);
+		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+		temp |= snb_b_fdi_train_param[i];
+		intel_de_write(dev_priv, reg, temp);
+
+		intel_de_posting_read(dev_priv, reg);
+		udelay(500);
+
+		for (retry = 0; retry < 5; retry++) {
+			reg = FDI_RX_IIR(pipe);
+			temp = intel_de_read(dev_priv, reg);
+			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+			if (temp & FDI_RX_BIT_LOCK) {
+				intel_de_write(dev_priv, reg,
+					       temp | FDI_RX_BIT_LOCK);
+				drm_dbg_kms(&dev_priv->drm,
+					    "FDI train 1 done.\n");
+				break;
+			}
+			udelay(50);
+		}
+		if (retry < 5)
+			break;
+	}
+	if (i == 4)
+		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
+
+	/* Train 2 */
+	reg = FDI_TX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_2;
+	if (IS_GEN(dev_priv, 6)) {
+		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+		/* SNB-B */
+		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+	}
+	intel_de_write(dev_priv, reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	if (HAS_PCH_CPT(dev_priv)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_2;
+	}
+	intel_de_write(dev_priv, reg, temp);
+
+	intel_de_posting_read(dev_priv, reg);
+	udelay(150);
+
+	for (i = 0; i < 4; i++) {
+		reg = FDI_TX_CTL(pipe);
+		temp = intel_de_read(dev_priv, reg);
+		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+		temp |= snb_b_fdi_train_param[i];
+		intel_de_write(dev_priv, reg, temp);
+
+		intel_de_posting_read(dev_priv, reg);
+		udelay(500);
+
+		for (retry = 0; retry < 5; retry++) {
+			reg = FDI_RX_IIR(pipe);
+			temp = intel_de_read(dev_priv, reg);
+			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+			if (temp & FDI_RX_SYMBOL_LOCK) {
+				intel_de_write(dev_priv, reg,
+					       temp | FDI_RX_SYMBOL_LOCK);
+				drm_dbg_kms(&dev_priv->drm,
+					    "FDI train 2 done.\n");
+				break;
+			}
+			udelay(50);
+		}
+		if (retry < 5)
+			break;
+	}
+	if (i == 4)
+		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
+
+	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
+}
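
Both training phases above share one shape: walk the four vswing/pre-emphasis levels in snb_b_fdi_train_param[], polling the RX IIR lock bit up to five times per level before stepping to the next. A compact model of that nested walk, with a caller-supplied poll_lock() standing in for the FDI_RX_IIR read:

	#include <stdbool.h>

	static bool model_train_walk(bool (*poll_lock)(int level))
	{
		int level, retry;

		for (level = 0; level < 4; level++) {	/* snb_b_fdi_train_param[] */
			for (retry = 0; retry < 5; retry++)
				if (poll_lock(level))
					return true;	/* bit/symbol lock achieved */
		}
		return false;	/* every vswing/pre-emphasis level exhausted */
	}
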
+
+/* Manual link training for Ivy Bridge A0 parts */
+static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
+				      const struct intel_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	enum pipe pipe = crtc->pipe;
+	i915_reg_t reg;
+	u32 temp, i, j;
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+	   for the train result */
+	reg = FDI_RX_IMR(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	temp &= ~FDI_RX_SYMBOL_LOCK;
+	temp &= ~FDI_RX_BIT_LOCK;
+	intel_de_write(dev_priv, reg, temp);
+
+	intel_de_posting_read(dev_priv, reg);
+	udelay(150);
+
+	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
+		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
+
+	/* Try each vswing and preemphasis setting twice before moving on */
+	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
+		/* disable first in case we need to retry */
+		reg = FDI_TX_CTL(pipe);
+		temp = intel_de_read(dev_priv, reg);
+		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+		temp &= ~FDI_TX_ENABLE;
+		intel_de_write(dev_priv, reg, temp);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = intel_de_read(dev_priv, reg);
+		temp &= ~FDI_LINK_TRAIN_AUTO;
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp &= ~FDI_RX_ENABLE;
+		intel_de_write(dev_priv, reg, temp);
+
+		/* enable CPU FDI TX and PCH FDI RX */
+		reg = FDI_TX_CTL(pipe);
+		temp = intel_de_read(dev_priv, reg);
+		temp &= ~FDI_DP_PORT_WIDTH_MASK;
+		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
+		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+		temp |= snb_b_fdi_train_param[j/2];
+		temp |= FDI_COMPOSITE_SYNC;
+		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = intel_de_read(dev_priv, reg);
+		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+		temp |= FDI_COMPOSITE_SYNC;
+		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+		intel_de_posting_read(dev_priv, reg);
+		udelay(1); /* should be 0.5us */
+
+		for (i = 0; i < 4; i++) {
+			reg = FDI_RX_IIR(pipe);
+			temp = intel_de_read(dev_priv, reg);
+			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+			if (temp & FDI_RX_BIT_LOCK ||
+			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
+				intel_de_write(dev_priv, reg,
+					       temp | FDI_RX_BIT_LOCK);
+				drm_dbg_kms(&dev_priv->drm,
+					    "FDI train 1 done, level %i.\n",
+					    i);
+				break;
+			}
+			udelay(1); /* should be 0.5us */
+		}
+		if (i == 4) {
+			drm_dbg_kms(&dev_priv->drm,
+				    "FDI train 1 fail on vswing %d\n", j / 2);
+			continue;
+		}
+
+		/* Train 2 */
+		reg = FDI_TX_CTL(pipe);
+		temp = intel_de_read(dev_priv, reg);
+		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+		intel_de_write(dev_priv, reg, temp);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = intel_de_read(dev_priv, reg);
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+		intel_de_write(dev_priv, reg, temp);
+
+		intel_de_posting_read(dev_priv, reg);
+		udelay(2); /* should be 1.5us */
+
+		for (i = 0; i < 4; i++) {
+			reg = FDI_RX_IIR(pipe);
+			temp = intel_de_read(dev_priv, reg);
+			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+			if (temp & FDI_RX_SYMBOL_LOCK ||
+			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
+				intel_de_write(dev_priv, reg,
+					       temp | FDI_RX_SYMBOL_LOCK);
+				drm_dbg_kms(&dev_priv->drm,
+					    "FDI train 2 done, level %i.\n",
+					    i);
+				goto train_done;
+			}
+			udelay(2); /* should be 1.5us */
+		}
+		if (i == 4)
+			drm_dbg_kms(&dev_priv->drm,
+				    "FDI train 2 fail on vswing %d\n", j / 2);
+	}
+
+train_done:
+	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
+}
+
+void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+	enum pipe pipe = intel_crtc->pipe;
+	i915_reg_t reg;
+	u32 temp;
+
+	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+	reg = FDI_RX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
+	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
+
+	intel_de_posting_read(dev_priv, reg);
+	udelay(200);
+
+	/* Switch from Rawclk to PCDclk */
+	temp = intel_de_read(dev_priv, reg);
+	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
+
+	intel_de_posting_read(dev_priv, reg);
+	udelay(200);
+
+	/* Enable CPU FDI TX PLL, always on for Ironlake */
+	reg = FDI_TX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
+
+		intel_de_posting_read(dev_priv, reg);
+		udelay(100);
+	}
+}
+
+void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	enum pipe pipe = intel_crtc->pipe;
+	i915_reg_t reg;
+	u32 temp;
+
+	/* Switch from PCDclk to Rawclk */
+	reg = FDI_RX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
+
+	/* Disable CPU FDI TX PLL */
+	reg = FDI_TX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
+
+	intel_de_posting_read(dev_priv, reg);
+	udelay(100);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
+
+	/* Wait for the clocks to turn off. */
+	intel_de_posting_read(dev_priv, reg);
+	udelay(100);
+}
+
+void ilk_fdi_disable(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
+	i915_reg_t reg;
+	u32 temp;
+
+	/* disable CPU FDI tx and PCH FDI rx */
+	reg = FDI_TX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
+	intel_de_posting_read(dev_priv, reg);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	temp &= ~(0x7 << 16);
+	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
+
+	intel_de_posting_read(dev_priv, reg);
+	udelay(100);
+
+	/* Ironlake workaround, disable clock pointer after disabling FDI */
+	if (HAS_PCH_IBX(dev_priv))
+		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+			       FDI_RX_PHASE_SYNC_POINTER_OVR);
+
+	/* still set train pattern 1 */
+	reg = FDI_TX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_1;
+	intel_de_write(dev_priv, reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = intel_de_read(dev_priv, reg);
+	if (HAS_PCH_CPT(dev_priv)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_1;
+	}
+	/* BPC in FDI rx is consistent with that in PIPECONF */
+	temp &= ~(0x07 << 16);
+	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+	intel_de_write(dev_priv, reg, temp);
+
+	intel_de_posting_read(dev_priv, reg);
+	udelay(100);
+}
+
+void
+intel_fdi_init_hook(struct drm_i915_private *dev_priv)
+{
+	if (IS_GEN(dev_priv, 5)) {
+		dev_priv->display.fdi_link_train = ilk_fdi_link_train;
+	} else if (IS_GEN(dev_priv, 6)) {
+		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+	} else if (IS_IVYBRIDGE(dev_priv)) {
+		/* FIXME: detect B0+ stepping and use auto training */
+		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+	}
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h
new file mode 100644
index 0000000000000000000000000000000000000000..a9cd21663eb8987ad2432b28cb633fe585331998
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fdi.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_FDI_H_
+#define _INTEL_FDI_H_
+
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+
+#define I915_DISPLAY_CONFIG_RETRY 1
+int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
+			   struct intel_crtc_state *pipe_config);
+void intel_fdi_normal_train(struct intel_crtc *crtc);
+void ilk_fdi_disable(struct intel_crtc *crtc);
+void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc);
+void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state);
+void intel_fdi_init_hook(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index d898b370d7a4faa5f9968fee3dd744de69bc7f18..7b38eee9980f19c5a85aa6659313ac6eeb063f94 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -225,8 +225,10 @@ static void frontbuffer_release(struct kref *ref)
 	struct i915_vma *vma;
 
 	spin_lock(&obj->vma.lock);
-	for_each_ggtt_vma(vma, obj)
+	for_each_ggtt_vma(vma, obj) {
+		i915_vma_clear_scanout(vma);
 		vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+	}
 	spin_unlock(&obj->vma.lock);
 
 	RCU_INIT_POINTER(obj->frontbuffer, NULL);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index b9d8825e2bb12f397ea31cee5b2288d6995a0fd0..ae1371c36a32d85341775169159f7cece7619cbd 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -15,6 +15,7 @@
 #include <drm/drm_hdcp.h>
 #include <drm/i915_component.h>
 
+#include "i915_drv.h"
 #include "i915_reg.h"
 #include "intel_display_power.h"
 #include "intel_display_types.h"
@@ -23,9 +24,78 @@
 #include "intel_connector.h"
 
 #define KEY_LOAD_TRIES	5
-#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS	50
 #define HDCP2_LC_RETRY_CNT			3
 
+static int intel_conn_to_vcpi(struct intel_connector *connector)
+{
+	/* For HDMI this is forced to be 0x0. For DP SST it is also 0x0. */
+	return connector->port ? connector->port->vcpi.vcpi : 0;
+}
+
+/*
+ * intel_hdcp_required_content_stream selects the highest common HDCP
+ * content_type for all streams in a DP MST topology, because the security
+ * firmware has no provision to mark the content_type of each stream
+ * separately; it marks all available streams with the content_type
+ * provided at the time of port authentication. This may prevent userspace
+ * from using type1 content on an HDCP 2.2 capable sink when other sinks
+ * in the DP MST topology are not HDCP 2.2 capable. Though it is not
+ * compulsory, the security firmware should change its policy to mark
+ * different content_types for different streams.
+ */
+static int
+intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
+{
+	struct drm_connector_list_iter conn_iter;
+	struct intel_digital_port *conn_dig_port;
+	struct intel_connector *connector;
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+	bool enforce_type0 = false;
+	int k;
+
+	data->k = 0;
+
+	if (dig_port->hdcp_auth_status)
+		return 0;
+
+	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+	for_each_intel_connector_iter(connector, &conn_iter) {
+		if (connector->base.status == connector_status_disconnected)
+			continue;
+
+		if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
+			continue;
+
+		conn_dig_port = intel_attached_dig_port(connector);
+		if (conn_dig_port != dig_port)
+			continue;
+
+		if (!enforce_type0 && !intel_hdcp2_capable(connector))
+			enforce_type0 = true;
+
+		data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
+		data->k++;
+
+		/* if there is only one active stream */
+		if (dig_port->dp.active_mst_links <= 1)
+			break;
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
+		return -EINVAL;
+
+	/*
+	 * Apply a common protection level across all streams in the DP MST
+	 * topology: the highest content type supported by every stream.
+	 */
+	for (k = 0; k < data->k; k++)
+		data->streams[k].stream_type =
+			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
+
+	return 0;
+}
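
The policy above boils down to one rule: a single connector on the port that is not HDCP 2.2 capable downgrades every stream to Type 0; only an all-2.2 topology keeps Type 1. A sketch of that decision over a plain capability array instead of the connector iterator, with local TYPE0/TYPE1 stand-ins for the DRM content-type macros:

	#include <stdbool.h>

	#define TYPE0 0	/* stands in for DRM_MODE_HDCP_CONTENT_TYPE0 */
	#define TYPE1 1	/* stands in for DRM_MODE_HDCP_CONTENT_TYPE1 */

	static int model_common_content_type(const bool *sink_is_hdcp2, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			if (!sink_is_hdcp2[i])
				return TYPE0;	/* one legacy sink downgrades all */
		return TYPE1;
	}
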
+
 static
 bool intel_hdcp_is_ksv_valid(u8 *ksv)
 {
@@ -762,15 +832,22 @@ static int intel_hdcp_auth(struct intel_connector *connector)
 	if (intel_de_wait_for_set(dev_priv,
 				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
 				  HDCP_STATUS_ENC,
-				  ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+				  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
 		drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
 		return -ETIMEDOUT;
 	}
 
-	/*
-	 * XXX: If we have MST-connected devices, we need to enable encryption
-	 * on those as well.
-	 */
+	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
+	if (shim->stream_encryption) {
+		ret = shim->stream_encryption(connector, true);
+		if (ret) {
+			drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
+				connector->base.name, connector->base.base.id);
+			return ret;
+		}
+		drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
+			    transcoder_name(hdcp->stream_transcoder));
+	}
 
 	if (repeater_present)
 		return intel_hdcp_auth_downstream(connector);
@@ -792,24 +869,29 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
 	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
 		    connector->base.name, connector->base.base.id);
 
-	/*
-	 * If there are other connectors on this port using HDCP, don't disable
-	 * it. Instead, toggle the HDCP signalling off on that particular
-	 * connector/pipe and exit.
-	 */
-	if (dig_port->num_hdcp_streams > 0) {
-		ret = hdcp->shim->toggle_signalling(dig_port,
-						    cpu_transcoder, false);
-		if (ret)
-			DRM_ERROR("Failed to disable HDCP signalling\n");
-		return ret;
+	if (hdcp->shim->stream_encryption) {
+		ret = hdcp->shim->stream_encryption(connector, false);
+		if (ret) {
+			drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
+				connector->base.name, connector->base.base.id);
+			return ret;
+		}
+		drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
+			    transcoder_name(hdcp->stream_transcoder));
+		/*
+		 * If there are other connectors on this port using HDCP,
+		 * don't disable it until stream encryption has been
+		 * disabled for all connectors in the MST topology.
+		 */
+		if (dig_port->num_hdcp_streams > 0)
+			return 0;
 	}
 
 	hdcp->hdcp_encrypted = false;
 	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
 	if (intel_de_wait_for_clear(dev_priv,
 				    HDCP_STATUS(dev_priv, cpu_transcoder, port),
-				    ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+				    ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
 		drm_err(&dev_priv->drm,
 			"Failed to disable HDCP, timeout clearing status\n");
 		return -ETIMEDOUT;
@@ -1014,7 +1096,8 @@ static int
 hdcp2_prepare_ake_init(struct intel_connector *connector,
 		       struct hdcp2_ake_init *ake_data)
 {
-	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct i915_hdcp_comp_master *comp;
 	int ret;
@@ -1043,7 +1126,8 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
 				struct hdcp2_ake_no_stored_km *ek_pub_km,
 				size_t *msg_sz)
 {
-	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct i915_hdcp_comp_master *comp;
 	int ret;
@@ -1070,7 +1154,8 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
 static int hdcp2_verify_hprime(struct intel_connector *connector,
 			       struct hdcp2_ake_send_hprime *rx_hprime)
 {
-	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct i915_hdcp_comp_master *comp;
 	int ret;
@@ -1095,7 +1180,8 @@ static int
 hdcp2_store_pairing_info(struct intel_connector *connector,
 			 struct hdcp2_ake_send_pairing_info *pairing_info)
 {
-	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct i915_hdcp_comp_master *comp;
 	int ret;
@@ -1121,7 +1207,8 @@ static int
 hdcp2_prepare_lc_init(struct intel_connector *connector,
 		      struct hdcp2_lc_init *lc_init)
 {
-	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct i915_hdcp_comp_master *comp;
 	int ret;
@@ -1147,7 +1234,8 @@ static int
 hdcp2_verify_lprime(struct intel_connector *connector,
 		    struct hdcp2_lc_send_lprime *rx_lprime)
 {
-	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct i915_hdcp_comp_master *comp;
 	int ret;
@@ -1172,7 +1260,8 @@ hdcp2_verify_lprime(struct intel_connector *connector,
 static int hdcp2_prepare_skey(struct intel_connector *connector,
 			      struct hdcp2_ske_send_eks *ske_data)
 {
-	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct i915_hdcp_comp_master *comp;
 	int ret;
@@ -1200,7 +1289,8 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
 								*rep_topology,
 				      struct hdcp2_rep_send_ack *rep_send_ack)
 {
-	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct i915_hdcp_comp_master *comp;
 	int ret;
@@ -1228,7 +1318,8 @@ static int
 hdcp2_verify_mprime(struct intel_connector *connector,
 		    struct hdcp2_rep_stream_ready *stream_ready)
 {
-	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct i915_hdcp_comp_master *comp;
 	int ret;
@@ -1251,7 +1342,8 @@ hdcp2_verify_mprime(struct intel_connector *connector,
 
 static int hdcp2_authenticate_port(struct intel_connector *connector)
 {
-	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct i915_hdcp_comp_master *comp;
 	int ret;
@@ -1275,6 +1367,7 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
 
 static int hdcp2_close_mei_session(struct intel_connector *connector)
 {
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct i915_hdcp_comp_master *comp;
 	int ret;
@@ -1288,7 +1381,7 @@ static int hdcp2_close_mei_session(struct intel_connector *connector)
 	}
 
 	ret = comp->ops->close_hdcp_session(comp->mei_dev,
-					     &connector->hdcp.port_data);
+					     &dig_port->hdcp_port_data);
 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
 	return ret;
@@ -1448,13 +1541,14 @@ static
 int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
 {
 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	union {
 		struct hdcp2_rep_stream_manage stream_manage;
 		struct hdcp2_rep_stream_ready stream_ready;
 	} msgs;
 	const struct intel_hdcp_shim *shim = hdcp->shim;
-	int ret;
+	int ret, streams_size_delta, i;
 
 	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
 		return -ERANGE;
@@ -1463,16 +1557,18 @@ int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
 	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
 	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
 
-	/* K no of streams is fixed as 1. Stored as big-endian. */
-	msgs.stream_manage.k = cpu_to_be16(1);
+	msgs.stream_manage.k = cpu_to_be16(data->k);
 
-	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
-	msgs.stream_manage.streams[0].stream_id = 0;
-	msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
+	for (i = 0; i < data->k; i++) {
+		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
+		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
+	}
 
+	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
+				sizeof(struct hdcp2_streamid_type);
 	/* Send it to Repeater */
 	ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
-				  sizeof(msgs.stream_manage));
+				  sizeof(msgs.stream_manage) - streams_size_delta);
 	if (ret < 0)
 		goto out;
 
@@ -1481,8 +1577,8 @@ int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
 	if (ret < 0)
 		goto out;
 
-	hdcp->port_data.seq_num_m = hdcp->seq_num_m;
-	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
+	data->seq_num_m = hdcp->seq_num_m;
+
 	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
 
 out:
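
The streams_size_delta computation above trims the RepeaterAuth_Stream_Manage message so that only the data->k populated entries go on the wire: the full structure reserves HDCP_2_2_MAX_CONTENT_STREAMS_CNT fixed-size entries, and the unused tail is subtracted from the write length. A one-line model of that sizing, with entry_size standing in for sizeof(struct hdcp2_streamid_type):

	#include <stddef.h>

	static size_t model_stream_manage_len(size_t full_msg_size,
					      size_t entry_size,
					      int k, int max_streams)
	{
		return full_msg_size - (size_t)(max_streams - k) * entry_size;
	}
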
@@ -1606,6 +1702,36 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
 	return ret;
 }
 
+static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
+{
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
+	enum port port = dig_port->base.port;
+	int ret = 0;
+
+	if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+			    LINK_ENCRYPTION_STATUS)) {
+		drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
+			connector->base.name, connector->base.base.id);
+		return -EPERM;
+	}
+
+	if (hdcp->shim->stream_2_2_encryption) {
+		ret = hdcp->shim->stream_2_2_encryption(connector, true);
+		if (ret) {
+			drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
+				connector->base.name, connector->base.base.id);
+			return ret;
+		}
+		drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
+			    transcoder_name(hdcp->stream_transcoder));
+	}
+
+	return ret;
+}
+
 static int hdcp2_enable_encryption(struct intel_connector *connector)
 {
 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -1641,7 +1767,8 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
 				    HDCP2_STATUS(dev_priv, cpu_transcoder,
 						 port),
 				    LINK_ENCRYPTION_STATUS,
-				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+				    HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+	dig_port->hdcp_auth_status = true;
 
 	return ret;
 }
@@ -1665,7 +1792,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
 				      HDCP2_STATUS(dev_priv, cpu_transcoder,
 						   port),
 				      LINK_ENCRYPTION_STATUS,
-				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+				      HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
 	if (ret == -ETIMEDOUT)
 		drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");
 
@@ -1714,11 +1841,11 @@ hdcp2_propagate_stream_management_info(struct intel_connector *connector)
 
 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
 {
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
-	struct intel_hdcp *hdcp = &connector->hdcp;
-	int ret, i, tries = 3;
+	int ret = 0, i, tries = 3;
 
-	for (i = 0; i < tries; i++) {
+	for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
 		ret = hdcp2_authenticate_sink(connector);
 		if (!ret) {
 			ret = hdcp2_propagate_stream_management_info(connector);
@@ -1728,8 +1855,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
 					    ret);
 				break;
 			}
-			hdcp->port_data.streams[0].stream_type =
-							hdcp->content_type;
+
 			ret = hdcp2_authenticate_port(connector);
 			if (!ret)
 				break;
@@ -1744,7 +1870,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
 			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
 	}
 
-	if (!ret) {
+	if (!ret && !dig_port->hdcp_auth_status) {
 		/*
 		 * Ensuring the required 200mSec min time interval between
 		 * Session Key Exchange and encryption.
@@ -1759,12 +1885,16 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
 		}
 	}
 
+	ret = hdcp2_enable_stream_encryption(connector);
+
 	return ret;
 }
 
 static int _intel_hdcp2_enable(struct intel_connector *connector)
 {
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
+	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	int ret;
 
@@ -1772,6 +1902,16 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
 		    connector->base.name, connector->base.base.id,
 		    hdcp->content_type);
 
+	/* Stream which requires encryption */
+	if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
+		data->k = 1;
+		data->streams[0].stream_type = hdcp->content_type;
+	} else {
+		ret = intel_hdcp_required_content_stream(dig_port);
+		if (ret)
+			return ret;
+	}
+
 	ret = hdcp2_authenticate_and_encrypt(connector);
 	if (ret) {
 		drm_dbg_kms(&i915->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
@@ -1789,18 +1929,37 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
 
 static int _intel_hdcp2_disable(struct intel_connector *connector)
 {
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
+	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+	struct intel_hdcp *hdcp = &connector->hdcp;
 	int ret;
 
 	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
 		    connector->base.name, connector->base.base.id);
 
+	if (hdcp->shim->stream_2_2_encryption) {
+		ret = hdcp->shim->stream_2_2_encryption(connector, false);
+		if (ret) {
+			drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
+				connector->base.name, connector->base.base.id);
+			return ret;
+		}
+		drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
+			    transcoder_name(hdcp->stream_transcoder));
+
+		if (dig_port->num_hdcp_streams > 0)
+			return 0;
+	}
+
 	ret = hdcp2_disable_encryption(connector);
 
 	if (hdcp2_deauthenticate_port(connector) < 0)
 		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
 
 	connector->hdcp.hdcp2_encrypted = false;
+	dig_port->hdcp_auth_status = false;
+	data->k = 0;
 
 	return ret;
 }
@@ -1816,6 +1975,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
 	int ret = 0;
 
 	mutex_lock(&hdcp->mutex);
+	mutex_lock(&dig_port->hdcp_mutex);
 	cpu_transcoder = hdcp->cpu_transcoder;
 
 	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
@@ -1837,7 +1997,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
 		goto out;
 	}
 
-	ret = hdcp->shim->check_2_2_link(dig_port);
+	ret = hdcp->shim->check_2_2_link(dig_port, connector);
 	if (ret == HDCP_LINK_PROTECTED) {
 		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
 			intel_hdcp_update_value(connector,
@@ -1893,6 +2053,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
 	}
 
 out:
+	mutex_unlock(&dig_port->hdcp_mutex);
 	mutex_unlock(&hdcp->mutex);
 	return ret;
 }
@@ -1968,12 +2129,13 @@ static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
 }
 
 static int initialize_hdcp_port_data(struct intel_connector *connector,
-				     enum port port,
+				     struct intel_digital_port *dig_port,
 				     const struct intel_hdcp_shim *shim)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 	struct intel_hdcp *hdcp = &connector->hdcp;
-	struct hdcp_port_data *data = &hdcp->port_data;
+	enum port port = dig_port->base.port;
 
 	if (INTEL_GEN(dev_priv) < 12)
 		data->fw_ddi = intel_get_mei_fw_ddi_index(port);
@@ -1994,16 +2156,15 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
 	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
 	data->protocol = (u8)shim->protocol;
 
-	data->k = 1;
 	if (!data->streams)
-		data->streams = kcalloc(data->k,
+		data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
 					sizeof(struct hdcp2_streamid_type),
 					GFP_KERNEL);
 	if (!data->streams) {
 		drm_err(&dev_priv->drm, "Out of Memory\n");
 		return -ENOMEM;
 	}
-
+	/* For SST */
 	data->streams[0].stream_id = 0;
 	data->streams[0].stream_type = hdcp->content_type;
 
@@ -2046,14 +2207,15 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
 	}
 }
 
-static void intel_hdcp2_init(struct intel_connector *connector, enum port port,
+static void intel_hdcp2_init(struct intel_connector *connector,
+			     struct intel_digital_port *dig_port,
 			     const struct intel_hdcp_shim *shim)
 {
 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	int ret;
 
-	ret = initialize_hdcp_port_data(connector, port, shim);
+	ret = initialize_hdcp_port_data(connector, dig_port, shim);
 	if (ret) {
 		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
 		return;
@@ -2063,7 +2225,7 @@ static void intel_hdcp2_init(struct intel_connector *connector, enum port port,
 }
 
 int intel_hdcp_init(struct intel_connector *connector,
-		    enum port port,
+		    struct intel_digital_port *dig_port,
 		    const struct intel_hdcp_shim *shim)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -2073,15 +2235,15 @@ int intel_hdcp_init(struct intel_connector *connector,
 	if (!shim)
 		return -EINVAL;
 
-	if (is_hdcp2_supported(dev_priv) && !connector->mst_port)
-		intel_hdcp2_init(connector, port, shim);
+	if (is_hdcp2_supported(dev_priv))
+		intel_hdcp2_init(connector, dig_port, shim);
 
 	ret =
 	drm_connector_attach_content_protection_property(&connector->base,
 							 hdcp->hdcp2_supported);
 	if (ret) {
 		hdcp->hdcp2_supported = false;
-		kfree(hdcp->port_data.streams);
+		kfree(dig_port->hdcp_port_data.streams);
 		return ret;
 	}
 
@@ -2095,7 +2257,7 @@ int intel_hdcp_init(struct intel_connector *connector,
 }
 
 int intel_hdcp_enable(struct intel_connector *connector,
-		      enum transcoder cpu_transcoder, u8 content_type)
+		      const struct intel_crtc_state *pipe_config, u8 content_type)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -2106,15 +2268,28 @@ int intel_hdcp_enable(struct intel_connector *connector,
 	if (!hdcp->shim)
 		return -ENOENT;
 
+	if (!connector->encoder) {
+		drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
+			connector->base.name, connector->base.base.id);
+		return -ENODEV;
+	}
+
 	mutex_lock(&hdcp->mutex);
 	mutex_lock(&dig_port->hdcp_mutex);
 	drm_WARN_ON(&dev_priv->drm,
 		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
 	hdcp->content_type = content_type;
-	hdcp->cpu_transcoder = cpu_transcoder;
+
+	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
+		hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
+		hdcp->stream_transcoder = pipe_config->cpu_transcoder;
+	} else {
+		hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
+		hdcp->stream_transcoder = INVALID_TRANSCODER;
+	}
 
 	if (INTEL_GEN(dev_priv) >= 12)
-		hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
+		dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
 
 	/*
 	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
@@ -2234,7 +2409,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
 
 	if (desired_and_not_enabled || content_protection_type_changed)
 		intel_hdcp_enable(connector,
-				  crtc_state->cpu_transcoder,
+				  crtc_state,
 				  (u8)conn_state->hdcp_content_type);
 }
 
@@ -2284,7 +2459,6 @@ void intel_hdcp_cleanup(struct intel_connector *connector)
 	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
 
 	mutex_lock(&hdcp->mutex);
-	kfree(hdcp->port_data.streams);
 	hdcp->shim = NULL;
 	mutex_unlock(&hdcp->mutex);
 }
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 1bbf5b67ed0af65fbb87643f2acf503f4d61db50..8f53b0c7fe5cfb37dadd4d7511c2db666b48af8e 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -8,6 +8,8 @@
 
 #include <linux/types.h>
 
+#define HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS	50
+
 struct drm_connector;
 struct drm_connector_state;
 struct drm_i915_private;
@@ -16,16 +18,18 @@ struct intel_connector;
 struct intel_crtc_state;
 struct intel_encoder;
 struct intel_hdcp_shim;
+struct intel_digital_port;
 enum port;
 enum transcoder;
 
 void intel_hdcp_atomic_check(struct drm_connector *connector,
 			     struct drm_connector_state *old_state,
 			     struct drm_connector_state *new_state);
-int intel_hdcp_init(struct intel_connector *connector, enum port port,
+int intel_hdcp_init(struct intel_connector *connector,
+		    struct intel_digital_port *dig_port,
 		    const struct intel_hdcp_shim *hdcp_shim);
 int intel_hdcp_enable(struct intel_connector *connector,
-		      enum transcoder cpu_transcoder, u8 content_type);
+		      const struct intel_crtc_state *pipe_config, u8 content_type);
 int intel_hdcp_disable(struct intel_connector *connector);
 void intel_hdcp_update_pipe(struct intel_atomic_state *state,
 			    struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 82674a8853c606e24f7b392786948fe1dcad4c6b..95919d325b0b8b7d2efa6a49f64691ed87354b18 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -518,10 +518,10 @@ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
 		      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
-static void hsw_write_infoframe(struct intel_encoder *encoder,
-				const struct intel_crtc_state *crtc_state,
-				unsigned int type,
-				const void *frame, ssize_t len)
+void hsw_write_infoframe(struct intel_encoder *encoder,
+			 const struct intel_crtc_state *crtc_state,
+			 unsigned int type,
+			 const void *frame, ssize_t len)
 {
 	const u32 *data = frame;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -555,10 +555,9 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
 	intel_de_posting_read(dev_priv, ctl_reg);
 }
 
-static void hsw_read_infoframe(struct intel_encoder *encoder,
-			       const struct intel_crtc_state *crtc_state,
-			       unsigned int type,
-			       void *frame, ssize_t len)
+void hsw_read_infoframe(struct intel_encoder *encoder,
+			const struct intel_crtc_state *crtc_state,
+			unsigned int type, void *frame, ssize_t len)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -1495,15 +1494,16 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
 		usleep_range(25, 50);
 	}
 
-	ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
-					       false);
+	ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
+					 false, TRANS_DDI_HDCP_SIGNALLING);
 	if (ret) {
 		drm_err(&dev_priv->drm,
 			"Disable HDCP signalling failed (%d)\n", ret);
 		return ret;
 	}
-	ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
-					       true);
+
+	ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
+					 true, TRANS_DDI_HDCP_SIGNALLING);
 	if (ret) {
 		drm_err(&dev_priv->drm,
 			"Enable HDCP signalling failed (%d)\n", ret);
@@ -1526,8 +1526,9 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
 	if (!enable)
 		usleep_range(6, 60); /* Bspec says >= 6us */
 
-	ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
-					       enable);
+	ret = intel_ddi_toggle_hdcp_bits(&dig_port->base,
+					 cpu_transcoder, enable,
+					 TRANS_DDI_HDCP_SIGNALLING);
 	if (ret) {
 		drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
 			enable ? "Enable" : "Disable", ret);
@@ -1732,7 +1733,8 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *dig_port,
 }
 
 static
-int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port)
+int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port,
+				struct intel_connector *connector)
 {
 	u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
 	int ret;
@@ -2216,7 +2218,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
 					  has_hdmi_sink))
 		return MODE_CLOCK_HIGH;
 
-	/* BXT DPLL can't generate 223-240 MHz */
+	/* GLK DPLL can't generate 446-480 MHz */
+	if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 480000)
+		return MODE_CLOCK_RANGE;
+
+	/* BXT/GLK DPLL can't generate 223-240 MHz */
 	if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000)
 		return MODE_CLOCK_RANGE;
 
@@ -2950,21 +2956,12 @@ static void
 intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	struct intel_digital_port *dig_port =
-				hdmi_to_dig_port(intel_hdmi);
 
 	intel_attach_force_audio_property(connector);
 	intel_attach_broadcast_rgb_property(connector);
 	intel_attach_aspect_ratio_property(connector);
 
-	/*
-	 * Attach Colorspace property for Non LSPCON based device
-	 * ToDo: This needs to be extended for LSPCON implementation
-	 * as well. Will be implemented separately.
-	 */
-	if (!dig_port->lspcon.active)
-		intel_attach_colorspace_property(connector);
-
+	intel_attach_hdmi_colorspace_property(connector);
 	drm_connector_attach_content_type_property(connector);
 
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -3300,7 +3297,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
 	intel_hdmi->attached_connector = intel_connector;
 
 	if (is_hdcp_supported(dev_priv, port)) {
-		int ret = intel_hdcp_init(intel_connector, port,
+		int ret = intel_hdcp_init(intel_connector, dig_port,
 					  &intel_hdmi_hdcp_shim);
 		if (ret)
 			drm_dbg_kms(&dev_priv->drm,
@@ -3438,3 +3435,236 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
 	dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
 	intel_hdmi_init_connector(dig_port, intel_connector);
 }
+
+/*
+ * intel_hdmi_dsc_get_slice_height - get the dsc slice_height
+ * @vactive: Vactive of a display mode
+ *
+ * @return: appropriate dsc slice height for a given mode.
+ */
+int intel_hdmi_dsc_get_slice_height(int vactive)
+{
+	int slice_height;
+
+	/*
+	 * Slice height determination: HDMI2.1 Section 7.7.5.2
+	 * Select the smallest slice height >= 96 that results in a valid PPS
+	 * and requires the minimum number of padding lines for the final
+	 * slice.
+	 *
+	 * Assumption: Vactive is even.
+	 */
+	for (slice_height = 96; slice_height <= vactive; slice_height += 2)
+		if (vactive % slice_height == 0)
+			return slice_height;
+
+	return 0;
+}
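
A worked example under the stated even-Vactive assumption: for a 2160-line mode the loop rejects 96 through 106 (none divide 2160 evenly) and returns 108, i.e. 2160 / 108 = 20 slices with no padding lines. A userspace-style check of that behaviour, not driver code:

	#include <assert.h>

	int intel_hdmi_dsc_get_slice_height(int vactive);	/* as above */

	static void example_slice_height(void)
	{
		/* 96..106 do not divide 2160 evenly; 108 does (20 slices) */
		assert(intel_hdmi_dsc_get_slice_height(2160) == 108);
		/* likewise for 1080: the first even divisor >= 96 is 108 */
		assert(intel_hdmi_dsc_get_slice_height(1080) == 108);
	}
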
+
+/*
+ * intel_hdmi_dsc_get_num_slices - get no. of dsc slices based on dsc encoder
+ * and dsc decoder capabilities
+ *
+ * @crtc_state: intel crtc_state
+ * @src_max_slices: maximum slices supported by the DSC encoder
+ * @src_max_slice_width: maximum slice width supported by DSC encoder
+ * @hdmi_max_slices: maximum slices supported by sink DSC decoder
+ * @hdmi_throughput: maximum clock per slice (MHz) supported by HDMI sink
+ *
+ * @return: num of dsc slices that can be supported by the dsc encoder
+ * and decoder.
+ */
+int
+intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
+			      int src_max_slices, int src_max_slice_width,
+			      int hdmi_max_slices, int hdmi_throughput)
+{
+/* Pixel rates in KPixels/sec */
+#define HDMI_DSC_PEAK_PIXEL_RATE		2720000
+/*
+ * Rates at which the source and sink are required to process pixels in each
+ * slice can be at one of two levels: at least 340000 KHz or at least
+ * 400000 KHz.
+ */
+#define HDMI_DSC_MAX_ENC_THROUGHPUT_0		340000
+#define HDMI_DSC_MAX_ENC_THROUGHPUT_1		400000
+
+/* Spec limits the slice width to 2720 pixels */
+#define MAX_HDMI_SLICE_WIDTH			2720
+	int kslice_adjust;
+	int adjusted_clk_khz;
+	int min_slices;
+	int target_slices;
+	int max_throughput; /* max clock freq. in khz per slice */
+	int max_slice_width;
+	int slice_width;
+	int pixel_clock = crtc_state->hw.adjusted_mode.crtc_clock;
+
+	if (!hdmi_throughput)
+		return 0;
+
+	/*
+	 * Slice width determination: HDMI2.1 Section 7.7.5.1
+	 * The kslice_adjust factor for the 4:2:0 and 4:2:2 formats is 0.5,
+	 * whereas for 4:4:4 it is 1.0. To stay in integer math these factors
+	 * are multiplied by 10, and the adjusted clock value is divided by
+	 * 10 afterwards.
+	 */
+	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
+	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
+		kslice_adjust = 10;
+	else
+		kslice_adjust = 5;
+
+	/*
+	 * As per spec, the rate at which the source and the sink process
+	 * the pixels of each slice is at one of two levels: at least 340 MHz
+	 * or at least 400 MHz. Which level applies depends on the pixel
+	 * clock rate and the output format (kslice adjust).
+	 * If pixel clock * kslice adjust <= 2720 MHz, slices can be processed
+	 * at up to 340 MHz; otherwise they can be processed at up to 400 MHz.
+	 */
+
+	adjusted_clk_khz = DIV_ROUND_UP(kslice_adjust * pixel_clock, 10);
+
+	if (adjusted_clk_khz <= HDMI_DSC_PEAK_PIXEL_RATE)
+		max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_0;
+	else
+		max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_1;
+
+	/*
+	 * Taking into account the sink's capability for maximum
+	 * clock per slice (in MHz) as read from HF-VSDB.
+	 */
+	max_throughput = min(max_throughput, hdmi_throughput * 1000);
+
+	min_slices = DIV_ROUND_UP(adjusted_clk_khz, max_throughput);
+	max_slice_width = min(MAX_HDMI_SLICE_WIDTH, src_max_slice_width);
+
+	/*
+	 * Keep increasing the number of slices per line, starting from
+	 * min_slices, until we find a count for which the slice width falls
+	 * just below max_slice_width. The selected slices/line must be less
+	 * than or equal to the maximum horizontal slices that the combination
+	 * of PCON encoder and HDMI decoder can support.
+	 */
+	slice_width = max_slice_width;
+
+	do {
+		if (min_slices <= 1 && src_max_slices >= 1 && hdmi_max_slices >= 1)
+			target_slices = 1;
+		else if (min_slices <= 2 && src_max_slices >= 2 && hdmi_max_slices >= 2)
+			target_slices = 2;
+		else if (min_slices <= 4 && src_max_slices >= 4 && hdmi_max_slices >= 4)
+			target_slices = 4;
+		else if (min_slices <= 8 && src_max_slices >= 8 && hdmi_max_slices >= 8)
+			target_slices = 8;
+		else if (min_slices <= 12 && src_max_slices >= 12 && hdmi_max_slices >= 12)
+			target_slices = 12;
+		else if (min_slices <= 16 && src_max_slices >= 16 && hdmi_max_slices >= 16)
+			target_slices = 16;
+		else
+			return 0;
+
+		slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, target_slices);
+		if (slice_width >= max_slice_width)
+			min_slices = target_slices + 1;
+	} while (slice_width >= max_slice_width);
+
+	return target_slices;
+}
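
A worked example of the selection above, with assumed source/sink figures: a 3840x2160 RGB mode has crtc_clock = 594000 kHz, so kslice_adjust = 10 and adjusted_clk_khz = 594000, which is below HDMI_DSC_PEAK_PIXEL_RATE and caps throughput at 340000 kHz per slice. Assuming an hdmi_throughput of 400 MHz leaves that cap in place, min_slices = DIV_ROUND_UP(594000, 340000) = 2, and slice_width = DIV_ROUND_UP(3840, 2) = 1920, comfortably below the 2720-pixel limit, so two slices are selected (provided both source and sink support at least two).
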
+
+/*
+ * intel_hdmi_dsc_get_bpp - get the appropriate compressed bits_per_pixel based on
+ * source and sink capabilities.
+ *
+ * @src_fractional_bpp: fractional bpp supported by the source
+ * @slice_width: dsc slice width supported by the source and sink
+ * @num_slices: num of slices supported by the source and sink
+ * @output_format: video output format
+ * @hdmi_all_bpp: sink supports decoding of 1/16th bpp setting
+ * @hdmi_max_chunk_bytes: max bytes in a line of chunks supported by sink
+ *
+ * @return: compressed bits_per_pixel in steps of 1/16 of a bit per pixel
+ */
+int
+intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width, int num_slices,
+		       int output_format, bool hdmi_all_bpp,
+		       int hdmi_max_chunk_bytes)
+{
+	int max_dsc_bpp, min_dsc_bpp;
+	int target_bytes;
+	bool bpp_found = false;
+	int bpp_decrement_x16;
+	int bpp_target;
+	int bpp_target_x16;
+
+	/*
+	 * Get the min and max bpp as per Table 7.23 in the HDMI2.1 spec.
+	 * Start with the max bpp and keep decrementing by the fractional
+	 * bpp step, if supported by the PCON DSC encoder.
+	 *
+	 * For each bpp we check whether the resulting number of bytes can
+	 * be supported by the HDMI sink.
+	 */
+
+	/* Assuming a bpc of 8 */
+	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+		min_dsc_bpp = 6;
+		max_dsc_bpp = 3 * 4; /* 3*bpc/2 */
+	} else if (output_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
+		   output_format == INTEL_OUTPUT_FORMAT_RGB) {
+		min_dsc_bpp = 8;
+		max_dsc_bpp = 3 * 8; /* 3*bpc */
+	} else {
+		/* Assuming 4:2:2 encoding */
+		min_dsc_bpp = 7;
+		max_dsc_bpp = 2 * 8; /* 2*bpc */
+	}
+
+	/*
+	 * Take into account whether DSC_all_bpp is supported by the HDMI2.1
+	 * sink. Section 7.7.34: the Source shall not enable compressed video
+	 * transport with bpp_target settings above 12 bpp unless
+	 * DSC_all_bpp is set to 1.
+	 */
+	if (!hdmi_all_bpp)
+		max_dsc_bpp = min(max_dsc_bpp, 12);
+
+	/*
+	 * The sink has a limit on the compressed data in bytes per scanline,
+	 * as described in the max_chunk_bytes field in the HFVSDB block of
+	 * the EDID. The number of bytes depends on the target bits per pixel
+	 * that the source configures. So we start with the max bpp and
+	 * calculate the target_chunk_bytes. We keep decrementing the
+	 * target_bpp until the target_chunk_bytes falls just below the
+	 * sink's max_chunk_bytes, or until we reach the min_dsc_bpp.
+	 *
+	 * The decrement follows the fractional bpp support of the PCON DSC
+	 * encoder. For fractional bpp we track bpp_target as a multiple of
+	 * 16:
+	 *
+	 * bpp_target_x16 = bpp_target * 16
+	 * So we decrement by {1, 2, 4, 8, 16} for the fractional bpp steps
+	 * {1/16, 1/8, 1/4, 1/2, 1} respectively.
+	 */
+
+	bpp_target = max_dsc_bpp;
+
+	/* A src without fractional bpp support implies a bpp_target_x16 decrement of 16 */
+	if (!src_fractional_bpp)
+		src_fractional_bpp = 1;
+	bpp_decrement_x16 = DIV_ROUND_UP(16, src_fractional_bpp);
+	bpp_target_x16 = (bpp_target * 16) - bpp_decrement_x16;
+
+	while (bpp_target_x16 > (min_dsc_bpp * 16)) {
+		int bpp;
+
+		bpp = DIV_ROUND_UP(bpp_target_x16, 16);
+		target_bytes = DIV_ROUND_UP((num_slices * slice_width * bpp), 8);
+		if (target_bytes <= hdmi_max_chunk_bytes) {
+			bpp_found = true;
+			break;
+		}
+		bpp_target_x16 -= bpp_decrement_x16;
+	}
+	if (bpp_found)
+		return bpp_target_x16;
+
+	return 0;
+}
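
To make the stepping above concrete, under the stated bpc = 8 assumption: for RGB output with hdmi_all_bpp false, the search starts one step below 12 bpp (bpp_target_x16 = 192 minus one decrement) and can run down to the 8 bpp floor (128 in x16 units). A source with full 1/16 fractional support (src_fractional_bpp = 16) steps bpp_target_x16 by 1, while an integer-only source falls back to src_fractional_bpp = 1 and steps by 16. Each candidate is checked with its rounded-up integer bpp, DIV_ROUND_UP(bpp_target_x16, 16), and the first one whose per-line chunk size fits within hdmi_max_chunk_bytes is returned.
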
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 15eb0ccde76e01b6908db57fe7be2417a7b381cf..fa1a9b030850ab578758c3a13c43ba00c4f69d41 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -50,5 +50,12 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
 				    const struct drm_connector_state *conn_state);
 bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, int bpc,
 				    bool has_hdmi_sink, bool ycbcr420_output);
+int intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width,
+			   int num_slices, int output_format, bool hdmi_all_bpp,
+			   int hdmi_max_chunk_bytes);
+int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
+				  int src_max_slices, int src_max_slice_width,
+				  int hdmi_max_slices, int hdmi_throughput);
+int intel_hdmi_dsc_get_slice_height(int vactive);
 
 #endif /* __INTEL_HDMI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index e37d45e531dfe80b713c0eb0a1239013ffb6e0ce..e4ff533e3a69dc36d4ba7ea6df3247b3809cb6b2 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -30,11 +30,15 @@
 #include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_lspcon.h"
+#include "intel_hdmi.h"
 
 /* LSPCON OUI Vendor ID(signatures) */
 #define LSPCON_VENDOR_PARADE_OUI 0x001CF8
 #define LSPCON_VENDOR_MCA_OUI 0x0060AD
 
+#define DPCD_MCA_LSPCON_HDR_STATUS	0x70003
+#define DPCD_PARADE_LSPCON_HDR_STATUS	0x00511
+
 /* AUX addresses to write MCA AVI IF */
 #define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0
 #define LSPCON_MCA_AVI_IF_CTRL 0x5DF
@@ -104,6 +108,35 @@ static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
 	return true;
 }
 
+static u32 get_hdr_status_reg(struct intel_lspcon *lspcon)
+{
+	if (lspcon->vendor == LSPCON_VENDOR_MCA)
+		return DPCD_MCA_LSPCON_HDR_STATUS;
+	else
+		return DPCD_PARADE_LSPCON_HDR_STATUS;
+}
+
+void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
+{
+	struct intel_digital_port *dig_port =
+		container_of(lspcon, struct intel_digital_port, lspcon);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
+	u8 hdr_caps;
+	int ret;
+
+	ret = drm_dp_dpcd_read(&dp->aux, get_hdr_status_reg(lspcon),
+			       &hdr_caps, 1);
+
+	if (ret < 0) {
+		drm_dbg_kms(dev, "HDR capability detection failed\n");
+		lspcon->hdr_supported = false;
+	} else if (hdr_caps & 0x1) {
+		drm_dbg_kms(dev, "LSPCON capable of HDR\n");
+		lspcon->hdr_supported = true;
+	}
+}
+
 static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
 {
 	enum drm_lspcon_mode current_mode;
@@ -418,27 +451,32 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
 			    unsigned int type,
 			    const void *frame, ssize_t len)
 {
-	bool ret;
+	bool ret = true;
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
 
-	/* LSPCON only needs AVI IF */
-	if (type != HDMI_INFOFRAME_TYPE_AVI)
+	switch (type) {
+	case HDMI_INFOFRAME_TYPE_AVI:
+		if (lspcon->vendor == LSPCON_VENDOR_MCA)
+			ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
+							      frame, len);
+		else
+			ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
+								 frame, len);
+		break;
+	case HDMI_PACKET_TYPE_GAMUT_METADATA:
+		drm_dbg_kms(encoder->base.dev, "Update HDR metadata for lspcon\n");
+		/* Reuse the legacy HSW infoframe path to write the HDR metadata */
+		hsw_write_infoframe(encoder, crtc_state, type, frame, len);
+		break;
+	default:
 		return;
-
-	if (lspcon->vendor == LSPCON_VENDOR_MCA)
-		ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
-						      frame, len);
-	else
-		ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
-							 frame, len);
+	}
 
 	if (!ret) {
-		DRM_ERROR("Failed to write AVI infoframes\n");
+		DRM_ERROR("Failed to write infoframes\n");
 		return;
 	}
-
-	DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
 }
 
 void lspcon_read_infoframe(struct intel_encoder *encoder,
@@ -446,7 +484,10 @@ void lspcon_read_infoframe(struct intel_encoder *encoder,
 			   unsigned int type,
 			   void *frame, ssize_t len)
 {
-	/* FIXME implement this */
+	/* FIXME: implement this for the AVI infoframe as well */
+	if (type == HDMI_PACKET_TYPE_GAMUT_METADATA)
+		hsw_read_infoframe(encoder, crtc_state, type,
+				   frame, len);
 }
 
 void lspcon_set_infoframes(struct intel_encoder *encoder,
@@ -491,12 +532,26 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
 	else
 		frame.avi.colorspace = HDMI_COLORSPACE_RGB;
 
-	drm_hdmi_avi_infoframe_quant_range(&frame.avi,
-					   conn_state->connector,
-					   adjusted_mode,
-					   crtc_state->limited_color_range ?
-					   HDMI_QUANTIZATION_RANGE_LIMITED :
-					   HDMI_QUANTIZATION_RANGE_FULL);
+	/* Set the Colorspace as per the HDMI spec */
+	drm_hdmi_avi_infoframe_colorspace(&frame.avi, conn_state);
+
+	/* nonsense combination */
+	drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range &&
+		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
+		drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+						   conn_state->connector,
+						   adjusted_mode,
+						   crtc_state->limited_color_range ?
+						   HDMI_QUANTIZATION_RANGE_LIMITED :
+						   HDMI_QUANTIZATION_RANGE_FULL);
+	} else {
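+		/* YCbCr output always uses limited quantization range per the HDMI spec */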
+		frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+		frame.avi.ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+	}
+
+	drm_hdmi_avi_infoframe_content_type(&frame.avi, conn_state);
 
 	ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
 	if (ret < 0) {
@@ -508,11 +563,64 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
 				  buf, ret);
 }
 
+static bool _lspcon_read_avi_infoframe_enabled_mca(struct drm_dp_aux *aux)
+{
+	int ret;
+	u32 val = 0;
+	u16 reg = LSPCON_MCA_AVI_IF_CTRL;
+
+	ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+	if (ret < 0) {
+		DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+		return false;
+	}
+
+	return val & LSPCON_MCA_AVI_IF_KICKOFF;
+}
+
+static bool _lspcon_read_avi_infoframe_enabled_parade(struct drm_dp_aux *aux)
+{
+	int ret;
+	u32 val = 0;
+	u16 reg = LSPCON_PARADE_AVI_IF_CTRL;
+
+	ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+	if (ret < 0) {
+		DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+		return false;
+	}
+
+	return val & LSPCON_PARADE_AVI_IF_KICKOFF;
+}
+
 u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
 			      const struct intel_crtc_state *pipe_config)
 {
-	/* FIXME actually read this from the hw */
-	return 0;
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	bool infoframes_enabled;
+	u32 val = 0;
+	u32 mask, tmp;
+
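+	/* The AVI infoframe enable state is tracked by the LSPCON itself, via DPCD */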
+	if (lspcon->vendor == LSPCON_VENDOR_MCA)
+		infoframes_enabled = _lspcon_read_avi_infoframe_enabled_mca(&intel_dp->aux);
+	else
+		infoframes_enabled = _lspcon_read_avi_infoframe_enabled_parade(&intel_dp->aux);
+
+	if (infoframes_enabled)
+		val |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
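+	/* HDR metadata (GMP) still goes through the HSW video DIP, so check its enable bit */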
+	if (lspcon->hdr_supported) {
+		tmp = intel_de_read(dev_priv,
+				    HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
+		mask = VIDEO_DIP_ENABLE_GMP_HSW;
+
+		if (tmp & mask)
+			val |= intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
+	}
+
+	return val;
 }
 
 void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
@@ -520,7 +628,7 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
 	lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON);
 }
 
-static bool lspcon_init(struct intel_digital_port *dig_port)
+bool lspcon_init(struct intel_digital_port *dig_port)
 {
 	struct intel_dp *dp = &dig_port->dp;
 	struct intel_lspcon *lspcon = &dig_port->lspcon;
@@ -550,6 +658,14 @@ static bool lspcon_init(struct intel_digital_port *dig_port)
 	return true;
 }
 
+u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
+				    const struct intel_crtc_state *pipe_config)
+{
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+	return dig_port->infoframes_enabled(encoder, pipe_config);
+}
+
 void lspcon_resume(struct intel_digital_port *dig_port)
 {
 	struct intel_lspcon *lspcon = &dig_port->lspcon;
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h
index b03dcb7076d84383142c70b5b08dc52e2288d24a..e19e10492b05112cc18f4b950fa47ae181650535 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.h
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.h
@@ -15,6 +15,8 @@ struct intel_digital_port;
 struct intel_encoder;
 struct intel_lspcon;
 
+bool lspcon_init(struct intel_digital_port *dig_port);
+void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon);
 void lspcon_resume(struct intel_digital_port *dig_port);
 void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
 void lspcon_write_infoframe(struct intel_encoder *encoder,
@@ -31,5 +33,15 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
 			   const struct drm_connector_state *conn_state);
 u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
 			      const struct intel_crtc_state *pipe_config);
+u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
+				    const struct intel_crtc_state *pipe_config);
+void hsw_write_infoframe(struct intel_encoder *encoder,
+			 const struct intel_crtc_state *crtc_state,
+			 unsigned int type,
+			 const void *frame, ssize_t len);
+void hsw_read_infoframe(struct intel_encoder *encoder,
+			const struct intel_crtc_state *crtc_state,
+			unsigned int type,
+			void *frame, ssize_t len);
 
 #endif /* __INTEL_LSPCON_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index b73d51e766ce871e4f957a6f0a2c6e66ed428916..f455040fa989c9660fe401b1a55618a76aee47e8 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -29,6 +29,7 @@
 #include <drm/drm_fourcc.h>
 
 #include "gem/i915_gem_pm.h"
+#include "gt/intel_gpu_commands.h"
 #include "gt/intel_ring.h"
 
 #include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index d64fce1a17cbc5668f756853a769244554d9504d..5fdf52643150839b2ee72dcd6501368e0a7f0dab 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -511,40 +511,79 @@ static u32 scale_hw_to_user(struct intel_connector *connector,
 		     0, user_max);
 }
 
-static u32 intel_panel_compute_brightness(struct intel_connector *connector,
-					  u32 val)
+u32 intel_panel_invert_pwm_level(struct intel_connector *connector, u32 val)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
 
-	drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+	drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
 
 	if (dev_priv->params.invert_brightness < 0)
 		return val;
 
 	if (dev_priv->params.invert_brightness > 0 ||
 	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
-		return panel->backlight.max - val + panel->backlight.min;
+		return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
 	}
 
 	return val;
 }
 
-static u32 lpt_get_backlight(struct intel_connector *connector)
+void intel_panel_set_pwm_level(const struct drm_connector_state *conn_state, u32 val)
+{
+	struct intel_connector *connector = to_intel_connector(conn_state->connector);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
+	struct intel_panel *panel = &connector->panel;
+
+	drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val);
+	panel->backlight.pwm_funcs->set(conn_state, val);
+}
+
+u32 intel_panel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct intel_panel *panel = &connector->panel;
+
+	drm_WARN_ON_ONCE(&dev_priv->drm,
+			 panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
+
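+	/* Scale the user brightness range onto the hw PWM duty-cycle range */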
+	val = scale(val, panel->backlight.min, panel->backlight.max,
+		    panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
+
+	return intel_panel_invert_pwm_level(connector, val);
+}
+
+u32 intel_panel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct intel_panel *panel = &connector->panel;
+
+	drm_WARN_ON_ONCE(&dev_priv->drm,
+			 panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
+
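+	/* Undo any brightness inversion before scaling back to the user range */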
+	if (dev_priv->params.invert_brightness > 0 ||
+	    (dev_priv->params.invert_brightness == 0 && dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS))
+		val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
+
+	return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
+		     panel->backlight.min, panel->backlight.max);
+}
+
+static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 
 	return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
 }
 
-static u32 pch_get_backlight(struct intel_connector *connector)
+static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 
 	return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 }
 
-static u32 i9xx_get_backlight(struct intel_connector *connector)
+static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
@@ -564,23 +603,17 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
 	return val;
 }
 
-static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
+static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe)
 {
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
 	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
 		return 0;
 
 	return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
 }
 
-static u32 vlv_get_backlight(struct intel_connector *connector)
-{
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	enum pipe pipe = intel_connector_get_pipe(connector);
-
-	return _vlv_get_backlight(dev_priv, pipe);
-}
-
-static u32 bxt_get_backlight(struct intel_connector *connector)
+static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
@@ -589,7 +622,7 @@ static u32 bxt_get_backlight(struct intel_connector *connector)
 			     BXT_BLC_PWM_DUTY(panel->backlight.controller));
 }
 
-static u32 pwm_get_backlight(struct intel_connector *connector)
+static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused)
 {
 	struct intel_panel *panel = &connector->panel;
 	struct pwm_state state;
@@ -624,12 +657,12 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
 	struct intel_panel *panel = &connector->panel;
 	u32 tmp, mask;
 
-	drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+	drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
 
 	if (panel->backlight.combination_mode) {
 		u8 lbpc;
 
-		lbpc = level * 0xfe / panel->backlight.max + 1;
+		lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
 		level /= lbpc;
 		pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
 	}
@@ -666,7 +699,7 @@ static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32
 		       BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
 }
 
-static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
 {
 	struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
 
@@ -681,10 +714,9 @@ intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state,
 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
 
-	drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", level);
+	drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level);
 
-	level = intel_panel_compute_brightness(connector, level);
-	panel->backlight.set(conn_state, level);
+	panel->backlight.funcs->set(conn_state, level);
 }
 
 /* set backlight brightness to level in range [0..max], assuming hw min is
@@ -726,13 +758,13 @@ void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state
 	mutex_unlock(&dev_priv->backlight_lock);
 }
 
-static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
 {
 	struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	u32 tmp;
 
-	intel_panel_actually_set_backlight(old_conn_state, 0);
+	intel_panel_set_pwm_level(old_conn_state, level);
 
 	/*
 	 * Although we don't support or enable CPU PWM with LPT/SPT based
@@ -754,13 +786,13 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
 	intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
 }
 
-static void pch_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
 {
 	struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	u32 tmp;
 
-	intel_panel_actually_set_backlight(old_conn_state, 0);
+	intel_panel_set_pwm_level(old_conn_state, val);
 
 	tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
 	intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
@@ -769,44 +801,44 @@ static void pch_disable_backlight(const struct drm_connector_state *old_conn_sta
 	intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
 }
 
-static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
 {
-	intel_panel_actually_set_backlight(old_conn_state, 0);
+	intel_panel_set_pwm_level(old_conn_state, val);
 }
 
-static void i965_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
 {
 	struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev);
 	u32 tmp;
 
-	intel_panel_actually_set_backlight(old_conn_state, 0);
+	intel_panel_set_pwm_level(old_conn_state, val);
 
 	tmp = intel_de_read(dev_priv, BLC_PWM_CTL2);
 	intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
 }
 
-static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
 {
 	struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
 	u32 tmp;
 
-	intel_panel_actually_set_backlight(old_conn_state, 0);
+	intel_panel_set_pwm_level(old_conn_state, val);
 
 	tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
 	intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
 		       tmp & ~BLM_PWM_ENABLE);
 }
 
-static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
 {
 	struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
-	u32 tmp, val;
+	u32 tmp;
 
-	intel_panel_actually_set_backlight(old_conn_state, 0);
+	intel_panel_set_pwm_level(old_conn_state, val);
 
 	tmp = intel_de_read(dev_priv,
 			    BXT_BLC_PWM_CTL(panel->backlight.controller));
@@ -820,14 +852,14 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta
 	}
 }
 
-static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
 {
 	struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
 	u32 tmp;
 
-	intel_panel_actually_set_backlight(old_conn_state, 0);
+	intel_panel_set_pwm_level(old_conn_state, val);
 
 	tmp = intel_de_read(dev_priv,
 			    BXT_BLC_PWM_CTL(panel->backlight.controller));
@@ -835,7 +867,7 @@ static void cnp_disable_backlight(const struct drm_connector_state *old_conn_sta
 		       tmp & ~BXT_BLC_PWM_ENABLE);
 }
 
-static void pwm_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
 {
 	struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
 	struct intel_panel *panel = &connector->panel;
@@ -870,13 +902,13 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st
 	if (panel->backlight.device)
 		panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
 	panel->backlight.enabled = false;
-	panel->backlight.disable(old_conn_state);
+	panel->backlight.funcs->disable(old_conn_state, 0);
 
 	mutex_unlock(&dev_priv->backlight_lock);
 }
 
 static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
-				 const struct drm_connector_state *conn_state)
+				 const struct drm_connector_state *conn_state, u32 level)
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -906,7 +938,7 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
 		intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken);
 	}
 
-	pch_ctl2 = panel->backlight.max << 16;
+	pch_ctl2 = panel->backlight.pwm_level_max << 16;
 	intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
 
 	pch_ctl1 = 0;
@@ -923,11 +955,11 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
 		       pch_ctl1 | BLM_PCH_PWM_ENABLE);
 
 	/* This won't stick until the above enable. */
-	intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+	intel_panel_set_pwm_level(conn_state, level);
 }
 
 static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
-				 const struct drm_connector_state *conn_state)
+				 const struct drm_connector_state *conn_state, u32 level)
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -958,9 +990,9 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
 	intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
 
 	/* This won't stick until the above enable. */
-	intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+	intel_panel_set_pwm_level(conn_state, level);
 
-	pch_ctl2 = panel->backlight.max << 16;
+	pch_ctl2 = panel->backlight.pwm_level_max << 16;
 	intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
 
 	pch_ctl1 = 0;
@@ -974,7 +1006,7 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
 }
 
 static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
-				  const struct drm_connector_state *conn_state)
+				  const struct drm_connector_state *conn_state, u32 level)
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -987,7 +1019,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
 		intel_de_write(dev_priv, BLC_PWM_CTL, 0);
 	}
 
-	freq = panel->backlight.max;
+	freq = panel->backlight.pwm_level_max;
 	if (panel->backlight.combination_mode)
 		freq /= 0xff;
 
@@ -1001,7 +1033,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
 	intel_de_posting_read(dev_priv, BLC_PWM_CTL);
 
 	/* XXX: combine this into above write? */
-	intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+	intel_panel_set_pwm_level(conn_state, level);
 
 	/*
 	 * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
@@ -1013,7 +1045,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
 }
 
 static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
-				  const struct drm_connector_state *conn_state)
+				  const struct drm_connector_state *conn_state, u32 level)
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1028,7 +1060,7 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
 		intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
 	}
 
-	freq = panel->backlight.max;
+	freq = panel->backlight.pwm_level_max;
 	if (panel->backlight.combination_mode)
 		freq /= 0xff;
 
@@ -1044,11 +1076,11 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
 	intel_de_posting_read(dev_priv, BLC_PWM_CTL2);
 	intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
 
-	intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+	intel_panel_set_pwm_level(conn_state, level);
 }
 
 static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
-				 const struct drm_connector_state *conn_state)
+				 const struct drm_connector_state *conn_state, u32 level)
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1063,11 +1095,11 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
 		intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
 	}
 
-	ctl = panel->backlight.max << 16;
+	ctl = panel->backlight.pwm_level_max << 16;
 	intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl);
 
 	/* XXX: combine this into above write? */
-	intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+	intel_panel_set_pwm_level(conn_state, level);
 
 	ctl2 = 0;
 	if (panel->backlight.active_low_pwm)
@@ -1079,7 +1111,7 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
 }
 
 static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
-				 const struct drm_connector_state *conn_state)
+				 const struct drm_connector_state *conn_state, u32 level)
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1116,9 +1148,9 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
 
 	intel_de_write(dev_priv,
 		       BXT_BLC_PWM_FREQ(panel->backlight.controller),
-		       panel->backlight.max);
+		       panel->backlight.pwm_level_max);
 
-	intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+	intel_panel_set_pwm_level(conn_state, level);
 
 	pwm_ctl = 0;
 	if (panel->backlight.active_low_pwm)
@@ -1133,7 +1165,7 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
 }
 
 static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
-				 const struct drm_connector_state *conn_state)
+				 const struct drm_connector_state *conn_state, u32 level)
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1152,9 +1184,9 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
 
 	intel_de_write(dev_priv,
 		       BXT_BLC_PWM_FREQ(panel->backlight.controller),
-		       panel->backlight.max);
+		       panel->backlight.pwm_level_max);
 
-	intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+	intel_panel_set_pwm_level(conn_state, level);
 
 	pwm_ctl = 0;
 	if (panel->backlight.active_low_pwm)
@@ -1168,14 +1200,12 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
 		       pwm_ctl | BXT_BLC_PWM_ENABLE);
 }
 
-static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
-				 const struct drm_connector_state *conn_state)
+static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+				     const struct drm_connector_state *conn_state, u32 level)
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct intel_panel *panel = &connector->panel;
-	int level = panel->backlight.level;
 
-	level = intel_panel_compute_brightness(connector, level);
 	pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
 	panel->backlight.pwm_state.enabled = true;
 	pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
@@ -1198,7 +1228,7 @@ static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_s
 						 panel->backlight.device->props.max_brightness);
 	}
 
-	panel->backlight.enable(crtc_state, conn_state);
+	panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level);
 	panel->backlight.enabled = true;
 	if (panel->backlight.device)
 		panel->backlight.device->props.power = FB_BLANK_UNBLANK;
@@ -1233,10 +1263,8 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
 
 	mutex_lock(&dev_priv->backlight_lock);
 
-	if (panel->backlight.enabled) {
-		val = panel->backlight.get(connector);
-		val = intel_panel_compute_brightness(connector, val);
-	}
+	if (panel->backlight.enabled)
+		val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
 
 	mutex_unlock(&dev_priv->backlight_lock);
 
@@ -1567,13 +1595,13 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
 	u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv);
 	u32 pwm;
 
-	if (!panel->backlight.hz_to_pwm) {
+	if (!panel->backlight.pwm_funcs->hz_to_pwm) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "backlight frequency conversion not supported\n");
 		return 0;
 	}
 
-	pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
+	pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz);
 	if (!pwm) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "backlight frequency conversion failed\n");
@@ -1592,7 +1620,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
 	struct intel_panel *panel = &connector->panel;
 	int min;
 
-	drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+	drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
 
 	/*
 	 * XXX: If the vbt value is 255, it makes min equal to max, which leads
@@ -1609,7 +1637,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
 	}
 
 	/* vbt value is a coefficient in range [0..255] */
-	return scale(min, 0, 255, 0, panel->backlight.max);
+	return scale(min, 0, 255, 0, panel->backlight.pwm_level_max);
 }
 
 static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
@@ -1629,29 +1657,27 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
 	panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
 
 	pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
-	panel->backlight.max = pch_ctl2 >> 16;
+	panel->backlight.pwm_level_max = pch_ctl2 >> 16;
 
 	cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
 
-	if (!panel->backlight.max)
-		panel->backlight.max = get_backlight_max_vbt(connector);
+	if (!panel->backlight.pwm_level_max)
+		panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
 
-	if (!panel->backlight.max)
+	if (!panel->backlight.pwm_level_max)
 		return -ENODEV;
 
-	panel->backlight.min = get_backlight_min_vbt(connector);
+	panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
 
-	panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
+	panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
 
-	cpu_mode = panel->backlight.enabled && HAS_PCH_LPT(dev_priv) &&
+	cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(dev_priv) &&
 		   !(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
 		   (cpu_ctl2 & BLM_PWM_ENABLE);
-	if (cpu_mode)
-		val = pch_get_backlight(connector);
-	else
-		val = lpt_get_backlight(connector);
 
 	if (cpu_mode) {
+		val = pch_get_backlight(connector, unused);
+
 		drm_dbg_kms(&dev_priv->drm,
 			    "CPU backlight register was enabled, switching to PCH override\n");
 
@@ -1664,10 +1690,6 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
 			       cpu_ctl2 & ~BLM_PWM_ENABLE);
 	}
 
-	val = intel_panel_compute_brightness(connector, val);
-	panel->backlight.level = clamp(val, panel->backlight.min,
-				       panel->backlight.max);
-
 	return 0;
 }
 
@@ -1675,29 +1697,24 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
-	u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+	u32 cpu_ctl2, pch_ctl1, pch_ctl2;
 
 	pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
 	panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
 
 	pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
-	panel->backlight.max = pch_ctl2 >> 16;
+	panel->backlight.pwm_level_max = pch_ctl2 >> 16;
 
-	if (!panel->backlight.max)
-		panel->backlight.max = get_backlight_max_vbt(connector);
+	if (!panel->backlight.pwm_level_max)
+		panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
 
-	if (!panel->backlight.max)
+	if (!panel->backlight.pwm_level_max)
 		return -ENODEV;
 
-	panel->backlight.min = get_backlight_min_vbt(connector);
-
-	val = pch_get_backlight(connector);
-	val = intel_panel_compute_brightness(connector, val);
-	panel->backlight.level = clamp(val, panel->backlight.min,
-				       panel->backlight.max);
+	panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
 
 	cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
-	panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
+	panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
 		(pch_ctl1 & BLM_PCH_PWM_ENABLE);
 
 	return 0;
@@ -1717,27 +1734,26 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
 	if (IS_PINEVIEW(dev_priv))
 		panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
 
-	panel->backlight.max = ctl >> 17;
+	panel->backlight.pwm_level_max = ctl >> 17;
 
-	if (!panel->backlight.max) {
-		panel->backlight.max = get_backlight_max_vbt(connector);
-		panel->backlight.max >>= 1;
+	if (!panel->backlight.pwm_level_max) {
+		panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+		panel->backlight.pwm_level_max >>= 1;
 	}
 
-	if (!panel->backlight.max)
+	if (!panel->backlight.pwm_level_max)
 		return -ENODEV;
 
 	if (panel->backlight.combination_mode)
-		panel->backlight.max *= 0xff;
+		panel->backlight.pwm_level_max *= 0xff;
 
-	panel->backlight.min = get_backlight_min_vbt(connector);
+	panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
 
-	val = i9xx_get_backlight(connector);
-	val = intel_panel_compute_brightness(connector, val);
-	panel->backlight.level = clamp(val, panel->backlight.min,
-				       panel->backlight.max);
+	val = i9xx_get_backlight(connector, unused);
+	val = intel_panel_invert_pwm_level(connector, val);
+	val = clamp(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
 
-	panel->backlight.enabled = val != 0;
+	panel->backlight.pwm_enabled = val != 0;
 
 	return 0;
 }
@@ -1746,32 +1762,27 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
-	u32 ctl, ctl2, val;
+	u32 ctl, ctl2;
 
 	ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
 	panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
 	panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
 
 	ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
-	panel->backlight.max = ctl >> 16;
+	panel->backlight.pwm_level_max = ctl >> 16;
 
-	if (!panel->backlight.max)
-		panel->backlight.max = get_backlight_max_vbt(connector);
+	if (!panel->backlight.pwm_level_max)
+		panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
 
-	if (!panel->backlight.max)
+	if (!panel->backlight.pwm_level_max)
 		return -ENODEV;
 
 	if (panel->backlight.combination_mode)
-		panel->backlight.max *= 0xff;
+		panel->backlight.pwm_level_max *= 0xff;
 
-	panel->backlight.min = get_backlight_min_vbt(connector);
+	panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
 
-	val = i9xx_get_backlight(connector);
-	val = intel_panel_compute_brightness(connector, val);
-	panel->backlight.level = clamp(val, panel->backlight.min,
-				       panel->backlight.max);
-
-	panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
+	panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
 
 	return 0;
 }
@@ -1780,7 +1791,7 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
-	u32 ctl, ctl2, val;
+	u32 ctl, ctl2;
 
 	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
 		return -ENODEV;
@@ -1789,22 +1800,17 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
 	panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
 
 	ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe));
-	panel->backlight.max = ctl >> 16;
+	panel->backlight.pwm_level_max = ctl >> 16;
 
-	if (!panel->backlight.max)
-		panel->backlight.max = get_backlight_max_vbt(connector);
+	if (!panel->backlight.pwm_level_max)
+		panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
 
-	if (!panel->backlight.max)
+	if (!panel->backlight.pwm_level_max)
 		return -ENODEV;
 
-	panel->backlight.min = get_backlight_min_vbt(connector);
-
-	val = _vlv_get_backlight(dev_priv, pipe);
-	val = intel_panel_compute_brightness(connector, val);
-	panel->backlight.level = clamp(val, panel->backlight.min,
-				       panel->backlight.max);
+	panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
 
-	panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
+	panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
 
 	return 0;
 }
@@ -1829,24 +1835,18 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
 	}
 
 	panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
-	panel->backlight.max =
-		intel_de_read(dev_priv,
-			      BXT_BLC_PWM_FREQ(panel->backlight.controller));
+	panel->backlight.pwm_level_max =
+		intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
 
-	if (!panel->backlight.max)
-		panel->backlight.max = get_backlight_max_vbt(connector);
+	if (!panel->backlight.pwm_level_max)
+		panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
 
-	if (!panel->backlight.max)
+	if (!panel->backlight.pwm_level_max)
 		return -ENODEV;
 
-	panel->backlight.min = get_backlight_min_vbt(connector);
+	panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
 
-	val = bxt_get_backlight(connector);
-	val = intel_panel_compute_brightness(connector, val);
-	panel->backlight.level = clamp(val, panel->backlight.min,
-				       panel->backlight.max);
-
-	panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+	panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
 
 	return 0;
 }
@@ -1856,7 +1856,7 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
-	u32 pwm_ctl, val;
+	u32 pwm_ctl;
 
 	/*
 	 * CNP has the BXT implementation of backlight, but with only one
@@ -1869,30 +1869,24 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
 				BXT_BLC_PWM_CTL(panel->backlight.controller));
 
 	panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
-	panel->backlight.max =
-		intel_de_read(dev_priv,
-			      BXT_BLC_PWM_FREQ(panel->backlight.controller));
+	panel->backlight.pwm_level_max =
+		intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
 
-	if (!panel->backlight.max)
-		panel->backlight.max = get_backlight_max_vbt(connector);
+	if (!panel->backlight.pwm_level_max)
+		panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
 
-	if (!panel->backlight.max)
+	if (!panel->backlight.pwm_level_max)
 		return -ENODEV;
 
-	panel->backlight.min = get_backlight_min_vbt(connector);
-
-	val = bxt_get_backlight(connector);
-	val = intel_panel_compute_brightness(connector, val);
-	panel->backlight.level = clamp(val, panel->backlight.min,
-				       panel->backlight.max);
+	panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
 
-	panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+	panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
 
 	return 0;
 }
 
-static int pwm_setup_backlight(struct intel_connector *connector,
-			       enum pipe pipe)
+static int ext_pwm_setup_backlight(struct intel_connector *connector,
+				   enum pipe pipe)
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1916,8 +1910,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
 		return -ENODEV;
 	}
 
-	panel->backlight.max = 100; /* 100% */
-	panel->backlight.min = get_backlight_min_vbt(connector);
+	panel->backlight.pwm_level_max = 100; /* 100% */
+	panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
 
 	if (pwm_is_enabled(panel->backlight.pwm)) {
 		/* PWM is already enabled, use existing settings */
@@ -1925,10 +1919,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
 
 		level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state,
 						    100);
-		level = intel_panel_compute_brightness(connector, level);
-		panel->backlight.level = clamp(level, panel->backlight.min,
-					       panel->backlight.max);
-		panel->backlight.enabled = true;
+		level = intel_panel_invert_pwm_level(connector, level);
+		panel->backlight.pwm_enabled = true;
 
 		drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
 			    NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
@@ -1944,6 +1936,58 @@ static int pwm_setup_backlight(struct intel_connector *connector,
 	return 0;
 }
 
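+/*
+ * Wrappers bridging user-visible backlight levels and raw PWM levels;
+ * the inversion quirk is applied when crossing into hw PWM space.
+ */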
+static void intel_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+	struct intel_connector *connector = to_intel_connector(conn_state->connector);
+	struct intel_panel *panel = &connector->panel;
+
+	panel->backlight.pwm_funcs->set(conn_state,
+				       intel_panel_invert_pwm_level(connector, level));
+}
+
+static u32 intel_pwm_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	return intel_panel_invert_pwm_level(connector,
+					    panel->backlight.pwm_funcs->get(connector, pipe));
+}
+
+static void intel_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+				       const struct drm_connector_state *conn_state, u32 level)
+{
+	struct intel_connector *connector = to_intel_connector(conn_state->connector);
+	struct intel_panel *panel = &connector->panel;
+
+	panel->backlight.pwm_funcs->enable(crtc_state, conn_state,
+					   intel_panel_invert_pwm_level(connector, level));
+}
+
+static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+	struct intel_connector *connector = to_intel_connector(conn_state->connector);
+	struct intel_panel *panel = &connector->panel;
+
+	panel->backlight.pwm_funcs->disable(conn_state,
+					    intel_panel_invert_pwm_level(connector, level));
+}
+
+static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+	struct intel_panel *panel = &connector->panel;
+	int ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+
+	if (ret < 0)
+		return ret;
+
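+	/* Bare PWM backlight: user levels map 1:1 onto PWM levels */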
+	panel->backlight.min = panel->backlight.pwm_level_min;
+	panel->backlight.max = panel->backlight.pwm_level_max;
+	panel->backlight.level = intel_pwm_get_backlight(connector, pipe);
+	panel->backlight.enabled = panel->backlight.pwm_enabled;
+
+	return 0;
+}
+
 void intel_panel_update_backlight(struct intel_atomic_state *state,
 				  struct intel_encoder *encoder,
 				  const struct intel_crtc_state *crtc_state,
@@ -1982,12 +2026,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
 	}
 
 	/* ensure intel_panel has been initialized first */
-	if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.setup))
+	if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.funcs))
 		return -ENODEV;
 
 	/* set level and max in panel struct */
 	mutex_lock(&dev_priv->backlight_lock);
-	ret = panel->backlight.setup(intel_connector, pipe);
+	ret = panel->backlight.funcs->setup(intel_connector, pipe);
 	mutex_unlock(&dev_priv->backlight_lock);
 
 	if (ret) {
@@ -2017,6 +2061,94 @@ static void intel_panel_destroy_backlight(struct intel_panel *panel)
 	panel->backlight.present = false;
 }
 
+static const struct intel_panel_bl_funcs bxt_pwm_funcs = {
+	.setup = bxt_setup_backlight,
+	.enable = bxt_enable_backlight,
+	.disable = bxt_disable_backlight,
+	.set = bxt_set_backlight,
+	.get = bxt_get_backlight,
+	.hz_to_pwm = bxt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs cnp_pwm_funcs = {
+	.setup = cnp_setup_backlight,
+	.enable = cnp_enable_backlight,
+	.disable = cnp_disable_backlight,
+	.set = bxt_set_backlight,
+	.get = bxt_get_backlight,
+	.hz_to_pwm = cnp_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs lpt_pwm_funcs = {
+	.setup = lpt_setup_backlight,
+	.enable = lpt_enable_backlight,
+	.disable = lpt_disable_backlight,
+	.set = lpt_set_backlight,
+	.get = lpt_get_backlight,
+	.hz_to_pwm = lpt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs spt_pwm_funcs = {
+	.setup = lpt_setup_backlight,
+	.enable = lpt_enable_backlight,
+	.disable = lpt_disable_backlight,
+	.set = lpt_set_backlight,
+	.get = lpt_get_backlight,
+	.hz_to_pwm = spt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs pch_pwm_funcs = {
+	.setup = pch_setup_backlight,
+	.enable = pch_enable_backlight,
+	.disable = pch_disable_backlight,
+	.set = pch_set_backlight,
+	.get = pch_get_backlight,
+	.hz_to_pwm = pch_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs ext_pwm_funcs = {
+	.setup = ext_pwm_setup_backlight,
+	.enable = ext_pwm_enable_backlight,
+	.disable = ext_pwm_disable_backlight,
+	.set = ext_pwm_set_backlight,
+	.get = ext_pwm_get_backlight,
+};
+
+static const struct intel_panel_bl_funcs vlv_pwm_funcs = {
+	.setup = vlv_setup_backlight,
+	.enable = vlv_enable_backlight,
+	.disable = vlv_disable_backlight,
+	.set = vlv_set_backlight,
+	.get = vlv_get_backlight,
+	.hz_to_pwm = vlv_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs i965_pwm_funcs = {
+	.setup = i965_setup_backlight,
+	.enable = i965_enable_backlight,
+	.disable = i965_disable_backlight,
+	.set = i9xx_set_backlight,
+	.get = i9xx_get_backlight,
+	.hz_to_pwm = i965_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs i9xx_pwm_funcs = {
+	.setup = i9xx_setup_backlight,
+	.enable = i9xx_enable_backlight,
+	.disable = i9xx_disable_backlight,
+	.set = i9xx_set_backlight,
+	.get = i9xx_get_backlight,
+	.hz_to_pwm = i9xx_hz_to_pwm,
+};
+
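+/* Top-level ops that route each call through the chip-specific PWM ops above */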
+static const struct intel_panel_bl_funcs pwm_bl_funcs = {
+	.setup = intel_pwm_setup_backlight,
+	.enable = intel_pwm_enable_backlight,
+	.disable = intel_pwm_disable_backlight,
+	.set = intel_pwm_set_backlight,
+	.get = intel_pwm_get_backlight,
+};
+
 /* Set up chip specific backlight functions */
 static void
 intel_panel_init_backlight_funcs(struct intel_panel *panel)
@@ -2025,75 +2157,39 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
 		container_of(panel, struct intel_connector, panel);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 
-	if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
-	    intel_dp_aux_init_backlight_funcs(connector) == 0)
-		return;
-
 	if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
 	    intel_dsi_dcs_init_backlight_funcs(connector) == 0)
 		return;
 
 	if (IS_GEN9_LP(dev_priv)) {
-		panel->backlight.setup = bxt_setup_backlight;
-		panel->backlight.enable = bxt_enable_backlight;
-		panel->backlight.disable = bxt_disable_backlight;
-		panel->backlight.set = bxt_set_backlight;
-		panel->backlight.get = bxt_get_backlight;
-		panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
+		panel->backlight.pwm_funcs = &bxt_pwm_funcs;
 	} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
-		panel->backlight.setup = cnp_setup_backlight;
-		panel->backlight.enable = cnp_enable_backlight;
-		panel->backlight.disable = cnp_disable_backlight;
-		panel->backlight.set = bxt_set_backlight;
-		panel->backlight.get = bxt_get_backlight;
-		panel->backlight.hz_to_pwm = cnp_hz_to_pwm;
+		panel->backlight.pwm_funcs = &cnp_pwm_funcs;
 	} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
-		panel->backlight.setup = lpt_setup_backlight;
-		panel->backlight.enable = lpt_enable_backlight;
-		panel->backlight.disable = lpt_disable_backlight;
-		panel->backlight.set = lpt_set_backlight;
-		panel->backlight.get = lpt_get_backlight;
 		if (HAS_PCH_LPT(dev_priv))
-			panel->backlight.hz_to_pwm = lpt_hz_to_pwm;
+			panel->backlight.pwm_funcs = &lpt_pwm_funcs;
 		else
-			panel->backlight.hz_to_pwm = spt_hz_to_pwm;
+			panel->backlight.pwm_funcs = &spt_pwm_funcs;
 	} else if (HAS_PCH_SPLIT(dev_priv)) {
-		panel->backlight.setup = pch_setup_backlight;
-		panel->backlight.enable = pch_enable_backlight;
-		panel->backlight.disable = pch_disable_backlight;
-		panel->backlight.set = pch_set_backlight;
-		panel->backlight.get = pch_get_backlight;
-		panel->backlight.hz_to_pwm = pch_hz_to_pwm;
+		panel->backlight.pwm_funcs = &pch_pwm_funcs;
 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
-			panel->backlight.setup = pwm_setup_backlight;
-			panel->backlight.enable = pwm_enable_backlight;
-			panel->backlight.disable = pwm_disable_backlight;
-			panel->backlight.set = pwm_set_backlight;
-			panel->backlight.get = pwm_get_backlight;
+			panel->backlight.pwm_funcs = &ext_pwm_funcs;
 		} else {
-			panel->backlight.setup = vlv_setup_backlight;
-			panel->backlight.enable = vlv_enable_backlight;
-			panel->backlight.disable = vlv_disable_backlight;
-			panel->backlight.set = vlv_set_backlight;
-			panel->backlight.get = vlv_get_backlight;
-			panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
+			panel->backlight.pwm_funcs = &vlv_pwm_funcs;
 		}
 	} else if (IS_GEN(dev_priv, 4)) {
-		panel->backlight.setup = i965_setup_backlight;
-		panel->backlight.enable = i965_enable_backlight;
-		panel->backlight.disable = i965_disable_backlight;
-		panel->backlight.set = i9xx_set_backlight;
-		panel->backlight.get = i9xx_get_backlight;
-		panel->backlight.hz_to_pwm = i965_hz_to_pwm;
+		panel->backlight.pwm_funcs = &i965_pwm_funcs;
 	} else {
-		panel->backlight.setup = i9xx_setup_backlight;
-		panel->backlight.enable = i9xx_enable_backlight;
-		panel->backlight.disable = i9xx_disable_backlight;
-		panel->backlight.set = i9xx_set_backlight;
-		panel->backlight.get = i9xx_get_backlight;
-		panel->backlight.hz_to_pwm = i9xx_hz_to_pwm;
+		panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
 	}
+
+	if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
+	    intel_dp_aux_init_backlight_funcs(connector) == 0)
+		return;
+
+	/* We're using a standard PWM backlight interface */
+	panel->backlight.funcs = &pwm_bl_funcs;
 }
 
 enum drm_connector_status
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index 5b813fe90557c17210261722a441d6fb40c88647..1d340f77bffc7926fc2118326e83112a8b856a67 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -49,6 +49,10 @@ struct drm_display_mode *
 intel_panel_edid_fixed_mode(struct intel_connector *connector);
 struct drm_display_mode *
 intel_panel_vbt_fixed_mode(struct intel_connector *connector);
+void intel_panel_set_pwm_level(const struct drm_connector_state *conn_state, u32 level);
+u32 intel_panel_invert_pwm_level(struct intel_connector *connector, u32 level);
+u32 intel_panel_backlight_level_to_pwm(struct intel_connector *connector, u32 level);
+u32 intel_panel_backlight_level_from_pwm(struct intel_connector *connector, u32 val);
 
 #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
 int intel_backlight_device_register(struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
new file mode 100644
index 0000000000000000000000000000000000000000..c4867a8020a5b700095d07731f372a44ec34a35a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -0,0 +1,1406 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_pps.h"
+
+static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+				      enum pipe pipe);
+
+static void pps_init_delays(struct intel_dp *intel_dp);
+static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
+
+intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	intel_wakeref_t wakeref;
+
+	/*
+	 * See intel_pps_reset_all() for why we need a power domain reference here.
+	 */
+	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
+	mutex_lock(&dev_priv->pps_mutex);
+
+	return wakeref;
+}
+
+intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
+				 intel_wakeref_t wakeref)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	mutex_unlock(&dev_priv->pps_mutex);
+	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+	return 0;
+}
+
+static void
+vlv_power_sequencer_kick(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum pipe pipe = intel_dp->pps.pps_pipe;
+	bool pll_enabled, release_cl_override = false;
+	enum dpio_phy phy = DPIO_PHY(pipe);
+	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
+	u32 DP;
+
+	if (drm_WARN(&dev_priv->drm,
+		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
+		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
+		     pipe_name(pipe), dig_port->base.base.base.id,
+		     dig_port->base.base.name))
+		return;
+
+	drm_dbg_kms(&dev_priv->drm,
+		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
+		    pipe_name(pipe), dig_port->base.base.base.id,
+		    dig_port->base.base.name);
+
+	/* Preserve the BIOS-computed detected bit. This is
+	 * supposed to be read-only.
+	 */
+	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
+	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+	DP |= DP_PORT_WIDTH(1);
+	DP |= DP_LINK_TRAIN_PAT_1;
+
+	if (IS_CHERRYVIEW(dev_priv))
+		DP |= DP_PIPE_SEL_CHV(pipe);
+	else
+		DP |= DP_PIPE_SEL(pipe);
+
+	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
+
+	/*
+	 * The DPLL for the pipe must be enabled for this to work.
+	 * So enable it temporarily if it's not already enabled.
+	 */
+	if (!pll_enabled) {
+		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
+			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
+
+		if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
+			drm_err(&dev_priv->drm,
+				"Failed to force on pll for pipe %c!\n",
+				pipe_name(pipe));
+			return;
+		}
+	}
+
+	/*
+	 * Similar magic as in intel_dp_enable_port().
+	 * We _must_ do this port enable + disable trick
+	 * to make this power sequencer lock onto the port.
+	 * Otherwise even VDD force bit won't work.
+	 */
+	intel_de_write(dev_priv, intel_dp->output_reg, DP);
+	intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
+	intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
+	intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+	if (!pll_enabled) {
+		vlv_force_pll_off(dev_priv, pipe);
+
+		if (release_cl_override)
+			chv_phy_powergate_ch(dev_priv, phy, ch, false);
+	}
+}
+
+static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
+{
+	struct intel_encoder *encoder;
+	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
+
+	/*
+	 * We don't have a power sequencer currently.
+	 * Pick one that's not used by other ports.
+	 */
+	for_each_intel_dp(&dev_priv->drm, encoder) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		if (encoder->type == INTEL_OUTPUT_EDP) {
+			drm_WARN_ON(&dev_priv->drm,
+				    intel_dp->pps.active_pipe != INVALID_PIPE &&
+				    intel_dp->pps.active_pipe !=
+				    intel_dp->pps.pps_pipe);
+
+			if (intel_dp->pps.pps_pipe != INVALID_PIPE)
+				pipes &= ~(1 << intel_dp->pps.pps_pipe);
+		} else {
+			drm_WARN_ON(&dev_priv->drm,
+				    intel_dp->pps.pps_pipe != INVALID_PIPE);
+
+			if (intel_dp->pps.active_pipe != INVALID_PIPE)
+				pipes &= ~(1 << intel_dp->pps.active_pipe);
+		}
+	}
+
+	if (pipes == 0)
+		return INVALID_PIPE;
+
+	return ffs(pipes) - 1;
+}
+
+static enum pipe
+vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum pipe pipe;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	/* We should never land here with regular DP ports */
+	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+
+	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
+		    intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
+
+	if (intel_dp->pps.pps_pipe != INVALID_PIPE)
+		return intel_dp->pps.pps_pipe;
+
+	pipe = vlv_find_free_pps(dev_priv);
+
+	/*
+	 * Didn't find one. This should not happen since there
+	 * are two power sequencers and up to two eDP ports.
+	 */
+	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
+		pipe = PIPE_A;
+
+	vlv_steal_power_sequencer(dev_priv, pipe);
+	intel_dp->pps.pps_pipe = pipe;
+
+	drm_dbg_kms(&dev_priv->drm,
+		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
+		    pipe_name(intel_dp->pps.pps_pipe),
+		    dig_port->base.base.base.id,
+		    dig_port->base.base.name);
+
+	/* init power sequencer on this pipe and port */
+	pps_init_delays(intel_dp);
+	pps_init_registers(intel_dp, true);
+
+	/*
+	 * Even vdd force doesn't work until we've made
+	 * Even the VDD force bit doesn't work until we've made
+	 * the power sequencer lock onto the port.
+	vlv_power_sequencer_kick(intel_dp);
+
+	return intel_dp->pps.pps_pipe;
+}
+
+static int
+bxt_power_sequencer_idx(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	int backlight_controller = dev_priv->vbt.backlight.controller;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	/* We should never land here with regular DP ports */
+	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+
+	if (!intel_dp->pps.pps_reset)
+		return backlight_controller;
+
+	intel_dp->pps.pps_reset = false;
+
+	/*
+	 * Only the HW needs to be reprogrammed, the SW state is fixed and
+	 * has been setup during connector init.
+	 */
+	pps_init_registers(intel_dp, false);
+
+	return backlight_controller;
+}
+
+typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
+			       enum pipe pipe);
+
+static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
+			       enum pipe pipe)
+{
+	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
+}
+
+static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
+				enum pipe pipe)
+{
+	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+}
+
+static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
+			 enum pipe pipe)
+{
+	return true;
+}
+
+static enum pipe
+vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
+		     enum port port,
+		     vlv_pipe_check pipe_check)
+{
+	enum pipe pipe;
+
+	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
+		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
+			PANEL_PORT_SELECT_MASK;
+
+		if (port_sel != PANEL_PORT_SELECT_VLV(port))
+			continue;
+
+		if (!pipe_check(dev_priv, pipe))
+			continue;
+
+		return pipe;
+	}
+
+	return INVALID_PIPE;
+}
+
+static void
+vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum port port = dig_port->base.port;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	/* try to find a pipe with this port selected */
+	/* first pick one where the panel is on */
+	intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+						      vlv_pipe_has_pp_on);
+	/* didn't find one? pick one where vdd is on */
+	if (intel_dp->pps.pps_pipe == INVALID_PIPE)
+		intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+							      vlv_pipe_has_vdd_on);
+	/* didn't find one? pick one with just the correct port */
+	if (intel_dp->pps.pps_pipe == INVALID_PIPE)
+		intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+							      vlv_pipe_any);
+
+	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
+	if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "no initial power sequencer for [ENCODER:%d:%s]\n",
+			    dig_port->base.base.base.id,
+			    dig_port->base.base.name);
+		return;
+	}
+
+	drm_dbg_kms(&dev_priv->drm,
+		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
+		    dig_port->base.base.base.id,
+		    dig_port->base.base.name,
+		    pipe_name(intel_dp->pps.pps_pipe));
+}
+
+void intel_pps_reset_all(struct drm_i915_private *dev_priv)
+{
+	struct intel_encoder *encoder;
+
+	if (drm_WARN_ON(&dev_priv->drm,
+			!(IS_VALLEYVIEW(dev_priv) ||
+			  IS_CHERRYVIEW(dev_priv) ||
+			  IS_GEN9_LP(dev_priv))))
+		return;
+
+	/*
+	 * We can't grab pps_mutex here due to deadlock with power_domain
+	 * mutex when power_domain functions are called while holding pps_mutex.
+	 * That also means that in order to use pps_pipe the code needs to
+	 * hold both a power domain reference and pps_mutex, and the power domain
+	 * reference get/put must be done while _not_ holding pps_mutex.
+	 * pps_{lock,unlock}() do these steps in the correct order, so they
+	 * should always be used.
+	 */
+
+	for_each_intel_dp(&dev_priv->drm, encoder) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		drm_WARN_ON(&dev_priv->drm,
+			    intel_dp->pps.active_pipe != INVALID_PIPE);
+
+		if (encoder->type != INTEL_OUTPUT_EDP)
+			continue;
+
+		if (IS_GEN9_LP(dev_priv))
+			intel_dp->pps.pps_reset = true;
+		else
+			intel_dp->pps.pps_pipe = INVALID_PIPE;
+	}
+}
+
+struct pps_registers {
+	i915_reg_t pp_ctrl;
+	i915_reg_t pp_stat;
+	i915_reg_t pp_on;
+	i915_reg_t pp_off;
+	i915_reg_t pp_div;
+};
+
+static void intel_pps_get_registers(struct intel_dp *intel_dp,
+				    struct pps_registers *regs)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	int pps_idx = 0;
+
+	memset(regs, 0, sizeof(*regs));
+
+	if (IS_GEN9_LP(dev_priv))
+		pps_idx = bxt_power_sequencer_idx(intel_dp);
+	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		pps_idx = vlv_power_sequencer_pipe(intel_dp);
+
+	regs->pp_ctrl = PP_CONTROL(pps_idx);
+	regs->pp_stat = PP_STATUS(pps_idx);
+	regs->pp_on = PP_ON_DELAYS(pps_idx);
+	regs->pp_off = PP_OFF_DELAYS(pps_idx);
+
+	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
+	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+		regs->pp_div = INVALID_MMIO_REG;
+	else
+		regs->pp_div = PP_DIVISOR(pps_idx);
+}
+
+static i915_reg_t
+_pp_ctrl_reg(struct intel_dp *intel_dp)
+{
+	struct pps_registers regs;
+
+	intel_pps_get_registers(intel_dp, &regs);
+
+	return regs.pp_ctrl;
+}
+
+static i915_reg_t
+_pp_stat_reg(struct intel_dp *intel_dp)
+{
+	struct pps_registers regs;
+
+	intel_pps_get_registers(intel_dp, &regs);
+
+	return regs.pp_stat;
+}
+
+static bool edp_have_panel_power(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	    intel_dp->pps.pps_pipe == INVALID_PIPE)
+		return false;
+
+	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
+}
+
+static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	    intel_dp->pps.pps_pipe == INVALID_PIPE)
+		return false;
+
+	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
+}
+
+void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	if (!intel_dp_is_edp(intel_dp))
+		return;
+
+	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
+		drm_WARN(&dev_priv->drm, 1,
+			 "eDP powered off while attempting aux channel communication.\n");
+		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
+			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
+			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
+	}
+}
+
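+/*
+ * Each MASK/VALUE pair below spells out the PP_ON, PP_SEQUENCE,
+ * PP_CYCLE_DELAY_ACTIVE and PP_SEQUENCE_STATE columns of PP_STATUS;
+ * the "| 0" terms are only there to keep the columns aligned.
+ */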
+#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
+#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
+
+#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
+
+static void intel_pps_verify_state(struct intel_dp *intel_dp);
+
+static void wait_panel_status(struct intel_dp *intel_dp,
+			      u32 mask, u32 value)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	i915_reg_t pp_stat_reg, pp_ctrl_reg;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	intel_pps_verify_state(intel_dp);
+
+	pp_stat_reg = _pp_stat_reg(intel_dp);
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+	drm_dbg_kms(&dev_priv->drm,
+		    "mask %08x value %08x status %08x control %08x\n",
+		    mask, value,
+		    intel_de_read(dev_priv, pp_stat_reg),
+		    intel_de_read(dev_priv, pp_ctrl_reg));
+
+	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
+				       mask, value, 5000))
+		drm_err(&dev_priv->drm,
+			"Panel status timeout: status %08x control %08x\n",
+			intel_de_read(dev_priv, pp_stat_reg),
+			intel_de_read(dev_priv, pp_ctrl_reg));
+
+	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
+}
+
+static void wait_panel_on(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+	drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
+	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+static void wait_panel_off(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+	drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
+	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+static void wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	ktime_t panel_power_on_time;
+	s64 panel_power_off_duration;
+
+	drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
+
+	/* take the difference of current time and panel power off time
+	 * and then make the panel wait for t11_t12 if needed. */
+	panel_power_on_time = ktime_get_boottime();
+	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
+
+	/* When we disable the VDD override bit last, we have to do the manual
+	 * wait. */
+	if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
+		wait_remaining_ms_from_jiffies(jiffies,
+					       intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
+
+	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
+{
+	intel_wakeref_t wakeref;
+
+	if (!intel_dp_is_edp(intel_dp))
+		return;
+
+	with_intel_pps_lock(intel_dp, wakeref)
+		wait_panel_power_cycle(intel_dp);
+}
+
+static void wait_backlight_on(struct intel_dp *intel_dp)
+{
+	wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
+				       intel_dp->pps.backlight_on_delay);
+}
+
+static void edp_wait_backlight_off(struct intel_dp *intel_dp)
+{
+	wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
+				       intel_dp->pps.backlight_off_delay);
+}
+
+/*
+ * Read the current pp_control value, unlocking the register if it
+ * is locked.
+ */
+static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	u32 control;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
+	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
+			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
+		control &= ~PANEL_UNLOCK_MASK;
+		control |= PANEL_UNLOCK_REGS;
+	}
+	return control;
+}
+
+/*
+ * Must be paired with intel_pps_vdd_off_unlocked().
+ * Must hold pps_mutex around the whole on/off sequence.
+ * Can be nested with intel_pps_vdd_{on,off}() calls.
+ */
+bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	u32 pp;
+	i915_reg_t pp_stat_reg, pp_ctrl_reg;
+	bool need_to_disable = !intel_dp->pps.want_panel_vdd;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if (!intel_dp_is_edp(intel_dp))
+		return false;
+
+	cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
+	intel_dp->pps.want_panel_vdd = true;
+
+	if (edp_have_panel_vdd(intel_dp))
+		return need_to_disable;
+
+	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
+							    intel_aux_power_domain(dig_port));
+
+	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
+		    dig_port->base.base.base.id,
+		    dig_port->base.base.name);
+
+	if (!edp_have_panel_power(intel_dp))
+		wait_panel_power_cycle(intel_dp);
+
+	pp = ilk_get_pp_control(intel_dp);
+	pp |= EDP_FORCE_VDD;
+
+	pp_stat_reg = _pp_stat_reg(intel_dp);
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+	intel_de_write(dev_priv, pp_ctrl_reg, pp);
+	intel_de_posting_read(dev_priv, pp_ctrl_reg);
+	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+		    intel_de_read(dev_priv, pp_stat_reg),
+		    intel_de_read(dev_priv, pp_ctrl_reg));
+	/*
+	 * If the panel wasn't on, delay before accessing aux channel
+	 */
+	if (!edp_have_panel_power(intel_dp)) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "[ENCODER:%d:%s] panel power wasn't enabled\n",
+			    dig_port->base.base.base.id,
+			    dig_port->base.base.name);
+		msleep(intel_dp->pps.panel_power_up_delay);
+	}
+
+	return need_to_disable;
+}
+
+/*
+ * Must be paired with intel_pps_off().
+ * Nested calls to these functions are not allowed since
+ * we drop the lock. Caller must use some higher level
+ * locking to prevent nested calls from other threads.
+ */
+void intel_pps_vdd_on(struct intel_dp *intel_dp)
+{
+	intel_wakeref_t wakeref;
+	bool vdd;
+
+	if (!intel_dp_is_edp(intel_dp))
+		return;
+
+	vdd = false;
+	with_intel_pps_lock(intel_dp, wakeref)
+		vdd = intel_pps_vdd_on_unlocked(intel_dp);
+	I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
+			dp_to_dig_port(intel_dp)->base.base.base.id,
+			dp_to_dig_port(intel_dp)->base.base.name);
+}
+
+static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port =
+		dp_to_dig_port(intel_dp);
+	u32 pp;
+	i915_reg_t pp_stat_reg, pp_ctrl_reg;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
+
+	if (!edp_have_panel_vdd(intel_dp))
+		return;
+
+	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
+		    dig_port->base.base.base.id,
+		    dig_port->base.base.name);
+
+	pp = ilk_get_pp_control(intel_dp);
+	pp &= ~EDP_FORCE_VDD;
+
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+	pp_stat_reg = _pp_stat_reg(intel_dp);
+
+	intel_de_write(dev_priv, pp_ctrl_reg, pp);
+	intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+	/* Make sure sequencer is idle before allowing subsequent activity */
+	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+		    intel_de_read(dev_priv, pp_stat_reg),
+		    intel_de_read(dev_priv, pp_ctrl_reg));
+
+	if ((pp & PANEL_POWER_ON) == 0)
+		intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+
+	intel_display_power_put(dev_priv,
+				intel_aux_power_domain(dig_port),
+				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
+}
+
+void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
+{
+	intel_wakeref_t wakeref;
+
+	if (!intel_dp_is_edp(intel_dp))
+		return;
+
+	cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
+	/*
+	 * vdd might still be enabled due to the delayed vdd off.
+	 * Make sure vdd is actually turned off here.
+	 */
+	with_intel_pps_lock(intel_dp, wakeref)
+		intel_pps_vdd_off_sync_unlocked(intel_dp);
+}
+
+static void edp_panel_vdd_work(struct work_struct *__work)
+{
+	struct intel_pps *pps = container_of(to_delayed_work(__work),
+					     struct intel_pps, panel_vdd_work);
+	struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
+	intel_wakeref_t wakeref;
+
+	with_intel_pps_lock(intel_dp, wakeref) {
+		if (!intel_dp->pps.want_panel_vdd)
+			intel_pps_vdd_off_sync_unlocked(intel_dp);
+	}
+}
+
+static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
+{
+	unsigned long delay;
+
+	/*
+	 * Queue the timer to fire a long time from now (relative to the power
+	 * down delay) to keep the panel power up across a sequence of
+	 * operations.
+	 */
+	delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
+	schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
+}
+
+/*
+ * Must be paired with intel_pps_vdd_on_unlocked().
+ * Must hold pps_mutex around the whole on/off sequence.
+ * Can be nested with intel_pps_vdd_{on,off}() calls.
+ */
+void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if (!intel_dp_is_edp(intel_dp))
+		return;
+
+	I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
+			dp_to_dig_port(intel_dp)->base.base.base.id,
+			dp_to_dig_port(intel_dp)->base.base.name);
+
+	intel_dp->pps.want_panel_vdd = false;
+
+	if (sync)
+		intel_pps_vdd_off_sync_unlocked(intel_dp);
+	else
+		edp_panel_vdd_schedule_off(intel_dp);
+}
+
+void intel_pps_on_unlocked(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	u32 pp;
+	i915_reg_t pp_ctrl_reg;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if (!intel_dp_is_edp(intel_dp))
+		return;
+
+	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
+		    dp_to_dig_port(intel_dp)->base.base.base.id,
+		    dp_to_dig_port(intel_dp)->base.base.name);
+
+	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
+		     "[ENCODER:%d:%s] panel power already on\n",
+		     dp_to_dig_port(intel_dp)->base.base.base.id,
+		     dp_to_dig_port(intel_dp)->base.base.name))
+		return;
+
+	wait_panel_power_cycle(intel_dp);
+
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+	pp = ilk_get_pp_control(intel_dp);
+	if (IS_GEN(dev_priv, 5)) {
+		/* ILK workaround: disable reset around power sequence */
+		pp &= ~PANEL_POWER_RESET;
+		intel_de_write(dev_priv, pp_ctrl_reg, pp);
+		intel_de_posting_read(dev_priv, pp_ctrl_reg);
+	}
+
+	pp |= PANEL_POWER_ON;
+	if (!IS_GEN(dev_priv, 5))
+		pp |= PANEL_POWER_RESET;
+
+	intel_de_write(dev_priv, pp_ctrl_reg, pp);
+	intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+	wait_panel_on(intel_dp);
+	intel_dp->pps.last_power_on = jiffies;
+
+	if (IS_GEN(dev_priv, 5)) {
+		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+		intel_de_write(dev_priv, pp_ctrl_reg, pp);
+		intel_de_posting_read(dev_priv, pp_ctrl_reg);
+	}
+}
+
+void intel_pps_on(struct intel_dp *intel_dp)
+{
+	intel_wakeref_t wakeref;
+
+	if (!intel_dp_is_edp(intel_dp))
+		return;
+
+	with_intel_pps_lock(intel_dp, wakeref)
+		intel_pps_on_unlocked(intel_dp);
+}
+
+void intel_pps_off_unlocked(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	u32 pp;
+	i915_reg_t pp_ctrl_reg;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if (!intel_dp_is_edp(intel_dp))
+		return;
+
+	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
+		    dig_port->base.base.base.id, dig_port->base.base.name);
+
+	drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
+		 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
+		 dig_port->base.base.base.id, dig_port->base.base.name);
+
+	pp = ilk_get_pp_control(intel_dp);
+	/* We need to switch off panel power _and_ force vdd, because otherwise
+	 * some panels get very unhappy and cease to work. */
+	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
+		EDP_BLC_ENABLE);
+
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+	intel_dp->pps.want_panel_vdd = false;
+
+	intel_de_write(dev_priv, pp_ctrl_reg, pp);
+	intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+	wait_panel_off(intel_dp);
+	intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+
+	/* We got a reference when we enabled the VDD. */
+	intel_display_power_put(dev_priv,
+				intel_aux_power_domain(dig_port),
+				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
+}
+
+void intel_pps_off(struct intel_dp *intel_dp)
+{
+	intel_wakeref_t wakeref;
+
+	if (!intel_dp_is_edp(intel_dp))
+		return;
+
+	with_intel_pps_lock(intel_dp, wakeref)
+		intel_pps_off_unlocked(intel_dp);
+}
+
+/* Enable backlight in the panel power control. */
+void intel_pps_backlight_on(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	intel_wakeref_t wakeref;
+
+	/*
+	 * If we enable the backlight right away following a panel power
+	 * on, we may see slight flicker as the panel syncs with the eDP
+	 * link.  So delay a bit to make sure the image is solid before
+	 * allowing it to appear.
+	 */
+	wait_backlight_on(intel_dp);
+
+	with_intel_pps_lock(intel_dp, wakeref) {
+		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+		u32 pp;
+
+		pp = ilk_get_pp_control(intel_dp);
+		pp |= EDP_BLC_ENABLE;
+
+		intel_de_write(dev_priv, pp_ctrl_reg, pp);
+		intel_de_posting_read(dev_priv, pp_ctrl_reg);
+	}
+}
+
+/* Disable backlight in the panel power control. */
+void intel_pps_backlight_off(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	intel_wakeref_t wakeref;
+
+	if (!intel_dp_is_edp(intel_dp))
+		return;
+
+	with_intel_pps_lock(intel_dp, wakeref) {
+		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+		u32 pp;
+
+		pp = ilk_get_pp_control(intel_dp);
+		pp &= ~EDP_BLC_ENABLE;
+
+		intel_de_write(dev_priv, pp_ctrl_reg, pp);
+		intel_de_posting_read(dev_priv, pp_ctrl_reg);
+	}
+
+	intel_dp->pps.last_backlight_off = jiffies;
+	edp_wait_backlight_off(intel_dp);
+}
+
+/*
+ * Hook for controlling the panel power control backlight through the bl_power
+ * sysfs attribute. Take care to handle multiple calls.
+ */
+void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
+{
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	intel_wakeref_t wakeref;
+	bool is_enabled;
+
+	is_enabled = false;
+	with_intel_pps_lock(intel_dp, wakeref)
+		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
+	if (is_enabled == enable)
+		return;
+
+	drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
+		    enable ? "enable" : "disable");
+
+	if (enable)
+		intel_pps_backlight_on(intel_dp);
+	else
+		intel_pps_backlight_off(intel_dp);
+}
+
+static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	enum pipe pipe = intel_dp->pps.pps_pipe;
+	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
+
+	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+
+	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+		return;
+
+	intel_pps_vdd_off_sync_unlocked(intel_dp);
+
+	/*
+	 * VLV seems to get confused when multiple power sequencers
+	 * have the same port selected (even if only one has power/vdd
+	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
+	 * CHV, on the other hand, doesn't seem to mind having the same port
+	 * selected in multiple power sequencers, but let's always clear the
+	 * port select when logically disconnecting a power sequencer
+	 * from a port.
+	 */
+	drm_dbg_kms(&dev_priv->drm,
+		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
+		    pipe_name(pipe), dig_port->base.base.base.id,
+		    dig_port->base.base.name);
+	intel_de_write(dev_priv, pp_on_reg, 0);
+	intel_de_posting_read(dev_priv, pp_on_reg);
+
+	intel_dp->pps.pps_pipe = INVALID_PIPE;
+}
+
+static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+				      enum pipe pipe)
+{
+	struct intel_encoder *encoder;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	for_each_intel_dp(&dev_priv->drm, encoder) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
+			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+			 pipe_name(pipe), encoder->base.base.id,
+			 encoder->base.name);
+
+		if (intel_dp->pps.pps_pipe != pipe)
+			continue;
+
+		drm_dbg_kms(&dev_priv->drm,
+			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
+			    pipe_name(pipe), encoder->base.base.id,
+			    encoder->base.name);
+
+		/* make sure vdd is off before we steal it */
+		vlv_detach_power_sequencer(intel_dp);
+	}
+}
+
+void vlv_pps_init(struct intel_encoder *encoder,
+		  const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+
+	if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
+	    intel_dp->pps.pps_pipe != crtc->pipe) {
+		/*
+		 * If another power sequencer was being used on this
+		 * port previously make sure to turn off vdd there while
+		 * we still have control of it.
+		 */
+		vlv_detach_power_sequencer(intel_dp);
+	}
+
+	/* We may be stealing the power sequencer from another port. */
+	vlv_steal_power_sequencer(dev_priv, crtc->pipe);
+
+	intel_dp->pps.active_pipe = crtc->pipe;
+
+	if (!intel_dp_is_edp(intel_dp))
+		return;
+
+	/* now it's all ours */
+	intel_dp->pps.pps_pipe = crtc->pipe;
+
+	drm_dbg_kms(&dev_priv->drm,
+		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
+		    pipe_name(intel_dp->pps.pps_pipe), encoder->base.base.id,
+		    encoder->base.name);
+
+	/* init power sequencer on this pipe and port */
+	pps_init_delays(intel_dp);
+	pps_init_registers(intel_dp, true);
+}
+
+static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if (!edp_have_panel_vdd(intel_dp))
+		return;
+
+	/*
+	 * The VDD bit needs a power domain reference, so if the bit is
+	 * already enabled when we boot or resume, grab this reference and
+	 * schedule a vdd off, so we don't hold on to the reference
+	 * indefinitely.
+	 */
+	drm_dbg_kms(&dev_priv->drm,
+		    "VDD left on by BIOS, adjusting state tracking\n");
+	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
+							    intel_aux_power_domain(dig_port));
+
+	edp_panel_vdd_schedule_off(intel_dp);
+}
+
+bool intel_pps_have_power(struct intel_dp *intel_dp)
+{
+	intel_wakeref_t wakeref;
+	bool have_power = false;
+
+	with_intel_pps_lock(intel_dp, wakeref) {
+		have_power = edp_have_panel_power(intel_dp) &&
+			     edp_have_panel_vdd(intel_dp);
+	}
+
+	return have_power;
+}
+
+static void pps_init_timestamps(struct intel_dp *intel_dp)
+{
+	intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+	intel_dp->pps.last_power_on = jiffies;
+	intel_dp->pps.last_backlight_off = jiffies;
+}
+
+static void
+intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	u32 pp_on, pp_off, pp_ctl;
+	struct pps_registers regs;
+
+	intel_pps_get_registers(intel_dp, &regs);
+
+	pp_ctl = ilk_get_pp_control(intel_dp);
+
+	/* Ensure PPS is unlocked */
+	if (!HAS_DDI(dev_priv))
+		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
+
+	pp_on = intel_de_read(dev_priv, regs.pp_on);
+	pp_off = intel_de_read(dev_priv, regs.pp_off);
+
+	/* Pull timing values out of registers */
+	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
+	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
+	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
+	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
+
+	if (i915_mmio_reg_valid(regs.pp_div)) {
+		u32 pp_div;
+
+		pp_div = intel_de_read(dev_priv, regs.pp_div);
+
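+		/* hw counts t11_t12 in 100 ms units; scale to the sw 100 us units */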
+		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
+	} else {
+		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
+	}
+}
+
+static void
+intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
+{
+	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+		      state_name,
+		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
+}
+
+static void
+intel_pps_verify_state(struct intel_dp *intel_dp)
+{
+	struct edp_power_seq hw;
+	struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
+
+	intel_pps_readout_hw_state(intel_dp, &hw);
+
+	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
+	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
+		DRM_ERROR("PPS state mismatch\n");
+		intel_pps_dump_state("sw", sw);
+		intel_pps_dump_state("hw", &hw);
+	}
+}
+
+static void pps_init_delays(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct edp_power_seq cur, vbt, spec,
+		*final = &intel_dp->pps.pps_delays;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	/* already initialized? */
+	if (final->t11_t12 != 0)
+		return;
+
+	intel_pps_readout_hw_state(intel_dp, &cur);
+
+	intel_pps_dump_state("cur", &cur);
+
+	vbt = dev_priv->vbt.edp.pps;
+	/* On the Toshiba Satellite P50-C-18C the VBT T12 delay
+	 * of 500ms appears to be too short. Occasionally the panel
+	 * just fails to power back on. Increasing the delay to 1300ms
+	 * seems sufficient to avoid this problem.
+	 */
+	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
+		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Increasing T12 panel delay as per the quirk to %d\n",
+			    vbt.t11_t12);
+	}
+	/* T11_T12 delay is special and actually in units of 100ms, but zero
+	 * based in the hw (so we need to add 100 ms). But the sw vbt
+	 * table multiplies it by 1000 to put it in units of 100usec,
+	 * too. */
+	vbt.t11_t12 += 100 * 10;
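+	/*
+	 * E.g. a VBT T11_T12 of 500 ms is stored as 5000 in these 100 us
+	 * units; the += above adds 100 * 10 = 1000 units, i.e. the 100 ms
+	 * hw zero-base offset.
+	 */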
+
+	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+	 * our hw here, which are all in 100usec. */
+	spec.t1_t3 = 210 * 10;
+	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+	spec.t10 = 500 * 10;
+	/* This one is special and actually in units of 100ms, but zero
+	 * based in the hw (so we need to add 100 ms). But the sw vbt
+	 * table multiplies it by 1000 to put it in units of 100usec,
+	 * too. */
+	spec.t11_t12 = (510 + 100) * 10;
+
+	intel_pps_dump_state("vbt", &vbt);
+
+	/* Use the max of the register settings and vbt. If both are
+	 * unset, fall back to the spec limits. */
+#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
+				       spec.field : \
+				       max(cur.field, vbt.field))
+	assign_final(t1_t3);
+	assign_final(t8);
+	assign_final(t9);
+	assign_final(t10);
+	assign_final(t11_t12);
+#undef assign_final
+
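+/* Convert the delays from hw 100 us units to ms, rounding up. */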
+#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
+	intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
+	intel_dp->pps.backlight_on_delay = get_delay(t8);
+	intel_dp->pps.backlight_off_delay = get_delay(t9);
+	intel_dp->pps.panel_power_down_delay = get_delay(t10);
+	intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
+
+	drm_dbg_kms(&dev_priv->drm,
+		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
+		    intel_dp->pps.panel_power_up_delay,
+		    intel_dp->pps.panel_power_down_delay,
+		    intel_dp->pps.panel_power_cycle_delay);
+
+	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
+		    intel_dp->pps.backlight_on_delay,
+		    intel_dp->pps.backlight_off_delay);
+
+	/*
+	 * We override the HW backlight delays to 1 because we do manual waits
+	 * on them. For T8, even BSpec recommends doing it. For T9, if we
+	 * don't do this, we'll end up waiting for the backlight off delay
+	 * twice: once when we do the manual sleep, and once when we disable
+	 * the panel and wait for the PP_STATUS bit to become zero.
+	 */
+	final->t8 = 1;
+	final->t9 = 1;
+
+	/*
+	 * HW has only a 100msec granularity for t11_t12 so round it up
+	 * accordingly.
+	 */
+	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
+}
+
+static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	u32 pp_on, pp_off, port_sel = 0;
+	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
+	struct pps_registers regs;
+	enum port port = dp_to_dig_port(intel_dp)->base.port;
+	const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	intel_pps_get_registers(intel_dp, &regs);
+
+	/*
+	 * On some VLV machines the BIOS can leave the VDD
+	 * enabled even on power sequencers which aren't
+	 * hooked up to any port. This would mess up the
+	 * power domain tracking the first time we pick
+	 * one of these power sequencers for use since
+	 * intel_pps_vdd_on_unlocked() would notice that the VDD was
+	 * already on and therefore wouldn't grab the power
+	 * domain reference. Disable VDD first to avoid this.
+	 * This also avoids spuriously turning the VDD on as
+	 * soon as the new power sequencer gets initialized.
+	 */
+	if (force_disable_vdd) {
+		u32 pp = ilk_get_pp_control(intel_dp);
+
+		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
+			 "Panel power already on\n");
+
+		if (pp & EDP_FORCE_VDD)
+			drm_dbg_kms(&dev_priv->drm,
+				    "VDD already on, disabling first\n");
+
+		pp &= ~EDP_FORCE_VDD;
+
+		intel_de_write(dev_priv, regs.pp_ctrl, pp);
+	}
+
+	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
+		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
+	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
+		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
+
+	/* Haswell doesn't have any port selection bits for the panel
+	 * power sequencer any more. */
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		port_sel = PANEL_PORT_SELECT_VLV(port);
+	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+		switch (port) {
+		case PORT_A:
+			port_sel = PANEL_PORT_SELECT_DPA;
+			break;
+		case PORT_C:
+			port_sel = PANEL_PORT_SELECT_DPC;
+			break;
+		case PORT_D:
+			port_sel = PANEL_PORT_SELECT_DPD;
+			break;
+		default:
+			MISSING_CASE(port);
+			break;
+		}
+	}
+
+	pp_on |= port_sel;
+
+	intel_de_write(dev_priv, regs.pp_on, pp_on);
+	intel_de_write(dev_priv, regs.pp_off, pp_off);
+
+	/*
+	 * Compute the divisor for the pp clock, simply matching the Bspec formula.
+	 */
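+	/*
+	 * E.g. assuming a 24 MHz rawclk (rawclk_freq == 24000, in kHz):
+	 * div == 24, so the reference divider field below becomes
+	 * (100 * 24) / 2 - 1 == 1199.
+	 */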
+	if (i915_mmio_reg_valid(regs.pp_div)) {
+		intel_de_write(dev_priv, regs.pp_div,
+			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
+			       REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+	} else {
+		u32 pp_ctl;
+
+		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
+		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
+		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
+		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
+	}
+
+	drm_dbg_kms(&dev_priv->drm,
+		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+		    intel_de_read(dev_priv, regs.pp_on),
+		    intel_de_read(dev_priv, regs.pp_off),
+		    i915_mmio_reg_valid(regs.pp_div) ?
+		    intel_de_read(dev_priv, regs.pp_div) :
+		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
+}
+
+void intel_pps_encoder_reset(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	intel_wakeref_t wakeref;
+
+	if (!intel_dp_is_edp(intel_dp))
+		return;
+
+	with_intel_pps_lock(intel_dp, wakeref) {
+		/*
+		 * Reinit the power sequencer also on the resume path, in case
+		 * BIOS did something nasty with it.
+		 */
+		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+			vlv_initial_power_sequencer_setup(intel_dp);
+
+		pps_init_delays(intel_dp);
+		pps_init_registers(intel_dp, false);
+
+		intel_pps_vdd_sanitize(intel_dp);
+	}
+}
+
+void intel_pps_init(struct intel_dp *intel_dp)
+{
+	INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
+
+	pps_init_timestamps(intel_dp);
+
+	intel_pps_encoder_reset(intel_dp);
+}
+
+void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
+{
+	int pps_num;
+	int pps_idx;
+
+	if (HAS_DDI(dev_priv))
+		return;
+	/*
+	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
+	 * everywhere where registers can be write protected.
+	 */
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		pps_num = 2;
+	else
+		pps_num = 1;
+
+	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
+		u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
+
+		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
+		intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
+	}
+}
+
+void intel_pps_setup(struct drm_i915_private *i915)
+{
+	if (HAS_PCH_SPLIT(i915) || IS_GEN9_LP(i915))
+		i915->pps_mmio_base = PCH_PPS_BASE;
+	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+		i915->pps_mmio_base = VLV_PPS_BASE;
+	else
+		i915->pps_mmio_base = PPS_BASE;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
new file mode 100644
index 0000000000000000000000000000000000000000..fbbcca782e7b6f8e5fe2e693d2d4db9cfc1b5ab2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_PPS_H__
+#define __INTEL_PPS_H__
+
+#include <linux/types.h>
+
+#include "intel_wakeref.h"
+
+struct drm_i915_private;
+struct intel_connector;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+
+intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp);
+intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref);
+
+#define with_intel_pps_lock(dp, wf)						\
+	for ((wf) = intel_pps_lock(dp); (wf); (wf) = intel_pps_unlock((dp), (wf)))
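+
+/*
+ * Typical use, e.g. from intel_pps_on():
+ *
+ *	with_intel_pps_lock(intel_dp, wakeref)
+ *		intel_pps_on_unlocked(intel_dp);
+ */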
+
+void intel_pps_backlight_on(struct intel_dp *intel_dp);
+void intel_pps_backlight_off(struct intel_dp *intel_dp);
+void intel_pps_backlight_power(struct intel_connector *connector, bool enable);
+
+bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp);
+void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync);
+void intel_pps_on_unlocked(struct intel_dp *intel_dp);
+void intel_pps_off_unlocked(struct intel_dp *intel_dp);
+void intel_pps_check_power_unlocked(struct intel_dp *intel_dp);
+
+void intel_pps_vdd_on(struct intel_dp *intel_dp);
+void intel_pps_on(struct intel_dp *intel_dp);
+void intel_pps_off(struct intel_dp *intel_dp);
+void intel_pps_vdd_off_sync(struct intel_dp *intel_dp);
+bool intel_pps_have_power(struct intel_dp *intel_dp);
+void intel_pps_wait_power_cycle(struct intel_dp *intel_dp);
+
+void intel_pps_init(struct intel_dp *intel_dp);
+void intel_pps_encoder_reset(struct intel_dp *intel_dp);
+void intel_pps_reset_all(struct drm_i915_private *i915);
+
+void vlv_pps_init(struct intel_encoder *encoder,
+		  const struct intel_crtc_state *crtc_state);
+
+void intel_pps_unlock_regs_wa(struct drm_i915_private *i915);
+void intel_pps_setup(struct drm_i915_private *i915);
+
+#endif /* __INTEL_PPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index b3631b722de327595b8d1ca57adffdd1037b851f..850cb7f5b332c540a0c7af6c048be50d52a3df49 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -28,9 +28,10 @@
 #include "i915_drv.h"
 #include "intel_atomic.h"
 #include "intel_display_types.h"
+#include "intel_dp_aux.h"
+#include "intel_hdmi.h"
 #include "intel_psr.h"
 #include "intel_sprite.h"
-#include "intel_hdmi.h"
 
 /**
  * DOC: Panel Self Refresh (PSR/SRD)
@@ -305,7 +306,7 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
 		    intel_dp->psr_dpcd[0]);
 
-	if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) {
+	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "PSR support not currently available for this panel\n");
 		return;
@@ -811,6 +812,13 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
 		&crtc_state->hw.adjusted_mode;
 	int psr_setup_time;
 
+	/*
+	 * Current PSR panels don't work reliably with VRR enabled, so if
+	 * VRR is enabled, do not enable PSR.
+	 */
+	if (crtc_state->vrr.enable)
+		return;
+
 	if (!CAN_PSR(dev_priv))
 		return;
 
@@ -1185,7 +1193,9 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum pipe pipe = plane->pipe;
-	u32 val;
+	const struct drm_rect *clip;
+	u32 val, offset;
+	int ret, x, y;
 
 	if (!crtc_state->enable_psr2_sel_fetch)
 		return;
@@ -1196,16 +1206,25 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
 	if (!val || plane->id == PLANE_CURSOR)
 		return;
 
-	val = plane_state->uapi.dst.y1 << 16 | plane_state->uapi.dst.x1;
+	clip = &plane_state->psr2_sel_fetch_area;
+
+	val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
+	val |= plane_state->uapi.dst.x1;
 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
 
-	val = plane_state->color_plane[color_plane].y << 16;
-	val |= plane_state->color_plane[color_plane].x;
+	/* TODO: consider auxiliary surfaces */
+	x = plane_state->uapi.src.x1 >> 16;
+	y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
+	ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
+	if (ret)
+		drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n",
+			      ret);
+	val = y << 16 | x;
 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
 			  val);
 
 	/* Sizes are 0 based */
-	val = ((drm_rect_height(&plane_state->uapi.src) >> 16) - 1) << 16;
+	val = (drm_rect_height(clip) - 1) << 16;
 	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
 }
@@ -1237,9 +1256,11 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
 	if (clip->y1 == -1)
 		goto exit;
 
+	drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
+
 	val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
 	val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
-	val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(DIV_ROUND_UP(clip->y2, 4) + 1);
+	val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
 exit:
 	crtc_state->psr2_man_track_ctl = val;
 }
@@ -1264,8 +1285,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
 				struct intel_crtc *crtc)
 {
 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
 	struct intel_plane_state *new_plane_state, *old_plane_state;
-	struct drm_rect pipe_clip = { .y1 = -1 };
 	struct intel_plane *plane;
 	bool full_update = false;
 	int i, ret;
@@ -1277,13 +1298,25 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
 	if (ret)
 		return ret;
 
+	/*
+	 * Calculate the minimal selective fetch area of each plane and the
+	 * pipe damaged area.
+	 * In the next loop the plane selective fetch areas will actually be
+	 * set using the whole pipe damaged area.
+	 */
 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
 					     new_plane_state, i) {
-		struct drm_rect temp;
+		struct drm_rect src, damaged_area = { .y1 = -1 };
+		struct drm_mode_rect *damaged_clips;
+		u32 num_clips, j;
 
 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
 			continue;
 
+		if (!new_plane_state->uapi.visible &&
+		    !old_plane_state->uapi.visible)
+			continue;
+
 		/*
 		 * TODO: Not clear how to handle planes with negative position,
 		 * also planes are not updated if they have a negative X
@@ -1295,18 +1328,94 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
 			break;
 		}
 
-		if (!new_plane_state->uapi.visible)
-			continue;
+		num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);
 
 		/*
-		 * For now doing a selective fetch in the whole plane area,
-		 * optimizations will come in the future.
+		 * If the plane's visibility changed or it moved, mark the
+		 * whole plane area as damaged, as it needs a complete redraw
+		 * in both the old and new positions.
 		 */
-		temp.y1 = new_plane_state->uapi.dst.y1;
-		temp.y2 = new_plane_state->uapi.dst.y2;
-		clip_area_update(&pipe_clip, &temp);
+		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
+		    !drm_rect_equals(&new_plane_state->uapi.dst,
+				     &old_plane_state->uapi.dst)) {
+			if (old_plane_state->uapi.visible) {
+				damaged_area.y1 = old_plane_state->uapi.dst.y1;
+				damaged_area.y2 = old_plane_state->uapi.dst.y2;
+				clip_area_update(&pipe_clip, &damaged_area);
+			}
+
+			if (new_plane_state->uapi.visible) {
+				damaged_area.y1 = new_plane_state->uapi.dst.y1;
+				damaged_area.y2 = new_plane_state->uapi.dst.y2;
+				clip_area_update(&pipe_clip, &damaged_area);
+			}
+			continue;
+		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha ||
+			   (!num_clips &&
+			    new_plane_state->uapi.fb != old_plane_state->uapi.fb)) {
+			/*
+			 * If the plane doesn't have damaged areas but the
+			 * framebuffer or alpha changed, mark the whole
+			 * plane area as damaged.
+			 */
+			damaged_area.y1 = new_plane_state->uapi.dst.y1;
+			damaged_area.y2 = new_plane_state->uapi.dst.y2;
+			clip_area_update(&pipe_clip, &damaged_area);
+			continue;
+		}
+
+		drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
+		damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi);
+
+		for (j = 0; j < num_clips; j++) {
+			struct drm_rect clip;
+
+			clip.x1 = damaged_clips[j].x1;
+			clip.y1 = damaged_clips[j].y1;
+			clip.x2 = damaged_clips[j].x2;
+			clip.y2 = damaged_clips[j].y2;
+			if (drm_rect_intersect(&clip, &src))
+				clip_area_update(&damaged_area, &clip);
+		}
+
+		if (damaged_area.y1 == -1)
+			continue;
+
+		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
+		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
+		clip_area_update(&pipe_clip, &damaged_area);
+	}
+
+	if (full_update)
+		goto skip_sel_fetch_set_loop;
+
+	/* The damaged area must be aligned to 4 lines: round y1 down and y2 up */
+	pipe_clip.y1 -= pipe_clip.y1 % 4;
+	if (pipe_clip.y2 % 4)
+		pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4;
+
+	/*
+	 * Now that we have the pipe damaged area, check if it intersects
+	 * with each plane; if it does, set that plane's selective fetch area.
+	 */
+	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+					     new_plane_state, i) {
+		struct drm_rect *sel_fetch_area, inter;
+
+		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
+		    !new_plane_state->uapi.visible)
+			continue;
+
+		inter = pipe_clip;
+		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
+			continue;
+
+		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
+		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
+		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
 	}
 
+skip_sel_fetch_set_loop:
 	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 3da2544fa1c0b6dfdadb206fb1ee9cd48762a359..993543334a1ef39c1bb8fc3e6f0456b17b0dd595 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -49,6 +49,8 @@
 #include "intel_psr.h"
 #include "intel_dsi.h"
 #include "intel_sprite.h"
+#include "i9xx_plane.h"
+#include "intel_vrr.h"
 
 int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
 			     int usecs)
@@ -61,13 +63,15 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
 			    1000 * adjusted_mode->crtc_htotal);
 }
 
-/* FIXME: We should instead only take spinlocks once for the entire update
- * instead of once per mmio. */
-#if IS_ENABLED(CONFIG_PROVE_LOCKING)
-#define VBLANK_EVASION_TIME_US 250
-#else
-#define VBLANK_EVASION_TIME_US 100
-#endif
+static int intel_mode_vblank_start(const struct drm_display_mode *mode)
+{
+	int vblank_start = mode->crtc_vblank_start;
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		vblank_start = DIV_ROUND_UP(vblank_start, 2);
+
+	return vblank_start;
+}
 
 /**
  * intel_pipe_update_start() - start update of a set of display registers
@@ -97,9 +101,10 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
 	if (new_crtc_state->uapi.async_flip)
 		return;
 
-	vblank_start = adjusted_mode->crtc_vblank_start;
-	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
-		vblank_start = DIV_ROUND_UP(vblank_start, 2);
+	if (new_crtc_state->vrr.enable)
+		vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
+	else
+		vblank_start = intel_mode_vblank_start(adjusted_mode);
 
 	/* FIXME needs to be calibrated sensibly */
 	min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
@@ -187,6 +192,36 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
 	local_irq_disable();
 }
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
+static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
+{
+	u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
+	unsigned int h;
+
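+	/* bucket h holds updates with delta in [2^h, 2^(h+1)) * 512 ns */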
+	h = ilog2(delta >> 9);
+	if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
+		h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
+	crtc->debug.vbl.times[h]++;
+
+	crtc->debug.vbl.sum += delta;
+	if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
+		crtc->debug.vbl.min = delta;
+	if (delta > crtc->debug.vbl.max)
+		crtc->debug.vbl.max = delta;
+
+	if (delta > 1000 * VBLANK_EVASION_TIME_US) {
+		drm_dbg_kms(crtc->base.dev,
+			    "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
+			    pipe_name(crtc->pipe),
+			    div_u64(delta, 1000),
+			    VBLANK_EVASION_TIME_US);
+		crtc->debug.vbl.over++;
+	}
+}
+#else
+static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
+#endif
+
 /**
  * intel_pipe_update_end() - end update of a set of display registers
  * @new_crtc_state: the new crtc state
@@ -235,6 +270,9 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
 
 	local_irq_enable();
 
+	/* Send VRR Push to terminate Vblank */
+	intel_vrr_send_push(new_crtc_state);
+
 	if (intel_vgpu_active(dev_priv))
 		return;
 
@@ -249,15 +287,8 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
 			crtc->debug.min_vbl, crtc->debug.max_vbl,
 			crtc->debug.scanline_start, scanline_end);
 	}
-#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
-	else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
-		 VBLANK_EVASION_TIME_US)
-		drm_warn(&dev_priv->drm,
-			 "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
-			 pipe_name(pipe),
-			 ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
-			 VBLANK_EVASION_TIME_US);
-#endif
+
+	dbg_vblank_evade(crtc, end_vbl_time);
 }
 
 int intel_plane_check_stride(const struct intel_plane_state *plane_state)
@@ -710,7 +741,8 @@ icl_program_input_csc(struct intel_plane *plane,
 static void
 skl_plane_async_flip(struct intel_plane *plane,
 		     const struct intel_crtc_state *crtc_state,
-		     const struct intel_plane_state *plane_state)
+		     const struct intel_plane_state *plane_state,
+		     bool async_flip)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	unsigned long irqflags;
@@ -721,6 +753,9 @@ skl_plane_async_flip(struct intel_plane *plane,
 
 	plane_ctl |= skl_plane_ctl_crtc(crtc_state);
 
+	if (async_flip)
+		plane_ctl |= PLANE_CTL_ASYNC_FLIP;
+
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
 	intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
@@ -806,6 +841,10 @@ skl_program_plane(struct intel_plane *plane,
 	if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
 		icl_program_input_csc(plane, crtc_state, plane_state);
 
+	if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
+		intel_uncore_write64_fw(&dev_priv->uncore,
+					PLANE_CC_VAL(pipe, plane_id), plane_state->ccval);
+
 	skl_write_plane_wm(plane, crtc_state);
 
 	intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id),
@@ -897,6 +936,28 @@ skl_plane_get_hw_state(struct intel_plane *plane,
 	return ret;
 }
 
+static void
+skl_plane_enable_flip_done(struct intel_plane *plane)
+{
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
+	enum pipe pipe = plane->pipe;
+
+	spin_lock_irq(&i915->irq_lock);
+	bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+	spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+skl_plane_disable_flip_done(struct intel_plane *plane)
+{
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
+	enum pipe pipe = plane->pipe;
+
+	spin_lock_irq(&i915->irq_lock);
+	bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+	spin_unlock_irq(&i915->irq_lock);
+}
+
 static void i9xx_plane_linear_gamma(u16 gamma[8])
 {
 	/* The points are not evenly spaced. */
@@ -1790,7 +1851,26 @@ g4x_sprite_max_stride(struct intel_plane *plane,
 		      u32 pixel_format, u64 modifier,
 		      unsigned int rotation)
 {
-	return 16384;
+	const struct drm_format_info *info = drm_format_info(pixel_format);
+	int cpp = info->cpp[0];
+
+	/* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
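+	/* e.g. at 4 bytes per pixel both limits coincide: 4096 * 4 == 16 * 1024 */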
+	if (modifier == I915_FORMAT_MOD_X_TILED)
+		return min(4096 * cpp, 16 * 1024);
+	else
+		return 16 * 1024;
+}
+
+static unsigned int
+hsw_sprite_max_stride(struct intel_plane *plane,
+		      u32 pixel_format, u64 modifier,
+		      unsigned int rotation)
+{
+	const struct drm_format_info *info = drm_format_info(pixel_format);
+	int cpp = info->cpp[0];
+
+	/* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
+	return min(8192 * cpp, 16 * 1024);
 }
 
 static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
@@ -2305,7 +2385,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
 	     fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
 	     fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
 	     fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
-	     fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) {
+	     fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+	     fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "Y/Yf tiling not supported in IF-ID mode\n");
 		return -EINVAL;
@@ -2795,6 +2876,7 @@ static const u64 skl_plane_format_modifiers_ccs[] = {
 static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
 	I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
 	I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+	I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
 	I915_FORMAT_MOD_Y_TILED,
 	I915_FORMAT_MOD_X_TILED,
 	DRM_FORMAT_MOD_LINEAR,
@@ -2803,6 +2885,7 @@ static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
 
 static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
 	I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+	I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
 	I915_FORMAT_MOD_Y_TILED,
 	I915_FORMAT_MOD_X_TILED,
 	DRM_FORMAT_MOD_LINEAR,
@@ -2993,6 +3076,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
 	case I915_FORMAT_MOD_X_TILED:
 	case I915_FORMAT_MOD_Y_TILED:
 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
 		break;
 	default:
 		return false;
@@ -3229,7 +3313,13 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
 	plane->get_hw_state = skl_plane_get_hw_state;
 	plane->check_plane = skl_plane_check;
 	plane->min_cdclk = skl_plane_min_cdclk;
-	plane->async_flip = skl_plane_async_flip;
+
+	if (plane_id == PLANE_PRIMARY) {
+		plane->need_async_flip_disable_wa = IS_GEN_RANGE(dev_priv, 9, 10);
+		plane->async_flip = skl_plane_async_flip;
+		plane->enable_flip_done = skl_plane_enable_flip_done;
+		plane->disable_flip_done = skl_plane_disable_flip_done;
+	}
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		formats = icl_get_plane_formats(dev_priv, pipe,
@@ -3337,11 +3427,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 		return plane;
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		plane->max_stride = i9xx_plane_max_stride;
 		plane->update_plane = vlv_update_plane;
 		plane->disable_plane = vlv_disable_plane;
 		plane->get_hw_state = vlv_plane_get_hw_state;
 		plane->check_plane = vlv_sprite_check;
+		plane->max_stride = i965_plane_max_stride;
 		plane->min_cdclk = vlv_plane_min_cdclk;
 
 		if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
@@ -3355,16 +3445,18 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 
 		plane_funcs = &vlv_sprite_funcs;
 	} else if (INTEL_GEN(dev_priv) >= 7) {
-		plane->max_stride = g4x_sprite_max_stride;
 		plane->update_plane = ivb_update_plane;
 		plane->disable_plane = ivb_disable_plane;
 		plane->get_hw_state = ivb_plane_get_hw_state;
 		plane->check_plane = g4x_sprite_check;
 
-		if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+		if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+			plane->max_stride = hsw_sprite_max_stride;
 			plane->min_cdclk = hsw_plane_min_cdclk;
-		else
+		} else {
+			plane->max_stride = g4x_sprite_max_stride;
 			plane->min_cdclk = ivb_sprite_min_cdclk;
+		}
 
 		formats = snb_plane_formats;
 		num_formats = ARRAY_SIZE(snb_plane_formats);
@@ -3372,11 +3464,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 
 		plane_funcs = &snb_sprite_funcs;
 	} else {
-		plane->max_stride = g4x_sprite_max_stride;
 		plane->update_plane = g4x_update_plane;
 		plane->disable_plane = g4x_disable_plane;
 		plane->get_hw_state = g4x_plane_get_hw_state;
 		plane->check_plane = g4x_sprite_check;
+		plane->max_stride = g4x_sprite_max_stride;
 		plane->min_cdclk = g4x_sprite_min_cdclk;
 
 		modifiers = i9xx_plane_format_modifiers;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index cd2104ba1ca196ba31377e83851188f320972d1d..76126dd8d584f74382d1913f537bb53483f8ed25 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -17,6 +17,16 @@ struct drm_i915_private;
 struct intel_crtc_state;
 struct intel_plane_state;
 
+/*
+ * FIXME: Take the spinlocks once for the entire update
+ * instead of once per mmio.
+ */
+#if IS_ENABLED(CONFIG_PROVE_LOCKING)
+#define VBLANK_EVASION_TIME_US 250
+#else
+#define VBLANK_EVASION_TIME_US 100
+#endif
+
 int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
 			     int usecs);
 struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
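
The 100/250 us budget above is consumed in scanline units during the vblank-evasion critical section; a minimal sketch (illustrative helper, not part of the patch) of how the fixed microsecond budget maps to mode-dependent scanlines using intel_usecs_to_scanlines() declared above:

static int example_evasion_scanlines(const struct drm_display_mode *adjusted_mode)
{
	/* A faster mode scans out more lines within the same 100/250 us window */
	return intel_usecs_to_scanlines(adjusted_mode, VBLANK_EVASION_TIME_US);
}
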
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 8b6f16f9d0d1781a884cfa2eba08f07301fd463f..2cefc13535a0f7aa3c2f0c3fc0d81c1cfdcb7488 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -232,7 +232,7 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
 		mask |= BIT(TC_PORT_LEGACY);
 
 	/* The sink can be connected only in a single mode. */
-	if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
+	if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1))
 		tc_port_fixup_legacy_flag(dig_port, mask);
 
 	return mask;
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 49b4b5fca94111bb0877573a359433c9afc8887a..187ec573de59d1e37c723c74e6bb547317bb871d 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -319,6 +319,8 @@ enum vbt_gmbus_ddi {
 	ICL_DDC_BUS_DDI_A = 0x1,
 	ICL_DDC_BUS_DDI_B,
 	TGL_DDC_BUS_DDI_C,
+	RKL_DDC_BUS_DDI_D = 0x3,
+	RKL_DDC_BUS_DDI_E,
 	ICL_DDC_BUS_PORT_1 = 0x4,
 	ICL_DDC_BUS_PORT_2,
 	ICL_DDC_BUS_PORT_3,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index e2716a67b2816d94f3b32ec2c02456ec5c6f9402..f58cc5700784e2dd46bcbe73fa30ef49909b8bf3 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -454,8 +454,6 @@ int intel_dsc_compute_params(struct intel_encoder *encoder,
 	else if (vdsc_cfg->bits_per_component == 12)
 		vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
 
-	/* RC_MODEL_SIZE is a constant across all configurations */
-	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
 	/* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */
 	vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
 		(vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
@@ -741,7 +739,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
 
 	/* Populate PICTURE_PARAMETER_SET_9 registers */
 	pps_val = 0;
-	pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
+	pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) |
 		DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
 	drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
 	if (!is_pipe_dsc(crtc_state)) {
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
new file mode 100644
index 0000000000000000000000000000000000000000..a9c2b2fd92523b0faf5d7b6912773751d9092105
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_vrr.h"
+
+bool intel_vrr_is_capable(struct drm_connector *connector)
+{
+	struct intel_dp *intel_dp;
+	const struct drm_display_info *info = &connector->display_info;
+	struct drm_i915_private *i915 = to_i915(connector->dev);
+
+	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+	    connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+		return false;
+
+	intel_dp = intel_attached_dp(to_intel_connector(connector));
+	/*
+	 * The DP sink is capable of VRR video timings if the
+	 * Ignore MSA bit is set in the DPCD.
+	 * The EDID monitor range should also span more than 10 Hz
+	 * for a reasonable Adaptive Sync / Variable Refresh Rate
+	 * end-user experience.
+	 */
+	return HAS_VRR(i915) &&
+		drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd) &&
+		info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
+}
+
+void
+intel_vrr_check_modeset(struct intel_atomic_state *state)
+{
+	int i;
+	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+	struct intel_crtc *crtc;
+
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		if (new_crtc_state->uapi.vrr_enabled !=
+		    old_crtc_state->uapi.vrr_enabled)
+			new_crtc_state->uapi.mode_changed = true;
+	}
+}
+
+/*
+ * Without VRR, registers get latched at:
+ *  vblank_start
+ *
+ * With VRR, the earliest point registers can get latched is:
+ *  intel_vrr_vmin_vblank_start(), which, to maintain the correct min
+ *  vtotal, is >= vblank_start + 1
+ *
+ * The latest point registers can get latched is the vmax decision boundary:
+ *  intel_vrr_vmax_vblank_start()
+ *
+ * Between those two points the vblank exit starts (and hence registers get
+ * latched) ASAP after a push is sent.
+ *
+ * framestart_delay is programmable 0-3.
+ */
+static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+	/* The hw imposes the extra scanline before frame start */
+	return crtc_state->vrr.pipeline_full + i915->framestart_delay + 1;
+}
+
+int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state)
+{
+	/* Min vblank is actually determined by flipline, which is always >= vmin+1 */
+	return crtc_state->vrr.vmin + 1 - intel_vrr_vblank_exit_length(crtc_state);
+}
+
+int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state)
+{
+	return crtc_state->vrr.vmax - intel_vrr_vblank_exit_length(crtc_state);
+}
+
+void
+intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
+			 struct drm_connector_state *conn_state)
+{
+	struct intel_connector *connector =
+		to_intel_connector(conn_state->connector);
+	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+	const struct drm_display_info *info = &connector->base.display_info;
+	int vmin, vmax;
+
+	if (!intel_vrr_is_capable(&connector->base))
+		return;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+		return;
+
+	if (!crtc_state->uapi.vrr_enabled)
+		return;
+
+	vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000,
+			    adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq);
+	vmax = adjusted_mode->crtc_clock * 1000 /
+		(adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
+
+	vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal);
+	vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);
+
+	if (vmin >= vmax)
+		return;
+
+	/*
+	 * flipline determines the min vblank length the hardware will
+	 * generate, and flipline >= vmin + 1; hence we reduce vmin by one
+	 * to make sure we can get the actual min vblank length.
+	 */
+	crtc_state->vrr.vmin = vmin - 1;
+	crtc_state->vrr.vmax = vmax;
+	crtc_state->vrr.enable = true;
+
+	crtc_state->vrr.flipline = crtc_state->vrr.vmin + 1;
+
+	/*
+	 * FIXME: s/4/framestart_delay+1/ to get consistent
+	 * earliest/latest points for register latching regardless
+	 * of the framestart_delay used?
+	 *
+	 * FIXME: this really needs the extra scanline to provide consistent
+	 * behaviour for all framestart_delay values. Otherwise with
+	 * framestart_delay==3 we will end up extending the min vblank by
+	 * one extra line.
+	 */
+	crtc_state->vrr.pipeline_full =
+		min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1);
+
+	crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
+
+void intel_vrr_enable(struct intel_encoder *encoder,
+		      const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+	u32 trans_vrr_ctl;
+
+	if (!crtc_state->vrr.enable)
+		return;
+
+	trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
+		VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
+		VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) |
+		VRR_CTL_PIPELINE_FULL_OVERRIDE;
+
+	intel_de_write(dev_priv, TRANS_VRR_VMIN(cpu_transcoder), crtc_state->vrr.vmin - 1);
+	intel_de_write(dev_priv, TRANS_VRR_VMAX(cpu_transcoder), crtc_state->vrr.vmax - 1);
+	intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), trans_vrr_ctl);
+	intel_de_write(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder), crtc_state->vrr.flipline - 1);
+	intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN);
+}
+
+void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+	if (!crtc_state->vrr.enable)
+		return;
+
+	intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder),
+		       TRANS_PUSH_EN | TRANS_PUSH_SEND);
+}
+
+void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+
+	if (!old_crtc_state->vrr.enable)
+		return;
+
+	intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), 0);
+	intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0);
+}
+
+void intel_vrr_get_config(struct intel_crtc *crtc,
+			  struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+	u32 trans_vrr_ctl;
+
+	trans_vrr_ctl = intel_de_read(dev_priv, TRANS_VRR_CTL(cpu_transcoder));
+	crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
+	if (!crtc_state->vrr.enable)
+		return;
+
+	if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE)
+		crtc_state->vrr.pipeline_full = REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);
+	if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN)
+		crtc_state->vrr.flipline = intel_de_read(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder)) + 1;
+	crtc_state->vrr.vmax = intel_de_read(dev_priv, TRANS_VRR_VMAX(cpu_transcoder)) + 1;
+	crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1;
+
+	crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
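
As a worked example of intel_vrr_compute_config() (all numbers assumed, not taken from this patch): for a hypothetical 2560x1440@144 mode with crtc_clock = 580078 kHz, crtc_htotal = 2720, crtc_vtotal = 1481 and an EDID monitor range of 48-144 Hz, vmin = DIV_ROUND_UP(580078000, 2720 * 144) = 1481 and vmax = 580078000 / (2720 * 48) = 4442. After the flipline adjustment this yields vrr.vmin = 1480, vrr.vmax = 4442 and vrr.flipline = 1481, and with crtc_vdisplay = 1440 the override becomes vrr.pipeline_full = min(255, 1480 - 1440 - 4 - 1) = 35; the transcoder may then terminate a frame anywhere between 1481 and 4442 total lines, spanning the advertised 48-144 Hz range.
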
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
new file mode 100644
index 0000000000000000000000000000000000000000..fac01bf4ab5008f81ade1922d43d378264e3237d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_VRR_H__
+#define __INTEL_VRR_H__
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+
+bool intel_vrr_is_capable(struct drm_connector *connector);
+void intel_vrr_check_modeset(struct intel_atomic_state *state);
+void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
+			      struct drm_connector_state *conn_state);
+void intel_vrr_enable(struct intel_encoder *encoder,
+		      const struct intel_crtc_state *crtc_state);
+void intel_vrr_send_push(const struct intel_crtc_state *crtc_state);
+void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state);
+void intel_vrr_get_config(struct intel_crtc *crtc,
+			  struct intel_crtc_state *crtc_state);
+int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
+int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_VRR_H__ */
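
Taken together the interface follows the usual atomic split (the call sites live in other files and are only implied by this patch): intel_vrr_compute_config() derives vmin/vmax/flipline during the atomic check, intel_vrr_enable() programs the transcoder in the modeset enable path, intel_vrr_send_push() arms frame termination after each flip, intel_vrr_disable() tears the state down, and intel_vrr_get_config() recovers it during state readout.
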
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
new file mode 100644
index 0000000000000000000000000000000000000000..9e508e7d4629feea339df8d221eee04dcd439b42
--- /dev/null
+++ b/drivers/gpu/drm/i915/dma_resv_utils.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/dma-resv.h>
+
+#include "dma_resv_utils.h"
+
+void dma_resv_prune(struct dma_resv *resv)
+{
+	if (dma_resv_trylock(resv)) {
+		if (dma_resv_test_signaled_rcu(resv, true))
+			dma_resv_add_excl_fence(resv, NULL);
+		dma_resv_unlock(resv);
+	}
+}
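
A hedged usage sketch (the wrapper below is hypothetical, though the shrinker hunk later in this series includes dma_resv_utils.h): because dma_resv_prune() only trylocks, it stays strictly opportunistic and is safe to call from reclaim paths without blocking on a reservation holder:

static void example_prune_object(struct drm_i915_gem_object *obj)
{
	/*
	 * If every fence has signalled, installing a NULL exclusive fence
	 * drops the stale fence references; if the reservation is
	 * contended, dma_resv_prune() silently does nothing.
	 */
	dma_resv_prune(obj->base.resv);
}
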
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.h b/drivers/gpu/drm/i915/dma_resv_utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..b9d8fb5f8367d3091b9a636dae11a4b248117433
--- /dev/null
+++ b/drivers/gpu/drm/i915/dma_resv_utils.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DMA_RESV_UTILS_H
+#define DMA_RESV_UTILS_H
+
+struct dma_resv;
+
+void dma_resv_prune(struct dma_resv *resv);
+
+#endif /* DMA_RESV_UTILS_H */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 4fd38101bb5655fd72176ae9d6831d924165578e..4d2f40cf237bd455fa90f6d28375e8deb30e8b64 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -72,6 +72,8 @@
 #include "gt/intel_context_param.h"
 #include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_engine_user.h"
+#include "gt/intel_execlists_submission.h" /* virtual_engine */
+#include "gt/intel_gpu_commands.h"
 #include "gt/intel_ring.h"
 
 #include "i915_gem_context.h"
@@ -333,13 +335,12 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
 	return e;
 }
 
-static void i915_gem_context_free(struct i915_gem_context *ctx)
+void i915_gem_context_release(struct kref *ref)
 {
-	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
 
-	spin_lock(&ctx->i915->gem.contexts.lock);
-	list_del(&ctx->link);
-	spin_unlock(&ctx->i915->gem.contexts.lock);
+	trace_i915_context_free(ctx);
+	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
 	mutex_destroy(&ctx->engines_mutex);
 	mutex_destroy(&ctx->lut_mutex);
@@ -353,37 +354,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	kfree_rcu(ctx, rcu);
 }
 
-static void contexts_free_all(struct llist_node *list)
-{
-	struct i915_gem_context *ctx, *cn;
-
-	llist_for_each_entry_safe(ctx, cn, list, free_link)
-		i915_gem_context_free(ctx);
-}
-
-static void contexts_flush_free(struct i915_gem_contexts *gc)
-{
-	contexts_free_all(llist_del_all(&gc->free_list));
-}
-
-static void contexts_free_worker(struct work_struct *work)
-{
-	struct i915_gem_contexts *gc =
-		container_of(work, typeof(*gc), free_work);
-
-	contexts_flush_free(gc);
-}
-
-void i915_gem_context_release(struct kref *ref)
-{
-	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
-	struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
-
-	trace_i915_context_free(ctx);
-	if (llist_add(&ctx->free_link, &gc->free_list))
-		schedule_work(&gc->free_work);
-}
-
 static inline struct i915_gem_engines *
 __context_engines_static(const struct i915_gem_context *ctx)
 {
@@ -438,7 +408,7 @@ __active_engine(struct i915_request *rq, struct intel_engine_cs **active)
 	}
 
 	if (i915_request_is_active(rq)) {
-		if (!i915_request_completed(rq))
+		if (!__i915_request_is_complete(rq))
 			*active = locked;
 		ret = true;
 	}
@@ -453,6 +423,9 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
 	struct intel_engine_cs *engine = NULL;
 	struct i915_request *rq;
 
+	if (intel_context_has_inflight(ce))
+		return intel_context_inflight(ce);
+
 	if (!ce->timeline)
 		return NULL;
 
@@ -632,6 +605,10 @@ static void context_close(struct i915_gem_context *ctx)
 	 */
 	lut_close(ctx);
 
+	spin_lock(&ctx->i915->gem.contexts.lock);
+	list_del(&ctx->link);
+	spin_unlock(&ctx->i915->gem.contexts.lock);
+
 	mutex_unlock(&ctx->mutex);
 
 	/*
@@ -740,7 +717,8 @@ __create_context(struct drm_i915_private *i915)
 }
 
 static inline struct i915_gem_engines *
-__context_engines_await(const struct i915_gem_context *ctx)
+__context_engines_await(const struct i915_gem_context *ctx,
+			bool *user_engines)
 {
 	struct i915_gem_engines *engines;
 
@@ -749,6 +727,10 @@ __context_engines_await(const struct i915_gem_context *ctx)
 		engines = rcu_dereference(ctx->engines);
 		GEM_BUG_ON(!engines);
 
+		if (user_engines)
+			*user_engines = i915_gem_context_user_engines(ctx);
+
+		/* successful await => strong mb */
 		if (unlikely(!i915_sw_fence_await(&engines->fence)))
 			continue;
 
@@ -772,7 +754,7 @@ context_apply_all(struct i915_gem_context *ctx,
 	struct intel_context *ce;
 	int err = 0;
 
-	e = __context_engines_await(ctx);
+	e = __context_engines_await(ctx, NULL);
 	for_each_gem_engine(ce, e, it) {
 		err = fn(ce, data);
 		if (err)
@@ -849,9 +831,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
 	    !HAS_EXECLISTS(i915))
 		return ERR_PTR(-EINVAL);
 
-	/* Reap the stale contexts */
-	contexts_flush_free(&i915->gem.contexts);
-
 	ctx = __create_context(i915);
 	if (IS_ERR(ctx))
 		return ctx;
@@ -896,23 +875,11 @@ static void init_contexts(struct i915_gem_contexts *gc)
 {
 	spin_lock_init(&gc->lock);
 	INIT_LIST_HEAD(&gc->list);
-
-	INIT_WORK(&gc->free_work, contexts_free_worker);
-	init_llist_head(&gc->free_list);
 }
 
 void i915_gem_init__contexts(struct drm_i915_private *i915)
 {
 	init_contexts(&i915->gem.contexts);
-	drm_dbg(&i915->drm, "%s context support initialized\n",
-		DRIVER_CAPS(i915)->has_logical_contexts ?
-		"logical" : "fake");
-}
-
-void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
-{
-	flush_work(&i915->gem.contexts.free_work);
-	rcu_barrier(); /* and flush the left over RCU frees */
 }
 
 static int gem_context_register(struct i915_gem_context *ctx,
@@ -988,7 +955,6 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 void i915_gem_context_close(struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct drm_i915_private *i915 = file_priv->dev_priv;
 	struct i915_address_space *vm;
 	struct i915_gem_context *ctx;
 	unsigned long idx;
@@ -1000,8 +966,6 @@ void i915_gem_context_close(struct drm_file *file)
 	xa_for_each(&file_priv->vm_xa, idx, vm)
 		i915_vm_put(vm);
 	xa_destroy(&file_priv->vm_xa);
-
-	contexts_flush_free(&i915->gem.contexts);
 }
 
 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -1116,7 +1080,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 		return err;
 	}
 
-	e = __context_engines_await(ctx);
+	e = __context_engines_await(ctx, NULL);
 	if (!e) {
 		i915_active_release(&cb->base);
 		return -ENOENT;
@@ -1879,27 +1843,6 @@ set_engines(struct i915_gem_context *ctx,
 	return 0;
 }
 
-static struct i915_gem_engines *
-__copy_engines(struct i915_gem_engines *e)
-{
-	struct i915_gem_engines *copy;
-	unsigned int n;
-
-	copy = alloc_engines(e->num_engines);
-	if (!copy)
-		return ERR_PTR(-ENOMEM);
-
-	for (n = 0; n < e->num_engines; n++) {
-		if (e->engines[n])
-			copy->engines[n] = intel_context_get(e->engines[n]);
-		else
-			copy->engines[n] = NULL;
-	}
-	copy->num_engines = n;
-
-	return copy;
-}
-
 static int
 get_engines(struct i915_gem_context *ctx,
 	    struct drm_i915_gem_context_param *args)
@@ -1907,19 +1850,17 @@ get_engines(struct i915_gem_context *ctx,
 	struct i915_context_param_engines __user *user;
 	struct i915_gem_engines *e;
 	size_t n, count, size;
+	bool user_engines;
 	int err = 0;
 
-	err = mutex_lock_interruptible(&ctx->engines_mutex);
-	if (err)
-		return err;
+	e = __context_engines_await(ctx, &user_engines);
+	if (!e)
+		return -ENOENT;
 
-	e = NULL;
-	if (i915_gem_context_user_engines(ctx))
-		e = __copy_engines(i915_gem_context_engines(ctx));
-	mutex_unlock(&ctx->engines_mutex);
-	if (IS_ERR_OR_NULL(e)) {
+	if (!user_engines) {
+		i915_sw_fence_complete(&e->fence);
 		args->size = 0;
-		return PTR_ERR_OR_ZERO(e);
+		return 0;
 	}
 
 	count = e->num_engines;
@@ -1970,7 +1911,7 @@ get_engines(struct i915_gem_context *ctx,
 	args->size = size;
 
 err_free:
-	free_engines(e);
+	i915_sw_fence_complete(&e->fence);
 	return err;
 }
 
@@ -2136,11 +2077,14 @@ static int copy_ring_size(struct intel_context *dst,
 static int clone_engines(struct i915_gem_context *dst,
 			 struct i915_gem_context *src)
 {
-	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
-	struct i915_gem_engines *clone;
+	struct i915_gem_engines *clone, *e;
 	bool user_engines;
 	unsigned long n;
 
+	e = __context_engines_await(src, &user_engines);
+	if (!e)
+		return -ENOENT;
+
 	clone = alloc_engines(e->num_engines);
 	if (!clone)
 		goto err_unlock;
@@ -2182,9 +2126,7 @@ static int clone_engines(struct i915_gem_context *dst,
 		}
 	}
 	clone->num_engines = n;
-
-	user_engines = i915_gem_context_user_engines(src);
-	i915_gem_context_unlock_engines(src);
+	i915_sw_fence_complete(&e->fence);
 
 	/* Serialised by constructor */
 	engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
@@ -2195,7 +2137,7 @@ static int clone_engines(struct i915_gem_context *dst,
 	return 0;
 
 err_unlock:
-	i915_gem_context_unlock_engines(src);
+	i915_sw_fence_complete(&e->fence);
 	return -ENOMEM;
 }
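
The recurring pattern introduced above replaces copying the engine array under engines_mutex with awaiting the engines' sw_fence; a hedged sketch of a reader following that pattern (mirroring get_engines() and clone_engines()):

static int example_walk_engines(struct i915_gem_context *ctx)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_engines *e;
	struct intel_context *ce;

	e = __context_engines_await(ctx, NULL);
	if (!e)
		return -ENOENT;

	for_each_gem_engine(ce, e, it) {
		/* e is kept alive by the awaited fence, not by a mutex */
	}

	i915_sw_fence_complete(&e->fence); /* drop the await */
	return 0;
}
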
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index a133f92bbedb7a51a958138030b2713a2642549f..b5c908f3f4f22a1fba5806b3a5e94f82820dd876 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -110,7 +110,6 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
 
 /* i915_gem_context.c */
 void i915_gem_init__contexts(struct drm_i915_private *i915);
-void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
 
 int i915_gem_context_open(struct drm_i915_private *i915,
 			  struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index ae14ca24a11f2e425a6da93adc067f7163e70453..1449f54924e03086be6c9dfb2d03c548c5506fab 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -108,7 +108,6 @@ struct i915_gem_context {
 
 	/** link: place with &drm_i915_private.context_list */
 	struct list_head link;
-	struct llist_node free_link;
 
 	/**
 	 * @ref: reference count
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
new file mode 100644
index 0000000000000000000000000000000000000000..45d60e3d98e3bb934af08d8f4917145771abe183
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_region.h"
+
+#include "i915_drv.h"
+
+static int
+i915_gem_create(struct drm_file *file,
+		struct intel_memory_region *mr,
+		u64 *size_p,
+		u32 *handle_p)
+{
+	struct drm_i915_gem_object *obj;
+	u32 handle;
+	u64 size;
+	int ret;
+
+	GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
+	size = round_up(*size_p, mr->min_page_size);
+	if (size == 0)
+		return -EINVAL;
+
+	/* For most of the ABI (e.g. mmap) we think in system pages */
+	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+	/* Allocate the new object */
+	obj = i915_gem_object_create_region(mr, size, 0);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	GEM_BUG_ON(size != obj->base.size);
+
+	ret = drm_gem_handle_create(file, &obj->base, &handle);
+	/* drop reference from allocate - handle holds it now */
+	i915_gem_object_put(obj);
+	if (ret)
+		return ret;
+
+	*handle_p = handle;
+	*size_p = size;
+	return 0;
+}
+
+int
+i915_gem_dumb_create(struct drm_file *file,
+		     struct drm_device *dev,
+		     struct drm_mode_create_dumb *args)
+{
+	enum intel_memory_type mem_type;
+	int cpp = DIV_ROUND_UP(args->bpp, 8);
+	u32 format;
+
+	switch (cpp) {
+	case 1:
+		format = DRM_FORMAT_C8;
+		break;
+	case 2:
+		format = DRM_FORMAT_RGB565;
+		break;
+	case 4:
+		format = DRM_FORMAT_XRGB8888;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* have to work out size/pitch and return them */
+	args->pitch = ALIGN(args->width * cpp, 64);
+
+	/* align stride to page size so that we can remap */
+	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
+						    DRM_FORMAT_MOD_LINEAR))
+		args->pitch = ALIGN(args->pitch, 4096);
+
+	if (args->pitch < args->width)
+		return -EINVAL;
+
+	args->size = mul_u32_u32(args->pitch, args->height);
+
+	mem_type = INTEL_MEMORY_SYSTEM;
+	if (HAS_LMEM(to_i915(dev)))
+		mem_type = INTEL_MEMORY_LOCAL;
+
+	return i915_gem_create(file,
+			       intel_memory_region_by_type(to_i915(dev),
+							   mem_type),
+			       &args->size, &args->handle);
+}
+
+/**
+ * i915_gem_create_ioctl - create a new mm object and return a handle to it
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file)
+{
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct drm_i915_gem_create *args = data;
+
+	i915_gem_flush_free_objects(i915);
+
+	return i915_gem_create(file,
+			       intel_memory_region_by_type(i915,
+							   INTEL_MEMORY_SYSTEM),
+			       &args->size, &args->handle);
+}
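
Worked example for i915_gem_dumb_create() (values assumed): a 1920x1080 request at bpp = 32 gives cpp = 4 and pitch = ALIGN(1920 * 4, 64) = 7680 bytes; assuming that stride is within the plane limit, size = 7680 * 1080 = 8294400 bytes, which i915_gem_create() then rounds up to the memory region's min_page_size.
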
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 3d435bfff7649aeaf279164b8b16ce3fe6391c54..36f54cedaaebb5dfcddeb7ba2623bcfd7487ac08 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -5,6 +5,7 @@
  */
 
 #include "display/intel_frontbuffer.h"
+#include "gt/intel_gt.h"
 
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
@@ -15,13 +16,58 @@
 #include "i915_gem_lmem.h"
 #include "i915_gem_mman.h"
 
+static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+	return !(obj->cache_level == I915_CACHE_NONE ||
+		 obj->cache_level == I915_CACHE_WT);
+}
+
+static void
+flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
+{
+	struct i915_vma *vma;
+
+	assert_object_held(obj);
+
+	if (!(obj->write_domain & flush_domains))
+		return;
+
+	switch (obj->write_domain) {
+	case I915_GEM_DOMAIN_GTT:
+		spin_lock(&obj->vma.lock);
+		for_each_ggtt_vma(vma, obj) {
+			if (i915_vma_unset_ggtt_write(vma))
+				intel_gt_flush_ggtt_writes(vma->vm->gt);
+		}
+		spin_unlock(&obj->vma.lock);
+
+		i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+		break;
+
+	case I915_GEM_DOMAIN_WC:
+		wmb();
+		break;
+
+	case I915_GEM_DOMAIN_CPU:
+		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
+		break;
+
+	case I915_GEM_DOMAIN_RENDER:
+		if (gpu_write_needs_clflush(obj))
+			obj->cache_dirty = true;
+		break;
+	}
+
+	obj->write_domain = 0;
+}
+
 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
 {
 	/*
 	 * We manually flush the CPU domain so that we can override and
 	 * force the flush for the display, and perform it asynchronously.
 	 */
-	i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
 	if (obj->cache_dirty)
 		i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
 	obj->write_domain = 0;
@@ -80,7 +126,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
 	if (ret)
 		return ret;
 
-	i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
+	flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
 
 	/* Serialise direct access to this object with the barriers for
 	 * coherent writes from the GPU, by effectively invalidating the
@@ -141,7 +187,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (ret)
 		return ret;
 
-	i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
+	flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
 
 	/* Serialise direct access to this object with the barriers for
 	 * coherent writes from the GPU, by effectively invalidating the
@@ -370,6 +416,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	}
 
 	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+	i915_vma_mark_scanout(vma);
 
 	i915_gem_object_flush_if_display_locked(obj);
 
@@ -409,7 +456,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	if (ret)
 		return ret;
 
-	i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
 
 	/* Flush the CPU cache if it's still invalid. */
 	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
@@ -574,7 +621,7 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
 			goto out;
 	}
 
-	i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
 
 	/* If we're not in the cpu read domain, set ourself into the gtt
 	 * read domain and manually flush cachelines (if required). This
@@ -625,7 +672,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
 			goto out;
 	}
 
-	i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
 
 	/* If we're not in the cpu write domain, set ourself into the
 	 * gtt write domain and manually flush cachelines (as required).
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index bd3046e5a934801c399a8e77055b17fa5a937afe..d70ca36f74f69d7e8c44eec50c95d1b279e4b04c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -15,6 +15,7 @@
 
 #include "gem/i915_gem_ioctls.h"
 #include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_buffer_pool.h"
 #include "gt/intel_gt_pm.h"
@@ -534,8 +535,6 @@ eb_add_vma(struct i915_execbuffer *eb,
 	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
 	struct eb_vma *ev = &eb->vma[i];
 
-	GEM_BUG_ON(i915_vma_is_closed(vma));
-
 	ev->vma = vma;
 	ev->exec = entry;
 	ev->flags = entry->flags;
@@ -1277,7 +1276,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	int err;
 
 	if (!pool) {
-		pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE);
+		pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE,
+						cache->has_llc ?
+						I915_MAP_WB :
+						I915_MAP_WC);
 		if (IS_ERR(pool))
 			return PTR_ERR(pool);
 	}
@@ -1287,10 +1289,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	if (err)
 		goto err_pool;
 
-	cmd = i915_gem_object_pin_map(pool->obj,
-				      cache->has_llc ?
-				      I915_MAP_FORCE_WB :
-				      I915_MAP_FORCE_WC);
+	cmd = i915_gem_object_pin_map(pool->obj, pool->type);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
 		goto err_pool;
@@ -2459,7 +2458,8 @@ static int eb_parse(struct i915_execbuffer *eb)
 		return -EINVAL;
 
 	if (!pool) {
-		pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
+		pool = intel_gt_get_buffer_pool(eb->engine->gt, len,
+						I915_MAP_WB);
 		if (IS_ERR(pool))
 			return PTR_ERR(pool);
 		eb->batch_pool = pool;
@@ -2535,6 +2535,9 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
 {
 	int err;
 
+	if (intel_context_nopreempt(eb->context))
+		__set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
+
 	err = eb_move_to_gpu(eb);
 	if (err)
 		return err;
@@ -2575,15 +2578,12 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
 			return err;
 	}
 
-	if (intel_context_nopreempt(eb->context))
-		__set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
-
 	return 0;
 }
 
 static int num_vcs_engines(const struct drm_i915_private *i915)
 {
-	return hweight64(VDBOX_MASK(&i915->gt));
+	return hweight_long(VDBOX_MASK(&i915->gt));
 }
 
 /*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 932ee21e6609c61935d94d1e801cf0ef1b92d8f4..194f3534271010e01d974c900eb5a6fa2014a997 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -31,18 +31,13 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
 					     size, flags);
 }
 
-struct drm_i915_gem_object *
-__i915_gem_lmem_object_create(struct intel_memory_region *mem,
-			      resource_size_t size,
-			      unsigned int flags)
+int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
+				struct drm_i915_gem_object *obj,
+				resource_size_t size,
+				unsigned int flags)
 {
 	static struct lock_class_key lock_class;
 	struct drm_i915_private *i915 = mem->i915;
-	struct drm_i915_gem_object *obj;
-
-	obj = i915_gem_object_alloc();
-	if (!obj)
-		return ERR_PTR(-ENOMEM);
 
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 	i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class);
@@ -53,5 +48,5 @@ __i915_gem_lmem_object_create(struct intel_memory_region *mem,
 
 	i915_gem_object_init_memory_region(obj, mem, flags);
 
-	return obj;
+	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index fc3f15580fe3850d7f6153e9d8cb8912a8c793d5..036d53c01de9543551ccb4039a729f8b8376ceb5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -21,9 +21,9 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
 			    resource_size_t size,
 			    unsigned int flags);
 
-struct drm_i915_gem_object *
-__i915_gem_lmem_object_create(struct intel_memory_region *mem,
-			      resource_size_t size,
-			      unsigned int flags);
+int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
+				struct drm_i915_gem_object *obj,
+				resource_size_t size,
+				unsigned int flags);
 
 #endif /* !__I915_GEM_LMEM_H */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 00d24000b5e8cddfceee8633f21840df0d167741..70f798405f7f21671c3561856c8d6119fd097124 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -25,13 +25,13 @@
 #include <linux/sched/mm.h>
 
 #include "display/intel_frontbuffer.h"
-#include "gt/intel_gt.h"
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
 #include "i915_gem_context.h"
 #include "i915_gem_mman.h"
 #include "i915_gem_object.h"
 #include "i915_globals.h"
+#include "i915_memcpy.h"
 #include "i915_trace.h"
 
 static struct i915_global_object {
@@ -313,52 +313,6 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		queue_work(i915->wq, &i915->mm.free_work);
 }
 
-static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
-{
-	return !(obj->cache_level == I915_CACHE_NONE ||
-		 obj->cache_level == I915_CACHE_WT);
-}
-
-void
-i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
-				   unsigned int flush_domains)
-{
-	struct i915_vma *vma;
-
-	assert_object_held(obj);
-
-	if (!(obj->write_domain & flush_domains))
-		return;
-
-	switch (obj->write_domain) {
-	case I915_GEM_DOMAIN_GTT:
-		spin_lock(&obj->vma.lock);
-		for_each_ggtt_vma(vma, obj) {
-			if (i915_vma_unset_ggtt_write(vma))
-				intel_gt_flush_ggtt_writes(vma->vm->gt);
-		}
-		spin_unlock(&obj->vma.lock);
-
-		i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
-		break;
-
-	case I915_GEM_DOMAIN_WC:
-		wmb();
-		break;
-
-	case I915_GEM_DOMAIN_CPU:
-		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
-		break;
-
-	case I915_GEM_DOMAIN_RENDER:
-		if (gpu_write_needs_clflush(obj))
-			obj->cache_dirty = true;
-		break;
-	}
-
-	obj->write_domain = 0;
-}
-
 void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
 					 enum fb_op_origin origin)
 {
@@ -383,6 +337,70 @@ void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
 	}
 }
 
+static void
+i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
+{
+	void *src_map;
+	void *src_ptr;
+
+	src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));
+
+	src_ptr = src_map + offset_in_page(offset);
+	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+		drm_clflush_virt_range(src_ptr, size);
+	memcpy(dst, src_ptr, size);
+
+	kunmap_atomic(src_map);
+}
+
+static void
+i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
+{
+	void __iomem *src_map;
+	void __iomem *src_ptr;
+	dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);
+
+	src_map = io_mapping_map_wc(&obj->mm.region->iomap,
+				    dma - obj->mm.region->region.start,
+				    PAGE_SIZE);
+
+	src_ptr = src_map + offset_in_page(offset);
+	if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
+		memcpy_fromio(dst, src_ptr, size);
+
+	io_mapping_unmap(src_map);
+}
+
+/**
+ * i915_gem_object_read_from_page - read data from the page of a GEM object
+ * @obj: GEM object to read from
+ * @offset: offset within the object
+ * @dst: buffer to store the read data
+ * @size: size to read
+ *
+ * Reads data from @obj at the specified offset. The requested region to read
+ * from can't cross a page boundary. The caller must ensure that @obj pages
+ * are pinned and that @obj is synced wrt. any related writes.
+ *
+ * Returns 0 on success or -ENODEV if the type of @obj's backing store is
+ * unsupported.
+ */
+int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
+{
+	GEM_BUG_ON(offset >= obj->base.size);
+	GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+	if (i915_gem_object_has_struct_page(obj))
+		i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
+	else if (i915_gem_object_has_iomem(obj))
+		i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
+	else
+		return -ENODEV;
+
+	return 0;
+}
+
 void i915_gem_init__objects(struct drm_i915_private *i915)
 {
 	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
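
A minimal usage sketch for the new reader (wrapper hypothetical): per the kerneldoc above, the caller pins the pages, keeps the read within a single page, and handles -ENODEV for backing stores that are neither struct-page nor iomem based:

static int example_read_dword(struct drm_i915_gem_object *obj,
			      u64 offset, u32 *val)
{
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	/* A naturally aligned dword never crosses a page boundary */
	err = i915_gem_object_read_from_page(obj, offset, val, sizeof(*val));

	i915_gem_object_unpin_pages(obj);
	return err;
}
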
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 4556afe18f16d41779e043d904ca02408649b013..d0ae834d787afdcb5a9ff657517a512379922f91 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -187,6 +187,24 @@ i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
 	obj->flags |= I915_BO_ALLOC_VOLATILE;
 }
 
+static inline bool
+i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
+{
+	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
+}
+
+static inline void
+i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
+{
+	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
+}
+
+static inline void
+i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
+{
+	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
+}
+
 static inline bool
 i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
 			 unsigned long flags)
@@ -200,6 +218,12 @@ i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
 	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
 }
 
+static inline bool
+i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
+{
+	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
+}
+
 static inline bool
 i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
 {
@@ -384,14 +408,6 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
 
-enum i915_map_type {
-	I915_MAP_WB = 0,
-	I915_MAP_WC,
-#define I915_MAP_OVERRIDE BIT(31)
-	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
-	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
-};
-
 /**
  * i915_gem_object_pin_map - return a contiguous mapping of the entire object
  * @obj: the object to map into kernel address space
@@ -435,10 +451,6 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
 
 void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
 
-void
-i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
-				   unsigned int flush_domains);
-
 int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
 				 unsigned int *needs_clflush);
 int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
@@ -511,6 +523,9 @@ static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
 		obj->cache_dirty = true;
 }
 
+void i915_gem_fence_wait_priority(struct dma_fence *fence,
+				  const struct i915_sched_attr *attr);
+
 int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 			 unsigned int flags,
 			 long timeout);
@@ -539,4 +554,8 @@ i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
 		__i915_gem_object_invalidate_frontbuffer(obj, origin);
 }
 
+int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);
+
+bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);
+
 #endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index aee7ad3cc3c68d4d3aac981a6903a81bddea4c5e..d6dac21fce0bc8b8d05dfe7f53b51d2fb914c4c6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -6,6 +6,7 @@
 #include "i915_drv.h"
 #include "gt/intel_context.h"
 #include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_buffer_pool.h"
 #include "gt/intel_ring.h"
@@ -34,7 +35,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
 	count = div_u64(round_up(vma->size, block_size), block_size);
 	size = (1 + 8 * count) * sizeof(u32);
 	size = round_up(size, PAGE_SIZE);
-	pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
+	pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC);
 	if (IS_ERR(pool)) {
 		err = PTR_ERR(pool);
 		goto out_pm;
@@ -54,7 +55,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
 	if (unlikely(err))
 		goto out_put;
 
-	cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+	cmd = i915_gem_object_pin_map(pool->obj, pool->type);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
 		goto out_unpin;
@@ -256,7 +257,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
 	count = div_u64(round_up(dst->size, block_size), block_size);
 	size = (1 + 11 * count) * sizeof(u32);
 	size = round_up(size, PAGE_SIZE);
-	pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
+	pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC);
 	if (IS_ERR(pool)) {
 		err = PTR_ERR(pool);
 		goto out_pm;
@@ -276,7 +277,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
 	if (unlikely(err))
 		goto out_put;
 
-	cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+	cmd = i915_gem_object_pin_map(pool->obj, pool->type);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
 		goto out_unpin;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index e2d9b7e1e152da138d48f71fe8596877d41cfc36..0438e00d4ca7d91a4b5100085453d18d6f21f63d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -67,6 +67,14 @@ struct drm_i915_gem_object_ops {
 	const char *name; /* friendly name for debug, e.g. lockdep classes */
 };
 
+enum i915_map_type {
+	I915_MAP_WB = 0,
+	I915_MAP_WC,
+#define I915_MAP_OVERRIDE BIT(31)
+	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
+	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
+};
+
 enum i915_mmap_type {
 	I915_MMAP_TYPE_GTT = 0,
 	I915_MMAP_TYPE_WC,
@@ -142,8 +150,6 @@ struct drm_i915_gem_object {
 	 */
 	struct list_head obj_link;
 
-	/** Stolen memory for this object, instead of being backed by shmem. */
-	struct drm_mm_node *stolen;
 	union {
 		struct rcu_head rcu;
 		struct llist_node freed;
@@ -167,6 +173,7 @@ struct drm_i915_gem_object {
 #define I915_BO_ALLOC_VOLATILE   BIT(1)
 #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
 #define I915_BO_READONLY         BIT(2)
+#define I915_TILING_QUIRK_BIT    3 /* unknown swizzling; do not release! */
 
 	/*
 	 * Is the object to be mapped as read-only to the GPU
@@ -275,12 +282,6 @@ struct drm_i915_gem_object {
 		 * pages were last acquired.
 		 */
 		bool dirty:1;
-
-		/**
-		 * This is set if the object has been pinned due to unknown
-		 * swizzling.
-		 */
-		bool quirked:1;
 	} mm;
 
 	/** Record of address bit 17 of each page at last unbind. */
@@ -295,6 +296,8 @@ struct drm_i915_gem_object {
 			struct work_struct *work;
 		} userptr;
 
+		struct drm_mm_node *stolen;
+
 		unsigned long scratch;
 		u64 encode;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index e2c7b2a7895fff25d7c4d13a24112ce95939c3a3..43028f3539a6dc22e69e30a0709327ba0fcc8020 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -16,6 +16,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	unsigned long supported = INTEL_INFO(i915)->page_sizes;
+	bool shrinkable;
 	int i;
 
 	lockdep_assert_held(&obj->mm.lock);
@@ -38,13 +39,6 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 
 	obj->mm.pages = pages;
 
-	if (i915_gem_object_is_tiled(obj) &&
-	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
-		GEM_BUG_ON(obj->mm.quirked);
-		__i915_gem_object_pin_pages(obj);
-		obj->mm.quirked = true;
-	}
-
 	GEM_BUG_ON(!sg_page_sizes);
 	obj->mm.page_sizes.phys = sg_page_sizes;
 
@@ -63,7 +57,16 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	}
 	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
 
-	if (i915_gem_object_is_shrinkable(obj)) {
+	shrinkable = i915_gem_object_is_shrinkable(obj);
+
+	if (i915_gem_object_is_tiled(obj) &&
+	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+		GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+		i915_gem_object_set_tiling_quirk(obj);
+		shrinkable = false;
+	}
+
+	if (shrinkable) {
 		struct list_head *list;
 		unsigned long flags;
 
@@ -238,7 +241,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 
 /* The 'mapping' part of i915_gem_object_pin_map() below */
 static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
-		enum i915_map_type type)
+				      enum i915_map_type type)
 {
 	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
 	struct page *stack[32], **pages = stack, *page;
@@ -281,7 +284,7 @@ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
 		/* Too big for stack -- allocate temporary array instead */
 		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
 		if (!pages)
-			return NULL;
+			return ERR_PTR(-ENOMEM);
 	}
 
 	i = 0;
@@ -290,11 +293,12 @@ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
 	vaddr = vmap(pages, n_pages, 0, pgprot);
 	if (pages != stack)
 		kvfree(pages);
-	return vaddr;
+
+	return vaddr ?: ERR_PTR(-ENOMEM);
 }
 
 static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
-		enum i915_map_type type)
+				     enum i915_map_type type)
 {
 	resource_size_t iomap = obj->mm.region->iomap.base -
 		obj->mm.region->region.start;
@@ -305,13 +309,13 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
 	void *vaddr;
 
 	if (type != I915_MAP_WC)
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	if (n_pfn > ARRAY_SIZE(stack)) {
 		/* Too big for stack -- allocate temporary array instead */
 		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
 		if (!pfns)
-			return NULL;
+			return ERR_PTR(-ENOMEM);
 	}
 
 	i = 0;
@@ -320,7 +324,8 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
 	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
 	if (pfns != stack)
 		kvfree(pfns);
-	return vaddr;
+
+	return vaddr ?: ERR_PTR(-ENOMEM);
 }
 
 /* get, pin, and map the pages of the object into kernel space */
@@ -349,8 +354,10 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
 			err = ____i915_gem_object_get_pages(obj);
-			if (err)
-				goto err_unlock;
+			if (err) {
+				ptr = ERR_PTR(err);
+				goto out_unlock;
+			}
 
 			smp_mb__before_atomic();
 		}
@@ -362,7 +369,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
 	if (ptr && has_type != type) {
 		if (pinned) {
-			err = -EBUSY;
+			ptr = ERR_PTR(-EBUSY);
 			goto err_unpin;
 		}
 
@@ -374,15 +381,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	if (!ptr) {
 		if (GEM_WARN_ON(type == I915_MAP_WC &&
 				!static_cpu_has(X86_FEATURE_PAT)))
-			ptr = NULL;
+			ptr = ERR_PTR(-ENODEV);
 		else if (i915_gem_object_has_struct_page(obj))
 			ptr = i915_gem_object_map_page(obj, type);
 		else
 			ptr = i915_gem_object_map_pfn(obj, type);
-		if (!ptr) {
-			err = -ENOMEM;
+		if (IS_ERR(ptr))
 			goto err_unpin;
-		}
 
 		obj->mm.mapping = page_pack_bits(ptr, type);
 	}
@@ -393,8 +398,6 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 
 err_unpin:
 	atomic_dec(&obj->mm.pages_pin_count);
-err_unlock:
-	ptr = ERR_PTR(err);
 	goto out_unlock;
 }
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 3a4dfe2ef1da5b9d847105f49bfacd50524a3252..3c0b157e2a355dd25945e73b52706cc2fee4c131 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -213,7 +213,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 	if (obj->ops == &i915_gem_phys_ops)
 		return 0;
 
-	if (obj->ops != &i915_gem_shmem_ops)
+	if (!i915_gem_object_is_shmem(obj))
 		return -EINVAL;
 
 	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
@@ -227,7 +227,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 		goto err_unlock;
 	}
 
-	if (obj->mm.quirked) {
+	if (i915_gem_object_has_tiling_quirk(obj)) {
 		err = -EFAULT;
 		goto err_unlock;
 	}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 40d3e40500fa002ce0b4dcdbfb9840314f9550fc..000e1cd8e9201d282ca23b78cd77365434ecc635 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -11,6 +11,13 @@
 
 #include "i915_drv.h"
 
+#if defined(CONFIG_X86)
+#include <asm/smp.h>
+#else
+#define wbinvd_on_all_cpus() \
+	pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__)
+#endif
+
 void i915_gem_suspend(struct drm_i915_private *i915)
 {
 	GEM_TRACE("%s\n", dev_name(i915->drm.dev));
@@ -32,13 +39,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 	i915_gem_drain_freed_objects(i915);
 }
 
-static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
-{
-	return list_first_entry_or_null(list,
-					struct drm_i915_gem_object,
-					mm.link);
-}
-
 void i915_gem_suspend_late(struct drm_i915_private *i915)
 {
 	struct drm_i915_gem_object *obj;
@@ -48,6 +48,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
 		NULL
 	}, **phase;
 	unsigned long flags;
+	bool flush = false;
 
 	/*
 	 * Neither the BIOS, ourselves or any other kernel
@@ -73,29 +74,56 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
 
 	spin_lock_irqsave(&i915->mm.obj_lock, flags);
 	for (phase = phases; *phase; phase++) {
-		LIST_HEAD(keep);
+		list_for_each_entry(obj, *phase, mm.link) {
+			if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+				flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0;
+			__start_cpu_write(obj); /* presume auto-hibernate */
+		}
+	}
+	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+	if (flush)
+		wbinvd_on_all_cpus();
+}
 
-		while ((obj = first_mm_object(*phase))) {
-			list_move_tail(&obj->mm.link, &keep);
+int i915_gem_freeze(struct drm_i915_private *i915)
+{
+	/*
+	 * Discard all purgeable objects, let userspace recover those as
+	 * required after resuming.
+	 */
+	i915_gem_shrink_all(i915);
 
-			/* Beware the background _i915_gem_free_objects */
-			if (!kref_get_unless_zero(&obj->base.refcount))
-				continue;
+	return 0;
+}
 
-			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+int i915_gem_freeze_late(struct drm_i915_private *i915)
+{
+	struct drm_i915_gem_object *obj;
+	intel_wakeref_t wakeref;
 
-			i915_gem_object_lock(obj, NULL);
-			drm_WARN_ON(&i915->drm,
-			    i915_gem_object_set_to_gtt_domain(obj, false));
-			i915_gem_object_unlock(obj);
-			i915_gem_object_put(obj);
+	/*
+	 * Called just before we write the hibernation image.
+	 *
+	 * We need to update the domain tracking to reflect that the CPU
+	 * will be accessing all the pages to create and restore from the
+	 * hibernation, and so upon restoration those pages will be in the
+	 * CPU domain.
+	 *
+	 * To make sure the hibernation image contains the latest state,
+	 * we update that state just before writing out the image.
+	 *
+	 * To try and reduce the hibernation image, we manually shrink
+	 * the objects as well, see i915_gem_freeze()
+	 */
 
-			spin_lock_irqsave(&i915->mm.obj_lock, flags);
-		}
+	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+		i915_gem_shrink(i915, -1UL, NULL, ~0);
+	i915_gem_drain_freed_objects(i915);
 
-		list_splice_tail(&keep, *phase);
-	}
-	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+	wbinvd_on_all_cpus();
+	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link)
+		__start_cpu_write(obj);
+
+	return 0;
 }
 
 void i915_gem_resume(struct drm_i915_private *i915)
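
A hedged sketch of how the new freeze hooks would be wired into the hibernation PM callbacks (wrapper names hypothetical, and the kdev_to_i915() helper is assumed from i915_drv.h; the actual hook-up is outside this excerpt):

static int example_pm_freeze(struct device *kdev)
{
	/* Before the hibernation image is allocated: shed purgeable pages */
	return i915_gem_freeze(kdev_to_i915(kdev));
}

static int example_pm_freeze_late(struct device *kdev)
{
	/* Just before the image is written: flush caches, mark CPU domain */
	return i915_gem_freeze_late(kdev_to_i915(kdev));
}
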
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
index 26b78dbdc225665360784ad6c0741b5718bd3b74..c9a66630e92e05572dc3b40e722f9485677bdbab 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
@@ -19,4 +19,7 @@ void i915_gem_idle_work_handler(struct work_struct *work);
 void i915_gem_suspend(struct drm_i915_private *i915);
 void i915_gem_suspend_late(struct drm_i915_private *i915);
 
+int i915_gem_freeze(struct drm_i915_private *i915);
+int i915_gem_freeze_late(struct drm_i915_private *i915);
+
 #endif /* __I915_GEM_PM_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 1515384d7e0efacc2c70456aaa9b5f663b1f1d04..3e3dad22a683146984c2102714c7571d1b34d79c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -22,6 +22,7 @@ i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
 int
 i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 {
+	const u64 max_segment = i915_sg_segment_size();
 	struct intel_memory_region *mem = obj->mm.region;
 	struct list_head *blocks = &obj->mm.blocks;
 	resource_size_t size = obj->base.size;
@@ -37,7 +38,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	if (!st)
 		return -ENOMEM;
 
-	if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
+	if (sg_alloc_table(st, size >> PAGE_SHIFT, GFP_KERNEL)) {
 		kfree(st);
 		return -ENOMEM;
 	}
@@ -64,27 +65,30 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 				   i915_buddy_block_size(&mem->mm, block));
 		offset = i915_buddy_block_offset(block);
 
-		GEM_BUG_ON(overflows_type(block_size, sg->length));
+		while (block_size) {
+			u64 len;
 
-		if (offset != prev_end ||
-		    add_overflows_t(typeof(sg->length), sg->length, block_size)) {
-			if (st->nents) {
-				sg_page_sizes |= sg->length;
-				sg = __sg_next(sg);
+			if (offset != prev_end || sg->length >= max_segment) {
+				if (st->nents) {
+					sg_page_sizes |= sg->length;
+					sg = __sg_next(sg);
+				}
+
+				sg_dma_address(sg) = mem->region.start + offset;
+				sg_dma_len(sg) = 0;
+				sg->length = 0;
+				st->nents++;
 			}
 
-			sg_dma_address(sg) = mem->region.start + offset;
-			sg_dma_len(sg) = block_size;
+			len = min(block_size, max_segment - sg->length);
+			sg->length += len;
+			sg_dma_len(sg) += len;
 
-			sg->length = block_size;
+			offset += len;
+			block_size -= len;
 
-			st->nents++;
-		} else {
-			sg->length += block_size;
-			sg_dma_len(sg) += block_size;
+			prev_end = offset;
 		}
-
-		prev_end = offset + block_size;
 	}
 
 	sg_page_sizes |= sg->length;
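The rewritten loop above caps every scatterlist entry at i915_sg_segment_size() while still merging physically contiguous buddy blocks. A standalone sketch of the same coalesce-or-split rule, with a hypothetical seg type standing in for the scatterlist plumbing:

struct seg { u64 start, len; };	/* hypothetical, for illustration */

static void append_block(struct seg *segs, unsigned int *nsegs,
			 u64 start, u64 block_size, u64 max_segment)
{
	while (block_size) {
		struct seg *sg = *nsegs ? &segs[*nsegs - 1] : NULL;
		u64 len;

		/* Open a new segment on a discontinuity or a full one */
		if (!sg || sg->start + sg->len != start ||
		    sg->len >= max_segment) {
			sg = &segs[(*nsegs)++];
			sg->start = start;
			sg->len = 0;
		}

		len = min(block_size, max_segment - sg->len);
		sg->len += len;
		start += len;
		block_size -= len;
	}
}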
@@ -139,6 +143,7 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
 			      unsigned int flags)
 {
 	struct drm_i915_gem_object *obj;
+	int err;
 
 	/*
 	 * NB: Our use of resource_size_t for the size stems from using struct
@@ -169,9 +174,18 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
 	if (overflows_type(size, obj->base.size))
 		return ERR_PTR(-E2BIG);
 
-	obj = mem->ops->create_object(mem, size, flags);
-	if (!IS_ERR(obj))
-		trace_i915_gem_object_create(obj);
+	obj = i915_gem_object_alloc();
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
 
+	err = mem->ops->init_object(mem, obj, size, flags);
+	if (err)
+		goto err_object_free;
+
+	trace_i915_gem_object_create(obj);
 	return obj;
+
+err_object_free:
+	i915_gem_object_free(obj);
+	return ERR_PTR(err);
 }
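With create_object() replaced by init_object(), the common path owns both allocation and error unwinding; a backend only initialises the object it is handed and returns an errno. A skeletal backend, mirroring the shmem and stolen conversions below (all myregion_* names are hypothetical):

static int myregion_object_init(struct intel_memory_region *mem,
				struct drm_i915_gem_object *obj,
				resource_size_t size,
				unsigned int flags)
{
	/* initialise obj in place; no allocation, no ERR_PTR */
	return 0;
}

static const struct intel_memory_region_ops myregion_ops = {
	.init = myregion_init,		/* hypothetical region setup */
	.release = myregion_release,	/* hypothetical region teardown */
	.init_object = myregion_object_init,
};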
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 75e8b71c18b9254bd8aeb423016409a5a2d61b19..cf83c208688cbfd4bd288b002b04de1c2fc497f9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -464,26 +464,21 @@ static int __create_shmem(struct drm_i915_private *i915,
 	return 0;
 }
 
-static struct drm_i915_gem_object *
-create_shmem(struct intel_memory_region *mem,
-	     resource_size_t size,
-	     unsigned int flags)
+static int shmem_object_init(struct intel_memory_region *mem,
+			     struct drm_i915_gem_object *obj,
+			     resource_size_t size,
+			     unsigned int flags)
 {
 	static struct lock_class_key lock_class;
 	struct drm_i915_private *i915 = mem->i915;
-	struct drm_i915_gem_object *obj;
 	struct address_space *mapping;
 	unsigned int cache_level;
 	gfp_t mask;
 	int ret;
 
-	obj = i915_gem_object_alloc();
-	if (!obj)
-		return ERR_PTR(-ENOMEM);
-
 	ret = __create_shmem(i915, &obj->base, size);
 	if (ret)
-		goto fail;
+		return ret;
 
 	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
 	if (IS_I965GM(i915) || IS_I965G(i915)) {
@@ -522,11 +517,7 @@ create_shmem(struct intel_memory_region *mem,
 
 	i915_gem_object_init_memory_region(obj, mem, 0);
 
-	return obj;
-
-fail:
-	i915_gem_object_free(obj);
-	return ERR_PTR(ret);
+	return 0;
 }
 
 struct drm_i915_gem_object *
@@ -611,7 +602,7 @@ static void release_shmem(struct intel_memory_region *mem)
 static const struct intel_memory_region_ops shmem_region_ops = {
 	.init = init_shmem,
 	.release = release_shmem,
-	.create_object = create_shmem,
+	.init_object = shmem_object_init,
 };
 
 struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
@@ -621,3 +612,8 @@ struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
 					  PAGE_SIZE, 0,
 					  &shmem_region_ops);
 }
+
+bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
+{
+	return obj->ops == &i915_gem_shmem_ops;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index dc8f052a0ffeba2192c6e490001ceb9efcd4a575..c2dba1cd9532becb1a4ae4210442a9c250d32bec 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -15,6 +15,7 @@
 
 #include "gt/intel_gt_requests.h"
 
+#include "dma_resv_utils.h"
 #include "i915_trace.h"
 
 static bool swap_available(void)
@@ -209,6 +210,8 @@ i915_gem_shrink(struct drm_i915_private *i915,
 				mutex_unlock(&obj->mm.lock);
 			}
 
+			dma_resv_prune(obj->base.resv);
+
 			scanned += obj->base.size >> PAGE_SHIFT;
 			i915_gem_object_put(obj);
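dma_resv_utils.h is introduced by this series; judging from the open-coded sequence it replaces in i915_gem_wait.c further down, dma_resv_prune() plausibly amounts to:

/* Plausible reconstruction, not the actual helper body */
static void dma_resv_prune_sketch(struct dma_resv *resv)
{
	if (dma_resv_trylock(resv)) {
		/* Drop all fences iff every one of them has signaled */
		if (dma_resv_test_signaled_rcu(resv, true))
			dma_resv_add_excl_fence(resv, NULL);
		dma_resv_unlock(resv);
	}
}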
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 29bffc6afcc151f4db43896577457f593239e167..a1e197a6e999d9ca030591a496ab24ef475f57b2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -608,11 +608,10 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
 
 	GEM_BUG_ON(!stolen);
-
-	i915_gem_object_release_memory_region(obj);
-
 	i915_gem_stolen_remove_node(i915, stolen);
 	kfree(stolen);
+
+	i915_gem_object_release_memory_region(obj);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -622,18 +621,13 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
 	.release = i915_gem_object_release_stolen,
 };
 
-static struct drm_i915_gem_object *
-__i915_gem_object_create_stolen(struct intel_memory_region *mem,
-				struct drm_mm_node *stolen)
+static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
+					   struct drm_i915_gem_object *obj,
+					   struct drm_mm_node *stolen)
 {
 	static struct lock_class_key lock_class;
-	struct drm_i915_gem_object *obj;
 	unsigned int cache_level;
-	int err = -ENOMEM;
-
-	obj = i915_gem_object_alloc();
-	if (!obj)
-		goto err;
+	int err;
 
 	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
 	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
@@ -645,55 +639,47 @@ __i915_gem_object_create_stolen(struct intel_memory_region *mem,
 
 	err = i915_gem_object_pin_pages(obj);
 	if (err)
-		goto cleanup;
+		return err;
 
 	i915_gem_object_init_memory_region(obj, mem, 0);
 
-	return obj;
-
-cleanup:
-	i915_gem_object_free(obj);
-err:
-	return ERR_PTR(err);
+	return 0;
 }
 
-static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct intel_memory_region *mem,
-			       resource_size_t size,
-			       unsigned int flags)
+static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
+					struct drm_i915_gem_object *obj,
+					resource_size_t size,
+					unsigned int flags)
 {
 	struct drm_i915_private *i915 = mem->i915;
-	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
 	int ret;
 
 	if (!drm_mm_initialized(&i915->mm.stolen))
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 
 	if (size == 0)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
 	if (!stolen)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
-	if (ret) {
-		obj = ERR_PTR(ret);
+	if (ret)
 		goto err_free;
-	}
 
-	obj = __i915_gem_object_create_stolen(mem, stolen);
-	if (IS_ERR(obj))
+	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
+	if (ret)
 		goto err_remove;
 
-	return obj;
+	return 0;
 
 err_remove:
 	i915_gem_stolen_remove_node(i915, stolen);
 err_free:
 	kfree(stolen);
-	return obj;
+	return ret;
 }
 
 struct drm_i915_gem_object *
@@ -723,7 +709,7 @@ static void release_stolen(struct intel_memory_region *mem)
 static const struct intel_memory_region_ops i915_region_stolen_ops = {
 	.init = init_stolen,
 	.release = release_stolen,
-	.create_object = _i915_gem_object_create_stolen,
+	.init_object = _i915_gem_object_stolen_init,
 };
 
 struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
@@ -767,21 +753,32 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
 	mutex_lock(&i915->mm.stolen_lock);
 	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
 	mutex_unlock(&i915->mm.stolen_lock);
-	if (ret) {
-		obj = ERR_PTR(ret);
+	if (ret)
 		goto err_free;
-	}
 
-	obj = __i915_gem_object_create_stolen(mem, stolen);
-	if (IS_ERR(obj))
+	obj = i915_gem_object_alloc();
+	if (!obj) {
+		ret = -ENOMEM;
 		goto err_stolen;
+	}
+
+	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
+	if (ret)
+		goto err_object_free;
 
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
 	return obj;
 
+err_object_free:
+	i915_gem_object_free(obj);
 err_stolen:
 	i915_gem_stolen_remove_node(i915, stolen);
 err_free:
 	kfree(stolen);
-	return obj;
+	return ERR_PTR(ret);
+}
+
+bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
+{
+	return obj->ops == &i915_gem_object_stolen_ops;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index 61e028063f9fb0638fc3d3355177dacc51d8c160..b03489706796934aecbb9492c8f6a5787648cdc5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -30,6 +30,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 					       resource_size_t stolen_offset,
 					       resource_size_t size);
 
+bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
+
 #define I915_GEM_STOLEN_BIAS SZ_128K
 
 #endif /* __I915_GEM_STOLEN_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index ffcaee74a2493beec3d58fb2b224ca75b711118f..d589d3d81085dc4c1e524708d412e533fbbe01dc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -270,14 +270,14 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	    obj->mm.madv == I915_MADV_WILLNEED &&
 	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		if (tiling == I915_TILING_NONE) {
-			GEM_BUG_ON(!obj->mm.quirked);
-			__i915_gem_object_unpin_pages(obj);
-			obj->mm.quirked = false;
+			GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
+			i915_gem_object_clear_tiling_quirk(obj);
+			i915_gem_object_make_shrinkable(obj);
 		}
 		if (!i915_gem_object_is_tiled(obj)) {
-			GEM_BUG_ON(obj->mm.quirked);
-			__i915_gem_object_pin_pages(obj);
-			obj->mm.quirked = true;
+			GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+			i915_gem_object_make_unshrinkable(obj);
+			i915_gem_object_set_tiling_quirk(obj);
 		}
 	}
 	mutex_unlock(&obj->mm.lock);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 8af55cd3e6902edb72ea52ed6a9b879f1260adef..4b9856d5ba14f3f1ef3975af557d925cd14e58cd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -5,10 +5,12 @@
  */
 
 #include <linux/dma-fence-array.h>
+#include <linux/dma-fence-chain.h>
 #include <linux/jiffies.h>
 
 #include "gt/intel_engine.h"
 
+#include "dma_resv_utils.h"
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"
 
@@ -43,8 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 		unsigned int count, i;
 		int ret;
 
-		ret = dma_resv_get_fences_rcu(resv,
-							&excl, &count, &shared);
+		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
 		if (ret)
 			return ret;
 
@@ -84,17 +85,14 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 	 * Opportunistically prune the fences iff we know they have *all* been
 	 * signaled.
 	 */
-	if (prune_fences && dma_resv_trylock(resv)) {
-		if (dma_resv_test_signaled_rcu(resv, true))
-			dma_resv_add_excl_fence(resv, NULL);
-		dma_resv_unlock(resv);
-	}
+	if (prune_fences)
+		dma_resv_prune(resv);
 
 	return timeout;
 }
 
-static void __fence_set_priority(struct dma_fence *fence,
-				 const struct i915_sched_attr *attr)
+static void fence_set_priority(struct dma_fence *fence,
+			       const struct i915_sched_attr *attr)
 {
 	struct i915_request *rq;
 	struct intel_engine_cs *engine;
@@ -105,27 +103,47 @@ static void __fence_set_priority(struct dma_fence *fence,
 	rq = to_request(fence);
 	engine = rq->engine;
 
-	local_bh_disable();
 	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
 	if (engine->schedule)
 		engine->schedule(rq, attr);
 	rcu_read_unlock();
-	local_bh_enable(); /* kick the tasklets if queues were reprioritised */
 }
 
-static void fence_set_priority(struct dma_fence *fence,
-			       const struct i915_sched_attr *attr)
+static inline bool __dma_fence_is_chain(const struct dma_fence *fence)
 {
+	return fence->ops == &dma_fence_chain_ops;
+}
+
+void i915_gem_fence_wait_priority(struct dma_fence *fence,
+				  const struct i915_sched_attr *attr)
+{
+	if (dma_fence_is_signaled(fence))
+		return;
+
+	local_bh_disable();
+
 	/* Recurse once into a fence-array */
 	if (dma_fence_is_array(fence)) {
 		struct dma_fence_array *array = to_dma_fence_array(fence);
 		int i;
 
 		for (i = 0; i < array->num_fences; i++)
-			__fence_set_priority(array->fences[i], attr);
+			fence_set_priority(array->fences[i], attr);
+	} else if (__dma_fence_is_chain(fence)) {
+		struct dma_fence *iter;
+
+		/* The chain is ordered; if we boost the last, we boost all */
+		dma_fence_chain_for_each(iter, fence) {
+			fence_set_priority(to_dma_fence_chain(iter)->fence,
+					   attr);
+			break;
+		}
+		dma_fence_put(iter);
 	} else {
-		__fence_set_priority(fence, attr);
+		fence_set_priority(fence, attr);
 	}
+
+	local_bh_enable(); /* kick the tasklets if queues were reprioritised */
 }
 
 int
@@ -141,12 +159,12 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 		int ret;
 
 		ret = dma_resv_get_fences_rcu(obj->base.resv,
-							&excl, &count, &shared);
+					      &excl, &count, &shared);
 		if (ret)
 			return ret;
 
 		for (i = 0; i < count; i++) {
-			fence_set_priority(shared[i], attr);
+			i915_gem_fence_wait_priority(shared[i], attr);
 			dma_fence_put(shared[i]);
 		}
 
@@ -156,7 +174,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 	}
 
 	if (excl) {
-		fence_set_priority(excl, attr);
+		i915_gem_fence_wait_priority(excl, attr);
 		dma_fence_put(excl);
 	}
 	return 0;
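One subtlety in the chain handling above: dma_fence_chain_for_each() takes a reference on every link it visits, which is why breaking out early must be followed by dma_fence_put(iter). A walk that lets the iterator run to completion needs no manual put; a sketch counting unsignaled links:

static int count_unsignaled(struct dma_fence *head)
{
	struct dma_fence *iter;
	int n = 0;

	dma_fence_chain_for_each(iter, head) {
		struct dma_fence_chain *chain = to_dma_fence_chain(iter);

		/* The oldest link may be a plain, non-chain fence */
		if (!dma_fence_is_signaled(chain ? chain->fence : iter))
			n++;
	}

	return n; /* the iterator dropped its references as it advanced */
}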
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index a768ec61e966326302eaea9d656d49f7022f06c8..2fb501a78a85f7d46e8cf83edbb7851c9e7a027d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -27,7 +27,7 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
 
 static int huge_get_pages(struct drm_i915_gem_object *obj)
 {
-#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL)
 	const unsigned long nreal = obj->scratch / PAGE_SIZE;
 	const unsigned long npages = obj->base.size / PAGE_SIZE;
 	struct scatterlist *sg, *src, *end;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 1f35e71429b47e1507d7da62ddbe63f66e76dc66..aacf4856ccb4c0a482c3133ee55fafcaad0a200e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -368,6 +368,27 @@ static int igt_check_page_sizes(struct i915_vma *vma)
 		err = -EINVAL;
 	}
 
+	/*
+	 * The dma-api is like a box of chocolates when it comes to the
+	 * alignment of dma addresses; however, for LMEM we have total control
+	 * and so can guarantee alignment. Likewise, when we allocate our blocks
+	 * they should appear in descending order, and if we know that we align
+	 * to the largest page size for the GTT address, we should be able to
+	 * assert that if we see 2M physical pages then we should also get 2M
+	 * GTT pages. If we don't then something might be wrong in our
+	 * construction of the backing pages.
+	 *
+	 * Maintaining alignment is required to utilise huge pages in the ppGTT.
+	 */
+	if (i915_gem_object_is_lmem(obj) &&
+	    IS_ALIGNED(vma->node.start, SZ_2M) &&
+	    vma->page_sizes.sg & SZ_2M &&
+	    vma->page_sizes.gtt < SZ_2M) {
+		pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
+		       vma->page_sizes.sg, vma->page_sizes.gtt);
+		err = -EINVAL;
+	}
+
 	if (obj->mm.page_sizes.gtt) {
 		pr_err("obj->page_sizes.gtt(%u) should never be set\n",
 		       obj->mm.page_sizes.gtt);
@@ -1333,6 +1354,7 @@ static int igt_ppgtt_sanity_check(void *arg)
 		unsigned int flags;
 	} backends[] = {
 		{ igt_create_system, 0,                        },
+		{ igt_create_local,  0,                        },
 		{ igt_create_local,  I915_BO_ALLOC_CONTIGUOUS, },
 	};
 	struct {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 4e36d4897ea6512b7bdda79f81711741a3243cc9..6a674a7994dfc2ac22f2bb5cfa69fcafc056c06c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -20,13 +20,11 @@ static int __igt_client_fill(struct intel_engine_cs *engine)
 {
 	struct intel_context *ce = engine->kernel_context;
 	struct drm_i915_gem_object *obj;
-	struct rnd_state prng;
+	I915_RND_STATE(prng);
 	IGT_TIMEOUT(end);
 	u32 *vaddr;
 	int err = 0;
 
-	prandom_seed_state(&prng, i915_selftest.random_seed);
-
 	intel_engine_pm_get(engine);
 	do {
 		const u32 max_block_size = S16_MAX * PAGE_SIZE;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 7049a6bbc03dd2430eecd4fdfce55e3477a22b23..1117d2a44518fd9374df27b9cf03f74eb1850f62 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -7,6 +7,7 @@
 #include <linux/prime_numbers.h>
 
 #include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_ring.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index d27d87a678c8fd1e7f8714550575c7ee68bd212a..d429c7643ff20b9243fc891e7977217405cb5655 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -7,6 +7,7 @@
 #include <linux/prime_numbers.h>
 
 #include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "gem/i915_gem_region.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index e21b5023ca7d18edda6eadf289c26e6618a6da6e..d6783061bc72c64806ccb5d1bb67b7215b5bbe4a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -9,6 +9,7 @@
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
 #include "gt/intel_gt.h"
 #include "i915_vma.h"
 #include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
index 174a245533223d1d065173f0e592b80d629ec774..d4f4452ce5edb43ea03d22fc64103af9215f137a 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
@@ -11,6 +11,7 @@
 #include "i915_drv.h"
 #include "intel_gt.h"
 #include "intel_gt_clock_utils.h"
+#include "intel_gt_pm.h"
 #include "intel_llc.h"
 #include "intel_rc6.h"
 #include "intel_rps.h"
@@ -403,34 +404,34 @@ static int frequency_show(struct seq_file *m, void *unused)
 		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
 		seq_printf(m, "CAGF: %dMHz\n", cagf);
-		seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+		seq_printf(m, "RP CUR UP EI: %d (%lldns)\n",
 			   rpcurupei,
 			   intel_gt_pm_interval_to_ns(gt, rpcurupei));
-		seq_printf(m, "RP CUR UP: %d (%dns)\n",
+		seq_printf(m, "RP CUR UP: %d (%lldns)\n",
 			   rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
-		seq_printf(m, "RP PREV UP: %d (%dns)\n",
+		seq_printf(m, "RP PREV UP: %d (%lldns)\n",
 			   rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
 		seq_printf(m, "Up threshold: %d%%\n",
 			   rps->power.up_threshold);
-		seq_printf(m, "RP UP EI: %d (%dns)\n",
+		seq_printf(m, "RP UP EI: %d (%lldns)\n",
 			   rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
-		seq_printf(m, "RP UP THRESHOLD: %d (%dns)\n",
+		seq_printf(m, "RP UP THRESHOLD: %d (%lldns)\n",
 			   rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));
 
-		seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+		seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n",
 			   rpcurdownei,
 			   intel_gt_pm_interval_to_ns(gt, rpcurdownei));
-		seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+		seq_printf(m, "RP CUR DOWN: %d (%lldns)\n",
 			   rpcurdown,
 			   intel_gt_pm_interval_to_ns(gt, rpcurdown));
-		seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+		seq_printf(m, "RP PREV DOWN: %d (%lldns)\n",
 			   rpprevdown,
 			   intel_gt_pm_interval_to_ns(gt, rpprevdown));
 		seq_printf(m, "Down threshold: %d%%\n",
 			   rps->power.down_threshold);
-		seq_printf(m, "RP DOWN EI: %d (%dns)\n",
+		seq_printf(m, "RP DOWN EI: %d (%lldns)\n",
 			   rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
-		seq_printf(m, "RP DOWN THRESHOLD: %d (%dns)\n",
+		seq_printf(m, "RP DOWN THRESHOLD: %d (%lldns)\n",
 			   rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));
 
 		max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
@@ -558,7 +559,9 @@ static int rps_boost_show(struct seq_file *m, void *data)
 
 	seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
 	seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
-	seq_printf(m, "GPU busy? %s\n", yesno(gt->awake));
+	seq_printf(m, "GPU busy? %s, %llums\n",
+		   yesno(gt->awake),
+		   ktime_to_ms(intel_gt_get_awake_time(gt)));
 	seq_printf(m, "Boosts outstanding? %d\n",
 		   atomic_read(&rps->num_waiters));
 	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
@@ -575,7 +578,7 @@ static int rps_boost_show(struct seq_file *m, void *data)
 		   intel_gpu_freq(rps, rps->efficient_freq),
 		   intel_gpu_freq(rps, rps->boost_freq));
 
-	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
+	seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
 
 	if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) {
 		struct intel_uncore *uncore = gt->uncore;
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 680bd9442eb0cbfe2bcb3817343540b2492268b2..e08dff37633900eddfab4c284469374e699a2e40 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -12,9 +12,9 @@
 #include "intel_gt.h"
 
 /* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
-				  const unsigned int pde,
-				  const struct i915_page_table *pt)
+static void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
+			   const unsigned int pde,
+			   const struct i915_page_table *pt)
 {
 	dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]);
 
@@ -27,8 +27,6 @@ void gen7_ppgtt_enable(struct intel_gt *gt)
 {
 	struct drm_i915_private *i915 = gt->i915;
 	struct intel_uncore *uncore = gt->uncore;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
 	u32 ecochk;
 
 	intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
@@ -41,13 +39,6 @@ void gen7_ppgtt_enable(struct intel_gt *gt)
 		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
 	}
 	intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
-
-	for_each_engine(engine, gt, id) {
-		/* GFX_MODE is per-ring on gen7+ */
-		ENGINE_WRITE(engine,
-			     RING_MODE_GEN7,
-			     _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-	}
 }
 
 void gen6_ppgtt_enable(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index e961ad6a312944a89c09adcd5c57e29466d7e43d..de575fdb033f5acb9b3c4ccf47e45af3228e586e 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -40,7 +40,7 @@ struct batch_vals {
 	u32 size;
 };
 
-static inline int num_primitives(const struct batch_vals *bv)
+static int num_primitives(const struct batch_vals *bv)
 {
 	/*
 	 * We need to saturate the GPU with work in order to dispatch
@@ -240,7 +240,7 @@ gen7_emit_state_base_address(struct batch_chunk *batch,
 	/* general */
 	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
 	/* surface */
-	*cs++ = batch_addr(batch) | surface_state_base | BASE_ADDRESS_MODIFY;
+	*cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY;
 	/* dynamic */
 	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
 	/* indirect */
@@ -353,19 +353,21 @@ static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
 
 static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
 {
-	u32 *cs = batch_alloc_items(batch, 0, 8);
+	u32 *cs = batch_alloc_items(batch, 0, 10);
 
 	/* ivb: Stall before STATE_CACHE_INVALIDATE */
-	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = GFX_OP_PIPE_CONTROL(5);
 	*cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
 		PIPE_CONTROL_CS_STALL;
 	*cs++ = 0;
 	*cs++ = 0;
+	*cs++ = 0;
 
-	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = GFX_OP_PIPE_CONTROL(5);
 	*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
 	*cs++ = 0;
 	*cs++ = 0;
+	*cs++ = 0;
 
 	batch_advance(batch, cs);
 }
@@ -391,12 +393,14 @@ static void emit_batch(struct i915_vma * const vma,
 						     desc_count);
 
 	/* Reset inherited context registers */
+	gen7_emit_pipeline_flush(&cmds);
 	gen7_emit_pipeline_invalidate(&cmds);
 	batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
 	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
 	batch_add(&cmds, 0xffff0000);
 	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
 	batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+	gen7_emit_pipeline_invalidate(&cmds);
 	gen7_emit_pipeline_flush(&cmds);
 
 	/* Switch to the media pipeline and our base address */
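The renderclear fix works because batch sizing is exact: GFX_OP_PIPE_CONTROL(n) encodes an n-dword command, and batch_alloc_items() must be given the precise dword total, hence two 5-dword PIPE_CONTROLs moving the allocation from 8 to 10. A hypothetical helper showing the rule in isolation:

static void emit_cs_stall(struct batch_chunk *batch)
{
	u32 *cs = batch_alloc_items(batch, 0, 5);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0; /* address */
	*cs++ = 0; /* data */
	*cs++ = 0;

	batch_advance(batch, cs);
}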
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
new file mode 100644
index 0000000000000000000000000000000000000000..07ba524da90b303ac4b523e5ec078d8deea760de
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -0,0 +1,635 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#include "gen8_engine_cs.h"
+#include "i915_drv.h"
+#include "intel_lrc.h"
+#include "intel_gpu_commands.h"
+#include "intel_ring.h"
+
+int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+	bool vf_flush_wa = false, dc_flush_wa = false;
+	u32 *cs, flags = 0;
+	int len;
+
+	flags |= PIPE_CONTROL_CS_STALL;
+
+	if (mode & EMIT_FLUSH) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+		flags |= PIPE_CONTROL_FLUSH_ENABLE;
+	}
+
+	if (mode & EMIT_INVALIDATE) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_QW_WRITE;
+		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+		/*
+		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
+		 * pipe control.
+		 */
+		if (IS_GEN(rq->engine->i915, 9))
+			vf_flush_wa = true;
+
+		/* WaForGAMHang:kbl */
+		if (IS_KBL_GT_REVID(rq->engine->i915, 0, KBL_REVID_B0))
+			dc_flush_wa = true;
+	}
+
+	len = 6;
+
+	if (vf_flush_wa)
+		len += 6;
+
+	if (dc_flush_wa)
+		len += 12;
+
+	cs = intel_ring_begin(rq, len);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	if (vf_flush_wa)
+		cs = gen8_emit_pipe_control(cs, 0, 0);
+
+	if (dc_flush_wa)
+		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
+					    0);
+
+	cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+
+	if (dc_flush_wa)
+		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
+
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
+{
+	u32 cmd, *cs;
+
+	cs = intel_ring_begin(rq, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	cmd = MI_FLUSH_DW + 1;
+
+	/*
+	 * We always require a command barrier so that subsequent
+	 * commands, such as breadcrumb interrupts, are strictly ordered
+	 * wrt the contents of the write cache being flushed to memory
+	 * (and thus being coherent from the CPU).
+	 */
+	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+	if (mode & EMIT_INVALIDATE) {
+		cmd |= MI_INVALIDATE_TLB;
+		if (rq->engine->class == VIDEO_DECODE_CLASS)
+			cmd |= MI_INVALIDATE_BSD;
+	}
+
+	*cs++ = cmd;
+	*cs++ = LRC_PPHWSP_SCRATCH_ADDR;
+	*cs++ = 0; /* upper addr */
+	*cs++ = 0; /* value */
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+	if (mode & EMIT_FLUSH) {
+		u32 *cs;
+		u32 flags = 0;
+
+		flags |= PIPE_CONTROL_CS_STALL;
+
+		flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+		flags |= PIPE_CONTROL_FLUSH_ENABLE;
+		flags |= PIPE_CONTROL_QW_WRITE;
+		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+		cs = intel_ring_begin(rq, 6);
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
+
+		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+		intel_ring_advance(rq, cs);
+	}
+
+	if (mode & EMIT_INVALIDATE) {
+		u32 *cs;
+		u32 flags = 0;
+
+		flags |= PIPE_CONTROL_CS_STALL;
+
+		flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_QW_WRITE;
+		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+		cs = intel_ring_begin(rq, 6);
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
+
+		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+		intel_ring_advance(rq, cs);
+	}
+
+	return 0;
+}
+
+static u32 preparser_disable(bool state)
+{
+	return MI_ARB_CHECK | 1 << 8 | state;
+}
+
+static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine)
+{
+	static const i915_reg_t vd[] = {
+		GEN12_VD0_AUX_NV,
+		GEN12_VD1_AUX_NV,
+		GEN12_VD2_AUX_NV,
+		GEN12_VD3_AUX_NV,
+	};
+
+	static const i915_reg_t ve[] = {
+		GEN12_VE0_AUX_NV,
+		GEN12_VE1_AUX_NV,
+	};
+
+	if (engine->class == VIDEO_DECODE_CLASS)
+		return vd[engine->instance];
+
+	if (engine->class == VIDEO_ENHANCEMENT_CLASS)
+		return ve[engine->instance];
+
+	GEM_BUG_ON("unknown aux_inv reg\n");
+	return INVALID_MMIO_REG;
+}
+
+static u32 *gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs)
+{
+	*cs++ = MI_LOAD_REGISTER_IMM(1);
+	*cs++ = i915_mmio_reg_offset(inv_reg);
+	*cs++ = AUX_INV;
+	*cs++ = MI_NOOP;
+
+	return cs;
+}
+
+int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+	if (mode & EMIT_FLUSH) {
+		u32 flags = 0;
+		u32 *cs;
+
+		flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_FLUSH_L3;
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		/* Wa_1409600907:tgl */
+		flags |= PIPE_CONTROL_DEPTH_STALL;
+		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+		flags |= PIPE_CONTROL_FLUSH_ENABLE;
+
+		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+		flags |= PIPE_CONTROL_QW_WRITE;
+
+		flags |= PIPE_CONTROL_CS_STALL;
+
+		cs = intel_ring_begin(rq, 6);
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
+
+		cs = gen12_emit_pipe_control(cs,
+					     PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+					     flags, LRC_PPHWSP_SCRATCH_ADDR);
+		intel_ring_advance(rq, cs);
+	}
+
+	if (mode & EMIT_INVALIDATE) {
+		u32 flags = 0;
+		u32 *cs;
+
+		flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+
+		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+		flags |= PIPE_CONTROL_QW_WRITE;
+
+		flags |= PIPE_CONTROL_CS_STALL;
+
+		cs = intel_ring_begin(rq, 8 + 4);
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
+
+		/*
+		 * Prevent the pre-parser from skipping past the TLB
+		 * invalidate and loading a stale page for the batch
+		 * buffer / request payload.
+		 */
+		*cs++ = preparser_disable(true);
+
+		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+
+		/* hsdes: 1809175790 */
+		cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs);
+
+		*cs++ = preparser_disable(false);
+		intel_ring_advance(rq, cs);
+	}
+
+	return 0;
+}
+
+int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
+{
+	intel_engine_mask_t aux_inv = 0;
+	u32 cmd, *cs;
+
+	cmd = 4;
+	if (mode & EMIT_INVALIDATE)
+		cmd += 2;
+	if (mode & EMIT_INVALIDATE)
+		aux_inv = rq->engine->mask & ~BIT(BCS0);
+	if (aux_inv)
+		cmd += 2 * hweight8(aux_inv) + 2;
+
+	cs = intel_ring_begin(rq, cmd);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	if (mode & EMIT_INVALIDATE)
+		*cs++ = preparser_disable(true);
+
+	cmd = MI_FLUSH_DW + 1;
+
+	/*
+	 * We always require a command barrier so that subsequent
+	 * commands, such as breadcrumb interrupts, are strictly ordered
+	 * wrt the contents of the write cache being flushed to memory
+	 * (and thus being coherent from the CPU).
+	 */
+	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+	if (mode & EMIT_INVALIDATE) {
+		cmd |= MI_INVALIDATE_TLB;
+		if (rq->engine->class == VIDEO_DECODE_CLASS)
+			cmd |= MI_INVALIDATE_BSD;
+	}
+
+	*cs++ = cmd;
+	*cs++ = LRC_PPHWSP_SCRATCH_ADDR;
+	*cs++ = 0; /* upper addr */
+	*cs++ = 0; /* value */
+
+	if (aux_inv) { /* hsdes: 1809175790 */
+		struct intel_engine_cs *engine;
+		unsigned int tmp;
+
+		*cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv));
+		for_each_engine_masked(engine, rq->engine->gt,
+				       aux_inv, tmp) {
+			*cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
+			*cs++ = AUX_INV;
+		}
+		*cs++ = MI_NOOP;
+	}
+
+	if (mode & EMIT_INVALIDATE)
+		*cs++ = preparser_disable(false);
+
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+static u32 preempt_address(struct intel_engine_cs *engine)
+{
+	return (i915_ggtt_offset(engine->status_page.vma) +
+		I915_GEM_HWS_PREEMPT_ADDR);
+}
+
+static u32 hwsp_offset(const struct i915_request *rq)
+{
+	const struct intel_timeline_cacheline *cl;
+
+	/* Before the request is executed, the timeline/cacheline is fixed */
+
+	cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
+	if (cl)
+		return cl->ggtt_offset;
+
+	return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
+}
+
+int gen8_emit_init_breadcrumb(struct i915_request *rq)
+{
+	u32 *cs;
+
+	GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
+	if (!i915_request_timeline(rq)->has_initial_breadcrumb)
+		return 0;
+
+	cs = intel_ring_begin(rq, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+	*cs++ = hwsp_offset(rq);
+	*cs++ = 0;
+	*cs++ = rq->fence.seqno - 1;
+
+	/*
+	 * Check if we have been preempted before we even get started.
+	 *
+	 * After this point i915_request_started() reports true, even if
+	 * we get preempted and so are no longer running.
+	 *
+	 * i915_request_started() is used during preemption processing
+	 * to decide if the request is currently inside the user payload
+	 * or spinning on a kernel semaphore (or earlier). For no-preemption
+	 * requests, we do allow preemption on the semaphore before the user
+	 * payload, but do not allow preemption once the request is started.
+	 *
+	 * i915_request_started() is similarly used during GPU hangs to
+	 * determine if the user's payload was guilty, and if so, the
+	 * request is banned. Before the request is started, it is assumed
+	 * to be unharmed and an innocent victim of another's hang.
+	 */
+	*cs++ = MI_NOOP;
+	*cs++ = MI_ARB_CHECK;
+
+	intel_ring_advance(rq, cs);
+
+	/* Record the updated position of the request's payload */
+	rq->infix = intel_ring_offset(rq, cs);
+
+	__set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
+
+	return 0;
+}
+
+int gen8_emit_bb_start_noarb(struct i915_request *rq,
+			     u64 offset, u32 len,
+			     const unsigned int flags)
+{
+	u32 *cs;
+
+	cs = intel_ring_begin(rq, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	/*
+	 * WaDisableCtxRestoreArbitration:bdw,chv
+	 *
+	 * We don't need to perform MI_ARB_ENABLE as often as we do (in
+	 * particular all the gen that do not need the w/a at all!), if we
+	 * took care to make sure that on every switch into this context
+	 * (both ordinary and for preemption) that arbitration was enabled
+	 * we would be fine.  However, for gen8 there is another w/a that
+	 * requires us to not preempt inside GPGPU execution, so we keep
+	 * arbitration disabled for gen8 batches. Arbitration will be
+	 * re-enabled before we close the request
+	 * (engine->emit_fini_breadcrumb).
+	 */
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+	/* FIXME(BDW+): Address space and security selectors. */
+	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
+		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+	*cs++ = lower_32_bits(offset);
+	*cs++ = upper_32_bits(offset);
+
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+int gen8_emit_bb_start(struct i915_request *rq,
+		       u64 offset, u32 len,
+		       const unsigned int flags)
+{
+	u32 *cs;
+
+	if (unlikely(i915_request_has_nopreempt(rq)))
+		return gen8_emit_bb_start_noarb(rq, offset, len, flags);
+
+	cs = intel_ring_begin(rq, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
+		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+	*cs++ = lower_32_bits(offset);
+	*cs++ = upper_32_bits(offset);
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+	*cs++ = MI_NOOP;
+
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+static void assert_request_valid(struct i915_request *rq)
+{
+	struct intel_ring *ring __maybe_unused = rq->ring;
+
+	/* Can we unwind this request without appearing to go forwards? */
+	GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
+}
+
+/*
+ * Reserve space for 2 NOOPs at the end of each request to be
+ * used as a workaround for not being allowed to do lite
+ * restore with HEAD==TAIL (WaIdleLiteRestore).
+ */
+static u32 *gen8_emit_wa_tail(struct i915_request *rq, u32 *cs)
+{
+	/* Ensure there's always at least one preemption point per-request. */
+	*cs++ = MI_ARB_CHECK;
+	*cs++ = MI_NOOP;
+	rq->wa_tail = intel_ring_offset(rq, cs);
+
+	/* Check that entire request is less than half the ring */
+	assert_request_valid(rq);
+
+	return cs;
+}
+
+static u32 *emit_preempt_busywait(struct i915_request *rq, u32 *cs)
+{
+	*cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
+	*cs++ = MI_SEMAPHORE_WAIT |
+		MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_POLL |
+		MI_SEMAPHORE_SAD_EQ_SDD;
+	*cs++ = 0;
+	*cs++ = preempt_address(rq->engine);
+	*cs++ = 0;
+	*cs++ = MI_NOOP;
+
+	return cs;
+}
+
+static __always_inline u32 *
+gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
+{
+	*cs++ = MI_USER_INTERRUPT;
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+	if (intel_engine_has_semaphores(rq->engine))
+		cs = emit_preempt_busywait(rq, cs);
+
+	rq->tail = intel_ring_offset(rq, cs);
+	assert_ring_tail_valid(rq->ring, rq->tail);
+
+	return gen8_emit_wa_tail(rq, cs);
+}
+
+static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+	return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
+}
+
+u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+	return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
+}
+
+u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+	cs = gen8_emit_pipe_control(cs,
+				    PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+				    PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+				    PIPE_CONTROL_DC_FLUSH_ENABLE,
+				    0);
+
+	/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
+	cs = gen8_emit_ggtt_write_rcs(cs,
+				      rq->fence.seqno,
+				      hwsp_offset(rq),
+				      PIPE_CONTROL_FLUSH_ENABLE |
+				      PIPE_CONTROL_CS_STALL);
+
+	return gen8_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+	cs = gen8_emit_ggtt_write_rcs(cs,
+				      rq->fence.seqno,
+				      hwsp_offset(rq),
+				      PIPE_CONTROL_CS_STALL |
+				      PIPE_CONTROL_TILE_CACHE_FLUSH |
+				      PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+				      PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+				      PIPE_CONTROL_DC_FLUSH_ENABLE |
+				      PIPE_CONTROL_FLUSH_ENABLE);
+
+	return gen8_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+/*
+ * Note that the CS instruction pre-parser will not stall on the breadcrumb
+ * flush and will continue pre-fetching the instructions after it before the
+ * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
+ * BB_START/END instructions, so, even though we might pre-fetch the preamble
+ * of the next request before the memory has been flushed, we're guaranteed that
+ * we won't access the batch itself too early.
+ * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
+ * so, if the current request is modifying an instruction in the next request on
+ * the same intel_context, we might pre-fetch and then execute the pre-update
+ * instruction. To avoid this, the users of self-modifying code should either
+ * disable the parser around the code emitting the memory writes, via a new flag
+ * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
+ * the in-kernel use-cases we've opted to use a separate context, see
+ * reloc_gpu() as an example.
+ * All the above applies only to the instructions themselves. Non-inline data
+ * used by the instructions is not pre-fetched.
+ */
+
+static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
+{
+	*cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
+	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
+		MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_POLL |
+		MI_SEMAPHORE_SAD_EQ_SDD;
+	*cs++ = 0;
+	*cs++ = preempt_address(rq->engine);
+	*cs++ = 0;
+	*cs++ = 0;
+
+	return cs;
+}
+
+static __always_inline u32 *
+gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
+{
+	*cs++ = MI_USER_INTERRUPT;
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+	if (intel_engine_has_semaphores(rq->engine))
+		cs = gen12_emit_preempt_busywait(rq, cs);
+
+	rq->tail = intel_ring_offset(rq, cs);
+	assert_ring_tail_valid(rq->ring, rq->tail);
+
+	return gen8_emit_wa_tail(rq, cs);
+}
+
+u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+	/* XXX Stalling flush before seqno write; post-sync not */
+	cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
+	return gen12_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+	cs = gen12_emit_ggtt_write_rcs(cs,
+				       rq->fence.seqno,
+				       hwsp_offset(rq),
+				       PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+				       PIPE_CONTROL_CS_STALL |
+				       PIPE_CONTROL_TILE_CACHE_FLUSH |
+				       PIPE_CONTROL_FLUSH_L3 |
+				       PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+				       PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+				       /* Wa_1409600907:tgl */
+				       PIPE_CONTROL_DEPTH_STALL |
+				       PIPE_CONTROL_DC_FLUSH_ENABLE |
+				       PIPE_CONTROL_FLUSH_ENABLE);
+
+	return gen12_emit_fini_breadcrumb_tail(rq, cs);
+}
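These emitters are consumed through the existing engine vfuncs; a submission backend wires them up roughly as below (the setup function itself is a sketch, the vfunc fields and generation selector are the real ones):

static void setup_xcs_sketch(struct intel_engine_cs *engine)
{
	engine->emit_flush = gen8_emit_flush_xcs;
	engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
	engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
	engine->emit_bb_start = gen8_emit_bb_start;

	if (INTEL_GEN(engine->i915) >= 12) {
		engine->emit_flush = gen12_emit_flush_xcs;
		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
	}
}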
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc6e21d3662a677ac08bc41b1b72342d50b2d0d8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#ifndef __GEN8_ENGINE_CS_H__
+#define __GEN8_ENGINE_CS_H__
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+
+#include "intel_gpu_commands.h"
+
+struct i915_request;
+
+int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode);
+
+int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode);
+int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode);
+
+int gen8_emit_init_breadcrumb(struct i915_request *rq);
+
+int gen8_emit_bb_start_noarb(struct i915_request *rq,
+			     u64 offset, u32 len,
+			     const unsigned int flags);
+int gen8_emit_bb_start(struct i915_request *rq,
+		       u64 offset, u32 len,
+		       const unsigned int flags);
+
+u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+
+u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+
+static inline u32 *
+__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+{
+	memset(batch, 0, 6 * sizeof(u32));
+
+	batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
+	batch[1] = flags1;
+	batch[2] = offset;
+
+	return batch + 6;
+}
+
+static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+{
+	return __gen8_emit_pipe_control(batch, 0, flags, offset);
+}
+
+static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+{
+	return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
+}
+
+static inline u32 *
+__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
+{
+	*cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
+	*cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
+	*cs++ = offset;
+	*cs++ = 0;
+	*cs++ = value;
+	*cs++ = 0; /* We're thrashing one extra dword. */
+
+	return cs;
+}
+
+static inline u32 *
+gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+	/* We're using qword write, offset should be aligned to 8 bytes. */
+	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+	return __gen8_emit_write_rcs(cs,
+				     value,
+				     gtt_offset,
+				     0,
+				     flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
+}
+
+static inline u32 *
+gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
+{
+	/* We're using qword write, offset should be aligned to 8 bytes. */
+	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+	return __gen8_emit_write_rcs(cs,
+				     value,
+				     gtt_offset,
+				     flags0,
+				     flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
+}
+
+static inline u32 *
+__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+	*cs++ = (MI_FLUSH_DW + 1) | flags;
+	*cs++ = gtt_offset;
+	*cs++ = 0;
+	*cs++ = value;
+
+	return cs;
+}
+
+static inline u32 *
+gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+	GEM_BUG_ON(gtt_offset & (1 << 5));
+	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
+	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+	return __gen8_emit_flush_dw(cs,
+				    value,
+				    gtt_offset | MI_FLUSH_DW_USE_GTT,
+				    flags | MI_FLUSH_DW_OP_STOREDW);
+}
+
+#endif /* __GEN8_ENGINE_CS_H__ */
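Typical call pattern for these inlines, mirroring the flush emitters in gen8_engine_cs.c: reserve the exact dword count, chain the helper, then advance the ring:

/* Usage sketch only */
static int emit_state_invalidate(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 6); /* one 6-dword PIPE_CONTROL */
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cs = gen8_emit_pipe_control(cs,
				    PIPE_CONTROL_CS_STALL |
				    PIPE_CONTROL_STATE_CACHE_INVALIDATE,
				    LRC_PPHWSP_SCRATCH_ADDR);
	intel_ring_advance(rq, cs);

	return 0;
}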
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index a37c968ef8f7cc30127a214041bc50666dda52f0..755522ced60de733fde89fde6c44b9d6da406edc 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -109,7 +109,7 @@ static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
 
 #define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
 
-static inline unsigned int
+static unsigned int
 gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
 {
 	const int shift = gen8_pd_shift(lvl);
@@ -125,7 +125,7 @@ gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
 		return i915_pde_index(end, shift) - *idx;
 }
 
-static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
+static bool gen8_pd_contains(u64 start, u64 end, int lvl)
 {
 	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
 
@@ -133,7 +133,7 @@ static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
 	return (start ^ end) & mask && (start & ~mask) == 0;
 }
 
-static inline unsigned int gen8_pt_count(u64 start, u64 end)
+static unsigned int gen8_pt_count(u64 start, u64 end)
 {
 	GEM_BUG_ON(start >= end);
 	if ((start ^ end) >> gen8_pd_shift(1))
@@ -142,14 +142,13 @@ static inline unsigned int gen8_pt_count(u64 start, u64 end)
 		return end - start;
 }
 
-static inline unsigned int
-gen8_pd_top_count(const struct i915_address_space *vm)
+static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
 {
 	unsigned int shift = __gen8_pte_shift(vm->top);
 	return (vm->total + (1ull << shift) - 1) >> shift;
 }
 
-static inline struct i915_page_directory *
+static struct i915_page_directory *
 gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
 {
 	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
@@ -160,7 +159,7 @@ gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
 		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
 }
 
-static inline struct i915_page_directory *
+static struct i915_page_directory *
 gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
 {
 	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 1d1757584f49026d7f3b6fb7cedfb875fce68a0f..34a645d6babdf285d74c9d215df07a9c27bd635b 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -234,6 +234,7 @@ static void signal_irq_work(struct irq_work *work)
 		intel_breadcrumbs_disarm_irq(b);
 
 	rcu_read_lock();
+	atomic_inc(&b->signaler_active);
 	list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
 		struct i915_request *rq;
 
@@ -256,19 +257,20 @@ static void signal_irq_work(struct irq_work *work)
 			list_del_rcu(&rq->signal_link);
 			release = remove_signaling_context(b, ce);
 			spin_unlock(&ce->signal_lock);
+			if (release) {
+				if (intel_timeline_is_last(ce->timeline, rq))
+					add_retire(b, ce->timeline);
+				intel_context_put(ce);
+			}
 
 			if (__dma_fence_signal(&rq->fence))
 				/* We own signal_node now, xfer to local list */
 				signal = slist_add(&rq->signal_node, signal);
 			else
 				i915_request_put(rq);
-
-			if (release) {
-				add_retire(b, ce->timeline);
-				intel_context_put(ce);
-			}
 		}
 	}
+	atomic_dec(&b->signaler_active);
 	rcu_read_unlock();
 
 	llist_for_each_safe(signal, sn, signal) {
@@ -327,17 +329,19 @@ void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)
 	spin_unlock_irqrestore(&b->irq_lock, flags);
 }
 
-void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
+void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
 {
-	/* Kick the work once more to drain the signalers */
+	if (!READ_ONCE(b->irq_armed))
+		return;
+
+	/* Kick the work once more to drain the signalers, and disarm the irq */
 	irq_work_sync(&b->irq_work);
-	while (unlikely(READ_ONCE(b->irq_armed))) {
+	while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
 		local_irq_disable();
 		signal_irq_work(&b->irq_work);
 		local_irq_enable();
 		cond_resched();
 	}
-	GEM_BUG_ON(!list_empty(&b->signalers));
 }
 
 void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
@@ -469,6 +473,39 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
 	i915_request_put(rq);
 }
 
+void intel_context_remove_breadcrumbs(struct intel_context *ce,
+				      struct intel_breadcrumbs *b)
+{
+	struct i915_request *rq, *rn;
+	bool release = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ce->signal_lock, flags);
+
+	if (list_empty(&ce->signals))
+		goto unlock;
+
+	list_for_each_entry_safe(rq, rn, &ce->signals, signal_link) {
+		GEM_BUG_ON(!__i915_request_is_complete(rq));
+		if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
+					&rq->fence.flags))
+			continue;
+
+		list_del_rcu(&rq->signal_link);
+		irq_signal_request(rq, b);
+		i915_request_put(rq);
+	}
+	release = remove_signaling_context(b, ce);
+
+unlock:
+	spin_unlock_irqrestore(&ce->signal_lock, flags);
+	if (release)
+		intel_context_put(ce);
+
+	while (atomic_read(&b->signaler_active))
+		cpu_relax();
+}
+
 static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
 {
 	struct intel_context *ce;
@@ -481,8 +518,8 @@ static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
 		list_for_each_entry_rcu(rq, &ce->signals, signal_link)
 			drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
 				   rq->fence.context, rq->fence.seqno,
-				   i915_request_completed(rq) ? "!" :
-				   i915_request_started(rq) ? "*" :
+				   __i915_request_is_complete(rq) ? "!" :
+				   __i915_request_has_started(rq) ? "*" :
 				   "",
 				   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
 	}
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
index ed3d1deabfbd8773445e3e13b1afd81c2f830a5a..3ce5ce270b0446797e7f865f78af5a66bf847928 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
@@ -6,6 +6,7 @@
 #ifndef __INTEL_BREADCRUMBS__
 #define __INTEL_BREADCRUMBS__
 
+#include <linux/atomic.h>
 #include <linux/irq_work.h>
 
 #include "intel_engine_types.h"
@@ -19,7 +20,18 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine);
 void intel_breadcrumbs_free(struct intel_breadcrumbs *b);
 
 void intel_breadcrumbs_reset(struct intel_breadcrumbs *b);
-void intel_breadcrumbs_park(struct intel_breadcrumbs *b);
+void __intel_breadcrumbs_park(struct intel_breadcrumbs *b);
+
+static inline void intel_breadcrumbs_unpark(struct intel_breadcrumbs *b)
+{
+	atomic_inc(&b->active);
+}
+
+static inline void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
+{
+	if (atomic_dec_and_test(&b->active))
+		__intel_breadcrumbs_park(b);
+}
 
 static inline void
 intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
@@ -33,4 +45,7 @@ void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
 bool i915_request_enable_breadcrumb(struct i915_request *request);
 void i915_request_cancel_breadcrumb(struct i915_request *request);
 
+void intel_context_remove_breadcrumbs(struct intel_context *ce,
+				      struct intel_breadcrumbs *b);
+
 #endif /* __INTEL_BREADCRUMBS__ */
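Parking is now reference counted against unparking, so only the final park drains the signalers. The expected pairing from the engine power-management side (the engine_*_sketch anchors are hypothetical):

static void engine_unpark_sketch(struct intel_engine_cs *engine)
{
	intel_breadcrumbs_unpark(engine->breadcrumbs);
	/* ... power up the engine ... */
}

static void engine_park_sketch(struct intel_engine_cs *engine)
{
	/* ... retire and flush outstanding work ... */
	intel_breadcrumbs_park(engine->breadcrumbs);
	/* the last park calls __intel_breadcrumbs_park() to drain */
}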
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
index a74bb3062bd8a9ecc6c17f8709f30ff73fc3f4d0..3a084ce8ff5e16a9e818c5f4b2346812b81a5784 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
@@ -29,17 +29,20 @@
  * the overhead of waking that client is much preferred.
  */
 struct intel_breadcrumbs {
-	/* Not all breadcrumbs are attached to physical HW */
-	struct intel_engine_cs *irq_engine;
+	atomic_t active;
 
 	spinlock_t signalers_lock; /* protects the list of signalers */
 	struct list_head signalers;
 	struct llist_head signaled_requests;
+	atomic_t signaler_active;
 
 	spinlock_t irq_lock; /* protects the interrupt from hardirq context */
 	struct irq_work irq_work; /* for use from inside irq_lock */
 	unsigned int irq_enabled;
 	bool irq_armed;
+
+	/* Not all breadcrumbs are attached to physical HW */
+	struct intel_engine_cs *irq_engine;
 };
 
 #endif /* __INTEL_BREADCRUMBS_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index fda2eba81e222a65384677d0234a77846ecaed9d..d24ab6fa0ee5d17432dd7c6b86933b55a4b6c5b1 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -191,6 +191,11 @@ static inline bool intel_context_is_closed(const struct intel_context *ce)
 	return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
 }
 
+static inline bool intel_context_has_inflight(const struct intel_context *ce)
+{
+	return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
+}
+
 static inline bool intel_context_use_semaphores(const struct intel_context *ce)
 {
 	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
@@ -248,16 +253,14 @@ intel_context_clear_nopreempt(struct intel_context *ce)
 
 static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
 {
-	const u32 period =
-		RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+	const u32 period = ce->engine->gt->clock_period_ns;
 
 	return READ_ONCE(ce->runtime.total) * period;
 }
 
 static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
 {
-	const u32 period =
-		RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+	const u32 period = ce->engine->gt->clock_period_ns;
 
 	return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index b9c8163978a38094e2c113777940e31340d7c35e..8dfd8f656aaa679fbb0a14d3f5f512059c30e14c 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -9,7 +9,6 @@
 #include "intel_engine_pm.h"
 #include "intel_gpu_commands.h"
 #include "intel_lrc.h"
-#include "intel_lrc_reg.h"
 #include "intel_ring.h"
 #include "intel_sseu.h"
 
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 52fa9c1327464f0cfa6ec85845b3e4776a186066..e10d78601bbd830f84a740dd4507bd3be093db89 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -30,6 +30,10 @@ struct intel_context;
 struct intel_ring;
 
 struct intel_context_ops {
+	unsigned long flags;
+#define COPS_HAS_INFLIGHT_BIT 0
+#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)
+
 	int (*alloc)(struct intel_context *ce);
 
 	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
@@ -58,8 +62,12 @@ struct intel_context {
 
 	struct intel_engine_cs *engine;
 	struct intel_engine_cs *inflight;
-#define intel_context_inflight(ce) ptr_mask_bits(READ_ONCE((ce)->inflight), 2)
-#define intel_context_inflight_count(ce) ptr_unmask_bits(READ_ONCE((ce)->inflight), 2)
+#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
+#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
+#define intel_context_inflight(ce) \
+	__intel_context_inflight(READ_ONCE((ce)->inflight))
+#define intel_context_inflight_count(ce) \
+	__intel_context_inflight_count(READ_ONCE((ce)->inflight))
 
 	struct i915_address_space *vm;
 	struct i915_gem_context __rcu *gem_context;
@@ -81,12 +89,13 @@ struct intel_context {
 	unsigned long flags;
 #define CONTEXT_BARRIER_BIT		0
 #define CONTEXT_ALLOC_BIT		1
-#define CONTEXT_VALID_BIT		2
-#define CONTEXT_CLOSED_BIT		3
-#define CONTEXT_USE_SEMAPHORES		4
-#define CONTEXT_BANNED			5
-#define CONTEXT_FORCE_SINGLE_SUBMISSION	6
-#define CONTEXT_NOPREEMPT		7
+#define CONTEXT_INIT_BIT		2
+#define CONTEXT_VALID_BIT		3
+#define CONTEXT_CLOSED_BIT		4
+#define CONTEXT_USE_SEMAPHORES		5
+#define CONTEXT_BANNED			6
+#define CONTEXT_FORCE_SINGLE_SUBMISSION	7
+#define CONTEXT_NOPREEMPT		8
 
 	u32 *lrc_reg_state;
 	union {
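The reworked inflight macros above lean on pointer tagging: engine pointers are at least 8-byte aligned, so the low three bits are free to carry a small submission count alongside the pointer. A hypothetical sketch of the idea — the helper names below are invented here, not the kernel's ptr_mask_bits()/ptr_unmask_bits():

#include <stdint.h>

#define TAG_BITS 3
#define TAG_MASK (((uintptr_t)1 << TAG_BITS) - 1)

/* Pack a small count into the low (alignment) bits of a pointer. */
static inline void *ptr_pack_count(void *p, unsigned int count)
{
	return (void *)(((uintptr_t)p & ~TAG_MASK) | (count & TAG_MASK));
}

/* Recover the pointer, discarding the tag bits. */
static inline void *ptr_strip_count(void *tagged)
{
	return (void *)((uintptr_t)tagged & ~TAG_MASK);
}

/* Recover the count carried in the tag bits. */
static inline unsigned int ptr_get_count(void *tagged)
{
	return (uintptr_t)tagged & TAG_MASK;
}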
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 760fefdfe39216361b74dd1a8faf330b0e045641..47ee8578e51112a058673dfe04cf51b55337d341 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -15,7 +15,6 @@
 #include "i915_selftest.h"
 #include "gt/intel_timeline.h"
 #include "intel_engine_types.h"
-#include "intel_gpu_commands.h"
 #include "intel_workarounds.h"
 
 struct drm_printer;
@@ -223,91 +222,6 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
 
 void intel_engine_init_execlists(struct intel_engine_cs *engine);
 
-static inline u32 *__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
-{
-	memset(batch, 0, 6 * sizeof(u32));
-
-	batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
-	batch[1] = flags1;
-	batch[2] = offset;
-
-	return batch + 6;
-}
-
-static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
-{
-	return __gen8_emit_pipe_control(batch, 0, flags, offset);
-}
-
-static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
-{
-	return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
-}
-
-static inline u32 *
-__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
-{
-	*cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
-	*cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
-	*cs++ = offset;
-	*cs++ = 0;
-	*cs++ = value;
-	*cs++ = 0; /* We're thrashing one extra dword. */
-
-	return cs;
-}
-
-static inline u32*
-gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
-{
-	/* We're using qword write, offset should be aligned to 8 bytes. */
-	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
-	return __gen8_emit_write_rcs(cs,
-				     value,
-				     gtt_offset,
-				     0,
-				     flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
-}
-
-static inline u32*
-gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
-{
-	/* We're using qword write, offset should be aligned to 8 bytes. */
-	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
-	return __gen8_emit_write_rcs(cs,
-				     value,
-				     gtt_offset,
-				     flags0,
-				     flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
-}
-
-static inline u32 *
-__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
-{
-	*cs++ = (MI_FLUSH_DW + 1) | flags;
-	*cs++ = gtt_offset;
-	*cs++ = 0;
-	*cs++ = value;
-
-	return cs;
-}
-
-static inline u32 *
-gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
-{
-	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
-	GEM_BUG_ON(gtt_offset & (1 << 5));
-	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
-	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
-	return __gen8_emit_flush_dw(cs,
-				    value,
-				    gtt_offset | MI_FLUSH_DW_USE_GTT,
-				    flags | MI_FLUSH_DW_OP_STOREDW);
-}
-
 static inline void __intel_engine_reset(struct intel_engine_cs *engine,
 					bool stalled)
 {
@@ -318,7 +232,12 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
 
 bool intel_engines_are_idle(struct intel_gt *gt);
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
-void intel_engine_flush_submission(struct intel_engine_cs *engine);
+
+void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync);
+static inline void intel_engine_flush_submission(struct intel_engine_cs *engine)
+{
+	__intel_engine_flush_submission(engine, true);
+}
 
 void intel_engines_reset_default_submission(struct intel_gt *gt);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 0b31670343f5a2a4b04e10a4ed81c0e37be257a9..fb1b1d09697511ab88338ef8fe460a36e9d80265 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -33,12 +33,14 @@
 #include "intel_engine.h"
 #include "intel_engine_pm.h"
 #include "intel_engine_user.h"
+#include "intel_execlists_submission.h"
 #include "intel_gt.h"
 #include "intel_gt_requests.h"
 #include "intel_gt_pm.h"
-#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
 #include "intel_reset.h"
 #include "intel_ring.h"
+#include "uc/intel_guc_submission.h"
 
 /* Haswell does have the CXT_SIZE register however it does not appear to be
  * valid. Now, docs explain in dwords what is in the context object. The full
@@ -340,7 +342,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	engine->schedule = NULL;
 
 	ewma__engine_latency_init(&engine->latency);
-	seqlock_init(&engine->stats.lock);
+	seqcount_init(&engine->stats.lock);
 
 	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
 
@@ -647,6 +649,8 @@ static int init_status_page(struct intel_engine_cs *engine)
 	void *vaddr;
 	int ret;
 
+	INIT_LIST_HEAD(&engine->status_page.timelines);
+
 	/*
 	 * Though the HWS register does support 36bit addresses, historically
 	 * we have had hangs and corruption reported due to wild writes if
@@ -723,6 +727,9 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 	intel_engine_init_whitelist(engine);
 	intel_engine_init_ctx_wa(engine);
 
+	if (INTEL_GEN(engine->i915) >= 12)
+		engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
+
 	return 0;
 
 err_status:
@@ -829,6 +836,21 @@ create_pinned_context(struct intel_engine_cs *engine,
 	return ce;
 }
 
+static void destroy_pinned_context(struct intel_context *ce)
+{
+	struct intel_engine_cs *engine = ce->engine;
+	struct i915_vma *hwsp = engine->status_page.vma;
+
+	GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);
+
+	mutex_lock(&hwsp->vm->mutex);
+	list_del(&ce->timeline->engine_link);
+	mutex_unlock(&hwsp->vm->mutex);
+
+	intel_context_unpin(ce);
+	intel_context_put(ce);
+}
+
 static struct intel_context *
 create_kernel_context(struct intel_engine_cs *engine)
 {
@@ -889,7 +911,9 @@ int intel_engines_init(struct intel_gt *gt)
 	enum intel_engine_id id;
 	int err;
 
-	if (HAS_EXECLISTS(gt->i915))
+	if (intel_uc_uses_guc_submission(&gt->uc))
+		setup = intel_guc_submission_setup;
+	else if (HAS_EXECLISTS(gt->i915))
 		setup = intel_execlists_submission_setup;
 	else
 		setup = intel_ring_submission_setup;
@@ -925,7 +949,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	GEM_BUG_ON(!list_empty(&engine->active.requests));
 	tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
 
-	cleanup_status_page(engine);
 	intel_breadcrumbs_free(engine->breadcrumbs);
 
 	intel_engine_fini_retire(engine);
@@ -934,11 +957,11 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	if (engine->default_state)
 		fput(engine->default_state);
 
-	if (engine->kernel_context) {
-		intel_context_unpin(engine->kernel_context);
-		intel_context_put(engine->kernel_context);
-	}
+	if (engine->kernel_context)
+		destroy_pinned_context(engine->kernel_context);
+
 	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
+	cleanup_status_page(engine);
 
 	intel_wa_list_free(&engine->ctx_wa_list);
 	intel_wa_list_free(&engine->wa_list);
@@ -1002,32 +1025,50 @@ static unsigned long stop_timeout(const struct intel_engine_cs *engine)
 	return READ_ONCE(engine->props.stop_timeout_ms);
 }
 
-int intel_engine_stop_cs(struct intel_engine_cs *engine)
+static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
+				  int fast_timeout_us,
+				  int slow_timeout_ms)
 {
 	struct intel_uncore *uncore = engine->uncore;
-	const u32 base = engine->mmio_base;
-	const i915_reg_t mode = RING_MI_MODE(base);
+	const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
 	int err;
 
+	intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
+	err = __intel_wait_for_register_fw(engine->uncore, mode,
+					   MODE_IDLE, MODE_IDLE,
+					   fast_timeout_us,
+					   slow_timeout_ms,
+					   NULL);
+
+	/* A final mmio read to let GPU writes be hopefully flushed to memory */
+	intel_uncore_posting_read_fw(uncore, mode);
+	return err;
+}
+
+int intel_engine_stop_cs(struct intel_engine_cs *engine)
+{
+	int err = 0;
+
 	if (INTEL_GEN(engine->i915) < 3)
 		return -ENODEV;
 
 	ENGINE_TRACE(engine, "\n");
+	if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
+		ENGINE_TRACE(engine,
+			     "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
+			     ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
+			     ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);
 
-	intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
-
-	err = 0;
-	if (__intel_wait_for_register_fw(uncore,
-					 mode, MODE_IDLE, MODE_IDLE,
-					 1000, stop_timeout(engine),
-					 NULL)) {
-		ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
-		err = -ETIMEDOUT;
+		/*
+		 * Sometimes we observe that the idle flag is not
+		 * set even though the ring is empty. So double
+		 * check before giving up.
+		 */
+		if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
+		    (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
+			err = -ETIMEDOUT;
 	}
 
-	/* A final mmio read to let GPU writes be hopefully flushed to memory */
-	intel_uncore_posting_read_fw(uncore, mode);
-
 	return err;
 }
 
@@ -1189,17 +1230,13 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 	return idle;
 }
 
-void intel_engine_flush_submission(struct intel_engine_cs *engine)
+void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
 {
 	struct tasklet_struct *t = &engine->execlists.tasklet;
 
 	if (!t->func)
 		return;
 
-	/* Synchronise and wait for the tasklet on another CPU */
-	tasklet_kill(t);
-
-	/* Having cancelled the tasklet, ensure that is run */
 	local_bh_disable();
 	if (tasklet_trylock(t)) {
 		/* Must wait for any GPU reset in progress. */
@@ -1208,6 +1245,10 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine)
 		tasklet_unlock(t);
 	}
 	local_bh_enable();
+
+	/* Synchronise and wait for the tasklet on another CPU */
+	if (sync)
+		tasklet_unlock_wait(t);
 }
 
 /**
@@ -1273,8 +1314,12 @@ void intel_engines_reset_default_submission(struct intel_gt *gt)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
-	for_each_engine(engine, gt, id)
+	for_each_engine(engine, gt, id) {
+		if (engine->sanitize)
+			engine->sanitize(engine);
+
 		engine->set_default_submission(engine);
+	}
 }
 
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
@@ -1294,44 +1339,6 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
 	}
 }
 
-static int print_sched_attr(const struct i915_sched_attr *attr,
-			    char *buf, int x, int len)
-{
-	if (attr->priority == I915_PRIORITY_INVALID)
-		return x;
-
-	x += snprintf(buf + x, len - x,
-		      " prio=%d", attr->priority);
-
-	return x;
-}
-
-static void print_request(struct drm_printer *m,
-			  struct i915_request *rq,
-			  const char *prefix)
-{
-	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
-	char buf[80] = "";
-	int x = 0;
-
-	x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
-
-	drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
-		   prefix,
-		   rq->fence.context, rq->fence.seqno,
-		   i915_request_completed(rq) ? "!" :
-		   i915_request_started(rq) ? "*" :
-		   "",
-		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-			    &rq->fence.flags) ? "+" :
-		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
-			    &rq->fence.flags) ? "-" :
-		   "",
-		   buf,
-		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
-		   name);
-}
-
 static struct intel_timeline *get_timeline(struct i915_request *rq)
 {
 	struct intel_timeline *tl;
@@ -1480,7 +1487,9 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 		drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
 	}
 
-	if (HAS_EXECLISTS(dev_priv)) {
+	if (intel_engine_in_guc_submission_mode(engine)) {
+		/* nothing to print yet */
+	} else if (HAS_EXECLISTS(dev_priv)) {
 		struct i915_request * const *port, *rq;
 		const u32 *hws =
 			&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -1529,7 +1538,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 					intel_context_is_banned(rq->context) ? "*" : "");
 			len += print_ring(hdr + len, sizeof(hdr) - len, rq);
 			scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
-			print_request(m, rq, hdr);
+			i915_request_show(m, rq, hdr, 0);
 		}
 		for (port = execlists->pending; (rq = *port); port++) {
 			char hdr[160];
@@ -1543,7 +1552,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 					intel_context_is_banned(rq->context) ? "*" : "");
 			len += print_ring(hdr + len, sizeof(hdr) - len, rq);
 			scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
-			print_request(m, rq, hdr);
+			i915_request_show(m, rq, hdr, 0);
 		}
 		rcu_read_unlock();
 		execlists_active_unlock_bh(execlists);
@@ -1667,7 +1676,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 			   ktime_to_ms(intel_engine_get_busy_time(engine,
 								  &dummy)));
 	drm_printf(m, "\tForcewake: %x domains, %d active\n",
-		   engine->fw_domain, atomic_read(&engine->fw_active));
+		   engine->fw_domain, READ_ONCE(engine->fw_active));
 
 	rcu_read_lock();
 	rq = READ_ONCE(engine->heartbeat.systole);
@@ -1687,7 +1696,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	if (rq) {
 		struct intel_timeline *tl = get_timeline(rq);
 
-		print_request(m, rq, "\t\tactive ");
+		i915_request_show(m, rq, "\t\tactive ", 0);
 
 		drm_printf(m, "\t\tring->start:  0x%08x\n",
 			   i915_ggtt_offset(rq->ring->vma));
@@ -1725,7 +1734,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
 	}
 
-	intel_execlists_show_requests(engine, m, print_request, 8);
+	intel_execlists_show_requests(engine, m, i915_request_show, 8);
 
 	drm_printf(m, "HWSP:\n");
 	hexdump(m, engine->status_page.addr, PAGE_SIZE);
@@ -1745,7 +1754,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
 	 * add it to the total.
 	 */
 	*now = ktime_get();
-	if (atomic_read(&engine->stats.active))
+	if (READ_ONCE(engine->stats.active))
 		total = ktime_add(total, ktime_sub(*now, engine->stats.start));
 
 	return total;
@@ -1764,9 +1773,9 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
 	ktime_t total;
 
 	do {
-		seq = read_seqbegin(&engine->stats.lock);
+		seq = read_seqcount_begin(&engine->stats.lock);
 		total = __intel_engine_get_busy_time(engine, now);
-	} while (read_seqretry(&engine->stats.lock, seq));
+	} while (read_seqcount_retry(&engine->stats.lock, seq));
 
 	return total;
 }
@@ -1802,7 +1811,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 		struct intel_timeline *tl = request->context->timeline;
 
 		list_for_each_entry_from_reverse(request, &tl->requests, link) {
-			if (i915_request_completed(request))
+			if (__i915_request_is_complete(request))
 				break;
 
 			active = request;
@@ -1813,10 +1822,10 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 		return active;
 
 	list_for_each_entry(request, &engine->active.requests, sched.link) {
-		if (i915_request_completed(request))
+		if (__i915_request_is_complete(request))
 			continue;
 
-		if (!i915_request_started(request))
+		if (!__i915_request_has_started(request))
 			continue;
 
 		/* More than one preemptible request may match! */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 9060385cd69e64d30c892e8e3bdf29877f6bfe67..d7be2b9339f91333cb39a175518a5da7b766d59b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -37,6 +37,18 @@ static bool next_heartbeat(struct intel_engine_cs *engine)
 	return true;
 }
 
+static struct i915_request *
+heartbeat_create(struct intel_context *ce, gfp_t gfp)
+{
+	struct i915_request *rq;
+
+	intel_context_enter(ce);
+	rq = __i915_request_create(ce, gfp);
+	intel_context_exit(ce);
+
+	return rq;
+}
+
 static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
 {
 	engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
@@ -45,6 +57,15 @@ static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
 		engine->heartbeat.systole = i915_request_get(rq);
 }
 
+static void heartbeat_commit(struct i915_request *rq,
+			     const struct i915_sched_attr *attr)
+{
+	idle_pulse(rq->engine, rq);
+
+	__i915_request_commit(rq);
+	__i915_request_queue(rq, attr);
+}
+
 static void show_heartbeat(const struct i915_request *rq,
 			   struct intel_engine_cs *engine)
 {
@@ -139,16 +160,11 @@ static void heartbeat(struct work_struct *wrk)
 		goto out;
 	}
 
-	intel_context_enter(ce);
-	rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
-	intel_context_exit(ce);
+	rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN);
 	if (IS_ERR(rq))
 		goto unlock;
 
-	idle_pulse(engine, rq);
-
-	__i915_request_commit(rq);
-	__i915_request_queue(rq, &attr);
+	heartbeat_commit(rq, &attr);
 
 unlock:
 	mutex_unlock(&ce->timeline->mutex);
@@ -187,17 +203,13 @@ static int __intel_engine_pulse(struct intel_engine_cs *engine)
 	GEM_BUG_ON(!intel_engine_has_preemption(engine));
 	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
 
-	intel_context_enter(ce);
-	rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
-	intel_context_exit(ce);
+	rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
 	__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
-	idle_pulse(engine, rq);
 
-	__i915_request_commit(rq);
-	__i915_request_queue(rq, &attr);
+	heartbeat_commit(rq, &attr);
 	GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
 
 	return 0;
@@ -273,8 +285,12 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
 
 int intel_engine_flush_barriers(struct intel_engine_cs *engine)
 {
+	struct i915_sched_attr attr = {
+		.priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),
+	};
+	struct intel_context *ce = engine->kernel_context;
 	struct i915_request *rq;
-	int err = 0;
+	int err;
 
 	if (llist_empty(&engine->barrier_tasks))
 		return 0;
@@ -282,15 +298,22 @@ int intel_engine_flush_barriers(struct intel_engine_cs *engine)
 	if (!intel_engine_pm_get_if_awake(engine))
 		return 0;
 
-	rq = i915_request_create(engine->kernel_context);
+	if (mutex_lock_interruptible(&ce->timeline->mutex)) {
+		err = -EINTR;
+		goto out_rpm;
+	}
+
+	rq = heartbeat_create(ce, GFP_KERNEL);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
-		goto out_rpm;
+		goto out_unlock;
 	}
 
-	idle_pulse(engine, rq);
-	i915_request_add(rq);
+	heartbeat_commit(rq, &attr);
 
+	err = 0;
+out_unlock:
+	mutex_unlock(&ce->timeline->mutex);
 out_rpm:
 	intel_engine_pm_put(engine);
 	return err;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 499b09cb4acf29866b0a6a4107c6ce07c656e890..e67d09259dd0c3d04b6e3b34f365026c8ef59538 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -60,18 +60,26 @@ static int __engine_unpark(struct intel_wakeref *wf)
 
 		/* Scrub the context image after our loss of control */
 		ce->ops->reset(ce);
+
+		CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
+			 ce->timeline->seqno,
+			 READ_ONCE(*ce->timeline->hwsp_seqno),
+			 ce->ring->emit);
+		GEM_BUG_ON(ce->timeline->seqno !=
+			   READ_ONCE(*ce->timeline->hwsp_seqno));
 	}
 
 	if (engine->unpark)
 		engine->unpark(engine);
 
+	intel_breadcrumbs_unpark(engine->breadcrumbs);
 	intel_engine_unpark_heartbeat(engine);
 	return 0;
 }
 
 #if IS_ENABLED(CONFIG_LOCKDEP)
 
-static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
+static unsigned long __timeline_mark_lock(struct intel_context *ce)
 {
 	unsigned long flags;
 
@@ -81,8 +89,8 @@ static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
 	return flags;
 }
 
-static inline void __timeline_mark_unlock(struct intel_context *ce,
-					  unsigned long flags)
+static void __timeline_mark_unlock(struct intel_context *ce,
+				   unsigned long flags)
 {
 	mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
 	local_irq_restore(flags);
@@ -90,13 +98,13 @@ static inline void __timeline_mark_unlock(struct intel_context *ce,
 
 #else
 
-static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
+static unsigned long __timeline_mark_lock(struct intel_context *ce)
 {
 	return 0;
 }
 
-static inline void __timeline_mark_unlock(struct intel_context *ce,
-					  unsigned long flags)
+static void __timeline_mark_unlock(struct intel_context *ce,
+				   unsigned long flags)
 {
 }
 
@@ -136,7 +144,7 @@ __queue_and_release_pm(struct i915_request *rq,
 		list_add_tail(&tl->link, &timelines->active_list);
 
 	/* Hand the request over to HW and so engine_retire() */
-	__i915_request_queue(rq, NULL);
+	__i915_request_queue_bh(rq);
 
 	/* Let new submissions commence (and maybe retire this timeline) */
 	__intel_wakeref_defer_park(&engine->wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_stats.h b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
new file mode 100644
index 0000000000000000000000000000000000000000..24fbdd94351aed3ea8267f57a04c746f352964bd
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_ENGINE_STATS_H__
+#define __INTEL_ENGINE_STATS_H__
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/seqlock.h>
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+#include "intel_engine.h"
+
+static inline void intel_engine_context_in(struct intel_engine_cs *engine)
+{
+	unsigned long flags;
+
+	if (engine->stats.active) {
+		engine->stats.active++;
+		return;
+	}
+
+	/* The writer is serialised; but the pmu reader may be from hardirq */
+	local_irq_save(flags);
+	write_seqcount_begin(&engine->stats.lock);
+
+	engine->stats.start = ktime_get();
+	engine->stats.active++;
+
+	write_seqcount_end(&engine->stats.lock);
+	local_irq_restore(flags);
+
+	GEM_BUG_ON(!engine->stats.active);
+}
+
+static inline void intel_engine_context_out(struct intel_engine_cs *engine)
+{
+	unsigned long flags;
+
+	GEM_BUG_ON(!engine->stats.active);
+	if (engine->stats.active > 1) {
+		engine->stats.active--;
+		return;
+	}
+
+	local_irq_save(flags);
+	write_seqcount_begin(&engine->stats.lock);
+
+	engine->stats.active--;
+	engine->stats.total =
+		ktime_add(engine->stats.total,
+			  ktime_sub(ktime_get(), engine->stats.start));
+
+	write_seqcount_end(&engine->stats.lock);
+	local_irq_restore(flags);
+}
+
+#endif /* __INTEL_ENGINE_STATS_H__ */
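The write side above pairs with the seqcount read loop in intel_engine_get_busy_time(): the sequence number is odd while an update is in flight, and a reader retries whenever the count changes underneath it. A rough userspace model of that protocol, glossing over the memory barriers a real seqcount_t provides (names are illustrative):

#include <stdatomic.h>

struct busy_stats {
	atomic_uint seq;            /* odd while an update is in progress */
	unsigned long long total;
};

/* Writers must be externally serialised, as the comment above notes. */
static void writer_add(struct busy_stats *s, unsigned long long delta)
{
	atomic_fetch_add(&s->seq, 1); /* begin: seq goes odd */
	s->total += delta;
	atomic_fetch_add(&s->seq, 1); /* end: seq goes even */
}

static unsigned long long reader_total(struct busy_stats *s)
{
	unsigned int begin;
	unsigned long long v;

	do {
		while ((begin = atomic_load(&s->seq)) & 1)
			; /* writer active: wait for an even sequence */
		v = s->total;
	} while (atomic_load(&s->seq) != begin); /* retry if it moved */

	return v;
}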
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index ee6312601c56db66dbc357324f6c12054adfc6fd..d2346b42554796d6efc76928c99987ef4643659e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -68,6 +68,7 @@ typedef u8 intel_engine_mask_t;
 #define ALL_ENGINES ((intel_engine_mask_t)~0ul)
 
 struct intel_hw_status_page {
+	struct list_head timelines;
 	struct i915_vma *vma;
 	u32 *addr;
 };
@@ -183,7 +184,8 @@ struct intel_engine_execlists {
 	 * Reserve the upper 16b for tracking internal errors.
 	 */
 	u32 error_interrupt;
-#define ERROR_CSB BIT(31)
+#define ERROR_CSB	BIT(31)
+#define ERROR_PREEMPT	BIT(30)
 
 	/**
 	 * @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
@@ -236,16 +238,6 @@ struct intel_engine_execlists {
 	 */
 	unsigned int port_mask;
 
-	/**
-	 * @switch_priority_hint: Second context priority.
-	 *
-	 * We submit multiple contexts to the HW simultaneously and would
-	 * like to occasionally switch between them to emulate timeslicing.
-	 * To know when timeslicing is suitable, we track the priority of
-	 * the context submitted second.
-	 */
-	int switch_priority_hint;
-
 	/**
 	 * @queue_priority_hint: Highest pending priority.
 	 *
@@ -327,7 +319,7 @@ struct intel_engine_cs {
 	 * as possible.
 	 */
 	enum forcewake_domains fw_domain;
-	atomic_t fw_active;
+	unsigned int fw_active;
 
 	unsigned long context_tag;
 
@@ -524,12 +516,12 @@ struct intel_engine_cs {
 		/**
 		 * @active: Number of contexts currently scheduled in.
 		 */
-		atomic_t active;
+		unsigned int active;
 
 		/**
 		 * @lock: Lock protecting the below fields.
 		 */
-		seqlock_t lock;
+		seqcount_t lock;
 
 		/**
 		 * @total: Total time this engine was busy.
@@ -559,6 +551,8 @@ struct intel_engine_cs {
 		unsigned long stop_timeout_ms;
 		unsigned long timeslice_duration_ms;
 	} props, defaults;
+
+	I915_SELFTEST_DECLARE(struct fault_attr reset_timeout);
 };
 
 static inline bool
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
new file mode 100644
index 0000000000000000000000000000000000000000..ac1be7a632d3b0841b2f854e41ac0caa14546aa9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -0,0 +1,3896 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+/**
+ * DOC: Logical Rings, Logical Ring Contexts and Execlists
+ *
+ * Motivation:
+ * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
+ * These expanded contexts enable a number of new abilities, especially
+ * "Execlists" (also implemented in this file).
+ *
+ * One of the main differences with the legacy HW contexts is that logical
+ * ring contexts incorporate many more things to the context's state, like
+ * PDPs or ringbuffer control registers:
+ *
+ * The reason why PDPs are included in the context is straightforward: as
+ * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
+ * contained there means you don't need to do a ppgtt->switch_mm yourself;
+ * instead, the GPU will do it for you on the context switch.
+ *
+ * But what about the ringbuffer control registers (head, tail, etc.)?
+ * Shouldn't we just need one set of those per engine command streamer? This is
+ * where the name "Logical Rings" starts to make sense: by virtualizing the
+ * rings, the engine cs shifts to a new "ring buffer" with every context
+ * switch. When you want to submit a workload to the GPU you: A) choose your
+ * context, B) find its appropriate virtualized ring, C) write commands to it
+ * and then, finally, D) tell the GPU to switch to that context.
+ *
+ * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
+ * to a context is via a context execution list, ergo "Execlists".
+ *
+ * LRC implementation:
+ * Regarding the creation of contexts, we have:
+ *
+ * - One global default context.
+ * - One local default context for each opened fd.
+ * - One local extra context for each context create ioctl call.
+ *
+ * Now that ringbuffers belong per-context (and not per-engine, like before)
+ * and that contexts are uniquely tied to a given engine (and not reusable,
+ * like before) we need:
+ *
+ * - One ringbuffer per-engine inside each context.
+ * - One backing object per-engine inside each context.
+ *
+ * The global default context starts its life with these new objects fully
+ * allocated and populated. The local default context for each opened fd is
+ * more complex, because we don't know at creation time which engine is going
+ * to use them. To handle this, we have implemented a deferred creation of LR
+ * contexts:
+ *
+ * The local context starts its life as a hollow or blank holder that only
+ * gets populated for a given engine once we receive an execbuffer. If later
+ * on we receive another execbuffer ioctl for the same context but a different
+ * engine, we allocate/populate a new ringbuffer and context backing object and
+ * so on.
+ *
+ * Finally, regarding local contexts created using the ioctl call: as they are
+ * only allowed with the render ring, we can allocate & populate them right
+ * away (no need to defer anything, at least for now).
+ *
+ * Execlists implementation:
+ * Execlists are the new method by which, on gen8+ hardware, workloads are
+ * submitted for execution (as opposed to the legacy ringbuffer-based method).
+ * This method works as follows:
+ *
+ * When a request is committed, its commands (the BB start and any leading or
+ * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
+ * for the appropriate context. The tail pointer in the hardware context is not
+ * updated at this time, but instead, kept by the driver in the ringbuffer
+ * structure. A structure representing this request is added to a request queue
+ * for the appropriate engine: this structure contains a copy of the context's
+ * tail after the request was written to the ring buffer and a pointer to the
+ * context itself.
+ *
+ * If the engine's request queue was empty before the request was added, the
+ * queue is processed immediately. Otherwise the queue will be processed during
+ * a context switch interrupt. In any case, elements on the queue will get sent
+ * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
+ * globally unique 20-bit submission ID.
+ *
+ * When execution of a request completes, the GPU updates the context status
+ * buffer with a context complete event and generates a context switch interrupt.
+ * During the interrupt handling, the driver examines the events in the buffer:
+ * for each context complete event, if the announced ID matches that on the head
+ * of the request queue, then that request is retired and removed from the queue.
+ *
+ * After processing, if any requests were retired and the queue is not empty
+ * then a new execution list can be submitted. The two requests at the front of
+ * the queue are next to be submitted but since a context may not occur twice in
+ * an execution list, if subsequent requests have the same ID as the first then
+ * the two requests must be combined. This is done simply by discarding requests
+ * at the head of the queue until either only one request is left (in which case
+ * we use a NULL second context) or the first two requests have unique IDs.
+ *
+ * By always executing the first two requests in the queue the driver ensures
+ * that the GPU is kept as busy as possible. In the case where a single context
+ * completes but a second context is still executing, the request for this second
+ * context will be at the head of the queue when we remove the first one. This
+ * request will then be resubmitted along with a new request for a different context,
+ * which will cause the hardware to continue executing the second request and queue
+ * the new request (the GPU detects the condition of a context getting preempted
+ * with the same context and optimizes the context switch flow by not doing
+ * preemption, but just sampling the new tail pointer).
+ *
+ */
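To make the pairing rule in the last paragraphs concrete, here is a toy model with invented types (not driver code): pop from the FIFO, fold together consecutive requests that share a context, and leave the second port empty if only one context is runnable.

struct req {
	int ctx_id;
	struct req *next;
};

static struct req *pop(struct req **q)
{
	struct req *rq = *q;

	if (rq)
		*q = rq->next;
	return rq;
}

/*
 * Fill the two submit ports. Requests for the same context are merged
 * by keeping only the most recent one (its tail covers the older
 * requests); port[1] stays NULL when a single context is runnable.
 */
static void pick_elsp_pair(struct req **q, struct req *port[2])
{
	port[0] = pop(q);

	while (port[0] && *q && (*q)->ctx_id == port[0]->ctx_id)
		port[0] = pop(q);

	port[1] = pop(q);
}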
+#include <linux/interrupt.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "gen8_engine_cs.h"
+#include "intel_breadcrumbs.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_stats.h"
+#include "intel_execlists_submission.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
+#include "intel_mocs.h"
+#include "intel_reset.h"
+#include "intel_ring.h"
+#include "intel_workarounds.h"
+#include "shmem_utils.h"
+
+#define RING_EXECLIST_QFULL		(1 << 0x2)
+#define RING_EXECLIST1_VALID		(1 << 0x3)
+#define RING_EXECLIST0_VALID		(1 << 0x4)
+#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
+#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
+#define RING_EXECLIST0_ACTIVE		(1 << 0x12)
+
+#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
+#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
+#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
+#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
+#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
+#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)
+
+#define GEN8_CTX_STATUS_COMPLETED_MASK \
+	 (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
+
+#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE	(0x1) /* lower csb dword */
+#define GEN12_CTX_SWITCH_DETAIL(csb_dw)	((csb_dw) & 0xF) /* upper csb dword */
+#define GEN12_CSB_SW_CTX_ID_MASK		GENMASK(25, 15)
+#define GEN12_IDLE_CTX_ID		0x7FF
+#define GEN12_CSB_CTX_VALID(csb_dw) \
+	(FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)
+
+/* Typical size of the average request (2 pipecontrols and a MI_BB) */
+#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
+
+struct virtual_engine {
+	struct intel_engine_cs base;
+	struct intel_context context;
+	struct rcu_work rcu;
+
+	/*
+	 * We allow only a single request through the virtual engine at a time
+	 * (each request in the timeline waits for the completion fence of
+	 * the previous before being submitted). By restricting ourselves to
+	 * only submitting a single request, each request is placed on to a
+	 * physical engine to maximise load spreading (by virtue of the late greedy
+	 * scheduling -- each real engine takes the next available request
+	 * upon idling).
+	 */
+	struct i915_request *request;
+
+	/*
+	 * We keep a rbtree of available virtual engines inside each physical
+	 * engine, sorted by priority. Here we preallocate the nodes we need
+	 * for the virtual engine, indexed by physical_engine->id.
+	 */
+	struct ve_node {
+		struct rb_node rb;
+		int prio;
+	} nodes[I915_NUM_ENGINES];
+
+	/*
+	 * Keep track of bonded pairs -- restrictions upon our selection
+	 * of physical engines any particular request may be submitted to.
+	 * If we receive a submit-fence from a master engine, we will only
+	 * use one of the sibling_mask physical engines.
+	 */
+	struct ve_bond {
+		const struct intel_engine_cs *master;
+		intel_engine_mask_t sibling_mask;
+	} *bonds;
+	unsigned int num_bonds;
+
+	/* And finally, which physical engines this virtual engine maps onto. */
+	unsigned int num_siblings;
+	struct intel_engine_cs *siblings[];
+};
+
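The bonds array above encodes that submit-fence restriction: given the master engine that signalled us, only the engines in the matching sibling_mask remain eligible. A hedged sketch of the lookup, with standalone types that only loosely mirror the struct fields:

typedef unsigned int engine_mask_t;

struct bond {
	const void *master;
	engine_mask_t sibling_mask;
};

static engine_mask_t
allowed_siblings(const struct bond *bonds, unsigned int num_bonds,
		 const void *master, engine_mask_t all_siblings)
{
	unsigned int i;

	for (i = 0; i < num_bonds; i++)
		if (bonds[i].master == master)
			return all_siblings & bonds[i].sibling_mask;

	return all_siblings; /* no bond for this master: any sibling will do */
}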
+static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
+{
+	GEM_BUG_ON(!intel_engine_is_virtual(engine));
+	return container_of(engine, struct virtual_engine, base);
+}
+
+static struct i915_request *
+__active_request(const struct intel_timeline * const tl,
+		 struct i915_request *rq,
+		 int error)
+{
+	struct i915_request *active = rq;
+
+	list_for_each_entry_from_reverse(rq, &tl->requests, link) {
+		if (__i915_request_is_complete(rq))
+			break;
+
+		if (error) {
+			i915_request_set_error_once(rq, error);
+			__i915_request_skip(rq);
+		}
+		active = rq;
+	}
+
+	return active;
+}
+
+static struct i915_request *
+active_request(const struct intel_timeline * const tl, struct i915_request *rq)
+{
+	return __active_request(tl, rq, 0);
+}
+
+static void ring_set_paused(const struct intel_engine_cs *engine, int state)
+{
+	/*
+	 * We inspect HWS_PREEMPT with a semaphore inside
+	 * engine->emit_fini_breadcrumb. If the dword is true,
+	 * the ring is paused as the semaphore will busywait
+	 * until the dword is false.
+	 */
+	engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
+	if (state)
+		wmb();
+}
+
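In userspace terms, the pause dword behaves like a shared flag that one side sets and the other busywaits on; the GPU-side busywait lives in the semaphore emitted by emit_fini_breadcrumb. A toy analogue, illustrative only:

#include <stdatomic.h>

static atomic_int hws_preempt; /* stands in for the status-page dword */

static void set_paused(int state)
{
	atomic_store(&hws_preempt, state);
}

static void fini_breadcrumb_semaphore(void)
{
	while (atomic_load(&hws_preempt))
		; /* spin, as the GPU semaphore busywaits on the dword */
}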
+static struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+	return rb_entry(rb, struct i915_priolist, node);
+}
+
+static int rq_prio(const struct i915_request *rq)
+{
+	return READ_ONCE(rq->sched.attr.priority);
+}
+
+static int effective_prio(const struct i915_request *rq)
+{
+	int prio = rq_prio(rq);
+
+	/*
+	 * If this request is special and must not be interrupted at any
+	 * cost, so be it. Note we are only checking the most recent request
+	 * in the context and so may be masking an earlier vip request. It
+	 * is hoped that under the conditions where nopreempt is used, this
+	 * will not matter (i.e. all requests to that context will be
+	 * nopreempt for as long as desired).
+	 */
+	if (i915_request_has_nopreempt(rq))
+		prio = I915_PRIORITY_UNPREEMPTABLE;
+
+	return prio;
+}
+
+static int queue_prio(const struct intel_engine_execlists *execlists)
+{
+	struct i915_priolist *p;
+	struct rb_node *rb;
+
+	rb = rb_first_cached(&execlists->queue);
+	if (!rb)
+		return INT_MIN;
+
+	/*
+	 * As the priolist[] entries are inverted, with the highest priority in [0],
+	 * we have to flip the index value to recover the priority.
+	 */
+	p = to_priolist(rb);
+	if (!I915_USER_PRIORITY_SHIFT)
+		return p->priority;
+
+	return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
+}
+
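The flip in queue_prio() can be checked with small numbers. Assuming a shift width of 2 purely for illustration, the lowest set bit of 'used' names the highest pending sub-level:

#include <strings.h> /* ffs() */

#define USER_PRIORITY_SHIFT 2 /* assumed width, for illustration only */

static int effective_queue_prio(int base_prio, unsigned int used)
{
	return ((base_prio + 1) << USER_PRIORITY_SHIFT) - ffs(used);
}

/*
 * effective_queue_prio(0, 0x1) == 3, effective_queue_prio(0, 0x4) == 1:
 * a lower bit index yields a higher effective priority.
 */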
+static int virtual_prio(const struct intel_engine_execlists *el)
+{
+	struct rb_node *rb = rb_first_cached(&el->virtual);
+
+	return rb ? rb_entry(rb, struct ve_node, rb)->prio : INT_MIN;
+}
+
+static bool need_preempt(const struct intel_engine_cs *engine,
+			 const struct i915_request *rq)
+{
+	int last_prio;
+
+	if (!intel_engine_has_semaphores(engine))
+		return false;
+
+	/*
+	 * Check if the current priority hint merits a preemption attempt.
+	 *
+	 * We record the highest value priority we saw during rescheduling
+	 * prior to this dequeue, therefore we know that if it is strictly
+	 * less than the current tail of ELSP[0], we do not need to force
+	 * a preempt-to-idle cycle.
+	 *
+	 * However, the priority hint is a mere hint that we may need to
+	 * preempt. If that hint is stale or we may be trying to preempt
+	 * ourselves, ignore the request.
+	 *
+	 * More naturally we would write
+	 *      prio >= max(0, last);
+	 * except that we wish to prevent triggering preemption at the same
+	 * priority level: the task that is running should remain running
+	 * to preserve FIFO ordering of dependencies.
+	 */
+	last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
+	if (engine->execlists.queue_priority_hint <= last_prio)
+		return false;
+
+	/*
+	 * Check against the first request in ELSP[1], it will, thanks to the
+	 * power of PI, be the highest priority of that context.
+	 */
+	if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
+	    rq_prio(list_next_entry(rq, sched.link)) > last_prio)
+		return true;
+
+	/*
+	 * If the inflight context did not trigger the preemption, then maybe
+	 * it was the set of queued requests? Pick the highest priority in
+	 * the queue (the first active priolist) and see if it deserves to be
+	 * running instead of ELSP[0].
+	 *
+	 * The highest priority request in the queue cannot be either
+	 * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
+	 * context, its priority would not exceed ELSP[0] aka last_prio.
+	 */
+	return max(virtual_prio(&engine->execlists),
+		   queue_prio(&engine->execlists)) > last_prio;
+}
+
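The threshold logic above boils down to a strict comparison against max(running priority, NORMAL - 1), so equal-priority work never preempts and FIFO ordering survives. Distilled into a sketch, with the constant value assumed for illustration:

#define PRIORITY_NORMAL 0

static int should_preempt(int queue_hint, int running_prio)
{
	int last = running_prio > PRIORITY_NORMAL - 1 ?
		   running_prio : PRIORITY_NORMAL - 1;

	return queue_hint > last; /* strict: equal priority keeps FIFO */
}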
+__maybe_unused static bool
+assert_priority_queue(const struct i915_request *prev,
+		      const struct i915_request *next)
+{
+	/*
+	 * Without preemption, the prev may refer to the still active element
+	 * which we refuse to let go.
+	 *
+	 * Even with preemption, there are times when we think it is better not
+	 * to preempt and leave an ostensibly lower priority request in flight.
+	 */
+	if (i915_request_is_active(prev))
+		return true;
+
+	return rq_prio(prev) >= rq_prio(next);
+}
+
+static struct i915_request *
+__unwind_incomplete_requests(struct intel_engine_cs *engine)
+{
+	struct i915_request *rq, *rn, *active = NULL;
+	struct list_head *pl;
+	int prio = I915_PRIORITY_INVALID;
+
+	lockdep_assert_held(&engine->active.lock);
+
+	list_for_each_entry_safe_reverse(rq, rn,
+					 &engine->active.requests,
+					 sched.link) {
+		if (__i915_request_is_complete(rq)) {
+			list_del_init(&rq->sched.link);
+			continue;
+		}
+
+		__i915_request_unsubmit(rq);
+
+		GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+		if (rq_prio(rq) != prio) {
+			prio = rq_prio(rq);
+			pl = i915_sched_lookup_priolist(engine, prio);
+		}
+		GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+
+		list_move(&rq->sched.link, pl);
+		set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+		/* Check in case we roll back so far that we wrap [size/2] */
+		if (intel_ring_direction(rq->ring,
+					 rq->tail,
+					 rq->ring->tail + 8) > 0)
+			rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
+
+		active = rq;
+	}
+
+	return active;
+}
+
+struct i915_request *
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
+{
+	struct intel_engine_cs *engine =
+		container_of(execlists, typeof(*engine), execlists);
+
+	return __unwind_incomplete_requests(engine);
+}
+
+static void
+execlists_context_status_change(struct i915_request *rq, unsigned long status)
+{
+	/*
+	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
+	 * The compiler should eliminate this function as dead-code.
+	 */
+	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+		return;
+
+	atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+				   status, rq);
+}
+
+static void reset_active(struct i915_request *rq,
+			 struct intel_engine_cs *engine)
+{
+	struct intel_context * const ce = rq->context;
+	u32 head;
+
+	/*
+	 * The executing context has been cancelled. We want to prevent
+	 * further execution along this context and propagate the error on
+	 * to anything depending on its results.
+	 *
+	 * In __i915_request_submit(), we apply the -EIO and remove the
+	 * requests' payloads for any banned requests. But first, we must
+	 * rewind the context back to the start of the incomplete request so
+	 * that we do not jump back into the middle of the batch.
+	 *
+	 * We preserve the breadcrumbs and semaphores of the incomplete
+	 * requests so that inter-timeline dependencies (i.e other timelines)
+	 * remain correctly ordered. And we defer to __i915_request_submit()
+	 * so that all asynchronous waits are correctly handled.
+	 */
+	ENGINE_TRACE(engine, "{ reset rq=%llx:%lld }\n",
+		     rq->fence.context, rq->fence.seqno);
+
+	/* On resubmission of the active request, payload will be scrubbed */
+	if (__i915_request_is_complete(rq))
+		head = rq->tail;
+	else
+		head = __active_request(ce->timeline, rq, -EIO)->head;
+	head = intel_ring_wrap(ce->ring, head);
+
+	/* Scrub the context image to prevent replaying the previous batch */
+	lrc_init_regs(ce, engine, true);
+
+	/* We've switched away, so this should be a no-op, but intent matters */
+	ce->lrc.lrca = lrc_update_regs(ce, engine, head);
+}
+
+static struct intel_engine_cs *
+__execlists_schedule_in(struct i915_request *rq)
+{
+	struct intel_engine_cs * const engine = rq->engine;
+	struct intel_context * const ce = rq->context;
+
+	intel_context_get(ce);
+
+	if (unlikely(intel_context_is_closed(ce) &&
+		     !intel_engine_has_heartbeat(engine)))
+		intel_context_set_banned(ce);
+
+	if (unlikely(intel_context_is_banned(ce)))
+		reset_active(rq, engine);
+
+	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		lrc_check_regs(ce, engine, "before");
+
+	if (ce->tag) {
+		/* Use a fixed tag for OA and friends */
+		GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
+		ce->lrc.ccid = ce->tag;
+	} else {
+		/* We don't need a strict matching tag, just different values */
+		unsigned int tag = __ffs(engine->context_tag);
+
+		GEM_BUG_ON(tag >= BITS_PER_LONG);
+		__clear_bit(tag, &engine->context_tag);
+		ce->lrc.ccid = (1 + tag) << (GEN11_SW_CTX_ID_SHIFT - 32);
+
+		BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
+	}
+
+	ce->lrc.ccid |= engine->execlists.ccid;
+
+	__intel_gt_pm_get(engine->gt);
+	if (engine->fw_domain && !engine->fw_active++)
+		intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
+	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+	intel_engine_context_in(engine);
+
+	CE_TRACE(ce, "schedule-in, ccid:%x\n", ce->lrc.ccid);
+
+	return engine;
+}
+
+static void execlists_schedule_in(struct i915_request *rq, int idx)
+{
+	struct intel_context * const ce = rq->context;
+	struct intel_engine_cs *old;
+
+	GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
+	trace_i915_request_in(rq, idx);
+
+	old = ce->inflight;
+	if (!old)
+		old = __execlists_schedule_in(rq);
+	WRITE_ONCE(ce->inflight, ptr_inc(old));
+
+	GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
+}
+
+static void
+resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
+{
+	struct intel_engine_cs *engine = rq->engine;
+
+	spin_lock_irq(&engine->active.lock);
+
+	clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+	WRITE_ONCE(rq->engine, &ve->base);
+	ve->base.submit_request(rq);
+
+	spin_unlock_irq(&engine->active.lock);
+}
+
+static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
+{
+	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+	struct intel_engine_cs *engine = rq->engine;
+
+	/*
+	 * After this point, the rq may be transferred to a new sibling, so
+	 * before we clear ce->inflight make sure that the context has been
+	 * removed from the b->signalers and furthermore we need to make sure
+	 * that the concurrent iterator in signal_irq_work is no longer
+	 * following ce->signal_link.
+	 */
+	if (!list_empty(&ce->signals))
+		intel_context_remove_breadcrumbs(ce, engine->breadcrumbs);
+
+	/*
+	 * This engine is now too busy to run this virtual request, so
+	 * see if we can find an alternative engine for it to execute on.
+	 * Once a request has become bonded to this engine, we treat it the
+	 * same as any other native request.
+	 */
+	if (i915_request_in_priority_queue(rq) &&
+	    rq->execution_mask != engine->mask)
+		resubmit_virtual_request(rq, ve);
+
+	if (READ_ONCE(ve->request))
+		tasklet_hi_schedule(&ve->base.execlists.tasklet);
+}
+
+static void __execlists_schedule_out(struct i915_request * const rq,
+				     struct intel_context * const ce)
+{
+	struct intel_engine_cs * const engine = rq->engine;
+	unsigned int ccid;
+
+	/*
+	 * NB process_csb() is not under the engine->active.lock and hence
+	 * schedule_out can race with schedule_in, meaning that we should
+	 * refrain from doing non-trivial work here.
+	 */
+
+	CE_TRACE(ce, "schedule-out, ccid:%x\n", ce->lrc.ccid);
+	GEM_BUG_ON(ce->inflight != engine);
+
+	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		lrc_check_regs(ce, engine, "after");
+
+	/*
+	 * If we have just completed this context, the engine may now be
+	 * idle and we want to re-enter powersaving.
+	 */
+	if (intel_timeline_is_last(ce->timeline, rq) &&
+	    __i915_request_is_complete(rq))
+		intel_engine_add_retire(engine, ce->timeline);
+
+	ccid = ce->lrc.ccid;
+	ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
+	ccid &= GEN12_MAX_CONTEXT_HW_ID;
+	if (ccid < BITS_PER_LONG) {
+		GEM_BUG_ON(ccid == 0);
+		GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
+		__set_bit(ccid - 1, &engine->context_tag);
+	}
+
+	lrc_update_runtime(ce);
+	intel_engine_context_out(engine);
+	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+	if (engine->fw_domain && !--engine->fw_active)
+		intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
+	intel_gt_pm_put_async(engine->gt);
+
+	/*
+	 * If this is part of a virtual engine, its next request may
+	 * have been blocked waiting for access to the active context.
+	 * We have to kick all the siblings again in case we need to
+	 * switch (e.g. the next request is not runnable on this
+	 * engine). Hopefully, we will already have submitted the next
+	 * request before the tasklet runs and do not need to rebuild
+	 * each virtual tree and kick everyone again.
+	 */
+	if (ce->engine != engine)
+		kick_siblings(rq, ce);
+
+	WRITE_ONCE(ce->inflight, NULL);
+	intel_context_put(ce);
+}
+
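The ccid arithmetic above is the inverse of the allocation in __execlists_schedule_in(): a tag is the lowest clear bit plus one, and schedule-out returns that bit to the pool. A toy model of the recycling, with GCC builtins standing in for __ffs()/__clear_bit()/__set_bit() and assuming at least one tag is free:

static unsigned long context_tag_pool = ~0ul; /* all tags initially free */

static unsigned int tag_alloc(void)
{
	unsigned int tag = __builtin_ctzl(context_tag_pool);

	context_tag_pool &= ~(1ul << tag); /* claim the lowest free bit */
	return tag + 1;                    /* ccid 0 stays reserved */
}

static void tag_free(unsigned int ccid)
{
	context_tag_pool |= 1ul << (ccid - 1);
}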
+static inline void execlists_schedule_out(struct i915_request *rq)
+{
+	struct intel_context * const ce = rq->context;
+
+	trace_i915_request_out(rq);
+
+	GEM_BUG_ON(!ce->inflight);
+	ce->inflight = ptr_dec(ce->inflight);
+	if (!__intel_context_inflight_count(ce->inflight))
+		__execlists_schedule_out(rq, ce);
+
+	i915_request_put(rq);
+}
+
+static u64 execlists_update_context(struct i915_request *rq)
+{
+	struct intel_context *ce = rq->context;
+	u64 desc = ce->lrc.desc;
+	u32 tail, prev;
+
+	/*
+	 * WaIdleLiteRestore:bdw,skl
+	 *
+	 * We should never submit the context with the same RING_TAIL twice
+	 * just in case we submit an empty ring, which confuses the HW.
+	 *
+	 * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
+	 * the normal request to be able to always advance the RING_TAIL on
+	 * subsequent resubmissions (for lite restore). Should that fail us,
+	 * and we try and submit the same tail again, force the context
+	 * reload.
+	 *
+	 * If we need to return to a preempted context, we need to skip the
+	 * lite-restore and force it to reload the RING_TAIL. Otherwise, the
+	 * HW has a tendency to ignore us rewinding the TAIL to the end of
+	 * an earlier request.
+	 */
+	GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
+	prev = rq->ring->tail;
+	tail = intel_ring_set_tail(rq->ring, rq->tail);
+	if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
+		desc |= CTX_DESC_FORCE_RESTORE;
+	ce->lrc_reg_state[CTX_RING_TAIL] = tail;
+	rq->tail = rq->wa_tail;
+
+	/*
+	 * Make sure the context image is complete before we submit it to HW.
+	 *
+	 * Ostensibly, writes (including the WCB) should be flushed prior to
+	 * an uncached write such as our mmio register access, the empirical
+	 * evidence (esp. on Braswell) suggests that the WC write into memory
+	 * may not be visible to the HW prior to the completion of the UC
+	 * register write and that we may begin execution from the context
+	 * before its image is complete leading to invalid PD chasing.
+	 */
+	wmb();
+
+	ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
+	return desc;
+}
+
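The force-restore decision hinges on whether the new TAIL actually advanced. A simplified model of that direction test, assuming a power-of-two ring size (the real intel_ring_direction() is more careful about tail spacing rules):

/*
 * >0 if 'next' is ahead of 'prev' by less than half the ring,
 * <0 if it is behind (a rewind), 0 if unchanged.
 */
static int ring_direction(unsigned int size, unsigned int prev, unsigned int next)
{
	unsigned int dist = (next - prev) & (size - 1);

	if (!dist)
		return 0;

	return dist < size / 2 ? 1 : -1;
}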
+static void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
+{
+	if (execlists->ctrl_reg) {
+		writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
+		writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
+	} else {
+		writel(upper_32_bits(desc), execlists->submit_reg);
+		writel(lower_32_bits(desc), execlists->submit_reg);
+	}
+}
+
+static __maybe_unused char *
+dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
+{
+	if (!rq)
+		return "";
+
+	snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d",
+		 prefix,
+		 rq->context->lrc.ccid,
+		 rq->fence.context, rq->fence.seqno,
+		 __i915_request_is_complete(rq) ? "!" :
+		 __i915_request_has_started(rq) ? "*" :
+		 "",
+		 rq_prio(rq));
+
+	return buf;
+}
+
+static __maybe_unused noinline void
+trace_ports(const struct intel_engine_execlists *execlists,
+	    const char *msg,
+	    struct i915_request * const *ports)
+{
+	const struct intel_engine_cs *engine =
+		container_of(execlists, typeof(*engine), execlists);
+	char __maybe_unused p0[40], p1[40];
+
+	if (!ports[0])
+		return;
+
+	ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
+		     dump_port(p0, sizeof(p0), "", ports[0]),
+		     dump_port(p1, sizeof(p1), ", ", ports[1]));
+}
+
+static bool
+reset_in_progress(const struct intel_engine_execlists *execlists)
+{
+	return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+}
+
+static __maybe_unused noinline bool
+assert_pending_valid(const struct intel_engine_execlists *execlists,
+		     const char *msg)
+{
+	struct intel_engine_cs *engine =
+		container_of(execlists, typeof(*engine), execlists);
+	struct i915_request * const *port, *rq;
+	struct intel_context *ce = NULL;
+	bool sentinel = false;
+	u32 ccid = -1;
+
+	trace_ports(execlists, msg, execlists->pending);
+
+	/* We may be messing around with the lists during reset, lalala */
+	if (reset_in_progress(execlists))
+		return true;
+
+	if (!execlists->pending[0]) {
+		GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
+			      engine->name);
+		return false;
+	}
+
+	if (execlists->pending[execlists_num_ports(execlists)]) {
+		GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
+			      engine->name, execlists_num_ports(execlists));
+		return false;
+	}
+
+	for (port = execlists->pending; (rq = *port); port++) {
+		unsigned long flags;
+		bool ok = true;
+
+		GEM_BUG_ON(!kref_read(&rq->fence.refcount));
+		GEM_BUG_ON(!i915_request_is_active(rq));
+
+		if (ce == rq->context) {
+			GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
+				      engine->name,
+				      ce->timeline->fence_context,
+				      port - execlists->pending);
+			return false;
+		}
+		ce = rq->context;
+
+		if (ccid == ce->lrc.ccid) {
+			GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
+				      engine->name,
+				      ccid, ce->timeline->fence_context,
+				      port - execlists->pending);
+			return false;
+		}
+		ccid = ce->lrc.ccid;
+
+		/*
+		 * Sentinels are supposed to be the last request so they flush
+		 * the current execution off the HW. Check that they are the only
+		 * request in the pending submission.
+		 */
+		if (sentinel) {
+			GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
+				      engine->name,
+				      ce->timeline->fence_context,
+				      port - execlists->pending);
+			return false;
+		}
+		sentinel = i915_request_has_sentinel(rq);
+
+		/*
+		 * We want virtual requests to only be in the first slot so
+		 * that they are never stuck behind a hog and can be immediately
+		 * transferred onto the next idle engine.
+		 */
+		if (rq->execution_mask != engine->mask &&
+		    port != execlists->pending) {
+			GEM_TRACE_ERR("%s: virtual engine:%llx not in prime position[%zd]\n",
+				      engine->name,
+				      ce->timeline->fence_context,
+				      port - execlists->pending);
+			return false;
+		}
+
+		/* Hold tightly onto the lock to prevent concurrent retires! */
+		if (!spin_trylock_irqsave(&rq->lock, flags))
+			continue;
+
+		if (__i915_request_is_complete(rq))
+			goto unlock;
+
+		if (i915_active_is_idle(&ce->active) &&
+		    !intel_context_is_barrier(ce)) {
+			GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
+				      engine->name,
+				      ce->timeline->fence_context,
+				      port - execlists->pending);
+			ok = false;
+			goto unlock;
+		}
+
+		if (!i915_vma_is_pinned(ce->state)) {
+			GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
+				      engine->name,
+				      ce->timeline->fence_context,
+				      port - execlists->pending);
+			ok = false;
+			goto unlock;
+		}
+
+		if (!i915_vma_is_pinned(ce->ring->vma)) {
+			GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
+				      engine->name,
+				      ce->timeline->fence_context,
+				      port - execlists->pending);
+			ok = false;
+			goto unlock;
+		}
+
+unlock:
+		spin_unlock_irqrestore(&rq->lock, flags);
+		if (!ok)
+			return false;
+	}
+
+	return ce;
+}
+
+static void execlists_submit_ports(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists *execlists = &engine->execlists;
+	unsigned int n;
+
+	GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
+
+	/*
+	 * We can skip acquiring intel_runtime_pm_get() here as it was taken
+	 * on our behalf by the request (see i915_gem_mark_busy()) and it will
+	 * not be relinquished until the device is idle (see
+	 * i915_gem_idle_work_handler()). As a precaution, we make sure
+	 * that all ELSP are drained i.e. we have processed the CSB,
+	 * before allowing ourselves to idle and calling intel_runtime_pm_put().
+	 */
+	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+	/*
+	 * ELSQ note: the submit queue is not cleared after being submitted
+	 * to the HW so we need to make sure we always clean it up. This is
+	 * currently ensured by the fact that we always write the same number
+	 * of elsq entries, keep this in mind before changing the loop below.
+	 */
+	for (n = execlists_num_ports(execlists); n--; ) {
+		struct i915_request *rq = execlists->pending[n];
+
+		write_desc(execlists,
+			   rq ? execlists_update_context(rq) : 0,
+			   n);
+	}
+
+	/* we need to manually load the submit queue */
+	if (execlists->ctrl_reg)
+		writel(EL_CTRL_LOAD, execlists->ctrl_reg);
+}
+
+static bool ctx_single_port_submission(const struct intel_context *ce)
+{
+	return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
+		intel_context_force_single_submission(ce));
+}
+
+static bool can_merge_ctx(const struct intel_context *prev,
+			  const struct intel_context *next)
+{
+	if (prev != next)
+		return false;
+
+	if (ctx_single_port_submission(prev))
+		return false;
+
+	return true;
+}
+
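+/* Sample the fence flags once; they may change concurrently under us */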
+static unsigned long i915_request_flags(const struct i915_request *rq)
+{
+	return READ_ONCE(rq->fence.flags);
+}
+
+static bool can_merge_rq(const struct i915_request *prev,
+			 const struct i915_request *next)
+{
+	GEM_BUG_ON(prev == next);
+	GEM_BUG_ON(!assert_priority_queue(prev, next));
+
+	/*
+	 * We do not submit known completed requests. Therefore if the next
+	 * request is already completed, we can pretend to merge it in
+	 * with the previous context (and we will skip updating the ELSP
+	 * and tracking). Thus hopefully keeping the ELSP full with active
+	 * contexts, despite the best efforts of preempt-to-busy to confuse
+	 * us.
+	 */
+	if (__i915_request_is_complete(next))
+		return true;
+
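+	/* Both requests must agree on NOPREEMPT/SENTINEL to share a submission */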
+	if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
+		     (BIT(I915_FENCE_FLAG_NOPREEMPT) |
+		      BIT(I915_FENCE_FLAG_SENTINEL))))
+		return false;
+
+	if (!can_merge_ctx(prev->context, next->context))
+		return false;
+
+	GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
+	return true;
+}
+
+static bool virtual_matches(const struct virtual_engine *ve,
+			    const struct i915_request *rq,
+			    const struct intel_engine_cs *engine)
+{
+	const struct intel_engine_cs *inflight;
+
+	if (!rq)
+		return false;
+
+	if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
+		return false;
+
+	/*
+	 * We track when the HW has completed saving the context image
+	 * (i.e. when we have seen the final CS event switching out of
+	 * the context) and must not overwrite the context image before
+	 * then. This restricts us to only using the active engine
+	 * while the previous virtualized request is inflight (so
+	 * we reuse the register offsets). This is a very small
+	 * hysteresis on the greedy selection algorithm.
+	 */
+	inflight = intel_context_inflight(&ve->context);
+	if (inflight && inflight != engine)
+		return false;
+
+	return true;
+}
+
+static struct virtual_engine *
+first_virtual_engine(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists *el = &engine->execlists;
+	struct rb_node *rb = rb_first_cached(&el->virtual);
+
+	while (rb) {
+		struct virtual_engine *ve =
+			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+		struct i915_request *rq = READ_ONCE(ve->request);
+
+		/* lazily cleanup after another engine handled rq */
+		if (!rq || !virtual_matches(ve, rq, engine)) {
+			rb_erase_cached(rb, &el->virtual);
+			RB_CLEAR_NODE(rb);
+			rb = rb_first_cached(&el->virtual);
+			continue;
+		}
+
+		return ve;
+	}
+
+	return NULL;
+}
+
+static void virtual_xfer_context(struct virtual_engine *ve,
+				 struct intel_engine_cs *engine)
+{
+	unsigned int n;
+
+	if (likely(engine == ve->siblings[0]))
+		return;
+
+	GEM_BUG_ON(READ_ONCE(ve->context.inflight));
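+	/* Without relative mmio, rewrite the engine offsets baked into the image */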
+	if (!intel_engine_has_relative_mmio(engine))
+		lrc_update_offsets(&ve->context, engine);
+
+	/*
+	 * Move the bound engine to the top of the list for
+	 * future execution. We then kick this tasklet first
+	 * before checking others, so that we preferentially
+	 * reuse this set of bound registers.
+	 */
+	for (n = 1; n < ve->num_siblings; n++) {
+		if (ve->siblings[n] == engine) {
+			swap(ve->siblings[n], ve->siblings[0]);
+			break;
+		}
+	}
+}
+
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
+{
+	LIST_HEAD(list);
+
+	/*
+	 * We want to move the interrupted request to the back of
+	 * the round-robin list (i.e. its priority level), but
+	 * in doing so, we must then move all requests that were in
+	 * flight and were waiting for the interrupted request to
+	 * be run after it again.
+	 */
+	do {
+		struct i915_dependency *p;
+
+		GEM_BUG_ON(i915_request_is_active(rq));
+		list_move_tail(&rq->sched.link, pl);
+
+		for_each_waiter(p, rq) {
+			struct i915_request *w =
+				container_of(p->waiter, typeof(*w), sched);
+
+			if (p->flags & I915_DEPENDENCY_WEAK)
+				continue;
+
+			/* Leave semaphores spinning on the other engines */
+			if (w->engine != rq->engine)
+				continue;
+
+			/* No waiter should start before its signaler */
+			GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
+				   __i915_request_has_started(w) &&
+				   !__i915_request_is_complete(rq));
+
+			GEM_BUG_ON(i915_request_is_active(w));
+			if (!i915_request_is_ready(w))
+				continue;
+
+			if (rq_prio(w) < rq_prio(rq))
+				continue;
+
+			GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+			list_move_tail(&w->sched.link, &list);
+		}
+
+		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+	} while (rq);
+}
+
+static void defer_active(struct intel_engine_cs *engine)
+{
+	struct i915_request *rq;
+
+	rq = __unwind_incomplete_requests(engine);
+	if (!rq)
+		return;
+
+	defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+}
+
+static bool
+timeslice_yield(const struct intel_engine_execlists *el,
+		const struct i915_request *rq)
+{
+	/*
+	 * Once bitten, forever smitten!
+	 *
+	 * If the active context ever busy-waited on a semaphore,
+	 * it will be treated as a hog until the end of its timeslice (i.e.
+	 * until it is scheduled out and replaced by a new submission,
+	 * possibly even its own lite-restore). The HW only sends an interrupt
+	 * on the first miss, and we do not know if that semaphore has been
+	 * signaled, or even if it is now stuck on another semaphore. Play
+	 * safe, yield if it might be stuck -- it will be given a fresh
+	 * timeslice in the near future.
+	 */
+	return rq->context->lrc.ccid == READ_ONCE(el->yield);
+}
+
+static bool needs_timeslice(const struct intel_engine_cs *engine,
+			    const struct i915_request *rq)
+{
+	if (!intel_engine_has_timeslices(engine))
+		return false;
+
+	/* If not currently active, or about to switch, wait for next event */
+	if (!rq || __i915_request_is_complete(rq))
+		return false;
+
+	/* We do not need to start the timeslice until after the ACK */
+	if (READ_ONCE(engine->execlists.pending[0]))
+		return false;
+
+	/* If ELSP[1] is occupied, always check to see if worth slicing */
+	if (!list_is_last_rcu(&rq->sched.link, &engine->active.requests)) {
+		ENGINE_TRACE(engine, "timeslice required for second inflight context\n");
+		return true;
+	}
+
+	/* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */
+	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)) {
+		ENGINE_TRACE(engine, "timeslice required for queue\n");
+		return true;
+	}
+
+	if (!RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root)) {
+		ENGINE_TRACE(engine, "timeslice required for virtual\n");
+		return true;
+	}
+
+	return false;
+}
+
+static bool
+timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+	const struct intel_engine_execlists *el = &engine->execlists;
+
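+	/* A started, non-preemptible request owns the engine until it completes */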
+	if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq))
+		return false;
+
+	if (!needs_timeslice(engine, rq))
+		return false;
+
+	return timer_expired(&el->timer) || timeslice_yield(el, rq);
+}
+
+static unsigned long timeslice(const struct intel_engine_cs *engine)
+{
+	return READ_ONCE(engine->props.timeslice_duration_ms);
+}
+
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists *el = &engine->execlists;
+	unsigned long duration;
+
+	/* Disable the timer if there is nothing to switch to */
+	duration = 0;
+	if (needs_timeslice(engine, *el->active)) {
+		/* Avoid continually prolonging an active timeslice */
+		if (timer_active(&el->timer)) {
+			/*
+			 * If we just submitted a new ELSP after an old
+			 * context, that context may have already consumed
+			 * its timeslice, so recheck.
+			 */
+			if (!timer_pending(&el->timer))
+				tasklet_hi_schedule(&el->tasklet);
+			return;
+		}
+
+		duration = timeslice(engine);
+	}
+
+	set_timer_ms(&el->timer, duration);
+}
+
+static void record_preemption(struct intel_engine_execlists *execlists)
+{
+	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
+}
+
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+					    const struct i915_request *rq)
+{
+	if (!rq)
+		return 0;
+
+	/* Force a fast reset for terminated contexts (ignoring sysfs!) */
+	if (unlikely(intel_context_is_banned(rq->context)))
+		return 1;
+
+	return READ_ONCE(engine->props.preempt_timeout_ms);
+}
+
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+				const struct i915_request *rq)
+{
+	if (!intel_engine_has_preempt_reset(engine))
+		return;
+
+	set_timer_ms(&engine->execlists.preempt,
+		     active_preempt_timeout(engine, rq));
+}
+
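+/* Do not skip past a sentinel; it must remain the last request on the ELSP */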
+static bool completed(const struct i915_request *rq)
+{
+	if (i915_request_has_sentinel(rq))
+		return false;
+
+	return __i915_request_is_complete(rq);
+}
+
+static void execlists_dequeue(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct i915_request **port = execlists->pending;
+	struct i915_request ** const last_port = port + execlists->port_mask;
+	struct i915_request *last, * const *active;
+	struct virtual_engine *ve;
+	struct rb_node *rb;
+	bool submit = false;
+
+	/*
+	 * Hardware submission is through 2 ports. Conceptually each port
+	 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
+	 * static for a context, and unique to each, so we only execute
+	 * requests belonging to a single context from each ring. RING_HEAD
+	 * is maintained by the CS in the context image, it marks the place
+	 * where it got up to last time, and through RING_TAIL we tell the CS
+	 * where we want to execute up to this time.
+	 *
+	 * In this list the requests are in order of execution. Consecutive
+	 * requests from the same context are adjacent in the ringbuffer. We
+	 * can combine these requests into a single RING_TAIL update:
+	 *
+	 *              RING_HEAD...req1...req2
+	 *                                    ^- RING_TAIL
+	 * since to execute req2 the CS must first execute req1.
+	 *
+	 * Our goal then is to point each port to the end of a consecutive
+	 * sequence of requests as being the most optimal (fewest wake ups
+	 * and context switches) submission.
+	 */
+
+	spin_lock(&engine->active.lock);
+
+	/*
+	 * If the queue is higher priority than the last
+	 * request in the currently active context, submit afresh.
+	 * We will resubmit again afterwards in case we need to split
+	 * the active context to interject the preemption request,
+	 * i.e. we will retrigger preemption following the ack in case
+	 * of trouble.
+	 */
+	active = execlists->active;
+	while ((last = *active) && completed(last))
+		active++;
+
+	if (last) {
+		if (need_preempt(engine, last)) {
+			ENGINE_TRACE(engine,
+				     "preempting last=%llx:%lld, prio=%d, hint=%d\n",
+				     last->fence.context,
+				     last->fence.seqno,
+				     last->sched.attr.priority,
+				     execlists->queue_priority_hint);
+			record_preemption(execlists);
+
+			/*
+			 * Don't let the RING_HEAD advance past the breadcrumb
+			 * as we unwind (and until we resubmit) so that we do
+			 * not accidentally tell it to go backwards.
+			 */
+			ring_set_paused(engine, 1);
+
+			/*
+			 * Note that we have not stopped the GPU at this point,
+			 * so we are unwinding the incomplete requests as they
+			 * remain inflight and so by the time we do complete
+			 * the preemption, some of the unwound requests may
+			 * complete!
+			 */
+			__unwind_incomplete_requests(engine);
+
+			last = NULL;
+		} else if (timeslice_expired(engine, last)) {
+			ENGINE_TRACE(engine,
+				     "expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
+				     yesno(timer_expired(&execlists->timer)),
+				     last->fence.context, last->fence.seqno,
+				     rq_prio(last),
+				     execlists->queue_priority_hint,
+				     yesno(timeslice_yield(execlists, last)));
+
+			/*
+			 * Consume this timeslice; ensure we start a new one.
+			 *
+			 * The timeslice expired, and we will unwind the
+			 * running contexts and recompute the next ELSP.
+			 * If that submit will be the same pair of contexts
+			 * (due to dependency ordering), we will skip the
+			 * submission. If we don't cancel the timer now,
+			 * we will see that the timer has expired and
+			 * reschedule the tasklet; continually until the
+	 * next context switch or other preemption event.
+			 *
+			 * Since we have decided to reschedule based on
+			 * consumption of this timeslice, if we submit the
+			 * same context again, grant it a full timeslice.
+			 */
+			cancel_timer(&execlists->timer);
+			ring_set_paused(engine, 1);
+			defer_active(engine);
+
+			/*
+			 * Unlike for preemption, if we rewind and continue
+			 * executing the same context as previously active,
+			 * the order of execution will remain the same and
+			 * the tail will only advance. We do not need to
+			 * force a full context restore, as a lite-restore
+			 * is sufficient to resample the monotonic TAIL.
+			 *
+			 * If we switch to any other context, similarly we
+			 * will not rewind TAIL of current context, and
+			 * normal save/restore will preserve state and allow
+			 * us to later continue executing the same request.
+			 */
+			last = NULL;
+		} else {
+			/*
+			 * Otherwise if we already have a request pending
+			 * for execution after the current one, we can
+			 * just wait until the next CS event before
+			 * queuing more. In either case we will force a
+			 * lite-restore preemption event, but if we wait
+			 * we hopefully coalesce several updates into a single
+			 * submission.
+			 */
+			if (active[1]) {
+				/*
+				 * Even if ELSP[1] is occupied and not worthy
+				 * of timeslices, our queue might be.
+				 */
+				spin_unlock(&engine->active.lock);
+				return;
+			}
+		}
+	}
+
+	/* XXX virtual is always taking precedence */
+	while ((ve = first_virtual_engine(engine))) {
+		struct i915_request *rq;
+
+		spin_lock(&ve->base.active.lock);
+
+		rq = ve->request;
+		if (unlikely(!virtual_matches(ve, rq, engine)))
+			goto unlock; /* lost the race to a sibling */
+
+		GEM_BUG_ON(rq->engine != &ve->base);
+		GEM_BUG_ON(rq->context != &ve->context);
+
+		if (unlikely(rq_prio(rq) < queue_prio(execlists))) {
+			spin_unlock(&ve->base.active.lock);
+			break;
+		}
+
+		if (last && !can_merge_rq(last, rq)) {
+			spin_unlock(&ve->base.active.lock);
+			spin_unlock(&engine->active.lock);
+			return; /* leave this for another sibling */
+		}
+
+		ENGINE_TRACE(engine,
+			     "virtual rq=%llx:%lld%s, new engine? %s\n",
+			     rq->fence.context,
+			     rq->fence.seqno,
+			     __i915_request_is_complete(rq) ? "!" :
+			     __i915_request_has_started(rq) ? "*" :
+			     "",
+			     yesno(engine != ve->siblings[0]));
+
+		WRITE_ONCE(ve->request, NULL);
+		WRITE_ONCE(ve->base.execlists.queue_priority_hint, INT_MIN);
+
+		rb = &ve->nodes[engine->id].rb;
+		rb_erase_cached(rb, &execlists->virtual);
+		RB_CLEAR_NODE(rb);
+
+		GEM_BUG_ON(!(rq->execution_mask & engine->mask));
+		WRITE_ONCE(rq->engine, engine);
+
+		if (__i915_request_submit(rq)) {
+			/*
+			 * Only after we confirm that we will submit
+			 * this request (i.e. it has not already
+			 * completed), do we want to update the context.
+			 *
+			 * This serves two purposes. It avoids
+			 * unnecessary work if we are resubmitting an
+			 * already completed request after timeslicing.
+			 * But more importantly, it prevents us altering
+			 * ve->siblings[] on an idle context, where
+			 * we may be using ve->siblings[] in
+			 * virtual_context_enter / virtual_context_exit.
+			 */
+			virtual_xfer_context(ve, engine);
+			GEM_BUG_ON(ve->siblings[0] != engine);
+
+			submit = true;
+			last = rq;
+		}
+
+		i915_request_put(rq);
+unlock:
+		spin_unlock(&ve->base.active.lock);
+
+		/*
+		 * Hmm, we have a bunch of virtual engine requests,
+		 * but the first one was already completed (thanks
+		 * preempt-to-busy!). Keep looking at the veng queue
+		 * until we have no more relevant requests (i.e.
+		 * the normal submit queue has higher priority).
+		 */
+		if (submit)
+			break;
+	}
+
+	while ((rb = rb_first_cached(&execlists->queue))) {
+		struct i915_priolist *p = to_priolist(rb);
+		struct i915_request *rq, *rn;
+		int i;
+
+		priolist_for_each_request_consume(rq, rn, p, i) {
+			bool merge = true;
+
+			/*
+			 * Can we combine this request with the current port?
+			 * It has to be the same context/ringbuffer and not
+			 * have any exceptions (e.g. GVT saying never to
+			 * combine contexts).
+			 *
+			 * If we can combine the requests, we can execute both
+			 * by updating the RING_TAIL to point to the end of the
+			 * second request, and so we never need to tell the
+			 * hardware about the first.
+			 */
+			if (last && !can_merge_rq(last, rq)) {
+				/*
+				 * If we are on the second port and cannot
+				 * combine this request with the last, then we
+				 * are done.
+				 */
+				if (port == last_port)
+					goto done;
+
+				/*
+				 * We must not populate both ELSP[] with the
+				 * same LRCA, i.e. we must submit 2 different
+				 * contexts if we submit 2 ELSP.
+				 */
+				if (last->context == rq->context)
+					goto done;
+
+				if (i915_request_has_sentinel(last))
+					goto done;
+
+				/*
+				 * We avoid submitting virtual requests into
+				 * the secondary ports so that we can migrate
+				 * the request immediately to another engine
+				 * rather than wait for the primary request.
+				 */
+				if (rq->execution_mask != engine->mask)
+					goto done;
+
+				/*
+				 * If GVT overrides us we only ever submit
+				 * port[0], leaving port[1] empty. Note that we
+				 * also have to be careful that we don't queue
+				 * the same context (even though a different
+				 * request) to the second port.
+				 */
+				if (ctx_single_port_submission(last->context) ||
+				    ctx_single_port_submission(rq->context))
+					goto done;
+
+				merge = false;
+			}
+
+			if (__i915_request_submit(rq)) {
+				if (!merge) {
+					*port++ = i915_request_get(last);
+					last = NULL;
+				}
+
+				GEM_BUG_ON(last &&
+					   !can_merge_ctx(last->context,
+							  rq->context));
+				GEM_BUG_ON(last &&
+					   i915_seqno_passed(last->fence.seqno,
+							     rq->fence.seqno));
+
+				submit = true;
+				last = rq;
+			}
+		}
+
+		rb_erase_cached(&p->node, &execlists->queue);
+		i915_priolist_free(p);
+	}
+done:
+	*port++ = i915_request_get(last);
+
+	/*
+	 * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
+	 *
+	 * We choose the priority hint such that if we add a request of greater
+	 * priority than this, we kick the submission tasklet to decide on
+	 * the right order of submitting the requests to hardware. We must
+	 * also be prepared to reorder requests as they are in-flight on the
+	 * HW. We derive the priority hint then as the first "hole" in
+	 * the HW submission ports and if there are no available slots,
+	 * the priority of the lowest executing request, i.e. last.
+	 *
+	 * When we do receive a higher priority request ready to run from the
+	 * user, see queue_request(), the priority hint is bumped to that
+	 * request triggering preemption on the next dequeue (or subsequent
+	 * interrupt for secondary ports).
+	 */
+	execlists->queue_priority_hint = queue_prio(execlists);
+	spin_unlock(&engine->active.lock);
+
+	/*
+	 * We can skip poking the HW if we ended up with exactly the same set
+	 * of requests as currently running, e.g. trying to timeslice a pair
+	 * of ordered contexts.
+	 */
+	if (submit &&
+	    memcmp(active,
+		   execlists->pending,
+		   (port - execlists->pending) * sizeof(*port))) {
+		*port = NULL;
+		while (port-- != execlists->pending)
+			execlists_schedule_in(*port, port - execlists->pending);
+
+		WRITE_ONCE(execlists->yield, -1);
+		set_preempt_timeout(engine, *active);
+		execlists_submit_ports(engine);
+	} else {
+		ring_set_paused(engine, 0);
+		while (port-- != execlists->pending)
+			i915_request_put(*port);
+		*execlists->pending = NULL;
+	}
+}
+
+static void execlists_dequeue_irq(struct intel_engine_cs *engine)
+{
+	local_irq_disable(); /* Suspend interrupts across request submission */
+	execlists_dequeue(engine);
+	local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
+}
+
+static void clear_ports(struct i915_request **ports, int count)
+{
+	memset_p((void **)ports, NULL, count);
+}
+
+static void
+copy_ports(struct i915_request **dst, struct i915_request **src, int count)
+{
+	/* A memcpy_p() would be very useful here! */
+	while (count--)
+		WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
+}
+
+static struct i915_request **
+cancel_port_requests(struct intel_engine_execlists * const execlists,
+		     struct i915_request **inactive)
+{
+	struct i915_request * const *port;
+
+	for (port = execlists->pending; *port; port++)
+		*inactive++ = *port;
+	clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
+
+	/* Mark the end of active before we overwrite *active */
+	for (port = xchg(&execlists->active, execlists->pending); *port; port++)
+		*inactive++ = *port;
+	clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+
+	smp_wmb(); /* complete the seqlock for execlists_active() */
+	WRITE_ONCE(execlists->active, execlists->inflight);
+
+	/* Having cancelled all outstanding process_csb(), stop their timers */
+	GEM_BUG_ON(execlists->pending[0]);
+	cancel_timer(&execlists->timer);
+	cancel_timer(&execlists->preempt);
+
+	return inactive;
+}
+
+static void invalidate_csb_entries(const u64 *first, const u64 *last)
+{
+	clflush((void *)first);
+	clflush((void *)last);
+}
+
+/*
+ * Starting with Gen12, the status has a new format:
+ *
+ *     bit  0:     switched to new queue
+ *     bit  1:     reserved
+ *     bit  2:     semaphore wait mode (poll or signal), only valid when
+ *                 switch detail is set to "wait on semaphore"
+ *     bits 3-5:   engine class
+ *     bits 6-11:  engine instance
+ *     bits 12-14: reserved
+ *     bits 15-25: sw context id of the lrc the GT switched to
+ *     bits 26-31: sw counter of the lrc the GT switched to
+ *     bits 32-35: context switch detail
+ *                  - 0: ctx complete
+ *                  - 1: wait on sync flip
+ *                  - 2: wait on vblank
+ *                  - 3: wait on scanline
+ *                  - 4: wait on semaphore
+ *                  - 5: context preempted (not on SEMAPHORE_WAIT or
+ *                       WAIT_FOR_EVENT)
+ *     bit  36:    reserved
+ *     bits 37-43: wait detail (for switch detail 1 to 4)
+ *     bits 44-46: reserved
+ *     bits 47-57: sw context id of the lrc the GT switched away from
+ *     bits 58-63: sw counter of the lrc the GT switched away from
+ */
+static bool gen12_csb_parse(const u64 csb)
+{
+	bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb));
+	bool new_queue =
+		lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
+
+	/*
+	 * The context switch detail is not guaranteed to be 5 when a preemption
+	 * occurs, so we can't just check for that. The check below works for
+	 * all the cases we care about, including preemptions of WAIT
+	 * instructions and lite-restore. Preempt-to-idle via the CTRL register
+	 * would require some extra handling, but we don't support that.
+	 */
+	if (!ctx_away_valid || new_queue) {
+		GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(csb)));
+		return true;
+	}
+
+	/*
+	 * switch detail = 5 is covered by the case above and we do not expect a
+	 * context switch on an unsuccessful wait instruction since we always
+	 * use polling mode.
+	 */
+	GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb)));
+	return false;
+}
+
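+/* Gen8-11: an idle->active or preemption event indicates ELSP promotion */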
+static bool gen8_csb_parse(const u64 csb)
+{
+	return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
+}
+
+static noinline u64
+wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
+{
+	u64 entry;
+
+	/*
+	 * Reading from the HWSP has one particular advantage: we can detect
+	 * a stale entry. Since the write into HWSP is broken, we have no reason
+	 * to trust the HW at all, the mmio entry may equally be unordered, so
+	 * we prefer the path that is self-checking and as a last resort,
+	 * return the mmio value.
+	 *
+	 * tgl,dg1:HSDES#22011327657
+	 */
+	preempt_disable();
+	if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) {
+		int idx = csb - engine->execlists.csb_status;
+		int status;
+
+		status = GEN8_EXECLISTS_STATUS_BUF;
+		if (idx >= 6) {
+			status = GEN11_EXECLISTS_STATUS_BUF2;
+			idx -= 6;
+		}
+		status += sizeof(u64) * idx;
+
+		entry = intel_uncore_read64(engine->uncore,
+					    _MMIO(engine->mmio_base + status));
+	}
+	preempt_enable();
+
+	return entry;
+}
+
+static u64 csb_read(const struct intel_engine_cs *engine, u64 * const csb)
+{
+	u64 entry = READ_ONCE(*csb);
+
+	/*
+	 * Unfortunately, the GPU does not always serialise its write
+	 * of the CSB entries before its write of the CSB pointer, at least
+	 * from the perspective of the CPU, using what is known as a Global
+	 * Observation Point. We may read a new CSB tail pointer, but then
+	 * read the stale CSB entries, causing us to misinterpret the
+	 * context-switch events, and eventually declare the GPU hung.
+	 *
+	 * icl:HSDES#1806554093
+	 * tgl:HSDES#22011248461
+	 */
+	if (unlikely(entry == -1))
+		entry = wa_csb_read(engine, csb);
+
+	/* Consume this entry so that we can spot its future reuse. */
+	WRITE_ONCE(*csb, -1);
+
+	/* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */
+	return entry;
+}
+
+static void new_timeslice(struct intel_engine_execlists *el)
+{
+	/* By cancelling, we will start afresh in start_timeslice() */
+	cancel_timer(&el->timer);
+}
+
+static struct i915_request **
+process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	u64 * const buf = execlists->csb_status;
+	const u8 num_entries = execlists->csb_size;
+	struct i915_request **prev;
+	u8 head, tail;
+
+	/*
+	 * As we modify our execlists state tracking we require exclusive
+	 * access. Either we are inside the tasklet, or the tasklet is disabled
+	 * and we assume that is only inside the reset paths and so serialised.
+	 */
+	GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
+		   !reset_in_progress(execlists));
+	GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
+
+	/*
+	 * Note that csb_write, csb_status may be either in HWSP or mmio.
+	 * When reading from the csb_write mmio register, we have to be
+	 * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
+	 * the low 4bits. As it happens we know the next 4bits are always
+	 * zero and so we can simply mask off the low u8 of the register
+	 * and treat it identically to reading from the HWSP (without having
+	 * to use explicit shifting and masking, and probably bifurcating
+	 * the code to handle the legacy mmio read).
+	 */
+	head = execlists->csb_head;
+	tail = READ_ONCE(*execlists->csb_write);
+	if (unlikely(head == tail))
+		return inactive;
+
+	/*
+	 * We will consume all events from HW, or at least pretend to.
+	 *
+	 * The sequence of events from the HW is deterministic, and derived
+	 * from our writes to the ELSP, with a smidgen of variability for
+	 * the arrival of the asynchronous requests wrt to the inflight
+	 * execution. If the HW sends an event that does not correspond with
+	 * the one we are expecting, we have to abandon all hope as we lose
+	 * all tracking of what the engine is actually executing. We will
+	 * only detect we are out of sequence with the HW when we get an
+	 * 'impossible' event because we have already drained our own
+	 * preemption/promotion queue. If this occurs, we know that we likely
+	 * lost track of execution earlier and must unwind and restart. The
+	 * simplest way is to stop processing the event queue and force the
+	 * engine to reset.
+	 */
+	execlists->csb_head = tail;
+	ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
+
+	/*
+	 * Hopefully paired with a wmb() in HW!
+	 *
+	 * We must complete the read of the write pointer before any reads
+	 * from the CSB, so that we do not see stale values. Without an rmb
+	 * (lfence) the HW may speculatively perform the CSB[] reads *before*
+	 * we perform the READ_ONCE(*csb_write).
+	 */
+	rmb();
+
+	/* Remember who was last running under the timer */
+	prev = inactive;
+	*prev = NULL;
+
+	do {
+		bool promote;
+		u64 csb;
+
+		if (++head == num_entries)
+			head = 0;
+
+		/*
+		 * We are flying near dragons again.
+		 *
+		 * We hold a reference to the request in execlist_port[]
+		 * but no more than that. We are operating in softirq
+		 * context and so cannot hold any mutex or sleep. That
+		 * prevents us stopping the requests we are processing
+		 * in port[] from being retired simultaneously (the
+		 * breadcrumb will be complete before we see the
+		 * context-switch). As we only hold the reference to the
+		 * request, any pointer chasing underneath the request
+		 * is subject to a potential use-after-free. Thus we
+		 * store all of the bookkeeping within port[] as
+		 * required, and avoid using unguarded pointers beneath
+		 * request itself. The same applies to the atomic
+		 * status notifier.
+		 */
+
+		csb = csb_read(engine, buf + head);
+		ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
+			     head, upper_32_bits(csb), lower_32_bits(csb));
+
+		if (INTEL_GEN(engine->i915) >= 12)
+			promote = gen12_csb_parse(csb);
+		else
+			promote = gen8_csb_parse(csb);
+		if (promote) {
+			struct i915_request * const *old = execlists->active;
+
+			if (GEM_WARN_ON(!*execlists->pending)) {
+				execlists->error_interrupt |= ERROR_CSB;
+				break;
+			}
+
+			ring_set_paused(engine, 0);
+
+			/* Point active to the new ELSP; prevent overwriting */
+			WRITE_ONCE(execlists->active, execlists->pending);
+			smp_wmb(); /* notify execlists_active() */
+
+			/* cancel old inflight, prepare for switch */
+			trace_ports(execlists, "preempted", old);
+			while (*old)
+				*inactive++ = *old++;
+
+			/* switch pending to inflight */
+			GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
+			copy_ports(execlists->inflight,
+				   execlists->pending,
+				   execlists_num_ports(execlists));
+			smp_wmb(); /* complete the seqlock */
+			WRITE_ONCE(execlists->active, execlists->inflight);
+
+			/* XXX Magic delay for tgl */
+			ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+			WRITE_ONCE(execlists->pending[0], NULL);
+		} else {
+			if (GEM_WARN_ON(!*execlists->active)) {
+				execlists->error_interrupt |= ERROR_CSB;
+				break;
+			}
+
+			/* port0 completed, advanced to port1 */
+			trace_ports(execlists, "completed", execlists->active);
+
+			/*
+			 * We rely on the hardware being strongly
+			 * ordered, that the breadcrumb write is
+			 * coherent (visible from the CPU) before the
+			 * user interrupt is processed. One might assume
+			 * that the breadcrumb write being before the
+			 * user interrupt and the CS event for the context
+			 * switch would therefore be before the CS event
+			 * itself...
+			 */
+			if (GEM_SHOW_DEBUG() &&
+			    !__i915_request_is_complete(*execlists->active)) {
+				struct i915_request *rq = *execlists->active;
+				const u32 *regs __maybe_unused =
+					rq->context->lrc_reg_state;
+
+				ENGINE_TRACE(engine,
+					     "context completed before request!\n");
+				ENGINE_TRACE(engine,
+					     "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
+					     ENGINE_READ(engine, RING_START),
+					     ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
+					     ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
+					     ENGINE_READ(engine, RING_CTL),
+					     ENGINE_READ(engine, RING_MI_MODE));
+				ENGINE_TRACE(engine,
+					     "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
+					     i915_ggtt_offset(rq->ring->vma),
+					     rq->head, rq->tail,
+					     rq->fence.context,
+					     lower_32_bits(rq->fence.seqno),
+					     hwsp_seqno(rq));
+				ENGINE_TRACE(engine,
+					     "ctx:{start:%08x, head:%04x, tail:%04x}, ",
+					     regs[CTX_RING_START],
+					     regs[CTX_RING_HEAD],
+					     regs[CTX_RING_TAIL]);
+			}
+
+			*inactive++ = *execlists->active++;
+
+			GEM_BUG_ON(execlists->active - execlists->inflight >
+				   execlists_num_ports(execlists));
+		}
+	} while (head != tail);
+
+	/*
+	 * Gen11 has proven to fail wrt global observation point between
+	 * entry and tail update, failing on the ordering and thus
+	 * we see an old entry in the context status buffer.
+	 *
+	 * Forcibly evict out entries for the next gpu csb update,
+	 * to increase the odds that we get fresh entries with non
+	 * working hardware. The cost for doing so mostly comes out in
+	 * the wash, as the hardware, working or not, will need to do
+	 * the invalidation beforehand.
+	 */
+	invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
+
+	/*
+	 * We assume that any event reflects a change in context flow
+	 * and merits a fresh timeslice. We reinstall the timer after
+	 * inspecting the queue to see if we need to resubmit.
+	 */
+	if (*prev != *execlists->active) /* elide lite-restores */
+		new_timeslice(execlists);
+
+	return inactive;
+}
+
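+/* Schedule out the requests collected during process_csb() */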
+static void post_process_csb(struct i915_request **port,
+			     struct i915_request **last)
+{
+	while (port != last)
+		execlists_schedule_out(*port++);
+}
+
+static void __execlists_hold(struct i915_request *rq)
+{
+	LIST_HEAD(list);
+
+	do {
+		struct i915_dependency *p;
+
+		if (i915_request_is_active(rq))
+			__i915_request_unsubmit(rq);
+
+		clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+		list_move_tail(&rq->sched.link, &rq->engine->active.hold);
+		i915_request_set_hold(rq);
+		RQ_TRACE(rq, "on hold\n");
+
+		for_each_waiter(p, rq) {
+			struct i915_request *w =
+				container_of(p->waiter, typeof(*w), sched);
+
+			if (p->flags & I915_DEPENDENCY_WEAK)
+				continue;
+
+			/* Leave semaphores spinning on the other engines */
+			if (w->engine != rq->engine)
+				continue;
+
+			if (!i915_request_is_ready(w))
+				continue;
+
+			if (__i915_request_is_complete(w))
+				continue;
+
+			if (i915_request_on_hold(w))
+				continue;
+
+			list_move_tail(&w->sched.link, &list);
+		}
+
+		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+	} while (rq);
+}
+
+static bool execlists_hold(struct intel_engine_cs *engine,
+			   struct i915_request *rq)
+{
+	if (i915_request_on_hold(rq))
+		return false;
+
+	spin_lock_irq(&engine->active.lock);
+
+	if (__i915_request_is_complete(rq)) { /* too late! */
+		rq = NULL;
+		goto unlock;
+	}
+
+	/*
+	 * Transfer this request onto the hold queue to prevent it
+	 * being resubmitted to HW (and potentially completed) before we have
+	 * released it. Since we may have already submitted following
+	 * requests, we need to remove those as well.
+	 */
+	GEM_BUG_ON(i915_request_on_hold(rq));
+	GEM_BUG_ON(rq->engine != engine);
+	__execlists_hold(rq);
+	GEM_BUG_ON(list_empty(&engine->active.hold));
+
+unlock:
+	spin_unlock_irq(&engine->active.lock);
+	return rq;
+}
+
+static bool hold_request(const struct i915_request *rq)
+{
+	struct i915_dependency *p;
+	bool result = false;
+
+	/*
+	 * If one of our ancestors is on hold, we must also be on hold,
+	 * otherwise we will bypass it and execute before it.
+	 */
+	rcu_read_lock();
+	for_each_signaler(p, rq) {
+		const struct i915_request *s =
+			container_of(p->signaler, typeof(*s), sched);
+
+		if (s->engine != rq->engine)
+			continue;
+
+		result = i915_request_on_hold(s);
+		if (result)
+			break;
+	}
+	rcu_read_unlock();
+
+	return result;
+}
+
+static void __execlists_unhold(struct i915_request *rq)
+{
+	LIST_HEAD(list);
+
+	do {
+		struct i915_dependency *p;
+
+		RQ_TRACE(rq, "hold release\n");
+
+		GEM_BUG_ON(!i915_request_on_hold(rq));
+		GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+
+		i915_request_clear_hold(rq);
+		list_move_tail(&rq->sched.link,
+			       i915_sched_lookup_priolist(rq->engine,
+							  rq_prio(rq)));
+		set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+		/* Also release any children on this engine that are ready */
+		for_each_waiter(p, rq) {
+			struct i915_request *w =
+				container_of(p->waiter, typeof(*w), sched);
+
+			if (p->flags & I915_DEPENDENCY_WEAK)
+				continue;
+
+			/* Propagate any change in error status */
+			if (rq->fence.error)
+				i915_request_set_error_once(w, rq->fence.error);
+
+			if (w->engine != rq->engine)
+				continue;
+
+			if (!i915_request_on_hold(w))
+				continue;
+
+			/* Check that no other parents are also on hold */
+			if (hold_request(w))
+				continue;
+
+			list_move_tail(&w->sched.link, &list);
+		}
+
+		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+	} while (rq);
+}
+
+static void execlists_unhold(struct intel_engine_cs *engine,
+			     struct i915_request *rq)
+{
+	spin_lock_irq(&engine->active.lock);
+
+	/*
+	 * Move this request back to the priority queue, and all of its
+	 * children and grandchildren that were suspended along with it.
+	 */
+	__execlists_unhold(rq);
+
+	if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
+		engine->execlists.queue_priority_hint = rq_prio(rq);
+		tasklet_hi_schedule(&engine->execlists.tasklet);
+	}
+
+	spin_unlock_irq(&engine->active.lock);
+}
+
+struct execlists_capture {
+	struct work_struct work;
+	struct i915_request *rq;
+	struct i915_gpu_coredump *error;
+};
+
+static void execlists_capture_work(struct work_struct *work)
+{
+	struct execlists_capture *cap = container_of(work, typeof(*cap), work);
+	const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+	struct intel_engine_cs *engine = cap->rq->engine;
+	struct intel_gt_coredump *gt = cap->error->gt;
+	struct intel_engine_capture_vma *vma;
+
+	/* Compress all the objects attached to the request, slow! */
+	vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
+	if (vma) {
+		struct i915_vma_compress *compress =
+			i915_vma_capture_prepare(gt);
+
+		intel_engine_coredump_add_vma(gt->engine, vma, compress);
+		i915_vma_capture_finish(gt, compress);
+	}
+
+	gt->simulated = gt->engine->simulated;
+	cap->error->simulated = gt->simulated;
+
+	/* Publish the error state, and announce it to the world */
+	i915_error_state_store(cap->error);
+	i915_gpu_coredump_put(cap->error);
+
+	/* Return this request and all that depend upon it for signaling */
+	execlists_unhold(engine, cap->rq);
+	i915_request_put(cap->rq);
+
+	kfree(cap);
+}
+
+static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
+{
+	const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+	struct execlists_capture *cap;
+
+	cap = kmalloc(sizeof(*cap), gfp);
+	if (!cap)
+		return NULL;
+
+	cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
+	if (!cap->error)
+		goto err_cap;
+
+	cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
+	if (!cap->error->gt)
+		goto err_gpu;
+
+	cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
+	if (!cap->error->gt->engine)
+		goto err_gt;
+
+	cap->error->gt->engine->hung = true;
+
+	return cap;
+
+err_gt:
+	kfree(cap->error->gt);
+err_gpu:
+	kfree(cap->error);
+err_cap:
+	kfree(cap);
+	return NULL;
+}
+
+static struct i915_request *
+active_context(struct intel_engine_cs *engine, u32 ccid)
+{
+	const struct intel_engine_execlists * const el = &engine->execlists;
+	struct i915_request * const *port, *rq;
+
+	/*
+	 * Use the most recent result from process_csb(), but just in case
+	 * we trigger an error (via interrupt) before the first CS event has
+	 * been written, peek at the next submission.
+	 */
+
+	for (port = el->active; (rq = *port); port++) {
+		if (rq->context->lrc.ccid == ccid) {
+			ENGINE_TRACE(engine,
+				     "ccid:%x found at active:%zd\n",
+				     ccid, port - el->active);
+			return rq;
+		}
+	}
+
+	for (port = el->pending; (rq = *port); port++) {
+		if (rq->context->lrc.ccid == ccid) {
+			ENGINE_TRACE(engine,
+				     "ccid:%x found at pending:%zd\n",
+				     ccid, port - el->pending);
+			return rq;
+		}
+	}
+
+	ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
+	return NULL;
+}
+
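+/* ccid of the context currently active on the HW (EXECLIST_STATUS upper dword) */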
+static u32 active_ccid(struct intel_engine_cs *engine)
+{
+	return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
+}
+
+static void execlists_capture(struct intel_engine_cs *engine)
+{
+	struct execlists_capture *cap;
+
+	if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
+		return;
+
+	/*
+	 * We need to _quickly_ capture the engine state before we reset.
+	 * We are inside an atomic section (softirq) here and we are delaying
+	 * the forced preemption event.
+	 */
+	cap = capture_regs(engine);
+	if (!cap)
+		return;
+
+	spin_lock_irq(&engine->active.lock);
+	cap->rq = active_context(engine, active_ccid(engine));
+	if (cap->rq) {
+		cap->rq = active_request(cap->rq->context->timeline, cap->rq);
+		cap->rq = i915_request_get_rcu(cap->rq);
+	}
+	spin_unlock_irq(&engine->active.lock);
+	if (!cap->rq)
+		goto err_free;
+
+	/*
+	 * Remove the request from the execlists queue, and take ownership
+	 * of the request. We pass it to our worker who will _slowly_ compress
+	 * all the pages the _user_ requested for debugging their batch, after
+	 * which we return it to the queue for signaling.
+	 *
+	 * By removing them from the execlists queue, we also remove the
+	 * requests from being processed by __unwind_incomplete_requests()
+	 * during the intel_engine_reset(), and so they will *not* be replayed
+	 * afterwards.
+	 *
+	 * Note that because we have not yet reset the engine at this point,
+	 * it is possible that the request we have identified as being
+	 * guilty did in fact complete and we will then hit an arbitration
+	 * point allowing the outstanding preemption to succeed. The likelihood
+	 * of that is very low (as capturing of the engine registers should be
+	 * fast enough to run inside an irq-off atomic section!), so we will
+	 * simply hold that request accountable for being non-preemptible
+	 * long enough to force the reset.
+	 */
+	if (!execlists_hold(engine, cap->rq))
+		goto err_rq;
+
+	INIT_WORK(&cap->work, execlists_capture_work);
+	schedule_work(&cap->work);
+	return;
+
+err_rq:
+	i915_request_put(cap->rq);
+err_free:
+	i915_gpu_coredump_put(cap->error);
+	kfree(cap);
+}
+
+static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
+{
+	const unsigned int bit = I915_RESET_ENGINE + engine->id;
+	unsigned long *lock = &engine->gt->reset.flags;
+
+	if (!intel_has_reset_engine(engine->gt))
+		return;
+
+	if (test_and_set_bit(bit, lock))
+		return;
+
+	ENGINE_TRACE(engine, "reset for %s\n", msg);
+
+	/* Mark this tasklet as disabled to avoid waiting for it to complete */
+	tasklet_disable_nosync(&engine->execlists.tasklet);
+
+	ring_set_paused(engine, 1); /* Freeze the current request in place */
+	execlists_capture(engine);
+	intel_engine_reset(engine, msg);
+
+	tasklet_enable(&engine->execlists.tasklet);
+	clear_and_wake_up_bit(bit, lock);
+}
+
+static bool preempt_timeout(const struct intel_engine_cs *const engine)
+{
+	const struct timer_list *t = &engine->execlists.preempt;
+
+	if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+		return false;
+
+	if (!timer_expired(t))
+		return false;
+
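+	/* The timeout only matters while we still await the preemption ACK */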
+	return engine->execlists.pending[0];
+}
+
+/*
+ * Check the unread Context Status Buffers and manage the submission of new
+ * contexts to the ELSP accordingly.
+ */
+static void execlists_submission_tasklet(unsigned long data)
+{
+	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+	struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+	struct i915_request **inactive;
+
+	rcu_read_lock();
+	inactive = process_csb(engine, post);
+	GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
+
+	if (unlikely(preempt_timeout(engine))) {
+		cancel_timer(&engine->execlists.preempt);
+		engine->execlists.error_interrupt |= ERROR_PREEMPT;
+	}
+
+	if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
+		const char *msg;
+
+		/* Generate the error message in priority wrt the user! */
+		if (engine->execlists.error_interrupt & GENMASK(15, 0))
+			msg = "CS error"; /* thrown by a user payload */
+		else if (engine->execlists.error_interrupt & ERROR_CSB)
+			msg = "invalid CSB event";
+		else if (engine->execlists.error_interrupt & ERROR_PREEMPT)
+			msg = "preemption time out";
+		else
+			msg = "internal error";
+
+		engine->execlists.error_interrupt = 0;
+		execlists_reset(engine, msg);
+	}
+
+	if (!engine->execlists.pending[0]) {
+		execlists_dequeue_irq(engine);
+		start_timeslice(engine);
+	}
+
+	post_process_csb(post, inactive);
+	rcu_read_unlock();
+}
+
+static void __execlists_kick(struct intel_engine_execlists *execlists)
+{
+	/* Kick the tasklet for some interrupt coalescing and reset handling */
+	tasklet_hi_schedule(&execlists->tasklet);
+}
+
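+/* Resolve an expired timer back to its container intel_engine_execlists */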
+#define execlists_kick(t, member) \
+	__execlists_kick(container_of(t, struct intel_engine_execlists, member))
+
+static void execlists_timeslice(struct timer_list *timer)
+{
+	execlists_kick(timer, timer);
+}
+
+static void execlists_preempt(struct timer_list *timer)
+{
+	execlists_kick(timer, preempt);
+}
+
+static void queue_request(struct intel_engine_cs *engine,
+			  struct i915_request *rq)
+{
+	GEM_BUG_ON(!list_empty(&rq->sched.link));
+	list_add_tail(&rq->sched.link,
+		      i915_sched_lookup_priolist(engine, rq_prio(rq)));
+	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
+static bool submit_queue(struct intel_engine_cs *engine,
+			 const struct i915_request *rq)
+{
+	struct intel_engine_execlists *execlists = &engine->execlists;
+
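+	/* Only kick the tasklet if this request may preempt the last submission */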
+	if (rq_prio(rq) <= execlists->queue_priority_hint)
+		return false;
+
+	execlists->queue_priority_hint = rq_prio(rq);
+	return true;
+}
+
+static bool ancestor_on_hold(const struct intel_engine_cs *engine,
+			     const struct i915_request *rq)
+{
+	GEM_BUG_ON(i915_request_on_hold(rq));
+	return !list_empty(&engine->active.hold) && hold_request(rq);
+}
+
+static void execlists_submit_request(struct i915_request *request)
+{
+	struct intel_engine_cs *engine = request->engine;
+	unsigned long flags;
+
+	/* Will be called from irq-context when using foreign fences. */
+	spin_lock_irqsave(&engine->active.lock, flags);
+
+	if (unlikely(ancestor_on_hold(engine, request))) {
+		RQ_TRACE(request, "ancestor on hold\n");
+		list_add_tail(&request->sched.link, &engine->active.hold);
+		i915_request_set_hold(request);
+	} else {
+		queue_request(engine, request);
+
+		GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+		GEM_BUG_ON(list_empty(&request->sched.link));
+
+		if (submit_queue(engine, request))
+			__execlists_kick(&engine->execlists);
+	}
+
+	spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+static int execlists_context_pre_pin(struct intel_context *ce,
+				     struct i915_gem_ww_ctx *ww,
+				     void **vaddr)
+{
+	return lrc_pre_pin(ce, ce->engine, ww, vaddr);
+}
+
+static int execlists_context_pin(struct intel_context *ce, void *vaddr)
+{
+	return lrc_pin(ce, ce->engine, vaddr);
+}
+
+static int execlists_context_alloc(struct intel_context *ce)
+{
+	return lrc_alloc(ce, ce->engine);
+}
+
+static const struct intel_context_ops execlists_context_ops = {
+	.flags = COPS_HAS_INFLIGHT,
+
+	.alloc = execlists_context_alloc,
+
+	.pre_pin = execlists_context_pre_pin,
+	.pin = execlists_context_pin,
+	.unpin = lrc_unpin,
+	.post_unpin = lrc_post_unpin,
+
+	.enter = intel_context_enter_engine,
+	.exit = intel_context_exit_engine,
+
+	.reset = lrc_reset,
+	.destroy = lrc_destroy,
+};
+
+static int emit_pdps(struct i915_request *rq)
+{
+	const struct intel_engine_cs * const engine = rq->engine;
+	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
+	int err, i;
+	u32 *cs;
+
+	GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
+
+	/*
+	 * Beware ye of the dragons, this sequence is magic!
+	 *
+	 * Small changes to this sequence can cause anything from
+	 * GPU hangs to forcewake errors and machine lockups!
+	 */
+
+	cs = intel_ring_begin(rq, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(rq, cs);
+
+	/* Flush any residual operations from the context load */
+	err = engine->emit_flush(rq, EMIT_FLUSH);
+	if (err)
+		return err;
+
+	/* Magic required to prevent forcewake errors! */
+	err = engine->emit_flush(rq, EMIT_INVALIDATE);
+	if (err)
+		return err;
+
+	cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	/* Ensure the LRI have landed before we invalidate & continue */
+	*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
+	for (i = GEN8_3LVL_PDPES; i--; ) {
+		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+		u32 base = engine->mmio_base;
+
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
+		*cs++ = upper_32_bits(pd_daddr);
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
+		*cs++ = lower_32_bits(pd_daddr);
+	}
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+static int execlists_request_alloc(struct i915_request *request)
+{
+	int ret;
+
+	GEM_BUG_ON(!intel_context_is_pinned(request->context));
+
+	/*
+	 * Flush enough space to reduce the likelihood of waiting after
+	 * we start building the request - in which case we will just
+	 * have to repeat work.
+	 */
+	request->reserved_space += EXECLISTS_REQUEST_SIZE;
+
+	/*
+	 * Note that after this point, we have committed to using
+	 * this request as it is being used to both track the
+	 * state of engine initialisation and liveness of the
+	 * golden renderstate above. Think twice before you try
+	 * to cancel/unwind this request now.
+	 */
+
+	if (!i915_vm_is_4lvl(request->context->vm)) {
+		ret = emit_pdps(request);
+		if (ret)
+			return ret;
+	}
+
+	/* Unconditionally invalidate GPU caches and TLBs. */
+	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+	if (ret)
+		return ret;
+
+	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+	return 0;
+}
+
+static void reset_csb_pointers(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	const unsigned int reset_value = execlists->csb_size - 1;
+
+	ring_set_paused(engine, 0);
+
+	/*
+	 * Sometimes Icelake forgets to reset its pointers on a GPU reset.
+	 * Bludgeon them with a mmio update to be sure.
+	 */
+	ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+		     0xffff << 16 | reset_value << 8 | reset_value);
+	ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+	/*
+	 * After a reset, the HW starts writing into CSB entry [0]. We
+	 * therefore have to set our HEAD pointer back one entry so that
+	 * the *first* entry we check is entry 0. To complicate this further,
+	 * as we don't wait for the first interrupt after reset, we have to
+	 * fake the HW write to point back to the last entry so that our
+	 * inline comparison of our cached head position against the last HW
+	 * write works even before the first interrupt.
+	 */
+	execlists->csb_head = reset_value;
+	WRITE_ONCE(*execlists->csb_write, reset_value);
+	wmb(); /* Make sure this is visible to HW (paranoia?) */
+
+	/* Check that the GPU does indeed update the CSB entries! */
+	memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
+	invalidate_csb_entries(&execlists->csb_status[0],
+			       &execlists->csb_status[reset_value]);
+
+	/* Once more for luck and our trusty paranoia */
+	ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+		     0xffff << 16 | reset_value << 8 | reset_value);
+	ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+	GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
+}
+
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+	struct intel_timeline *tl;
+
+	list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+		intel_timeline_reset_seqno(tl);
+}
+
+static void execlists_sanitize(struct intel_engine_cs *engine)
+{
+	GEM_BUG_ON(execlists_active(&engine->execlists));
+
+	/*
+	 * Poison residual state on resume, in case the suspend didn't!
+	 *
+	 * We have to assume that across suspend/resume (or other loss
+	 * of control) that the contents of our pinned buffers have been
+	 * lost, replaced by garbage. Since this doesn't always happen,
+	 * let's poison such state so that we more quickly spot when
+	 * we falsely assume it has been preserved.
+	 */
+	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+	reset_csb_pointers(engine);
+
+	/*
+	 * The kernel_context HWSP is stored in the status_page. As above,
+	 * that may be lost on resume/initialisation, and so we need to
+	 * reset the value in the HWSP.
+	 */
+	sanitize_hwsp(engine);
+
+	/* And scrub the dirty cachelines for the HWSP */
+	clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
+static void enable_error_interrupt(struct intel_engine_cs *engine)
+{
+	u32 status;
+
+	engine->execlists.error_interrupt = 0;
+	ENGINE_WRITE(engine, RING_EMR, ~0u);
+	ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
+
+	status = ENGINE_READ(engine, RING_ESR);
+	if (unlikely(status)) {
+		drm_err(&engine->i915->drm,
+			"engine '%s' resumed still in error: %08x\n",
+			engine->name, status);
+		__intel_gt_reset(engine->gt, engine->mask);
+	}
+
+	/*
+	 * On current gen8+, we have 2 signals to play with
+	 *
+	 * - I915_ERROR_INSTRUCTION (bit 0)
+	 *
+	 *    Generate an error if the command parser encounters an invalid
+	 *    instruction
+	 *
+	 *    This is a fatal error.
+	 *
+	 * - CP_PRIV (bit 2)
+	 *
+	 *    Generate an error on privilege violation (where the CP replaces
+	 *    the instruction with a no-op). This also fires for writes into
+	 *    read-only scratch pages.
+	 *
+	 *    This is a non-fatal error, parsing continues.
+	 *
+	 * * there are a few others defined for odd HW that we do not use
+	 *
+	 * Since CP_PRIV fires for cases where we have chosen to ignore the
+	 * error (as the HW is validating and suppressing the mistakes), we
+	 * only unmask the instruction error bit.
+	 */
+	ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
+}
+
+static void enable_execlists(struct intel_engine_cs *engine)
+{
+	u32 mode;
+
+	assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
+	intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
+
+	if (INTEL_GEN(engine->i915) >= 11)
+		mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
+	else
+		mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
+	ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
+
+	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+
+	ENGINE_WRITE_FW(engine,
+			RING_HWS_PGA,
+			i915_ggtt_offset(engine->status_page.vma));
+	ENGINE_POSTING_READ(engine, RING_HWS_PGA);
+
+	enable_error_interrupt(engine);
+}
+
+static bool unexpected_starting_state(struct intel_engine_cs *engine)
+{
+	bool unexpected = false;
+
+	if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
+		drm_dbg(&engine->i915->drm,
+			"STOP_RING still set in RING_MI_MODE\n");
+		unexpected = true;
+	}
+
+	return unexpected;
+}
+
+static int execlists_resume(struct intel_engine_cs *engine)
+{
+	intel_mocs_init_engine(engine);
+
+	intel_breadcrumbs_reset(engine->breadcrumbs);
+
+	if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
+		struct drm_printer p = drm_debug_printer(__func__);
+
+		intel_engine_dump(engine, &p, NULL);
+	}
+
+	enable_execlists(engine);
+
+	return 0;
+}
+
+static void execlists_reset_prepare(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+
+	ENGINE_TRACE(engine, "depth<-%d\n",
+		     atomic_read(&execlists->tasklet.count));
+
+	/*
+	 * Prevent request submission to the hardware until we have
+	 * completed the reset in i915_gem_reset_finish(). If a request
+	 * is completed by one engine, it may then queue a request
+	 * to a second via its execlists->tasklet *just* as we are
+	 * calling engine->resume() and also writing the ELSP.
+	 * Turning off the execlists->tasklet until the reset is over
+	 * prevents the race.
+	 */
+	__tasklet_disable_sync_once(&execlists->tasklet);
+	GEM_BUG_ON(!reset_in_progress(execlists));
+
+	/*
+	 * We stop engines, otherwise we might get failed reset and a
+	 * dead gpu (on elk). Also, even a modern gpu such as kbl can
+	 * suffer a system hang if a batchbuffer is progressing when
+	 * the reset is issued, regardless of READY_TO_RESET ack.
+	 * Thus assume it is best to stop engines on all gens
+	 * where we have a gpu reset.
+	 *
+	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+	 *
+	 * FIXME: Wa for more modern gens needs to be validated
+	 */
+	ring_set_paused(engine, 1);
+	intel_engine_stop_cs(engine);
+
+	engine->execlists.reset_ccid = active_ccid(engine);
+}
+
+static struct i915_request **
+reset_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+
+	mb(); /* paranoia: read the CSB pointers from after the reset */
+	clflush(execlists->csb_write);
+	mb();
+
+	inactive = process_csb(engine, inactive); /* drain preemption events */
+
+	/* Following the reset, we need to reload the CSB read/write pointers */
+	reset_csb_pointers(engine);
+
+	return inactive;
+}
+
+static void
+execlists_reset_active(struct intel_engine_cs *engine, bool stalled)
+{
+	struct intel_context *ce;
+	struct i915_request *rq;
+	u32 head;
+
+	/*
+	 * Save the currently executing context; even if we completed
+	 * its request, it was still running at the time of the
+	 * reset and will have been clobbered.
+	 */
+	rq = active_context(engine, engine->execlists.reset_ccid);
+	if (!rq)
+		return;
+
+	ce = rq->context;
+	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
+
+	if (__i915_request_is_complete(rq)) {
+		/* Idle context; tidy up the ring so we can restart afresh */
+		head = intel_ring_wrap(ce->ring, rq->tail);
+		goto out_replay;
+	}
+
+	/* We still have requests in-flight; the engine should be active */
+	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+	/* Context has requests still in-flight; it should not be idle! */
+	GEM_BUG_ON(i915_active_is_idle(&ce->active));
+
+	rq = active_request(ce->timeline, rq);
+	head = intel_ring_wrap(ce->ring, rq->head);
+	GEM_BUG_ON(head == ce->ring->tail);
+
+	/*
+	 * If this request hasn't started yet, e.g. it is waiting on a
+	 * semaphore, we need to avoid skipping the request or else we
+	 * break the signaling chain. However, if the context is corrupt
+	 * the request will not restart and we will be stuck with a wedged
+	 * device. It is quite often the case that if we issue a reset
+	 * while the GPU is loading the context image, the context
+	 * image becomes corrupt.
+	 *
+	 * If we have not started yet, the request should replay
+	 * perfectly and we do not need to flag the result as being erroneous.
+	 */
+	if (!__i915_request_has_started(rq))
+		goto out_replay;
+
+	/*
+	 * If the request was innocent, we leave the request in the ELSP
+	 * and will try to replay it on restarting. The context image may
+	 * have been corrupted by the reset, in which case we may have
+	 * to service a new GPU hang, but more likely we can continue on
+	 * without impact.
+	 *
+	 * If the request was guilty, we presume the context is corrupt
+	 * and have to at least restore the RING register in the context
+	 * image back to the expected values to skip over the guilty request.
+	 */
+	__i915_request_reset(rq, stalled);
+
+	/*
+	 * We want a simple context + ring to execute the breadcrumb update.
+	 * We cannot rely on the context being intact across the GPU hang,
+	 * so clear it and rebuild just what we need for the breadcrumb.
+	 * All pending requests for this context will be zapped, and any
+	 * future request will be after userspace has had the opportunity
+	 * to recreate its own state.
+	 */
+out_replay:
+	ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
+		     head, ce->ring->tail);
+	lrc_reset_regs(ce, engine);
+	ce->lrc.lrca = lrc_update_regs(ce, engine, head);
+}
+
+static void execlists_reset_csb(struct intel_engine_cs *engine, bool stalled)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+	struct i915_request **inactive;
+
+	rcu_read_lock();
+	inactive = reset_csb(engine, post);
+
+	execlists_reset_active(engine, true);
+
+	inactive = cancel_port_requests(execlists, inactive);
+	post_process_csb(post, inactive);
+	rcu_read_unlock();
+}
+
+static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
+{
+	unsigned long flags;
+
+	ENGINE_TRACE(engine, "\n");
+
+	/* Process the CSB, find the guilty context and throw it away */
+	execlists_reset_csb(engine, stalled);
+
+	/* Push back any incomplete requests for replay after the reset. */
+	rcu_read_lock();
+	spin_lock_irqsave(&engine->active.lock, flags);
+	__unwind_incomplete_requests(engine);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
+	rcu_read_unlock();
+}
+
+static void nop_submission_tasklet(unsigned long data)
+{
+	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+
+	/* The driver is wedged; don't process any more events. */
+	WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
+}
+
+static void execlists_reset_cancel(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct i915_request *rq, *rn;
+	struct rb_node *rb;
+	unsigned long flags;
+
+	ENGINE_TRACE(engine, "\n");
+
+	/*
+	 * Before we call engine->reset.cancel(), we should have exclusive
+	 * access to the submission state. This is arranged for us by the
+	 * caller disabling the interrupt generation, the tasklet and other
+	 * threads that may then access the same state, giving us a free hand
+	 * to reset state. However, we still need to let lockdep be aware that
+	 * we know this state may be accessed in hardirq context, so we
+	 * disable the irq around this manipulation and we want to keep
+	 * the spinlock focused on its duties and not accidentally conflate
+	 * its coverage with the submission's irq state. (Similarly, although we
+	 * shouldn't need to disable irq around the manipulation of the
+	 * submission's irq state, we also wish to remind ourselves that
+	 * it is irq state.)
+	 */
+	execlists_reset_csb(engine, true);
+
+	rcu_read_lock();
+	spin_lock_irqsave(&engine->active.lock, flags);
+
+	/* Mark all executing requests as skipped. */
+	list_for_each_entry(rq, &engine->active.requests, sched.link)
+		i915_request_mark_eio(rq);
+	intel_engine_signal_breadcrumbs(engine);
+
+	/* Flush the queued requests to the timeline list (for retiring). */
+	while ((rb = rb_first_cached(&execlists->queue))) {
+		struct i915_priolist *p = to_priolist(rb);
+		int i;
+
+		priolist_for_each_request_consume(rq, rn, p, i) {
+			i915_request_mark_eio(rq);
+			__i915_request_submit(rq);
+		}
+
+		rb_erase_cached(&p->node, &execlists->queue);
+		i915_priolist_free(p);
+	}
+
+	/* On-hold requests will be flushed to timeline upon their release */
+	list_for_each_entry(rq, &engine->active.hold, sched.link)
+		i915_request_mark_eio(rq);
+
+	/* Cancel all attached virtual engines */
+	while ((rb = rb_first_cached(&execlists->virtual))) {
+		struct virtual_engine *ve =
+			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+
+		rb_erase_cached(rb, &execlists->virtual);
+		RB_CLEAR_NODE(rb);
+
+		spin_lock(&ve->base.active.lock);
+		rq = fetch_and_zero(&ve->request);
+		if (rq) {
+			i915_request_mark_eio(rq);
+
+			rq->engine = engine;
+			__i915_request_submit(rq);
+			i915_request_put(rq);
+
+			ve->base.execlists.queue_priority_hint = INT_MIN;
+		}
+		spin_unlock(&ve->base.active.lock);
+	}
+
+	/* Remaining _unready_ requests will be nop'ed when submitted */
+
+	execlists->queue_priority_hint = INT_MIN;
+	execlists->queue = RB_ROOT_CACHED;
+
+	GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
+	execlists->tasklet.func = nop_submission_tasklet;
+
+	spin_unlock_irqrestore(&engine->active.lock, flags);
+	rcu_read_unlock();
+}
+
+static void execlists_reset_finish(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+
+	/*
+	 * After a GPU reset, we may have requests to replay. Do so now while
+	 * we still have the forcewake to be sure that the GPU is not allowed
+	 * to sleep before we restart and reload a context.
+	 *
+	 * If the GPU reset fails, the engine may still be alive with requests
+	 * inflight. We expect those to complete, or for the device to be
+	 * reset as the next level of recovery, and as a final resort we
+	 * will declare the device wedged.
+	 */
+	GEM_BUG_ON(!reset_in_progress(execlists));
+
+	/* And kick in case we missed a new request submission. */
+	if (__tasklet_enable(&execlists->tasklet))
+		__execlists_kick(execlists);
+
+	ENGINE_TRACE(engine, "depth->%d\n",
+		     atomic_read(&execlists->tasklet.count));
+}
+
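+/*
+ * Note that "disabling" the irq only masks the user interrupt; the
+ * events in irq_keep_mask (context-switch, CS error, semaphore wait)
+ * are always left unmasked as the submission backend relies on them.
+ */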
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
+{
+	ENGINE_WRITE(engine, RING_IMR,
+		     ~(engine->irq_enable_mask | engine->irq_keep_mask));
+	ENGINE_POSTING_READ(engine, RING_IMR);
+}
+
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
+{
+	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
+}
+
+static void execlists_park(struct intel_engine_cs *engine)
+{
+	cancel_timer(&engine->execlists.timer);
+	cancel_timer(&engine->execlists.preempt);
+}
+
+static bool can_preempt(struct intel_engine_cs *engine)
+{
+	if (INTEL_GEN(engine->i915) > 8)
+		return true;
+
+	/* GPGPU on bdw requires extra w/a; not implemented */
+	return engine->class != RENDER_CLASS;
+}
+
+static void execlists_set_default_submission(struct intel_engine_cs *engine)
+{
+	engine->submit_request = execlists_submit_request;
+	engine->schedule = i915_schedule;
+	engine->execlists.tasklet.func = execlists_submission_tasklet;
+
+	engine->reset.prepare = execlists_reset_prepare;
+	engine->reset.rewind = execlists_reset_rewind;
+	engine->reset.cancel = execlists_reset_cancel;
+	engine->reset.finish = execlists_reset_finish;
+
+	engine->park = execlists_park;
+	engine->unpark = NULL;
+
+	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
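+	/*
+	 * Semaphores and preemption are not exposed when running under a
+	 * vGPU, where the mediating layer is presumed unable to support
+	 * them reliably.
+	 */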
+	if (!intel_vgpu_active(engine->i915)) {
+		engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+		if (can_preempt(engine)) {
+			engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+			if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+				engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+		}
+	}
+
+	if (intel_engine_has_preemption(engine))
+		engine->emit_bb_start = gen8_emit_bb_start;
+	else
+		engine->emit_bb_start = gen8_emit_bb_start_noarb;
+}
+
+static void execlists_shutdown(struct intel_engine_cs *engine)
+{
+	/* Synchronise with residual timers and any softirq they raise */
+	del_timer_sync(&engine->execlists.timer);
+	del_timer_sync(&engine->execlists.preempt);
+	tasklet_kill(&engine->execlists.tasklet);
+}
+
+static void execlists_release(struct intel_engine_cs *engine)
+{
+	engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
+	execlists_shutdown(engine);
+
+	intel_engine_cleanup_common(engine);
+	lrc_fini_wa_ctx(engine);
+}
+
+static void
+logical_ring_default_vfuncs(struct intel_engine_cs *engine)
+{
+	/* Default vfuncs which can be overridden by each engine. */
+
+	engine->resume = execlists_resume;
+
+	engine->cops = &execlists_context_ops;
+	engine->request_alloc = execlists_request_alloc;
+
+	engine->emit_flush = gen8_emit_flush_xcs;
+	engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
+	engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
+	if (INTEL_GEN(engine->i915) >= 12) {
+		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
+		engine->emit_flush = gen12_emit_flush_xcs;
+	}
+	engine->set_default_submission = execlists_set_default_submission;
+
+	if (INTEL_GEN(engine->i915) < 11) {
+		engine->irq_enable = gen8_logical_ring_enable_irq;
+		engine->irq_disable = gen8_logical_ring_disable_irq;
+	} else {
+		/*
+		 * TODO: On Gen11 interrupt masks need to be clear
+		 * to allow C6 entry. Keep interrupts enabled
+		 * and take the hit of generating extra interrupts
+		 * until a more refined solution exists.
+		 */
+	}
+}
+
+static void logical_ring_default_irqs(struct intel_engine_cs *engine)
+{
+	unsigned int shift = 0;
+
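+	/*
+	 * Prior to gen11, the engines share common interrupt registers,
+	 * with each engine's bits at a fixed per-engine shift; gen11+
+	 * has per-engine interrupt registers, so the masks apply unshifted.
+	 */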
+	if (INTEL_GEN(engine->i915) < 11) {
+		const u8 irq_shifts[] = {
+			[RCS0]  = GEN8_RCS_IRQ_SHIFT,
+			[BCS0]  = GEN8_BCS_IRQ_SHIFT,
+			[VCS0]  = GEN8_VCS0_IRQ_SHIFT,
+			[VCS1]  = GEN8_VCS1_IRQ_SHIFT,
+			[VECS0] = GEN8_VECS_IRQ_SHIFT,
+		};
+
+		shift = irq_shifts[engine->id];
+	}
+
+	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
+	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+	engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
+	engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
+}
+
+static void rcs_submission_override(struct intel_engine_cs *engine)
+{
+	switch (INTEL_GEN(engine->i915)) {
+	case 12:
+		engine->emit_flush = gen12_emit_flush_rcs;
+		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
+		break;
+	case 11:
+		engine->emit_flush = gen11_emit_flush_rcs;
+		engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
+		break;
+	default:
+		engine->emit_flush = gen8_emit_flush_rcs;
+		engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+		break;
+	}
+}
+
+int intel_execlists_submission_setup(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct drm_i915_private *i915 = engine->i915;
+	struct intel_uncore *uncore = engine->uncore;
+	u32 base = engine->mmio_base;
+
+	tasklet_init(&engine->execlists.tasklet,
+		     execlists_submission_tasklet, (unsigned long)engine);
+	timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
+	timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
+
+	logical_ring_default_vfuncs(engine);
+	logical_ring_default_irqs(engine);
+
+	if (engine->class == RENDER_CLASS)
+		rcs_submission_override(engine);
+
+	lrc_init_wa_ctx(engine);
+
+	if (HAS_LOGICAL_RING_ELSQ(i915)) {
+		execlists->submit_reg = uncore->regs +
+			i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
+		execlists->ctrl_reg = uncore->regs +
+			i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
+	} else {
+		execlists->submit_reg = uncore->regs +
+			i915_mmio_reg_offset(RING_ELSP(base));
+	}
+
+	execlists->csb_status =
+		(u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
+
+	execlists->csb_write =
+		&engine->status_page.addr[intel_hws_csb_write_index(i915)];
+
+	if (INTEL_GEN(i915) < 11)
+		execlists->csb_size = GEN8_CSB_ENTRIES;
+	else
+		execlists->csb_size = GEN11_CSB_ENTRIES;
+
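+	/*
+	 * context_tag is a bitmap of free context tags (ccids) used to
+	 * stamp contexts submitted to HW so that they can be matched
+	 * against the CSB events; on gen11+ the engine class/instance
+	 * also forms part of the reported context ID.
+	 */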
+	engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
+	if (INTEL_GEN(engine->i915) >= 11) {
+		execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
+		execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
+	}
+
+	/* Finally, take ownership and responsibility for cleanup! */
+	engine->sanitize = execlists_sanitize;
+	engine->release = execlists_release;
+
+	return 0;
+}
+
+static struct list_head *virtual_queue(struct virtual_engine *ve)
+{
+	return &ve->base.execlists.default_priolist.requests[0];
+}
+
+static void rcu_virtual_context_destroy(struct work_struct *wrk)
+{
+	struct virtual_engine *ve =
+		container_of(wrk, typeof(*ve), rcu.work);
+	unsigned int n;
+
+	GEM_BUG_ON(ve->context.inflight);
+
+	/* Preempt-to-busy may leave a stale request behind. */
+	if (unlikely(ve->request)) {
+		struct i915_request *old;
+
+		spin_lock_irq(&ve->base.active.lock);
+
+		old = fetch_and_zero(&ve->request);
+		if (old) {
+			GEM_BUG_ON(!__i915_request_is_complete(old));
+			__i915_request_submit(old);
+			i915_request_put(old);
+		}
+
+		spin_unlock_irq(&ve->base.active.lock);
+	}
+
+	/*
+	 * Flush the tasklet in case it is still running on another core.
+	 *
+	 * This needs to be done before we remove ourselves from the siblings'
+	 * rbtrees as, if it is still running in parallel, it may reinsert
+	 * the rb_node into a sibling.
+	 */
+	tasklet_kill(&ve->base.execlists.tasklet);
+
+	/* Decouple ourselves from the siblings, no more access allowed. */
+	for (n = 0; n < ve->num_siblings; n++) {
+		struct intel_engine_cs *sibling = ve->siblings[n];
+		struct rb_node *node = &ve->nodes[sibling->id].rb;
+
+		if (RB_EMPTY_NODE(node))
+			continue;
+
+		spin_lock_irq(&sibling->active.lock);
+
+		/* Detachment is lazily performed in the execlists tasklet */
+		if (!RB_EMPTY_NODE(node))
+			rb_erase_cached(node, &sibling->execlists.virtual);
+
+		spin_unlock_irq(&sibling->active.lock);
+	}
+	GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+
+	lrc_fini(&ve->context);
+	intel_context_fini(&ve->context);
+
+	intel_breadcrumbs_free(ve->base.breadcrumbs);
+	intel_engine_free_request_pool(&ve->base);
+
+	kfree(ve->bonds);
+	kfree(ve);
+}
+
+static void virtual_context_destroy(struct kref *kref)
+{
+	struct virtual_engine *ve =
+		container_of(kref, typeof(*ve), context.ref);
+
+	GEM_BUG_ON(!list_empty(&ve->context.signals));
+
+	/*
+	 * When destroying the virtual engine, we have to be aware that
+	 * it may still be in use from a hardirq/softirq context causing
+	 * the resubmission of a completed request (background completion
+	 * due to preempt-to-busy). Before we can free the engine, we need
+	 * to flush the submission code and tasklets that are still potentially
+	 * accessing the engine. Flushing the tasklets requires process context,
+	 * and since we can guard the resubmit onto the engine with an RCU read
+	 * lock, we can delegate the free of the engine to an RCU worker.
+	 */
+	INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
+	queue_rcu_work(system_wq, &ve->rcu);
+}
+
+static void virtual_engine_initial_hint(struct virtual_engine *ve)
+{
+	int swp;
+
+	/*
+	 * Pick a random sibling on starting to help spread the load around.
+	 *
+	 * New contexts are typically created with exactly the same order
+	 * of siblings, and often started in batches. Due to the way we iterate
+	 * the array of siblings when submitting requests, sibling[0] is
+	 * prioritised for dequeuing. If we make sure that sibling[0] is fairly
+	 * randomised across the system, we also help spread the load by the
+	 * first engine we inspect being different each time.
+	 *
+	 * NB This does not force us to execute on this engine, it will just
+	 * typically be the first we inspect for submission.
+	 */
+	swp = prandom_u32_max(ve->num_siblings);
+	if (swp)
+		swap(ve->siblings[swp], ve->siblings[0]);
+}
+
+static int virtual_context_alloc(struct intel_context *ce)
+{
+	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+	return lrc_alloc(ce, ve->siblings[0]);
+}
+
+static int virtual_context_pre_pin(struct intel_context *ce,
+				     struct i915_gem_ww_ctx *ww,
+				     void **vaddr)
+{
+	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+	/* Note: we must use a real engine class for setting up reg state */
+	return lrc_pre_pin(ce, ve->siblings[0], ww, vaddr);
+}
+
+static int virtual_context_pin(struct intel_context *ce, void *vaddr)
+{
+	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+	return lrc_pin(ce, ve->siblings[0], vaddr);
+}
+
+static void virtual_context_enter(struct intel_context *ce)
+{
+	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+	unsigned int n;
+
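+	/* We may run on any of our siblings, so keep them all awake */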
+	for (n = 0; n < ve->num_siblings; n++)
+		intel_engine_pm_get(ve->siblings[n]);
+
+	intel_timeline_enter(ce->timeline);
+}
+
+static void virtual_context_exit(struct intel_context *ce)
+{
+	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+	unsigned int n;
+
+	intel_timeline_exit(ce->timeline);
+
+	for (n = 0; n < ve->num_siblings; n++)
+		intel_engine_pm_put(ve->siblings[n]);
+}
+
+static const struct intel_context_ops virtual_context_ops = {
+	.flags = COPS_HAS_INFLIGHT,
+
+	.alloc = virtual_context_alloc,
+
+	.pre_pin = virtual_context_pre_pin,
+	.pin = virtual_context_pin,
+	.unpin = lrc_unpin,
+	.post_unpin = lrc_post_unpin,
+
+	.enter = virtual_context_enter,
+	.exit = virtual_context_exit,
+
+	.destroy = virtual_context_destroy,
+};
+
+static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
+{
+	struct i915_request *rq;
+	intel_engine_mask_t mask;
+
+	rq = READ_ONCE(ve->request);
+	if (!rq)
+		return 0;
+
+	/* The rq is ready for submission; rq->execution_mask is now stable. */
+	mask = rq->execution_mask;
+	if (unlikely(!mask)) {
+		/* Invalid selection, submit to a random engine in error */
+		i915_request_set_error_once(rq, -ENODEV);
+		mask = ve->siblings[0]->mask;
+	}
+
+	ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
+		     rq->fence.context, rq->fence.seqno,
+		     mask, ve->base.execlists.queue_priority_hint);
+
+	return mask;
+}
+
+static void virtual_submission_tasklet(unsigned long data)
+{
+	struct virtual_engine * const ve = (struct virtual_engine *)data;
+	const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
+	intel_engine_mask_t mask;
+	unsigned int n;
+
+	rcu_read_lock();
+	mask = virtual_submission_mask(ve);
+	rcu_read_unlock();
+	if (unlikely(!mask))
+		return;
+
+	for (n = 0; n < ve->num_siblings; n++) {
+		struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
+		struct ve_node * const node = &ve->nodes[sibling->id];
+		struct rb_node **parent, *rb;
+		bool first;
+
+		if (!READ_ONCE(ve->request))
+			break; /* already handled by a sibling's tasklet */
+
+		spin_lock_irq(&sibling->active.lock);
+
+		if (unlikely(!(mask & sibling->mask))) {
+			if (!RB_EMPTY_NODE(&node->rb)) {
+				rb_erase_cached(&node->rb,
+						&sibling->execlists.virtual);
+				RB_CLEAR_NODE(&node->rb);
+			}
+
+			goto unlock_engine;
+		}
+
+		if (unlikely(!RB_EMPTY_NODE(&node->rb))) {
+			/*
+			 * Cheat and avoid rebalancing the tree if we can
+			 * reuse this node in situ.
+			 */
+			first = rb_first_cached(&sibling->execlists.virtual) ==
+				&node->rb;
+			if (prio == node->prio || (prio > node->prio && first))
+				goto submit_engine;
+
+			rb_erase_cached(&node->rb, &sibling->execlists.virtual);
+		}
+
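+		/*
+		 * Insert our node into this sibling's virtual rbtree,
+		 * ordered by priority, noting if we become the leftmost
+		 * (highest priority) node for the cached lookup and the
+		 * kick decision below.
+		 */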
+		rb = NULL;
+		first = true;
+		parent = &sibling->execlists.virtual.rb_root.rb_node;
+		while (*parent) {
+			struct ve_node *other;
+
+			rb = *parent;
+			other = rb_entry(rb, typeof(*other), rb);
+			if (prio > other->prio) {
+				parent = &rb->rb_left;
+			} else {
+				parent = &rb->rb_right;
+				first = false;
+			}
+		}
+
+		rb_link_node(&node->rb, rb, parent);
+		rb_insert_color_cached(&node->rb,
+				       &sibling->execlists.virtual,
+				       first);
+
+submit_engine:
+		GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
+		node->prio = prio;
+		if (first && prio > sibling->execlists.queue_priority_hint)
+			tasklet_hi_schedule(&sibling->execlists.tasklet);
+
+unlock_engine:
+		spin_unlock_irq(&sibling->active.lock);
+
+		if (intel_context_inflight(&ve->context))
+			break;
+	}
+}
+
+static void virtual_submit_request(struct i915_request *rq)
+{
+	struct virtual_engine *ve = to_virtual_engine(rq->engine);
+	unsigned long flags;
+
+	ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
+		     rq->fence.context,
+		     rq->fence.seqno);
+
+	GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
+
+	spin_lock_irqsave(&ve->base.active.lock, flags);
+
+	/* By the time we resubmit a request, it may be completed */
+	if (__i915_request_is_complete(rq)) {
+		__i915_request_submit(rq);
+		goto unlock;
+	}
+
+	if (ve->request) { /* background completion from preempt-to-busy */
+		GEM_BUG_ON(!__i915_request_is_complete(ve->request));
+		__i915_request_submit(ve->request);
+		i915_request_put(ve->request);
+	}
+
+	ve->base.execlists.queue_priority_hint = rq_prio(rq);
+	ve->request = i915_request_get(rq);
+
+	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+	list_move_tail(&rq->sched.link, virtual_queue(ve));
+
+	tasklet_hi_schedule(&ve->base.execlists.tasklet);
+
+unlock:
+	spin_unlock_irqrestore(&ve->base.active.lock, flags);
+}
+
+static struct ve_bond *
+virtual_find_bond(struct virtual_engine *ve,
+		  const struct intel_engine_cs *master)
+{
+	int i;
+
+	for (i = 0; i < ve->num_bonds; i++) {
+		if (ve->bonds[i].master == master)
+			return &ve->bonds[i];
+	}
+
+	return NULL;
+}
+
+static void
+virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
+{
+	struct virtual_engine *ve = to_virtual_engine(rq->engine);
+	intel_engine_mask_t allowed, exec;
+	struct ve_bond *bond;
+
+	allowed = ~to_request(signal)->engine->mask;
+
+	bond = virtual_find_bond(ve, to_request(signal)->engine);
+	if (bond)
+		allowed &= bond->sibling_mask;
+
+	/* Restrict the bonded request to run on only the available engines */
+	exec = READ_ONCE(rq->execution_mask);
+	while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
+		;
+
+	/* Prevent the master from being re-run on the bonded engines */
+	to_request(signal)->execution_mask &= ~allowed;
+}
+
+struct intel_context *
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
+			       unsigned int count)
+{
+	struct virtual_engine *ve;
+	unsigned int n;
+	int err;
+
+	if (count == 0)
+		return ERR_PTR(-EINVAL);
+
+	if (count == 1)
+		return intel_context_create(siblings[0]);
+
+	ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
+	if (!ve)
+		return ERR_PTR(-ENOMEM);
+
+	ve->base.i915 = siblings[0]->i915;
+	ve->base.gt = siblings[0]->gt;
+	ve->base.uncore = siblings[0]->uncore;
+	ve->base.id = -1;
+
+	ve->base.class = OTHER_CLASS;
+	ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
+	ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+	ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+
+	/*
+	 * The decision on whether to submit a request using semaphores
+	 * depends on the saturated state of the engine. We only compute
+	 * this during HW submission of the request, and we need this
+	 * state to be globally applied to all requests being submitted
+	 * to this engine. Virtual engines encompass more than one physical
+	 * engine and so we cannot accurately tell in advance if one of those
+	 * engines is already saturated and so cannot afford to use a semaphore
+	 * and be pessimized in priority for doing so -- if we are the only
+	 * context using semaphores after all other clients have stopped, we
+	 * will be starved on the saturated system. Such a global switch for
+	 * semaphores is less than ideal, but alas is the current compromise.
+	 */
+	ve->base.saturated = ALL_ENGINES;
+
+	snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
+
+	intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
+	intel_engine_init_execlists(&ve->base);
+
+	ve->base.cops = &virtual_context_ops;
+	ve->base.request_alloc = execlists_request_alloc;
+
+	ve->base.schedule = i915_schedule;
+	ve->base.submit_request = virtual_submit_request;
+	ve->base.bond_execute = virtual_bond_execute;
+
+	INIT_LIST_HEAD(virtual_queue(ve));
+	ve->base.execlists.queue_priority_hint = INT_MIN;
+	tasklet_init(&ve->base.execlists.tasklet,
+		     virtual_submission_tasklet,
+		     (unsigned long)ve);
+
+	intel_context_init(&ve->context, &ve->base);
+
+	ve->base.breadcrumbs = intel_breadcrumbs_create(NULL);
+	if (!ve->base.breadcrumbs) {
+		err = -ENOMEM;
+		goto err_put;
+	}
+
+	for (n = 0; n < count; n++) {
+		struct intel_engine_cs *sibling = siblings[n];
+
+		GEM_BUG_ON(!is_power_of_2(sibling->mask));
+		if (sibling->mask & ve->base.mask) {
+			DRM_DEBUG("duplicate %s entry in load balancer\n",
+				  sibling->name);
+			err = -EINVAL;
+			goto err_put;
+		}
+
+		/*
+		 * The virtual engine implementation is tightly coupled to
+		 * the execlists backend -- we push out requests directly
+		 * into a tree inside each physical engine. We could support
+		 * layering if we handle cloning of the requests and
+		 * submitting a copy into each backend.
+		 */
+		if (sibling->execlists.tasklet.func !=
+		    execlists_submission_tasklet) {
+			err = -ENODEV;
+			goto err_put;
+		}
+
+		GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
+		RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
+
+		ve->siblings[ve->num_siblings++] = sibling;
+		ve->base.mask |= sibling->mask;
+
+		/*
+		 * All physical engines must be compatible for their emission
+		 * functions (as we build the instructions during request
+		 * construction and do not alter them before submission
+		 * on the physical engine). We use the engine class as a guide
+		 * here, although that could be refined.
+		 */
+		if (ve->base.class != OTHER_CLASS) {
+			if (ve->base.class != sibling->class) {
+				DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
+					  sibling->class, ve->base.class);
+				err = -EINVAL;
+				goto err_put;
+			}
+			continue;
+		}
+
+		ve->base.class = sibling->class;
+		ve->base.uabi_class = sibling->uabi_class;
+		snprintf(ve->base.name, sizeof(ve->base.name),
+			 "v%dx%d", ve->base.class, count);
+		ve->base.context_size = sibling->context_size;
+
+		ve->base.emit_bb_start = sibling->emit_bb_start;
+		ve->base.emit_flush = sibling->emit_flush;
+		ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
+		ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
+		ve->base.emit_fini_breadcrumb_dw =
+			sibling->emit_fini_breadcrumb_dw;
+
+		ve->base.flags = sibling->flags;
+	}
+
+	ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
+
+	virtual_engine_initial_hint(ve);
+	return &ve->context;
+
+err_put:
+	intel_context_put(&ve->context);
+	return ERR_PTR(err);
+}
+
+struct intel_context *
+intel_execlists_clone_virtual(struct intel_engine_cs *src)
+{
+	struct virtual_engine *se = to_virtual_engine(src);
+	struct intel_context *dst;
+
+	dst = intel_execlists_create_virtual(se->siblings,
+					     se->num_siblings);
+	if (IS_ERR(dst))
+		return dst;
+
+	if (se->num_bonds) {
+		struct virtual_engine *de = to_virtual_engine(dst->engine);
+
+		de->bonds = kmemdup(se->bonds,
+				    sizeof(*se->bonds) * se->num_bonds,
+				    GFP_KERNEL);
+		if (!de->bonds) {
+			intel_context_put(dst);
+			return ERR_PTR(-ENOMEM);
+		}
+
+		de->num_bonds = se->num_bonds;
+	}
+
+	return dst;
+}
+
+int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
+				     const struct intel_engine_cs *master,
+				     const struct intel_engine_cs *sibling)
+{
+	struct virtual_engine *ve = to_virtual_engine(engine);
+	struct ve_bond *bond;
+	int n;
+
+	/* Sanity check the sibling is part of the virtual engine */
+	for (n = 0; n < ve->num_siblings; n++)
+		if (sibling == ve->siblings[n])
+			break;
+	if (n == ve->num_siblings)
+		return -EINVAL;
+
+	bond = virtual_find_bond(ve, master);
+	if (bond) {
+		bond->sibling_mask |= sibling->mask;
+		return 0;
+	}
+
+	bond = krealloc(ve->bonds,
+			sizeof(*bond) * (ve->num_bonds + 1),
+			GFP_KERNEL);
+	if (!bond)
+		return -ENOMEM;
+
+	bond[ve->num_bonds].master = master;
+	bond[ve->num_bonds].sibling_mask = sibling->mask;
+
+	ve->bonds = bond;
+	ve->num_bonds++;
+
+	return 0;
+}
+
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+				   struct drm_printer *m,
+				   void (*show_request)(struct drm_printer *m,
+							const struct i915_request *rq,
+							const char *prefix,
+							int indent),
+				   unsigned int max)
+{
+	const struct intel_engine_execlists *execlists = &engine->execlists;
+	struct i915_request *rq, *last;
+	unsigned long flags;
+	unsigned int count;
+	struct rb_node *rb;
+
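+	/*
+	 * Print up to max requests from each source (the executing list,
+	 * the priority queue and any attached virtual engines), showing
+	 * the final request plus a note of how many were skipped whenever
+	 * a list exceeds the limit.
+	 */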
+	spin_lock_irqsave(&engine->active.lock, flags);
+
+	last = NULL;
+	count = 0;
+	list_for_each_entry(rq, &engine->active.requests, sched.link) {
+		if (count++ < max - 1)
+			show_request(m, rq, "\t\t", 0);
+		else
+			last = rq;
+	}
+	if (last) {
+		if (count > max) {
+			drm_printf(m,
+				   "\t\t...skipping %d executing requests...\n",
+				   count - max);
+		}
+		show_request(m, last, "\t\t", 0);
+	}
+
+	if (execlists->queue_priority_hint != INT_MIN)
+		drm_printf(m, "\t\tQueue priority hint: %d\n",
+			   READ_ONCE(execlists->queue_priority_hint));
+
+	last = NULL;
+	count = 0;
+	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+		int i;
+
+		priolist_for_each_request(rq, p, i) {
+			if (count++ < max - 1)
+				show_request(m, rq, "\t\t", 0);
+			else
+				last = rq;
+		}
+	}
+	if (last) {
+		if (count > max) {
+			drm_printf(m,
+				   "\t\t...skipping %d queued requests...\n",
+				   count - max);
+		}
+		show_request(m, last, "\t\t", 0);
+	}
+
+	last = NULL;
+	count = 0;
+	for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
+		struct virtual_engine *ve =
+			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+		struct i915_request *rq = READ_ONCE(ve->request);
+
+		if (rq) {
+			if (count++ < max - 1)
+				show_request(m, rq, "\t\t", 0);
+			else
+				last = rq;
+		}
+	}
+	if (last) {
+		if (count > max) {
+			drm_printf(m,
+				   "\t\t...skipping %d virtual requests...\n",
+				   count - max);
+		}
+		show_request(m, last, "\t\t", 0);
+	}
+
+	spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
+{
+	return engine->set_default_submission ==
+	       execlists_set_default_submission;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_execlists.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
new file mode 100644
index 0000000000000000000000000000000000000000..a8fd7adefd8240cc4229acf8b361fe1ca1dd76c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#ifndef __INTEL_EXECLISTS_SUBMISSION_H__
+#define __INTEL_EXECLISTS_SUBMISSION_H__
+
+#include <linux/types.h>
+
+struct drm_printer;
+
+struct i915_request;
+struct intel_context;
+struct intel_engine_cs;
+
+enum {
+	INTEL_CONTEXT_SCHEDULE_IN = 0,
+	INTEL_CONTEXT_SCHEDULE_OUT,
+	INTEL_CONTEXT_SCHEDULE_PREEMPTED,
+};
+
+int intel_execlists_submission_setup(struct intel_engine_cs *engine);
+
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+				   struct drm_printer *m,
+				   void (*show_request)(struct drm_printer *m,
+							const struct i915_request *rq,
+							const char *prefix,
+							int indent),
+				   unsigned int max);
+
+struct intel_context *
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
+			       unsigned int count);
+
+struct intel_context *
+intel_execlists_clone_virtual(struct intel_engine_cs *src);
+
+int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
+				     const struct intel_engine_cs *master,
+				     const struct intel_engine_cs *sibling);
+
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
+
+#endif /* __INTEL_EXECLISTS_SUBMISSION_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index db8c66dde6558cbee274f72257cf5a5442ed14a1..700588bc9d57d822a04865cc57efc02a99d77c9f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -101,7 +101,16 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
 	 * Query intel_iommu to see if we need the workaround. Presumably that
 	 * was loaded first.
 	 */
-	return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
+	if (!intel_vtd_active())
+		return false;
+
+	if (IS_GEN(i915, 5) && IS_MOBILE(i915))
+		return true;
+
+	if (IS_GEN(i915, 12))
+		return true; /* XXX DMAR fault reason 7 */
+
+	return false;
 }
 
 void i915_ggtt_suspend(struct i915_ggtt *ggtt)
@@ -1073,7 +1082,12 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
 
 	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
 
-	ggtt->do_idle_maps = needs_idle_maps(i915);
+	if (needs_idle_maps(i915)) {
+		drm_notice(&i915->drm,
+			   "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
+		ggtt->do_idle_maps = true;
+	}
+
 	ggtt->vm.insert_page = i915_ggtt_insert_page;
 	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
 	ggtt->vm.clear_range = i915_ggtt_clear_range;
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 7fb36b12fe7a2aabf8e64bfe6ad7d1cb3df4503e..a357bb4318152ba61c4be3309c49e315a665aacf 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -320,13 +320,31 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
 		fence_write(fence);
 }
 
+static bool fence_is_active(const struct i915_fence_reg *fence)
+{
+	return fence->vma && i915_vma_is_active(fence->vma);
+}
+
 static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
 {
-	struct i915_fence_reg *fence;
+	struct i915_fence_reg *active = NULL;
+	struct i915_fence_reg *fence, *fn;
 
-	list_for_each_entry(fence, &ggtt->fence_list, link) {
+	list_for_each_entry_safe(fence, fn, &ggtt->fence_list, link) {
 		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
 
+		if (fence == active) /* now seen this fence twice */
+			active = ERR_PTR(-EAGAIN);
+
+		/* Prefer idle fences so we do not have to wait on the GPU */
+		if (active != ERR_PTR(-EAGAIN) && fence_is_active(fence)) {
+			if (!active)
+				active = fence;
+
+			list_move_tail(&fence->link, &ggtt->fence_list);
+			continue;
+		}
+
 		if (atomic_read(&fence->pin_count))
 			continue;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 44f1d51e5ae5237d73a557832e2b5954ae7116aa..d8e1ab41263430b5c9447ed6f35973ef524208ca 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -46,6 +46,8 @@ void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
 
 int intel_gt_init_mmio(struct intel_gt *gt)
 {
+	intel_gt_init_clock_frequency(gt);
+
 	intel_uc_init_mmio(&gt->uc);
 	intel_sseu_info_init(gt);
 
@@ -546,8 +548,6 @@ int intel_gt_init(struct intel_gt *gt)
 	 */
 	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
 
-	intel_gt_init_clock_frequency(gt);
-
 	err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
 	if (err)
 		goto out_fw;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 104cb30e8c138cc8c275c39711a66cecd9d236a2..06d84cf09570f33665293a970799a69e6a27c0a3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -145,7 +145,8 @@ static void pool_retire(struct i915_active *ref)
 }
 
 static struct intel_gt_buffer_pool_node *
-node_create(struct intel_gt_buffer_pool *pool, size_t sz)
+node_create(struct intel_gt_buffer_pool *pool, size_t sz,
+	    enum i915_map_type type)
 {
 	struct intel_gt *gt = to_gt(pool);
 	struct intel_gt_buffer_pool_node *node;
@@ -169,12 +170,14 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz)
 
 	i915_gem_object_set_readonly(obj);
 
+	node->type = type;
 	node->obj = obj;
 	return node;
 }
 
 struct intel_gt_buffer_pool_node *
-intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
+			 enum i915_map_type type)
 {
 	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
 	struct intel_gt_buffer_pool_node *node;
@@ -191,6 +194,9 @@ intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
 		if (node->obj->base.size < size)
 			continue;
 
+		if (node->type != type)
+			continue;
+
 		age = READ_ONCE(node->age);
 		if (!age)
 			continue;
@@ -205,7 +211,7 @@ intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
 	rcu_read_unlock();
 
 	if (&node->link == list) {
-		node = node_create(pool, size);
+		node = node_create(pool, size, type);
 		if (IS_ERR(node))
 			return node;
 	}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
index 42cbac003e8ad7028f6ad075d3127d7e6aac8976..6068f8f1762e59df6c9cb463c861da78131868cc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
@@ -15,7 +15,8 @@ struct intel_gt;
 struct i915_request;
 
 struct intel_gt_buffer_pool_node *
-intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size);
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
+			 enum i915_map_type type);
 
 static inline int
 intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
index bcf1658c96338854a65bde82a371356cf24f763b..d8d82c890da8917a2204cebbb891cf9a4728217a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
@@ -11,10 +11,9 @@
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 
+#include "gem/i915_gem_object_types.h"
 #include "i915_active_types.h"
 
-struct drm_i915_gem_object;
-
 struct intel_gt_buffer_pool {
 	spinlock_t lock;
 	struct list_head cache_list[4];
@@ -31,6 +30,7 @@ struct intel_gt_buffer_pool_node {
 		struct rcu_head rcu;
 	};
 	unsigned long age;
+	enum i915_map_type type;
 };
 
 #endif /* INTEL_GT_BUFFER_POOL_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 9990796868462094370247c4dbd128da52c64cb3..a4242ca8dcd7247a3a07b31fa6c41bd3bf512cea 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -7,34 +7,146 @@
 #include "intel_gt.h"
 #include "intel_gt_clock_utils.h"
 
-#define MHZ_12   12000000 /* 12MHz (24MHz/2), 83.333ns */
-#define MHZ_12_5 12500000 /* 12.5MHz (25MHz/2), 80ns */
-#define MHZ_19_2 19200000 /* 19.2MHz, 52.083ns */
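+/*
+ * Decode GEN9_TIMESTAMP_OVERRIDE: the US counter divider field gives
+ * the integer MHz component of the reference timestamp frequency, and
+ * the denominator field a sub-MHz remainder; the result is in Hz.
+ */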
+static u32 read_reference_ts_freq(struct intel_uncore *uncore)
+{
+	u32 ts_override = intel_uncore_read(uncore, GEN9_TIMESTAMP_OVERRIDE);
+	u32 base_freq, frac_freq;
+
+	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
+		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
+	base_freq *= 1000000;
+
+	frac_freq = ((ts_override &
+		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
+		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
+	frac_freq = 1000000 / (frac_freq + 1);
+
+	return base_freq + frac_freq;
+}
+
+static u32 gen10_get_crystal_clock_freq(struct intel_uncore *uncore,
+					u32 rpm_config_reg)
+{
+	u32 f19_2_mhz = 19200000;
+	u32 f24_mhz = 24000000;
+	u32 crystal_clock =
+		(rpm_config_reg & GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+		GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
 
-static u32 read_clock_frequency(const struct intel_gt *gt)
+	switch (crystal_clock) {
+	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+		return f19_2_mhz;
+	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+		return f24_mhz;
+	default:
+		MISSING_CASE(crystal_clock);
+		return 0;
+	}
+}
+
+static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
+					u32 rpm_config_reg)
 {
-	if (INTEL_GEN(gt->i915) >= 11) {
-		u32 config;
-
-		config = intel_uncore_read(gt->uncore, RPM_CONFIG0);
-		config &= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK;
-		config >>= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
-		switch (config) {
-		case 0: return MHZ_12;
-		case 1:
-		case 2: return MHZ_19_2;
-		default:
-		case 3: return MHZ_12_5;
+	u32 f19_2_mhz = 19200000;
+	u32 f24_mhz = 24000000;
+	u32 f25_mhz = 25000000;
+	u32 f38_4_mhz = 38400000;
+	u32 crystal_clock =
+		(rpm_config_reg & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+		GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+
+	switch (crystal_clock) {
+	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+		return f24_mhz;
+	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+		return f19_2_mhz;
+	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
+		return f38_4_mhz;
+	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
+		return f25_mhz;
+	default:
+		MISSING_CASE(crystal_clock);
+		return 0;
+	}
+}
+
+static u32 read_clock_frequency(struct intel_uncore *uncore)
+{
+	u32 f12_5_mhz = 12500000;
+	u32 f19_2_mhz = 19200000;
+	u32 f24_mhz = 24000000;
+
+	if (INTEL_GEN(uncore->i915) <= 4) {
+		/*
+		 * PRMs say:
+		 *
+		 *     "The value in this register increments once every 16
+		 *      hclks." (through the “Clocking Configuration”
+		 *      (“CLKCFG”) MCHBAR register)
+		 */
+		return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
+	} else if (INTEL_GEN(uncore->i915) <= 8) {
+		/*
+		 * PRMs say:
+		 *
+		 *     "The PCU TSC counts 10ns increments; this timestamp
+		 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
+		 *      rolling over every 1.5 hours)."
+		 */
+		return f12_5_mhz;
+	} else if (INTEL_GEN(uncore->i915) <= 9) {
+		u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+		u32 freq = 0;
+
+		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+			freq = read_reference_ts_freq(uncore);
+		} else {
+			freq = IS_GEN9_LP(uncore->i915) ? f19_2_mhz : f24_mhz;
+
+			/*
+			 * Now figure out how the command stream's timestamp
+			 * register increments from this frequency (it might
+			 * increment only every few clock cycles).
+			 */
+			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
+				      CTC_SHIFT_PARAMETER_SHIFT);
 		}
-	} else if (INTEL_GEN(gt->i915) >= 9) {
-		if (IS_GEN9_LP(gt->i915))
-			return MHZ_19_2;
-		else
-			return MHZ_12;
-	} else {
-		return MHZ_12_5;
+
+		return freq;
+	} else if (INTEL_GEN(uncore->i915) <= 12) {
+		u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+		u32 freq = 0;
+
+		/*
+		 * First figure out the reference frequency. There are 2 ways
+		 * we can compute the frequency, either through the
+		 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
+		 * tells us which one we should use.
+		 */
+		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+			freq = read_reference_ts_freq(uncore);
+		} else {
+			u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
+
+			if (INTEL_GEN(uncore->i915) <= 10)
+				freq = gen10_get_crystal_clock_freq(uncore, c0);
+			else
+				freq = gen11_get_crystal_clock_freq(uncore, c0);
+
+			/*
+			 * Now figure out how the command stream's timestamp
+			 * register increments from this frequency (it might
+			 * increment only every few clock cycles).
+			 */
+			freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+		}
+
+		return freq;
 	}
+
+	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
+	return 0;
 }
 
 void intel_gt_init_clock_frequency(struct intel_gt *gt)
@@ -43,20 +155,27 @@ void intel_gt_init_clock_frequency(struct intel_gt *gt)
 	 * Note that on gen11+, the clock frequency may be reconfigured.
 	 * We do not, and we assume nobody else does.
 	 */
-	gt->clock_frequency = read_clock_frequency(gt);
+	gt->clock_frequency = read_clock_frequency(gt->uncore);
+	if (gt->clock_frequency)
+		gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
+
 	GT_TRACE(gt,
-		 "Using clock frequency: %dkHz\n",
-		 gt->clock_frequency / 1000);
+		 "Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
+		 gt->clock_frequency / 1000,
+		 gt->clock_period_ns,
+		 div_u64(mul_u32_u32(gt->clock_period_ns, S32_MAX),
+			 USEC_PER_SEC));
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
 void intel_gt_check_clock_frequency(const struct intel_gt *gt)
 {
-	if (gt->clock_frequency != read_clock_frequency(gt)) {
+	if (gt->clock_frequency != read_clock_frequency(gt->uncore)) {
 		dev_err(gt->i915->drm.dev,
 			"GT clock frequency changed, was %uHz, now %uHz!\n",
 			gt->clock_frequency,
-			read_clock_frequency(gt));
+			read_clock_frequency(gt->uncore));
 	}
 }
 #endif
@@ -66,26 +185,24 @@ static u64 div_u64_roundup(u64 nom, u32 den)
 	return div_u64(nom + den - 1, den);
 }
 
-u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count)
+u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count)
 {
-	return div_u64_roundup(mul_u32_u32(count, 1000 * 1000 * 1000),
-			       gt->clock_frequency);
+	return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency);
 }
 
-u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count)
+u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
 {
 	return intel_gt_clock_interval_to_ns(gt, 16 * count);
 }
 
-u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns)
+u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns)
 {
-	return div_u64_roundup(mul_u32_u32(gt->clock_frequency, ns),
-			       1000 * 1000 * 1000);
+	return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC);
 }
 
-u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns)
+u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
 {
-	u32 val;
+	u64 val;
 
 	/*
 	 * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
@@ -94,9 +211,9 @@ u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns)
 	 * EI/thresholds are "bad", leading to a very sluggish or even
 	 * frozen machine.
 	 */
-	val = DIV_ROUND_UP(intel_gt_ns_to_clock_interval(gt, ns), 16);
+	val = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16);
 	if (IS_GEN(gt->i915, 6))
-		val = roundup(val, 25);
+		val = div_u64_roundup(val, 25) * 25;
 
 	return val;
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
index f793c89f2cbd719538694c2d8dc750e786ab03db..8b03e97a85df318fdcd3d976f04696517b98832a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
@@ -18,10 +18,10 @@ void intel_gt_check_clock_frequency(const struct intel_gt *gt);
 static inline void intel_gt_check_clock_frequency(const struct intel_gt *gt) {}
 #endif
 
-u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count);
-u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count);
+u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count);
+u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count);
 
-u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns);
-u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns);
+u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns);
+u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns);
 
 #endif /* __INTEL_GT_CLOCK_UTILS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 257063a57101bdda2e3766676c2c68849221cdb3..9830342aa6f4f4251eb681e0f43dc93cefbe7692 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -11,6 +11,7 @@
 #include "intel_breadcrumbs.h"
 #include "intel_gt.h"
 #include "intel_gt_irq.h"
+#include "intel_lrc_reg.h"
 #include "intel_uncore.h"
 #include "intel_rps.h"
 
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 274aa0dd7050e9bf271ecc2fc985f0166355c95a..c94e8ac884eb9509c026e0fb1a2e852c7ed126d9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -39,6 +39,28 @@ static void user_forcewake(struct intel_gt *gt, bool suspend)
 	intel_gt_pm_put(gt);
 }
 
+static void runtime_begin(struct intel_gt *gt)
+{
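+	/*
+	 * Mark the GT as active for busyness accounting; irqs are
+	 * disabled so that, presumably, a reader in irq context on this
+	 * CPU cannot spin against a half-completed update.
+	 */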
+	local_irq_disable();
+	write_seqcount_begin(&gt->stats.lock);
+	gt->stats.start = ktime_get();
+	gt->stats.active = true;
+	write_seqcount_end(&gt->stats.lock);
+	local_irq_enable();
+}
+
+static void runtime_end(struct intel_gt *gt)
+{
+	local_irq_disable();
+	write_seqcount_begin(&gt->stats.lock);
+	gt->stats.active = false;
+	gt->stats.total =
+		ktime_add(gt->stats.total,
+			  ktime_sub(ktime_get(), gt->stats.start));
+	write_seqcount_end(&gt->stats.lock);
+	local_irq_enable();
+}
+
 static int __gt_unpark(struct intel_wakeref *wf)
 {
 	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
@@ -67,6 +89,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
 	i915_pmu_gt_unparked(i915);
 
 	intel_gt_unpark_requests(gt);
+	runtime_begin(gt);
 
 	return 0;
 }
@@ -79,6 +102,7 @@ static int __gt_park(struct intel_wakeref *wf)
 
 	GT_TRACE(gt, "\n");
 
+	runtime_end(gt);
 	intel_gt_park_requests(gt);
 
 	i915_vma_parked(gt);
@@ -106,6 +130,7 @@ static const struct intel_wakeref_ops wf_ops = {
 void intel_gt_pm_init_early(struct intel_gt *gt)
 {
 	intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
+	seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
 }
 
 void intel_gt_pm_init(struct intel_gt *gt)
@@ -339,6 +364,30 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
 	return intel_uc_runtime_resume(&gt->uc);
 }
 
+static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt)
+{
+	ktime_t total = gt->stats.total;
+
+	if (gt->stats.active)
+		total = ktime_add(total,
+				  ktime_sub(ktime_get(), gt->stats.start));
+
+	return total;
+}
+
+ktime_t intel_gt_get_awake_time(const struct intel_gt *gt)
+{
+	unsigned int seq;
+	ktime_t total;
+
+	do {
+		seq = read_seqcount_begin(&gt->stats.lock);
+		total = __intel_gt_get_awake_time(gt);
+	} while (read_seqcount_retry(&gt->stats.lock, seq));
+
+	return total;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftest_gt_pm.c"
 #endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 60f0e2fbe55c846ca7e5f4b0bf90b82f29dcc723..63846a856e7e14dc86a21a1d9c690f78242a23ca 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -58,6 +58,8 @@ int intel_gt_resume(struct intel_gt *gt);
 void intel_gt_runtime_suspend(struct intel_gt *gt);
 int intel_gt_runtime_resume(struct intel_gt *gt);
 
+ktime_t intel_gt_get_awake_time(const struct intel_gt *gt);
+
 static inline bool is_mock_gt(const struct intel_gt *gt)
 {
 	return I915_SELFTEST_ONLY(gt->awake == -ENODEV);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 66fcbf9d0fdd3735c72432060f5de10fdaf794c8..dc06c78c9eebca3898e2c3355b457e42f55f90fb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -135,13 +135,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 	struct intel_gt_timelines *timelines = &gt->timelines;
 	struct intel_timeline *tl, *tn;
 	unsigned long active_count = 0;
-	bool interruptible;
 	LIST_HEAD(free);
 
-	interruptible = true;
-	if (unlikely(timeout < 0))
-		timeout = -timeout, interruptible = false;
-
 	flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */
 	spin_lock(&timelines->lock);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
@@ -163,7 +158,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 				mutex_unlock(&tl->mutex);
 
 				timeout = dma_fence_wait_timeout(fence,
-								 interruptible,
+								 true,
 								 timeout);
 				dma_fence_put(fence);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 6d39a4a11bf3983e81a57915f129c23e6f28fa1e..a83d3e18254d4a98afb46216ce0fc6b5da9b8206 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -75,6 +75,7 @@ struct intel_gt {
 	intel_wakeref_t awake;
 
 	u32 clock_frequency;
+	u32 clock_period_ns;
 
 	struct intel_llc llc;
 	struct intel_rc6 rc6;
@@ -87,6 +88,30 @@ struct intel_gt {
 
 	u32 pm_guc_events;
 
+	struct {
+		bool active;
+
+		/**
+		 * @lock: Lock protecting the below fields.
+		 */
+		seqcount_mutex_t lock;
+
+		/**
+		 * @total: Total time the GT was busy.
+		 *
+		 * Accumulated time not counting the most recent block in cases
+		 * where the GT is currently busy (active > 0).
+		 */
+		ktime_t total;
+
+		/**
+		 * @start: Timestamp of the last idle to active transition.
+		 *
+		 * Idle is defined as active == false, busy as active == true.
+		 */
+		ktime_t start;
+	} stats;
+
 	struct intel_engine_cs *engine[I915_NUM_ENGINES];
 	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
 					    [MAX_ENGINE_INSTANCE + 1];
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 7bfe9072be9af69ecf4aa492b460f638209d41c2..04aa6601e984932a6269e9317da40919a96125d1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -422,6 +422,35 @@ void setup_private_pat(struct intel_uncore *uncore)
 		bdw_setup_private_ppat(uncore);
 }
 
+struct i915_vma *
+__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int err;
+
+	obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+	vma = i915_vma_instance(obj, vm, NULL);
+	if (IS_ERR(vma)) {
+		i915_gem_object_put(obj);
+		return vma;
+	}
+
+	err = i915_vma_pin(vma, 0, 0,
+			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
+	if (err) {
+		i915_vma_put(vma);
+		return ERR_PTR(err);
+	}
+
+	return vma;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_gtt.c"
 #endif
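
The new helper hands back a pinned, CPU-cacheable vma; the caller owns both
the pin and the object reference. A hedged usage sketch (the request that
actually writes into the scratch buffer is assumed, not shown):

    struct i915_vma *vma;

    vma = __vm_create_scratch_for_read(vm, PAGE_SIZE);
    if (IS_ERR(vma))
            return PTR_ERR(vma);

    /* ... emit a request that writes register values into vma ... */

    i915_vma_unpin(vma);    /* drop the pin taken by the helper */
    i915_vma_put(vma);      /* and the object reference */
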
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 8a33940a71f3760c169db0283b8a0d2b94129e51..29c10fde8ce3f5eafbe3e3b9cd36117a43cda766 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -573,6 +573,9 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm,
 void i915_vm_free_pt_stash(struct i915_address_space *vm,
 			   struct i915_vm_pt_stash *stash);
 
+struct i915_vma *
+__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);
+
 static inline struct sgt_dma {
 	struct scatterlist *sg;
 	dma_addr_t dma, max;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 26c7d0a50585a0e2808a17e0a7745d5c6b77d787..94f485b591aff7d8b1184fc424c3436551d6a805 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1,681 +1,216 @@
+// SPDX-License-Identifier: MIT
 /*
  * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Ben Widawsky <ben@bwidawsk.net>
- *    Michel Thierry <michel.thierry@intel.com>
- *    Thomas Daniel <thomas.daniel@intel.com>
- *    Oscar Mateo <oscar.mateo@intel.com>
- *
- */
-
-/**
- * DOC: Logical Rings, Logical Ring Contexts and Execlists
- *
- * Motivation:
- * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
- * These expanded contexts enable a number of new abilities, especially
- * "Execlists" (also implemented in this file).
- *
- * One of the main differences with the legacy HW contexts is that logical
- * ring contexts incorporate many more things to the context's state, like
- * PDPs or ringbuffer control registers:
- *
- * The reason why PDPs are included in the context is straightforward: as
- * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
- * contained there means you don't need to do a ppgtt->switch_mm yourself,
- * instead, the GPU will do it for you on the context switch.
- *
- * But, what about the ringbuffer control registers (head, tail, etc.)?
- * Shouldn't we just need a set of those per engine command streamer? This is
- * where the name "Logical Rings" starts to make sense: by virtualizing the
- * rings, the engine cs shifts to a new "ring buffer" with every context
- * switch. When you want to submit a workload to the GPU you: A) choose your
- * context, B) find its appropriate virtualized ring, C) write commands to it
- * and then, finally, D) tell the GPU to switch to that context.
- *
- * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
- * to a context is via a context execution list, ergo "Execlists".
- *
- * LRC implementation:
- * Regarding the creation of contexts, we have:
- *
- * - One global default context.
- * - One local default context for each opened fd.
- * - One local extra context for each context create ioctl call.
- *
- * Now that ringbuffers belong per-context (and not per-engine, like before)
- * and that contexts are uniquely tied to a given engine (and not reusable,
- * like before) we need:
- *
- * - One ringbuffer per-engine inside each context.
- * - One backing object per-engine inside each context.
- *
- * The global default context starts its life with these new objects fully
- * allocated and populated. The local default context for each opened fd is
- * more complex, because we don't know at creation time which engine is going
- * to use them. To handle this, we have implemented a deferred creation of LR
- * contexts:
- *
- * The local context starts its life as a hollow or blank holder, that only
- * gets populated for a given engine once we receive an execbuffer. If later
- * on we receive another execbuffer ioctl for the same context but a different
- * engine, we allocate/populate a new ringbuffer and context backing object and
- * so on.
- *
- * Finally, regarding local contexts created using the ioctl call: as they are
- * only allowed with the render ring, we can allocate & populate them right
- * away (no need to defer anything, at least for now).
- *
- * Execlists implementation:
- * Execlists are the new method by which, on gen8+ hardware, workloads are
- * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
- * This method works as follows:
- *
- * When a request is committed, its commands (the BB start and any leading or
- * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
- * for the appropriate context. The tail pointer in the hardware context is not
- * updated at this time, but instead, kept by the driver in the ringbuffer
- * structure. A structure representing this request is added to a request queue
- * for the appropriate engine: this structure contains a copy of the context's
- * tail after the request was written to the ring buffer and a pointer to the
- * context itself.
- *
- * If the engine's request queue was empty before the request was added, the
- * queue is processed immediately. Otherwise the queue will be processed during
- * a context switch interrupt. In any case, elements on the queue will get sent
- * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
- * globally unique 20-bits submission ID.
- *
- * When execution of a request completes, the GPU updates the context status
- * buffer with a context complete event and generates a context switch interrupt.
- * During the interrupt handling, the driver examines the events in the buffer:
- * for each context complete event, if the announced ID matches that on the head
- * of the request queue, then that request is retired and removed from the queue.
- *
- * After processing, if any requests were retired and the queue is not empty
- * then a new execution list can be submitted. The two requests at the front of
- * the queue are next to be submitted but since a context may not occur twice in
- * an execution list, if subsequent requests have the same ID as the first then
- * the two requests must be combined. This is done simply by discarding requests
- * at the head of the queue until either only one request is left (in which case
- * we use a NULL second context) or the first two requests have unique IDs.
- *
- * By always executing the first two requests in the queue the driver ensures
- * that the GPU is kept as busy as possible. In the case where a single context
- * completes but a second context is still executing, the request for this second
- * context will be at the head of the queue when we remove the first one. This
- * request will then be resubmitted along with a new request for a different context,
- * which will cause the hardware to continue executing the second request and queue
- * the new request (the GPU detects the condition of a context getting preempted
- * with the same context and optimizes the context switch flow by not doing
- * preemption, but just sampling the new tail pointer).
- *
  */
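
The DOC block deleted above describes the ELSP pairing rule only in prose;
as a compact reference, the self-contained sketch below implements that rule
over plain context ids (illustrative only, not part of the patch):

    /* Pick up to two ELSP slots, coalescing leading requests that share
     * the first context id, per the rule in the removed DOC comment.
     */
    static int elsp_pair(const u32 *ctx_id, int count, u32 elsp[2])
    {
            int i = 1;

            if (!count)
                    return 0;

            elsp[0] = ctx_id[0];
            while (i < count && ctx_id[i] == elsp[0])
                    i++;    /* same context: merged into slot 0 */

            if (i == count)
                    return 1;       /* NULL second slot */

            elsp[1] = ctx_id[i];
            return 2;
    }
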
-#include <linux/interrupt.h>
 
+#include "gen8_engine_cs.h"
 #include "i915_drv.h"
 #include "i915_perf.h"
-#include "i915_trace.h"
-#include "i915_vgpu.h"
-#include "intel_breadcrumbs.h"
-#include "intel_context.h"
-#include "intel_engine_pm.h"
+#include "intel_engine.h"
+#include "intel_gpu_commands.h"
 #include "intel_gt.h"
-#include "intel_gt_pm.h"
-#include "intel_gt_requests.h"
+#include "intel_lrc.h"
 #include "intel_lrc_reg.h"
-#include "intel_mocs.h"
-#include "intel_reset.h"
 #include "intel_ring.h"
-#include "intel_workarounds.h"
 #include "shmem_utils.h"
 
-#define RING_EXECLIST_QFULL		(1 << 0x2)
-#define RING_EXECLIST1_VALID		(1 << 0x3)
-#define RING_EXECLIST0_VALID		(1 << 0x4)
-#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
-#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
-#define RING_EXECLIST0_ACTIVE		(1 << 0x12)
-
-#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
-#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
-#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
-#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
-#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
-#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)
-
-#define GEN8_CTX_STATUS_COMPLETED_MASK \
-	 (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
+static void set_offsets(u32 *regs,
+			const u8 *data,
+			const struct intel_engine_cs *engine,
+			bool close)
+#define NOP(x) (BIT(7) | (x))
+#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6)))
+#define POSTED BIT(0)
+#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
+#define REG16(x) \
+	(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
+	(((x) >> 2) & 0x7f)
+#define END 0
+{
+	const u32 base = engine->mmio_base;
 
-#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
+	while (*data) {
+		u8 count, flags;
 
-#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE	(0x1) /* lower csb dword */
-#define GEN12_CTX_SWITCH_DETAIL(csb_dw)	((csb_dw) & 0xF) /* upper csb dword */
-#define GEN12_CSB_SW_CTX_ID_MASK		GENMASK(25, 15)
-#define GEN12_IDLE_CTX_ID		0x7FF
-#define GEN12_CSB_CTX_VALID(csb_dw) \
-	(FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)
+		if (*data & BIT(7)) { /* skip */
+			count = *data++ & ~BIT(7);
+			regs += count;
+			continue;
+		}
 
-/* Typical size of the average request (2 pipecontrols and a MI_BB) */
-#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
+		count = *data & 0x3f;
+		flags = *data >> 6;
+		data++;
 
-struct virtual_engine {
-	struct intel_engine_cs base;
-	struct intel_context context;
-	struct rcu_work rcu;
+		*regs = MI_LOAD_REGISTER_IMM(count);
+		if (flags & POSTED)
+			*regs |= MI_LRI_FORCE_POSTED;
+		if (INTEL_GEN(engine->i915) >= 11)
+			*regs |= MI_LRI_LRM_CS_MMIO;
+		regs++;
 
-	/*
-	 * We allow only a single request through the virtual engine at a time
-	 * (each request in the timeline waits for the completion fence of
-	 * the previous before being submitted). By restricting ourselves to
-	 * only submitting a single request, each request is placed on to a
- * physical engine to maximise load spreading (by virtue of the late greedy
-	 * scheduling -- each real engine takes the next available request
-	 * upon idling).
-	 */
-	struct i915_request *request;
+		GEM_BUG_ON(!count);
+		do {
+			u32 offset = 0;
+			u8 v;
 
-	/*
- * We keep an rbtree of available virtual engines inside each physical
-	 * engine, sorted by priority. Here we preallocate the nodes we need
-	 * for the virtual engine, indexed by physical_engine->id.
-	 */
-	struct ve_node {
-		struct rb_node rb;
-		int prio;
-	} nodes[I915_NUM_ENGINES];
+			do {
+				v = *data++;
+				offset <<= 7;
+				offset |= v & ~BIT(7);
+			} while (v & BIT(7));
 
-	/*
- * Keep track of bonded pairs -- restrictions upon our selection
-	 * of physical engines any particular request may be submitted to.
-	 * If we receive a submit-fence from a master engine, we will only
-	 * use one of sibling_mask physical engines.
-	 */
-	struct ve_bond {
-		const struct intel_engine_cs *master;
-		intel_engine_mask_t sibling_mask;
-	} *bonds;
-	unsigned int num_bonds;
-
-	/* And finally, which physical engines this virtual engine maps onto. */
-	unsigned int num_siblings;
-	struct intel_engine_cs *siblings[];
-};
+			regs[0] = base + (offset << 2);
+			regs += 2;
+		} while (--count);
+	}
 
-static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
-{
-	GEM_BUG_ON(!intel_engine_is_virtual(engine));
-	return container_of(engine, struct virtual_engine, base);
+	if (close) {
+		/* Close the batch; used mainly by live_lrc_layout() */
+		*regs = MI_BATCH_BUFFER_END;
+		if (INTEL_GEN(engine->i915) >= 10)
+			*regs |= BIT(0);
+	}
 }
 
-static int __execlists_context_alloc(struct intel_context *ce,
-				     struct intel_engine_cs *engine);
-
-static void execlists_init_reg_state(u32 *reg_state,
-				     const struct intel_context *ce,
-				     const struct intel_engine_cs *engine,
-				     const struct intel_ring *ring,
-				     bool close);
-static void
-__execlists_update_reg_state(const struct intel_context *ce,
-			     const struct intel_engine_cs *engine,
-			     u32 head);
+static const u8 gen8_xcs_offsets[] = {
+	NOP(1),
+	LRI(11, 0),
+	REG16(0x244),
+	REG(0x034),
+	REG(0x030),
+	REG(0x038),
+	REG(0x03c),
+	REG(0x168),
+	REG(0x140),
+	REG(0x110),
+	REG(0x11c),
+	REG(0x114),
+	REG(0x118),
 
-static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
-{
-	if (INTEL_GEN(engine->i915) >= 12)
-		return 0x60;
-	else if (INTEL_GEN(engine->i915) >= 9)
-		return 0x54;
-	else if (engine->class == RENDER_CLASS)
-		return 0x58;
-	else
-		return -1;
-}
+	NOP(9),
+	LRI(9, 0),
+	REG16(0x3a8),
+	REG16(0x28c),
+	REG16(0x288),
+	REG16(0x284),
+	REG16(0x280),
+	REG16(0x27c),
+	REG16(0x278),
+	REG16(0x274),
+	REG16(0x270),
 
-static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
-{
-	if (INTEL_GEN(engine->i915) >= 12)
-		return 0x74;
-	else if (INTEL_GEN(engine->i915) >= 9)
-		return 0x68;
-	else if (engine->class == RENDER_CLASS)
-		return 0xd8;
-	else
-		return -1;
-}
+	NOP(13),
+	LRI(2, 0),
+	REG16(0x200),
+	REG(0x028),
 
-static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
-{
-	if (INTEL_GEN(engine->i915) >= 12)
-		return 0x12;
-	else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
-		return 0x18;
-	else
-		return -1;
-}
+	END
+};
 
-static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
-{
-	int x;
+static const u8 gen9_xcs_offsets[] = {
+	NOP(1),
+	LRI(14, POSTED),
+	REG16(0x244),
+	REG(0x034),
+	REG(0x030),
+	REG(0x038),
+	REG(0x03c),
+	REG(0x168),
+	REG(0x140),
+	REG(0x110),
+	REG(0x11c),
+	REG(0x114),
+	REG(0x118),
+	REG(0x1c0),
+	REG(0x1c4),
+	REG(0x1c8),
 
-	x = lrc_ring_wa_bb_per_ctx(engine);
-	if (x < 0)
-		return x;
+	NOP(3),
+	LRI(9, POSTED),
+	REG16(0x3a8),
+	REG16(0x28c),
+	REG16(0x288),
+	REG16(0x284),
+	REG16(0x280),
+	REG16(0x27c),
+	REG16(0x278),
+	REG16(0x274),
+	REG16(0x270),
 
-	return x + 2;
-}
+	NOP(13),
+	LRI(1, POSTED),
+	REG16(0x200),
 
-static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
-{
-	int x;
+	NOP(13),
+	LRI(44, POSTED),
+	REG(0x028),
+	REG(0x09c),
+	REG(0x0c0),
+	REG(0x178),
+	REG(0x17c),
+	REG16(0x358),
+	REG(0x170),
+	REG(0x150),
+	REG(0x154),
+	REG(0x158),
+	REG16(0x41c),
+	REG16(0x600),
+	REG16(0x604),
+	REG16(0x608),
+	REG16(0x60c),
+	REG16(0x610),
+	REG16(0x614),
+	REG16(0x618),
+	REG16(0x61c),
+	REG16(0x620),
+	REG16(0x624),
+	REG16(0x628),
+	REG16(0x62c),
+	REG16(0x630),
+	REG16(0x634),
+	REG16(0x638),
+	REG16(0x63c),
+	REG16(0x640),
+	REG16(0x644),
+	REG16(0x648),
+	REG16(0x64c),
+	REG16(0x650),
+	REG16(0x654),
+	REG16(0x658),
+	REG16(0x65c),
+	REG16(0x660),
+	REG16(0x664),
+	REG16(0x668),
+	REG16(0x66c),
+	REG16(0x670),
+	REG16(0x674),
+	REG16(0x678),
+	REG16(0x67c),
+	REG(0x068),
 
-	x = lrc_ring_indirect_ptr(engine);
-	if (x < 0)
-		return x;
+	END
+};
 
-	return x + 2;
-}
+static const u8 gen12_xcs_offsets[] = {
+	NOP(1),
+	LRI(13, POSTED),
+	REG16(0x244),
+	REG(0x034),
+	REG(0x030),
+	REG(0x038),
+	REG(0x03c),
+	REG(0x168),
+	REG(0x140),
+	REG(0x110),
+	REG(0x1c0),
+	REG(0x1c4),
+	REG(0x1c8),
+	REG(0x180),
+	REG16(0x2b4),
 
-static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
-{
-	if (engine->class != RENDER_CLASS)
-		return -1;
-
-	if (INTEL_GEN(engine->i915) >= 12)
-		return 0xb6;
-	else if (INTEL_GEN(engine->i915) >= 11)
-		return 0xaa;
-	else
-		return -1;
-}
-
-static u32
-lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
-{
-	switch (INTEL_GEN(engine->i915)) {
-	default:
-		MISSING_CASE(INTEL_GEN(engine->i915));
-		fallthrough;
-	case 12:
-		return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
-	case 11:
-		return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
-	case 10:
-		return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
-	case 9:
-		return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
-	case 8:
-		return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
-	}
-}
-
-static void
-lrc_ring_setup_indirect_ctx(u32 *regs,
-			    const struct intel_engine_cs *engine,
-			    u32 ctx_bb_ggtt_addr,
-			    u32 size)
-{
-	GEM_BUG_ON(!size);
-	GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
-	GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
-	regs[lrc_ring_indirect_ptr(engine) + 1] =
-		ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);
-
-	GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
-	regs[lrc_ring_indirect_offset(engine) + 1] =
-		lrc_ring_indirect_offset_default(engine) << 6;
-}
-
-static u32 intel_context_get_runtime(const struct intel_context *ce)
-{
-	/*
-	 * We can use either ppHWSP[16] which is recorded before the context
-	 * switch (and so excludes the cost of context switches) or use the
-	 * value from the context image itself, which is saved/restored earlier
-	 * and so includes the cost of the save.
-	 */
-	return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
-}
-
-static void mark_eio(struct i915_request *rq)
-{
-	if (i915_request_completed(rq))
-		return;
-
-	GEM_BUG_ON(i915_request_signaled(rq));
-
-	i915_request_set_error_once(rq, -EIO);
-	i915_request_mark_complete(rq);
-}
-
-static struct i915_request *
-active_request(const struct intel_timeline * const tl, struct i915_request *rq)
-{
-	struct i915_request *active = rq;
-
-	rcu_read_lock();
-	list_for_each_entry_continue_reverse(rq, &tl->requests, link) {
-		if (i915_request_completed(rq))
-			break;
-
-		active = rq;
-	}
-	rcu_read_unlock();
-
-	return active;
-}
-
-static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
-{
-	return (i915_ggtt_offset(engine->status_page.vma) +
-		I915_GEM_HWS_PREEMPT_ADDR);
-}
-
-static inline void
-ring_set_paused(const struct intel_engine_cs *engine, int state)
-{
-	/*
-	 * We inspect HWS_PREEMPT with a semaphore inside
-	 * engine->emit_fini_breadcrumb. If the dword is true,
-	 * the ring is paused as the semaphore will busywait
-	 * until the dword is false.
-	 */
-	engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
-	if (state)
-		wmb();
-}
-
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
-{
-	return rb_entry(rb, struct i915_priolist, node);
-}
-
-static inline int rq_prio(const struct i915_request *rq)
-{
-	return READ_ONCE(rq->sched.attr.priority);
-}
-
-static int effective_prio(const struct i915_request *rq)
-{
-	int prio = rq_prio(rq);
-
-	/*
-	 * If this request is special and must not be interrupted at any
-	 * cost, so be it. Note we are only checking the most recent request
-	 * in the context and so may be masking an earlier vip request. It
-	 * is hoped that under the conditions where nopreempt is used, this
-	 * will not matter (i.e. all requests to that context will be
-	 * nopreempt for as long as desired).
-	 */
-	if (i915_request_has_nopreempt(rq))
-		prio = I915_PRIORITY_UNPREEMPTABLE;
-
-	return prio;
-}
-
-static int queue_prio(const struct intel_engine_execlists *execlists)
-{
-	struct i915_priolist *p;
-	struct rb_node *rb;
-
-	rb = rb_first_cached(&execlists->queue);
-	if (!rb)
-		return INT_MIN;
-
-	/*
-	 * As the priolist[] are inverted, with the highest priority in [0],
-	 * we have to flip the index value to become priority.
-	 */
-	p = to_priolist(rb);
-	if (!I915_USER_PRIORITY_SHIFT)
-		return p->priority;
-
-	return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
-}
-
-static inline bool need_preempt(const struct intel_engine_cs *engine,
-				const struct i915_request *rq,
-				struct rb_node *rb)
-{
-	int last_prio;
-
-	if (!intel_engine_has_semaphores(engine))
-		return false;
-
-	/*
-	 * Check if the current priority hint merits a preemption attempt.
-	 *
-	 * We record the highest value priority we saw during rescheduling
-	 * prior to this dequeue, therefore we know that if it is strictly
- * less than the current tail of ELSP[0], we do not need to force
-	 * a preempt-to-idle cycle.
-	 *
-	 * However, the priority hint is a mere hint that we may need to
-	 * preempt. If that hint is stale or we may be trying to preempt
-	 * ourselves, ignore the request.
-	 *
-	 * More naturally we would write
-	 *      prio >= max(0, last);
-	 * except that we wish to prevent triggering preemption at the same
-	 * priority level: the task that is running should remain running
-	 * to preserve FIFO ordering of dependencies.
-	 */
-	last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
-	if (engine->execlists.queue_priority_hint <= last_prio)
-		return false;
-
-	/*
-	 * Check against the first request in ELSP[1], it will, thanks to the
-	 * power of PI, be the highest priority of that context.
-	 */
-	if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
-	    rq_prio(list_next_entry(rq, sched.link)) > last_prio)
-		return true;
-
-	if (rb) {
-		struct virtual_engine *ve =
-			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
-		bool preempt = false;
-
-		if (engine == ve->siblings[0]) { /* only preempt one sibling */
-			struct i915_request *next;
-
-			rcu_read_lock();
-			next = READ_ONCE(ve->request);
-			if (next)
-				preempt = rq_prio(next) > last_prio;
-			rcu_read_unlock();
-		}
-
-		if (preempt)
-			return preempt;
-	}
-
-	/*
-	 * If the inflight context did not trigger the preemption, then maybe
-	 * it was the set of queued requests? Pick the highest priority in
-	 * the queue (the first active priolist) and see if it deserves to be
-	 * running instead of ELSP[0].
-	 *
- * The highest priority request in the queue cannot be either
- * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
- * context, its priority would not exceed ELSP[0] aka last_prio.
-	 */
-	return queue_prio(&engine->execlists) > last_prio;
-}
-
-__maybe_unused static inline bool
-assert_priority_queue(const struct i915_request *prev,
-		      const struct i915_request *next)
-{
-	/*
-	 * Without preemption, the prev may refer to the still active element
-	 * which we refuse to let go.
-	 *
-	 * Even with preemption, there are times when we think it is better not
-	 * to preempt and leave an ostensibly lower priority request in flight.
-	 */
-	if (i915_request_is_active(prev))
-		return true;
-
-	return rq_prio(prev) >= rq_prio(next);
-}
-
-/*
- * The context descriptor encodes various attributes of a context,
- * including its GTT address and some flags. Because it's fairly
- * expensive to calculate, we'll just do it once and cache the result,
- * which remains valid until the context is unpinned.
- *
- * This is what a descriptor looks like, from LSB to MSB::
- *
- *      bits  0-11:    flags, GEN8_CTX_* (cached in ctx->desc_template)
- *      bits 12-31:    LRCA, GTT address of (the HWSP of) this context
- *      bits 32-52:    ctx ID, a globally unique tag (highest bit used by GuC)
- *      bits 53-54:    mbz, reserved for use by hardware
- *      bits 55-63:    group ID, currently unused and set to 0
- *
- * Starting from Gen11, the upper dword of the descriptor has a new format:
- *
- *      bits 32-36:    reserved
- *      bits 37-47:    SW context ID
- *      bits 48-53:    engine instance
- *      bit 54:        mbz, reserved for use by hardware
- *      bits 55-60:    SW counter
- *      bits 61-63:    engine class
- *
- * engine info, SW context ID and SW counter need to form a unique number
- * (Context ID) per lrc.
- */
-static u32
-lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
-{
-	u32 desc;
-
-	desc = INTEL_LEGACY_32B_CONTEXT;
-	if (i915_vm_is_4lvl(ce->vm))
-		desc = INTEL_LEGACY_64B_CONTEXT;
-	desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
-
-	desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
-	if (IS_GEN(engine->i915, 8))
-		desc |= GEN8_CTX_L3LLC_COHERENT;
-
-	return i915_ggtt_offset(ce->state) | desc;
-}
-
-static inline unsigned int dword_in_page(void *addr)
-{
-	return offset_in_page(addr) / sizeof(u32);
-}
-
-static void set_offsets(u32 *regs,
-			const u8 *data,
-			const struct intel_engine_cs *engine,
-			bool clear)
-#define NOP(x) (BIT(7) | (x))
-#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6)))
-#define POSTED BIT(0)
-#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
-#define REG16(x) \
-	(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
-	(((x) >> 2) & 0x7f)
-#define END(total_state_size) 0, (total_state_size)
-{
-	const u32 base = engine->mmio_base;
-
-	while (*data) {
-		u8 count, flags;
-
-		if (*data & BIT(7)) { /* skip */
-			count = *data++ & ~BIT(7);
-			if (clear)
-				memset32(regs, MI_NOOP, count);
-			regs += count;
-			continue;
-		}
-
-		count = *data & 0x3f;
-		flags = *data >> 6;
-		data++;
-
-		*regs = MI_LOAD_REGISTER_IMM(count);
-		if (flags & POSTED)
-			*regs |= MI_LRI_FORCE_POSTED;
-		if (INTEL_GEN(engine->i915) >= 11)
-			*regs |= MI_LRI_LRM_CS_MMIO;
-		regs++;
-
-		GEM_BUG_ON(!count);
-		do {
-			u32 offset = 0;
-			u8 v;
-
-			do {
-				v = *data++;
-				offset <<= 7;
-				offset |= v & ~BIT(7);
-			} while (v & BIT(7));
-
-			regs[0] = base + (offset << 2);
-			if (clear)
-				regs[1] = 0;
-			regs += 2;
-		} while (--count);
-	}
-
-	if (clear) {
-		u8 count = *++data;
-
-		/* Clear past the tail for HW access */
-		GEM_BUG_ON(dword_in_page(regs) > count);
-		memset32(regs, MI_NOOP, count - dword_in_page(regs));
-
-		/* Close the batch; used mainly by live_lrc_layout() */
-		*regs = MI_BATCH_BUFFER_END;
-		if (INTEL_GEN(engine->i915) >= 10)
-			*regs |= BIT(0);
-	}
-}
-
-static const u8 gen8_xcs_offsets[] = {
-	NOP(1),
-	LRI(11, 0),
-	REG16(0x244),
-	REG(0x034),
-	REG(0x030),
-	REG(0x038),
-	REG(0x03c),
-	REG(0x168),
-	REG(0x140),
-	REG(0x110),
-	REG(0x11c),
-	REG(0x114),
-	REG(0x118),
-
-	NOP(9),
-	LRI(9, 0),
+	NOP(5),
+	LRI(9, POSTED),
 	REG16(0x3a8),
 	REG16(0x28c),
 	REG16(0x288),
@@ -686,15 +221,10 @@ static const u8 gen8_xcs_offsets[] = {
 	REG16(0x274),
 	REG16(0x270),
 
-	NOP(13),
-	LRI(2, 0),
-	REG16(0x200),
-	REG(0x028),
-
-	END(80)
+	END
 };
 
-static const u8 gen9_xcs_offsets[] = {
+static const u8 gen8_rcs_offsets[] = {
 	NOP(1),
 	LRI(14, POSTED),
 	REG16(0x244),
@@ -725,126 +255,10 @@ static const u8 gen9_xcs_offsets[] = {
 	REG16(0x270),
 
 	NOP(13),
-	LRI(1, POSTED),
-	REG16(0x200),
+	LRI(1, 0),
+	REG(0x0c8),
 
-	NOP(13),
-	LRI(44, POSTED),
-	REG(0x028),
-	REG(0x09c),
-	REG(0x0c0),
-	REG(0x178),
-	REG(0x17c),
-	REG16(0x358),
-	REG(0x170),
-	REG(0x150),
-	REG(0x154),
-	REG(0x158),
-	REG16(0x41c),
-	REG16(0x600),
-	REG16(0x604),
-	REG16(0x608),
-	REG16(0x60c),
-	REG16(0x610),
-	REG16(0x614),
-	REG16(0x618),
-	REG16(0x61c),
-	REG16(0x620),
-	REG16(0x624),
-	REG16(0x628),
-	REG16(0x62c),
-	REG16(0x630),
-	REG16(0x634),
-	REG16(0x638),
-	REG16(0x63c),
-	REG16(0x640),
-	REG16(0x644),
-	REG16(0x648),
-	REG16(0x64c),
-	REG16(0x650),
-	REG16(0x654),
-	REG16(0x658),
-	REG16(0x65c),
-	REG16(0x660),
-	REG16(0x664),
-	REG16(0x668),
-	REG16(0x66c),
-	REG16(0x670),
-	REG16(0x674),
-	REG16(0x678),
-	REG16(0x67c),
-	REG(0x068),
-
-	END(176)
-};
-
-static const u8 gen12_xcs_offsets[] = {
-	NOP(1),
-	LRI(13, POSTED),
-	REG16(0x244),
-	REG(0x034),
-	REG(0x030),
-	REG(0x038),
-	REG(0x03c),
-	REG(0x168),
-	REG(0x140),
-	REG(0x110),
-	REG(0x1c0),
-	REG(0x1c4),
-	REG(0x1c8),
-	REG(0x180),
-	REG16(0x2b4),
-
-	NOP(5),
-	LRI(9, POSTED),
-	REG16(0x3a8),
-	REG16(0x28c),
-	REG16(0x288),
-	REG16(0x284),
-	REG16(0x280),
-	REG16(0x27c),
-	REG16(0x278),
-	REG16(0x274),
-	REG16(0x270),
-
-	END(80)
-};
-
-static const u8 gen8_rcs_offsets[] = {
-	NOP(1),
-	LRI(14, POSTED),
-	REG16(0x244),
-	REG(0x034),
-	REG(0x030),
-	REG(0x038),
-	REG(0x03c),
-	REG(0x168),
-	REG(0x140),
-	REG(0x110),
-	REG(0x11c),
-	REG(0x114),
-	REG(0x118),
-	REG(0x1c0),
-	REG(0x1c4),
-	REG(0x1c8),
-
-	NOP(3),
-	LRI(9, POSTED),
-	REG16(0x3a8),
-	REG16(0x28c),
-	REG16(0x288),
-	REG16(0x284),
-	REG16(0x280),
-	REG16(0x27c),
-	REG16(0x278),
-	REG16(0x274),
-	REG16(0x270),
-
-	NOP(13),
-	LRI(1, 0),
-	REG(0x0c8),
-
-	END(80)
+	END
 };
 
 static const u8 gen9_rcs_offsets[] = {
@@ -898,4384 +312,322 @@ static const u8 gen9_rcs_offsets[] = {
 	REG16(0x604),
 	REG16(0x608),
 	REG16(0x60c),
-	REG16(0x610),
-	REG16(0x614),
-	REG16(0x618),
-	REG16(0x61c),
-	REG16(0x620),
-	REG16(0x624),
-	REG16(0x628),
-	REG16(0x62c),
-	REG16(0x630),
-	REG16(0x634),
-	REG16(0x638),
-	REG16(0x63c),
-	REG16(0x640),
-	REG16(0x644),
-	REG16(0x648),
-	REG16(0x64c),
-	REG16(0x650),
-	REG16(0x654),
-	REG16(0x658),
-	REG16(0x65c),
-	REG16(0x660),
-	REG16(0x664),
-	REG16(0x668),
-	REG16(0x66c),
-	REG16(0x670),
-	REG16(0x674),
-	REG16(0x678),
-	REG16(0x67c),
-	REG(0x68),
-
-	END(176)
-};
-
-static const u8 gen11_rcs_offsets[] = {
-	NOP(1),
-	LRI(15, POSTED),
-	REG16(0x244),
-	REG(0x034),
-	REG(0x030),
-	REG(0x038),
-	REG(0x03c),
-	REG(0x168),
-	REG(0x140),
-	REG(0x110),
-	REG(0x11c),
-	REG(0x114),
-	REG(0x118),
-	REG(0x1c0),
-	REG(0x1c4),
-	REG(0x1c8),
-	REG(0x180),
-
-	NOP(1),
-	LRI(9, POSTED),
-	REG16(0x3a8),
-	REG16(0x28c),
-	REG16(0x288),
-	REG16(0x284),
-	REG16(0x280),
-	REG16(0x27c),
-	REG16(0x278),
-	REG16(0x274),
-	REG16(0x270),
-
-	LRI(1, POSTED),
-	REG(0x1b0),
-
-	NOP(10),
-	LRI(1, 0),
-	REG(0x0c8),
-
-	END(80)
-};
-
-static const u8 gen12_rcs_offsets[] = {
-	NOP(1),
-	LRI(13, POSTED),
-	REG16(0x244),
-	REG(0x034),
-	REG(0x030),
-	REG(0x038),
-	REG(0x03c),
-	REG(0x168),
-	REG(0x140),
-	REG(0x110),
-	REG(0x1c0),
-	REG(0x1c4),
-	REG(0x1c8),
-	REG(0x180),
-	REG16(0x2b4),
-
-	NOP(5),
-	LRI(9, POSTED),
-	REG16(0x3a8),
-	REG16(0x28c),
-	REG16(0x288),
-	REG16(0x284),
-	REG16(0x280),
-	REG16(0x27c),
-	REG16(0x278),
-	REG16(0x274),
-	REG16(0x270),
-
-	LRI(3, POSTED),
-	REG(0x1b0),
-	REG16(0x5a8),
-	REG16(0x5ac),
-
-	NOP(6),
-	LRI(1, 0),
-	REG(0x0c8),
-	NOP(3 + 9 + 1),
-
-	LRI(51, POSTED),
-	REG16(0x588),
-	REG16(0x588),
-	REG16(0x588),
-	REG16(0x588),
-	REG16(0x588),
-	REG16(0x588),
-	REG(0x028),
-	REG(0x09c),
-	REG(0x0c0),
-	REG(0x178),
-	REG(0x17c),
-	REG16(0x358),
-	REG(0x170),
-	REG(0x150),
-	REG(0x154),
-	REG(0x158),
-	REG16(0x41c),
-	REG16(0x600),
-	REG16(0x604),
-	REG16(0x608),
-	REG16(0x60c),
-	REG16(0x610),
-	REG16(0x614),
-	REG16(0x618),
-	REG16(0x61c),
-	REG16(0x620),
-	REG16(0x624),
-	REG16(0x628),
-	REG16(0x62c),
-	REG16(0x630),
-	REG16(0x634),
-	REG16(0x638),
-	REG16(0x63c),
-	REG16(0x640),
-	REG16(0x644),
-	REG16(0x648),
-	REG16(0x64c),
-	REG16(0x650),
-	REG16(0x654),
-	REG16(0x658),
-	REG16(0x65c),
-	REG16(0x660),
-	REG16(0x664),
-	REG16(0x668),
-	REG16(0x66c),
-	REG16(0x670),
-	REG16(0x674),
-	REG16(0x678),
-	REG16(0x67c),
-	REG(0x068),
-	REG(0x084),
-	NOP(1),
-
-	END(192)
-};
-
-#undef END
-#undef REG16
-#undef REG
-#undef LRI
-#undef NOP
-
-static const u8 *reg_offsets(const struct intel_engine_cs *engine)
-{
-	/*
-	 * The gen12+ lists only have the registers we program in the basic
-	 * default state. We rely on the context image using relative
- * addressing to automatically fix up the register state between the
- * physical engines for the virtual engine.
-	 */
-	GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 &&
-		   !intel_engine_has_relative_mmio(engine));
-
-	if (engine->class == RENDER_CLASS) {
-		if (INTEL_GEN(engine->i915) >= 12)
-			return gen12_rcs_offsets;
-		else if (INTEL_GEN(engine->i915) >= 11)
-			return gen11_rcs_offsets;
-		else if (INTEL_GEN(engine->i915) >= 9)
-			return gen9_rcs_offsets;
-		else
-			return gen8_rcs_offsets;
-	} else {
-		if (INTEL_GEN(engine->i915) >= 12)
-			return gen12_xcs_offsets;
-		else if (INTEL_GEN(engine->i915) >= 9)
-			return gen9_xcs_offsets;
-		else
-			return gen8_xcs_offsets;
-	}
-}
-
-static struct i915_request *
-__unwind_incomplete_requests(struct intel_engine_cs *engine)
-{
-	struct i915_request *rq, *rn, *active = NULL;
-	struct list_head *pl;
-	int prio = I915_PRIORITY_INVALID;
-
-	lockdep_assert_held(&engine->active.lock);
-
-	list_for_each_entry_safe_reverse(rq, rn,
-					 &engine->active.requests,
-					 sched.link) {
-		if (i915_request_completed(rq))
-			continue; /* XXX */
-
-		__i915_request_unsubmit(rq);
-
-		/*
-		 * Push the request back into the queue for later resubmission.
-		 * If this request is not native to this physical engine (i.e.
-		 * it came from a virtual source), push it back onto the virtual
-		 * engine so that it can be moved across onto another physical
-		 * engine as load dictates.
-		 */
-		if (likely(rq->execution_mask == engine->mask)) {
-			GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
-			if (rq_prio(rq) != prio) {
-				prio = rq_prio(rq);
-				pl = i915_sched_lookup_priolist(engine, prio);
-			}
-			GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
-
-			list_move(&rq->sched.link, pl);
-			set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-
-			/* Check in case we rollback so far we wrap [size/2] */
-			if (intel_ring_direction(rq->ring,
-						 rq->tail,
-						 rq->ring->tail + 8) > 0)
-				rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
-
-			active = rq;
-		} else {
-			struct intel_engine_cs *owner = rq->context->engine;
-
-			WRITE_ONCE(rq->engine, owner);
-			owner->submit_request(rq);
-			active = NULL;
-		}
-	}
-
-	return active;
-}
-
-struct i915_request *
-execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
-{
-	struct intel_engine_cs *engine =
-		container_of(execlists, typeof(*engine), execlists);
-
-	return __unwind_incomplete_requests(engine);
-}
-
-static inline void
-execlists_context_status_change(struct i915_request *rq, unsigned long status)
-{
-	/*
- * Only used when GVT-g is enabled now. When GVT-g is disabled,
- * the compiler should eliminate this function as dead code.
-	 */
-	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
-		return;
-
-	atomic_notifier_call_chain(&rq->engine->context_status_notifier,
-				   status, rq);
-}
-
-static void intel_engine_context_in(struct intel_engine_cs *engine)
-{
-	unsigned long flags;
-
-	if (atomic_add_unless(&engine->stats.active, 1, 0))
-		return;
-
-	write_seqlock_irqsave(&engine->stats.lock, flags);
-	if (!atomic_add_unless(&engine->stats.active, 1, 0)) {
-		engine->stats.start = ktime_get();
-		atomic_inc(&engine->stats.active);
-	}
-	write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
-static void intel_engine_context_out(struct intel_engine_cs *engine)
-{
-	unsigned long flags;
-
-	GEM_BUG_ON(!atomic_read(&engine->stats.active));
-
-	if (atomic_add_unless(&engine->stats.active, -1, 1))
-		return;
-
-	write_seqlock_irqsave(&engine->stats.lock, flags);
-	if (atomic_dec_and_test(&engine->stats.active)) {
-		engine->stats.total =
-			ktime_add(engine->stats.total,
-				  ktime_sub(ktime_get(), engine->stats.start));
-	}
-	write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
-static void
-execlists_check_context(const struct intel_context *ce,
-			const struct intel_engine_cs *engine,
-			const char *when)
-{
-	const struct intel_ring *ring = ce->ring;
-	u32 *regs = ce->lrc_reg_state;
-	bool valid = true;
-	int x;
-
-	if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
-		pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
-		       engine->name,
-		       regs[CTX_RING_START],
-		       i915_ggtt_offset(ring->vma));
-		regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
-		valid = false;
-	}
-
-	if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
-	    (RING_CTL_SIZE(ring->size) | RING_VALID)) {
-		pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
-		       engine->name,
-		       regs[CTX_RING_CTL],
-		       (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
-		regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
-		valid = false;
-	}
-
-	x = lrc_ring_mi_mode(engine);
-	if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
-		pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
-		       engine->name, regs[x + 1]);
-		regs[x + 1] &= ~STOP_RING;
-		regs[x + 1] |= STOP_RING << 16;
-		valid = false;
-	}
-
-	WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when);
-}
-
-static void restore_default_state(struct intel_context *ce,
-				  struct intel_engine_cs *engine)
-{
-	u32 *regs;
-
-	regs = memset(ce->lrc_reg_state, 0, engine->context_size - PAGE_SIZE);
-	execlists_init_reg_state(regs, ce, engine, ce->ring, true);
-
-	ce->runtime.last = intel_context_get_runtime(ce);
-}
-
-static void reset_active(struct i915_request *rq,
-			 struct intel_engine_cs *engine)
-{
-	struct intel_context * const ce = rq->context;
-	u32 head;
-
-	/*
-	 * The executing context has been cancelled. We want to prevent
-	 * further execution along this context and propagate the error on
-	 * to anything depending on its results.
-	 *
-	 * In __i915_request_submit(), we apply the -EIO and remove the
-	 * requests' payloads for any banned requests. But first, we must
-	 * rewind the context back to the start of the incomplete request so
-	 * that we do not jump back into the middle of the batch.
-	 *
-	 * We preserve the breadcrumbs and semaphores of the incomplete
- * requests so that inter-timeline dependencies (i.e. other timelines)
-	 * remain correctly ordered. And we defer to __i915_request_submit()
-	 * so that all asynchronous waits are correctly handled.
-	 */
-	ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n",
-		     rq->fence.context, rq->fence.seqno);
-
-	/* On resubmission of the active request, payload will be scrubbed */
-	if (i915_request_completed(rq))
-		head = rq->tail;
-	else
-		head = active_request(ce->timeline, rq)->head;
-	head = intel_ring_wrap(ce->ring, head);
-
-	/* Scrub the context image to prevent replaying the previous batch */
-	restore_default_state(ce, engine);
-	__execlists_update_reg_state(ce, engine, head);
-
-	/* We've switched away, so this should be a no-op, but intent matters */
-	ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
-}
-
-static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
-{
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-	ce->runtime.num_underflow += dt < 0;
-	ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
-#endif
-}
-
-static void intel_context_update_runtime(struct intel_context *ce)
-{
-	u32 old;
-	s32 dt;
-
-	if (intel_context_is_barrier(ce))
-		return;
-
-	old = ce->runtime.last;
-	ce->runtime.last = intel_context_get_runtime(ce);
-	dt = ce->runtime.last - old;
-
-	if (unlikely(dt <= 0)) {
-		CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
-			 old, ce->runtime.last, dt);
-		st_update_runtime_underflow(ce, dt);
-		return;
-	}
-
-	ewma_runtime_add(&ce->runtime.avg, dt);
-	ce->runtime.total += dt;
-}
-
-static inline struct intel_engine_cs *
-__execlists_schedule_in(struct i915_request *rq)
-{
-	struct intel_engine_cs * const engine = rq->engine;
-	struct intel_context * const ce = rq->context;
-
-	intel_context_get(ce);
-
-	if (unlikely(intel_context_is_banned(ce)))
-		reset_active(rq, engine);
-
-	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
-		execlists_check_context(ce, engine, "before");
-
-	if (ce->tag) {
-		/* Use a fixed tag for OA and friends */
-		GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
-		ce->lrc.ccid = ce->tag;
-	} else {
-		/* We don't need a strict matching tag, just different values */
-		unsigned int tag = ffs(READ_ONCE(engine->context_tag));
-
-		GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
-		clear_bit(tag - 1, &engine->context_tag);
-		ce->lrc.ccid = tag << (GEN11_SW_CTX_ID_SHIFT - 32);
-
-		BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
-	}
-
-	ce->lrc.ccid |= engine->execlists.ccid;
-
-	__intel_gt_pm_get(engine->gt);
-	if (engine->fw_domain && !atomic_fetch_inc(&engine->fw_active))
-		intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
-	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
-	intel_engine_context_in(engine);
-
-	return engine;
-}
-
-static inline struct i915_request *
-execlists_schedule_in(struct i915_request *rq, int idx)
-{
-	struct intel_context * const ce = rq->context;
-	struct intel_engine_cs *old;
-
-	GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
-	trace_i915_request_in(rq, idx);
-
-	old = READ_ONCE(ce->inflight);
-	do {
-		if (!old) {
-			WRITE_ONCE(ce->inflight, __execlists_schedule_in(rq));
-			break;
-		}
-	} while (!try_cmpxchg(&ce->inflight, &old, ptr_inc(old)));
-
-	GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
-	return i915_request_get(rq);
-}
-
-static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
-{
-	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
-	struct i915_request *next = READ_ONCE(ve->request);
-
-	if (next == rq || (next && next->execution_mask & ~rq->execution_mask))
-		tasklet_hi_schedule(&ve->base.execlists.tasklet);
-}
-
-static inline void
-__execlists_schedule_out(struct i915_request *rq,
-			 struct intel_engine_cs * const engine,
-			 unsigned int ccid)
-{
-	struct intel_context * const ce = rq->context;
-
-	/*
-	 * NB process_csb() is not under the engine->active.lock and hence
-	 * schedule_out can race with schedule_in meaning that we should
-	 * refrain from doing non-trivial work here.
-	 */
-
-	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
-		execlists_check_context(ce, engine, "after");
-
-	/*
-	 * If we have just completed this context, the engine may now be
-	 * idle and we want to re-enter powersaving.
-	 */
-	if (list_is_last_rcu(&rq->link, &ce->timeline->requests) &&
-	    i915_request_completed(rq))
-		intel_engine_add_retire(engine, ce->timeline);
-
-	ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
-	ccid &= GEN12_MAX_CONTEXT_HW_ID;
-	if (ccid < BITS_PER_LONG) {
-		GEM_BUG_ON(ccid == 0);
-		GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
-		set_bit(ccid - 1, &engine->context_tag);
-	}
-
-	intel_context_update_runtime(ce);
-	intel_engine_context_out(engine);
-	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
-	if (engine->fw_domain && !atomic_dec_return(&engine->fw_active))
-		intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
-	intel_gt_pm_put_async(engine->gt);
-
-	/*
-	 * If this is part of a virtual engine, its next request may
-	 * have been blocked waiting for access to the active context.
-	 * We have to kick all the siblings again in case we need to
-	 * switch (e.g. the next request is not runnable on this
-	 * engine). Hopefully, we will already have submitted the next
-	 * request before the tasklet runs and do not need to rebuild
-	 * each virtual tree and kick everyone again.
-	 */
-	if (ce->engine != engine)
-		kick_siblings(rq, ce);
-
-	intel_context_put(ce);
-}
-
-static inline void
-execlists_schedule_out(struct i915_request *rq)
-{
-	struct intel_context * const ce = rq->context;
-	struct intel_engine_cs *cur, *old;
-	u32 ccid;
-
-	trace_i915_request_out(rq);
-
-	ccid = rq->context->lrc.ccid;
-	old = READ_ONCE(ce->inflight);
-	do
-		cur = ptr_unmask_bits(old, 2) ? ptr_dec(old) : NULL;
-	while (!try_cmpxchg(&ce->inflight, &old, cur));
-	if (!cur)
-		__execlists_schedule_out(rq, old, ccid);
-
-	i915_request_put(rq);
-}
-
-static u64 execlists_update_context(struct i915_request *rq)
-{
-	struct intel_context *ce = rq->context;
-	u64 desc = ce->lrc.desc;
-	u32 tail, prev;
-
-	/*
-	 * WaIdleLiteRestore:bdw,skl
-	 *
-	 * We should never submit the context with the same RING_TAIL twice
-	 * just in case we submit an empty ring, which confuses the HW.
-	 *
-	 * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
-	 * the normal request to be able to always advance the RING_TAIL on
-	 * subsequent resubmissions (for lite restore). Should that fail us,
-	 * and we try and submit the same tail again, force the context
-	 * reload.
-	 *
-	 * If we need to return to a preempted context, we need to skip the
-	 * lite-restore and force it to reload the RING_TAIL. Otherwise, the
-	 * HW has a tendency to ignore us rewinding the TAIL to the end of
-	 * an earlier request.
-	 */
-	GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
-	prev = rq->ring->tail;
-	tail = intel_ring_set_tail(rq->ring, rq->tail);
-	if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
-		desc |= CTX_DESC_FORCE_RESTORE;
-	ce->lrc_reg_state[CTX_RING_TAIL] = tail;
-	rq->tail = rq->wa_tail;
-
-	/*
-	 * Make sure the context image is complete before we submit it to HW.
-	 *
-	 * Ostensibly, writes (including the WCB) should be flushed prior to
- * an uncached write such as our mmio register access, but the empirical
-	 * evidence (esp. on Braswell) suggests that the WC write into memory
-	 * may not be visible to the HW prior to the completion of the UC
-	 * register write and that we may begin execution from the context
-	 * before its image is complete leading to invalid PD chasing.
-	 */
-	wmb();
-
-	ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
-	return desc;
-}
-
-static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
-{
-	if (execlists->ctrl_reg) {
-		writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
-		writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
-	} else {
-		writel(upper_32_bits(desc), execlists->submit_reg);
-		writel(lower_32_bits(desc), execlists->submit_reg);
-	}
-}
-
-static __maybe_unused char *
-dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
-{
-	if (!rq)
-		return "";
-
-	snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d",
-		 prefix,
-		 rq->context->lrc.ccid,
-		 rq->fence.context, rq->fence.seqno,
-		 i915_request_completed(rq) ? "!" :
-		 i915_request_started(rq) ? "*" :
-		 "",
-		 rq_prio(rq));
-
-	return buf;
-}
-
-static __maybe_unused void
-trace_ports(const struct intel_engine_execlists *execlists,
-	    const char *msg,
-	    struct i915_request * const *ports)
-{
-	const struct intel_engine_cs *engine =
-		container_of(execlists, typeof(*engine), execlists);
-	char __maybe_unused p0[40], p1[40];
-
-	if (!ports[0])
-		return;
-
-	ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
-		     dump_port(p0, sizeof(p0), "", ports[0]),
-		     dump_port(p1, sizeof(p1), ", ", ports[1]));
-}
-
-static inline bool
-reset_in_progress(const struct intel_engine_execlists *execlists)
-{
-	return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
-}
-
-static __maybe_unused bool
-assert_pending_valid(const struct intel_engine_execlists *execlists,
-		     const char *msg)
-{
-	struct intel_engine_cs *engine =
-		container_of(execlists, typeof(*engine), execlists);
-	struct i915_request * const *port, *rq;
-	struct intel_context *ce = NULL;
-	bool sentinel = false;
-	u32 ccid = -1;
-
-	trace_ports(execlists, msg, execlists->pending);
-
-	/* We may be messing around with the lists during reset, lalala */
-	if (reset_in_progress(execlists))
-		return true;
-
-	if (!execlists->pending[0]) {
-		GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
-			      engine->name);
-		return false;
-	}
-
-	if (execlists->pending[execlists_num_ports(execlists)]) {
-		GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
-			      engine->name, execlists_num_ports(execlists));
-		return false;
-	}
-
-	for (port = execlists->pending; (rq = *port); port++) {
-		unsigned long flags;
-		bool ok = true;
-
-		GEM_BUG_ON(!kref_read(&rq->fence.refcount));
-		GEM_BUG_ON(!i915_request_is_active(rq));
-
-		if (ce == rq->context) {
-			GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
-				      engine->name,
-				      ce->timeline->fence_context,
-				      port - execlists->pending);
-			return false;
-		}
-		ce = rq->context;
-
-		if (ccid == ce->lrc.ccid) {
-			GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
-				      engine->name,
-				      ccid, ce->timeline->fence_context,
-				      port - execlists->pending);
-			return false;
-		}
-		ccid = ce->lrc.ccid;
-
-		/*
-		 * Sentinels are supposed to be the last request so they flush
-		 * the current execution off the HW. Check that they are the only
-		 * request in the pending submission.
-		 */
-		if (sentinel) {
-			GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
-				      engine->name,
-				      ce->timeline->fence_context,
-				      port - execlists->pending);
-			return false;
-		}
-		sentinel = i915_request_has_sentinel(rq);
-
-		/* Hold tightly onto the lock to prevent concurrent retires! */
-		if (!spin_trylock_irqsave(&rq->lock, flags))
-			continue;
-
-		if (i915_request_completed(rq))
-			goto unlock;
-
-		if (i915_active_is_idle(&ce->active) &&
-		    !intel_context_is_barrier(ce)) {
-			GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
-				      engine->name,
-				      ce->timeline->fence_context,
-				      port - execlists->pending);
-			ok = false;
-			goto unlock;
-		}
-
-		if (!i915_vma_is_pinned(ce->state)) {
-			GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
-				      engine->name,
-				      ce->timeline->fence_context,
-				      port - execlists->pending);
-			ok = false;
-			goto unlock;
-		}
-
-		if (!i915_vma_is_pinned(ce->ring->vma)) {
-			GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
-				      engine->name,
-				      ce->timeline->fence_context,
-				      port - execlists->pending);
-			ok = false;
-			goto unlock;
-		}
-
-unlock:
-		spin_unlock_irqrestore(&rq->lock, flags);
-		if (!ok)
-			return false;
-	}
-
-	return ce;
-}
-
-static void execlists_submit_ports(struct intel_engine_cs *engine)
-{
-	struct intel_engine_execlists *execlists = &engine->execlists;
-	unsigned int n;
-
-	GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
-
-	/*
-	 * We can skip acquiring intel_runtime_pm_get() here as it was taken
-	 * on our behalf by the request (see i915_gem_mark_busy()) and it will
-	 * not be relinquished until the device is idle (see
-	 * i915_gem_idle_work_handler()). As a precaution, we make sure
- * that all ELSP are drained, i.e. we have processed the CSB,
-	 * before allowing ourselves to idle and calling intel_runtime_pm_put().
-	 */
-	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
-
-	/*
-	 * ELSQ note: the submit queue is not cleared after being submitted
-	 * to the HW so we need to make sure we always clean it up. This is
-	 * currently ensured by the fact that we always write the same number
-	 * of elsq entries, keep this in mind before changing the loop below.
-	 */
-	for (n = execlists_num_ports(execlists); n--; ) {
-		struct i915_request *rq = execlists->pending[n];
-
-		write_desc(execlists,
-			   rq ? execlists_update_context(rq) : 0,
-			   n);
-	}
-
-	/* we need to manually load the submit queue */
-	if (execlists->ctrl_reg)
-		writel(EL_CTRL_LOAD, execlists->ctrl_reg);
-}
-
-static bool ctx_single_port_submission(const struct intel_context *ce)
-{
-	return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
-		intel_context_force_single_submission(ce));
-}
-
-static bool can_merge_ctx(const struct intel_context *prev,
-			  const struct intel_context *next)
-{
-	if (prev != next)
-		return false;
-
-	if (ctx_single_port_submission(prev))
-		return false;
-
-	return true;
-}
-
-static unsigned long i915_request_flags(const struct i915_request *rq)
-{
-	return READ_ONCE(rq->fence.flags);
-}
-
-static bool can_merge_rq(const struct i915_request *prev,
-			 const struct i915_request *next)
-{
-	GEM_BUG_ON(prev == next);
-	GEM_BUG_ON(!assert_priority_queue(prev, next));
-
-	/*
-	 * We do not submit known completed requests. Therefore if the next
-	 * request is already completed, we can pretend to merge it in
-	 * with the previous context (and we will skip updating the ELSP
-	 * and tracking). Thus hopefully keeping the ELSP full with active
-	 * contexts, despite the best efforts of preempt-to-busy to confuse
-	 * us.
-	 */
-	if (i915_request_completed(next))
-		return true;
-
-	if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
-		     (BIT(I915_FENCE_FLAG_NOPREEMPT) |
-		      BIT(I915_FENCE_FLAG_SENTINEL))))
-		return false;
-
-	if (!can_merge_ctx(prev->context, next->context))
-		return false;
-
-	GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
-	return true;
-}
-
-static void virtual_update_register_offsets(u32 *regs,
-					    struct intel_engine_cs *engine)
-{
-	set_offsets(regs, reg_offsets(engine), engine, false);
-}
-
-static bool virtual_matches(const struct virtual_engine *ve,
-			    const struct i915_request *rq,
-			    const struct intel_engine_cs *engine)
-{
-	const struct intel_engine_cs *inflight;
-
-	if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
-		return false;
-
-	/*
-	 * We track when the HW has completed saving the context image
-	 * (i.e. when we have seen the final CS event switching out of
-	 * the context) and must not overwrite the context image before
-	 * then. This restricts us to only using the active engine
-	 * while the previous virtualized request is inflight (so
-	 * we reuse the register offsets). This is a very small
- * hysteresis on the greedy selection algorithm.
-	 */
-	inflight = intel_context_inflight(&ve->context);
-	if (inflight && inflight != engine)
-		return false;
-
-	return true;
-}
-
-static void virtual_xfer_context(struct virtual_engine *ve,
-				 struct intel_engine_cs *engine)
-{
-	unsigned int n;
-
-	if (likely(engine == ve->siblings[0]))
-		return;
-
-	GEM_BUG_ON(READ_ONCE(ve->context.inflight));
-	if (!intel_engine_has_relative_mmio(engine))
-		virtual_update_register_offsets(ve->context.lrc_reg_state,
-						engine);
-
-	/*
-	 * Move the bound engine to the top of the list for
-	 * future execution. We then kick this tasklet first
-	 * before checking others, so that we preferentially
-	 * reuse this set of bound registers.
-	 */
-	for (n = 1; n < ve->num_siblings; n++) {
-		if (ve->siblings[n] == engine) {
-			swap(ve->siblings[n], ve->siblings[0]);
-			break;
-		}
-	}
-}
-
-#define for_each_waiter(p__, rq__) \
-	list_for_each_entry_lockless(p__, \
-				     &(rq__)->sched.waiters_list, \
-				     wait_link)
-
-#define for_each_signaler(p__, rq__) \
-	list_for_each_entry_rcu(p__, \
-				&(rq__)->sched.signalers_list, \
-				signal_link)
-
-static void defer_request(struct i915_request *rq, struct list_head * const pl)
-{
-	LIST_HEAD(list);
-
-	/*
-	 * We want to move the interrupted request to the back of
-	 * the round-robin list (i.e. its priority level), but
-	 * in doing so, we must then move all requests that were in
-	 * flight and were waiting for the interrupted request to
-	 * be run after it again.
-	 */
-	do {
-		struct i915_dependency *p;
-
-		GEM_BUG_ON(i915_request_is_active(rq));
-		list_move_tail(&rq->sched.link, pl);
-
-		for_each_waiter(p, rq) {
-			struct i915_request *w =
-				container_of(p->waiter, typeof(*w), sched);
-
-			if (p->flags & I915_DEPENDENCY_WEAK)
-				continue;
-
-			/* Leave semaphores spinning on the other engines */
-			if (w->engine != rq->engine)
-				continue;
-
-			/* No waiter should start before its signaler */
-			GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
-				   i915_request_started(w) &&
-				   !i915_request_completed(rq));
-
-			GEM_BUG_ON(i915_request_is_active(w));
-			if (!i915_request_is_ready(w))
-				continue;
-
-			if (rq_prio(w) < rq_prio(rq))
-				continue;
-
-			GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
-			list_move_tail(&w->sched.link, &list);
-		}
-
-		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
-	} while (rq);
-}
-
-static void defer_active(struct intel_engine_cs *engine)
-{
-	struct i915_request *rq;
-
-	rq = __unwind_incomplete_requests(engine);
-	if (!rq)
-		return;
-
-	defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
-}
-
-static bool
-need_timeslice(const struct intel_engine_cs *engine,
-	       const struct i915_request *rq,
-	       const struct rb_node *rb)
-{
-	int hint;
-
-	if (!intel_engine_has_timeslices(engine))
-		return false;
-
-	hint = engine->execlists.queue_priority_hint;
-
-	if (rb) {
-		const struct virtual_engine *ve =
-			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
-		const struct intel_engine_cs *inflight =
-			intel_context_inflight(&ve->context);
-
-		if (!inflight || inflight == engine) {
-			struct i915_request *next;
-
-			rcu_read_lock();
-			next = READ_ONCE(ve->request);
-			if (next)
-				hint = max(hint, rq_prio(next));
-			rcu_read_unlock();
-		}
-	}
-
-	if (!list_is_last(&rq->sched.link, &engine->active.requests))
-		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
-
-	GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE);
-	return hint >= effective_prio(rq);
-}
-
-static bool
-timeslice_yield(const struct intel_engine_execlists *el,
-		const struct i915_request *rq)
-{
-	/*
-	 * Once bitten, forever smitten!
-	 *
-	 * If the active context ever busy-waited on a semaphore,
-	 * it will be treated as a hog until the end of its timeslice (i.e.
-	 * until it is scheduled out and replaced by a new submission,
-	 * possibly even its own lite-restore). The HW only sends an interrupt
-	 * on the first miss, and we do not know if that semaphore has been
-	 * signaled, or even if it is now stuck on another semaphore. Play
-	 * safe, yield if it might be stuck -- it will be given a fresh
-	 * timeslice in the near future.
-	 */
-	return rq->context->lrc.ccid == READ_ONCE(el->yield);
-}
-
-static bool
-timeslice_expired(const struct intel_engine_execlists *el,
-		  const struct i915_request *rq)
-{
-	return timer_expired(&el->timer) || timeslice_yield(el, rq);
-}
-
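-/*
- * Priority of the request that will follow rq on this engine, falling
- * back to the queue hint if rq is the last request in the active list.
- */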
-static int
-switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
-{
-	if (list_is_last(&rq->sched.link, &engine->active.requests))
-		return engine->execlists.queue_priority_hint;
-
-	return rq_prio(list_next_entry(rq, sched.link));
-}
-
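-/*
- * Current timeslice duration in ms, sampled once as it may be adjusted
- * at runtime through the engine properties.
- */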
-static inline unsigned long
-timeslice(const struct intel_engine_cs *engine)
-{
-	return READ_ONCE(engine->props.timeslice_duration_ms);
-}
-
-static unsigned long active_timeslice(const struct intel_engine_cs *engine)
-{
-	const struct intel_engine_execlists *execlists = &engine->execlists;
-	const struct i915_request *rq = *execlists->active;
-
-	if (!rq || i915_request_completed(rq))
-		return 0;
-
-	if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
-		return 0;
-
-	return timeslice(engine);
-}
-
-static void set_timeslice(struct intel_engine_cs *engine)
-{
-	unsigned long duration;
-
-	if (!intel_engine_has_timeslices(engine))
-		return;
-
-	duration = active_timeslice(engine);
-	ENGINE_TRACE(engine, "bump timeslicing, interval:%lu\n", duration);
-
-	set_timer_ms(&engine->execlists.timer, duration);
-}
-
-static void start_timeslice(struct intel_engine_cs *engine, int prio)
-{
-	struct intel_engine_execlists *execlists = &engine->execlists;
-	unsigned long duration;
-
-	if (!intel_engine_has_timeslices(engine))
-		return;
-
-	WRITE_ONCE(execlists->switch_priority_hint, prio);
-	if (prio == INT_MIN)
-		return;
-
-	if (timer_pending(&execlists->timer))
-		return;
-
-	duration = timeslice(engine);
-	ENGINE_TRACE(engine,
-		     "start timeslicing, prio:%d, interval:%lu\n",
-		     prio, duration);
-
-	set_timer_ms(&execlists->timer, duration);
-}
-
-static void record_preemption(struct intel_engine_execlists *execlists)
-{
-	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
-}
-
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
-					    const struct i915_request *rq)
-{
-	if (!rq)
-		return 0;
-
-	/* Force a fast reset for terminated contexts (ignoring sysfs!) */
-	if (unlikely(intel_context_is_banned(rq->context)))
-		return 1;
-
-	return READ_ONCE(engine->props.preempt_timeout_ms);
-}
-
-static void set_preempt_timeout(struct intel_engine_cs *engine,
-				const struct i915_request *rq)
-{
-	if (!intel_engine_has_preempt_reset(engine))
-		return;
-
-	set_timer_ms(&engine->execlists.preempt,
-		     active_preempt_timeout(engine, rq));
-}
-
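-/* NULL out a range of ELSP port pointers. */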
-static inline void clear_ports(struct i915_request **ports, int count)
-{
-	memset_p((void **)ports, NULL, count);
-}
-
-static inline void
-copy_ports(struct i915_request **dst, struct i915_request **src, int count)
-{
-	/* A memcpy_p() would be very useful here! */
-	while (count--)
-		WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
-}
-
-static void execlists_dequeue(struct intel_engine_cs *engine)
-{
-	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct i915_request **port = execlists->pending;
-	struct i915_request ** const last_port = port + execlists->port_mask;
-	struct i915_request * const *active;
-	struct i915_request *last;
-	struct rb_node *rb;
-	bool submit = false;
-
-	/*
-	 * Hardware submission is through 2 ports. Conceptually each port
-	 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
-	 * static for a context, and unique to each, so we only execute
-	 * requests belonging to a single context from each ring. RING_HEAD
-	 * is maintained by the CS in the context image, it marks the place
-	 * where it got up to last time, and through RING_TAIL we tell the CS
-	 * where we want to execute up to this time.
-	 *
-	 * In this list the requests are in order of execution. Consecutive
-	 * requests from the same context are adjacent in the ringbuffer. We
-	 * can combine these requests into a single RING_TAIL update:
-	 *
-	 *              RING_HEAD...req1...req2
-	 *                                    ^- RING_TAIL
-	 * since to execute req2 the CS must first execute req1.
-	 *
-	 * Our goal then is to point each port to the end of a consecutive
-	 * sequence of requests as being the optimal (fewest wake ups
-	 * and context switches) submission.
-	 */
-
-	for (rb = rb_first_cached(&execlists->virtual); rb; ) {
-		struct virtual_engine *ve =
-			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
-		struct i915_request *rq = READ_ONCE(ve->request);
-
-		if (!rq) { /* lazily cleanup after another engine handled rq */
-			rb_erase_cached(rb, &execlists->virtual);
-			RB_CLEAR_NODE(rb);
-			rb = rb_first_cached(&execlists->virtual);
-			continue;
-		}
-
-		if (!virtual_matches(ve, rq, engine)) {
-			rb = rb_next(rb);
-			continue;
-		}
-
-		break;
-	}
-
-	/*
-	 * If the queue is higher priority than the last
-	 * request in the currently active context, submit afresh.
-	 * We will resubmit again afterwards in case we need to split
-	 * the active context to interject the preemption request,
-	 * i.e. we will retrigger preemption following the ack in case
-	 * of trouble.
-	 */
-	active = READ_ONCE(execlists->active);
-
-	/*
-	 * In theory we can skip over completed contexts that have not
-	 * yet been processed by events (as those events are in flight):
-	 *
-	 * while ((last = *active) && i915_request_completed(last))
-	 *	active++;
-	 *
-	 * However, the GPU cannot handle this as it will ultimately
-	 * find itself trying to jump back into a context it has just
-	 * completed and barf.
-	 */
-
-	if ((last = *active)) {
-		if (need_preempt(engine, last, rb)) {
-			if (i915_request_completed(last)) {
-				tasklet_hi_schedule(&execlists->tasklet);
-				return;
-			}
-
-			ENGINE_TRACE(engine,
-				     "preempting last=%llx:%lld, prio=%d, hint=%d\n",
-				     last->fence.context,
-				     last->fence.seqno,
-				     last->sched.attr.priority,
-				     execlists->queue_priority_hint);
-			record_preemption(execlists);
-
-			/*
-			 * Don't let the RING_HEAD advance past the breadcrumb
-			 * as we unwind (and until we resubmit) so that we do
-			 * not accidentally tell it to go backwards.
-			 */
-			ring_set_paused(engine, 1);
-
-			/*
-			 * Note that we have not stopped the GPU at this point,
-			 * so we are unwinding the incomplete requests as they
-			 * remain inflight and so by the time we do complete
-			 * the preemption, some of the unwound requests may
-			 * complete!
-			 */
-			__unwind_incomplete_requests(engine);
-
-			last = NULL;
-		} else if (need_timeslice(engine, last, rb) &&
-			   timeslice_expired(execlists, last)) {
-			if (i915_request_completed(last)) {
-				tasklet_hi_schedule(&execlists->tasklet);
-				return;
-			}
-
-			ENGINE_TRACE(engine,
-				     "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
-				     last->fence.context,
-				     last->fence.seqno,
-				     last->sched.attr.priority,
-				     execlists->queue_priority_hint,
-				     yesno(timeslice_yield(execlists, last)));
-
-			ring_set_paused(engine, 1);
-			defer_active(engine);
-
-			/*
-			 * Unlike for preemption, if we rewind and continue
-			 * executing the same context as previously active,
-			 * the order of execution will remain the same and
-			 * the tail will only advance. We do not need to
-			 * force a full context restore, as a lite-restore
-			 * is sufficient to resample the monotonic TAIL.
-			 *
-			 * If we switch to any other context, similarly we
-			 * will not rewind the TAIL of the current context, and
-			 * normal save/restore will preserve state and allow
-			 * us to later continue executing the same request.
-			 */
-			last = NULL;
-		} else {
-			/*
-			 * Otherwise if we already have a request pending
-			 * for execution after the current one, we can
-			 * just wait until the next CS event before
-			 * queuing more. In either case we will force a
-			 * lite-restore preemption event, but if we wait
-			 * we hopefully coalesce several updates into a single
-			 * submission.
-			 */
-			if (!list_is_last(&last->sched.link,
-					  &engine->active.requests)) {
-				/*
-				 * Even if ELSP[1] is occupied and not worthy
-				 * of timeslices, our queue might be.
-				 */
-				start_timeslice(engine, queue_prio(execlists));
-				return;
-			}
-		}
-	}
-
-	while (rb) { /* XXX virtual is always taking precedence */
-		struct virtual_engine *ve =
-			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
-		struct i915_request *rq;
-
-		spin_lock(&ve->base.active.lock);
-
-		rq = ve->request;
-		if (unlikely(!rq)) { /* lost the race to a sibling */
-			spin_unlock(&ve->base.active.lock);
-			rb_erase_cached(rb, &execlists->virtual);
-			RB_CLEAR_NODE(rb);
-			rb = rb_first_cached(&execlists->virtual);
-			continue;
-		}
-
-		GEM_BUG_ON(rq != ve->request);
-		GEM_BUG_ON(rq->engine != &ve->base);
-		GEM_BUG_ON(rq->context != &ve->context);
-
-		if (rq_prio(rq) >= queue_prio(execlists)) {
-			if (!virtual_matches(ve, rq, engine)) {
-				spin_unlock(&ve->base.active.lock);
-				rb = rb_next(rb);
-				continue;
-			}
-
-			if (last && !can_merge_rq(last, rq)) {
-				spin_unlock(&ve->base.active.lock);
-				start_timeslice(engine, rq_prio(rq));
-				return; /* leave this for another sibling */
-			}
-
-			ENGINE_TRACE(engine,
-				     "virtual rq=%llx:%lld%s, new engine? %s\n",
-				     rq->fence.context,
-				     rq->fence.seqno,
-				     i915_request_completed(rq) ? "!" :
-				     i915_request_started(rq) ? "*" :
-				     "",
-				     yesno(engine != ve->siblings[0]));
-
-			WRITE_ONCE(ve->request, NULL);
-			WRITE_ONCE(ve->base.execlists.queue_priority_hint,
-				   INT_MIN);
-			rb_erase_cached(rb, &execlists->virtual);
-			RB_CLEAR_NODE(rb);
-
-			GEM_BUG_ON(!(rq->execution_mask & engine->mask));
-			WRITE_ONCE(rq->engine, engine);
-
-			if (__i915_request_submit(rq)) {
-				/*
-				 * Only after we confirm that we will submit
-				 * this request (i.e. it has not already
-				 * completed), do we want to update the context.
-				 *
-				 * This serves two purposes. It avoids
-				 * unnecessary work if we are resubmitting an
-				 * already completed request after timeslicing.
-				 * But more importantly, it prevents us altering
-				 * ve->siblings[] on an idle context, where
-				 * we may be using ve->siblings[] in
-				 * virtual_context_enter / virtual_context_exit.
-				 */
-				virtual_xfer_context(ve, engine);
-				GEM_BUG_ON(ve->siblings[0] != engine);
-
-				submit = true;
-				last = rq;
-			}
-			i915_request_put(rq);
-
-			/*
-			 * Hmm, we have a bunch of virtual engine requests,
-			 * but the first one was already completed (thanks
-			 * preempt-to-busy!). Keep looking at the virtual engine
-			 * queue until we have no more relevant requests (i.e.
-			 * the normal submit queue has higher priority).
-			 */
-			if (!submit) {
-				spin_unlock(&ve->base.active.lock);
-				rb = rb_first_cached(&execlists->virtual);
-				continue;
-			}
-		}
-
-		spin_unlock(&ve->base.active.lock);
-		break;
-	}
-
-	while ((rb = rb_first_cached(&execlists->queue))) {
-		struct i915_priolist *p = to_priolist(rb);
-		struct i915_request *rq, *rn;
-		int i;
-
-		priolist_for_each_request_consume(rq, rn, p, i) {
-			bool merge = true;
-
-			/*
-			 * Can we combine this request with the current port?
-			 * It has to be the same context/ringbuffer and not
-			 * have any exceptions (e.g. GVT saying never to
-			 * combine contexts).
-			 *
-			 * If we can combine the requests, we can execute both
-			 * by updating the RING_TAIL to point to the end of the
-			 * second request, and so we never need to tell the
-			 * hardware about the first.
-			 */
-			if (last && !can_merge_rq(last, rq)) {
-				/*
-				 * If we are on the second port and cannot
-				 * combine this request with the last, then we
-				 * are done.
-				 */
-				if (port == last_port)
-					goto done;
-
-				/*
-				 * We must not populate both ELSP[] with the
-				 * same LRCA, i.e. we must submit 2 different
-				 * contexts if we submit 2 ELSP.
-				 */
-				if (last->context == rq->context)
-					goto done;
-
-				if (i915_request_has_sentinel(last))
-					goto done;
-
-				/*
-				 * If GVT overrides us we only ever submit
-				 * port[0], leaving port[1] empty. Note that we
-				 * also have to be careful that we don't queue
-				 * the same context (even though a different
-				 * request) to the second port.
-				 */
-				if (ctx_single_port_submission(last->context) ||
-				    ctx_single_port_submission(rq->context))
-					goto done;
-
-				merge = false;
-			}
-
-			if (__i915_request_submit(rq)) {
-				if (!merge) {
-					*port = execlists_schedule_in(last, port - execlists->pending);
-					port++;
-					last = NULL;
-				}
-
-				GEM_BUG_ON(last &&
-					   !can_merge_ctx(last->context,
-							  rq->context));
-				GEM_BUG_ON(last &&
-					   i915_seqno_passed(last->fence.seqno,
-							     rq->fence.seqno));
-
-				submit = true;
-				last = rq;
-			}
-		}
-
-		rb_erase_cached(&p->node, &execlists->queue);
-		i915_priolist_free(p);
-	}
-
-done:
-	/*
-	 * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
-	 *
-	 * We choose the priority hint such that if we add a request of greater
-	 * priority than this, we kick the submission tasklet to decide on
-	 * the right order of submitting the requests to hardware. We must
-	 * also be prepared to reorder requests as they are in-flight on the
-	 * HW. We derive the priority hint then as the first "hole" in
-	 * the HW submission ports and if there are no available slots,
-	 * the priority of the lowest executing request, i.e. last.
-	 *
-	 * When we do receive a higher priority request ready to run from the
-	 * user, see queue_request(), the priority hint is bumped to that
-	 * request triggering preemption on the next dequeue (or subsequent
-	 * interrupt for secondary ports).
-	 */
-	execlists->queue_priority_hint = queue_prio(execlists);
-
-	if (submit) {
-		*port = execlists_schedule_in(last, port - execlists->pending);
-		execlists->switch_priority_hint =
-			switch_prio(engine, *execlists->pending);
-
-		/*
-		 * Skip if we ended up with exactly the same set of requests,
-		 * e.g. trying to timeslice a pair of ordered contexts
-		 */
-		if (!memcmp(active, execlists->pending,
-			    (port - execlists->pending + 1) * sizeof(*port))) {
-			do
-				execlists_schedule_out(fetch_and_zero(port));
-			while (port-- != execlists->pending);
-
-			goto skip_submit;
-		}
-		clear_ports(port + 1, last_port - port);
-
-		WRITE_ONCE(execlists->yield, -1);
-		set_preempt_timeout(engine, *active);
-		execlists_submit_ports(engine);
-	} else {
-		start_timeslice(engine, execlists->queue_priority_hint);
-skip_submit:
-		ring_set_paused(engine, 0);
-	}
-}
-
-static void
-cancel_port_requests(struct intel_engine_execlists * const execlists)
-{
-	struct i915_request * const *port;
-
-	for (port = execlists->pending; *port; port++)
-		execlists_schedule_out(*port);
-	clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
-
-	/* Mark the end of active before we overwrite *active */
-	for (port = xchg(&execlists->active, execlists->pending); *port; port++)
-		execlists_schedule_out(*port);
-	clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
-
-	smp_wmb(); /* complete the seqlock for execlists_active() */
-	WRITE_ONCE(execlists->active, execlists->inflight);
-}
-
-static inline void
-invalidate_csb_entries(const u64 *first, const u64 *last)
-{
-	clflush((void *)first);
-	clflush((void *)last);
-}
-
-/*
- * Starting with Gen12, the status has a new format:
- *
- *     bit  0:     switched to new queue
- *     bit  1:     reserved
- *     bit  2:     semaphore wait mode (poll or signal), only valid when
- *                 switch detail is set to "wait on semaphore"
- *     bits 3-5:   engine class
- *     bits 6-11:  engine instance
- *     bits 12-14: reserved
- *     bits 15-25: sw context id of the lrc the GT switched to
- *     bits 26-31: sw counter of the lrc the GT switched to
- *     bits 32-35: context switch detail
- *                  - 0: ctx complete
- *                  - 1: wait on sync flip
- *                  - 2: wait on vblank
- *                  - 3: wait on scanline
- *                  - 4: wait on semaphore
- *                  - 5: context preempted (not on SEMAPHORE_WAIT or
- *                       WAIT_FOR_EVENT)
- *     bit  36:    reserved
- *     bits 37-43: wait detail (for switch detail 1 to 4)
- *     bits 44-46: reserved
- *     bits 47-57: sw context id of the lrc the GT switched away from
- *     bits 58-63: sw counter of the lrc the GT switched away from
- */
-static inline bool gen12_csb_parse(const u64 csb)
-{
-	bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb));
-	bool new_queue =
-		lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
-
-	/*
-	 * The context switch detail is not guaranteed to be 5 when a preemption
-	 * occurs, so we can't just check for that. The check below works for
-	 * all the cases we care about, including preemptions of WAIT
-	 * instructions and lite-restore. Preempt-to-idle via the CTRL register
-	 * would require some extra handling, but we don't support that.
-	 */
-	if (!ctx_away_valid || new_queue) {
-		GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(csb)));
-		return true;
-	}
-
-	/*
-	 * switch detail = 5 is covered by the case above and we do not expect a
-	 * context switch on an unsuccessful wait instruction since we always
-	 * use polling mode.
-	 */
-	GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb)));
-	return false;
-}
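-
-/*
- * Illustrative sketch only, not driver code: decoding the "switched to"
- * sw context id from a Gen12 CSB entry using the layout documented above.
- * The shift and mask below are derived from that bit description, not
- * from the GEN12_* macros:
- *
- *	static inline u32 csb_ctx_id_sketch(u64 csb)
- *	{
- *		return (lower_32_bits(csb) >> 15) & 0x7ff; // bits 15-25
- *	}
- */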
-
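-/*
- * On gen8-11, promotion to a new ELSP submission is indicated by the
- * idle->active or preemption status bits.
- */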
-static inline bool gen8_csb_parse(const u64 csb)
-{
-	return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
-}
-
-static noinline u64
-wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
-{
-	u64 entry;
-
-	/*
-	 * Reading from the HWSP has one particular advantage: we can detect
-	 * a stale entry. Since the write into HWSP is broken, we have no reason
-	 * to trust the HW at all; the mmio entry may equally be unordered, so
-	 * we prefer the path that is self-checking and as a last resort,
-	 * return the mmio value.
-	 *
-	 * tgl,dg1:HSDES#22011327657
-	 */
-	preempt_disable();
-	if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) {
-		int idx = csb - engine->execlists.csb_status;
-		int status;
-
-		status = GEN8_EXECLISTS_STATUS_BUF;
-		if (idx >= 6) {
-			status = GEN11_EXECLISTS_STATUS_BUF2;
-			idx -= 6;
-		}
-		status += sizeof(u64) * idx;
-
-		entry = intel_uncore_read64(engine->uncore,
-					    _MMIO(engine->mmio_base + status));
-	}
-	preempt_enable();
-
-	return entry;
-}
-
-static inline u64
-csb_read(const struct intel_engine_cs *engine, u64 * const csb)
-{
-	u64 entry = READ_ONCE(*csb);
-
-	/*
-	 * Unfortunately, the GPU does not always serialise its write
-	 * of the CSB entries before its write of the CSB pointer, at least
-	 * from the perspective of the CPU, using what is known as a Global
-	 * Observation Point. We may read a new CSB tail pointer, but then
-	 * read the stale CSB entries, causing us to misinterpret the
-	 * context-switch events, and eventually declare the GPU hung.
-	 *
-	 * icl:HSDES#1806554093
-	 * tgl:HSDES#22011248461
-	 */
-	if (unlikely(entry == -1))
-		entry = wa_csb_read(engine, csb);
-
-	/* Consume this entry so that we can spot its future reuse. */
-	WRITE_ONCE(*csb, -1);
-
-	/* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */
-	return entry;
-}
-
-static void process_csb(struct intel_engine_cs *engine)
-{
-	struct intel_engine_execlists * const execlists = &engine->execlists;
-	u64 * const buf = execlists->csb_status;
-	const u8 num_entries = execlists->csb_size;
-	u8 head, tail;
-
-	/*
-	 * As we modify our execlists state tracking we require exclusive
-	 * access. Either we are inside the tasklet, or the tasklet is disabled
-	 * and we assume that is only inside the reset paths and so serialised.
-	 */
-	GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
-		   !reset_in_progress(execlists));
-	GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
-
-	/*
-	 * Note that csb_write, csb_status may be either in HWSP or mmio.
-	 * When reading from the csb_write mmio register, we have to be
-	 * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
-	 * the low 4 bits. As it happens we know the next 4 bits are always
-	 * zero and so we can simply mask off the low u8 of the register
-	 * and treat it identically to reading from the HWSP (without having
-	 * to use explicit shifting and masking, and probably bifurcating
-	 * the code to handle the legacy mmio read).
-	 */
-	head = execlists->csb_head;
-	tail = READ_ONCE(*execlists->csb_write);
-	if (unlikely(head == tail))
-		return;
-
-	/*
-	 * We will consume all events from HW, or at least pretend to.
-	 *
-	 * The sequence of events from the HW is deterministic, and derived
-	 * from our writes to the ELSP, with a smidgen of variability for
-	 * the arrival of the asynchronous requests wrt the inflight
-	 * execution. If the HW sends an event that does not correspond with
-	 * the one we are expecting, we have to abandon all hope as we lose
-	 * all tracking of what the engine is actually executing. We will
-	 * only detect we are out of sequence with the HW when we get an
-	 * 'impossible' event because we have already drained our own
-	 * preemption/promotion queue. If this occurs, we know that we likely
-	 * lost track of execution earlier and must unwind and restart; the
-	 * simplest way is to stop processing the event queue and force the
-	 * engine to reset.
-	 */
-	execlists->csb_head = tail;
-	ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
-
-	/*
-	 * Hopefully paired with a wmb() in HW!
-	 *
-	 * We must complete the read of the write pointer before any reads
-	 * from the CSB, so that we do not see stale values. Without an rmb
-	 * (lfence) the HW may speculatively perform the CSB[] reads *before*
-	 * we perform the READ_ONCE(*csb_write).
-	 */
-	rmb();
-	do {
-		bool promote;
-		u64 csb;
-
-		if (++head == num_entries)
-			head = 0;
-
-		/*
-		 * We are flying near dragons again.
-		 *
-		 * We hold a reference to the request in execlist_port[]
-		 * but no more than that. We are operating in softirq
-		 * context and so cannot hold any mutex or sleep. That
-		 * prevents us from stopping the requests we are processing
-		 * in port[] from being retired simultaneously (the
-		 * breadcrumb will be complete before we see the
-		 * context-switch). As we only hold the reference to the
-		 * request, any pointer chasing underneath the request
-		 * is subject to a potential use-after-free. Thus we
-		 * store all of the bookkeeping within port[] as
-		 * required, and avoid using unguarded pointers beneath
-		 * request itself. The same applies to the atomic
-		 * status notifier.
-		 */
-
-		csb = csb_read(engine, buf + head);
-		ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
-			     head, upper_32_bits(csb), lower_32_bits(csb));
-
-		if (INTEL_GEN(engine->i915) >= 12)
-			promote = gen12_csb_parse(csb);
-		else
-			promote = gen8_csb_parse(csb);
-		if (promote) {
-			struct i915_request * const *old = execlists->active;
-
-			if (GEM_WARN_ON(!*execlists->pending)) {
-				execlists->error_interrupt |= ERROR_CSB;
-				break;
-			}
-
-			ring_set_paused(engine, 0);
-
-			/* Point active to the new ELSP; prevent overwriting */
-			WRITE_ONCE(execlists->active, execlists->pending);
-			smp_wmb(); /* notify execlists_active() */
-
-			/* cancel old inflight, prepare for switch */
-			trace_ports(execlists, "preempted", old);
-			while (*old)
-				execlists_schedule_out(*old++);
-
-			/* switch pending to inflight */
-			GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
-			copy_ports(execlists->inflight,
-				   execlists->pending,
-				   execlists_num_ports(execlists));
-			smp_wmb(); /* complete the seqlock */
-			WRITE_ONCE(execlists->active, execlists->inflight);
-
-			/* XXX Magic delay for tgl */
-			ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
-			WRITE_ONCE(execlists->pending[0], NULL);
-		} else {
-			if (GEM_WARN_ON(!*execlists->active)) {
-				execlists->error_interrupt |= ERROR_CSB;
-				break;
-			}
-
-			/* port0 completed, advanced to port1 */
-			trace_ports(execlists, "completed", execlists->active);
-
-			/*
-			 * We rely on the hardware being strongly
-			 * ordered, that the breadcrumb write is
-			 * coherent (visible from the CPU) before the
-			 * user interrupt is processed. One might assume
-			 * that the breadcrumb write being before the
-			 * user interrupt and the CS event for the context
-			 * switch would therefore be before the CS event
-			 * itself...
-			 */
-			if (GEM_SHOW_DEBUG() &&
-			    !i915_request_completed(*execlists->active)) {
-				struct i915_request *rq = *execlists->active;
-				const u32 *regs __maybe_unused =
-					rq->context->lrc_reg_state;
-
-				ENGINE_TRACE(engine,
-					     "context completed before request!\n");
-				ENGINE_TRACE(engine,
-					     "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
-					     ENGINE_READ(engine, RING_START),
-					     ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
-					     ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
-					     ENGINE_READ(engine, RING_CTL),
-					     ENGINE_READ(engine, RING_MI_MODE));
-				ENGINE_TRACE(engine,
-					     "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
-					     i915_ggtt_offset(rq->ring->vma),
-					     rq->head, rq->tail,
-					     rq->fence.context,
-					     lower_32_bits(rq->fence.seqno),
-					     hwsp_seqno(rq));
-				ENGINE_TRACE(engine,
-					     "ctx:{start:%08x, head:%04x, tail:%04x}, ",
-					     regs[CTX_RING_START],
-					     regs[CTX_RING_HEAD],
-					     regs[CTX_RING_TAIL]);
-			}
-
-			execlists_schedule_out(*execlists->active++);
-
-			GEM_BUG_ON(execlists->active - execlists->inflight >
-				   execlists_num_ports(execlists));
-		}
-	} while (head != tail);
-
-	set_timeslice(engine);
-
-	/*
-	 * Gen11 has proven to fail wrt the global observation point
-	 * between entry and tail update, failing on the ordering and thus
-	 * we see an old entry in the context status buffer.
-	 *
-	 * Forcibly evict out entries for the next gpu csb update, to
-	 * increase the odds that we get a fresh entry with non-working
-	 * hardware. The cost for doing so comes out mostly in the wash as
-	 * hardware, working or not, will need to do the invalidation
-	 * before.
-	 */
-	invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
-}
-
-static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
-{
-	lockdep_assert_held(&engine->active.lock);
-	if (!READ_ONCE(engine->execlists.pending[0])) {
-		rcu_read_lock(); /* protect peeking at execlists->active */
-		execlists_dequeue(engine);
-		rcu_read_unlock();
-	}
-}
-
-static void __execlists_hold(struct i915_request *rq)
-{
-	LIST_HEAD(list);
-
-	do {
-		struct i915_dependency *p;
-
-		if (i915_request_is_active(rq))
-			__i915_request_unsubmit(rq);
-
-		clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-		list_move_tail(&rq->sched.link, &rq->engine->active.hold);
-		i915_request_set_hold(rq);
-		RQ_TRACE(rq, "on hold\n");
-
-		for_each_waiter(p, rq) {
-			struct i915_request *w =
-				container_of(p->waiter, typeof(*w), sched);
-
-			/* Leave semaphores spinning on the other engines */
-			if (w->engine != rq->engine)
-				continue;
-
-			if (!i915_request_is_ready(w))
-				continue;
-
-			if (i915_request_completed(w))
-				continue;
-
-			if (i915_request_on_hold(w))
-				continue;
-
-			list_move_tail(&w->sched.link, &list);
-		}
-
-		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
-	} while (rq);
-}
-
-static bool execlists_hold(struct intel_engine_cs *engine,
-			   struct i915_request *rq)
-{
-	if (i915_request_on_hold(rq))
-		return false;
-
-	spin_lock_irq(&engine->active.lock);
-
-	if (i915_request_completed(rq)) { /* too late! */
-		rq = NULL;
-		goto unlock;
-	}
-
-	if (rq->engine != engine) { /* preempted virtual engine */
-		struct virtual_engine *ve = to_virtual_engine(rq->engine);
-
-		/*
-		 * intel_context_inflight() is only protected by virtue
-		 * of process_csb() being called only by the tasklet (or
-		 * directly from inside reset while the tasklet is suspended).
-		 * Assert that neither of those are allowed to run while we
-		 * poke at the request queues.
-		 */
-		GEM_BUG_ON(!reset_in_progress(&engine->execlists));
-
-		/*
-		 * An unsubmitted request along a virtual engine will
-		 * remain on the active (this) engine until we are able
-		 * to process the context switch away (and so mark the
-		 * context as no longer in flight). That cannot have happened
-		 * yet, otherwise we would not be hanging!
-		 */
-		spin_lock(&ve->base.active.lock);
-		GEM_BUG_ON(intel_context_inflight(rq->context) != engine);
-		GEM_BUG_ON(ve->request != rq);
-		ve->request = NULL;
-		spin_unlock(&ve->base.active.lock);
-		i915_request_put(rq);
-
-		rq->engine = engine;
-	}
-
-	/*
-	 * Transfer this request onto the hold queue to prevent it
-	 * being resubmitted to HW (and potentially completed) before we have
-	 * released it. Since we may have already submitted following
-	 * requests, we need to remove those as well.
-	 */
-	GEM_BUG_ON(i915_request_on_hold(rq));
-	GEM_BUG_ON(rq->engine != engine);
-	__execlists_hold(rq);
-	GEM_BUG_ON(list_empty(&engine->active.hold));
-
-unlock:
-	spin_unlock_irq(&engine->active.lock);
-	return rq;
-}
-
-static bool hold_request(const struct i915_request *rq)
-{
-	struct i915_dependency *p;
-	bool result = false;
-
-	/*
-	 * If one of our ancestors is on hold, we must also be on hold,
-	 * otherwise we will bypass it and execute before it.
-	 */
-	rcu_read_lock();
-	for_each_signaler(p, rq) {
-		const struct i915_request *s =
-			container_of(p->signaler, typeof(*s), sched);
-
-		if (s->engine != rq->engine)
-			continue;
-
-		result = i915_request_on_hold(s);
-		if (result)
-			break;
-	}
-	rcu_read_unlock();
-
-	return result;
-}
-
-static void __execlists_unhold(struct i915_request *rq)
-{
-	LIST_HEAD(list);
-
-	do {
-		struct i915_dependency *p;
-
-		RQ_TRACE(rq, "hold release\n");
-
-		GEM_BUG_ON(!i915_request_on_hold(rq));
-		GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
-
-		i915_request_clear_hold(rq);
-		list_move_tail(&rq->sched.link,
-			       i915_sched_lookup_priolist(rq->engine,
-							  rq_prio(rq)));
-		set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-
-		/* Also release any children on this engine that are ready */
-		for_each_waiter(p, rq) {
-			struct i915_request *w =
-				container_of(p->waiter, typeof(*w), sched);
-
-			/* Propagate any change in error status */
-			if (rq->fence.error)
-				i915_request_set_error_once(w, rq->fence.error);
-
-			if (w->engine != rq->engine)
-				continue;
-
-			if (!i915_request_on_hold(w))
-				continue;
-
-			/* Check that no other parents are also on hold */
-			if (hold_request(w))
-				continue;
-
-			list_move_tail(&w->sched.link, &list);
-		}
-
-		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
-	} while (rq);
-}
-
-static void execlists_unhold(struct intel_engine_cs *engine,
-			     struct i915_request *rq)
-{
-	spin_lock_irq(&engine->active.lock);
-
-	/*
-	 * Move this request back to the priority queue, and all of its
-	 * children and grandchildren that were suspended along with it.
-	 */
-	__execlists_unhold(rq);
-
-	if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
-		engine->execlists.queue_priority_hint = rq_prio(rq);
-		tasklet_hi_schedule(&engine->execlists.tasklet);
-	}
-
-	spin_unlock_irq(&engine->active.lock);
-}
-
-struct execlists_capture {
-	struct work_struct work;
-	struct i915_request *rq;
-	struct i915_gpu_coredump *error;
-};
-
-static void execlists_capture_work(struct work_struct *work)
-{
-	struct execlists_capture *cap = container_of(work, typeof(*cap), work);
-	const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
-	struct intel_engine_cs *engine = cap->rq->engine;
-	struct intel_gt_coredump *gt = cap->error->gt;
-	struct intel_engine_capture_vma *vma;
-
-	/* Compress all the objects attached to the request, slow! */
-	vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
-	if (vma) {
-		struct i915_vma_compress *compress =
-			i915_vma_capture_prepare(gt);
-
-		intel_engine_coredump_add_vma(gt->engine, vma, compress);
-		i915_vma_capture_finish(gt, compress);
-	}
-
-	gt->simulated = gt->engine->simulated;
-	cap->error->simulated = gt->simulated;
-
-	/* Publish the error state, and announce it to the world */
-	i915_error_state_store(cap->error);
-	i915_gpu_coredump_put(cap->error);
-
-	/* Return this request and all that depend upon it for signaling */
-	execlists_unhold(engine, cap->rq);
-	i915_request_put(cap->rq);
-
-	kfree(cap);
-}
-
-static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
-{
-	const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
-	struct execlists_capture *cap;
-
-	cap = kmalloc(sizeof(*cap), gfp);
-	if (!cap)
-		return NULL;
-
-	cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
-	if (!cap->error)
-		goto err_cap;
-
-	cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
-	if (!cap->error->gt)
-		goto err_gpu;
-
-	cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
-	if (!cap->error->gt->engine)
-		goto err_gt;
-
-	cap->error->gt->engine->hung = true;
-
-	return cap;
-
-err_gt:
-	kfree(cap->error->gt);
-err_gpu:
-	kfree(cap->error);
-err_cap:
-	kfree(cap);
-	return NULL;
-}
-
-static struct i915_request *
-active_context(struct intel_engine_cs *engine, u32 ccid)
-{
-	const struct intel_engine_execlists * const el = &engine->execlists;
-	struct i915_request * const *port, *rq;
-
-	/*
-	 * Use the most recent result from process_csb(), but just in case
-	 * we trigger an error (via interrupt) before the first CS event has
-	 * been written, peek at the next submission.
-	 */
-
-	for (port = el->active; (rq = *port); port++) {
-		if (rq->context->lrc.ccid == ccid) {
-			ENGINE_TRACE(engine,
-				     "ccid found at active:%zd\n",
-				     port - el->active);
-			return rq;
-		}
-	}
-
-	for (port = el->pending; (rq = *port); port++) {
-		if (rq->context->lrc.ccid == ccid) {
-			ENGINE_TRACE(engine,
-				     "ccid found at pending:%zd\n",
-				     port - el->pending);
-			return rq;
-		}
-	}
-
-	ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
-	return NULL;
-}
-
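-/* Sample the ccid of the context currently executing on the engine. */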
-static u32 active_ccid(struct intel_engine_cs *engine)
-{
-	return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
-}
-
-static void execlists_capture(struct intel_engine_cs *engine)
-{
-	struct execlists_capture *cap;
-
-	if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
-		return;
-
-	/*
-	 * We need to _quickly_ capture the engine state before we reset.
-	 * We are inside an atomic section (softirq) here and we are delaying
-	 * the forced preemption event.
-	 */
-	cap = capture_regs(engine);
-	if (!cap)
-		return;
-
-	spin_lock_irq(&engine->active.lock);
-	cap->rq = active_context(engine, active_ccid(engine));
-	if (cap->rq) {
-		cap->rq = active_request(cap->rq->context->timeline, cap->rq);
-		cap->rq = i915_request_get_rcu(cap->rq);
-	}
-	spin_unlock_irq(&engine->active.lock);
-	if (!cap->rq)
-		goto err_free;
-
-	/*
-	 * Remove the request from the execlists queue, and take ownership
-	 * of the request. We pass it to our worker who will _slowly_ compress
-	 * all the pages the _user_ requested for debugging their batch, after
-	 * which we return it to the queue for signaling.
-	 *
-	 * By removing them from the execlists queue, we also remove the
-	 * requests from being processed by __unwind_incomplete_requests()
-	 * during the intel_engine_reset(), and so they will *not* be replayed
-	 * afterwards.
-	 *
-	 * Note that because we have not yet reset the engine at this point,
-	 * it is possible that the request we have identified as being
-	 * guilty did in fact complete and we will then hit an arbitration
-	 * point allowing the outstanding preemption to succeed. The likelihood
-	 * of that is very low (as capturing of the engine registers should be
-	 * fast enough to run inside an irq-off atomic section!), so we will
-	 * simply hold that request accountable for being non-preemptible
-	 * long enough to force the reset.
-	 */
-	if (!execlists_hold(engine, cap->rq))
-		goto err_rq;
-
-	INIT_WORK(&cap->work, execlists_capture_work);
-	schedule_work(&cap->work);
-	return;
-
-err_rq:
-	i915_request_put(cap->rq);
-err_free:
-	i915_gpu_coredump_put(cap->error);
-	kfree(cap);
-}
-
-static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
-{
-	const unsigned int bit = I915_RESET_ENGINE + engine->id;
-	unsigned long *lock = &engine->gt->reset.flags;
-
-	if (!intel_has_reset_engine(engine->gt))
-		return;
-
-	if (test_and_set_bit(bit, lock))
-		return;
-
-	ENGINE_TRACE(engine, "reset for %s\n", msg);
-
-	/* Mark this tasklet as disabled to avoid waiting for it to complete */
-	tasklet_disable_nosync(&engine->execlists.tasklet);
-
-	ring_set_paused(engine, 1); /* Freeze the current request in place */
-	execlists_capture(engine);
-	intel_engine_reset(engine, msg);
-
-	tasklet_enable(&engine->execlists.tasklet);
-	clear_and_wake_up_bit(bit, lock);
-}
-
-static bool preempt_timeout(const struct intel_engine_cs *const engine)
-{
-	const struct timer_list *t = &engine->execlists.preempt;
-
-	if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
-		return false;
-
-	if (!timer_expired(t))
-		return false;
-
-	return READ_ONCE(engine->execlists.pending[0]);
-}
-
-/*
- * Check the unread Context Status Buffers and manage the submission of new
- * contexts to the ELSP accordingly.
- */
-static void execlists_submission_tasklet(unsigned long data)
-{
-	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
-	bool timeout = preempt_timeout(engine);
-
-	process_csb(engine);
-
-	if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
-		const char *msg;
-
-		/* Generate the error message in priority order wrt the user! */
-		if (engine->execlists.error_interrupt & GENMASK(15, 0))
-			msg = "CS error"; /* thrown by a user payload */
-		else if (engine->execlists.error_interrupt & ERROR_CSB)
-			msg = "invalid CSB event";
-		else
-			msg = "internal error";
-
-		engine->execlists.error_interrupt = 0;
-		execlists_reset(engine, msg);
-	}
-
-	if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&engine->active.lock, flags);
-		__execlists_submission_tasklet(engine);
-		spin_unlock_irqrestore(&engine->active.lock, flags);
-
-		/* Recheck after serialising with direct-submission */
-		if (unlikely(timeout && preempt_timeout(engine))) {
-			cancel_timer(&engine->execlists.preempt);
-			execlists_reset(engine, "preemption timeout");
-		}
-	}
-}
-
-static void __execlists_kick(struct intel_engine_execlists *execlists)
-{
-	/* Kick the tasklet for some interrupt coalescing and reset handling */
-	tasklet_hi_schedule(&execlists->tasklet);
-}
-
-#define execlists_kick(t, member) \
-	__execlists_kick(container_of(t, struct intel_engine_execlists, member))
-
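-/* Timer callbacks simply kick the tasklet to process the expiry. */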
-static void execlists_timeslice(struct timer_list *timer)
-{
-	execlists_kick(timer, timer);
-}
-
-static void execlists_preempt(struct timer_list *timer)
-{
-	execlists_kick(timer, preempt);
-}
-
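-/* Add the request to the engine's priority queue, marked as queued. */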
-static void queue_request(struct intel_engine_cs *engine,
-			  struct i915_request *rq)
-{
-	GEM_BUG_ON(!list_empty(&rq->sched.link));
-	list_add_tail(&rq->sched.link,
-		      i915_sched_lookup_priolist(engine, rq_prio(rq)));
-	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-}
-
-static void __submit_queue_imm(struct intel_engine_cs *engine)
-{
-	struct intel_engine_execlists * const execlists = &engine->execlists;
-
-	if (reset_in_progress(execlists))
-		return; /* defer until we restart the engine following reset */
-
-	__execlists_submission_tasklet(engine);
-}
-
-static void submit_queue(struct intel_engine_cs *engine,
-			 const struct i915_request *rq)
-{
-	struct intel_engine_execlists *execlists = &engine->execlists;
-
-	if (rq_prio(rq) <= execlists->queue_priority_hint)
-		return;
-
-	execlists->queue_priority_hint = rq_prio(rq);
-	__submit_queue_imm(engine);
-}
-
-static bool ancestor_on_hold(const struct intel_engine_cs *engine,
-			     const struct i915_request *rq)
-{
-	GEM_BUG_ON(i915_request_on_hold(rq));
-	return !list_empty(&engine->active.hold) && hold_request(rq);
-}
-
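-/*
- * Opportunistically process any pending CSB events so that the ELSP may
- * be cleared before we submit, skipping if the tasklet or reset owns it.
- */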
-static void flush_csb(struct intel_engine_cs *engine)
-{
-	struct intel_engine_execlists *el = &engine->execlists;
-
-	if (READ_ONCE(el->pending[0]) && tasklet_trylock(&el->tasklet)) {
-		if (!reset_in_progress(el))
-			process_csb(engine);
-		tasklet_unlock(&el->tasklet);
-	}
-}
-
-static void execlists_submit_request(struct i915_request *request)
-{
-	struct intel_engine_cs *engine = request->engine;
-	unsigned long flags;
-
-	/* Hopefully we clear execlists->pending[] to let us through */
-	flush_csb(engine);
-
-	/* Will be called from irq-context when using foreign fences. */
-	spin_lock_irqsave(&engine->active.lock, flags);
-
-	if (unlikely(ancestor_on_hold(engine, request))) {
-		RQ_TRACE(request, "ancestor on hold\n");
-		list_add_tail(&request->sched.link, &engine->active.hold);
-		i915_request_set_hold(request);
-	} else {
-		queue_request(engine, request);
-
-		GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
-		GEM_BUG_ON(list_empty(&request->sched.link));
-
-		submit_queue(engine, request);
-	}
-
-	spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void __execlists_context_fini(struct intel_context *ce)
-{
-	intel_ring_put(ce->ring);
-	i915_vma_put(ce->state);
-}
-
-static void execlists_context_destroy(struct kref *kref)
-{
-	struct intel_context *ce = container_of(kref, typeof(*ce), ref);
-
-	GEM_BUG_ON(!i915_active_is_idle(&ce->active));
-	GEM_BUG_ON(intel_context_is_pinned(ce));
-
-	if (ce->state)
-		__execlists_context_fini(ce);
-
-	intel_context_fini(ce);
-	intel_context_free(ce);
-}
-
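-/*
- * Poison the space after the context image so that overruns can be
- * caught in debug builds (see check_redzone below).
- */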
-static void
-set_redzone(void *vaddr, const struct intel_engine_cs *engine)
-{
-	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
-		return;
-
-	vaddr += engine->context_size;
-
-	memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
-}
-
-static void
-check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
-{
-	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
-		return;
-
-	vaddr += engine->context_size;
-
-	if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
-		drm_err_once(&engine->i915->drm,
-			     "%s context redzone overwritten!\n",
-			     engine->name);
-}
-
-static void execlists_context_unpin(struct intel_context *ce)
-{
-	check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
-		      ce->engine);
-}
-
-static void execlists_context_post_unpin(struct intel_context *ce)
-{
-	i915_gem_object_unpin_map(ce->state->obj);
-}
-
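-/*
- * Restore the saved CTX_TIMESTAMP from the context image via GPR0; the
- * value is reloaded into the register twice as part of the workaround.
- */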
-static u32 *
-gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs)
-{
-	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
-		MI_SRM_LRM_GLOBAL_GTT |
-		MI_LRI_LRM_CS_MMIO;
-	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
-	*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
-		CTX_TIMESTAMP * sizeof(u32);
-	*cs++ = 0;
-
-	*cs++ = MI_LOAD_REGISTER_REG |
-		MI_LRR_SOURCE_CS_MMIO |
-		MI_LRI_LRM_CS_MMIO;
-	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
-	*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));
-
-	*cs++ = MI_LOAD_REGISTER_REG |
-		MI_LRR_SOURCE_CS_MMIO |
-		MI_LRI_LRM_CS_MMIO;
-	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
-	*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));
-
-	return cs;
-}
-
-static u32 *
-gen12_emit_restore_scratch(const struct intel_context *ce, u32 *cs)
-{
-	GEM_BUG_ON(lrc_ring_gpr0(ce->engine) == -1);
-
-	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
-		MI_SRM_LRM_GLOBAL_GTT |
-		MI_LRI_LRM_CS_MMIO;
-	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
-	*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
-		(lrc_ring_gpr0(ce->engine) + 1) * sizeof(u32);
-	*cs++ = 0;
-
-	return cs;
-}
-
-static u32 *
-gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs)
-{
-	GEM_BUG_ON(lrc_ring_cmd_buf_cctl(ce->engine) == -1);
-
-	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
-		MI_SRM_LRM_GLOBAL_GTT |
-		MI_LRI_LRM_CS_MMIO;
-	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
-	*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
-		(lrc_ring_cmd_buf_cctl(ce->engine) + 1) * sizeof(u32);
-	*cs++ = 0;
-
-	*cs++ = MI_LOAD_REGISTER_REG |
-		MI_LRR_SOURCE_CS_MMIO |
-		MI_LRI_LRM_CS_MMIO;
-	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
-	*cs++ = i915_mmio_reg_offset(RING_CMD_BUF_CCTL(0));
-
-	return cs;
-}
-
-static u32 *
-gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
-{
-	cs = gen12_emit_timestamp_wa(ce, cs);
-	cs = gen12_emit_cmd_buf_wa(ce, cs);
-	cs = gen12_emit_restore_scratch(ce, cs);
-
-	return cs;
-}
-
-static u32 *
-gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
-{
-	cs = gen12_emit_timestamp_wa(ce, cs);
-	cs = gen12_emit_restore_scratch(ce, cs);
-
-	return cs;
-}
-
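-/* Byte offset of the per-context wa batch page within the context image. */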
-static inline u32 context_wa_bb_offset(const struct intel_context *ce)
-{
-	return PAGE_SIZE * ce->wa_bb_page;
-}
-
-static u32 *context_indirect_bb(const struct intel_context *ce)
-{
-	void *ptr;
-
-	GEM_BUG_ON(!ce->wa_bb_page);
-
-	ptr = ce->lrc_reg_state;
-	ptr -= LRC_STATE_OFFSET; /* back to start of context image */
-	ptr += context_wa_bb_offset(ce);
-
-	return ptr;
-}
-
-static void
-setup_indirect_ctx_bb(const struct intel_context *ce,
-		      const struct intel_engine_cs *engine,
-		      u32 *(*emit)(const struct intel_context *, u32 *))
-{
-	u32 * const start = context_indirect_bb(ce);
-	u32 *cs;
-
-	cs = emit(ce, start);
-	GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
-	while ((unsigned long)cs % CACHELINE_BYTES)
-		*cs++ = MI_NOOP;
-
-	lrc_ring_setup_indirect_ctx(ce->lrc_reg_state, engine,
-				    i915_ggtt_offset(ce->state) +
-				    context_wa_bb_offset(ce),
-				    (cs - start) * sizeof(*cs));
-}
-
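-/*
- * Refresh the ring registers (and, for the render class, RPCS and OA
- * state) stored in the context image, e.g. after pinning or reset.
- */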
-static void
-__execlists_update_reg_state(const struct intel_context *ce,
-			     const struct intel_engine_cs *engine,
-			     u32 head)
-{
-	struct intel_ring *ring = ce->ring;
-	u32 *regs = ce->lrc_reg_state;
-
-	GEM_BUG_ON(!intel_ring_offset_valid(ring, head));
-	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
-
-	regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
-	regs[CTX_RING_HEAD] = head;
-	regs[CTX_RING_TAIL] = ring->tail;
-	regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
-
-	/* RPCS */
-	if (engine->class == RENDER_CLASS) {
-		regs[CTX_R_PWR_CLK_STATE] =
-			intel_sseu_make_rpcs(engine->gt, &ce->sseu);
-
-		i915_oa_init_reg_state(ce, engine);
-	}
-
-	if (ce->wa_bb_page) {
-		u32 *(*fn)(const struct intel_context *ce, u32 *cs);
-
-		fn = gen12_emit_indirect_ctx_xcs;
-		if (ce->engine->class == RENDER_CLASS)
-			fn = gen12_emit_indirect_ctx_rcs;
-
-		/* Mutually exclusive wrt the global indirect bb */
-		GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
-		setup_indirect_ctx_bb(ce, engine, fn);
-	}
-}
-
-static int
-execlists_context_pre_pin(struct intel_context *ce,
-			  struct i915_gem_ww_ctx *ww, void **vaddr)
-{
-	GEM_BUG_ON(!ce->state);
-	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
-
-	*vaddr = i915_gem_object_pin_map(ce->state->obj,
-					i915_coherent_map_type(ce->engine->i915) |
-					I915_MAP_OVERRIDE);
-
-	return PTR_ERR_OR_ZERO(*vaddr);
-}
-
-static int
-__execlists_context_pin(struct intel_context *ce,
-			struct intel_engine_cs *engine,
-			void *vaddr)
-{
-	ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
-	ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
-	__execlists_update_reg_state(ce, engine, ce->ring->tail);
-
-	return 0;
-}
-
-static int execlists_context_pin(struct intel_context *ce, void *vaddr)
-{
-	return __execlists_context_pin(ce, ce->engine, vaddr);
-}
-
-static int execlists_context_alloc(struct intel_context *ce)
-{
-	return __execlists_context_alloc(ce, ce->engine);
-}
-
-static void execlists_context_reset(struct intel_context *ce)
-{
-	CE_TRACE(ce, "reset\n");
-	GEM_BUG_ON(!intel_context_is_pinned(ce));
-
-	intel_ring_reset(ce->ring, ce->ring->emit);
-
-	/* Scrub away the garbage */
-	execlists_init_reg_state(ce->lrc_reg_state,
-				 ce, ce->engine, ce->ring, true);
-	__execlists_update_reg_state(ce, ce->engine, ce->ring->tail);
-
-	ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
-}
-
-static const struct intel_context_ops execlists_context_ops = {
-	.alloc = execlists_context_alloc,
-
-	.pre_pin = execlists_context_pre_pin,
-	.pin = execlists_context_pin,
-	.unpin = execlists_context_unpin,
-	.post_unpin = execlists_context_post_unpin,
-
-	.enter = intel_context_enter_engine,
-	.exit = intel_context_exit_engine,
-
-	.reset = execlists_context_reset,
-	.destroy = execlists_context_destroy,
-};
-
-static u32 hwsp_offset(const struct i915_request *rq)
-{
-	const struct intel_timeline_cacheline *cl;
-
-	/* Before the request is executed, the timeline/cacheline is fixed */
-
-	cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
-	if (cl)
-		return cl->ggtt_offset;
-
-	return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
-}
-
-static int gen8_emit_init_breadcrumb(struct i915_request *rq)
-{
-	u32 *cs;
-
-	GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
-	if (!i915_request_timeline(rq)->has_initial_breadcrumb)
-		return 0;
-
-	cs = intel_ring_begin(rq, 6);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
-
-	/*
-	 * Check if we have been preempted before we even get started.
-	 *
-	 * After this point i915_request_started() reports true, even if
-	 * we get preempted and so are no longer running.
-	 */
-	*cs++ = MI_ARB_CHECK;
-	*cs++ = MI_NOOP;
-
-	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
-	*cs++ = hwsp_offset(rq);
-	*cs++ = 0;
-	*cs++ = rq->fence.seqno - 1;
-
-	intel_ring_advance(rq, cs);
-
-	/* Record the updated position of the request's payload */
-	rq->infix = intel_ring_offset(rq, cs);
-
-	__set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
-
-	return 0;
-}
-
-static int emit_pdps(struct i915_request *rq)
-{
-	const struct intel_engine_cs * const engine = rq->engine;
-	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
-	int err, i;
-	u32 *cs;
-
-	GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
-
-	/*
-	 * Beware ye of the dragons, this sequence is magic!
-	 *
-	 * Small changes to this sequence can cause anything from
-	 * GPU hangs to forcewake errors and machine lockups!
-	 */
-
-	/* Flush any residual operations from the context load */
-	err = engine->emit_flush(rq, EMIT_FLUSH);
-	if (err)
-		return err;
-
-	/* Magic required to prevent forcewake errors! */
-	err = engine->emit_flush(rq, EMIT_INVALIDATE);
-	if (err)
-		return err;
-
-	cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
-
-	/* Ensure the LRI have landed before we invalidate & continue */
-	*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
-	for (i = GEN8_3LVL_PDPES; i--; ) {
-		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
-		u32 base = engine->mmio_base;
-
-		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
-		*cs++ = upper_32_bits(pd_daddr);
-		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
-		*cs++ = lower_32_bits(pd_daddr);
-	}
-	*cs++ = MI_NOOP;
-
-	intel_ring_advance(rq, cs);
-
-	return 0;
-}
-
-static int execlists_request_alloc(struct i915_request *request)
-{
-	int ret;
-
-	GEM_BUG_ON(!intel_context_is_pinned(request->context));
-
-	/*
-	 * Flush enough space to reduce the likelihood of waiting after
-	 * we start building the request - in which case we will just
-	 * have to repeat work.
-	 */
-	request->reserved_space += EXECLISTS_REQUEST_SIZE;
-
-	/*
-	 * Note that after this point, we have committed to using
-	 * this request as it is being used to both track the
-	 * state of engine initialisation and liveness of the
-	 * golden renderstate above. Think twice before you try
-	 * to cancel/unwind this request now.
-	 */
-
-	if (!i915_vm_is_4lvl(request->context->vm)) {
-		ret = emit_pdps(request);
-		if (ret)
-			return ret;
-	}
-
-	/* Unconditionally invalidate GPU caches and TLBs. */
-	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
-	if (ret)
-		return ret;
-
-	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
-	return 0;
-}
-
-/*
- * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
- * PIPE_CONTROL instruction. This is required for the flush to happen correctly
- * but there is a slight complication as this is applied in a WA batch where the
- * values are only initialized once so we cannot take the register value at the
- * beginning and reuse it further; hence we save its value to memory, upload a
- * constant value with bit 21 set and then restore it with the saved value.
- * To simplify the WA, a constant value is formed by using the default value
- * of this register. This shouldn't be a problem because we are only modifying
- * it for a short period and this batch is non-preemptible. We can of course
- * use additional instructions that read the actual value of the register
- * at that time and set our bit of interest but it makes the WA complicated.
- *
- * This WA is also required for Gen9 so extracting as a function avoids
- * code duplication.
- */
-static u32 *
-gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
-{
-	/* NB no one else is allowed to scribble over scratch + 256! */
-	*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
-	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-	*batch++ = intel_gt_scratch_offset(engine->gt,
-					   INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
-	*batch++ = 0;
-
-	*batch++ = MI_LOAD_REGISTER_IMM(1);
-	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-	*batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;
-
-	batch = gen8_emit_pipe_control(batch,
-				       PIPE_CONTROL_CS_STALL |
-				       PIPE_CONTROL_DC_FLUSH_ENABLE,
-				       0);
-
-	*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
-	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-	*batch++ = intel_gt_scratch_offset(engine->gt,
-					   INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
-	*batch++ = 0;
-
-	return batch;
-}
-
-/*
- * Typically we only have one indirect_ctx and per_ctx batch buffer which are
- * initialized at the beginning and shared across all contexts but this field
- * helps us to have multiple batches at different offsets and select them based
- * on some criteria. At the moment this batch always starts at the beginning of
- * the page and at this point we don't have multiple wa_ctx batch buffers.
- *
- * The number of WAs applied is not known at the beginning; we use this field
- * to return the number of DWORDs written.
- *
- * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
- * so it adds NOOPs as padding to make it cacheline aligned.
- * MI_BATCH_BUFFER_END will be added to the per-ctx batch, and both of them
- * together make a complete batch buffer.
- */
-static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
-{
-	/* WaDisableCtxRestoreArbitration:bdw,chv */
-	*batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
-
-	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
-	if (IS_BROADWELL(engine->i915))
-		batch = gen8_emit_flush_coherentl3_wa(engine, batch);
-
-	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
-	/* Actual scratch location is at 128 bytes offset */
-	batch = gen8_emit_pipe_control(batch,
-				       PIPE_CONTROL_FLUSH_L3 |
-				       PIPE_CONTROL_STORE_DATA_INDEX |
-				       PIPE_CONTROL_CS_STALL |
-				       PIPE_CONTROL_QW_WRITE,
-				       LRC_PPHWSP_SCRATCH_ADDR);
-
-	*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
-	/* Pad to end of cacheline */
-	while ((unsigned long)batch % CACHELINE_BYTES)
-		*batch++ = MI_NOOP;
-
-	/*
-	 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
-	 * execution depends on the length specified in terms of cache lines
-	 * in the register CTX_RCS_INDIRECT_CTX
-	 */
-
-	return batch;
-}
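To make the padding requirement concrete: the indirect-ctx batch length is programmed in whole cachelines, so the emitter NOOP-pads until the write pointer is cacheline aligned. A minimal userspace sketch of the same idiom, assuming a 64-byte cacheline and MI_NOOP == 0 (illustrative constants here, not taken from the i915 headers):

#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES	64
#define MI_NOOP		0	/* assumed encoding for this sketch */

static uint32_t *pad_to_cacheline(uint32_t *batch)
{
	while ((uintptr_t)batch % CACHELINE_BYTES)
		*batch++ = MI_NOOP;
	return batch;
}

int main(void)
{
	/* 32 dwords, 64-byte aligned */
	_Alignas(CACHELINE_BYTES) uint32_t buf[32];
	uint32_t *cs = buf + 5;	/* pretend 5 dwords were already emitted */

	cs = pad_to_cacheline(cs);
	printf("padded to %td dwords (one full cacheline)\n", cs - buf); /* 16 */
	return 0;
}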
-
-struct lri {
-	i915_reg_t reg;
-	u32 value;
-};
-
-static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count)
-{
-	GEM_BUG_ON(!count || count > 63);
-
-	*batch++ = MI_LOAD_REGISTER_IMM(count);
-	do {
-		*batch++ = i915_mmio_reg_offset(lri->reg);
-		*batch++ = lri->value;
-	} while (lri++, --count);
-	*batch++ = MI_NOOP;
-
-	return batch;
-}
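emit_lri() above relies on the MI_LOAD_REGISTER_IMM layout: one header dword carrying a pair count (at most 63, since the length field is six bits), followed by (offset, value) dword pairs. A hedged standalone sketch of that packing; FAKE_MI_LRI and the register offsets below are invented placeholders, not the real command encoding:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_MI_LRI(count) (0x22000000u | (count)) /* placeholder opcode */

struct reg_write { uint32_t offset, value; };

static uint32_t *pack_lri(uint32_t *batch, const struct reg_write *w,
			  unsigned int count)
{
	assert(count && count <= 63); /* six-bit length field */
	*batch++ = FAKE_MI_LRI(count);
	do {
		*batch++ = w->offset;
		*batch++ = w->value;
	} while (w++, --count);
	/* the kernel version also appends an MI_NOOP after the pairs */
	return batch;
}

int main(void)
{
	uint32_t buf[8], *end;
	const struct reg_write w[] = { { 0x7014, 0x1 }, { 0x20c0, 0x2 } };

	end = pack_lri(buf, w, 2);
	printf("packet length: %td dwords\n", end - buf); /* 1 + 2 * 2 = 5 */
	return 0;
}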
-
-static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
-{
-	static const struct lri lri[] = {
-		/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
-		{
-			COMMON_SLICE_CHICKEN2,
-			__MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE,
-				       0),
-		},
-
-		/* BSpec: 11391 */
-		{
-			FF_SLICE_CHICKEN,
-			__MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX,
-				       FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
-		},
-
-		/* BSpec: 11299 */
-		{
-			_3D_CHICKEN3,
-			__MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX,
-				       _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
-		}
-	};
-
-	*batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
-
-	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
-	batch = gen8_emit_flush_coherentl3_wa(engine, batch);
-
-	/* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
-	batch = gen8_emit_pipe_control(batch,
-				       PIPE_CONTROL_FLUSH_L3 |
-				       PIPE_CONTROL_STORE_DATA_INDEX |
-				       PIPE_CONTROL_CS_STALL |
-				       PIPE_CONTROL_QW_WRITE,
-				       LRC_PPHWSP_SCRATCH_ADDR);
-
-	batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
-
-	/* WaMediaPoolStateCmdInWABB:bxt,glk */
-	if (HAS_POOLED_EU(engine->i915)) {
-		/*
		 * EU pool configuration is set up along with the golden
		 * context during context initialization. This value depends
		 * on the device type (2x6 or 3x6) and needs to be updated
		 * based on which subslice is disabled, especially for 2x6
		 * devices; however, it is safe to load the default 3x6
		 * configuration instead of masking off the corresponding
		 * bits because the HW ignores bits of a disabled subslice
		 * and drops down to the appropriate config. Please see
		 * render_state_setup() in i915_gem_render_state.c for the
		 * possible configurations; to avoid duplication they are
		 * not shown here again.
-		 */
-		*batch++ = GEN9_MEDIA_POOL_STATE;
-		*batch++ = GEN9_MEDIA_POOL_ENABLE;
-		*batch++ = 0x00777000;
-		*batch++ = 0;
-		*batch++ = 0;
-		*batch++ = 0;
-	}
-
-	*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
-	/* Pad to end of cacheline */
-	while ((unsigned long)batch % CACHELINE_BYTES)
-		*batch++ = MI_NOOP;
-
-	return batch;
-}
-
-static u32 *
-gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
-{
-	int i;
-
-	/*
-	 * WaPipeControlBefore3DStateSamplePattern: cnl
-	 *
-	 * Ensure the engine is idle prior to programming a
-	 * 3DSTATE_SAMPLE_PATTERN during a context restore.
-	 */
-	batch = gen8_emit_pipe_control(batch,
-				       PIPE_CONTROL_CS_STALL,
-				       0);
-	/*
-	 * WaPipeControlBefore3DStateSamplePattern says we need 4 dwords for
-	 * the PIPE_CONTROL followed by 12 dwords of 0x0, so 16 dwords in
-	 * total. However, a PIPE_CONTROL is 6 dwords long, not 4, which is
-	 * confusing. Since gen8_emit_pipe_control() already advances the
-	 * batch by 6 dwords, we advance the other 10 here, completing a
-	 * cacheline. It's not clear if the workaround requires this padding
-	 * before other commands, or if it's just the regular padding we would
-	 * already have for the workaround bb, so leave it here for now.
-	 */
-	for (i = 0; i < 10; i++)
-		*batch++ = MI_NOOP;
-
-	/* Pad to end of cacheline */
-	while ((unsigned long)batch % CACHELINE_BYTES)
-		*batch++ = MI_NOOP;
-
-	return batch;
-}
-
-#define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE)
-
-static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
-{
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	int err;
-
-	obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_OBJ_SIZE);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
-
-	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		goto err;
-	}
-
-	err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
-	if (err)
-		goto err;
-
-	engine->wa_ctx.vma = vma;
-	return 0;
-
-err:
-	i915_gem_object_put(obj);
-	return err;
-}
-
-static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
-{
-	i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
-
-	/* Called on error unwind, clear all flags to prevent further use */
-	memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx));
-}
-
-typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
-
-static int intel_init_workaround_bb(struct intel_engine_cs *engine)
-{
-	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
-	struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
-					    &wa_ctx->per_ctx };
-	wa_bb_func_t wa_bb_fn[2];
-	void *batch, *batch_ptr;
-	unsigned int i;
-	int ret;
-
-	if (engine->class != RENDER_CLASS)
-		return 0;
-
-	switch (INTEL_GEN(engine->i915)) {
-	case 12:
-	case 11:
-		return 0;
-	case 10:
-		wa_bb_fn[0] = gen10_init_indirectctx_bb;
-		wa_bb_fn[1] = NULL;
-		break;
-	case 9:
-		wa_bb_fn[0] = gen9_init_indirectctx_bb;
-		wa_bb_fn[1] = NULL;
-		break;
-	case 8:
-		wa_bb_fn[0] = gen8_init_indirectctx_bb;
-		wa_bb_fn[1] = NULL;
-		break;
-	default:
-		MISSING_CASE(INTEL_GEN(engine->i915));
-		return 0;
-	}
-
-	ret = lrc_setup_wa_ctx(engine);
-	if (ret) {
-		drm_dbg(&engine->i915->drm,
-			"Failed to setup context WA page: %d\n", ret);
-		return ret;
-	}
-
-	batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
-
-	/*
-	 * Emit the two workaround batch buffers, recording the offset from the
-	 * start of the workaround batch buffer object for each and their
-	 * respective sizes.
-	 */
-	batch_ptr = batch;
-	for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
-		wa_bb[i]->offset = batch_ptr - batch;
-		if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
-						  CACHELINE_BYTES))) {
-			ret = -EINVAL;
-			break;
-		}
-		if (wa_bb_fn[i])
-			batch_ptr = wa_bb_fn[i](engine, batch_ptr);
-		wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
-	}
-	GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
-
-	__i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
-	__i915_gem_object_release_map(wa_ctx->vma->obj);
-	if (ret)
-		lrc_destroy_wa_ctx(engine);
-
-	return ret;
-}
-
-static void reset_csb_pointers(struct intel_engine_cs *engine)
-{
-	struct intel_engine_execlists * const execlists = &engine->execlists;
-	const unsigned int reset_value = execlists->csb_size - 1;
-
-	ring_set_paused(engine, 0);
-
-	/*
-	 * Sometimes Icelake forgets to reset its pointers on a GPU reset.
	 * Bludgeon them with an mmio update to be sure.
-	 */
-	ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
-		     0xffff << 16 | reset_value << 8 | reset_value);
-	ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
-	/*
-	 * After a reset, the HW starts writing into CSB entry [0]. We
-	 * therefore have to set our HEAD pointer back one entry so that
-	 * the *first* entry we check is entry 0. To complicate this further,
-	 * as we don't wait for the first interrupt after reset, we have to
-	 * fake the HW write to point back to the last entry so that our
-	 * inline comparison of our cached head position against the last HW
-	 * write works even before the first interrupt.
-	 */
-	execlists->csb_head = reset_value;
-	WRITE_ONCE(*execlists->csb_write, reset_value);
-	wmb(); /* Make sure this is visible to HW (paranoia?) */
-
-	/* Check that the GPU does indeed update the CSB entries! */
-	memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
-	invalidate_csb_entries(&execlists->csb_status[0],
-			       &execlists->csb_status[reset_value]);
-
-	/* Once more for luck and our trusty paranoia */
-	ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
-		     0xffff << 16 | reset_value << 8 | reset_value);
-	ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
-	GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
-}
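The head bookkeeping above can be modelled in plain C: parking both the cached head and the faked HW write pointer at csb_size - 1 makes the first post-increment land on entry 0. A toy consumer, assuming a 12-entry buffer (the GEN11_CSB_ENTRIES size used later in this patch):

#include <stdio.h>

#define CSB_SIZE 12 /* assumed entry count */

static void drain(unsigned int *head, unsigned int write)
{
	while (*head != write) {
		*head = (*head + 1) % CSB_SIZE;
		printf("process CSB[%u]\n", *head);
	}
}

int main(void)
{
	unsigned int head = CSB_SIZE - 1;	/* reset value */
	unsigned int write = CSB_SIZE - 1;	/* faked HW write pointer */

	drain(&head, write);	/* nothing to do yet */
	write = 2;		/* HW reports three new events */
	drain(&head, write);	/* processes CSB[0..2] */
	return 0;
}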
-
-static void execlists_sanitize(struct intel_engine_cs *engine)
-{
-	GEM_BUG_ON(execlists_active(&engine->execlists));
-
-	/*
-	 * Poison residual state on resume, in case the suspend didn't!
-	 *
	 * We have to assume that across suspend/resume (or other loss
	 * of control) the contents of our pinned buffers have been
	 * lost, replaced by garbage. Since this doesn't always happen,
-	 * let's poison such state so that we more quickly spot when
-	 * we falsely assume it has been preserved.
-	 */
-	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
-		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
-
-	reset_csb_pointers(engine);
-
-	/*
-	 * The kernel_context HWSP is stored in the status_page. As above,
-	 * that may be lost on resume/initialisation, and so we need to
-	 * reset the value in the HWSP.
-	 */
-	intel_timeline_reset_seqno(engine->kernel_context->timeline);
-
-	/* And scrub the dirty cachelines for the HWSP */
-	clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
-}
-
-static void enable_error_interrupt(struct intel_engine_cs *engine)
-{
-	u32 status;
-
-	engine->execlists.error_interrupt = 0;
-	ENGINE_WRITE(engine, RING_EMR, ~0u);
-	ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
-
-	status = ENGINE_READ(engine, RING_ESR);
-	if (unlikely(status)) {
-		drm_err(&engine->i915->drm,
-			"engine '%s' resumed still in error: %08x\n",
-			engine->name, status);
-		__intel_gt_reset(engine->gt, engine->mask);
-	}
-
-	/*
-	 * On current gen8+, we have 2 signals to play with
-	 *
	 * - I915_ERROR_INSTRUCTION (bit 0)
-	 *
-	 *    Generate an error if the command parser encounters an invalid
-	 *    instruction
-	 *
-	 *    This is a fatal error.
-	 *
-	 * - CP_PRIV (bit 2)
-	 *
-	 *    Generate an error on privilege violation (where the CP replaces
-	 *    the instruction with a no-op). This also fires for writes into
-	 *    read-only scratch pages.
-	 *
-	 *    This is a non-fatal error, parsing continues.
-	 *
-	 * * there are a few others defined for odd HW that we do not use
-	 *
-	 * Since CP_PRIV fires for cases where we have chosen to ignore the
-	 * error (as the HW is validating and suppressing the mistakes), we
-	 * only unmask the instruction error bit.
-	 */
-	ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
-}
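A small model of the mask convention behind the final EMR write, under the assumption that a set EMR bit suppresses the corresponding error source and that the bit positions match the comment (bit 0 and bit 2); this is a sketch of the semantics, not the hardware's exact EIR/EMR behaviour:

#include <stdint.h>
#include <stdio.h>

#define ERROR_INSTRUCTION	(1u << 0)	/* assumed bit position */
#define CP_PRIV			(1u << 2)	/* assumed bit position */

static int error_reported(uint32_t esr, uint32_t emr)
{
	return (esr & ~emr) != 0; /* only unmasked sources are reported */
}

int main(void)
{
	uint32_t emr = ~ERROR_INSTRUCTION; /* unmask bit 0 only */

	printf("instruction error reported: %d\n",
	       error_reported(ERROR_INSTRUCTION, emr));	/* 1 */
	printf("privilege violation reported: %d\n",
	       error_reported(CP_PRIV, emr));		/* 0 */
	return 0;
}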
-
-static void enable_execlists(struct intel_engine_cs *engine)
-{
-	u32 mode;
-
-	assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
-
-	intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
-
-	if (INTEL_GEN(engine->i915) >= 11)
-		mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
-	else
-		mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
-	ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
-
-	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
-
-	ENGINE_WRITE_FW(engine,
-			RING_HWS_PGA,
-			i915_ggtt_offset(engine->status_page.vma));
-	ENGINE_POSTING_READ(engine, RING_HWS_PGA);
-
-	enable_error_interrupt(engine);
-
-	engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
-}
-
-static bool unexpected_starting_state(struct intel_engine_cs *engine)
-{
-	bool unexpected = false;
-
-	if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
-		drm_dbg(&engine->i915->drm,
-			"STOP_RING still set in RING_MI_MODE\n");
-		unexpected = true;
-	}
-
-	return unexpected;
-}
-
-static int execlists_resume(struct intel_engine_cs *engine)
-{
-	intel_mocs_init_engine(engine);
-
-	intel_breadcrumbs_reset(engine->breadcrumbs);
-
-	if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
-		struct drm_printer p = drm_debug_printer(__func__);
-
-		intel_engine_dump(engine, &p, NULL);
-	}
-
-	enable_execlists(engine);
-
-	return 0;
-}
-
-static void execlists_reset_prepare(struct intel_engine_cs *engine)
-{
-	struct intel_engine_execlists * const execlists = &engine->execlists;
-	unsigned long flags;
-
-	ENGINE_TRACE(engine, "depth<-%d\n",
-		     atomic_read(&execlists->tasklet.count));
-
-	/*
-	 * Prevent request submission to the hardware until we have
-	 * completed the reset in i915_gem_reset_finish(). If a request
-	 * is completed by one engine, it may then queue a request
-	 * to a second via its execlists->tasklet *just* as we are
-	 * calling engine->resume() and also writing the ELSP.
-	 * Turning off the execlists->tasklet until the reset is over
-	 * prevents the race.
-	 */
-	__tasklet_disable_sync_once(&execlists->tasklet);
-	GEM_BUG_ON(!reset_in_progress(execlists));
-
-	/* And flush any current direct submission. */
-	spin_lock_irqsave(&engine->active.lock, flags);
-	spin_unlock_irqrestore(&engine->active.lock, flags);
-
-	/*
	 * We stop engines, otherwise we might get a failed reset and a
	 * dead gpu (on elk). Even a gpu as modern as kbl can suffer
	 * a system hang if a batchbuffer is in progress when
	 * the reset is issued, regardless of the READY_TO_RESET ack.
	 * Thus we assume it is best to stop the engines on all gens
	 * where we have a gpu reset.
-	 *
-	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
-	 *
-	 * FIXME: Wa for more modern gens needs to be validated
-	 */
-	ring_set_paused(engine, 1);
-	intel_engine_stop_cs(engine);
-
-	engine->execlists.reset_ccid = active_ccid(engine);
-}
-
-static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
-{
-	int x;
-
-	x = lrc_ring_mi_mode(engine);
-	if (x != -1) {
-		regs[x + 1] &= ~STOP_RING;
-		regs[x + 1] |= STOP_RING << 16;
-	}
-}
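The two stores above use the masked-register idiom: the top 16 bits of the written value select which of the bottom 16 bits are actually updated, so STOP_RING << 16 with the value bit clear means "write 0 to STOP_RING, touch nothing else". A self-contained illustration, with the STOP_RING bit position assumed:

#include <stdint.h>
#include <stdio.h>

static uint32_t apply_masked_write(uint32_t reg, uint32_t write)
{
	uint32_t mask = write >> 16;

	return (reg & ~mask) | (write & mask);
}

int main(void)
{
	const uint32_t STOP_RING = 1u << 8;	/* assumed bit position */
	uint32_t mi_mode = STOP_RING;		/* ring currently stopped */

	/* mask bit set, value bit clear: clears STOP_RING only */
	mi_mode = apply_masked_write(mi_mode, STOP_RING << 16);
	printf("STOP_RING now %s\n", mi_mode & STOP_RING ? "set" : "clear");
	return 0;
}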
-
-static void __execlists_reset_reg_state(const struct intel_context *ce,
-					const struct intel_engine_cs *engine)
-{
-	u32 *regs = ce->lrc_reg_state;
-
-	__reset_stop_ring(regs, engine);
-}
-
-static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
-{
-	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct intel_context *ce;
-	struct i915_request *rq;
-	u32 head;
-
-	mb(); /* paranoia: read the CSB pointers from after the reset */
-	clflush(execlists->csb_write);
-	mb();
-
-	process_csb(engine); /* drain preemption events */
-
-	/* Following the reset, we need to reload the CSB read/write pointers */
-	reset_csb_pointers(engine);
-
-	/*
-	 * Save the currently executing context, even if we completed
-	 * its request, it was still running at the time of the
-	 * reset and will have been clobbered.
-	 */
-	rq = active_context(engine, engine->execlists.reset_ccid);
-	if (!rq)
-		goto unwind;
-
-	ce = rq->context;
-	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
-
-	if (i915_request_completed(rq)) {
-		/* Idle context; tidy up the ring so we can restart afresh */
-		head = intel_ring_wrap(ce->ring, rq->tail);
-		goto out_replay;
-	}
-
-	/* We still have requests in-flight; the engine should be active */
-	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
-
-	/* Context has requests still in-flight; it should not be idle! */
-	GEM_BUG_ON(i915_active_is_idle(&ce->active));
-
-	rq = active_request(ce->timeline, rq);
-	head = intel_ring_wrap(ce->ring, rq->head);
-	GEM_BUG_ON(head == ce->ring->tail);
-
-	/*
-	 * If this request hasn't started yet, e.g. it is waiting on a
-	 * semaphore, we need to avoid skipping the request or else we
-	 * break the signaling chain. However, if the context is corrupt
-	 * the request will not restart and we will be stuck with a wedged
	 * device. It is quite often the case that if we issue a reset
	 * while the GPU is loading the context image, the context
	 * image becomes corrupt.
-	 *
-	 * Otherwise, if we have not started yet, the request should replay
-	 * perfectly and we do not need to flag the result as being erroneous.
-	 */
-	if (!i915_request_started(rq))
-		goto out_replay;
-
-	/*
-	 * If the request was innocent, we leave the request in the ELSP
-	 * and will try to replay it on restarting. The context image may
-	 * have been corrupted by the reset, in which case we may have
-	 * to service a new GPU hang, but more likely we can continue on
-	 * without impact.
-	 *
-	 * If the request was guilty, we presume the context is corrupt
-	 * and have to at least restore the RING register in the context
-	 * image back to the expected values to skip over the guilty request.
-	 */
-	__i915_request_reset(rq, stalled);
-
-	/*
-	 * We want a simple context + ring to execute the breadcrumb update.
-	 * We cannot rely on the context being intact across the GPU hang,
-	 * so clear it and rebuild just what we need for the breadcrumb.
-	 * All pending requests for this context will be zapped, and any
-	 * future request will be after userspace has had the opportunity
-	 * to recreate its own state.
-	 */
-out_replay:
-	ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
-		     head, ce->ring->tail);
-	__execlists_reset_reg_state(ce, engine);
-	__execlists_update_reg_state(ce, engine, head);
-	ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
-
-unwind:
-	/* Push back any incomplete requests for replay after the reset. */
-	cancel_port_requests(execlists);
-	__unwind_incomplete_requests(engine);
-}
-
-static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
-{
-	unsigned long flags;
-
-	ENGINE_TRACE(engine, "\n");
-
-	spin_lock_irqsave(&engine->active.lock, flags);
-
-	__execlists_reset(engine, stalled);
-
-	spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void nop_submission_tasklet(unsigned long data)
-{
-	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
-
-	/* The driver is wedged; don't process any more events. */
-	WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
-}
-
-static void execlists_reset_cancel(struct intel_engine_cs *engine)
-{
-	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct i915_request *rq, *rn;
-	struct rb_node *rb;
-	unsigned long flags;
-
-	ENGINE_TRACE(engine, "\n");
-
-	/*
-	 * Before we call engine->cancel_requests(), we should have exclusive
-	 * access to the submission state. This is arranged for us by the
-	 * caller disabling the interrupt generation, the tasklet and other
-	 * threads that may then access the same state, giving us a free hand
-	 * to reset state. However, we still need to let lockdep be aware that
	 * we know this state may be accessed in hardirq context, so we
	 * disable irqs around this manipulation, and we want to keep
	 * the spinlock focused on its duties rather than accidentally
	 * conflating its coverage with the submission's irq state.
	 * (Similarly, although we shouldn't need to disable irqs around
	 * the manipulation of the submission's irq state, we also wish
	 * to remind ourselves that it is irq state.)
-	 */
-	spin_lock_irqsave(&engine->active.lock, flags);
-
-	__execlists_reset(engine, true);
-
-	/* Mark all executing requests as skipped. */
-	list_for_each_entry(rq, &engine->active.requests, sched.link)
-		mark_eio(rq);
-	intel_engine_signal_breadcrumbs(engine);
-
-	/* Flush the queued requests to the timeline list (for retiring). */
-	while ((rb = rb_first_cached(&execlists->queue))) {
-		struct i915_priolist *p = to_priolist(rb);
-		int i;
-
-		priolist_for_each_request_consume(rq, rn, p, i) {
-			mark_eio(rq);
-			__i915_request_submit(rq);
-		}
-
-		rb_erase_cached(&p->node, &execlists->queue);
-		i915_priolist_free(p);
-	}
-
-	/* On-hold requests will be flushed to timeline upon their release */
-	list_for_each_entry(rq, &engine->active.hold, sched.link)
-		mark_eio(rq);
-
-	/* Cancel all attached virtual engines */
-	while ((rb = rb_first_cached(&execlists->virtual))) {
-		struct virtual_engine *ve =
-			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
-
-		rb_erase_cached(rb, &execlists->virtual);
-		RB_CLEAR_NODE(rb);
-
-		spin_lock(&ve->base.active.lock);
-		rq = fetch_and_zero(&ve->request);
-		if (rq) {
-			mark_eio(rq);
-
-			rq->engine = engine;
-			__i915_request_submit(rq);
-			i915_request_put(rq);
-
-			ve->base.execlists.queue_priority_hint = INT_MIN;
-		}
-		spin_unlock(&ve->base.active.lock);
-	}
-
-	/* Remaining _unready_ requests will be nop'ed when submitted */
-
-	execlists->queue_priority_hint = INT_MIN;
-	execlists->queue = RB_ROOT_CACHED;
-
-	GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
-	execlists->tasklet.func = nop_submission_tasklet;
-
-	spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void execlists_reset_finish(struct intel_engine_cs *engine)
-{
-	struct intel_engine_execlists * const execlists = &engine->execlists;
-
-	/*
-	 * After a GPU reset, we may have requests to replay. Do so now while
-	 * we still have the forcewake to be sure that the GPU is not allowed
-	 * to sleep before we restart and reload a context.
-	 */
-	GEM_BUG_ON(!reset_in_progress(execlists));
-	if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
-		execlists->tasklet.func(execlists->tasklet.data);
-
-	if (__tasklet_enable(&execlists->tasklet))
-		/* And kick in case we missed a new request submission. */
-		tasklet_hi_schedule(&execlists->tasklet);
-	ENGINE_TRACE(engine, "depth->%d\n",
-		     atomic_read(&execlists->tasklet.count));
-}
-
-static int gen8_emit_bb_start_noarb(struct i915_request *rq,
-				    u64 offset, u32 len,
-				    const unsigned int flags)
-{
-	u32 *cs;
-
-	cs = intel_ring_begin(rq, 4);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
-
-	/*
-	 * WaDisableCtxRestoreArbitration:bdw,chv
-	 *
	 * We don't need to perform MI_ARB_ENABLE as often as we do (in
	 * particular all the gens that do not need the w/a at all!); if we
	 * took care to make sure that on every switch into this context
	 * (both ordinary and for preemption) arbitration was enabled,
	 * we would be fine.  However, for gen8 there is another w/a that
-	 * requires us to not preempt inside GPGPU execution, so we keep
-	 * arbitration disabled for gen8 batches. Arbitration will be
-	 * re-enabled before we close the request
-	 * (engine->emit_fini_breadcrumb).
-	 */
-	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
-
-	/* FIXME(BDW+): Address space and security selectors. */
-	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
-		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
-	*cs++ = lower_32_bits(offset);
-	*cs++ = upper_32_bits(offset);
-
-	intel_ring_advance(rq, cs);
-
-	return 0;
-}
-
-static int gen8_emit_bb_start(struct i915_request *rq,
-			      u64 offset, u32 len,
-			      const unsigned int flags)
-{
-	u32 *cs;
-
-	cs = intel_ring_begin(rq, 6);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
-
-	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
-	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
-		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
-	*cs++ = lower_32_bits(offset);
-	*cs++ = upper_32_bits(offset);
-
-	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
-	*cs++ = MI_NOOP;
-
-	intel_ring_advance(rq, cs);
-
-	return 0;
-}
-
-static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
-{
-	ENGINE_WRITE(engine, RING_IMR,
-		     ~(engine->irq_enable_mask | engine->irq_keep_mask));
-	ENGINE_POSTING_READ(engine, RING_IMR);
-}
-
-static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
-{
-	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
-}
-
-static int gen8_emit_flush(struct i915_request *request, u32 mode)
-{
-	u32 cmd, *cs;
-
-	cs = intel_ring_begin(request, 4);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
-
-	cmd = MI_FLUSH_DW + 1;
-
-	/* We always require a command barrier so that subsequent
-	 * commands, such as breadcrumb interrupts, are strictly ordered
-	 * wrt the contents of the write cache being flushed to memory
-	 * (and thus being coherent from the CPU).
-	 */
-	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
-
-	if (mode & EMIT_INVALIDATE) {
-		cmd |= MI_INVALIDATE_TLB;
-		if (request->engine->class == VIDEO_DECODE_CLASS)
-			cmd |= MI_INVALIDATE_BSD;
-	}
-
-	*cs++ = cmd;
-	*cs++ = LRC_PPHWSP_SCRATCH_ADDR;
-	*cs++ = 0; /* upper addr */
-	*cs++ = 0; /* value */
-	intel_ring_advance(request, cs);
-
-	return 0;
-}
-
-static int gen8_emit_flush_render(struct i915_request *request,
-				  u32 mode)
-{
-	bool vf_flush_wa = false, dc_flush_wa = false;
-	u32 *cs, flags = 0;
-	int len;
-
-	flags |= PIPE_CONTROL_CS_STALL;
-
-	if (mode & EMIT_FLUSH) {
-		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
-		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
-		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
-		flags |= PIPE_CONTROL_FLUSH_ENABLE;
-	}
-
-	if (mode & EMIT_INVALIDATE) {
-		flags |= PIPE_CONTROL_TLB_INVALIDATE;
-		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
-		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
-		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
-		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
-		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
-		flags |= PIPE_CONTROL_QW_WRITE;
-		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-
-		/*
-		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
-		 * pipe control.
-		 */
-		if (IS_GEN(request->engine->i915, 9))
-			vf_flush_wa = true;
-
-		/* WaForGAMHang:kbl */
-		if (IS_KBL_GT_REVID(request->engine->i915, 0, KBL_REVID_B0))
-			dc_flush_wa = true;
-	}
-
-	len = 6;
-
-	if (vf_flush_wa)
-		len += 6;
-
-	if (dc_flush_wa)
-		len += 12;
-
-	cs = intel_ring_begin(request, len);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
-
-	if (vf_flush_wa)
-		cs = gen8_emit_pipe_control(cs, 0, 0);
-
-	if (dc_flush_wa)
-		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
-					    0);
-
-	cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
-
-	if (dc_flush_wa)
-		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
-
-	intel_ring_advance(request, cs);
-
-	return 0;
-}
-
-static int gen11_emit_flush_render(struct i915_request *request,
-				   u32 mode)
-{
-	if (mode & EMIT_FLUSH) {
-		u32 *cs;
-		u32 flags = 0;
-
-		flags |= PIPE_CONTROL_CS_STALL;
-
-		flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
-		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
-		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
-		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
-		flags |= PIPE_CONTROL_FLUSH_ENABLE;
-		flags |= PIPE_CONTROL_QW_WRITE;
-		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-
-		cs = intel_ring_begin(request, 6);
-		if (IS_ERR(cs))
-			return PTR_ERR(cs);
-
-		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
-		intel_ring_advance(request, cs);
-	}
-
-	if (mode & EMIT_INVALIDATE) {
-		u32 *cs;
-		u32 flags = 0;
-
-		flags |= PIPE_CONTROL_CS_STALL;
-
-		flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
-		flags |= PIPE_CONTROL_TLB_INVALIDATE;
-		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
-		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
-		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
-		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
-		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
-		flags |= PIPE_CONTROL_QW_WRITE;
-		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-
-		cs = intel_ring_begin(request, 6);
-		if (IS_ERR(cs))
-			return PTR_ERR(cs);
-
-		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
-		intel_ring_advance(request, cs);
-	}
-
-	return 0;
-}
-
-static u32 preparser_disable(bool state)
-{
-	return MI_ARB_CHECK | 1 << 8 | state;
-}
-
-static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine)
-{
-	static const i915_reg_t vd[] = {
-		GEN12_VD0_AUX_NV,
-		GEN12_VD1_AUX_NV,
-		GEN12_VD2_AUX_NV,
-		GEN12_VD3_AUX_NV,
-	};
-
-	static const i915_reg_t ve[] = {
-		GEN12_VE0_AUX_NV,
-		GEN12_VE1_AUX_NV,
-	};
-
-	if (engine->class == VIDEO_DECODE_CLASS)
-		return vd[engine->instance];
-
-	if (engine->class == VIDEO_ENHANCEMENT_CLASS)
-		return ve[engine->instance];
-
-	GEM_BUG_ON("unknown aux_inv_reg\n");
-
-	return INVALID_MMIO_REG;
-}
-
-static u32 *
-gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs)
-{
-	*cs++ = MI_LOAD_REGISTER_IMM(1);
-	*cs++ = i915_mmio_reg_offset(inv_reg);
-	*cs++ = AUX_INV;
-	*cs++ = MI_NOOP;
-
-	return cs;
-}
-
-static int gen12_emit_flush_render(struct i915_request *request,
-				   u32 mode)
-{
-	if (mode & EMIT_FLUSH) {
-		u32 flags = 0;
-		u32 *cs;
-
-		flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
-		flags |= PIPE_CONTROL_FLUSH_L3;
-		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
-		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
-		/* Wa_1409600907:tgl */
-		flags |= PIPE_CONTROL_DEPTH_STALL;
-		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
-		flags |= PIPE_CONTROL_FLUSH_ENABLE;
-
-		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-		flags |= PIPE_CONTROL_QW_WRITE;
-
-		flags |= PIPE_CONTROL_CS_STALL;
-
-		cs = intel_ring_begin(request, 6);
-		if (IS_ERR(cs))
-			return PTR_ERR(cs);
-
-		cs = gen12_emit_pipe_control(cs,
-					     PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
-					     flags, LRC_PPHWSP_SCRATCH_ADDR);
-		intel_ring_advance(request, cs);
-	}
-
-	if (mode & EMIT_INVALIDATE) {
-		u32 flags = 0;
-		u32 *cs;
-
-		flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
-		flags |= PIPE_CONTROL_TLB_INVALIDATE;
-		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
-		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
-		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
-		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
-		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
-
-		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-		flags |= PIPE_CONTROL_QW_WRITE;
-
-		flags |= PIPE_CONTROL_CS_STALL;
-
-		cs = intel_ring_begin(request, 8 + 4);
-		if (IS_ERR(cs))
-			return PTR_ERR(cs);
-
-		/*
-		 * Prevent the pre-parser from skipping past the TLB
-		 * invalidate and loading a stale page for the batch
-		 * buffer / request payload.
-		 */
-		*cs++ = preparser_disable(true);
-
-		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
-
-		/* hsdes: 1809175790 */
-		cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs);
-
-		*cs++ = preparser_disable(false);
-		intel_ring_advance(request, cs);
-	}
-
-	return 0;
-}
-
-static int gen12_emit_flush(struct i915_request *request, u32 mode)
-{
-	intel_engine_mask_t aux_inv = 0;
-	u32 cmd, *cs;
-
-	cmd = 4;
-	if (mode & EMIT_INVALIDATE)
-		cmd += 2;
-	if (mode & EMIT_INVALIDATE)
-		aux_inv = request->engine->mask & ~BIT(BCS0);
-	if (aux_inv)
-		cmd += 2 * hweight8(aux_inv) + 2;
-
-	cs = intel_ring_begin(request, cmd);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
-
-	if (mode & EMIT_INVALIDATE)
-		*cs++ = preparser_disable(true);
-
-	cmd = MI_FLUSH_DW + 1;
-
-	/* We always require a command barrier so that subsequent
-	 * commands, such as breadcrumb interrupts, are strictly ordered
-	 * wrt the contents of the write cache being flushed to memory
-	 * (and thus being coherent from the CPU).
-	 */
-	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
-
-	if (mode & EMIT_INVALIDATE) {
-		cmd |= MI_INVALIDATE_TLB;
-		if (request->engine->class == VIDEO_DECODE_CLASS)
-			cmd |= MI_INVALIDATE_BSD;
-	}
-
-	*cs++ = cmd;
-	*cs++ = LRC_PPHWSP_SCRATCH_ADDR;
-	*cs++ = 0; /* upper addr */
-	*cs++ = 0; /* value */
-
-	if (aux_inv) { /* hsdes: 1809175790 */
-		struct intel_engine_cs *engine;
-		unsigned int tmp;
-
-		*cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv));
-		for_each_engine_masked(engine, request->engine->gt,
-				       aux_inv, tmp) {
-			*cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
-			*cs++ = AUX_INV;
-		}
-		*cs++ = MI_NOOP;
-	}
-
-	if (mode & EMIT_INVALIDATE)
-		*cs++ = preparser_disable(false);
-
-	intel_ring_advance(request, cs);
-
-	return 0;
-}
-
-static void assert_request_valid(struct i915_request *rq)
-{
-	struct intel_ring *ring __maybe_unused = rq->ring;
-
-	/* Can we unwind this request without appearing to go forwards? */
-	GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
-}
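The direction test above only works because a request is constrained to less than half the ring: on a wrapping buffer, two offsets can be ordered only if their distance is under half the ring size. An assumed model of intel_ring_direction() (not its actual implementation):

#include <stdio.h>

#define RING_SIZE 4096 /* must be a power of two */

static int ring_direction(unsigned int a, unsigned int b)
{
	/* wrapped distance folded into [-RING_SIZE/2, RING_SIZE/2) */
	int delta = (int)((b - a) & (RING_SIZE - 1));

	if (delta >= RING_SIZE / 2)
		delta -= RING_SIZE;
	return delta; /* > 0: b ahead of a; < 0: b behind */
}

int main(void)
{
	printf("%d\n", ring_direction(100, 300));	/* 200: forwards */
	printf("%d\n", ring_direction(4000, 96));	/* 192: wrapped forwards */
	printf("%d\n", ring_direction(300, 100));	/* -200: backwards */
	return 0;
}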
-
-/*
- * Reserve space for 2 NOOPs at the end of each request to be
- * used as a workaround for not being allowed to do lite
- * restore with HEAD==TAIL (WaIdleLiteRestore).
- */
-static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
-{
-	/* Ensure there's always at least one preemption point per-request. */
-	*cs++ = MI_ARB_CHECK;
-	*cs++ = MI_NOOP;
-	request->wa_tail = intel_ring_offset(request, cs);
-
-	/* Check that entire request is less than half the ring */
-	assert_request_valid(request);
-
-	return cs;
-}
-
-static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs)
-{
-	*cs++ = MI_SEMAPHORE_WAIT |
-		MI_SEMAPHORE_GLOBAL_GTT |
-		MI_SEMAPHORE_POLL |
-		MI_SEMAPHORE_SAD_EQ_SDD;
-	*cs++ = 0;
-	*cs++ = intel_hws_preempt_address(request->engine);
-	*cs++ = 0;
-
-	return cs;
-}
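Restated in plain C, the MI_SEMAPHORE_WAIT above asks the command streamer to poll a dword in memory until it equals the inline data (SAD_EQ_SDD: "semaphore address data equals semaphore data dword"). In this sketch a second thread plays the CPU flipping the semaphore; compile with -pthread, and note the initial/expected values are arbitrary:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static _Atomic uint32_t hws_preempt; /* stand-in for the HWSP semaphore dword */

static void *releaser(void *arg)
{
	(void)arg;
	usleep(1000);			/* CPU clears the semaphore later */
	atomic_store(&hws_preempt, 0);	/* the SDD value the waiter expects */
	return NULL;
}

int main(void)
{
	pthread_t t;

	atomic_store(&hws_preempt, 1);
	pthread_create(&t, NULL, releaser, NULL);

	/* MI_SEMAPHORE_POLL | SAD_EQ_SDD: spin until *addr == data */
	while (atomic_load(&hws_preempt) != 0)
		;

	puts("semaphore satisfied, preemption point reached");
	pthread_join(t, NULL);
	return 0;
}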
-
-static __always_inline u32*
-gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
-{
-	*cs++ = MI_USER_INTERRUPT;
-
-	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-	if (intel_engine_has_semaphores(request->engine))
-		cs = emit_preempt_busywait(request, cs);
+	REG16(0x610),
+	REG16(0x614),
+	REG16(0x618),
+	REG16(0x61c),
+	REG16(0x620),
+	REG16(0x624),
+	REG16(0x628),
+	REG16(0x62c),
+	REG16(0x630),
+	REG16(0x634),
+	REG16(0x638),
+	REG16(0x63c),
+	REG16(0x640),
+	REG16(0x644),
+	REG16(0x648),
+	REG16(0x64c),
+	REG16(0x650),
+	REG16(0x654),
+	REG16(0x658),
+	REG16(0x65c),
+	REG16(0x660),
+	REG16(0x664),
+	REG16(0x668),
+	REG16(0x66c),
+	REG16(0x670),
+	REG16(0x674),
+	REG16(0x678),
+	REG16(0x67c),
+	REG(0x068),
 
-	request->tail = intel_ring_offset(request, cs);
-	assert_ring_tail_valid(request->ring, request->tail);
+	END
+};
 
-	return gen8_emit_wa_tail(request, cs);
-}
+static const u8 gen11_rcs_offsets[] = {
+	NOP(1),
+	LRI(15, POSTED),
+	REG16(0x244),
+	REG(0x034),
+	REG(0x030),
+	REG(0x038),
+	REG(0x03c),
+	REG(0x168),
+	REG(0x140),
+	REG(0x110),
+	REG(0x11c),
+	REG(0x114),
+	REG(0x118),
+	REG(0x1c0),
+	REG(0x1c4),
+	REG(0x1c8),
+	REG(0x180),
 
-static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
-{
-	return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
-}
+	NOP(1),
+	LRI(9, POSTED),
+	REG16(0x3a8),
+	REG16(0x28c),
+	REG16(0x288),
+	REG16(0x284),
+	REG16(0x280),
+	REG16(0x27c),
+	REG16(0x278),
+	REG16(0x274),
+	REG16(0x270),
 
-static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
-{
-	return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
-}
+	LRI(1, POSTED),
+	REG(0x1b0),
 
-static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
-{
-	cs = gen8_emit_pipe_control(cs,
-				    PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
-				    PIPE_CONTROL_DEPTH_CACHE_FLUSH |
-				    PIPE_CONTROL_DC_FLUSH_ENABLE,
-				    0);
-
-	/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
-	cs = gen8_emit_ggtt_write_rcs(cs,
-				      request->fence.seqno,
-				      hwsp_offset(request),
-				      PIPE_CONTROL_FLUSH_ENABLE |
-				      PIPE_CONTROL_CS_STALL);
-
-	return gen8_emit_fini_breadcrumb_tail(request, cs);
-}
+	NOP(10),
+	LRI(1, 0),
+	REG(0x0c8),
 
-static u32 *
-gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
-{
-	cs = gen8_emit_ggtt_write_rcs(cs,
-				      request->fence.seqno,
-				      hwsp_offset(request),
-				      PIPE_CONTROL_CS_STALL |
-				      PIPE_CONTROL_TILE_CACHE_FLUSH |
-				      PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
-				      PIPE_CONTROL_DEPTH_CACHE_FLUSH |
-				      PIPE_CONTROL_DC_FLUSH_ENABLE |
-				      PIPE_CONTROL_FLUSH_ENABLE);
-
-	return gen8_emit_fini_breadcrumb_tail(request, cs);
-}
+	END
+};
 
-/*
- * Note that the CS instruction pre-parser will not stall on the breadcrumb
- * flush and will continue pre-fetching the instructions after it before the
- * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
- * BB_START/END instructions, so, even though we might pre-fetch the pre-amble
- * of the next request before the memory has been flushed, we're guaranteed that
- * we won't access the batch itself too early.
- * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
- * so, if the current request is modifying an instruction in the next request on
- * the same intel_context, we might pre-fetch and then execute the pre-update
- * instruction. To avoid this, the users of self-modifying code should either
- * disable the parser around the code emitting the memory writes, via a new flag
- * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
- * the in-kernel use-cases we've opted to use a separate context, see
- * reloc_gpu() as an example.
- * All the above applies only to the instructions themselves. Non-inline data
- * used by the instructions is not pre-fetched.
- */
+static const u8 gen12_rcs_offsets[] = {
+	NOP(1),
+	LRI(13, POSTED),
+	REG16(0x244),
+	REG(0x034),
+	REG(0x030),
+	REG(0x038),
+	REG(0x03c),
+	REG(0x168),
+	REG(0x140),
+	REG(0x110),
+	REG(0x1c0),
+	REG(0x1c4),
+	REG(0x1c8),
+	REG(0x180),
+	REG16(0x2b4),
 
-static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs)
-{
-	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
-		MI_SEMAPHORE_GLOBAL_GTT |
-		MI_SEMAPHORE_POLL |
-		MI_SEMAPHORE_SAD_EQ_SDD;
-	*cs++ = 0;
-	*cs++ = intel_hws_preempt_address(request->engine);
-	*cs++ = 0;
-	*cs++ = 0;
-	*cs++ = MI_NOOP;
+	NOP(5),
+	LRI(9, POSTED),
+	REG16(0x3a8),
+	REG16(0x28c),
+	REG16(0x288),
+	REG16(0x284),
+	REG16(0x280),
+	REG16(0x27c),
+	REG16(0x278),
+	REG16(0x274),
+	REG16(0x270),
 
-	return cs;
-}
+	LRI(3, POSTED),
+	REG(0x1b0),
+	REG16(0x5a8),
+	REG16(0x5ac),
 
-static __always_inline u32*
-gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
-{
-	*cs++ = MI_USER_INTERRUPT;
+	NOP(6),
+	LRI(1, 0),
+	REG(0x0c8),
+	NOP(3 + 9 + 1),
 
-	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-	if (intel_engine_has_semaphores(request->engine))
-		cs = gen12_emit_preempt_busywait(request, cs);
+	LRI(51, POSTED),
+	REG16(0x588),
+	REG16(0x588),
+	REG16(0x588),
+	REG16(0x588),
+	REG16(0x588),
+	REG16(0x588),
+	REG(0x028),
+	REG(0x09c),
+	REG(0x0c0),
+	REG(0x178),
+	REG(0x17c),
+	REG16(0x358),
+	REG(0x170),
+	REG(0x150),
+	REG(0x154),
+	REG(0x158),
+	REG16(0x41c),
+	REG16(0x600),
+	REG16(0x604),
+	REG16(0x608),
+	REG16(0x60c),
+	REG16(0x610),
+	REG16(0x614),
+	REG16(0x618),
+	REG16(0x61c),
+	REG16(0x620),
+	REG16(0x624),
+	REG16(0x628),
+	REG16(0x62c),
+	REG16(0x630),
+	REG16(0x634),
+	REG16(0x638),
+	REG16(0x63c),
+	REG16(0x640),
+	REG16(0x644),
+	REG16(0x648),
+	REG16(0x64c),
+	REG16(0x650),
+	REG16(0x654),
+	REG16(0x658),
+	REG16(0x65c),
+	REG16(0x660),
+	REG16(0x664),
+	REG16(0x668),
+	REG16(0x66c),
+	REG16(0x670),
+	REG16(0x674),
+	REG16(0x678),
+	REG16(0x67c),
+	REG(0x068),
+	REG(0x084),
+	NOP(1),
 
-	request->tail = intel_ring_offset(request, cs);
-	assert_ring_tail_valid(request->ring, request->tail);
+	END
+};
 
-	return gen8_emit_wa_tail(request, cs);
-}
+#undef END
+#undef REG16
+#undef REG
+#undef LRI
+#undef NOP
 
-static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
+static const u8 *reg_offsets(const struct intel_engine_cs *engine)
 {
-	/* XXX Stalling flush before seqno write; post-sync not */
-	cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
-	return gen12_emit_fini_breadcrumb_tail(rq, cs);
-}
+	/*
+	 * The gen12+ lists only have the registers we program in the basic
+	 * default state. We rely on the context image using relative
+	 * addressing to automatically fix up the register state between
+	 * the physical engines for virtual engines.
+	 */
+	GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 &&
+		   !intel_engine_has_relative_mmio(engine));
 
-static u32 *
-gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
-{
-	cs = gen12_emit_ggtt_write_rcs(cs,
-				       request->fence.seqno,
-				       hwsp_offset(request),
-				       PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
-				       PIPE_CONTROL_CS_STALL |
-				       PIPE_CONTROL_TILE_CACHE_FLUSH |
-				       PIPE_CONTROL_FLUSH_L3 |
-				       PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
-				       PIPE_CONTROL_DEPTH_CACHE_FLUSH |
-				       /* Wa_1409600907:tgl */
-				       PIPE_CONTROL_DEPTH_STALL |
-				       PIPE_CONTROL_DC_FLUSH_ENABLE |
-				       PIPE_CONTROL_FLUSH_ENABLE);
-
-	return gen12_emit_fini_breadcrumb_tail(request, cs);
+	if (engine->class == RENDER_CLASS) {
+		if (INTEL_GEN(engine->i915) >= 12)
+			return gen12_rcs_offsets;
+		else if (INTEL_GEN(engine->i915) >= 11)
+			return gen11_rcs_offsets;
+		else if (INTEL_GEN(engine->i915) >= 9)
+			return gen9_rcs_offsets;
+		else
+			return gen8_rcs_offsets;
+	} else {
+		if (INTEL_GEN(engine->i915) >= 12)
+			return gen12_xcs_offsets;
+		else if (INTEL_GEN(engine->i915) >= 9)
+			return gen9_xcs_offsets;
+		else
+			return gen8_xcs_offsets;
+	}
 }
 
-static void execlists_park(struct intel_engine_cs *engine)
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
 {
-	cancel_timer(&engine->execlists.timer);
-	cancel_timer(&engine->execlists.preempt);
+	if (INTEL_GEN(engine->i915) >= 12)
+		return 0x60;
+	else if (INTEL_GEN(engine->i915) >= 9)
+		return 0x54;
+	else if (engine->class == RENDER_CLASS)
+		return 0x58;
+	else
+		return -1;
 }
 
-void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
+static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
 {
-	engine->submit_request = execlists_submit_request;
-	engine->schedule = i915_schedule;
-	engine->execlists.tasklet.func = execlists_submission_tasklet;
-
-	engine->reset.prepare = execlists_reset_prepare;
-	engine->reset.rewind = execlists_reset_rewind;
-	engine->reset.cancel = execlists_reset_cancel;
-	engine->reset.finish = execlists_reset_finish;
-
-	engine->park = execlists_park;
-	engine->unpark = NULL;
-
-	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
-	if (!intel_vgpu_active(engine->i915)) {
-		engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
-		if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
-			engine->flags |= I915_ENGINE_HAS_PREEMPTION;
-			if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
-				engine->flags |= I915_ENGINE_HAS_TIMESLICES;
-		}
-	}
-
 	if (INTEL_GEN(engine->i915) >= 12)
-		engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
-
-	if (intel_engine_has_preemption(engine))
-		engine->emit_bb_start = gen8_emit_bb_start;
+		return 0x74;
+	else if (INTEL_GEN(engine->i915) >= 9)
+		return 0x68;
+	else if (engine->class == RENDER_CLASS)
+		return 0xd8;
 	else
-		engine->emit_bb_start = gen8_emit_bb_start_noarb;
+		return -1;
 }
 
-static void execlists_shutdown(struct intel_engine_cs *engine)
+static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
 {
-	/* Synchronise with residual timers and any softirq they raise */
-	del_timer_sync(&engine->execlists.timer);
-	del_timer_sync(&engine->execlists.preempt);
-	tasklet_kill(&engine->execlists.tasklet);
+	if (INTEL_GEN(engine->i915) >= 12)
+		return 0x12;
+	else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
+		return 0x18;
+	else
+		return -1;
 }
 
-static void execlists_release(struct intel_engine_cs *engine)
+static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
 {
-	engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+	int x;
 
-	execlists_shutdown(engine);
+	x = lrc_ring_wa_bb_per_ctx(engine);
+	if (x < 0)
+		return x;
 
-	intel_engine_cleanup_common(engine);
-	lrc_destroy_wa_ctx(engine);
+	return x + 2;
 }
 
-static void
-logical_ring_default_vfuncs(struct intel_engine_cs *engine)
+static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
 {
-	/* Default vfuncs which can be overriden by each engine. */
-
-	engine->resume = execlists_resume;
-
-	engine->cops = &execlists_context_ops;
-	engine->request_alloc = execlists_request_alloc;
-
-	engine->emit_flush = gen8_emit_flush;
-	engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
-	engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;
-	if (INTEL_GEN(engine->i915) >= 12) {
-		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb;
-		engine->emit_flush = gen12_emit_flush;
-	}
-	engine->set_default_submission = intel_execlists_set_default_submission;
+	int x;
 
-	if (INTEL_GEN(engine->i915) < 11) {
-		engine->irq_enable = gen8_logical_ring_enable_irq;
-		engine->irq_disable = gen8_logical_ring_disable_irq;
-	} else {
-		/*
-		 * TODO: On Gen11 interrupt masks need to be clear
		 * to allow C6 entry. Keep interrupts enabled
		 * and take the hit of generating extra interrupts
-		 * until a more refined solution exists.
-		 */
-	}
+	x = lrc_ring_indirect_ptr(engine);
+	if (x < 0)
+		return x;
+
+	return x + 2;
 }
 
-static inline void
-logical_ring_default_irqs(struct intel_engine_cs *engine)
+static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
 {
-	unsigned int shift = 0;
-
-	if (INTEL_GEN(engine->i915) < 11) {
-		const u8 irq_shifts[] = {
-			[RCS0]  = GEN8_RCS_IRQ_SHIFT,
-			[BCS0]  = GEN8_BCS_IRQ_SHIFT,
-			[VCS0]  = GEN8_VCS0_IRQ_SHIFT,
-			[VCS1]  = GEN8_VCS1_IRQ_SHIFT,
-			[VECS0] = GEN8_VECS_IRQ_SHIFT,
-		};
-
-		shift = irq_shifts[engine->id];
-	}
+	if (engine->class != RENDER_CLASS)
+		return -1;
 
-	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
-	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
-	engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
-	engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
+	if (INTEL_GEN(engine->i915) >= 12)
+		return 0xb6;
+	else if (INTEL_GEN(engine->i915) >= 11)
+		return 0xaa;
+	else
+		return -1;
 }
 
-static void rcs_submission_override(struct intel_engine_cs *engine)
+static u32
+lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
 {
 	switch (INTEL_GEN(engine->i915)) {
+	default:
+		MISSING_CASE(INTEL_GEN(engine->i915));
+		fallthrough;
 	case 12:
-		engine->emit_flush = gen12_emit_flush_render;
-		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
-		break;
+		return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
 	case 11:
-		engine->emit_flush = gen11_emit_flush_render;
-		engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
-		break;
-	default:
-		engine->emit_flush = gen8_emit_flush_render;
-		engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
-		break;
+		return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+	case 10:
+		return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+	case 9:
+		return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+	case 8:
+		return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
 	}
 }
 
-int intel_execlists_submission_setup(struct intel_engine_cs *engine)
+static void
+lrc_setup_indirect_ctx(u32 *regs,
+		       const struct intel_engine_cs *engine,
+		       u32 ctx_bb_ggtt_addr,
+		       u32 size)
 {
-	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct drm_i915_private *i915 = engine->i915;
-	struct intel_uncore *uncore = engine->uncore;
-	u32 base = engine->mmio_base;
-
-	tasklet_init(&engine->execlists.tasklet,
-		     execlists_submission_tasklet, (unsigned long)engine);
-	timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
-	timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
-
-	logical_ring_default_vfuncs(engine);
-	logical_ring_default_irqs(engine);
-
-	if (engine->class == RENDER_CLASS)
-		rcs_submission_override(engine);
-
-	if (intel_init_workaround_bb(engine))
-		/*
		 * We continue even if we fail to initialize the WA batch
		 * because we only expect rare glitches, nothing critical
		 * enough to prevent us from using the GPU
-		 */
-		drm_err(&i915->drm, "WA batch buffer initialization failed\n");
-
-	if (HAS_LOGICAL_RING_ELSQ(i915)) {
-		execlists->submit_reg = uncore->regs +
-			i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
-		execlists->ctrl_reg = uncore->regs +
-			i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
-	} else {
-		execlists->submit_reg = uncore->regs +
-			i915_mmio_reg_offset(RING_ELSP(base));
-	}
-
-	execlists->csb_status =
-		(u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
-
-	execlists->csb_write =
-		&engine->status_page.addr[intel_hws_csb_write_index(i915)];
-
-	if (INTEL_GEN(i915) < 11)
-		execlists->csb_size = GEN8_CSB_ENTRIES;
-	else
-		execlists->csb_size = GEN11_CSB_ENTRIES;
-
-	if (INTEL_GEN(engine->i915) >= 11) {
-		execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
-		execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
-	}
-
-	/* Finally, take ownership and responsibility for cleanup! */
-	engine->sanitize = execlists_sanitize;
-	engine->release = execlists_release;
+	GEM_BUG_ON(!size);
+	GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
+	GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
+	regs[lrc_ring_indirect_ptr(engine) + 1] =
+		ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);
 
-	return 0;
+	GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
+	regs[lrc_ring_indirect_offset(engine) + 1] =
+		lrc_ring_indirect_offset_default(engine) << 6;
 }
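The packing above works because the batch address is cacheline aligned (enforced by the GEM_BUG_ONs), leaving the low six bits free to carry the batch length in cachelines. A runnable restatement of that encoding:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES 64

static uint32_t pack_indirect_ctx(uint32_t addr, uint32_t size)
{
	assert(addr % CACHELINE_BYTES == 0);	/* address is aligned */
	assert(size % CACHELINE_BYTES == 0);	/* size is whole cachelines */
	return addr | (size / CACHELINE_BYTES);
}

int main(void)
{
	uint32_t v = pack_indirect_ctx(0x10000, 3 * CACHELINE_BYTES);

	printf("addr 0x%x, %u cachelines\n",
	       v & ~(CACHELINE_BYTES - 1u), v & (CACHELINE_BYTES - 1u));
	return 0;
}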
 
-static void init_common_reg_state(u32 * const regs,
-				  const struct intel_engine_cs *engine,
-				  const struct intel_ring *ring,
-				  bool inhibit)
+static void init_common_regs(u32 * const regs,
+			     const struct intel_context *ce,
+			     const struct intel_engine_cs *engine,
+			     bool inhibit)
 {
 	u32 ctl;
 
@@ -5288,12 +640,11 @@ static void init_common_reg_state(u32 * const regs,
 					   CTX_CTRL_RS_CTX_ENABLE);
 	regs[CTX_CONTEXT_CONTROL] = ctl;
 
-	regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
-	regs[CTX_TIMESTAMP] = 0;
+	regs[CTX_TIMESTAMP] = ce->runtime.last;
 }
 
-static void init_wa_bb_reg_state(u32 * const regs,
-				 const struct intel_engine_cs *engine)
+static void init_wa_bb_regs(u32 * const regs,
+			    const struct intel_engine_cs *engine)
 {
 	const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
 
@@ -5306,14 +657,14 @@ static void init_wa_bb_reg_state(u32 * const regs,
 	}
 
 	if (wa_ctx->indirect_ctx.size) {
-		lrc_ring_setup_indirect_ctx(regs, engine,
-					    i915_ggtt_offset(wa_ctx->vma) +
-					    wa_ctx->indirect_ctx.offset,
-					    wa_ctx->indirect_ctx.size);
+		lrc_setup_indirect_ctx(regs, engine,
+				       i915_ggtt_offset(wa_ctx->vma) +
+				       wa_ctx->indirect_ctx.offset,
+				       wa_ctx->indirect_ctx.size);
 	}
 }
 
-static void init_ppgtt_reg_state(u32 *regs, const struct i915_ppgtt *ppgtt)
+static void init_ppgtt_regs(u32 *regs, const struct i915_ppgtt *ppgtt)
 {
 	if (i915_vm_is_4lvl(&ppgtt->vm)) {
 		/* 64b PPGTT (48bit canonical)
@@ -5337,11 +688,21 @@ static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
 		return i915_vm_to_ppgtt(vm);
 }
 
-static void execlists_init_reg_state(u32 *regs,
-				     const struct intel_context *ce,
-				     const struct intel_engine_cs *engine,
-				     const struct intel_ring *ring,
-				     bool inhibit)
+static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
+{
+	int x;
+
+	x = lrc_ring_mi_mode(engine);
+	if (x != -1) {
+		regs[x + 1] &= ~STOP_RING;
+		regs[x + 1] |= STOP_RING << 16;
+	}
+}
+
+static void __lrc_init_regs(u32 *regs,
+			    const struct intel_context *ce,
+			    const struct intel_engine_cs *engine,
+			    bool inhibit)
 {
 	/*
 	 * A context is actually a big batch buffer with several
@@ -5353,73 +714,90 @@ static void execlists_init_reg_state(u32 *regs,
 	 *
 	 * Must keep consistent with virtual_update_register_offsets().
 	 */
+
+	if (inhibit)
+		memset(regs, 0, PAGE_SIZE);
+
 	set_offsets(regs, reg_offsets(engine), engine, inhibit);
 
-	init_common_reg_state(regs, engine, ring, inhibit);
-	init_ppgtt_reg_state(regs, vm_alias(ce->vm));
+	init_common_regs(regs, ce, engine, inhibit);
+	init_ppgtt_regs(regs, vm_alias(ce->vm));
 
-	init_wa_bb_reg_state(regs, engine);
+	init_wa_bb_regs(regs, engine);
 
 	__reset_stop_ring(regs, engine);
 }
 
-static int
-populate_lr_context(struct intel_context *ce,
-		    struct drm_i915_gem_object *ctx_obj,
+void lrc_init_regs(const struct intel_context *ce,
+		   const struct intel_engine_cs *engine,
+		   bool inhibit)
+{
+	__lrc_init_regs(ce->lrc_reg_state, ce, engine, inhibit);
+}
+
+void lrc_reset_regs(const struct intel_context *ce,
+		    const struct intel_engine_cs *engine)
+{
+	__reset_stop_ring(ce->lrc_reg_state, engine);
+}
+
+static void
+set_redzone(void *vaddr, const struct intel_engine_cs *engine)
+{
+	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		return;
+
+	vaddr += engine->context_size;
+
+	memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
+}
+
+static void
+check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
+{
+	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		return;
+
+	vaddr += engine->context_size;
+
+	if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
+		drm_err_once(&engine->i915->drm,
+			     "%s context redzone overwritten!\n",
+			     engine->name);
+}
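What set_redzone()/check_redzone() implement, reduced to a runnable toy: poison a guard region past the useful payload, then scan it to detect overruns. The sizes and poison byte below are arbitrary stand-ins for engine->context_size, I915_GTT_PAGE_SIZE and CONTEXT_REDZONE:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAYLOAD_SIZE	128
#define REDZONE_SIZE	32
#define REDZONE_BYTE	0x5a

static int redzone_intact(const uint8_t *buf)
{
	for (size_t i = PAYLOAD_SIZE; i < PAYLOAD_SIZE + REDZONE_SIZE; i++)
		if (buf[i] != REDZONE_BYTE)
			return 0;
	return 1;
}

int main(void)
{
	uint8_t buf[PAYLOAD_SIZE + REDZONE_SIZE];

	memset(buf, 0, PAYLOAD_SIZE);
	memset(buf + PAYLOAD_SIZE, REDZONE_BYTE, REDZONE_SIZE);

	buf[PAYLOAD_SIZE + 3] = 0;	/* simulated overrun */
	printf("redzone %s\n", redzone_intact(buf) ? "intact" : "overwritten!");
	return 0;
}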
+
+void lrc_init_state(struct intel_context *ce,
 		    struct intel_engine_cs *engine,
-		    struct intel_ring *ring)
+		    void *state)
 {
 	bool inhibit = true;
-	void *vaddr;
 
-	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		drm_dbg(&engine->i915->drm, "Could not map object pages!\n");
-		return PTR_ERR(vaddr);
-	}
-
-	set_redzone(vaddr, engine);
+	set_redzone(state, engine);
 
 	if (engine->default_state) {
 		shmem_read(engine->default_state, 0,
-			   vaddr, engine->context_size);
+			   state, engine->context_size);
 		__set_bit(CONTEXT_VALID_BIT, &ce->flags);
 		inhibit = false;
 	}
 
 	/* Clear the ppHWSP (inc. per-context counters) */
-	memset(vaddr, 0, PAGE_SIZE);
+	memset(state, 0, PAGE_SIZE);
 
 	/*
 	 * The second page of the context object contains some registers which
 	 * must be set up prior to the first execution.
 	 */
-	execlists_init_reg_state(vaddr + LRC_STATE_OFFSET,
-				 ce, engine, ring, inhibit);
-
-	__i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
-	i915_gem_object_unpin_map(ctx_obj);
-	return 0;
+	__lrc_init_regs(state + LRC_STATE_OFFSET, ce, engine, inhibit);
 }
 
-static struct intel_timeline *pinned_timeline(struct intel_context *ce)
+static struct i915_vma *
+__lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
 {
-	struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
-
-	return intel_timeline_create_from_engine(ce->engine,
-						 page_unmask_bits(tl));
-}
-
-static int __execlists_context_alloc(struct intel_context *ce,
-				     struct intel_engine_cs *engine)
-{
-	struct drm_i915_gem_object *ctx_obj;
-	struct intel_ring *ring;
+	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 	u32 context_size;
-	int ret;
 
-	GEM_BUG_ON(ce->state);
 	context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
 
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -5430,14 +808,43 @@ static int __execlists_context_alloc(struct intel_context *ce,
 		context_size += PAGE_SIZE;
 	}
 
-	ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size);
-	if (IS_ERR(ctx_obj))
-		return PTR_ERR(ctx_obj);
+	obj = i915_gem_object_create_shmem(engine->i915, context_size);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
 
-	vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL);
+	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
 	if (IS_ERR(vma)) {
-		ret = PTR_ERR(vma);
-		goto error_deref_obj;
+		i915_gem_object_put(obj);
+		return vma;
+	}
+
+	return vma;
+}
+
+static struct intel_timeline *
+pinned_timeline(struct intel_context *ce, struct intel_engine_cs *engine)
+{
+	struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
+
+	return intel_timeline_create_from_engine(engine, page_unmask_bits(tl));
+}
+
+int lrc_alloc(struct intel_context *ce, struct intel_engine_cs *engine)
+{
+	struct intel_ring *ring;
+	struct i915_vma *vma;
+	int err;
+
+	GEM_BUG_ON(ce->state);
+
+	vma = __lrc_alloc_state(ce, engine);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
+	if (IS_ERR(ring)) {
+		err = PTR_ERR(ring);
+		goto err_vma;
 	}
 
 	if (!page_mask_bits(ce->timeline)) {
@@ -5448,704 +855,712 @@ static int __execlists_context_alloc(struct intel_context *ce,
 		 * a dynamically allocated cacheline for everyone else.
 		 */
 		if (unlikely(ce->timeline))
-			tl = pinned_timeline(ce);
+			tl = pinned_timeline(ce, engine);
 		else
 			tl = intel_timeline_create(engine->gt);
 		if (IS_ERR(tl)) {
-			ret = PTR_ERR(tl);
-			goto error_deref_obj;
+			err = PTR_ERR(tl);
+			goto err_ring;
 		}
 
-		ce->timeline = tl;
-	}
+		ce->timeline = tl;
+	}
+
+	ce->ring = ring;
+	ce->state = vma;
+
+	return 0;
+
+err_ring:
+	intel_ring_put(ring);
+err_vma:
+	i915_vma_put(vma);
+	return err;
+}
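
lrc_alloc() uses the usual goto-based unwind: each error label releases exactly what was acquired before the failing step, and the labels fall through in reverse acquisition order. A compile-and-run sketch of the shape, with stub functions standing in for the real vma/ring/timeline acquire-release pairs (all names here are invented):

#include <stdio.h>

static int acquire_vma(void)      { return 0; }
static int acquire_ring(void)     { return 0; }
static int acquire_timeline(void) { return -1; /* simulate failure */ }
static void release_vma(void)     { puts("vma released"); }
static void release_ring(void)    { puts("ring released"); }

static int alloc_sketch(void)
{
	int err;

	err = acquire_vma();
	if (err)
		return err;

	err = acquire_ring();
	if (err)
		goto err_vma;

	err = acquire_timeline();
	if (err)
		goto err_ring;

	return 0;

err_ring:
	release_ring();
err_vma:
	release_vma();
	return err;
}

int main(void)
{
	printf("alloc_sketch() = %d\n", alloc_sketch());
	return 0;
}
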
+
+void lrc_reset(struct intel_context *ce)
+{
+	GEM_BUG_ON(!intel_context_is_pinned(ce));
+
+	intel_ring_reset(ce->ring, ce->ring->emit);
+
+	/* Scrub away the garbage */
+	lrc_init_regs(ce, ce->engine, true);
+	ce->lrc.lrca = lrc_update_regs(ce, ce->engine, ce->ring->tail);
+}
+
+int
+lrc_pre_pin(struct intel_context *ce,
+	    struct intel_engine_cs *engine,
+	    struct i915_gem_ww_ctx *ww,
+	    void **vaddr)
+{
+	GEM_BUG_ON(!ce->state);
+	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
+
+	*vaddr = i915_gem_object_pin_map(ce->state->obj,
+					 i915_coherent_map_type(ce->engine->i915) |
+					 I915_MAP_OVERRIDE);
+
+	return PTR_ERR_OR_ZERO(*vaddr);
+}
+
+int
+lrc_pin(struct intel_context *ce,
+	struct intel_engine_cs *engine,
+	void *vaddr)
+{
+	ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
+
+	if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags))
+		lrc_init_state(ce, engine, vaddr);
+
+	ce->lrc.lrca = lrc_update_regs(ce, engine, ce->ring->tail);
+	return 0;
+}
+
+void lrc_unpin(struct intel_context *ce)
+{
+	check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
+		      ce->engine);
+}
+
+void lrc_post_unpin(struct intel_context *ce)
+{
+	i915_gem_object_unpin_map(ce->state->obj);
+}
+
+void lrc_fini(struct intel_context *ce)
+{
+	if (!ce->state)
+		return;
+
+	intel_ring_put(fetch_and_zero(&ce->ring));
+	i915_vma_put(fetch_and_zero(&ce->state));
+}
+
+void lrc_destroy(struct kref *kref)
+{
+	struct intel_context *ce = container_of(kref, typeof(*ce), ref);
+
+	GEM_BUG_ON(!i915_active_is_idle(&ce->active));
+	GEM_BUG_ON(intel_context_is_pinned(ce));
+
+	lrc_fini(ce);
+
+	intel_context_fini(ce);
+	intel_context_free(ce);
+}
+
+static u32 *
+gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs)
+{
+	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+		MI_SRM_LRM_GLOBAL_GTT |
+		MI_LRI_LRM_CS_MMIO;
+	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+	*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
+		CTX_TIMESTAMP * sizeof(u32);
+	*cs++ = 0;
+
+	*cs++ = MI_LOAD_REGISTER_REG |
+		MI_LRR_SOURCE_CS_MMIO |
+		MI_LRI_LRM_CS_MMIO;
+	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+	*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));
+
+	*cs++ = MI_LOAD_REGISTER_REG |
+		MI_LRR_SOURCE_CS_MMIO |
+		MI_LRI_LRM_CS_MMIO;
+	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+	*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));
+
+	return cs;
+}
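
The gen12_emit_* helpers all follow the driver's command-stream convention: take a u32 cursor, append dwords, and return the advanced cursor so calls compose. A freestanding sketch of that convention, including the MI_NOOP cacheline padding used further below (the opcodes are toys, not real MI encodings):

#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES 64
#define OP_NOOP  0x00000000u	/* toy opcodes, not real MI encodings */
#define OP_STORE 0x10000000u

static uint32_t *emit_store(uint32_t *cs, uint32_t reg, uint32_t addr)
{
	*cs++ = OP_STORE;	/* command header */
	*cs++ = reg;		/* source register offset */
	*cs++ = addr;		/* destination address, low dword */
	*cs++ = 0;		/* destination address, high dword */
	return cs;
}

int main(void)
{
	static _Alignas(CACHELINE_BYTES) uint32_t batch[32];
	uint32_t *cs = batch;

	cs = emit_store(cs, 0x600, 0x2000);
	cs = emit_store(cs, 0x604, 0x2004);

	/* pad to the cacheline boundary with NOOPs, as the driver does */
	while ((uintptr_t)cs % CACHELINE_BYTES)
		*cs++ = OP_NOOP;

	printf("emitted %td dwords\n", cs - batch);
	return 0;
}
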
+
+static u32 *
+gen12_emit_restore_scratch(const struct intel_context *ce, u32 *cs)
+{
+	GEM_BUG_ON(lrc_ring_gpr0(ce->engine) == -1);
+
+	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+		MI_SRM_LRM_GLOBAL_GTT |
+		MI_LRI_LRM_CS_MMIO;
+	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+	*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
+		(lrc_ring_gpr0(ce->engine) + 1) * sizeof(u32);
+	*cs++ = 0;
+
+	return cs;
+}
+
+static u32 *
+gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs)
+{
+	GEM_BUG_ON(lrc_ring_cmd_buf_cctl(ce->engine) == -1);
+
+	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+		MI_SRM_LRM_GLOBAL_GTT |
+		MI_LRI_LRM_CS_MMIO;
+	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+	*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
+		(lrc_ring_cmd_buf_cctl(ce->engine) + 1) * sizeof(u32);
+	*cs++ = 0;
+
+	*cs++ = MI_LOAD_REGISTER_REG |
+		MI_LRR_SOURCE_CS_MMIO |
+		MI_LRI_LRM_CS_MMIO;
+	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+	*cs++ = i915_mmio_reg_offset(RING_CMD_BUF_CCTL(0));
 
-	ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
-	if (IS_ERR(ring)) {
-		ret = PTR_ERR(ring);
-		goto error_deref_obj;
-	}
+	return cs;
+}
 
-	ret = populate_lr_context(ce, ctx_obj, engine, ring);
-	if (ret) {
-		drm_dbg(&engine->i915->drm,
-			"Failed to populate LRC: %d\n", ret);
-		goto error_ring_free;
-	}
+static u32 *
+gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
+{
+	cs = gen12_emit_timestamp_wa(ce, cs);
+	cs = gen12_emit_cmd_buf_wa(ce, cs);
+	cs = gen12_emit_restore_scratch(ce, cs);
 
-	ce->ring = ring;
-	ce->state = vma;
+	return cs;
+}
 
-	return 0;
+static u32 *
+gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
+{
+	cs = gen12_emit_timestamp_wa(ce, cs);
+	cs = gen12_emit_restore_scratch(ce, cs);
 
-error_ring_free:
-	intel_ring_put(ring);
-error_deref_obj:
-	i915_gem_object_put(ctx_obj);
-	return ret;
+	return cs;
 }
 
-static struct list_head *virtual_queue(struct virtual_engine *ve)
+static u32 context_wa_bb_offset(const struct intel_context *ce)
 {
-	return &ve->base.execlists.default_priolist.requests[0];
+	return PAGE_SIZE * ce->wa_bb_page;
 }
 
-static void rcu_virtual_context_destroy(struct work_struct *wrk)
+static u32 *context_indirect_bb(const struct intel_context *ce)
 {
-	struct virtual_engine *ve =
-		container_of(wrk, typeof(*ve), rcu.work);
-	unsigned int n;
+	void *ptr;
 
-	GEM_BUG_ON(ve->context.inflight);
+	GEM_BUG_ON(!ce->wa_bb_page);
 
-	/* Preempt-to-busy may leave a stale request behind. */
-	if (unlikely(ve->request)) {
-		struct i915_request *old;
+	ptr = ce->lrc_reg_state;
+	ptr -= LRC_STATE_OFFSET; /* back to start of context image */
+	ptr += context_wa_bb_offset(ce);
 
-		spin_lock_irq(&ve->base.active.lock);
+	return ptr;
+}
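
context_indirect_bb() recovers the start of the context image from the reg-state pointer and then indexes the per-context workaround page. The same arithmetic in runnable form; the one-page LRC_STATE_OFFSET and the page index are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define LRC_STATE_OFFSET PAGE_SIZE	/* reg state follows the ppHWSP page */

int main(void)
{
	static uint8_t image[4 * PAGE_SIZE];	/* whole context image */
	uint8_t *reg_state = image + LRC_STATE_OFFSET;
	unsigned int wa_bb_page = 3;		/* page index within the image */

	/* step back to the start of the image, then index the wa_bb page */
	uint8_t *wa_bb = (reg_state - LRC_STATE_OFFSET) +
			 wa_bb_page * PAGE_SIZE;

	printf("wa_bb at byte offset %td\n", wa_bb - image);
	return 0;
}
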
 
-		old = fetch_and_zero(&ve->request);
-		if (old) {
-			GEM_BUG_ON(!i915_request_completed(old));
-			__i915_request_submit(old);
-			i915_request_put(old);
-		}
+static void
+setup_indirect_ctx_bb(const struct intel_context *ce,
+		      const struct intel_engine_cs *engine,
+		      u32 *(*emit)(const struct intel_context *, u32 *))
+{
+	u32 * const start = context_indirect_bb(ce);
+	u32 *cs;
 
-		spin_unlock_irq(&ve->base.active.lock);
-	}
+	cs = emit(ce, start);
+	GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
+	while ((unsigned long)cs % CACHELINE_BYTES)
+		*cs++ = MI_NOOP;
 
-	/*
-	 * Flush the tasklet in case it is still running on another core.
-	 *
-	 * This needs to be done before we remove ourselves from the siblings'
-	 * rbtrees as in the case it is running in parallel, it may reinsert
-	 * the rb_node into a sibling.
-	 */
-	tasklet_kill(&ve->base.execlists.tasklet);
+	lrc_setup_indirect_ctx(ce->lrc_reg_state, engine,
+			       i915_ggtt_offset(ce->state) +
+			       context_wa_bb_offset(ce),
+			       (cs - start) * sizeof(*cs));
+}
 
-	/* Decouple ourselves from the siblings, no more access allowed. */
-	for (n = 0; n < ve->num_siblings; n++) {
-		struct intel_engine_cs *sibling = ve->siblings[n];
-		struct rb_node *node = &ve->nodes[sibling->id].rb;
+/*
+ * The context descriptor encodes various attributes of a context,
+ * including its GTT address and some flags. Because it's fairly
+ * expensive to calculate, we'll just do it once and cache the result,
+ * which remains valid until the context is unpinned.
+ *
+ * This is what a descriptor looks like, from LSB to MSB::
+ *
+ *      bits  0-11:    flags, GEN8_CTX_* (cached in ctx->desc_template)
+ *      bits 12-31:    LRCA, GTT address of (the HWSP of) this context
+ *      bits 32-52:    ctx ID, a globally unique tag (highest bit used by GuC)
+ *      bits 53-54:    mbz, reserved for use by hardware
+ *      bits 55-63:    group ID, currently unused and set to 0
+ *
+ * Starting from Gen11, the upper dword of the descriptor has a new format:
+ *
+ *      bits 32-36:    reserved
+ *      bits 37-47:    SW context ID
+ *      bits 48-53:    engine instance
+ *      bit 54:        mbz, reserved for use by hardware
+ *      bits 55-60:    SW counter
+ *      bits 61-63:    engine class
+ *
+ * engine info, SW context ID and SW counter need to form a unique number
+ * (Context ID) per lrc.
+ */
+static u32 lrc_descriptor(const struct intel_context *ce)
+{
+	u32 desc;
 
-		if (RB_EMPTY_NODE(node))
-			continue;
+	desc = INTEL_LEGACY_32B_CONTEXT;
+	if (i915_vm_is_4lvl(ce->vm))
+		desc = INTEL_LEGACY_64B_CONTEXT;
+	desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
-		spin_lock_irq(&sibling->active.lock);
+	desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
+	if (IS_GEN(ce->vm->i915, 8))
+		desc |= GEN8_CTX_L3LLC_COHERENT;
 
-		/* Detachment is lazily performed in the execlists tasklet */
-		if (!RB_EMPTY_NODE(node))
-			rb_erase_cached(node, &sibling->execlists.virtual);
+	return i915_ggtt_offset(ce->state) | desc;
+}
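
As a concrete illustration of the low dword described above, the sketch below packs the addressing mode, validity flag and LRCA the way lrc_descriptor() does. The #define values mirror the driver's definitions but are repeated here only for illustration:

#include <stdint.h>
#include <stdio.h>

#define GEN8_CTX_VALID			(1u << 0)
#define GEN8_CTX_ADDRESSING_MODE_SHIFT	3
#define INTEL_LEGACY_64B_CONTEXT	3u

int main(void)
{
	uint32_t lrca = 0x00fed000;	/* page-aligned GGTT address (bits 12-31) */
	uint32_t desc;

	desc = INTEL_LEGACY_64B_CONTEXT << GEN8_CTX_ADDRESSING_MODE_SHIFT;
	desc |= GEN8_CTX_VALID;
	desc |= lrca;			/* flags live below bit 12 */

	printf("desc = 0x%08x\n", desc);
	return 0;
}
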
 
-		spin_unlock_irq(&sibling->active.lock);
-	}
-	GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
-	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+u32 lrc_update_regs(const struct intel_context *ce,
+		    const struct intel_engine_cs *engine,
+		    u32 head)
+{
+	struct intel_ring *ring = ce->ring;
+	u32 *regs = ce->lrc_reg_state;
 
-	if (ve->context.state)
-		__execlists_context_fini(&ve->context);
-	intel_context_fini(&ve->context);
+	GEM_BUG_ON(!intel_ring_offset_valid(ring, head));
+	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
 
-	intel_breadcrumbs_free(ve->base.breadcrumbs);
-	intel_engine_free_request_pool(&ve->base);
+	regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
+	regs[CTX_RING_HEAD] = head;
+	regs[CTX_RING_TAIL] = ring->tail;
+	regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
 
-	kfree(ve->bonds);
-	kfree(ve);
-}
+	/* RPCS */
+	if (engine->class == RENDER_CLASS) {
+		regs[CTX_R_PWR_CLK_STATE] =
+			intel_sseu_make_rpcs(engine->gt, &ce->sseu);
 
-static void virtual_context_destroy(struct kref *kref)
-{
-	struct virtual_engine *ve =
-		container_of(kref, typeof(*ve), context.ref);
+		i915_oa_init_reg_state(ce, engine);
+	}
 
-	GEM_BUG_ON(!list_empty(&ve->context.signals));
+	if (ce->wa_bb_page) {
+		u32 *(*fn)(const struct intel_context *ce, u32 *cs);
 
-	/*
-	 * When destroying the virtual engine, we have to be aware that
-	 * it may still be in use from an hardirq/softirq context causing
-	 * the resubmission of a completed request (background completion
-	 * due to preempt-to-busy). Before we can free the engine, we need
-	 * to flush the submission code and tasklets that are still potentially
-	 * accessing the engine. Flushing the tasklets requires process context,
-	 * and since we can guard the resubmit onto the engine with an RCU read
-	 * lock, we can delegate the free of the engine to an RCU worker.
-	 */
-	INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
-	queue_rcu_work(system_wq, &ve->rcu);
-}
+		fn = gen12_emit_indirect_ctx_xcs;
+		if (ce->engine->class == RENDER_CLASS)
+			fn = gen12_emit_indirect_ctx_rcs;
 
-static void virtual_engine_initial_hint(struct virtual_engine *ve)
-{
-	int swp;
+		/* Mutually exclusive wrt the global indirect bb */
+		GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
+		setup_indirect_ctx_bb(ce, engine, fn);
+	}
 
-	/*
-	 * Pick a random sibling on starting to help spread the load around.
-	 *
-	 * New contexts are typically created with exactly the same order
-	 * of siblings, and often started in batches. Due to the way we iterate
-	 * the array of sibling when submitting requests, sibling[0] is
-	 * prioritised for dequeuing. If we make sure that sibling[0] is fairly
-	 * randomised across the system, we also help spread the load by the
-	 * first engine we inspect being different each time.
-	 *
-	 * NB This does not force us to execute on this engine, it will just
-	 * typically be the first we inspect for submission.
-	 */
-	swp = prandom_u32_max(ve->num_siblings);
-	if (swp)
-		swap(ve->siblings[swp], ve->siblings[0]);
+	return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE;
 }
 
-static int virtual_context_alloc(struct intel_context *ce)
+void lrc_update_offsets(struct intel_context *ce,
+			struct intel_engine_cs *engine)
 {
-	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
-
-	return __execlists_context_alloc(ce, ve->siblings[0]);
+	set_offsets(ce->lrc_reg_state, reg_offsets(engine), engine, false);
 }
 
-static int virtual_context_pin(struct intel_context *ce, void *vaddr)
+void lrc_check_regs(const struct intel_context *ce,
+		    const struct intel_engine_cs *engine,
+		    const char *when)
 {
-	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+	const struct intel_ring *ring = ce->ring;
+	u32 *regs = ce->lrc_reg_state;
+	bool valid = true;
+	int x;
 
-	/* Note: we must use a real engine class for setting up reg state */
-	return __execlists_context_pin(ce, ve->siblings[0], vaddr);
-}
+	if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
+		pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
+		       engine->name,
+		       regs[CTX_RING_START],
+		       i915_ggtt_offset(ring->vma));
+		regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
+		valid = false;
+	}
 
-static void virtual_context_enter(struct intel_context *ce)
-{
-	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
-	unsigned int n;
+	if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
+	    (RING_CTL_SIZE(ring->size) | RING_VALID)) {
+		pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
+		       engine->name,
+		       regs[CTX_RING_CTL],
+		       (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
+		regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+		valid = false;
+	}
 
-	for (n = 0; n < ve->num_siblings; n++)
-		intel_engine_pm_get(ve->siblings[n]);
+	x = lrc_ring_mi_mode(engine);
+	if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
+		pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
+		       engine->name, regs[x + 1]);
+		regs[x + 1] &= ~STOP_RING;
+		regs[x + 1] |= STOP_RING << 16;
+		valid = false;
+	}
 
-	intel_timeline_enter(ce->timeline);
+	WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when);
 }
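
The STOP_RING test above relies on the "masked register" convention: the top 16 bits of the written value are per-bit write enables for the bottom 16, so a bit is effectively asserted only when both the value bit and its enable bit are set. A sketch of how such a value is composed and checked (the STOP_RING bit position here is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

#define STOP_RING (1u << 8)	/* bit position assumed for illustration */

/* compose a masked-register write: mask selects which bits take effect */
static uint32_t masked_write(uint32_t mask, uint32_t value)
{
	return (mask << 16) | value;
}

int main(void)
{
	uint32_t mi_mode = masked_write(STOP_RING, STOP_RING);

	/* the test from lrc_check_regs(): value bit AND enable bit */
	if (mi_mode & (mi_mode >> 16) & STOP_RING)
		printf("STOP_RING would be latched: %08x\n", mi_mode);

	/* the fix: keep the enable bit, write the value bit back to 0 */
	mi_mode = masked_write(STOP_RING, 0);
	printf("cleared: %08x\n", mi_mode);
	return 0;
}
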
 
-static void virtual_context_exit(struct intel_context *ce)
+/*
+ * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the
+ * PIPE_CONTROL instruction. This is required for the flush to happen
+ * correctly, but there is a slight complication: this is applied in a WA
+ * batch where the values are only initialized once, so we cannot take the
+ * register value at the beginning and reuse it later; hence we save its
+ * value to memory, upload a constant value with bit 21 set and then restore
+ * it with the saved value. To simplify the WA, the constant is formed from
+ * the default value of this register. This shouldn't be a problem because
+ * we only modify it for a short period and this batch is non-preemptible.
+ * We could of course use additional instructions that read the actual value
+ * of the register at that time and set our bit of interest, but that would
+ * make the WA more complicated.
+ *
+ * This WA is also required for Gen9, so extracting it as a function avoids
+ * code duplication.
+ */
+static u32 *
+gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 {
-	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
-	unsigned int n;
-
-	intel_timeline_exit(ce->timeline);
-
-	for (n = 0; n < ve->num_siblings; n++)
-		intel_engine_pm_put(ve->siblings[n]);
-}
+	/* NB no one else is allowed to scribble over scratch + 256! */
+	*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
+	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
+	*batch++ = intel_gt_scratch_offset(engine->gt,
+					   INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
+	*batch++ = 0;
 
-static const struct intel_context_ops virtual_context_ops = {
-	.alloc = virtual_context_alloc,
+	*batch++ = MI_LOAD_REGISTER_IMM(1);
+	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
+	*batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;
 
-	.pre_pin = execlists_context_pre_pin,
-	.pin = virtual_context_pin,
-	.unpin = execlists_context_unpin,
-	.post_unpin = execlists_context_post_unpin,
+	batch = gen8_emit_pipe_control(batch,
+				       PIPE_CONTROL_CS_STALL |
+				       PIPE_CONTROL_DC_FLUSH_ENABLE,
+				       0);
 
-	.enter = virtual_context_enter,
-	.exit = virtual_context_exit,
+	*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
+	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
+	*batch++ = intel_gt_scratch_offset(engine->gt,
+					   INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
+	*batch++ = 0;
 
-	.destroy = virtual_context_destroy,
-};
+	return batch;
+}
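
The save/modify/restore dance above is easier to see outside the batch: park the live register value in scratch memory, load the modified constant, flush, then reload the saved value. Modeled in plain C, with the default value and bit position taken from the comment above (treat both as illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t l3sqcreg4 = 0x40400000;	/* assumed default value */
	uint32_t scratch;			/* stands in for the GGTT scratch slot */

	scratch = l3sqcreg4;			/* 1. SRM: save live value to memory */
	l3sqcreg4 = 0x40400000 | (1u << 21);	/* 2. LRI: constant with bit 21 set */
	/* ... the PIPE_CONTROL DC flush runs here ... */
	l3sqcreg4 = scratch;			/* 3. LRM: restore the saved value */

	printf("restored 0x%08x\n", l3sqcreg4);
	return 0;
}
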
 
-static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
+/*
+ * Typically we only have one indirect_ctx and one per_ctx batch buffer which
+ * are initialized at the beginning and shared across all contexts, but this
+ * field helps us to have multiple batches at different offsets and select
+ * them based on some criteria. At the moment this batch always starts at the
+ * beginning of the page and we don't have multiple wa_ctx batch buffers.
+ *
+ * The number of WAs applied is not known at the beginning; we use this field
+ * to return the number of DWORDS written.
+ *
+ * Note that this batch does not contain MI_BATCH_BUFFER_END, so NOOPs are
+ * added as padding to make it cacheline aligned. MI_BATCH_BUFFER_END will be
+ * added to the per_ctx batch, and the two together make a complete batch
+ * buffer.
+ */
+static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 {
-	struct i915_request *rq;
-	intel_engine_mask_t mask;
-
-	rq = READ_ONCE(ve->request);
-	if (!rq)
-		return 0;
-
-	/* The rq is ready for submission; rq->execution_mask is now stable. */
-	mask = rq->execution_mask;
-	if (unlikely(!mask)) {
-		/* Invalid selection, submit to a random engine in error */
-		i915_request_set_error_once(rq, -ENODEV);
-		mask = ve->siblings[0]->mask;
-	}
-
-	ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
-		     rq->fence.context, rq->fence.seqno,
-		     mask, ve->base.execlists.queue_priority_hint);
+	/* WaDisableCtxRestoreArbitration:bdw,chv */
+	*batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
 
-	return mask;
-}
+	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
+	if (IS_BROADWELL(engine->i915))
+		batch = gen8_emit_flush_coherentl3_wa(engine, batch);
 
-static void virtual_submission_tasklet(unsigned long data)
-{
-	struct virtual_engine * const ve = (struct virtual_engine *)data;
-	const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
-	intel_engine_mask_t mask;
-	unsigned int n;
-
-	rcu_read_lock();
-	mask = virtual_submission_mask(ve);
-	rcu_read_unlock();
-	if (unlikely(!mask))
-		return;
+	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
+	/* Actual scratch location is at 128 bytes offset */
+	batch = gen8_emit_pipe_control(batch,
+				       PIPE_CONTROL_FLUSH_L3 |
+				       PIPE_CONTROL_STORE_DATA_INDEX |
+				       PIPE_CONTROL_CS_STALL |
+				       PIPE_CONTROL_QW_WRITE,
+				       LRC_PPHWSP_SCRATCH_ADDR);
 
-	local_irq_disable();
-	for (n = 0; n < ve->num_siblings; n++) {
-		struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
-		struct ve_node * const node = &ve->nodes[sibling->id];
-		struct rb_node **parent, *rb;
-		bool first;
-
-		if (!READ_ONCE(ve->request))
-			break; /* already handled by a sibling's tasklet */
-
-		if (unlikely(!(mask & sibling->mask))) {
-			if (!RB_EMPTY_NODE(&node->rb)) {
-				spin_lock(&sibling->active.lock);
-				rb_erase_cached(&node->rb,
-						&sibling->execlists.virtual);
-				RB_CLEAR_NODE(&node->rb);
-				spin_unlock(&sibling->active.lock);
-			}
-			continue;
-		}
+	*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
 
-		spin_lock(&sibling->active.lock);
+	/* Pad to end of cacheline */
+	while ((unsigned long)batch % CACHELINE_BYTES)
+		*batch++ = MI_NOOP;
 
-		if (!RB_EMPTY_NODE(&node->rb)) {
-			/*
-			 * Cheat and avoid rebalancing the tree if we can
-			 * reuse this node in situ.
-			 */
-			first = rb_first_cached(&sibling->execlists.virtual) ==
-				&node->rb;
-			if (prio == node->prio || (prio > node->prio && first))
-				goto submit_engine;
+	/*
+	 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
+	 * execution depends on the length specified in terms of cache lines
+	 * in the register CTX_RCS_INDIRECT_CTX
+	 */
 
-			rb_erase_cached(&node->rb, &sibling->execlists.virtual);
-		}
+	return batch;
+}
 
-		rb = NULL;
-		first = true;
-		parent = &sibling->execlists.virtual.rb_root.rb_node;
-		while (*parent) {
-			struct ve_node *other;
-
-			rb = *parent;
-			other = rb_entry(rb, typeof(*other), rb);
-			if (prio > other->prio) {
-				parent = &rb->rb_left;
-			} else {
-				parent = &rb->rb_right;
-				first = false;
-			}
-		}
+struct lri {
+	i915_reg_t reg;
+	u32 value;
+};
 
-		rb_link_node(&node->rb, rb, parent);
-		rb_insert_color_cached(&node->rb,
-				       &sibling->execlists.virtual,
-				       first);
+static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count)
+{
+	GEM_BUG_ON(!count || count > 63);
 
-submit_engine:
-		GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
-		node->prio = prio;
-		if (first && prio > sibling->execlists.queue_priority_hint)
-			tasklet_hi_schedule(&sibling->execlists.tasklet);
+	*batch++ = MI_LOAD_REGISTER_IMM(count);
+	do {
+		*batch++ = i915_mmio_reg_offset(lri->reg);
+		*batch++ = lri->value;
+	} while (lri++, --count);
+	*batch++ = MI_NOOP;
 
-		spin_unlock(&sibling->active.lock);
-	}
-	local_irq_enable();
+	return batch;
 }
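
emit_lri() works because MI_LOAD_REGISTER_IMM is a single header dword followed by count (register, value) pairs, which is also why count is bounded at 63 by the length field. A freestanding sketch of the same packet construction; the header follows the MI_INSTR(0x22, 2 * count - 1) shape but should be treated as illustrative, and the register/value pairs are made up:

#include <stdint.h>
#include <stdio.h>

#define OP_LRI(count) ((0x22u << 23) | (2 * (count) - 1))

struct lri {
	uint32_t reg;
	uint32_t value;
};

static uint32_t *emit_lri(uint32_t *cs, const struct lri *lri,
			  unsigned int count)
{
	*cs++ = OP_LRI(count);		/* one header for all pairs */
	do {
		*cs++ = lri->reg;
		*cs++ = lri->value;
	} while (lri++, --count);
	return cs;
}

int main(void)
{
	static const struct lri fixups[] = {
		{ 0x7014, 0x00010001 },	/* made-up register/value pairs */
		{ 0x2084, 0x00200020 },
	};
	uint32_t batch[8];
	uint32_t *cs = emit_lri(batch, fixups, 2);

	printf("packet is %td dwords\n", cs - batch);
	return 0;
}
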
 
-static void virtual_submit_request(struct i915_request *rq)
+static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 {
-	struct virtual_engine *ve = to_virtual_engine(rq->engine);
-	struct i915_request *old;
-	unsigned long flags;
-
-	ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
-		     rq->fence.context,
-		     rq->fence.seqno);
+	static const struct lri lri[] = {
+		/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
+		{
+			COMMON_SLICE_CHICKEN2,
+			__MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE,
+				       0),
+		},
 
-	GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
+		/* BSpec: 11391 */
+		{
+			FF_SLICE_CHICKEN,
+			__MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX,
+				       FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
+		},
 
-	spin_lock_irqsave(&ve->base.active.lock, flags);
+		/* BSpec: 11299 */
+		{
+			_3D_CHICKEN3,
+			__MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX,
+				       _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
+		}
+	};
 
-	old = ve->request;
-	if (old) { /* background completion event from preempt-to-busy */
-		GEM_BUG_ON(!i915_request_completed(old));
-		__i915_request_submit(old);
-		i915_request_put(old);
-	}
+	*batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
 
-	if (i915_request_completed(rq)) {
-		__i915_request_submit(rq);
+	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
+	batch = gen8_emit_flush_coherentl3_wa(engine, batch);
 
-		ve->base.execlists.queue_priority_hint = INT_MIN;
-		ve->request = NULL;
-	} else {
-		ve->base.execlists.queue_priority_hint = rq_prio(rq);
-		ve->request = i915_request_get(rq);
+	/* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
+	batch = gen8_emit_pipe_control(batch,
+				       PIPE_CONTROL_FLUSH_L3 |
+				       PIPE_CONTROL_STORE_DATA_INDEX |
+				       PIPE_CONTROL_CS_STALL |
+				       PIPE_CONTROL_QW_WRITE,
+				       LRC_PPHWSP_SCRATCH_ADDR);
 
-		GEM_BUG_ON(!list_empty(virtual_queue(ve)));
-		list_move_tail(&rq->sched.link, virtual_queue(ve));
+	batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
 
-		tasklet_hi_schedule(&ve->base.execlists.tasklet);
+	/* WaMediaPoolStateCmdInWABB:bxt,glk */
+	if (HAS_POOLED_EU(engine->i915)) {
+		/*
+		 * The EU pool configuration is set up along with the golden
+		 * context during context initialization. This value depends
+		 * on the device type (2x6 or 3x6) and needs to be updated
+		 * based on which subslice is disabled, especially for 2x6
+		 * devices. However, it is safe to load the default 3x6
+		 * configuration instead of masking off the corresponding
+		 * bits, because the HW ignores the bits of a disabled
+		 * subslice and drops down to the appropriate config. Please
+		 * see render_state_setup() in i915_gem_render_state.c for
+		 * the possible configurations; to avoid duplication they
+		 * are not shown here again.
+		 */
+		*batch++ = GEN9_MEDIA_POOL_STATE;
+		*batch++ = GEN9_MEDIA_POOL_ENABLE;
+		*batch++ = 0x00777000;
+		*batch++ = 0;
+		*batch++ = 0;
+		*batch++ = 0;
 	}
 
-	spin_unlock_irqrestore(&ve->base.active.lock, flags);
-}
-
-static struct ve_bond *
-virtual_find_bond(struct virtual_engine *ve,
-		  const struct intel_engine_cs *master)
-{
-	int i;
+	*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
 
-	for (i = 0; i < ve->num_bonds; i++) {
-		if (ve->bonds[i].master == master)
-			return &ve->bonds[i];
-	}
+	/* Pad to end of cacheline */
+	while ((unsigned long)batch % CACHELINE_BYTES)
+		*batch++ = MI_NOOP;
 
-	return NULL;
+	return batch;
 }
 
-static void
-virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
+static u32 *
+gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 {
-	struct virtual_engine *ve = to_virtual_engine(rq->engine);
-	intel_engine_mask_t allowed, exec;
-	struct ve_bond *bond;
-
-	allowed = ~to_request(signal)->engine->mask;
+	int i;
 
-	bond = virtual_find_bond(ve, to_request(signal)->engine);
-	if (bond)
-		allowed &= bond->sibling_mask;
+	/*
+	 * WaPipeControlBefore3DStateSamplePattern: cnl
+	 *
+	 * Ensure the engine is idle prior to programming a
+	 * 3DSTATE_SAMPLE_PATTERN during a context restore.
+	 */
+	batch = gen8_emit_pipe_control(batch,
+				       PIPE_CONTROL_CS_STALL,
+				       0);
+	/*
+	 * WaPipeControlBefore3DStateSamplePattern says we need 4 dwords for
+	 * the PIPE_CONTROL followed by 12 dwords of 0x0, so 16 dwords in
+	 * total. However, a PIPE_CONTROL is 6 dwords long, not 4, which is
+	 * confusing. Since gen8_emit_pipe_control() already advances the
+	 * batch by 6 dwords, we advance the other 10 here, completing a
+	 * cacheline. It's not clear if the workaround requires this padding
+	 * before other commands, or if it's just the regular padding we would
+	 * already have for the workaround bb, so leave it here for now.
+	 */
+	for (i = 0; i < 10; i++)
+		*batch++ = MI_NOOP;
 
-	/* Restrict the bonded request to run on only the available engines */
-	exec = READ_ONCE(rq->execution_mask);
-	while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
-		;
+	/* Pad to end of cacheline */
+	while ((unsigned long)batch % CACHELINE_BYTES)
+		*batch++ = MI_NOOP;
 
-	/* Prevent the master from being re-run on the bonded engines */
-	to_request(signal)->execution_mask &= ~allowed;
+	return batch;
 }
 
-struct intel_context *
-intel_execlists_create_virtual(struct intel_engine_cs **siblings,
-			       unsigned int count)
+#define CTX_WA_BB_SIZE (PAGE_SIZE)
+
+static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
 {
-	struct virtual_engine *ve;
-	unsigned int n;
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	int err;
 
-	if (count == 0)
-		return ERR_PTR(-EINVAL);
-
-	if (count == 1)
-		return intel_context_create(siblings[0]);
-
-	ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
-	if (!ve)
-		return ERR_PTR(-ENOMEM);
+	obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_SIZE);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
 
-	ve->base.i915 = siblings[0]->i915;
-	ve->base.gt = siblings[0]->gt;
-	ve->base.uncore = siblings[0]->uncore;
-	ve->base.id = -1;
+	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err;
+	}
 
-	ve->base.class = OTHER_CLASS;
-	ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
-	ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
-	ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+	err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
+	if (err)
+		goto err;
 
-	/*
-	 * The decision on whether to submit a request using semaphores
-	 * depends on the saturated state of the engine. We only compute
-	 * this during HW submission of the request, and we need for this
-	 * state to be globally applied to all requests being submitted
-	 * to this engine. Virtual engines encompass more than one physical
-	 * engine and so we cannot accurately tell in advance if one of those
-	 * engines is already saturated and so cannot afford to use a semaphore
-	 * and be pessimized in priority for doing so -- if we are the only
-	 * context using semaphores after all other clients have stopped, we
-	 * will be starved on the saturated system. Such a global switch for
-	 * semaphores is less than ideal, but alas is the current compromise.
-	 */
-	ve->base.saturated = ALL_ENGINES;
+	engine->wa_ctx.vma = vma;
+	return 0;
 
-	snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
+err:
+	i915_gem_object_put(obj);
+	return err;
+}
 
-	intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
-	intel_engine_init_execlists(&ve->base);
+void lrc_fini_wa_ctx(struct intel_engine_cs *engine)
+{
+	i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
 
-	ve->base.cops = &virtual_context_ops;
-	ve->base.request_alloc = execlists_request_alloc;
+	/* Called on error unwind, clear all flags to prevent further use */
+	memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx));
+}
 
-	ve->base.schedule = i915_schedule;
-	ve->base.submit_request = virtual_submit_request;
-	ve->base.bond_execute = virtual_bond_execute;
+typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
 
-	INIT_LIST_HEAD(virtual_queue(ve));
-	ve->base.execlists.queue_priority_hint = INT_MIN;
-	tasklet_init(&ve->base.execlists.tasklet,
-		     virtual_submission_tasklet,
-		     (unsigned long)ve);
+void lrc_init_wa_ctx(struct intel_engine_cs *engine)
+{
+	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
+	struct i915_wa_ctx_bb *wa_bb[] = {
+		&wa_ctx->indirect_ctx, &wa_ctx->per_ctx
+	};
+	wa_bb_func_t wa_bb_fn[ARRAY_SIZE(wa_bb)];
+	void *batch, *batch_ptr;
+	unsigned int i;
+	int err;
 
-	intel_context_init(&ve->context, &ve->base);
+	if (engine->class != RENDER_CLASS)
+		return;
 
-	ve->base.breadcrumbs = intel_breadcrumbs_create(NULL);
-	if (!ve->base.breadcrumbs) {
-		err = -ENOMEM;
-		goto err_put;
+	switch (INTEL_GEN(engine->i915)) {
+	case 12:
+	case 11:
+		return;
+	case 10:
+		wa_bb_fn[0] = gen10_init_indirectctx_bb;
+		wa_bb_fn[1] = NULL;
+		break;
+	case 9:
+		wa_bb_fn[0] = gen9_init_indirectctx_bb;
+		wa_bb_fn[1] = NULL;
+		break;
+	case 8:
+		wa_bb_fn[0] = gen8_init_indirectctx_bb;
+		wa_bb_fn[1] = NULL;
+		break;
+	default:
+		MISSING_CASE(INTEL_GEN(engine->i915));
+		return;
 	}
 
-	for (n = 0; n < count; n++) {
-		struct intel_engine_cs *sibling = siblings[n];
-
-		GEM_BUG_ON(!is_power_of_2(sibling->mask));
-		if (sibling->mask & ve->base.mask) {
-			DRM_DEBUG("duplicate %s entry in load balancer\n",
-				  sibling->name);
-			err = -EINVAL;
-			goto err_put;
-		}
-
+	err = lrc_setup_wa_ctx(engine);
+	if (err) {
 		/*
-		 * The virtual engine implementation is tightly coupled to
-		 * the execlists backend -- we push out request directly
-		 * into a tree inside each physical engine. We could support
-		 * layering if we handle cloning of the requests and
-		 * submitting a copy into each backend.
+		 * We continue even if we fail to initialize the WA batch
+		 * because we only expect rare glitches, nothing critical
+		 * enough to prevent us from using the GPU.
 		 */
-		if (sibling->execlists.tasklet.func !=
-		    execlists_submission_tasklet) {
-			err = -ENODEV;
-			goto err_put;
-		}
-
-		GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
-		RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
+		drm_err(&engine->i915->drm,
+			"Ignoring context switch w/a allocation error:%d\n",
+			err);
+		return;
+	}
 
-		ve->siblings[ve->num_siblings++] = sibling;
-		ve->base.mask |= sibling->mask;
+	batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
+	if (IS_ERR(batch)) {
+		drm_err(&engine->i915->drm,
+			"Ignoring context switch w/a mapping error:%ld\n",
+			PTR_ERR(batch));
+		lrc_fini_wa_ctx(engine);
+		return;
+	}
 
-		/*
-		 * All physical engines must be compatible for their emission
-		 * functions (as we build the instructions during request
-		 * construction and do not alter them before submission
-		 * on the physical engine). We use the engine class as a guide
-		 * here, although that could be refined.
-		 */
-		if (ve->base.class != OTHER_CLASS) {
-			if (ve->base.class != sibling->class) {
-				DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
-					  sibling->class, ve->base.class);
-				err = -EINVAL;
-				goto err_put;
-			}
-			continue;
+	/*
+	 * Emit the two workaround batch buffers, recording the offset from the
+	 * start of the workaround batch buffer object for each and their
+	 * respective sizes.
+	 */
+	batch_ptr = batch;
+	for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
+		wa_bb[i]->offset = batch_ptr - batch;
+		if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
+						  CACHELINE_BYTES))) {
+			err = -EINVAL;
+			break;
 		}
-
-		ve->base.class = sibling->class;
-		ve->base.uabi_class = sibling->uabi_class;
-		snprintf(ve->base.name, sizeof(ve->base.name),
-			 "v%dx%d", ve->base.class, count);
-		ve->base.context_size = sibling->context_size;
-
-		ve->base.emit_bb_start = sibling->emit_bb_start;
-		ve->base.emit_flush = sibling->emit_flush;
-		ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
-		ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
-		ve->base.emit_fini_breadcrumb_dw =
-			sibling->emit_fini_breadcrumb_dw;
-
-		ve->base.flags = sibling->flags;
+		if (wa_bb_fn[i])
+			batch_ptr = wa_bb_fn[i](engine, batch_ptr);
+		wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
 	}
+	GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_SIZE);
 
-	ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
-
-	virtual_engine_initial_hint(ve);
-	return &ve->context;
+	__i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
+	__i915_gem_object_release_map(wa_ctx->vma->obj);
 
-err_put:
-	intel_context_put(&ve->context);
-	return ERR_PTR(err);
+	/* Verify that we can handle failure to set up the wa_ctx */
+	if (err || i915_inject_probe_error(engine->i915, -ENODEV))
+		lrc_fini_wa_ctx(engine);
 }
 
-struct intel_context *
-intel_execlists_clone_virtual(struct intel_engine_cs *src)
+static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
 {
-	struct virtual_engine *se = to_virtual_engine(src);
-	struct intel_context *dst;
-
-	dst = intel_execlists_create_virtual(se->siblings,
-					     se->num_siblings);
-	if (IS_ERR(dst))
-		return dst;
-
-	if (se->num_bonds) {
-		struct virtual_engine *de = to_virtual_engine(dst->engine);
-
-		de->bonds = kmemdup(se->bonds,
-				    sizeof(*se->bonds) * se->num_bonds,
-				    GFP_KERNEL);
-		if (!de->bonds) {
-			intel_context_put(dst);
-			return ERR_PTR(-ENOMEM);
-		}
-
-		de->num_bonds = se->num_bonds;
-	}
-
-	return dst;
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+	ce->runtime.num_underflow++;
+	ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
+#endif
 }
 
-int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
-				     const struct intel_engine_cs *master,
-				     const struct intel_engine_cs *sibling)
+void lrc_update_runtime(struct intel_context *ce)
 {
-	struct virtual_engine *ve = to_virtual_engine(engine);
-	struct ve_bond *bond;
-	int n;
-
-	/* Sanity check the sibling is part of the virtual engine */
-	for (n = 0; n < ve->num_siblings; n++)
-		if (sibling == ve->siblings[n])
-			break;
-	if (n == ve->num_siblings)
-		return -EINVAL;
-
-	bond = virtual_find_bond(ve, master);
-	if (bond) {
-		bond->sibling_mask |= sibling->mask;
-		return 0;
-	}
-
-	bond = krealloc(ve->bonds,
-			sizeof(*bond) * (ve->num_bonds + 1),
-			GFP_KERNEL);
-	if (!bond)
-		return -ENOMEM;
-
-	bond[ve->num_bonds].master = master;
-	bond[ve->num_bonds].sibling_mask = sibling->mask;
-
-	ve->bonds = bond;
-	ve->num_bonds++;
-
-	return 0;
-}
+	u32 old;
+	s32 dt;
 
-void intel_execlists_show_requests(struct intel_engine_cs *engine,
-				   struct drm_printer *m,
-				   void (*show_request)(struct drm_printer *m,
-							struct i915_request *rq,
-							const char *prefix),
-				   unsigned int max)
-{
-	const struct intel_engine_execlists *execlists = &engine->execlists;
-	struct i915_request *rq, *last;
-	unsigned long flags;
-	unsigned int count;
-	struct rb_node *rb;
-
-	spin_lock_irqsave(&engine->active.lock, flags);
-
-	last = NULL;
-	count = 0;
-	list_for_each_entry(rq, &engine->active.requests, sched.link) {
-		if (count++ < max - 1)
-			show_request(m, rq, "\t\tE ");
-		else
-			last = rq;
-	}
-	if (last) {
-		if (count > max) {
-			drm_printf(m,
-				   "\t\t...skipping %d executing requests...\n",
-				   count - max);
-		}
-		show_request(m, last, "\t\tE ");
-	}
+	if (intel_context_is_barrier(ce))
+		return;
 
-	if (execlists->switch_priority_hint != INT_MIN)
-		drm_printf(m, "\t\tSwitch priority hint: %d\n",
-			   READ_ONCE(execlists->switch_priority_hint));
-	if (execlists->queue_priority_hint != INT_MIN)
-		drm_printf(m, "\t\tQueue priority hint: %d\n",
-			   READ_ONCE(execlists->queue_priority_hint));
-
-	last = NULL;
-	count = 0;
-	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
-		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
-		int i;
-
-		priolist_for_each_request(rq, p, i) {
-			if (count++ < max - 1)
-				show_request(m, rq, "\t\tQ ");
-			else
-				last = rq;
-		}
-	}
-	if (last) {
-		if (count > max) {
-			drm_printf(m,
-				   "\t\t...skipping %d queued requests...\n",
-				   count - max);
-		}
-		show_request(m, last, "\t\tQ ");
-	}
+	old = ce->runtime.last;
+	ce->runtime.last = lrc_get_runtime(ce);
+	dt = ce->runtime.last - old;
 
-	last = NULL;
-	count = 0;
-	for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
-		struct virtual_engine *ve =
-			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
-		struct i915_request *rq = READ_ONCE(ve->request);
-
-		if (rq) {
-			if (count++ < max - 1)
-				show_request(m, rq, "\t\tV ");
-			else
-				last = rq;
-		}
-	}
-	if (last) {
-		if (count > max) {
-			drm_printf(m,
-				   "\t\t...skipping %d virtual requests...\n",
-				   count - max);
-		}
-		show_request(m, last, "\t\tV ");
+	if (unlikely(dt < 0)) {
+		CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
+			 old, ce->runtime.last, dt);
+		st_update_runtime_underflow(ce, dt);
+		return;
 	}
 
-	spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-void intel_lr_context_reset(struct intel_engine_cs *engine,
-			    struct intel_context *ce,
-			    u32 head,
-			    bool scrub)
-{
-	GEM_BUG_ON(!intel_context_is_pinned(ce));
-
-	/*
-	 * We want a simple context + ring to execute the breadcrumb update.
-	 * We cannot rely on the context being intact across the GPU hang,
-	 * so clear it and rebuild just what we need for the breadcrumb.
-	 * All pending requests for this context will be zapped, and any
-	 * future request will be after userspace has had the opportunity
-	 * to recreate its own state.
-	 */
-	if (scrub)
-		restore_default_state(ce, engine);
-
-	/* Rerun the request; its payload has been neutered (if guilty). */
-	__execlists_update_reg_state(ce, engine, head);
-}
-
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
-{
-	return engine->set_default_submission ==
-	       intel_execlists_set_default_submission;
+	ewma_runtime_add(&ce->runtime.avg, dt);
+	ce->runtime.total += dt;
 }
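
The u32 arithmetic in lrc_update_runtime() is deliberately wrap-safe: subtracting two unsigned timestamps and reinterpreting the difference as s32 yields the true delta across a counter wrap, so a genuinely negative dt signals a real underflow (a glitched context save) rather than a wrap. For instance:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t old = 0xfffffff0u;	/* timestamp just before the counter wraps */
	uint32_t now = 0x00000010u;	/* timestamp shortly after it wraps */

	/* unsigned subtraction wraps mod 2^32, so reinterpreting the
	 * difference as s32 gives the true delta for any gap < 2^31 */
	int32_t dt = (int32_t)(now - old);

	printf("dt = %d\n", dt);	/* prints 32, not ~4 billion */
	return 0;
}
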
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index c2d287f25497a3b6a782a508b386d01e6af00420..7f697845c4cf6d6da3b1c99ed59e2db8b0d73186 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -1,90 +1,20 @@
+/* SPDX-License-Identifier: MIT */
 /*
  * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
  */
 
-#ifndef _INTEL_LRC_H_
-#define _INTEL_LRC_H_
+#ifndef __INTEL_LRC_H__
+#define __INTEL_LRC_H__
 
 #include <linux/types.h>
 
-struct drm_printer;
+#include "intel_context.h"
+#include "intel_lrc_reg.h"
 
-struct drm_i915_private;
-struct i915_gem_context;
-struct i915_request;
-struct intel_context;
+struct drm_i915_gem_object;
 struct intel_engine_cs;
+struct intel_ring;
 
-/* Execlists regs */
-#define RING_ELSP(base)				_MMIO((base) + 0x230)
-#define RING_EXECLIST_STATUS_LO(base)		_MMIO((base) + 0x234)
-#define RING_EXECLIST_STATUS_HI(base)		_MMIO((base) + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(base)		_MMIO((base) + 0x244)
-#define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	(1 << 3)
-#define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
-#define   CTX_CTRL_RS_CTX_ENABLE		(1 << 1)
-#define	  CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT	(1 << 2)
-#define	  GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE	(1 << 8)
-#define RING_CONTEXT_STATUS_PTR(base)		_MMIO((base) + 0x3a0)
-#define RING_EXECLIST_SQ_CONTENTS(base)		_MMIO((base) + 0x510)
-#define RING_EXECLIST_CONTROL(base)		_MMIO((base) + 0x550)
-
-#define	  EL_CTRL_LOAD				(1 << 0)
-
-/* The docs specify that the write pointer wraps around after 5h, "After status
- * is written out to the last available status QW at offset 5h, this pointer
- * wraps to 0."
- *
- * Therefore, one must infer than even though there are 3 bits available, 6 and
- * 7 appear to be * reserved.
- */
-#define GEN8_CSB_ENTRIES 6
-#define GEN8_CSB_PTR_MASK 0x7
-#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
-#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
-
-#define GEN11_CSB_ENTRIES 12
-#define GEN11_CSB_PTR_MASK 0xf
-#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
-#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
-
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
-#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
-#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
-/* in Gen12 ID 0x7FF is reserved to indicate idle */
-#define GEN12_MAX_CONTEXT_HW_ID	(GEN11_MAX_CONTEXT_HW_ID - 1)
-
-enum {
-	INTEL_CONTEXT_SCHEDULE_IN = 0,
-	INTEL_CONTEXT_SCHEDULE_OUT,
-	INTEL_CONTEXT_SCHEDULE_PREEMPTED,
-};
-
-/* Logical Rings */
-void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
-
-int intel_execlists_submission_setup(struct intel_engine_cs *engine);
-
-/* Logical Ring Contexts */
 /* At the start of the context image is its per-process HWS page */
 #define LRC_PPHWSP_PN	(0)
 #define LRC_PPHWSP_SZ	(1)
@@ -96,32 +26,57 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine);
 #define LRC_PPHWSP_SCRATCH		0x34
 #define LRC_PPHWSP_SCRATCH_ADDR		(LRC_PPHWSP_SCRATCH * sizeof(u32))
 
-void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
-
-void intel_lr_context_reset(struct intel_engine_cs *engine,
-			    struct intel_context *ce,
-			    u32 head,
-			    bool scrub);
-
-void intel_execlists_show_requests(struct intel_engine_cs *engine,
-				   struct drm_printer *m,
-				   void (*show_request)(struct drm_printer *m,
-							struct i915_request *rq,
-							const char *prefix),
-				   unsigned int max);
-
-struct intel_context *
-intel_execlists_create_virtual(struct intel_engine_cs **siblings,
-			       unsigned int count);
-
-struct intel_context *
-intel_execlists_clone_virtual(struct intel_engine_cs *src);
-
-int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
-				     const struct intel_engine_cs *master,
-				     const struct intel_engine_cs *sibling);
-
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
-
-#endif /* _INTEL_LRC_H_ */
+void lrc_init_wa_ctx(struct intel_engine_cs *engine);
+void lrc_fini_wa_ctx(struct intel_engine_cs *engine);
+
+int lrc_alloc(struct intel_context *ce,
+	      struct intel_engine_cs *engine);
+void lrc_reset(struct intel_context *ce);
+void lrc_fini(struct intel_context *ce);
+void lrc_destroy(struct kref *kref);
+
+int
+lrc_pre_pin(struct intel_context *ce,
+	    struct intel_engine_cs *engine,
+	    struct i915_gem_ww_ctx *ww,
+	    void **vaddr);
+int
+lrc_pin(struct intel_context *ce,
+	struct intel_engine_cs *engine,
+	void *vaddr);
+void lrc_unpin(struct intel_context *ce);
+void lrc_post_unpin(struct intel_context *ce);
+
+void lrc_init_state(struct intel_context *ce,
+		    struct intel_engine_cs *engine,
+		    void *state);
+
+void lrc_init_regs(const struct intel_context *ce,
+		   const struct intel_engine_cs *engine,
+		   bool clear);
+void lrc_reset_regs(const struct intel_context *ce,
+		    const struct intel_engine_cs *engine);
+
+u32 lrc_update_regs(const struct intel_context *ce,
+		    const struct intel_engine_cs *engine,
+		    u32 head);
+void lrc_update_offsets(struct intel_context *ce,
+			struct intel_engine_cs *engine);
+
+void lrc_check_regs(const struct intel_context *ce,
+		    const struct intel_engine_cs *engine,
+		    const char *when);
+
+void lrc_update_runtime(struct intel_context *ce);
+static inline u32 lrc_get_runtime(const struct intel_context *ce)
+{
+	/*
+	 * We can use either ppHWSP[16] which is recorded before the context
+	 * switch (and so excludes the cost of context switches) or use the
+	 * value from the context image itself, which is saved/restored earlier
+	 * and so includes the cost of the save.
+	 */
+	return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
+#endif /* __INTEL_LRC_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 1b51f7b9a5c3b6c0bac338347ec883c7236c3d28..65fe76738335003074b272e610cb4f386ab7c542 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -9,6 +9,8 @@
 
 #include <linux/types.h>
 
+#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
+
 /* GEN8 to GEN12 Reg State Context */
 #define CTX_CONTEXT_CONTROL		(0x02 + 1)
 #define CTX_RING_HEAD			(0x04 + 1)
@@ -52,4 +54,43 @@
 #define GEN8_EXECLISTS_STATUS_BUF 0x370
 #define GEN11_EXECLISTS_STATUS_BUF2 0x3c0
 
+/* Execlists regs */
+#define RING_ELSP(base)				_MMIO((base) + 0x230)
+#define RING_EXECLIST_STATUS_LO(base)		_MMIO((base) + 0x234)
+#define RING_EXECLIST_STATUS_HI(base)		_MMIO((base) + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(base)		_MMIO((base) + 0x244)
+#define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	REG_BIT(0)
+#define   CTX_CTRL_RS_CTX_ENABLE		REG_BIT(1)
+#define	  CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT	REG_BIT(2)
+#define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	REG_BIT(3)
+#define	  GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE	REG_BIT(8)
+#define RING_CONTEXT_STATUS_PTR(base)		_MMIO((base) + 0x3a0)
+#define RING_EXECLIST_SQ_CONTENTS(base)		_MMIO((base) + 0x510)
+#define RING_EXECLIST_CONTROL(base)		_MMIO((base) + 0x550)
+#define	  EL_CTRL_LOAD				REG_BIT(0)
+
+/*
+ * The docs specify that the write pointer wraps around after 5h, "After status
+ * is written out to the last available status QW at offset 5h, this pointer
+ * wraps to 0."
+ *
+ * Therefore, one must infer that even though there are 3 bits available, 6 and
+ * 7 appear to be reserved.
+ */
+ */
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x7
+#define GEN8_CSB_READ_PTR_MASK	(GEN8_CSB_PTR_MASK << 8)
+#define GEN8_CSB_WRITE_PTR_MASK	(GEN8_CSB_PTR_MASK << 0)
+
+#define GEN11_CSB_ENTRIES 12
+#define GEN11_CSB_PTR_MASK 0xf
+#define GEN11_CSB_READ_PTR_MASK		(GEN11_CSB_PTR_MASK << 8)
+#define GEN11_CSB_WRITE_PTR_MASK	(GEN11_CSB_PTR_MASK << 0)
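
With the masks above, decoding the CSB pointer register is two bit operations. A runnable sketch, reusing the Gen8 masks; the register value is made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define GEN8_CSB_PTR_MASK	0x7
#define GEN8_CSB_READ_PTR_MASK	(GEN8_CSB_PTR_MASK << 8)
#define GEN8_CSB_WRITE_PTR_MASK	(GEN8_CSB_PTR_MASK << 0)

int main(void)
{
	uint32_t ptr_reg = 0x0305;	/* made-up RING_CONTEXT_STATUS_PTR value */
	unsigned int rd = (ptr_reg & GEN8_CSB_READ_PTR_MASK) >> 8;
	unsigned int wr = ptr_reg & GEN8_CSB_WRITE_PTR_MASK;

	printf("csb read=%u write=%u\n", rd, wr);
	return 0;
}
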
+
+#define MAX_CONTEXT_HW_ID	(1 << 21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID	(1 << 20) /* exclusive */
+#define GEN11_MAX_CONTEXT_HW_ID	(1 << 11) /* exclusive */
+/* in Gen12 ID 0x7FF is reserved to indicate idle */
+#define GEN12_MAX_CONTEXT_HW_ID	(GEN11_MAX_CONTEXT_HW_ID - 1)
+
 #endif /* _INTEL_LRC_REG_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index ab6870242e18c1bf04307098883cb8c7a5cfbfc7..8acb84960cd06c438ece24b0ce0902dc5e75c6c6 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -24,8 +24,8 @@
 
 #include "intel_engine.h"
 #include "intel_gt.h"
+#include "intel_lrc_reg.h"
 #include "intel_mocs.h"
-#include "intel_lrc.h"
 #include "intel_ring.h"
 
 /* structures required */
@@ -472,7 +472,7 @@ static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
 	return table->table[I915_MOCS_PTE].l3cc_value;
 }
 
-static inline u32 l3cc_combine(u16 low, u16 high)
+static u32 l3cc_combine(u16 low, u16 high)
 {
 	return low | (u32)high << 16;
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 46d9aceda64cdd260332794dec7dd974e9e58645..96b85a10ef33a1f1a477d724d2642ca1e2e3523a 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -80,7 +80,7 @@ void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
 	kfree(pt);
 }
 
-static inline void
+static void
 write_dma_entry(struct drm_i915_gem_object * const pdma,
 		const unsigned short idx,
 		const u64 encoded_entry)
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index d7b8e4457fc28e6e54b35fc38a92b19922d645f1..35504c97f11d45a0db1b64d61707b01b5d598e08 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -49,7 +49,7 @@ static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc)
 	return rc6_to_gt(rc)->i915;
 }
 
-static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
 {
 	intel_uncore_write_fw(uncore, reg, val);
 }
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
similarity index 96%
rename from drivers/gpu/drm/i915/intel_region_lmem.c
rename to drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 40d8f1a95df61fdf286419ad97da8a446f593ace..60393ce5614d40d98f4af31b591ce119680998d5 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -95,10 +95,10 @@ region_lmem_init(struct intel_memory_region *mem)
 	return ret;
 }
 
-const struct intel_memory_region_ops intel_region_lmem_ops = {
+static const struct intel_memory_region_ops intel_region_lmem_ops = {
 	.init = region_lmem_init,
 	.release = region_lmem_release,
-	.create_object = __i915_gem_lmem_object_create,
+	.init_object = __i915_gem_lmem_object_init,
 };
 
 struct intel_memory_region *
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.h b/drivers/gpu/drm/i915/gt/intel_region_lmem.h
similarity index 80%
rename from drivers/gpu/drm/i915/intel_region_lmem.h
rename to drivers/gpu/drm/i915/gt/intel_region_lmem.h
index 213def7c7b8a555bc06fa1269837dd5cfa8a3ade..8ea43e538dab8fc6308b73d5ad45851e176c91e6 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.h
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.h
@@ -8,8 +8,6 @@
 
 struct drm_i915_private;
 
-extern const struct intel_memory_region_ops intel_region_lmem_ops;
-
 struct intel_memory_region *
 intel_setup_fake_lmem(struct drm_i915_private *i915);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index ea2a77c7b4692a0cfc769c966e8fcbf1e960b1f6..ca816ba22197123358cc8a032b1b02face075861 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -27,7 +27,8 @@
 
 #include "i915_drv.h"
 #include "intel_renderstate.h"
-#include "gt/intel_context.h"
+#include "intel_context.h"
+#include "intel_gpu_commands.h"
 #include "intel_ring.h"
 
 static const struct intel_renderstate_rodata *
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 3654c955e6be8c7713b1a8cb9f4a9f0bab98bb7b..61410cd6292782f3e5ed48790e442a94f1527931 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -40,20 +40,19 @@ static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
 	intel_uncore_rmw_fw(uncore, reg, clr, 0);
 }
 
-static void engine_skip_context(struct i915_request *rq)
+static void skip_context(struct i915_request *rq)
 {
-	struct intel_engine_cs *engine = rq->engine;
 	struct intel_context *hung_ctx = rq->context;
 
-	if (!i915_request_is_active(rq))
-		return;
+	list_for_each_entry_from_rcu(rq, &hung_ctx->timeline->requests, link) {
+		if (!i915_request_is_active(rq))
+			return;
 
-	lockdep_assert_held(&engine->active.lock);
-	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
 		if (rq->context == hung_ctx) {
 			i915_request_set_error_once(rq, -EIO);
 			__i915_request_skip(rq);
 		}
+	}
 }
 
 static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
@@ -152,15 +151,14 @@ static void mark_innocent(struct i915_request *rq)
 void __i915_request_reset(struct i915_request *rq, bool guilty)
 {
 	RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
-
-	GEM_BUG_ON(i915_request_completed(rq));
+	GEM_BUG_ON(__i915_request_is_complete(rq));
 
 	rcu_read_lock(); /* protect the GEM context */
 	if (guilty) {
 		i915_request_set_error_once(rq, -EIO);
 		__i915_request_skip(rq);
 		if (mark_guilty(rq))
-			engine_skip_context(rq);
+			skip_context(rq);
 	} else {
 		i915_request_set_error_once(rq, -EAGAIN);
 		mark_innocent(rq);
@@ -231,7 +229,7 @@ static int g4x_do_reset(struct intel_gt *gt,
 			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
 	ret =  wait_for_atomic(g4x_reset_complete(pdev), 50);
 	if (ret) {
-		drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
+		GT_TRACE(gt, "Wait for media reset failed\n");
 		goto out;
 	}
 
@@ -239,7 +237,7 @@ static int g4x_do_reset(struct intel_gt *gt,
 			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
 	ret =  wait_for_atomic(g4x_reset_complete(pdev), 50);
 	if (ret) {
-		drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
+		GT_TRACE(gt, "Wait for render reset failed\n");
 		goto out;
 	}
 
@@ -265,7 +263,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
 					   5000, 0,
 					   NULL);
 	if (ret) {
-		drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
+		GT_TRACE(gt, "Wait for render reset failed\n");
 		goto out;
 	}
 
@@ -276,7 +274,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
 					   5000, 0,
 					   NULL);
 	if (ret) {
-		drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
+		GT_TRACE(gt, "Wait for media reset failed\n");
 		goto out;
 	}
 
@@ -305,9 +303,9 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
 					   500, 0,
 					   NULL);
 	if (err)
-		drm_dbg(&gt->i915->drm,
-			"Wait for 0x%08x engines reset failed\n",
-			hw_domain_mask);
+		GT_TRACE(gt,
+			 "Wait for 0x%08x engines reset failed\n",
+			 hw_domain_mask);
 
 	return err;
 }
@@ -407,8 +405,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
 		return 0;
 
 	if (ret) {
-		drm_dbg(&engine->i915->drm,
-			"Wait for SFC forced lock ack failed\n");
+		ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
 		return ret;
 	}
 
@@ -499,6 +496,9 @@ static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
 	u32 request, mask, ack;
 	int ret;
 
+	if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
+		return -ETIMEDOUT;
+
 	ack = intel_uncore_read_fw(uncore, reg);
 	if (ack & RESET_CTL_CAT_ERROR) {
 		/*
@@ -754,8 +754,10 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
 	if (err)
 		return err;
 
+	local_bh_disable();
 	for_each_engine(engine, gt, id)
 		__intel_engine_reset(engine, stalled_mask & engine->mask);
+	local_bh_enable();
 
 	intel_ggtt_restore_fences(gt->ggtt);
 
@@ -833,9 +835,11 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
 	set_bit(I915_WEDGED, &gt->reset.flags);
 
 	/* Mark all executing requests as skipped */
+	local_bh_disable();
 	for_each_engine(engine, gt, id)
 		if (engine->reset.cancel)
 			engine->reset.cancel(engine);
+	local_bh_enable();
 
 	reset_finish(gt, awake);
 
@@ -1105,25 +1109,12 @@ void intel_gt_reset(struct intel_gt *gt,
 	goto finish;
 }
 
-static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
+static int intel_gt_reset_engine(struct intel_engine_cs *engine)
 {
 	return __intel_gt_reset(engine->gt, engine->mask);
 }
 
-/**
- * intel_engine_reset - reset GPU engine to recover from a hang
- * @engine: engine to reset
- * @msg: reason for GPU reset; or NULL for no drm_notice()
- *
- * Reset a specific GPU engine. Useful if a hang is detected.
- * Returns zero on successful reset or otherwise an error code.
- *
- * Procedure is:
- *  - identifies the request that caused the hang and it is dropped
- *  - reset engine (which will force the engine to idle)
- *  - re-init/configure engine
- */
-int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
+int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
 {
 	struct intel_gt *gt = engine->gt;
 	bool uses_guc = intel_engine_in_guc_submission_mode(engine);
@@ -1148,8 +1139,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
 		ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
 	if (ret) {
 		/* If we fail here, we expect to fallback to a global reset */
-		drm_dbg(&gt->i915->drm, "%sFailed to reset %s, ret=%d\n",
-			uses_guc ? "GuC " : "", engine->name, ret);
+		ENGINE_TRACE(engine, "Failed to reset, err: %d\n", ret);
 		goto out;
 	}
 
@@ -1174,6 +1164,30 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
 	return ret;
 }
 
+/**
+ * intel_engine_reset - reset GPU engine to recover from a hang
+ * @engine: engine to reset
+ * @msg: reason for GPU reset; or NULL for no drm_notice()
+ *
+ * Reset a specific GPU engine. Useful if a hang is detected.
+ * Returns zero on successful reset or otherwise an error code.
+ *
+ * Procedure is:
+ *  - identify the request that caused the hang and drop it
+ *  - reset engine (which will force the engine to idle)
+ *  - re-init/configure engine
+ */
+int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
+{
+	int err;
+
+	local_bh_disable();
+	err = __intel_engine_reset_bh(engine, msg);
+	local_bh_enable();
+
+	return err;
+}
+
 static void intel_gt_reset_global(struct intel_gt *gt,
 				  u32 engine_mask,
 				  const char *reason)
@@ -1186,7 +1200,7 @@ static void intel_gt_reset_global(struct intel_gt *gt,
 
 	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
 
-	drm_dbg(&gt->i915->drm, "resetting chip, engines=%x\n", engine_mask);
+	GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
 	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
 
 	/* Use a watchdog to ensure that our reset completes */
@@ -1260,18 +1274,20 @@ void intel_gt_handle_error(struct intel_gt *gt,
 	 * single reset fails.
 	 */
 	if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
+		local_bh_disable();
 		for_each_engine_masked(engine, gt, engine_mask, tmp) {
 			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
 			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
 					     &gt->reset.flags))
 				continue;
 
-			if (intel_engine_reset(engine, msg) == 0)
+			if (__intel_engine_reset_bh(engine, msg) == 0)
 				engine_mask &= ~engine->mask;
 
 			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
 					      &gt->reset.flags);
 		}
+		local_bh_enable();
 	}
 
 	if (!engine_mask)
@@ -1380,6 +1396,17 @@ void intel_gt_init_reset(struct intel_gt *gt)
 	mutex_init(&gt->reset.mutex);
 	init_srcu_struct(&gt->reset.backoff_srcu);
 
+	/*
+	 * While undesirable to wait inside the shrinker, complain anyway.
+	 *
+	 * If we have to wait during shrinking, we guarantee forward progress
+	 * by forcing the reset. Therefore during the reset we must not
+	 * re-enter the shrinker. By declaring that we take the reset mutex
+	 * within the shrinker, we forbid ourselves from performing any
+	 * fs-reclaim or taking related locks during reset.
+	 */
+	i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);
+
 	/* no GPU until we are ready! */
 	__set_bit(I915_WEDGED, &gt->reset.flags);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index a0eec7c11c0ccd1d7a484ffecc18353519ac1ebf..7dbf5cc8a33344eaa514a4214134e3c0f1512c82 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -34,6 +34,8 @@ void intel_gt_reset(struct intel_gt *gt,
 		    const char *reason);
 int intel_engine_reset(struct intel_engine_cs *engine,
 		       const char *reason);
+int __intel_engine_reset_bh(struct intel_engine_cs *engine,
+			    const char *reason);
 
 void __i915_request_reset(struct i915_request *rq, bool guilty);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 4034a4bac7f08669927f5e8d9f1478ca46baf57a..78d1360caa0fdc4902c7d42bf1c379b6706906fa 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -5,9 +5,11 @@
  */
 
 #include "gem/i915_gem_object.h"
+
 #include "i915_drv.h"
 #include "i915_vma.h"
 #include "intel_engine.h"
+#include "intel_gpu_commands.h"
 #include "intel_ring.h"
 #include "intel_timeline.h"
 
@@ -40,7 +42,7 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
 	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
 	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
 
-	if (vma->obj->stolen)
+	if (i915_gem_object_is_stolen(vma->obj))
 		flags |= PIN_MAPPABLE;
 	else
 		flags |= PIN_HIGH;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index ecf3a6118a6d7989a36488672faca28edf37fdc6..4984ff5654240442c9b61ede4192c35289a7a1a4 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -122,31 +122,27 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
 		hwsp = RING_HWS_PGA(engine->mmio_base);
 	}
 
-	intel_uncore_write(engine->uncore, hwsp, offset);
-	intel_uncore_posting_read(engine->uncore, hwsp);
+	intel_uncore_write_fw(engine->uncore, hwsp, offset);
+	intel_uncore_posting_read_fw(engine->uncore, hwsp);
 }
 
 static void flush_cs_tlb(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-
-	if (!IS_GEN_RANGE(dev_priv, 6, 7))
+	if (!IS_GEN_RANGE(engine->i915, 6, 7))
 		return;
 
 	/* ring should be idle before issuing a sync flush */
-	drm_WARN_ON(&dev_priv->drm,
-		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
-
-	ENGINE_WRITE(engine, RING_INSTPM,
-		     _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
-					INSTPM_SYNC_FLUSH));
-	if (intel_wait_for_register(engine->uncore,
-				    RING_INSTPM(engine->mmio_base),
-				    INSTPM_SYNC_FLUSH, 0,
-				    1000))
-		drm_err(&dev_priv->drm,
-			"%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
-			engine->name);
+	GEM_DEBUG_WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+
+	ENGINE_WRITE_FW(engine, RING_INSTPM,
+			_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+					   INSTPM_SYNC_FLUSH));
+	if (__intel_wait_for_register_fw(engine->uncore,
+					 RING_INSTPM(engine->mmio_base),
+					 INSTPM_SYNC_FLUSH, 0,
+					 2000, 0, NULL))
+		ENGINE_TRACE(engine,
+			     "wait for SyncFlush to complete for TLB invalidation timed out\n");
 }
 
 static void ring_setup_status_page(struct intel_engine_cs *engine)
@@ -157,44 +153,6 @@ static void ring_setup_status_page(struct intel_engine_cs *engine)
 	flush_cs_tlb(engine);
 }
 
-static bool stop_ring(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-
-	if (INTEL_GEN(dev_priv) > 2) {
-		ENGINE_WRITE(engine,
-			     RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
-		if (intel_wait_for_register(engine->uncore,
-					    RING_MI_MODE(engine->mmio_base),
-					    MODE_IDLE,
-					    MODE_IDLE,
-					    1000)) {
-			drm_err(&dev_priv->drm,
-				"%s : timed out trying to stop ring\n",
-				engine->name);
-
-			/*
-			 * Sometimes we observe that the idle flag is not
-			 * set even though the ring is empty. So double
-			 * check before giving up.
-			 */
-			if (ENGINE_READ(engine, RING_HEAD) !=
-			    ENGINE_READ(engine, RING_TAIL))
-				return false;
-		}
-	}
-
-	ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));
-
-	ENGINE_WRITE(engine, RING_HEAD, 0);
-	ENGINE_WRITE(engine, RING_TAIL, 0);
-
-	/* The ring must be empty before it is disabled */
-	ENGINE_WRITE(engine, RING_CTL, 0);
-
-	return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
-}
-
 static struct i915_address_space *vm_alias(struct i915_address_space *vm)
 {
 	if (i915_is_ggtt(vm))
@@ -212,9 +170,16 @@ static void set_pp_dir(struct intel_engine_cs *engine)
 {
 	struct i915_address_space *vm = vm_alias(engine->gt->vm);
 
-	if (vm) {
-		ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
-		ENGINE_WRITE(engine, RING_PP_DIR_BASE, pp_dir(vm));
+	if (!vm)
+		return;
+
+	ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
+	ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));
+
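+	/* Gen7 onwards carries the PPGTT enable in the per-ring MODE register */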
+	if (INTEL_GEN(engine->i915) >= 7) {
+		ENGINE_WRITE_FW(engine,
+				RING_MODE_GEN7,
+				_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 	}
 }
 
@@ -222,38 +187,10 @@ static int xcs_resume(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	struct intel_ring *ring = engine->legacy.ring;
-	int ret = 0;
 
 	ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
 		     ring->head, ring->tail);
 
-	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
-
-	/* WaClearRingBufHeadRegAtInit:ctg,elk */
-	if (!stop_ring(engine)) {
-		/* G45 ring initialization often fails to reset head to zero */
-		drm_dbg(&dev_priv->drm, "%s head not reset to zero "
-			"ctl %08x head %08x tail %08x start %08x\n",
-			engine->name,
-			ENGINE_READ(engine, RING_CTL),
-			ENGINE_READ(engine, RING_HEAD),
-			ENGINE_READ(engine, RING_TAIL),
-			ENGINE_READ(engine, RING_START));
-
-		if (!stop_ring(engine)) {
-			drm_err(&dev_priv->drm,
-				"failed to set %s head to zero "
-				"ctl %08x head %08x tail %08x start %08x\n",
-				engine->name,
-				ENGINE_READ(engine, RING_CTL),
-				ENGINE_READ(engine, RING_HEAD),
-				ENGINE_READ(engine, RING_TAIL),
-				ENGINE_READ(engine, RING_START));
-			ret = -EIO;
-			goto out;
-		}
-	}
-
 	if (HWS_NEEDS_PHYSICAL(dev_priv))
 		ring_setup_phys_status_page(engine);
 	else
@@ -270,7 +207,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
 	 * also enforces ordering), otherwise the hw might lose the new ring
 	 * register values.
 	 */
-	ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
+	ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));
 
 	/* Check that the ring offsets point within the ring! */
 	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
@@ -280,53 +217,98 @@ static int xcs_resume(struct intel_engine_cs *engine)
 	set_pp_dir(engine);
 
 	/* First wake the ring up to an empty/idle ring */
-	ENGINE_WRITE(engine, RING_HEAD, ring->head);
-	ENGINE_WRITE(engine, RING_TAIL, ring->head);
+	ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
+	ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
 	ENGINE_POSTING_READ(engine, RING_TAIL);
 
-	ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);
+	ENGINE_WRITE_FW(engine, RING_CTL,
+			RING_CTL_SIZE(ring->size) | RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
-	if (intel_wait_for_register(engine->uncore,
-				    RING_CTL(engine->mmio_base),
-				    RING_VALID, RING_VALID,
-				    50)) {
-		drm_err(&dev_priv->drm, "%s initialization failed "
-			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
-			  engine->name,
-			  ENGINE_READ(engine, RING_CTL),
-			  ENGINE_READ(engine, RING_CTL) & RING_VALID,
-			  ENGINE_READ(engine, RING_HEAD), ring->head,
-			  ENGINE_READ(engine, RING_TAIL), ring->tail,
-			  ENGINE_READ(engine, RING_START),
-			  i915_ggtt_offset(ring->vma));
-		ret = -EIO;
-		goto out;
+	if (__intel_wait_for_register_fw(engine->uncore,
+					 RING_CTL(engine->mmio_base),
+					 RING_VALID, RING_VALID,
+					 5000, 0, NULL)) {
+		drm_err(&dev_priv->drm,
+			"%s initialization failed; "
+			"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
+			engine->name,
+			ENGINE_READ(engine, RING_CTL),
+			ENGINE_READ(engine, RING_CTL) & RING_VALID,
+			ENGINE_READ(engine, RING_HEAD), ring->head,
+			ENGINE_READ(engine, RING_TAIL), ring->tail,
+			ENGINE_READ(engine, RING_START),
+			i915_ggtt_offset(ring->vma));
+		return -EIO;
 	}
 
 	if (INTEL_GEN(dev_priv) > 2)
-		ENGINE_WRITE(engine,
-			     RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+		ENGINE_WRITE_FW(engine,
+				RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
 
 	/* Now awake, let it get started */
 	if (ring->tail != ring->head) {
-		ENGINE_WRITE(engine, RING_TAIL, ring->tail);
+		ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail);
 		ENGINE_POSTING_READ(engine, RING_TAIL);
 	}
 
 	/* Papering over lost _interrupts_ immediately following the restart */
 	intel_engine_signal_breadcrumbs(engine);
-out:
-	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
+	return 0;
+}
 
-	return ret;
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+	struct intel_timeline *tl;
+
+	list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+		intel_timeline_reset_seqno(tl);
 }
 
-static void reset_prepare(struct intel_engine_cs *engine)
+static void xcs_sanitize(struct intel_engine_cs *engine)
+{
+	/*
+	 * Poison residual state on resume, in case the suspend didn't!
+	 *
+	 * We have to assume that across suspend/resume (or other loss
+	 * of control) the contents of our pinned buffers have been
+	 * lost, replaced by garbage. Since this doesn't always happen,
+	 * let's poison such state so that we more quickly spot when
+	 * we falsely assume it has been preserved.
+	 */
+	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+	/*
+	 * The kernel_context HWSP is stored in the status_page. As above,
+	 * that may be lost on resume/initialisation, and so we need to
+	 * reset the value in the HWSP.
+	 */
+	sanitize_hwsp(engine);
+
+	/* And scrub the dirty cachelines for the HWSP */
+	clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
+static bool stop_ring(struct intel_engine_cs *engine)
 {
-	struct intel_uncore *uncore = engine->uncore;
-	const u32 base = engine->mmio_base;
+	/* Empty the ring by skipping to the end */
+	ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
+	ENGINE_POSTING_READ(engine, RING_HEAD);
+
+	/* The ring must be empty before it is disabled */
+	ENGINE_WRITE_FW(engine, RING_CTL, 0);
+	ENGINE_POSTING_READ(engine, RING_CTL);
+
+	/* Then reset the disabled ring */
+	ENGINE_WRITE_FW(engine, RING_HEAD, 0);
+	ENGINE_WRITE_FW(engine, RING_TAIL, 0);
+
+	return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
+}
 
+static void reset_prepare(struct intel_engine_cs *engine)
+{
 	/*
 	 * We stop engines, otherwise we might get failed reset and a
 	 * dead gpu (on elk). Also as modern gpu as kbl can suffer
@@ -338,30 +320,35 @@ static void reset_prepare(struct intel_engine_cs *engine)
 	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
 	 *
 	 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
+	 * WaClearRingBufHeadRegAtInit:ctg,elk
 	 *
 	 * FIXME: Wa for more modern gens needs to be validated
 	 */
 	ENGINE_TRACE(engine, "\n");
+	intel_engine_stop_cs(engine);
 
-	if (intel_engine_stop_cs(engine))
-		ENGINE_TRACE(engine, "timed out on STOP_RING\n");
-
-	intel_uncore_write_fw(uncore,
-			      RING_HEAD(base),
-			      intel_uncore_read_fw(uncore, RING_TAIL(base)));
-	intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
-
-	intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
-	intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
-	intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
-
-	/* The ring must be empty before it is disabled */
-	intel_uncore_write_fw(uncore, RING_CTL(base), 0);
+	if (!stop_ring(engine)) {
+		/* G45 ring initialization often fails to reset head to zero */
+		drm_dbg(&engine->i915->drm,
+			"%s head not reset to zero "
+			"ctl %08x head %08x tail %08x start %08x\n",
+			engine->name,
+			ENGINE_READ_FW(engine, RING_CTL),
+			ENGINE_READ_FW(engine, RING_HEAD),
+			ENGINE_READ_FW(engine, RING_TAIL),
+			ENGINE_READ_FW(engine, RING_START));
+	}
 
-	/* Check acts as a post */
-	if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
-		ENGINE_TRACE(engine, "ring head [%x] not parked\n",
-			     intel_uncore_read_fw(uncore, RING_HEAD(base)));
+	if (!stop_ring(engine)) {
+		drm_err(&engine->i915->drm,
+			"failed to set %s head to zero "
+			"ctl %08x head %08x tail %08x start %08x\n",
+			engine->name,
+			ENGINE_READ_FW(engine, RING_CTL),
+			ENGINE_READ_FW(engine, RING_HEAD),
+			ENGINE_READ_FW(engine, RING_TAIL),
+			ENGINE_READ_FW(engine, RING_START));
+	}
 }
 
 static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
@@ -372,12 +359,14 @@ static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
 
 	rq = NULL;
 	spin_lock_irqsave(&engine->active.lock, flags);
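+	/* __i915_request_is_complete() reads an RCU-protected HWSP seqno */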
+	rcu_read_lock();
 	list_for_each_entry(pos, &engine->active.requests, sched.link) {
-		if (!i915_request_completed(pos)) {
+		if (!__i915_request_is_complete(pos)) {
 			rq = pos;
 			break;
 		}
 	}
+	rcu_read_unlock();
 
 	/*
 	 * The guilty request will get skipped on a hung engine.
@@ -441,10 +430,8 @@ static void reset_cancel(struct intel_engine_cs *engine)
 	spin_lock_irqsave(&engine->active.lock, flags);
 
 	/* Mark all submitted requests as skipped. */
-	list_for_each_entry(request, &engine->active.requests, sched.link) {
-		i915_request_set_error_once(request, -EIO);
-		i915_request_mark_complete(request);
-	}
+	list_for_each_entry(request, &engine->active.requests, sched.link)
+		i915_request_mark_eio(request);
 	intel_engine_signal_breadcrumbs(engine);
 
 	/* Remaining _unready_ requests will be nop'ed when submitted */
@@ -603,6 +590,7 @@ static int ring_context_pin(struct intel_context *ce, void *unused)
 static void ring_context_reset(struct intel_context *ce)
 {
 	intel_ring_reset(ce->ring, ce->ring->emit);
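+	/* Assume the context image was lost in the reset; rebuild it on next use */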
+	clear_bit(CONTEXT_VALID_BIT, &ce->flags);
 }
 
 static const struct intel_context_ops ring_context_ops = {
@@ -654,9 +642,9 @@ static int load_pd_dir(struct i915_request *rq,
 	return rq->engine->emit_flush(rq, EMIT_FLUSH);
 }
 
-static inline int mi_set_context(struct i915_request *rq,
-				 struct intel_context *ce,
-				 u32 flags)
+static int mi_set_context(struct i915_request *rq,
+			  struct intel_context *ce,
+			  u32 flags)
 {
 	struct intel_engine_cs *engine = rq->engine;
 	struct drm_i915_private *i915 = engine->i915;
@@ -1071,6 +1059,8 @@ static void setup_common(struct intel_engine_cs *engine)
 	setup_irq(engine);
 
 	engine->resume = xcs_resume;
+	engine->sanitize = xcs_sanitize;
+
 	engine->reset.prepare = reset_prepare;
 	engine->reset.rewind = reset_rewind;
 	engine->reset.cancel = reset_cancel;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index b629eeb1400297c08a90ff73a42fec93601f35f2..ee5835c29c03bda633ea898c17c50628e001afb2 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -43,7 +43,7 @@ static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
 	return mask & ~rps->pm_intrmsk_mbz;
 }
 
-static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
 {
 	intel_uncore_write_fw(uncore, reg, val);
 }
@@ -400,7 +400,7 @@ static unsigned int gen5_invert_freq(struct intel_rps *rps,
 	return val;
 }
 
-static bool gen5_rps_set(struct intel_rps *rps, u8 val)
+static int __gen5_rps_set(struct intel_rps *rps, u8 val)
 {
 	struct intel_uncore *uncore = rps_to_uncore(rps);
 	u16 rgvswctl;
@@ -410,7 +410,7 @@ static bool gen5_rps_set(struct intel_rps *rps, u8 val)
 	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
 	if (rgvswctl & MEMCTL_CMD_STS) {
 		DRM_DEBUG("gpu busy, RCS change rejected\n");
-		return false; /* still busy with another command */
+		return -EBUSY; /* still busy with another command */
 	}
 
 	/* Invert the frequency bin into an ips delay */
@@ -426,7 +426,18 @@ static bool gen5_rps_set(struct intel_rps *rps, u8 val)
 	rgvswctl |= MEMCTL_CMD_STS;
 	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
 
-	return true;
+	return 0;
+}
+
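+/* Locked wrapper: __gen5_rps_set() must be called under mchdev_lock */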
+static int gen5_rps_set(struct intel_rps *rps, u8 val)
+{
+	int err;
+
+	spin_lock_irq(&mchdev_lock);
+	err = __gen5_rps_set(rps, val);
+	spin_unlock_irq(&mchdev_lock);
+
+	return err;
 }
 
 static unsigned long intel_pxfreq(u32 vidfreq)
@@ -557,7 +568,7 @@ static bool gen5_rps_enable(struct intel_rps *rps)
 			"stuck trying to change perf mode\n");
 	mdelay(1);
 
-	gen5_rps_set(rps, rps->cur_freq);
+	__gen5_rps_set(rps, rps->cur_freq);
 
 	rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
 	rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
@@ -599,7 +610,7 @@ static void gen5_rps_disable(struct intel_rps *rps)
 	intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
 
 	/* Go back to the starting frequency */
-	gen5_rps_set(rps, rps->idle_freq);
+	__gen5_rps_set(rps, rps->idle_freq);
 	mdelay(1);
 	rgvswctl |= MEMCTL_CMD_STS;
 	intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
@@ -797,20 +808,19 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update)
 	struct drm_i915_private *i915 = rps_to_i915(rps);
 	int err;
 
-	if (INTEL_GEN(i915) < 6)
-		return 0;
-
 	if (val == rps->last_freq)
 		return 0;
 
 	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
 		err = vlv_rps_set(rps, val);
-	else
+	else if (INTEL_GEN(i915) >= 6)
 		err = gen6_rps_set(rps, val);
+	else
+		err = gen5_rps_set(rps, val);
 	if (err)
 		return err;
 
-	if (update)
+	if (update && INTEL_GEN(i915) >= 6)
 		gen6_rps_set_thresholds(rps, val);
 	rps->last_freq = val;
 
@@ -852,6 +862,8 @@ void intel_rps_park(struct intel_rps *rps)
 {
 	int adj;
 
+	GEM_BUG_ON(atomic_read(&rps->num_waiters));
+
 	if (!intel_rps_clear_active(rps))
 		return;
 
@@ -907,28 +919,27 @@ void intel_rps_park(struct intel_rps *rps)
 
 void intel_rps_boost(struct i915_request *rq)
 {
-	struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
-	unsigned long flags;
-
-	if (i915_request_signaled(rq) || !intel_rps_is_active(rps))
+	if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
 		return;
 
 	/* Serializes with i915_request_retire() */
-	spin_lock_irqsave(&rq->lock, flags);
-	if (!i915_request_has_waitboost(rq) &&
-	    !dma_fence_is_signaled_locked(&rq->fence)) {
-		set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
+	if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
+		struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
+
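+		/* Only the first waiter needs to boost; others just keep count */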
+		if (atomic_fetch_inc(&rps->num_waiters))
+			return;
+
+		if (!intel_rps_is_active(rps))
+			return;
 
 		GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
 			 rq->fence.context, rq->fence.seqno);
 
-		if (!atomic_fetch_inc(&rps->num_waiters) &&
-		    READ_ONCE(rps->cur_freq) < rps->boost_freq)
+		if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
 			schedule_work(&rps->work);
 
-		atomic_inc(&rps->boosts);
+		WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
 	}
-	spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 int intel_rps_set(struct intel_rps *rps, u8 val)
@@ -1798,7 +1809,7 @@ void gen5_rps_irq_handler(struct intel_rps *rps)
 			 rps->min_freq_softlimit,
 			 rps->max_freq_softlimit);
 
-	if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq))
+	if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq))
 		rps->cur_freq = new_freq;
 
 	spin_unlock(&mchdev_lock);
@@ -2109,7 +2120,7 @@ bool i915_gpu_turbo_disable(void)
 
 	spin_lock_irq(&mchdev_lock);
 	rps->max_freq_softlimit = rps->min_freq;
-	ret = gen5_rps_set(&i915->gt.rps, rps->min_freq);
+	ret = !__gen5_rps_set(&i915->gt.rps, rps->min_freq);
 	spin_unlock_irq(&mchdev_lock);
 
 	drm_dev_put(&i915->drm);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 38083f0402d9cc7f0e4fda9f35ac059b7345aef5..029fe13cf303ebcd4f093afaf390a7dd685910b5 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -93,7 +93,7 @@ struct intel_rps {
 	} power;
 
 	atomic_t num_waiters;
-	atomic_t boosts;
+	unsigned int boosts;
 
 	/* manual wa residency calculations */
 	struct intel_rps_ei ei;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 8015964043eb7ad4e0bce14f88373dbd2577f6f3..037b0e3ccbedfc5cde9f72dbc963f67b8ab3f417 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -319,6 +319,25 @@ __intel_timeline_create(struct intel_gt *gt,
 	return timeline;
 }
 
+struct intel_timeline *
+intel_timeline_create_from_engine(struct intel_engine_cs *engine,
+				  unsigned int offset)
+{
+	struct i915_vma *hwsp = engine->status_page.vma;
+	struct intel_timeline *tl;
+
+	tl = __intel_timeline_create(engine->gt, hwsp, offset);
+	if (IS_ERR(tl))
+		return tl;
+
+	/* Borrow a nearby lock; we only create these timelines during init */
+	mutex_lock(&hwsp->vm->mutex);
+	list_add_tail(&tl->engine_link, &engine->status_page.timelines);
+	mutex_unlock(&hwsp->vm->mutex);
+
+	return tl;
+}
+
 void __intel_timeline_pin(struct intel_timeline *tl)
 {
 	GEM_BUG_ON(!atomic_read(&tl->pin_count));
@@ -563,11 +582,11 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 
 	rcu_read_lock();
 	cl = rcu_dereference(from->hwsp_cacheline);
-	if (i915_request_completed(from)) /* confirm cacheline is valid */
+	if (i915_request_signaled(from)) /* confirm cacheline is valid */
 		goto unlock;
 	if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
 		goto unlock; /* seqno wrapped and completed! */
-	if (unlikely(i915_request_completed(from)))
+	if (unlikely(__i915_request_is_complete(from)))
 		goto release;
 	rcu_read_unlock();
 
@@ -615,6 +634,86 @@ void intel_gt_fini_timelines(struct intel_gt *gt)
 	GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
 }
 
+void intel_gt_show_timelines(struct intel_gt *gt,
+			     struct drm_printer *m,
+			     void (*show_request)(struct drm_printer *m,
+						  const struct i915_request *rq,
+						  const char *prefix,
+						  int indent))
+{
+	struct intel_gt_timelines *timelines = &gt->timelines;
+	struct intel_timeline *tl, *tn;
+	LIST_HEAD(free);
+
+	spin_lock(&timelines->lock);
+	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+		unsigned long count, ready, inflight;
+		struct i915_request *rq, *rn;
+		struct dma_fence *fence;
+
+		if (!mutex_trylock(&tl->mutex)) {
+			drm_printf(m, "Timeline %llx: busy; skipping\n",
+				   tl->fence_context);
+			continue;
+		}
+
+		intel_timeline_get(tl);
+		GEM_BUG_ON(!atomic_read(&tl->active_count));
+		atomic_inc(&tl->active_count); /* pin the list element */
+		spin_unlock(&timelines->lock);
+
+		count = 0;
+		ready = 0;
+		inflight = 0;
+		list_for_each_entry_safe(rq, rn, &tl->requests, link) {
+			if (i915_request_completed(rq))
+				continue;
+
+			count++;
+			if (i915_request_is_ready(rq))
+				ready++;
+			if (i915_request_is_active(rq))
+				inflight++;
+		}
+
+		drm_printf(m, "Timeline %llx: { ", tl->fence_context);
+		drm_printf(m, "count: %lu, ready: %lu, inflight: %lu",
+			   count, ready, inflight);
+		drm_printf(m, ", seqno: { current: %d, last: %d }",
+			   *tl->hwsp_seqno, tl->seqno);
+		fence = i915_active_fence_get(&tl->last_request);
+		if (fence) {
+			drm_printf(m, ", engine: %s",
+				   to_request(fence)->engine->name);
+			dma_fence_put(fence);
+		}
+		drm_printf(m, " }\n");
+
+		if (show_request) {
+			list_for_each_entry_safe(rq, rn, &tl->requests, link)
+				show_request(m, rq, "", 2);
+		}
+
+		mutex_unlock(&tl->mutex);
+		spin_lock(&timelines->lock);
+
+		/* Resume list iteration after reacquiring spinlock */
+		list_safe_reset_next(tl, tn, link);
+		if (atomic_dec_and_test(&tl->active_count))
+			list_del(&tl->link);
+
+		/* Defer the final release to after the spinlock */
+		if (refcount_dec_and_test(&tl->kref.refcount)) {
+			GEM_BUG_ON(atomic_read(&tl->active_count));
+			list_add(&tl->link, &free);
+		}
+	}
+	spin_unlock(&timelines->lock);
+
+	list_for_each_entry_safe(tl, tn, &free, link)
+		__intel_timeline_free(&tl->kref);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "gt/selftests/mock_timeline.c"
 #include "gt/selftest_timeline.c"
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index 9882cd911d8edf6e4a383f269459e5fd4e3cb7f2..dcdee692a80e72f17dd07acaad62394a35f7a0e3 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -31,6 +31,8 @@
 #include "i915_syncmap.h"
 #include "intel_timeline_types.h"
 
+struct drm_printer;
+
 struct intel_timeline *
 __intel_timeline_create(struct intel_gt *gt,
 			struct i915_vma *global_hwsp,
@@ -42,14 +44,9 @@ intel_timeline_create(struct intel_gt *gt)
 	return __intel_timeline_create(gt, NULL, 0);
 }
 
-static inline struct intel_timeline *
+struct intel_timeline *
 intel_timeline_create_from_engine(struct intel_engine_cs *engine,
-				  unsigned int offset)
-{
-	return __intel_timeline_create(engine->gt,
-				       engine->status_page.vma,
-				       offset);
-}
+				  unsigned int offset);
 
 static inline struct intel_timeline *
 intel_timeline_get(struct intel_timeline *timeline)
@@ -106,4 +103,18 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 void intel_gt_init_timelines(struct intel_gt *gt);
 void intel_gt_fini_timelines(struct intel_gt *gt);
 
+void intel_gt_show_timelines(struct intel_gt *gt,
+			     struct drm_printer *m,
+			     void (*show_request)(struct drm_printer *m,
+						  const struct i915_request *rq,
+						  const char *prefix,
+						  int indent));
+
+static inline bool
+intel_timeline_is_last(const struct intel_timeline *tl,
+		       const struct i915_request *rq)
+{
+	return list_is_last_rcu(&rq->link, &tl->requests);
+}
+
 #endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index 4474f487f58991ad0bf133a7ee6e8bf54bb8ea79..e360f50706bf040278c4198935dd941aa8f63af7 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -84,6 +84,8 @@ struct intel_timeline {
 	struct list_head link;
 	struct intel_gt *gt;
 
+	struct list_head engine_link;
+
 	struct kref kref;
 	struct rcu_head rcu;
 };
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index adc9a8ea410a3a2242c454c2347a41728375b0da..ec366cf9ef561fdc706a23a725c94dbc6619c4a3 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -7,6 +7,7 @@
 #include "i915_drv.h"
 #include "intel_context.h"
 #include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
 #include "intel_gt.h"
 #include "intel_ring.h"
 #include "intel_workarounds.h"
@@ -194,7 +195,7 @@ static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
 }
 
 static void
-wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
+wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
 {
 	wa_add(wal, reg, clear, set, clear);
 }
@@ -202,21 +203,32 @@ wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
 static void
 wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
 {
-	wa_write_masked_or(wal, reg, ~0, set);
+	wa_write_clr_set(wal, reg, ~0, set);
 }
 
 static void
 wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
 {
-	wa_write_masked_or(wal, reg, set, set);
+	wa_write_clr_set(wal, reg, set, set);
 }
 
 static void
 wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
 {
-	wa_write_masked_or(wal, reg, clr, 0);
+	wa_write_clr_set(wal, reg, clr, 0);
 }
 
+/*
+ * WA operations on "masked register". A masked register has the upper 16 bits
+ * documented as "masked" in b-spec. Its purpose is to allow writing to just a
+ * portion of the register without a read-modify-write: you simply write
+ * in the upper 16 bits the mask of the bits you are going to modify.
+ *
+ * The wa_masked_* family of functions already does the necessary operations to
+ * calculate the mask based on the parameters passed, so the user only has to
+ * provide the lower 16 bits of that register.
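+ *
+ * For example, _MASKED_BIT_ENABLE(BIT(0)) expands to 0x00010001: the upper
+ * 16 bits select bit 0 for update while the lower 16 bits set it.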
+ */
+
 static void
 wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
@@ -229,38 +241,36 @@ wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 	wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val);
 }
 
-#define WA_SET_BIT_MASKED(addr, mask) \
-	wa_masked_en(wal, (addr), (mask))
-
-#define WA_CLR_BIT_MASKED(addr, mask) \
-	wa_masked_dis(wal, (addr), (mask))
-
-#define WA_SET_FIELD_MASKED(addr, mask, value) \
-	wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value)))
+static void
+wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
+		    u32 mask, u32 val)
+{
+	wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask);
+}
 
 static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
 				      struct i915_wa_list *wal)
 {
-	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
 }
 
 static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
 				      struct i915_wa_list *wal)
 {
-	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
 }
 
 static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
 				      struct i915_wa_list *wal)
 {
-	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
 
 	/* WaDisableAsyncFlipPerfMode:bdw,chv */
-	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+	wa_masked_en(wal, MI_MODE, ASYNC_FLIP_PERF_DISABLE);
 
 	/* WaDisablePartialInstShootdown:bdw,chv */
-	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
-			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+	wa_masked_en(wal, GEN8_ROW_CHICKEN,
+		     PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 
 	/* Use Force Non-Coherent whenever executing a 3D context. This is a
 	 * workaround for a possible hang in the unlikely event a TLB
@@ -268,9 +278,9 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
 	 */
 	/* WaForceEnableNonCoherent:bdw,chv */
 	/* WaHdcDisableFetchWhenMasked:bdw,chv */
-	WA_SET_BIT_MASKED(HDC_CHICKEN0,
-			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
-			  HDC_FORCE_NON_COHERENT);
+	wa_masked_en(wal, HDC_CHICKEN0,
+		     HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+		     HDC_FORCE_NON_COHERENT);
 
 	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
 	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
@@ -280,10 +290,10 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
 	 *
 	 * This optimization is off by default for BDW and CHV; turn it on.
 	 */
-	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+	wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
 
 	/* Wa4x4STCOptimizationDisable:bdw,chv */
-	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+	wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
 
 	/*
 	 * BSpec recommends 8x4 when MSAA is used,
@@ -293,7 +303,7 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
 	 * disable bit, which we don't touch here, but it's good
 	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
 	 */
-	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+	wa_masked_field_set(wal, GEN7_GT_MODE,
 			    GEN6_WIZ_HASHING_MASK,
 			    GEN6_WIZ_HASHING_16x4);
 }
@@ -306,24 +316,24 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
 	gen8_ctx_workarounds_init(engine, wal);
 
 	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
-	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+	wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
 
 	/* WaDisableDopClockGating:bdw
 	 *
 	 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
 	 * to disable EUTC clock gating.
 	 */
-	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
-			  DOP_CLOCK_GATING_DISABLE);
+	wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+		     DOP_CLOCK_GATING_DISABLE);
 
-	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
-			  GEN8_SAMPLER_POWER_BYPASS_DIS);
+	wa_masked_en(wal, HALF_SLICE_CHICKEN3,
+		     GEN8_SAMPLER_POWER_BYPASS_DIS);
 
-	WA_SET_BIT_MASKED(HDC_CHICKEN0,
-			  /* WaForceContextSaveRestoreNonCoherent:bdw */
-			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
-			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
-			  (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+	wa_masked_en(wal, HDC_CHICKEN0,
+		     /* WaForceContextSaveRestoreNonCoherent:bdw */
+		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+		     /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
+		     (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 }
 
 static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -332,10 +342,10 @@ static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
 	gen8_ctx_workarounds_init(engine, wal);
 
 	/* WaDisableThreadStallDopClockGating:chv */
-	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+	wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
 
 	/* Improve HiZ throughput on CHV. */
-	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
+	wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
 }
 
 static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -349,38 +359,38 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
 		 * Must match Display Engine. See
 		 * WaCompressedResourceDisplayNewHashMode.
 		 */
-		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-				  GEN9_PBE_COMPRESSED_HASH_SELECTION);
-		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
-				  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
+		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+			     GEN9_PBE_COMPRESSED_HASH_SELECTION);
+		wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+			     GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
 	}
 
 	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
 	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
-	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
-			  FLOW_CONTROL_ENABLE |
-			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+	wa_masked_en(wal, GEN8_ROW_CHICKEN,
+		     FLOW_CONTROL_ENABLE |
+		     PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 
 	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
 	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
-	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
-			  GEN9_ENABLE_YV12_BUGFIX |
-			  GEN9_ENABLE_GPGPU_PREEMPTION);
+	wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+		     GEN9_ENABLE_YV12_BUGFIX |
+		     GEN9_ENABLE_GPGPU_PREEMPTION);
 
 	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
 	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
-	WA_SET_BIT_MASKED(CACHE_MODE_1,
-			  GEN8_4x4_STC_OPTIMIZATION_DISABLE |
-			  GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
+	wa_masked_en(wal, CACHE_MODE_1,
+		     GEN8_4x4_STC_OPTIMIZATION_DISABLE |
+		     GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
 
 	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
-	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
-			  GEN9_CCS_TLB_PREFETCH_ENABLE);
+	wa_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
+		      GEN9_CCS_TLB_PREFETCH_ENABLE);
 
 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
-	WA_SET_BIT_MASKED(HDC_CHICKEN0,
-			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
-			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+	wa_masked_en(wal, HDC_CHICKEN0,
+		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+		     HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
 
 	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
 	 * both tied to WaForceContextSaveRestoreNonCoherent
@@ -396,19 +406,19 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
 	 */
 
 	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
-	WA_SET_BIT_MASKED(HDC_CHICKEN0,
-			  HDC_FORCE_NON_COHERENT);
+	wa_masked_en(wal, HDC_CHICKEN0,
+		     HDC_FORCE_NON_COHERENT);
 
 	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
 	if (IS_SKYLAKE(i915) ||
 	    IS_KABYLAKE(i915) ||
 	    IS_COFFEELAKE(i915) ||
 	    IS_COMETLAKE(i915))
-		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
-				  GEN8_SAMPLER_POWER_BYPASS_DIS);
+		wa_masked_en(wal, HALF_SLICE_CHICKEN3,
+			     GEN8_SAMPLER_POWER_BYPASS_DIS);
 
 	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
-	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+	wa_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
 
 	/*
 	 * Supporting preemption with fine-granularity requires changes in the
@@ -422,16 +432,16 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
 	 */
 
 	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
-	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+	wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
 
 	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */
-	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
 			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
 			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
 
 	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
 	if (IS_GEN9_LP(i915))
-		WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
+		wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
 }
 
 static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
@@ -465,7 +475,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
 		return;
 
 	/* Tune IZ hashing. See intel_device_info_runtime_init() */
-	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+	wa_masked_field_set(wal, GEN7_GT_MODE,
 			    GEN9_IZ_HASHING_MASK(2) |
 			    GEN9_IZ_HASHING_MASK(1) |
 			    GEN9_IZ_HASHING_MASK(0),
@@ -487,12 +497,12 @@ static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
 	gen9_ctx_workarounds_init(engine, wal);
 
 	/* WaDisableThreadStallDopClockGating:bxt */
-	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
-			  STALL_DOP_GATING_DISABLE);
+	wa_masked_en(wal, GEN8_ROW_CHICKEN,
+		     STALL_DOP_GATING_DISABLE);
 
 	/* WaToEnableHwFixForPushConstHWBug:bxt */
-	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 }
 
 static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -504,12 +514,12 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
 
 	/* WaToEnableHwFixForPushConstHWBug:kbl */
 	if (IS_KBL_GT_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
-		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+			     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
 	/* WaDisableSbeCacheDispatchPortSharing:kbl */
-	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
-			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+	wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
+		     GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 }
 
 static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -518,8 +528,8 @@ static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
 	gen9_ctx_workarounds_init(engine, wal);
 
 	/* WaToEnableHwFixForPushConstHWBug:glk */
-	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 }
 
 static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -528,41 +538,41 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
 	gen9_ctx_workarounds_init(engine, wal);
 
 	/* WaToEnableHwFixForPushConstHWBug:cfl */
-	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
 	/* WaDisableSbeCacheDispatchPortSharing:cfl */
-	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
-			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+	wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
+		     GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 }
 
 static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
 				     struct i915_wa_list *wal)
 {
 	/* WaForceContextSaveRestoreNonCoherent:cnl */
-	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
-			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
+	wa_masked_en(wal, CNL_HDC_CHICKEN0,
+		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
 
 	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
-	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
 	/* WaPushConstantDereferenceHoldDisable:cnl */
-	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
+	wa_masked_en(wal, GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
 
 	/* FtrEnableFastAnisoL1BankingFix:cnl */
-	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
+	wa_masked_en(wal, HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
 
 	/* WaDisable3DMidCmdPreemption:cnl */
-	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+	wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
 
 	/* WaDisableGPGPUMidCmdPreemption:cnl */
-	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
 			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
 			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
 
 	/* WaDisableEarlyEOT:cnl */
-	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
+	wa_masked_en(wal, GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
 }
 
 static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -580,8 +590,8 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
 	 * Formerly known as WaPushConstantDereferenceHoldDisable
 	 */
 	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
-		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
-				  PUSH_CONSTANT_DEREF_DISABLE);
+		wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+			     PUSH_CONSTANT_DEREF_DISABLE);
 
 	/* WaForceEnableNonCoherent:icl
 	 * This is not the same workaround as in early Gen9 platforms, where
@@ -590,40 +600,40 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
 	 * (the register is whitelisted in hardware now, so UMDs can opt in
 	 * for coherency if they have a good reason).
 	 */
-	WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
+	wa_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
 
 	/* Wa_2006611047:icl (pre-prod)
 	 * Formerly known as WaDisableImprovedTdlClkGating
 	 */
 	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
-		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
-				  GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
+		wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+			     GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
 
 	/* Wa_2006665173:icl (pre-prod) */
 	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
-		WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
-				  GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
+		wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+			     GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
 
 	/* WaEnableFloatBlendOptimization:icl */
-	wa_write_masked_or(wal,
-			   GEN10_CACHE_MODE_SS,
-			   0, /* write-only, so skip validation */
-			   _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
+	wa_write_clr_set(wal,
+			 GEN10_CACHE_MODE_SS,
+			 0, /* write-only, so skip validation */
+			 _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
 
 	/* WaDisableGPGPUMidThreadPreemption:icl */
-	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
 			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
 			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
 
 	/* allow headerless messages for preemptible GPGPU context */
-	WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
-			  GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
+	wa_masked_en(wal, GEN10_SAMPLER_MODE,
+		     GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
 
 	/* Wa_1604278689:icl,ehl */
 	wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
-	wa_write_masked_or(wal, IVB_FBC_RT_BASE_UPPER,
-			   0, /* write-only register; skip validation */
-			   0xFFFFFFFF);
+	wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
+			 0, /* write-only register; skip validation */
+			 0xFFFFFFFF);
 
 	/* Wa_1406306137:icl,ehl */
 	wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
@@ -643,11 +653,11 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
 	 * Wa_14010443199:rkl
 	 * Wa_14010698770:rkl
 	 */
-	WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
-			  GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
+	wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+		     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
 
 	/* WaDisableGPGPUMidThreadPreemption:gen12 */
-	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
 			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
 			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
 }
@@ -680,12 +690,22 @@ static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
 	gen12_ctx_workarounds_init(engine, wal);
 
 	/* Wa_1409044764 */
-	WA_CLR_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
-			  DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
+	wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
+		      DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
 
 	/* Wa_22010493298 */
-	WA_SET_BIT_MASKED(HIZ_CHICKEN,
-			  DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
+	wa_masked_en(wal, HIZ_CHICKEN,
+		     DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
+
+	/*
+	 * Wa_16011163337
+	 *
+	 * Like in tgl_ctx_workarounds_init(), read verification is ignored due
+	 * to Wa_1608008084.
+	 */
+	wa_add(wal,
+	       FF_MODE2,
+	       FF_MODE2_GS_TIMER_MASK, FF_MODE2_GS_TIMER_224, 0);
 }
 
 static void
@@ -804,57 +824,11 @@ ilk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 static void
 snb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
-	wa_masked_en(wal,
-		     _3D_CHICKEN,
-		     _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
-
-	/* WaDisable_RenderCache_OperationalFlush:snb */
-	wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
-
-	/*
-	 * BSpec recommends 8x4 when MSAA is used,
-	 * however in practice 16x4 seems fastest.
-	 *
-	 * Note that PS/WM thread counts depend on the WIZ hashing
-	 * disable bit, which we don't touch here, but it's good
-	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
-	 */
-	wa_add(wal,
-	       GEN6_GT_MODE, 0,
-	       _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
-	       GEN6_WIZ_HASHING_16x4);
-
-	wa_masked_dis(wal, CACHE_MODE_0, CM0_STC_EVICT_DISABLE_LRA_SNB);
-
-	wa_masked_en(wal,
-		     _3D_CHICKEN3,
-		     /* WaStripsFansDisableFastClipPerformanceFix:snb */
-		     _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
-		     /*
-		      * Bspec says:
-		      * "This bit must be set if 3DSTATE_CLIP clip mode is set
-		      * to normal and 3DSTATE_SF number of SF output attributes
-		      * is more than 16."
-		      */
-		   _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
 }
 
 static void
 ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-	/* WaDisableEarlyCull:ivb */
-	wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
-
-	/* WaDisablePSDDualDispatchEnable:ivb */
-	if (IS_IVB_GT1(i915))
-		wa_masked_en(wal,
-			     GEN7_HALF_SLICE_CHICKEN1,
-			     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
-
-	/* WaDisable_RenderCache_OperationalFlush:ivb */
-	wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
-
 	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
 	wa_masked_dis(wal,
 		      GEN7_COMMON_SLICE_CHICKEN1,
@@ -866,90 +840,14 @@ ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 
 	/* WaForceL3Serialization:ivb */
 	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
-
-	/*
-	 * WaVSThreadDispatchOverride:ivb,vlv
-	 *
-	 * This actually overrides the dispatch
-	 * mode for all thread types.
-	 */
-	wa_write_masked_or(wal, GEN7_FF_THREAD_MODE,
-			   GEN7_FF_SCHED_MASK,
-			   GEN7_FF_TS_SCHED_HW |
-			   GEN7_FF_VS_SCHED_HW |
-			   GEN7_FF_DS_SCHED_HW);
-
-	if (0) { /* causes HiZ corruption on ivb:gt1 */
-		/* enable HiZ Raw Stall Optimization */
-		wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
-	}
-
-	/* WaDisable4x2SubspanOptimization:ivb */
-	wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
-	/*
-	 * BSpec recommends 8x4 when MSAA is used,
-	 * however in practice 16x4 seems fastest.
-	 *
-	 * Note that PS/WM thread counts depend on the WIZ hashing
-	 * disable bit, which we don't touch here, but it's good
-	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
-	 */
-	wa_add(wal, GEN7_GT_MODE, 0,
-	       _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
-	       GEN6_WIZ_HASHING_16x4);
 }
 
 static void
 vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-	/* WaDisableEarlyCull:vlv */
-	wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
-
-	/* WaPsdDispatchEnable:vlv */
-	/* WaDisablePSDDualDispatchEnable:vlv */
-	wa_masked_en(wal,
-		     GEN7_HALF_SLICE_CHICKEN1,
-		     GEN7_MAX_PS_THREAD_DEP |
-		     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
-
-	/* WaDisable_RenderCache_OperationalFlush:vlv */
-	wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
-
 	/* WaForceL3Serialization:vlv */
 	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
 
-	/*
-	 * WaVSThreadDispatchOverride:ivb,vlv
-	 *
-	 * This actually overrides the dispatch
-	 * mode for all thread types.
-	 */
-	wa_write_masked_or(wal,
-			   GEN7_FF_THREAD_MODE,
-			   GEN7_FF_SCHED_MASK,
-			   GEN7_FF_TS_SCHED_HW |
-			   GEN7_FF_VS_SCHED_HW |
-			   GEN7_FF_DS_SCHED_HW);
-
-	/*
-	 * BSpec says this must be set, even though
-	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
-	 */
-	wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
-	/*
-	 * BSpec recommends 8x4 when MSAA is used,
-	 * however in practice 16x4 seems fastest.
-	 *
-	 * Note that PS/WM thread counts depend on the WIZ hashing
-	 * disable bit, which we don't touch here, but it's good
-	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
-	 */
-	wa_add(wal, GEN7_GT_MODE, 0,
-	       _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
-	       GEN6_WIZ_HASHING_16x4);
-
 	/*
 	 * WaIncreaseL3CreditsForVLVB0:vlv
 	 * This is the hardware default actually.
@@ -970,31 +868,6 @@ hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 
 	/* WaVSRefCountFullforceMissDisable:hsw */
 	wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
-
-	wa_masked_dis(wal,
-		      CACHE_MODE_0_GEN7,
-		      /* WaDisable_RenderCache_OperationalFlush:hsw */
-		      RC_OP_FLUSH_ENABLE |
-		      /* enable HiZ Raw Stall Optimization */
-		      HIZ_RAW_STALL_OPT_DISABLE);
-
-	/* WaDisable4x2SubspanOptimization:hsw */
-	wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
-	/*
-	 * BSpec recommends 8x4 when MSAA is used,
-	 * however in practice 16x4 seems fastest.
-	 *
-	 * Note that PS/WM thread counts depend on the WIZ hashing
-	 * disable bit, which we don't touch here, but it's good
-	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
-	 */
-	wa_add(wal, GEN7_GT_MODE, 0,
-	       _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
-	       GEN6_WIZ_HASHING_16x4);
-
-	/* WaSampleCChickenBitEnable:hsw */
-	wa_masked_en(wal, HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
 }
 
 static void
@@ -1164,7 +1037,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
 
 	drm_dbg(&i915->drm, "MCR slice/subslice = %x\n", mcr);
 
-	wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
+	wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
 }
 
 static void
@@ -1189,10 +1062,10 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 
 	/* WaModifyGamTlbPartitioning:icl */
-	wa_write_masked_or(wal,
-			   GEN11_GACB_PERF_CTRL,
-			   GEN11_HASH_CTRL_MASK,
-			   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
+	wa_write_clr_set(wal,
+			 GEN11_GACB_PERF_CTRL,
+			 GEN11_HASH_CTRL_MASK,
+			 GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
 
 	/* Wa_1405766107:icl
 	 * Formerly known as WaCL2SFHalfMaxAlloc
@@ -1260,6 +1133,11 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 		wa_write_or(wal,
 			    SLICE_UNIT_LEVEL_CLKGATE,
 			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
+
+	/* Wa_1408615072:tgl[a0] */
+	if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+			    VSUNIT_CLKGATE_DIS_TGL);
 }
 
 static void
@@ -1358,9 +1236,9 @@ static bool
 wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
 {
 	if ((cur ^ wa->set) & wa->read) {
-		DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x)\n",
+		DRM_ERROR("%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
 			  name, from, i915_mmio_reg_offset(wa->reg),
-			  cur, cur & wa->read, wa->set);
+			  cur, cur & wa->read, wa->set & wa->read);
 
 		return false;
 	}
@@ -1425,7 +1303,8 @@ bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
 	return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
 }
 
-static inline bool is_nonpriv_flags_valid(u32 flags)
+__maybe_unused
+static bool is_nonpriv_flags_valid(u32 flags)
 {
 	/* Check only valid flag bits are set */
 	if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
@@ -1752,10 +1631,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 		wa_write_or(wal,
 			    GEN7_SARCHKMD,
 			    GEN7_DISABLE_SAMPLER_PREFETCH);
-
-		/* Wa_1408615072:tgl */
-		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
-			    VSUNIT_CLKGATE_DIS_TGL);
 	}
 
 	if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
@@ -1770,6 +1645,19 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 		 */
 		wa_write_or(wal, GEN7_FF_THREAD_MODE,
 			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+
+		/*
+		 * Wa_1606700617:tgl,dg1
+		 * Wa_22010271021:tgl,rkl,dg1
+		 */
+		wa_masked_en(wal,
+			     GEN9_CS_DEBUG_MODE1,
+			     FF_DOP_CLOCK_GATE_DISABLE);
+
+		/* Wa_1406941453:tgl,rkl,dg1 */
+		wa_masked_en(wal,
+			     GEN10_SAMPLER_MODE,
+			     ENABLE_SMALLPL);
 	}
 
 	if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
@@ -1798,21 +1686,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 			     GEN6_RC_SLEEP_PSMI_CONTROL,
 			     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
 			     GEN8_RC_SEMA_IDLE_MSG_DISABLE);
-
-		/*
-		 * Wa_1606700617:tgl
-		 * Wa_22010271021:tgl,rkl
-		 */
-		wa_masked_en(wal,
-			     GEN9_CS_DEBUG_MODE1,
-			     FF_DOP_CLOCK_GATE_DISABLE);
-	}
-
-	if (IS_GEN(i915, 12)) {
-		/* Wa_1406941453:gen12 */
-		wa_masked_en(wal,
-			     GEN10_SAMPLER_MODE,
-			     ENABLE_SMALLPL);
 	}
 
 	if (IS_GEN(i915, 11)) {
@@ -1838,14 +1711,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 		 * Wa_1604223664:icl
 		 * Formerly known as WaL3BankAddressHashing
 		 */
-		wa_write_masked_or(wal,
-				   GEN8_GARBCNTL,
-				   GEN11_HASH_CTRL_EXCL_MASK,
-				   GEN11_HASH_CTRL_EXCL_BIT0);
-		wa_write_masked_or(wal,
-				   GEN11_GLBLINVL,
-				   GEN11_BANK_HASH_ADDR_EXCL_MASK,
-				   GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+		wa_write_clr_set(wal,
+				 GEN8_GARBCNTL,
+				 GEN11_HASH_CTRL_EXCL_MASK,
+				 GEN11_HASH_CTRL_EXCL_BIT0);
+		wa_write_clr_set(wal,
+				 GEN11_GLBLINVL,
+				 GEN11_BANK_HASH_ADDR_EXCL_MASK,
+				 GEN11_BANK_HASH_ADDR_EXCL_BIT0);
 
 		/*
 		 * Wa_1405733216:icl
@@ -1874,10 +1747,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 			    GEN7_DISABLE_SAMPLER_PREFETCH);
 
 		/* Wa_1409178092:icl */
-		wa_write_masked_or(wal,
-				   GEN11_SCRATCH2,
-				   GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
-				   0);
+		wa_write_clr_set(wal,
+				 GEN11_SCRATCH2,
+				 GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
+				 0);
 
 		/* WaEnable32PlaneMode:icl */
 		wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
@@ -1951,24 +1824,132 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 
 		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
 		if (IS_GEN9_LP(i915))
-			wa_write_masked_or(wal,
-					   GEN8_L3SQCREG1,
-					   L3_PRIO_CREDITS_MASK,
-					   L3_GENERAL_PRIO_CREDITS(62) |
-					   L3_HIGH_PRIO_CREDITS(2));
+			wa_write_clr_set(wal,
+					 GEN8_L3SQCREG1,
+					 L3_PRIO_CREDITS_MASK,
+					 L3_GENERAL_PRIO_CREDITS(62) |
+					 L3_HIGH_PRIO_CREDITS(2));
 
 		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
 		wa_write_or(wal,
 			    GEN8_L3SQCREG4,
 			    GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+		/* Disable atomics in L3 to prevent unrecoverable hangs */
+		wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
+				 GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
+		wa_write_clr_set(wal, GEN8_L3SQCREG4,
+				 GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
+		wa_write_clr_set(wal, GEN9_SCRATCH1,
+				 EVICTION_PERF_FIX_ENABLE, 0);
+	}
+
+	if (IS_HASWELL(i915)) {
+		/* WaSampleCChickenBitEnable:hsw */
+		wa_masked_en(wal,
+			     HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
+
+		wa_masked_dis(wal,
+			      CACHE_MODE_0_GEN7,
+			      /* enable HiZ Raw Stall Optimization */
+			      HIZ_RAW_STALL_OPT_DISABLE);
+
+		/* WaDisable4x2SubspanOptimization:hsw */
+		wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+	}
+
+	if (IS_VALLEYVIEW(i915)) {
+		/* WaDisableEarlyCull:vlv */
+		wa_masked_en(wal,
+			     _3D_CHICKEN3,
+			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+		/*
+		 * WaVSThreadDispatchOverride:ivb,vlv
+		 *
+		 * This actually overrides the dispatch
+		 * mode for all thread types.
+		 */
+		wa_write_clr_set(wal,
+				 GEN7_FF_THREAD_MODE,
+				 GEN7_FF_SCHED_MASK,
+				 GEN7_FF_TS_SCHED_HW |
+				 GEN7_FF_VS_SCHED_HW |
+				 GEN7_FF_DS_SCHED_HW);
+
+		/* WaPsdDispatchEnable:vlv */
+		/* WaDisablePSDDualDispatchEnable:vlv */
+		wa_masked_en(wal,
+			     GEN7_HALF_SLICE_CHICKEN1,
+			     GEN7_MAX_PS_THREAD_DEP |
+			     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
 	}
 
-	if (IS_GEN(i915, 7))
+	if (IS_IVYBRIDGE(i915)) {
+		/* WaDisableEarlyCull:ivb */
+		wa_masked_en(wal,
+			     _3D_CHICKEN3,
+			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+		if (0) { /* causes HiZ corruption on ivb:gt1 */
+			/* enable HiZ Raw Stall Optimization */
+			wa_masked_dis(wal,
+				      CACHE_MODE_0_GEN7,
+				      HIZ_RAW_STALL_OPT_DISABLE);
+		}
+
+		/*
+		 * WaVSThreadDispatchOverride:ivb,vlv
+		 *
+		 * This actually overrides the dispatch
+		 * mode for all thread types.
+		 */
+		wa_write_clr_set(wal,
+				 GEN7_FF_THREAD_MODE,
+				 GEN7_FF_SCHED_MASK,
+				 GEN7_FF_TS_SCHED_HW |
+				 GEN7_FF_VS_SCHED_HW |
+				 GEN7_FF_DS_SCHED_HW);
+
+		/* WaDisablePSDDualDispatchEnable:ivb */
+		if (IS_IVB_GT1(i915))
+			wa_masked_en(wal,
+				     GEN7_HALF_SLICE_CHICKEN1,
+				     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+	}
+
+	if (IS_GEN(i915, 7)) {
 		/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
 		wa_masked_en(wal,
 			     GFX_MODE_GEN7,
 			     GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
 
+		/* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
+		wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
+
+		/*
+		 * BSpec says this must be set, even though
+		 * WaDisable4x2SubspanOptimization:ivb,hsw
+		 * isn't listed for VLV.
+		 */
+		wa_masked_en(wal,
+			     CACHE_MODE_1,
+			     PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+
+		/*
+		 * BSpec recommends 8x4 when MSAA is used,
+		 * however in practice 16x4 seems fastest.
+		 *
+		 * Note that PS/WM thread counts depend on the WIZ hashing
+		 * disable bit, which we don't touch here, but it's good
+		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+		 */
+		wa_add(wal, GEN7_GT_MODE, 0,
+		       _MASKED_FIELD(GEN6_WIZ_HASHING_MASK,
+				     GEN6_WIZ_HASHING_16x4),
+		       GEN6_WIZ_HASHING_16x4);
+	}
+
 	if (IS_GEN_RANGE(i915, 6, 7))
 		/*
 		 * We need to disable the AsyncFlip performance optimisations in
@@ -1991,6 +1972,39 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 			     GFX_MODE,
 			     GFX_TLB_INVALIDATE_EXPLICIT);
 
+		/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
+		wa_masked_en(wal,
+			     _3D_CHICKEN,
+			     _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
+
+		wa_masked_en(wal,
+			     _3D_CHICKEN3,
+			     /* WaStripsFansDisableFastClipPerformanceFix:snb */
+			     _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
+			     /*
+			      * Bspec says:
+			      * "This bit must be set if 3DSTATE_CLIP clip mode is set
+			      * to normal and 3DSTATE_SF number of SF output attributes
+			      * is more than 16."
+			      */
+			     _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
+
+		/*
+		 * BSpec recommends 8x4 when MSAA is used,
+		 * however in practice 16x4 seems fastest.
+		 *
+		 * Note that PS/WM thread counts depend on the WIZ hashing
+		 * disable bit, which we don't touch here, but it's good
+		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+		 */
+		wa_add(wal,
+		       GEN6_GT_MODE, 0,
+		       _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+		       GEN6_WIZ_HASHING_16x4);
+
+		/* WaDisable_RenderCache_OperationalFlush:snb */
+		wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
+
 		/*
 		 * From the Sandybridge PRM, volume 1 part 3, page 24:
 		 * "If this bit is set, STCunit will have LRA as replacement
@@ -2067,39 +2081,6 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
 	wa_list_apply(engine->uncore, &engine->wa_list);
 }
 
-static struct i915_vma *
-create_scratch(struct i915_address_space *vm, int count)
-{
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	unsigned int size;
-	int err;
-
-	size = round_up(count * sizeof(u32), PAGE_SIZE);
-	obj = i915_gem_object_create_internal(vm->i915, size);
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
-
-	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
-
-	vma = i915_vma_instance(obj, vm, NULL);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		goto err_obj;
-	}
-
-	err = i915_vma_pin(vma, 0, 0,
-			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
-	if (err)
-		goto err_obj;
-
-	return vma;
-
-err_obj:
-	i915_gem_object_put(obj);
-	return ERR_PTR(err);
-}
-
 struct mcr_range {
 	u32 start;
 	u32 end;
@@ -2202,7 +2183,8 @@ static int engine_wa_list_verify(struct intel_context *ce,
 	if (!wal->count)
 		return 0;
 
-	vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
+	vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
+					   wal->count * sizeof(u32));
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 2f830017c51d994aa28b8ae5aeba864a0e66ddfb..4b4f03b70df7a607ec193e8315dbc75d6199434b 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -245,17 +245,6 @@ static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	GEM_BUG_ON(stalled);
 }
 
-static void mark_eio(struct i915_request *rq)
-{
-	if (i915_request_completed(rq))
-		return;
-
-	GEM_BUG_ON(i915_request_signaled(rq));
-
-	i915_request_set_error_once(rq, -EIO);
-	i915_request_mark_complete(rq);
-}
-
 static void mock_reset_cancel(struct intel_engine_cs *engine)
 {
 	struct mock_engine *mock =
@@ -269,12 +258,12 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
 
 	/* Mark all submitted requests as skipped. */
 	list_for_each_entry(rq, &engine->active.requests, sched.link)
-		mark_eio(rq);
+		i915_request_mark_eio(rq);
 	intel_engine_signal_breadcrumbs(engine);
 
 	/* Cancel and submit all pending requests. */
 	list_for_each_entry(rq, &mock->hw_queue, mock.link) {
-		mark_eio(rq);
+		i915_request_mark_eio(rq);
 		__i915_request_submit(rq);
 	}
 	INIT_LIST_HEAD(&mock->hw_queue);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index 1f4020e906a8aecf64503f97da76176cc8a6be07..db738d4001685161b01a054f198277b238f1bd9d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -25,7 +25,7 @@ static int request_sync(struct i915_request *rq)
 	/* Opencode i915_request_add() so we can keep the timeline locked. */
 	__i915_request_commit(rq);
 	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
-	__i915_request_queue(rq, NULL);
+	__i915_request_queue_bh(rq);
 
 	timeout = i915_request_wait(rq, 0, HZ / 10);
 	if (timeout < 0)
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 729c3c7b11e278e7b56f2bf68a734cd86852bff1..439c8984f5fae7b89c333dd9ec420caacf70e6e2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -6,6 +6,7 @@
 
 #include <linux/sort.h>
 
+#include "intel_gpu_commands.h"
 #include "intel_gt_pm.h"
 #include "intel_rps.h"
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index b88aa35ad75b6bbdaef192b565b125753b125939..223ab88f7e5741f82c9b0e18b574defef384bc76 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -197,6 +197,7 @@ static int cmp_u32(const void *_a, const void *_b)
 
 static int __live_heartbeat_fast(struct intel_engine_cs *engine)
 {
+	const unsigned int error_threshold = max(20000u, jiffies_to_usecs(6));
 	struct intel_context *ce;
 	struct i915_request *rq;
 	ktime_t t0, t1;
@@ -254,12 +255,18 @@ static int __live_heartbeat_fast(struct intel_engine_cs *engine)
 		times[0],
 		times[ARRAY_SIZE(times) - 1]);
 
-	/* Min work delay is 2 * 2 (worst), +1 for scheduling, +1 for slack */
-	if (times[ARRAY_SIZE(times) / 2] > jiffies_to_usecs(6)) {
+	/*
+	 * Ideally, the upper bound on min work delay would be something like
+	 * 2 * 2 (worst), +1 for scheduling, +1 for slack. In practice, we
+	 * are, even with system_wq_highpri, at the mercy of the CPU scheduler
+	 * and may be stuck behind some slow work for many milliseconds,
+	 * such as our very own display workers.
+	 */
+	if (times[ARRAY_SIZE(times) / 2] > error_threshold) {
 		pr_err("%s: Heartbeat delay was %uus, expected less than %dus\n",
 		       engine->name,
 		       times[ARRAY_SIZE(times) / 2],
-		       jiffies_to_usecs(6));
+		       error_threshold);
 		err = -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index b08fc5390e8afcbfc17387d1eedf21b4b2ea0f32..c3d965279fc3a7ed8bfe2ad3bb6d390004a32c6e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -4,13 +4,215 @@
  * Copyright © 2018 Intel Corporation
  */
 
+#include <linux/sort.h>
+
 #include "i915_selftest.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt_clock_utils.h"
 #include "selftest_engine.h"
 #include "selftest_engine_heartbeat.h"
 #include "selftests/igt_atomic.h"
 #include "selftests/igt_flush_test.h"
 #include "selftests/igt_spinner.h"
 
+#define COUNT 5
+
+static int cmp_u64(const void *A, const void *B)
+{
+	const u64 *a = A, *b = B;
+
+	/* Do not truncate the u64 difference to an int */
+	if (*a < *b)
+		return -1;
+	if (*a > *b)
+		return 1;
+	return 0;
+}
+
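+/*
+ * Reject outliers: sort the five samples and return a weighted
+ * average of the middle three, (a[1] + 2 * a[2] + a[3]) / 4.
+ */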
+static u64 trifilter(u64 *a)
+{
+	sort(a, COUNT, sizeof(*a), cmp_u64, NULL);
+	return (a[1] + 2 * a[2] + a[3]) >> 2;
+}
+
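+/* Emit a MI_SEMAPHORE_WAIT that polls a GGTT dword against value */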
+static u32 *emit_wait(u32 *cs, u32 offset, int op, u32 value)
+{
+	*cs++ = MI_SEMAPHORE_WAIT |
+		MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_POLL |
+		op;
+	*cs++ = value;
+	*cs++ = offset;
+	*cs++ = 0;
+
+	return cs;
+}
+
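+/* Emit a MI_STORE_DWORD_IMM writing an immediate to a GGTT dword */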
+static u32 *emit_store(u32 *cs, u32 offset, u32 value)
+{
+	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+	*cs++ = offset;
+	*cs++ = 0;
+	*cs++ = value;
+
+	return cs;
+}
+
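+/* Emit a MI_STORE_REGISTER_MEM capturing a register into the GGTT */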
+static u32 *emit_srm(u32 *cs, i915_reg_t reg, u32 offset)
+{
+	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+	*cs++ = i915_mmio_reg_offset(reg);
+	*cs++ = offset;
+	*cs++ = 0;
+
+	return cs;
+}
+
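+/* Publish the semaphore update so the polling GPU observes it */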
+static void write_semaphore(u32 *x, u32 value)
+{
+	WRITE_ONCE(*x, value);
+	wmb();
+}
+
+static int __measure_timestamps(struct intel_context *ce,
+				u64 *dt, u64 *d_ring, u64 *d_ctx)
+{
+	struct intel_engine_cs *engine = ce->engine;
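+	/* sema[] occupies dwords 1000-1004 (byte 4000) of the status page */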
+	u32 *sema = memset32(engine->status_page.addr + 1000, 0, 5);
+	u32 offset = i915_ggtt_offset(engine->status_page.vma);
+	struct i915_request *rq;
+	u32 *cs;
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	cs = intel_ring_begin(rq, 28);
+	if (IS_ERR(cs)) {
+		i915_request_add(rq);
+		return PTR_ERR(cs);
+	}
+
+	/* Signal & wait for start */
+	cs = emit_store(cs, offset + 4008, 1);
+	cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_NEQ_SDD, 1);
+
+	cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4000);
+	cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4004);
+
+	/* Busy wait */
+	cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_EQ_SDD, 1);
+
+	cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4016);
+	cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4012);
+
+	intel_ring_advance(rq, cs);
+	i915_request_get(rq);
+	i915_request_add(rq);
+	intel_engine_flush_submission(engine);
+
+	/* Wait for the request to start executing, that then waits for us */
+	while (READ_ONCE(sema[2]) == 0)
+		cpu_relax();
+
+	/* Run the request for 100us, sampling timestamps before/after */
+	preempt_disable();
+	*dt = local_clock();
+	write_semaphore(&sema[2], 0);
+	udelay(100);
+	*dt = local_clock() - *dt;
+	write_semaphore(&sema[2], 1);
+	preempt_enable();
+
+	if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+		i915_request_put(rq);
+		return -ETIME;
+	}
+	i915_request_put(rq);
+
+	pr_debug("%s CTX_TIMESTAMP: [%x, %x], RING_TIMESTAMP: [%x, %x]\n",
+		 engine->name, sema[1], sema[3], sema[0], sema[4]);
+
+	*d_ctx = sema[3] - sema[1];
+	*d_ring = sema[4] - sema[0];
+	return 0;
+}
+
+static int __live_engine_timestamps(struct intel_engine_cs *engine)
+{
+	u64 s_ring[COUNT], s_ctx[COUNT], st[COUNT], d_ring, d_ctx, dt;
+	struct intel_context *ce;
+	int i, err = 0;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	for (i = 0; i < COUNT; i++) {
+		err = __measure_timestamps(ce, &st[i], &s_ring[i], &s_ctx[i]);
+		if (err)
+			break;
+	}
+	intel_context_put(ce);
+	if (err)
+		return err;
+
+	dt = trifilter(st);
+	d_ring = trifilter(s_ring);
+	d_ctx = trifilter(s_ctx);
+
+	pr_info("%s elapsed:%lldns, CTX_TIMESTAMP:%lldns, RING_TIMESTAMP:%lldns\n",
+		engine->name, dt,
+		intel_gt_clock_interval_to_ns(engine->gt, d_ctx),
+		intel_gt_clock_interval_to_ns(engine->gt, d_ring));
+
+	d_ring = intel_gt_clock_interval_to_ns(engine->gt, d_ring);
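+	/* Expect the ring clock and walltime to agree to within ~25% */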
+	if (3 * dt > 4 * d_ring || 4 * dt < 3 * d_ring) {
+		pr_err("%s Mismatch between ring timestamp and walltime!\n",
+		       engine->name);
+		return -EINVAL;
+	}
+
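+	/* Recompute the raw tick counts; d_ring was converted to ns above */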
+	d_ring = trifilter(s_ring);
+	d_ctx = trifilter(s_ctx);
+
+	d_ctx *= engine->gt->clock_frequency;
+	if (IS_ICELAKE(engine->i915))
+		d_ring *= 12500000; /* Fixed 80ns for icl ctx timestamp? */
+	else
+		d_ring *= engine->gt->clock_frequency;
+
+	if (3 * d_ctx > 4 * d_ring || 4 * d_ctx < 3 * d_ring) {
+		pr_err("%s Mismatch between ring and context timestamps!\n",
+		       engine->name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int live_engine_timestamps(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/*
+	 * Check that CS_TIMESTAMP / CTX_TIMESTAMP are in sync, i.e. share
+	 * the same CS clock.
+	 */
+
+	if (INTEL_GEN(gt->i915) < 8)
+		return 0;
+
+	for_each_engine(engine, gt, id) {
+		int err;
+
+		st_engine_heartbeat_disable(engine);
+		err = __live_engine_timestamps(engine);
+		st_engine_heartbeat_enable(engine);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int live_engine_busy_stats(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -177,6 +379,7 @@ static int live_engine_pm(void *arg)
 int live_engine_pm_selftests(struct intel_gt *gt)
 {
 	static const struct i915_subtest tests[] = {
+		SUBTEST(live_engine_timestamps),
 		SUBTEST(live_engine_busy_stats),
 		SUBTEST(live_engine_pm),
 	};
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
new file mode 100644
index 0000000000000000000000000000000000000000..264b5ebdb021f0e7ac3d45024ced166e4290e827
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -0,0 +1,4741 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_reset.h"
+#include "gt/selftest_engine_heartbeat.h"
+
+#include "i915_selftest.h"
+#include "selftests/i915_random.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_live_test.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/lib_sw_fence.h"
+
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+
+#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
+#define NUM_GPR 16
+#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
+
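+/* Has the request reached the HW: executing, on hold, or already started? */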
+static bool is_active(struct i915_request *rq)
+{
+	if (i915_request_is_active(rq))
+		return true;
+
+	if (i915_request_on_hold(rq))
+		return true;
+
+	if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
+		return true;
+
+	return false;
+}
+
+static int wait_for_submit(struct intel_engine_cs *engine,
+			   struct i915_request *rq,
+			   unsigned long timeout)
+{
+	/* Ignore our own attempts to suppress excess tasklets */
+	tasklet_hi_schedule(&engine->execlists.tasklet);
+
+	timeout += jiffies;
+	do {
+		bool done = time_after(jiffies, timeout);
+
+		if (i915_request_completed(rq)) /* that was quick! */
+			return 0;
+
+		/* Wait until the HW has acknowledged the submission (or err) */
+		intel_engine_flush_submission(engine);
+		if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
+			return 0;
+
+		if (done)
+			return -ETIME;
+
+		cond_resched();
+	} while (1);
+}
+
+static int wait_for_reset(struct intel_engine_cs *engine,
+			  struct i915_request *rq,
+			  unsigned long timeout)
+{
+	timeout += jiffies;
+
+	do {
+		cond_resched();
+		intel_engine_flush_submission(engine);
+
+		if (READ_ONCE(engine->execlists.pending[0]))
+			continue;
+
+		if (i915_request_completed(rq))
+			break;
+
+		if (READ_ONCE(rq->fence.error))
+			break;
+	} while (time_before(jiffies, timeout));
+
+	flush_scheduled_work();
+
+	if (rq->fence.error != -EIO) {
+		pr_err("%s: hanging request %llx:%lld not reset\n",
+		       engine->name,
+		       rq->fence.context,
+		       rq->fence.seqno);
+		return -EINVAL;
+	}
+
+	/* Give the request a jiffie to complete after flushing the worker */
+	if (i915_request_wait(rq, 0,
+			      max(0l, (long)(timeout - jiffies)) + 1) < 0) {
+		pr_err("%s: hanging request %llx:%lld did not complete\n",
+		       engine->name,
+		       rq->fence.context,
+		       rq->fence.seqno);
+		return -ETIME;
+	}
+
+	return 0;
+}
+
+static int live_sanitycheck(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	struct igt_spinner spin;
+	int err = 0;
+
+	if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
+		return 0;
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce;
+		struct i915_request *rq;
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			break;
+		}
+
+		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out_ctx;
+		}
+
+		i915_request_add(rq);
+		if (!igt_wait_for_spinner(&spin, rq)) {
+			GEM_TRACE("spinner failed to start\n");
+			GEM_TRACE_DUMP();
+			intel_gt_set_wedged(gt);
+			err = -EIO;
+			goto out_ctx;
+		}
+
+		igt_spinner_end(&spin);
+		if (igt_flush_test(gt->i915)) {
+			err = -EIO;
+			goto out_ctx;
+		}
+
+out_ctx:
+		intel_context_put(ce);
+		if (err)
+			break;
+	}
+
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int live_unlite_restore(struct intel_gt *gt, int prio)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	struct igt_spinner spin;
+	int err = -ENOMEM;
+
+	/*
+	 * Check that we can correctly context switch between 2 instances
+	 * on the same engine from the same parent context.
+	 */
+
+	if (igt_spinner_init(&spin, gt))
+		return err;
+
+	err = 0;
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce[2] = {};
+		struct i915_request *rq[2];
+		struct igt_live_test t;
+		int n;
+
+		if (prio && !intel_engine_has_preemption(engine))
+			continue;
+
+		if (!intel_engine_can_store_dword(engine))
+			continue;
+
+		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+			err = -EIO;
+			break;
+		}
+		st_engine_heartbeat_disable(engine);
+
+		for (n = 0; n < ARRAY_SIZE(ce); n++) {
+			struct intel_context *tmp;
+
+			tmp = intel_context_create(engine);
+			if (IS_ERR(tmp)) {
+				err = PTR_ERR(tmp);
+				goto err_ce;
+			}
+
+			err = intel_context_pin(tmp);
+			if (err) {
+				intel_context_put(tmp);
+				goto err_ce;
+			}
+
+			/*
+			 * Setup the pair of contexts such that if we
+			 * lite-restore using the RING_TAIL from ce[1] it
+			 * will execute garbage from ce[0]->ring.
+			 */
+			memset(tmp->ring->vaddr,
+			       POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
+			       tmp->ring->vma->size);
+
+			ce[n] = tmp;
+		}
+		GEM_BUG_ON(!ce[1]->ring->size);
+		intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
+		lrc_update_regs(ce[1], engine, ce[1]->ring->head);
+
+		rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+		if (IS_ERR(rq[0])) {
+			err = PTR_ERR(rq[0]);
+			goto err_ce;
+		}
+
+		i915_request_get(rq[0]);
+		i915_request_add(rq[0]);
+		GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
+
+		if (!igt_wait_for_spinner(&spin, rq[0])) {
+			i915_request_put(rq[0]);
+			goto err_ce;
+		}
+
+		rq[1] = i915_request_create(ce[1]);
+		if (IS_ERR(rq[1])) {
+			err = PTR_ERR(rq[1]);
+			i915_request_put(rq[0]);
+			goto err_ce;
+		}
+
+		if (!prio) {
+			/*
+			 * Ensure we do the switch to ce[1] on completion.
+			 *
+			 * rq[0] is already submitted, so this should reduce
+			 * to a no-op (a wait on a request on the same engine
+			 * uses the submit fence, not the completion fence),
+			 * but it will install a dependency on rq[1] for rq[0]
+			 * that will prevent the pair being reordered by
+			 * timeslicing.
+			 */
+			i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+		}
+
+		i915_request_get(rq[1]);
+		i915_request_add(rq[1]);
+		GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
+		i915_request_put(rq[0]);
+
+		if (prio) {
+			struct i915_sched_attr attr = {
+				.priority = prio,
+			};
+
+			/* Alternatively preempt the spinner with ce[1] */
+			engine->schedule(rq[1], &attr);
+		}
+
+		/* And switch back to ce[0] for good measure */
+		rq[0] = i915_request_create(ce[0]);
+		if (IS_ERR(rq[0])) {
+			err = PTR_ERR(rq[0]);
+			i915_request_put(rq[1]);
+			goto err_ce;
+		}
+
+		i915_request_await_dma_fence(rq[0], &rq[1]->fence);
+		i915_request_get(rq[0]);
+		i915_request_add(rq[0]);
+		GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
+		i915_request_put(rq[1]);
+		i915_request_put(rq[0]);
+
+err_ce:
+		intel_engine_flush_submission(engine);
+		igt_spinner_end(&spin);
+		for (n = 0; n < ARRAY_SIZE(ce); n++) {
+			if (IS_ERR_OR_NULL(ce[n]))
+				break;
+
+			intel_context_unpin(ce[n]);
+			intel_context_put(ce[n]);
+		}
+
+		st_engine_heartbeat_enable(engine);
+		if (igt_live_test_end(&t))
+			err = -EIO;
+		if (err)
+			break;
+	}
+
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int live_unlite_switch(void *arg)
+{
+	return live_unlite_restore(arg, 0);
+}
+
+static int live_unlite_preempt(void *arg)
+{
+	return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
+}
+
+static int live_unlite_ring(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	struct igt_spinner spin;
+	enum intel_engine_id id;
+	int err = 0;
+
+	/*
+	 * Setup a preemption event that will cause almost the entire ring
+	 * to be unwound, potentially fooling our intel_ring_direction()
+	 * into emitting a forward lite-restore instead of the rollback.
+	 */
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce[2] = {};
+		struct i915_request *rq;
+		struct igt_live_test t;
+		int n;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		if (!intel_engine_can_store_dword(engine))
+			continue;
+
+		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+			err = -EIO;
+			break;
+		}
+		st_engine_heartbeat_disable(engine);
+
+		for (n = 0; n < ARRAY_SIZE(ce); n++) {
+			struct intel_context *tmp;
+
+			tmp = intel_context_create(engine);
+			if (IS_ERR(tmp)) {
+				err = PTR_ERR(tmp);
+				goto err_ce;
+			}
+
+			err = intel_context_pin(tmp);
+			if (err) {
+				intel_context_put(tmp);
+				goto err_ce;
+			}
+
+			memset32(tmp->ring->vaddr,
+				 0xdeadbeef, /* trigger a hang if executed */
+				 tmp->ring->vma->size / sizeof(u32));
+
+			ce[n] = tmp;
+		}
+
+		/* Create max prio spinner, followed by N low prio nops */
+		rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_ce;
+		}
+
+		i915_request_get(rq);
+		rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+		i915_request_add(rq);
+
+		if (!igt_wait_for_spinner(&spin, rq)) {
+			intel_gt_set_wedged(gt);
+			i915_request_put(rq);
+			err = -ETIME;
+			goto err_ce;
+		}
+
+		/* Fill the ring, until we will cause a wrap */
+		n = 0;
+		while (intel_ring_direction(ce[0]->ring,
+					    rq->wa_tail,
+					    ce[0]->ring->tail) <= 0) {
+			struct i915_request *tmp;
+
+			tmp = intel_context_create_request(ce[0]);
+			if (IS_ERR(tmp)) {
+				err = PTR_ERR(tmp);
+				i915_request_put(rq);
+				goto err_ce;
+			}
+
+			i915_request_add(tmp);
+			intel_engine_flush_submission(engine);
+			n++;
+		}
+		intel_engine_flush_submission(engine);
+		pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
+			 engine->name, n,
+			 ce[0]->ring->size,
+			 ce[0]->ring->tail,
+			 ce[0]->ring->emit,
+			 rq->tail);
+		GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
+						rq->tail,
+						ce[0]->ring->tail) <= 0);
+		i915_request_put(rq);
+
+		/* Create a second ring to preempt the first ring after rq[0] */
+		rq = intel_context_create_request(ce[1]);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_ce;
+		}
+
+		rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+		i915_request_get(rq);
+		i915_request_add(rq);
+
+		err = wait_for_submit(engine, rq, HZ / 2);
+		i915_request_put(rq);
+		if (err) {
+			pr_err("%s: preemption request was not submitted\n",
+			       engine->name);
+			err = -ETIME;
+		}
+
+		pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
+			 engine->name,
+			 ce[0]->ring->tail, ce[0]->ring->emit,
+			 ce[1]->ring->tail, ce[1]->ring->emit);
+
+err_ce:
+		intel_engine_flush_submission(engine);
+		igt_spinner_end(&spin);
+		for (n = 0; n < ARRAY_SIZE(ce); n++) {
+			if (IS_ERR_OR_NULL(ce[n]))
+				break;
+
+			intel_context_unpin(ce[n]);
+			intel_context_put(ce[n]);
+		}
+		st_engine_heartbeat_enable(engine);
+		if (igt_live_test_end(&t))
+			err = -EIO;
+		if (err)
+			break;
+	}
+
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int live_pin_rewind(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = 0;
+
+	/*
+	 * We have to be careful not to trust intel_ring too much, for example
+	 * ring->head is updated upon retire, which is out of sync with pinning
+	 * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
+	 * or else we risk writing an older, stale value.
+	 *
+	 * To simulate this, let's apply a bit of deliberate sabotage.
+	 */
+
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce;
+		struct i915_request *rq;
+		struct intel_ring *ring;
+		struct igt_live_test t;
+
+		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+			err = -EIO;
+			break;
+		}
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			break;
+		}
+
+		err = intel_context_pin(ce);
+		if (err) {
+			intel_context_put(ce);
+			break;
+		}
+
+		/* Keep the context awake while we play games */
+		err = i915_active_acquire(&ce->active);
+		if (err) {
+			intel_context_unpin(ce);
+			intel_context_put(ce);
+			break;
+		}
+		ring = ce->ring;
+
+		/* Poison the ring, and offset the next request from HEAD */
+		memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
+		ring->emit = ring->size / 2;
+		ring->tail = ring->emit;
+		GEM_BUG_ON(ring->head);
+
+		intel_context_unpin(ce);
+
+		/* Submit a simple nop request */
+		GEM_BUG_ON(intel_context_is_pinned(ce));
+		rq = intel_context_create_request(ce);
+		i915_active_release(&ce->active); /* e.g. async retire */
+		intel_context_put(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			break;
+		}
+		GEM_BUG_ON(!rq->head);
+		i915_request_add(rq);
+
+		/* Expect not to hang! */
+		if (igt_live_test_end(&t)) {
+			err = -EIO;
+			break;
+		}
+	}
+
+	return err;
+}
+
+static int live_hold_reset(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	struct igt_spinner spin;
+	int err = 0;
+
+	/*
+	 * In order to support offline error capture for fast preempt reset,
+	 * we need to decouple the guilty request and ensure that it and its
+	 * descendants are not executed while the capture is in progress.
+	 */
+
+	if (!intel_has_reset_engine(gt))
+		return 0;
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce;
+		struct i915_request *rq;
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			break;
+		}
+
+		st_engine_heartbeat_disable(engine);
+
+		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out;
+		}
+		i915_request_add(rq);
+
+		if (!igt_wait_for_spinner(&spin, rq)) {
+			intel_gt_set_wedged(gt);
+			err = -ETIME;
+			goto out;
+		}
+
+		/* We have our request executing, now remove it and reset */
+
+		local_bh_disable();
+		if (test_and_set_bit(I915_RESET_ENGINE + id,
+				     &gt->reset.flags)) {
+			local_bh_enable();
+			intel_gt_set_wedged(gt);
+			err = -EBUSY;
+			goto out;
+		}
+		tasklet_disable(&engine->execlists.tasklet);
+
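+		/* Process the submission queue by hand while the tasklet is off */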
+		engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+		GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+		i915_request_get(rq);
+		execlists_hold(engine, rq);
+		GEM_BUG_ON(!i915_request_on_hold(rq));
+
+		__intel_engine_reset_bh(engine, NULL);
+		GEM_BUG_ON(rq->fence.error != -EIO);
+
+		tasklet_enable(&engine->execlists.tasklet);
+		clear_and_wake_up_bit(I915_RESET_ENGINE + id,
+				      &gt->reset.flags);
+		local_bh_enable();
+
+		/* Check that we do not resubmit the held request */
+		if (!i915_request_wait(rq, 0, HZ / 5)) {
+			pr_err("%s: on hold request completed!\n",
+			       engine->name);
+			i915_request_put(rq);
+			err = -EIO;
+			goto out;
+		}
+		GEM_BUG_ON(!i915_request_on_hold(rq));
+
+		/* But is resubmitted on release */
+		execlists_unhold(engine, rq);
+		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+			pr_err("%s: held request did not complete!\n",
+			       engine->name);
+			intel_gt_set_wedged(gt);
+			err = -ETIME;
+		}
+		i915_request_put(rq);
+
+out:
+		st_engine_heartbeat_enable(engine);
+		intel_context_put(ce);
+		if (err)
+			break;
+	}
+
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static const char *error_repr(int err)
+{
+	return err ? "bad" : "good";
+}
+
+static int live_error_interrupt(void *arg)
+{
+	static const struct error_phase {
+		enum { GOOD = 0, BAD = -EIO } error[2];
+	} phases[] = {
+		{ { BAD,  GOOD } },
+		{ { BAD,  BAD  } },
+		{ { BAD,  GOOD } },
+		{ { GOOD, GOOD } }, /* sentinel */
+	};
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/*
+	 * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
+	 * of invalid commands in user batches that will cause a GPU hang.
+	 * This is a faster mechanism than using hangcheck/heartbeats, but
+	 * only detects problems the HW knows about -- it will not warn when
+	 * we kill the HW!
+	 *
+	 * To verify our detection and reset, we throw some invalid commands
+	 * at the HW and wait for the interrupt.
+	 */
+
+	if (!intel_has_reset_engine(gt))
+		return 0;
+
+	for_each_engine(engine, gt, id) {
+		const struct error_phase *p;
+		int err = 0;
+
+		st_engine_heartbeat_disable(engine);
+
+		for (p = phases; p->error[0] != GOOD; p++) {
+			struct i915_request *client[ARRAY_SIZE(phases->error)];
+			u32 *cs;
+			int i;
+
+			memset(client, 0, sizeof(client));
+			for (i = 0; i < ARRAY_SIZE(client); i++) {
+				struct intel_context *ce;
+				struct i915_request *rq;
+
+				ce = intel_context_create(engine);
+				if (IS_ERR(ce)) {
+					err = PTR_ERR(ce);
+					goto out;
+				}
+
+				rq = intel_context_create_request(ce);
+				intel_context_put(ce);
+				if (IS_ERR(rq)) {
+					err = PTR_ERR(rq);
+					goto out;
+				}
+
+				if (rq->engine->emit_init_breadcrumb) {
+					err = rq->engine->emit_init_breadcrumb(rq);
+					if (err) {
+						i915_request_add(rq);
+						goto out;
+					}
+				}
+
+				cs = intel_ring_begin(rq, 2);
+				if (IS_ERR(cs)) {
+					i915_request_add(rq);
+					err = PTR_ERR(cs);
+					goto out;
+				}
+
+				if (p->error[i]) {
+					*cs++ = 0xdeadbeef;
+					*cs++ = 0xdeadbeef;
+				} else {
+					*cs++ = MI_NOOP;
+					*cs++ = MI_NOOP;
+				}
+
+				client[i] = i915_request_get(rq);
+				i915_request_add(rq);
+			}
+
+			err = wait_for_submit(engine, client[0], HZ / 2);
+			if (err) {
+				pr_err("%s: first request did not start within time!\n",
+				       engine->name);
+				err = -ETIME;
+				goto out;
+			}
+
+			for (i = 0; i < ARRAY_SIZE(client); i++) {
+				if (i915_request_wait(client[i], 0, HZ / 5) < 0)
+					pr_debug("%s: %s request incomplete!\n",
+						 engine->name,
+						 error_repr(p->error[i]));
+
+				if (!i915_request_started(client[i])) {
+					pr_err("%s: %s request not started!\n",
+					       engine->name,
+					       error_repr(p->error[i]));
+					err = -ETIME;
+					goto out;
+				}
+
+				/* Kick the tasklet to process the error */
+				intel_engine_flush_submission(engine);
+				if (client[i]->fence.error != p->error[i]) {
+					pr_err("%s: %s request (%s) with wrong error code: %d\n",
+					       engine->name,
+					       error_repr(p->error[i]),
+					       i915_request_completed(client[i]) ? "completed" : "running",
+					       client[i]->fence.error);
+					err = -EINVAL;
+					goto out;
+				}
+			}
+
+out:
+			for (i = 0; i < ARRAY_SIZE(client); i++)
+				if (client[i])
+					i915_request_put(client[i]);
+			if (err) {
+				pr_err("%s: failed at phase[%zd] { %d, %d }\n",
+				       engine->name, p - phases,
+				       p->error[0], p->error[1]);
+				break;
+			}
+		}
+
+		st_engine_heartbeat_enable(engine);
+		if (err) {
+			intel_gt_set_wedged(gt);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int
+emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
+{
+	u32 *cs;
+
+	cs = intel_ring_begin(rq, 10);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+	*cs++ = MI_SEMAPHORE_WAIT |
+		MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_POLL |
+		MI_SEMAPHORE_SAD_NEQ_SDD;
+	*cs++ = 0;
+	*cs++ = i915_ggtt_offset(vma) + 4 * idx;
+	*cs++ = 0;
+
+	if (idx > 0) {
+		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+		*cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+		*cs++ = 0;
+		*cs++ = 1;
+	} else {
+		*cs++ = MI_NOOP;
+		*cs++ = MI_NOOP;
+		*cs++ = MI_NOOP;
+		*cs++ = MI_NOOP;
+	}
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+	intel_ring_advance(rq, cs);
+	return 0;
+}
+
+static struct i915_request *
+semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
+{
+	struct intel_context *ce;
+	struct i915_request *rq;
+	int err;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return ERR_CAST(ce);
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		goto out_ce;
+
+	err = 0;
+	if (rq->engine->emit_init_breadcrumb)
+		err = rq->engine->emit_init_breadcrumb(rq);
+	if (err == 0)
+		err = emit_semaphore_chain(rq, vma, idx);
+	if (err == 0)
+		i915_request_get(rq);
+	i915_request_add(rq);
+	if (err)
+		rq = ERR_PTR(err);
+
+out_ce:
+	intel_context_put(ce);
+	return rq;
+}
+
+static int
+release_queue(struct intel_engine_cs *engine,
+	      struct i915_vma *vma,
+	      int idx, int prio)
+{
+	struct i915_sched_attr attr = {
+		.priority = prio,
+	};
+	struct i915_request *rq;
+	u32 *cs;
+
+	rq = intel_engine_create_kernel_request(engine);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	cs = intel_ring_begin(rq, 4);
+	if (IS_ERR(cs)) {
+		i915_request_add(rq);
+		return PTR_ERR(cs);
+	}
+
+	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+	*cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+	*cs++ = 0;
+	*cs++ = 1;
+
+	intel_ring_advance(rq, cs);
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+
+	local_bh_disable();
+	engine->schedule(rq, &attr);
+	local_bh_enable(); /* kick tasklet */
+
+	i915_request_put(rq);
+
+	return 0;
+}
+
+static int
+slice_semaphore_queue(struct intel_engine_cs *outer,
+		      struct i915_vma *vma,
+		      int count)
+{
+	struct intel_engine_cs *engine;
+	struct i915_request *head;
+	enum intel_engine_id id;
+	int err, i, n = 0;
+
+	head = semaphore_queue(outer, vma, n++);
+	if (IS_ERR(head))
+		return PTR_ERR(head);
+
+	for_each_engine(engine, outer->gt, id) {
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		for (i = 0; i < count; i++) {
+			struct i915_request *rq;
+
+			rq = semaphore_queue(engine, vma, n++);
+			if (IS_ERR(rq)) {
+				err = PTR_ERR(rq);
+				goto out;
+			}
+
+			i915_request_put(rq);
+		}
+	}
+
+	err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
+	if (err)
+		goto out;
+
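+	/* The timeout (in jiffies) grows quadratically with the chain length */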
+	if (i915_request_wait(head, 0,
+			      2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
+		pr_err("%s: Failed to slice along semaphore chain of length (%d, %d)!\n",
+		       outer->name, count, n);
+		GEM_TRACE_DUMP();
+		intel_gt_set_wedged(outer->gt);
+		err = -EIO;
+	}
+
+out:
+	i915_request_put(head);
+	return err;
+}
+
+static int live_timeslice_preempt(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct drm_i915_gem_object *obj;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	struct i915_vma *vma;
+	void *vaddr;
+	int err = 0;
+
+	/*
+	 * If a request takes too long, we would like to give other users
+	 * a fair go on the GPU. In particular, users may create batches
+	 * that wait upon external input, where that input may even be
+	 * supplied by another GPU job. To avoid blocking forever, we
+	 * need to preempt the current task and replace it with another
+	 * ready task.
+	 */
+	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+		return 0;
+
+	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_obj;
+	}
+
+	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(vaddr)) {
+		err = PTR_ERR(vaddr);
+		goto err_obj;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+	if (err)
+		goto err_map;
+
+	err = i915_vma_sync(vma);
+	if (err)
+		goto err_pin;
+
+	for_each_engine(engine, gt, id) {
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		memset(vaddr, 0, PAGE_SIZE);
+
+		st_engine_heartbeat_disable(engine);
+		err = slice_semaphore_queue(engine, vma, 5);
+		st_engine_heartbeat_enable(engine);
+		if (err)
+			goto err_pin;
+
+		if (igt_flush_test(gt->i915)) {
+			err = -EIO;
+			goto err_pin;
+		}
+	}
+
+err_pin:
+	i915_vma_unpin(vma);
+err_map:
+	i915_gem_object_unpin_map(obj);
+err_obj:
+	i915_gem_object_put(obj);
+	return err;
+}
+
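+/*
+ * Emit a request that waits for slot[0] to reach idx, samples
+ * RING_TIMESTAMP into slot[idx], then writes idx + 1 to slot[0] to
+ * release the next waiter in the chain.
+ */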
+static struct i915_request *
+create_rewinder(struct intel_context *ce,
+		struct i915_request *wait,
+		void *slot, int idx)
+{
+	const u32 offset =
+		i915_ggtt_offset(ce->engine->status_page.vma) +
+		offset_in_page(slot);
+	struct i915_request *rq;
+	u32 *cs;
+	int err;
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		return rq;
+
+	if (wait) {
+		err = i915_request_await_dma_fence(rq, &wait->fence);
+		if (err)
+			goto err;
+	}
+
+	cs = intel_ring_begin(rq, 14);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err;
+	}
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+	*cs++ = MI_NOOP;
+
+	*cs++ = MI_SEMAPHORE_WAIT |
+		MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_POLL |
+		MI_SEMAPHORE_SAD_GTE_SDD;
+	*cs++ = idx;
+	*cs++ = offset;
+	*cs++ = 0;
+
+	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+	*cs++ = offset + idx * sizeof(u32);
+	*cs++ = 0;
+
+	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+	*cs++ = offset;
+	*cs++ = 0;
+	*cs++ = idx + 1;
+
+	intel_ring_advance(rq, cs);
+
+	rq->sched.attr.priority = I915_PRIORITY_MASK;
+	err = 0;
+err:
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (err) {
+		i915_request_put(rq);
+		return ERR_PTR(err);
+	}
+
+	return rq;
+}
+
+static int live_timeslice_rewind(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/*
+	 * The usual presumption on timeslice expiration is that we replace
+	 * the active context with another. However, given a chain of
+	 * dependencies we may end up with replacing the context with itself,
+	 * but only a few of those requests, forcing us to rewind the
+	 * RING_TAIL of the original request.
+	 */
+	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+		return 0;
+
+	for_each_engine(engine, gt, id) {
+		enum { A1, A2, B1 };
+		enum { X = 1, Z, Y };
+		struct i915_request *rq[3] = {};
+		struct intel_context *ce;
+		unsigned long timeslice;
+		int i, err = 0;
+		u32 *slot;
+
+		if (!intel_engine_has_timeslices(engine))
+			continue;
+
+		/*
+		 * A:rq1 -- semaphore wait, timestamp X
+		 * A:rq2 -- write timestamp Y
+		 *
+		 * B:rq1 [await A:rq1] -- write timestamp Z
+		 *
+		 * Force timeslice, release semaphore.
+		 *
+		 * Expect execution/evaluation order XZY
+		 */
+
+		st_engine_heartbeat_disable(engine);
+		timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
+		slot = memset32(engine->status_page.addr + 1000, 0, 4);
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto err;
+		}
+
+		rq[A1] = create_rewinder(ce, NULL, slot, X);
+		if (IS_ERR(rq[A1])) {
+			intel_context_put(ce);
+			goto err;
+		}
+
+		rq[A2] = create_rewinder(ce, NULL, slot, Y);
+		intel_context_put(ce);
+		if (IS_ERR(rq[A2]))
+			goto err;
+
+		err = wait_for_submit(engine, rq[A2], HZ / 2);
+		if (err) {
+			pr_err("%s: failed to submit first context\n",
+			       engine->name);
+			goto err;
+		}
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto err;
+		}
+
+		rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
+		intel_context_put(ce);
+		if (IS_ERR(rq[B1]))
+			goto err;
+
+		err = wait_for_submit(engine, rq[B1], HZ / 2);
+		if (err) {
+			pr_err("%s: failed to submit second context\n",
+			       engine->name);
+			goto err;
+		}
+
+		/* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
+		ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
+		while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
+			/* Wait for the timeslice to kick in */
+			del_timer(&engine->execlists.timer);
+			tasklet_hi_schedule(&engine->execlists.tasklet);
+			intel_engine_flush_submission(engine);
+		}
+		/* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
+		GEM_BUG_ON(!i915_request_is_active(rq[A1]));
+		GEM_BUG_ON(!i915_request_is_active(rq[B1]));
+		GEM_BUG_ON(i915_request_is_active(rq[A2]));
+
+		/* Release the hounds! */
+		slot[0] = 1;
+		wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
+
+		for (i = 1; i <= 3; i++) {
+			unsigned long timeout = jiffies + HZ / 2;
+
+			while (!READ_ONCE(slot[i]) &&
+			       time_before(jiffies, timeout))
+				;
+
+			if (!time_before(jiffies, timeout)) {
+				pr_err("%s: rq[%d] timed out\n",
+				       engine->name, i - 1);
+				err = -ETIME;
+				goto err;
+			}
+
+			pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
+		}
+
+		/* XZY: XZ < XY */
+		if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
+			pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
+			       engine->name,
+			       slot[Z] - slot[X],
+			       slot[Y] - slot[X]);
+			err = -EINVAL;
+		}
+
+err:
+		memset32(&slot[0], -1, 4);
+		wmb();
+
+		engine->props.timeslice_duration_ms = timeslice;
+		st_engine_heartbeat_enable(engine);
+		for (i = 0; i < 3; i++)
+			i915_request_put(rq[i]);
+		if (igt_flush_test(gt->i915))
+			err = -EIO;
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+	struct i915_request *rq;
+
+	rq = intel_engine_create_kernel_request(engine);
+	if (IS_ERR(rq))
+		return rq;
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+
+	return rq;
+}
+
+static long slice_timeout(struct intel_engine_cs *engine)
+{
+	long timeout;
+
+	/* Enough time for a timeslice to kick in, and kick out */
+	timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine));
+
+	/* Enough time for the nop request to complete */
+	timeout += HZ / 5;
+
+	return timeout + 1;
+}
+
+static int live_timeslice_queue(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct drm_i915_gem_object *obj;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	struct i915_vma *vma;
+	void *vaddr;
+	int err = 0;
+
+	/*
+	 * Make sure that even if ELSP[0] and ELSP[1] are filled, with
+	 * timeslicing between them disabled, we *do* enable timeslicing
+	 * if the queue demands it. (Normally, we do not submit if
+	 * ELSP[1] is already occupied, so we must rely on timeslicing to
+	 * eject ELSP[0] in favour of the queue.)
+	 */
+	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+		return 0;
+
+	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_obj;
+	}
+
+	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(vaddr)) {
+		err = PTR_ERR(vaddr);
+		goto err_obj;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+	if (err)
+		goto err_map;
+
+	err = i915_vma_sync(vma);
+	if (err)
+		goto err_pin;
+
+	for_each_engine(engine, gt, id) {
+		struct i915_sched_attr attr = {
+			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+		};
+		struct i915_request *rq, *nop;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		st_engine_heartbeat_disable(engine);
+		memset(vaddr, 0, PAGE_SIZE);
+
+		/* ELSP[0]: semaphore wait */
+		rq = semaphore_queue(engine, vma, 0);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_heartbeat;
+		}
+		engine->schedule(rq, &attr);
+		err = wait_for_submit(engine, rq, HZ / 2);
+		if (err) {
+			pr_err("%s: Timed out trying to submit semaphores\n",
+			       engine->name);
+			goto err_rq;
+		}
+
+		/* ELSP[1]: nop request */
+		nop = nop_request(engine);
+		if (IS_ERR(nop)) {
+			err = PTR_ERR(nop);
+			goto err_rq;
+		}
+		err = wait_for_submit(engine, nop, HZ / 2);
+		i915_request_put(nop);
+		if (err) {
+			pr_err("%s: Timed out trying to submit nop\n",
+			       engine->name);
+			goto err_rq;
+		}
+
+		GEM_BUG_ON(i915_request_completed(rq));
+		GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+		/* Queue: semaphore signal, matching priority as semaphore */
+		err = release_queue(engine, vma, 1, effective_prio(rq));
+		if (err)
+			goto err_rq;
+
+		/* Wait until we ack the release_queue and start timeslicing */
+		do {
+			cond_resched();
+			intel_engine_flush_submission(engine);
+		} while (READ_ONCE(engine->execlists.pending[0]));
+
+		/* Timeslice every jiffy, so within 2 we should signal */
+		if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
+			struct drm_printer p =
+				drm_info_printer(gt->i915->drm.dev);
+
+			pr_err("%s: Failed to timeslice into queue\n",
+			       engine->name);
+			intel_engine_dump(engine, &p,
+					  "%s\n", engine->name);
+
+			memset(vaddr, 0xff, PAGE_SIZE);
+			err = -EIO;
+		}
+err_rq:
+		i915_request_put(rq);
+err_heartbeat:
+		st_engine_heartbeat_enable(engine);
+		if (err)
+			break;
+	}
+
+err_pin:
+	i915_vma_unpin(vma);
+err_map:
+	i915_gem_object_unpin_map(obj);
+err_obj:
+	i915_gem_object_put(obj);
+	return err;
+}
+
+static int live_timeslice_nopreempt(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	struct igt_spinner spin;
+	int err = 0;
+
+	/*
+	 * We should not timeslice into a request that is marked with
+	 * I915_REQUEST_NOPREEMPT.
+	 */
+	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+		return 0;
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce;
+		struct i915_request *rq;
+		unsigned long timeslice;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			break;
+		}
+
+		st_engine_heartbeat_disable(engine);
+		timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
+		/* Create an unpreemptible spinner */
+
+		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+		intel_context_put(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out_heartbeat;
+		}
+
+		i915_request_get(rq);
+		i915_request_add(rq);
+
+		if (!igt_wait_for_spinner(&spin, rq)) {
+			i915_request_put(rq);
+			err = -ETIME;
+			goto out_spin;
+		}
+
+		set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
+		i915_request_put(rq);
+
+		/* Followed by a maximum priority barrier (heartbeat) */
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto out_spin;
+		}
+
+		rq = intel_context_create_request(ce);
+		intel_context_put(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out_spin;
+		}
+
+		rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+		i915_request_get(rq);
+		i915_request_add(rq);
+
+		/*
+		 * Wait until the barrier is in ELSP, and we know timeslicing
+		 * will have been activated.
+		 */
+		if (wait_for_submit(engine, rq, HZ / 2)) {
+			i915_request_put(rq);
+			err = -ETIME;
+			goto out_spin;
+		}
+
+		/*
+		 * Since the ELSP[0] request is unpreemptible, it should not
+		 * allow the maximum priority barrier through. Wait long
+		 * enough to see if it is timesliced in by mistake.
+		 */
+		if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
+			pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
+			       engine->name);
+			err = -EINVAL;
+		}
+		i915_request_put(rq);
+
+out_spin:
+		igt_spinner_end(&spin);
+out_heartbeat:
+		xchg(&engine->props.timeslice_duration_ms, timeslice);
+		st_engine_heartbeat_enable(engine);
+		if (err)
+			break;
+
+		if (igt_flush_test(gt->i915)) {
+			err = -EIO;
+			break;
+		}
+	}
+
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int live_busywait_preempt(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct i915_gem_context *ctx_hi, *ctx_lo;
+	struct intel_engine_cs *engine;
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	enum intel_engine_id id;
+	int err = -ENOMEM;
+	u32 *map;
+
+	/*
+	 * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
+	 * preempt the busywaits used to synchronise between rings.
+	 */
+
+	ctx_hi = kernel_context(gt->i915);
+	if (!ctx_hi)
+		return -ENOMEM;
+	ctx_hi->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+	ctx_lo = kernel_context(gt->i915);
+	if (!ctx_lo)
+		goto err_ctx_hi;
+	ctx_lo->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_ctx_lo;
+	}
+
+	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(map)) {
+		err = PTR_ERR(map);
+		goto err_obj;
+	}
+
+	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_map;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+	if (err)
+		goto err_map;
+
+	err = i915_vma_sync(vma);
+	if (err)
+		goto err_vma;
+
+	for_each_engine(engine, gt, id) {
+		struct i915_request *lo, *hi;
+		struct igt_live_test t;
+		u32 *cs;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		if (!intel_engine_can_store_dword(engine))
+			continue;
+
+		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+			err = -EIO;
+			goto err_vma;
+		}
+
+		/*
+		 * We create two requests. The low priority request
+		 * busywaits on a semaphore (inside the ringbuffer where
+		 * it should be preemptible) and the high priority request
+		 * uses a MI_STORE_DWORD_IMM to update the semaphore value
+		 * allowing the first request to complete. If preemption
+		 * fails, we hang instead.
+		 */
+
+		lo = igt_request_alloc(ctx_lo, engine);
+		if (IS_ERR(lo)) {
+			err = PTR_ERR(lo);
+			goto err_vma;
+		}
+
+		cs = intel_ring_begin(lo, 8);
+		if (IS_ERR(cs)) {
+			err = PTR_ERR(cs);
+			i915_request_add(lo);
+			goto err_vma;
+		}
+
+		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+		*cs++ = i915_ggtt_offset(vma);
+		*cs++ = 0;
+		*cs++ = 1;
+
+		/* XXX Do we need a flush + invalidate here? */
+
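+		/*
+		 * Poll (SAD_EQ_SDD) until the dword we just set to 1 reads
+		 * back as 0, i.e. spin in the ring until the high priority
+		 * request below overwrites the semaphore.
+		 */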
+		*cs++ = MI_SEMAPHORE_WAIT |
+			MI_SEMAPHORE_GLOBAL_GTT |
+			MI_SEMAPHORE_POLL |
+			MI_SEMAPHORE_SAD_EQ_SDD;
+		*cs++ = 0;
+		*cs++ = i915_ggtt_offset(vma);
+		*cs++ = 0;
+
+		intel_ring_advance(lo, cs);
+
+		i915_request_get(lo);
+		i915_request_add(lo);
+
+		if (wait_for(READ_ONCE(*map), 10)) {
+			i915_request_put(lo);
+			err = -ETIMEDOUT;
+			goto err_vma;
+		}
+
+		/* Low priority request should be busywaiting now */
+		if (i915_request_wait(lo, 0, 1) != -ETIME) {
+			i915_request_put(lo);
+			pr_err("%s: Busywaiting request did not!\n",
+			       engine->name);
+			err = -EIO;
+			goto err_vma;
+		}
+
+		hi = igt_request_alloc(ctx_hi, engine);
+		if (IS_ERR(hi)) {
+			err = PTR_ERR(hi);
+			i915_request_put(lo);
+			goto err_vma;
+		}
+
+		cs = intel_ring_begin(hi, 4);
+		if (IS_ERR(cs)) {
+			err = PTR_ERR(cs);
+			i915_request_add(hi);
+			i915_request_put(lo);
+			goto err_vma;
+		}
+
+		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+		*cs++ = i915_ggtt_offset(vma);
+		*cs++ = 0;
+		*cs++ = 0;
+
+		intel_ring_advance(hi, cs);
+		i915_request_add(hi);
+
+		if (i915_request_wait(lo, 0, HZ / 5) < 0) {
+			struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+
+			pr_err("%s: Failed to preempt semaphore busywait!\n",
+			       engine->name);
+
+			intel_engine_dump(engine, &p, "%s\n", engine->name);
+			GEM_TRACE_DUMP();
+
+			i915_request_put(lo);
+			intel_gt_set_wedged(gt);
+			err = -EIO;
+			goto err_vma;
+		}
+		GEM_BUG_ON(READ_ONCE(*map));
+		i915_request_put(lo);
+
+		if (igt_live_test_end(&t)) {
+			err = -EIO;
+			goto err_vma;
+		}
+	}
+
+	err = 0;
+err_vma:
+	i915_vma_unpin(vma);
+err_map:
+	i915_gem_object_unpin_map(obj);
+err_obj:
+	i915_gem_object_put(obj);
+err_ctx_lo:
+	kernel_context_close(ctx_lo);
+err_ctx_hi:
+	kernel_context_close(ctx_hi);
+	return err;
+}
+
+static struct i915_request *
+spinner_create_request(struct igt_spinner *spin,
+		       struct i915_gem_context *ctx,
+		       struct intel_engine_cs *engine,
+		       u32 arb)
+{
+	struct intel_context *ce;
+	struct i915_request *rq;
+
+	ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+	if (IS_ERR(ce))
+		return ERR_CAST(ce);
+
+	rq = igt_spinner_create_request(spin, ce, arb);
+	intel_context_put(ce);
+	return rq;
+}
+
+static int live_preempt(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct i915_gem_context *ctx_hi, *ctx_lo;
+	struct igt_spinner spin_hi, spin_lo;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = -ENOMEM;
+
+	if (igt_spinner_init(&spin_hi, gt))
+		return -ENOMEM;
+
+	if (igt_spinner_init(&spin_lo, gt))
+		goto err_spin_hi;
+
+	ctx_hi = kernel_context(gt->i915);
+	if (!ctx_hi)
+		goto err_spin_lo;
+	ctx_hi->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+	ctx_lo = kernel_context(gt->i915);
+	if (!ctx_lo)
+		goto err_ctx_hi;
+	ctx_lo->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+	for_each_engine(engine, gt, id) {
+		struct igt_live_test t;
+		struct i915_request *rq;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+
+		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+					    MI_ARB_CHECK);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_ctx_lo;
+		}
+
+		i915_request_add(rq);
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
+			GEM_TRACE("lo spinner failed to start\n");
+			GEM_TRACE_DUMP();
+			intel_gt_set_wedged(gt);
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+
+		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+					    MI_ARB_CHECK);
+		if (IS_ERR(rq)) {
+			igt_spinner_end(&spin_lo);
+			err = PTR_ERR(rq);
+			goto err_ctx_lo;
+		}
+
+		i915_request_add(rq);
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
+			GEM_TRACE("hi spinner failed to start\n");
+			GEM_TRACE_DUMP();
+			intel_gt_set_wedged(gt);
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
+
+		if (igt_live_test_end(&t)) {
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+	}
+
+	err = 0;
+err_ctx_lo:
+	kernel_context_close(ctx_lo);
+err_ctx_hi:
+	kernel_context_close(ctx_hi);
+err_spin_lo:
+	igt_spinner_fini(&spin_lo);
+err_spin_hi:
+	igt_spinner_fini(&spin_hi);
+	return err;
+}
+
+static int live_late_preempt(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct i915_gem_context *ctx_hi, *ctx_lo;
+	struct igt_spinner spin_hi, spin_lo;
+	struct intel_engine_cs *engine;
+	struct i915_sched_attr attr = {};
+	enum intel_engine_id id;
+	int err = -ENOMEM;
+
+	if (igt_spinner_init(&spin_hi, gt))
+		return -ENOMEM;
+
+	if (igt_spinner_init(&spin_lo, gt))
+		goto err_spin_hi;
+
+	ctx_hi = kernel_context(gt->i915);
+	if (!ctx_hi)
+		goto err_spin_lo;
+
+	ctx_lo = kernel_context(gt->i915);
+	if (!ctx_lo)
+		goto err_ctx_hi;
+
+	/* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
+	ctx_lo->sched.priority = I915_USER_PRIORITY(1);
+
+	for_each_engine(engine, gt, id) {
+		struct igt_live_test t;
+		struct i915_request *rq;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+
+		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+					    MI_ARB_CHECK);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_ctx_lo;
+		}
+
+		i915_request_add(rq);
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
+			pr_err("First context failed to start\n");
+			goto err_wedged;
+		}
+
+		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+					    MI_NOOP);
+		if (IS_ERR(rq)) {
+			igt_spinner_end(&spin_lo);
+			err = PTR_ERR(rq);
+			goto err_ctx_lo;
+		}
+
+		i915_request_add(rq);
+		if (igt_wait_for_spinner(&spin_hi, rq)) {
+			pr_err("Second context overtook first?\n");
+			goto err_wedged;
+		}
+
+		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+		engine->schedule(rq, &attr);
+
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
+			pr_err("High priority context failed to preempt the low priority context\n");
+			GEM_TRACE_DUMP();
+			goto err_wedged;
+		}
+
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
+
+		if (igt_live_test_end(&t)) {
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+	}
+
+	err = 0;
+err_ctx_lo:
+	kernel_context_close(ctx_lo);
+err_ctx_hi:
+	kernel_context_close(ctx_hi);
+err_spin_lo:
+	igt_spinner_fini(&spin_lo);
+err_spin_hi:
+	igt_spinner_fini(&spin_hi);
+	return err;
+
+err_wedged:
+	igt_spinner_end(&spin_hi);
+	igt_spinner_end(&spin_lo);
+	intel_gt_set_wedged(gt);
+	err = -EIO;
+	goto err_ctx_lo;
+}
+
+struct preempt_client {
+	struct igt_spinner spin;
+	struct i915_gem_context *ctx;
+};
+
+static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
+{
+	c->ctx = kernel_context(gt->i915);
+	if (!c->ctx)
+		return -ENOMEM;
+
+	if (igt_spinner_init(&c->spin, gt))
+		goto err_ctx;
+
+	return 0;
+
+err_ctx:
+	kernel_context_close(c->ctx);
+	return -ENOMEM;
+}
+
+static void preempt_client_fini(struct preempt_client *c)
+{
+	igt_spinner_fini(&c->spin);
+	kernel_context_close(c->ctx);
+}
+
+static int live_nopreempt(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	struct preempt_client a, b;
+	enum intel_engine_id id;
+	int err = -ENOMEM;
+
+	/*
+	 * Verify that we can disable preemption for an individual request
+	 * that is being observed and so must not be interrupted.
+	 */
+
+	if (preempt_client_init(gt, &a))
+		return -ENOMEM;
+	if (preempt_client_init(gt, &b))
+		goto err_client_a;
+	b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+
+	for_each_engine(engine, gt, id) {
+		struct i915_request *rq_a, *rq_b;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		engine->execlists.preempt_hang.count = 0;
+
+		rq_a = spinner_create_request(&a.spin,
+					      a.ctx, engine,
+					      MI_ARB_CHECK);
+		if (IS_ERR(rq_a)) {
+			err = PTR_ERR(rq_a);
+			goto err_client_b;
+		}
+
+		/* Low priority client, but unpreemptable! */
+		__set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
+
+		i915_request_add(rq_a);
+		if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+			pr_err("First client failed to start\n");
+			goto err_wedged;
+		}
+
+		rq_b = spinner_create_request(&b.spin,
+					      b.ctx, engine,
+					      MI_ARB_CHECK);
+		if (IS_ERR(rq_b)) {
+			err = PTR_ERR(rq_b);
+			goto err_client_b;
+		}
+
+		i915_request_add(rq_b);
+
+		/* B is much more important than A! (But A is unpreemptable.) */
+		GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
+
+		/* Wait long enough for preemption and timeslicing */
+		if (igt_wait_for_spinner(&b.spin, rq_b)) {
+			pr_err("Second client started too early!\n");
+			goto err_wedged;
+		}
+
+		igt_spinner_end(&a.spin);
+
+		if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+			pr_err("Second client failed to start\n");
+			goto err_wedged;
+		}
+
+		igt_spinner_end(&b.spin);
+
+		if (engine->execlists.preempt_hang.count) {
+			pr_err("Preemption recorded x%d; should have been suppressed!\n",
+			       engine->execlists.preempt_hang.count);
+			err = -EINVAL;
+			goto err_wedged;
+		}
+
+		if (igt_flush_test(gt->i915))
+			goto err_wedged;
+	}
+
+	err = 0;
+err_client_b:
+	preempt_client_fini(&b);
+err_client_a:
+	preempt_client_fini(&a);
+	return err;
+
+err_wedged:
+	igt_spinner_end(&b.spin);
+	igt_spinner_end(&a.spin);
+	intel_gt_set_wedged(gt);
+	err = -EIO;
+	goto err_client_b;
+}
+
+struct live_preempt_cancel {
+	struct intel_engine_cs *engine;
+	struct preempt_client a, b;
+};
+
+static int __cancel_active0(struct live_preempt_cancel *arg)
+{
+	struct i915_request *rq;
+	struct igt_live_test t;
+	int err;
+
+	/* Preempt cancel of ELSP0 */
+	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+	if (igt_live_test_begin(&t, arg->engine->i915,
+				__func__, arg->engine->name))
+		return -EIO;
+
+	rq = spinner_create_request(&arg->a.spin,
+				    arg->a.ctx, arg->engine,
+				    MI_ARB_CHECK);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	clear_bit(CONTEXT_BANNED, &rq->context->flags);
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+		err = -EIO;
+		goto out;
+	}
+
+	intel_context_set_banned(rq->context);
+	err = intel_engine_pulse(arg->engine);
+	if (err)
+		goto out;
+
+	err = wait_for_reset(arg->engine, rq, HZ / 2);
+	if (err) {
+		pr_err("Cancelled inflight0 request did not reset\n");
+		goto out;
+	}
+
+out:
+	i915_request_put(rq);
+	if (igt_live_test_end(&t))
+		err = -EIO;
+	return err;
+}
+
+static int __cancel_active1(struct live_preempt_cancel *arg)
+{
+	struct i915_request *rq[2] = {};
+	struct igt_live_test t;
+	int err;
+
+	/* Preempt cancel of ELSP1 */
+	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+	if (igt_live_test_begin(&t, arg->engine->i915,
+				__func__, arg->engine->name))
+		return -EIO;
+
+	rq[0] = spinner_create_request(&arg->a.spin,
+				       arg->a.ctx, arg->engine,
+				       MI_NOOP); /* no preemption */
+	if (IS_ERR(rq[0]))
+		return PTR_ERR(rq[0]);
+
+	clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
+	i915_request_get(rq[0]);
+	i915_request_add(rq[0]);
+	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+		err = -EIO;
+		goto out;
+	}
+
+	rq[1] = spinner_create_request(&arg->b.spin,
+				       arg->b.ctx, arg->engine,
+				       MI_ARB_CHECK);
+	if (IS_ERR(rq[1])) {
+		err = PTR_ERR(rq[1]);
+		goto out;
+	}
+
+	clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
+	i915_request_get(rq[1]);
+	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+	i915_request_add(rq[1]);
+	if (err)
+		goto out;
+
+	intel_context_set_banned(rq[1]->context);
+	err = intel_engine_pulse(arg->engine);
+	if (err)
+		goto out;
+
+	igt_spinner_end(&arg->a.spin);
+	err = wait_for_reset(arg->engine, rq[1], HZ / 2);
+	if (err)
+		goto out;
+
+	if (rq[0]->fence.error != 0) {
+		pr_err("Normal inflight0 request did not complete\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (rq[1]->fence.error != -EIO) {
+		pr_err("Cancelled inflight1 request did not report -EIO\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+out:
+	i915_request_put(rq[1]);
+	i915_request_put(rq[0]);
+	if (igt_live_test_end(&t))
+		err = -EIO;
+	return err;
+}
+
+static int __cancel_queued(struct live_preempt_cancel *arg)
+{
+	struct i915_request *rq[3] = {};
+	struct igt_live_test t;
+	int err;
+
+	/* Full ELSP and one in the wings */
+	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+	if (igt_live_test_begin(&t, arg->engine->i915,
+				__func__, arg->engine->name))
+		return -EIO;
+
+	rq[0] = spinner_create_request(&arg->a.spin,
+				       arg->a.ctx, arg->engine,
+				       MI_ARB_CHECK);
+	if (IS_ERR(rq[0]))
+		return PTR_ERR(rq[0]);
+
+	clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
+	i915_request_get(rq[0]);
+	i915_request_add(rq[0]);
+	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+		err = -EIO;
+		goto out;
+	}
+
+	rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
+	if (IS_ERR(rq[1])) {
+		err = PTR_ERR(rq[1]);
+		goto out;
+	}
+
+	clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
+	i915_request_get(rq[1]);
+	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+	i915_request_add(rq[1]);
+	if (err)
+		goto out;
+
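+	/*
+	 * Note that the queued spinner reuses client a's context, the same
+	 * context as the inflight rq[0]; banning it cancels both, which is
+	 * why rq[0] and rq[2] are expected to report -EIO below while rq[1]
+	 * completes normally.
+	 */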
+	rq[2] = spinner_create_request(&arg->b.spin,
+				       arg->a.ctx, arg->engine,
+				       MI_ARB_CHECK);
+	if (IS_ERR(rq[2])) {
+		err = PTR_ERR(rq[2]);
+		goto out;
+	}
+
+	i915_request_get(rq[2]);
+	err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
+	i915_request_add(rq[2]);
+	if (err)
+		goto out;
+
+	intel_context_set_banned(rq[2]->context);
+	err = intel_engine_pulse(arg->engine);
+	if (err)
+		goto out;
+
+	err = wait_for_reset(arg->engine, rq[2], HZ / 2);
+	if (err)
+		goto out;
+
+	if (rq[0]->fence.error != -EIO) {
+		pr_err("Cancelled inflight0 request did not report -EIO\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (rq[1]->fence.error != 0) {
+		pr_err("Normal inflight1 request did not complete\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (rq[2]->fence.error != -EIO) {
+		pr_err("Cancelled queued request did not report -EIO\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+out:
+	i915_request_put(rq[2]);
+	i915_request_put(rq[1]);
+	i915_request_put(rq[0]);
+	if (igt_live_test_end(&t))
+		err = -EIO;
+	return err;
+}
+
+static int __cancel_hostile(struct live_preempt_cancel *arg)
+{
+	struct i915_request *rq;
+	int err;
+
+	/* Preempt cancel non-preemptible spinner in ELSP0 */
+	if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+		return 0;
+
+	if (!intel_has_reset_engine(arg->engine->gt))
+		return 0;
+
+	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+	rq = spinner_create_request(&arg->a.spin,
+				    arg->a.ctx, arg->engine,
+				    MI_NOOP); /* preemption disabled */
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	clear_bit(CONTEXT_BANNED, &rq->context->flags);
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+		err = -EIO;
+		goto out;
+	}
+
+	intel_context_set_banned(rq->context);
+	err = intel_engine_pulse(arg->engine); /* force reset */
+	if (err)
+		goto out;
+
+	err = wait_for_reset(arg->engine, rq, HZ / 2);
+	if (err) {
+		pr_err("Cancelled inflight0 request did not reset\n");
+		goto out;
+	}
+
+out:
+	i915_request_put(rq);
+	if (igt_flush_test(arg->engine->i915))
+		err = -EIO;
+	return err;
+}
+
+static void force_reset_timeout(struct intel_engine_cs *engine)
+{
+	engine->reset_timeout.probability = 999;
+	atomic_set(&engine->reset_timeout.times, -1);
+}
+
+static void cancel_reset_timeout(struct intel_engine_cs *engine)
+{
+	memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
+}
+
+static int __cancel_fail(struct live_preempt_cancel *arg)
+{
+	struct intel_engine_cs *engine = arg->engine;
+	struct i915_request *rq;
+	int err;
+
+	if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+		return 0;
+
+	if (!intel_has_reset_engine(engine->gt))
+		return 0;
+
+	GEM_TRACE("%s(%s)\n", __func__, engine->name);
+	rq = spinner_create_request(&arg->a.spin,
+				    arg->a.ctx, engine,
+				    MI_NOOP); /* preemption disabled */
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	clear_bit(CONTEXT_BANNED, &rq->context->flags);
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+		err = -EIO;
+		goto out;
+	}
+
+	intel_context_set_banned(rq->context);
+
+	err = intel_engine_pulse(engine);
+	if (err)
+		goto out;
+
+	force_reset_timeout(engine);
+
+	/* force preempt reset [failure] */
+	while (!engine->execlists.pending[0])
+		intel_engine_flush_submission(engine);
+	del_timer_sync(&engine->execlists.preempt);
+	intel_engine_flush_submission(engine);
+
+	cancel_reset_timeout(engine);
+
+	/* after failure, require heartbeats to reset device */
+	intel_engine_set_heartbeat(engine, 1);
+	err = wait_for_reset(engine, rq, HZ / 2);
+	intel_engine_set_heartbeat(engine,
+				   engine->defaults.heartbeat_interval_ms);
+	if (err) {
+		pr_err("Cancelled inflight0 request did not reset\n");
+		goto out;
+	}
+
+out:
+	i915_request_put(rq);
+	if (igt_flush_test(engine->i915))
+		err = -EIO;
+	return err;
+}
+
+static int live_preempt_cancel(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct live_preempt_cancel data;
+	enum intel_engine_id id;
+	int err = -ENOMEM;
+
+	/*
+	 * To cancel an inflight context, we need to first remove it from the
+	 * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
+	 */
+
+	if (preempt_client_init(gt, &data.a))
+		return -ENOMEM;
+	if (preempt_client_init(gt, &data.b))
+		goto err_client_a;
+
+	for_each_engine(data.engine, gt, id) {
+		if (!intel_engine_has_preemption(data.engine))
+			continue;
+
+		err = __cancel_active0(&data);
+		if (err)
+			goto err_wedged;
+
+		err = __cancel_active1(&data);
+		if (err)
+			goto err_wedged;
+
+		err = __cancel_queued(&data);
+		if (err)
+			goto err_wedged;
+
+		err = __cancel_hostile(&data);
+		if (err)
+			goto err_wedged;
+
+		err = __cancel_fail(&data);
+		if (err)
+			goto err_wedged;
+	}
+
+	err = 0;
+err_client_b:
+	preempt_client_fini(&data.b);
+err_client_a:
+	preempt_client_fini(&data.a);
+	return err;
+
+err_wedged:
+	GEM_TRACE_DUMP();
+	igt_spinner_end(&data.b.spin);
+	igt_spinner_end(&data.a.spin);
+	intel_gt_set_wedged(gt);
+	goto err_client_b;
+}
+
+static int live_suppress_self_preempt(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	struct i915_sched_attr attr = {
+		.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
+	};
+	struct preempt_client a, b;
+	enum intel_engine_id id;
+	int err = -ENOMEM;
+
+	/*
+	 * Verify that if a preemption request does not cause a change in
+	 * the current execution order, the preempt-to-idle injection is
+	 * skipped and that we do not accidentally apply it after the CS
+	 * completion event.
+	 */
+
+	if (intel_uc_uses_guc_submission(&gt->uc))
+		return 0; /* presume black box */
+
+	if (intel_vgpu_active(gt->i915))
+		return 0; /* GVT forces single port & request submission */
+
+	if (preempt_client_init(gt, &a))
+		return -ENOMEM;
+	if (preempt_client_init(gt, &b))
+		goto err_client_a;
+
+	for_each_engine(engine, gt, id) {
+		struct i915_request *rq_a, *rq_b;
+		int depth;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		if (igt_flush_test(gt->i915))
+			goto err_wedged;
+
+		st_engine_heartbeat_disable(engine);
+		engine->execlists.preempt_hang.count = 0;
+
+		rq_a = spinner_create_request(&a.spin,
+					      a.ctx, engine,
+					      MI_NOOP);
+		if (IS_ERR(rq_a)) {
+			err = PTR_ERR(rq_a);
+			st_engine_heartbeat_enable(engine);
+			goto err_client_b;
+		}
+
+		i915_request_add(rq_a);
+		if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+			pr_err("First client failed to start\n");
+			st_engine_heartbeat_enable(engine);
+			goto err_wedged;
+		}
+
+		/* Keep postponing the timer to avoid premature slicing */
+		mod_timer(&engine->execlists.timer, jiffies + HZ);
+		for (depth = 0; depth < 8; depth++) {
+			rq_b = spinner_create_request(&b.spin,
+						      b.ctx, engine,
+						      MI_NOOP);
+			if (IS_ERR(rq_b)) {
+				err = PTR_ERR(rq_b);
+				st_engine_heartbeat_enable(engine);
+				goto err_client_b;
+			}
+			i915_request_add(rq_b);
+
+			GEM_BUG_ON(i915_request_completed(rq_a));
+			engine->schedule(rq_a, &attr);
+			igt_spinner_end(&a.spin);
+
+			if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+				pr_err("Second client failed to start\n");
+				st_engine_heartbeat_enable(engine);
+				goto err_wedged;
+			}
+
+			swap(a, b);
+			rq_a = rq_b;
+		}
+		igt_spinner_end(&a.spin);
+
+		if (engine->execlists.preempt_hang.count) {
+			pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
+			       engine->name,
+			       engine->execlists.preempt_hang.count,
+			       depth);
+			st_engine_heartbeat_enable(engine);
+			err = -EINVAL;
+			goto err_client_b;
+		}
+
+		st_engine_heartbeat_enable(engine);
+		if (igt_flush_test(gt->i915))
+			goto err_wedged;
+	}
+
+	err = 0;
+err_client_b:
+	preempt_client_fini(&b);
+err_client_a:
+	preempt_client_fini(&a);
+	return err;
+
+err_wedged:
+	igt_spinner_end(&b.spin);
+	igt_spinner_end(&a.spin);
+	intel_gt_set_wedged(gt);
+	err = -EIO;
+	goto err_client_b;
+}
+
+static int live_chain_preempt(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	struct preempt_client hi, lo;
+	enum intel_engine_id id;
+	int err = -ENOMEM;
+
+	/*
+	 * Build a chain AB...BA between two contexts (A, B) and request
+	 * preemption of the last request. It should then complete before
+	 * the previously submitted spinner in B.
+	 */
+
+	if (preempt_client_init(gt, &hi))
+		return -ENOMEM;
+
+	if (preempt_client_init(gt, &lo))
+		goto err_client_hi;
+
+	for_each_engine(engine, gt, id) {
+		struct i915_sched_attr attr = {
+			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+		};
+		struct igt_live_test t;
+		struct i915_request *rq;
+		int ring_size, count, i;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		rq = spinner_create_request(&lo.spin,
+					    lo.ctx, engine,
+					    MI_ARB_CHECK);
+		if (IS_ERR(rq))
+			goto err_wedged;
+
+		i915_request_get(rq);
+		i915_request_add(rq);
+
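+		/*
+		 * One spinner request occupies [head, wa_tail) of the ring;
+		 * dividing the total ring size by that span gives an upper
+		 * bound on how many requests we can queue without wrapping.
+		 */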
+		ring_size = rq->wa_tail - rq->head;
+		if (ring_size < 0)
+			ring_size += rq->ring->size;
+		ring_size = rq->ring->size / ring_size;
+		pr_debug("%s(%s): Using maximum of %d requests\n",
+			 __func__, engine->name, ring_size);
+
+		igt_spinner_end(&lo.spin);
+		if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+			pr_err("Timed out waiting to flush %s\n", engine->name);
+			i915_request_put(rq);
+			goto err_wedged;
+		}
+		i915_request_put(rq);
+
+		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+			err = -EIO;
+			goto err_wedged;
+		}
+
+		for_each_prime_number_from(count, 1, ring_size) {
+			rq = spinner_create_request(&hi.spin,
+						    hi.ctx, engine,
+						    MI_ARB_CHECK);
+			if (IS_ERR(rq))
+				goto err_wedged;
+			i915_request_add(rq);
+			if (!igt_wait_for_spinner(&hi.spin, rq))
+				goto err_wedged;
+
+			rq = spinner_create_request(&lo.spin,
+						    lo.ctx, engine,
+						    MI_ARB_CHECK);
+			if (IS_ERR(rq))
+				goto err_wedged;
+			i915_request_add(rq);
+
+			for (i = 0; i < count; i++) {
+				rq = igt_request_alloc(lo.ctx, engine);
+				if (IS_ERR(rq))
+					goto err_wedged;
+				i915_request_add(rq);
+			}
+
+			rq = igt_request_alloc(hi.ctx, engine);
+			if (IS_ERR(rq))
+				goto err_wedged;
+
+			i915_request_get(rq);
+			i915_request_add(rq);
+			engine->schedule(rq, &attr);
+
+			igt_spinner_end(&hi.spin);
+			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+				struct drm_printer p =
+					drm_info_printer(gt->i915->drm.dev);
+
+				pr_err("Failed to preempt over chain of %d\n",
+				       count);
+				intel_engine_dump(engine, &p,
+						  "%s\n", engine->name);
+				i915_request_put(rq);
+				goto err_wedged;
+			}
+			igt_spinner_end(&lo.spin);
+			i915_request_put(rq);
+
+			rq = igt_request_alloc(lo.ctx, engine);
+			if (IS_ERR(rq))
+				goto err_wedged;
+
+			i915_request_get(rq);
+			i915_request_add(rq);
+
+			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+				struct drm_printer p =
+					drm_info_printer(gt->i915->drm.dev);
+
+				pr_err("Failed to flush low priority chain of %d requests\n",
+				       count);
+				intel_engine_dump(engine, &p,
+						  "%s\n", engine->name);
+
+				i915_request_put(rq);
+				goto err_wedged;
+			}
+			i915_request_put(rq);
+		}
+
+		if (igt_live_test_end(&t)) {
+			err = -EIO;
+			goto err_wedged;
+		}
+	}
+
+	err = 0;
+err_client_lo:
+	preempt_client_fini(&lo);
+err_client_hi:
+	preempt_client_fini(&hi);
+	return err;
+
+err_wedged:
+	igt_spinner_end(&hi.spin);
+	igt_spinner_end(&lo.spin);
+	intel_gt_set_wedged(gt);
+	err = -EIO;
+	goto err_client_lo;
+}
+
+static int create_gang(struct intel_engine_cs *engine,
+		       struct i915_request **prev)
+{
+	struct drm_i915_gem_object *obj;
+	struct intel_context *ce;
+	struct i915_request *rq;
+	struct i915_vma *vma;
+	u32 *cs;
+	int err;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	obj = i915_gem_object_create_internal(engine->i915, 4096);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_ce;
+	}
+
+	vma = i915_vma_instance(obj, ce->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_obj;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		goto err_obj;
+
+	cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err_obj;
+	}
+
+	/* Semaphore target: spin until zero */
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+	*cs++ = MI_SEMAPHORE_WAIT |
+		MI_SEMAPHORE_POLL |
+		MI_SEMAPHORE_SAD_EQ_SDD;
+	*cs++ = 0;
+	*cs++ = lower_32_bits(vma->node.start);
+	*cs++ = upper_32_bits(vma->node.start);
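+	/*
+	 * The semaphore polls the first dword of this batch (the non-zero
+	 * MI_ARB_ON_OFF above), so we spin until the next, higher priority,
+	 * batch in the gang (or the test itself, for the final spinner)
+	 * overwrites it with 0.
+	 */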
+
+	if (*prev) {
+		u64 offset = (*prev)->batch->node.start;
+
+		/* Terminate the spinner in the next lower priority batch. */
+		*cs++ = MI_STORE_DWORD_IMM_GEN4;
+		*cs++ = lower_32_bits(offset);
+		*cs++ = upper_32_bits(offset);
+		*cs++ = 0;
+	}
+
+	*cs++ = MI_BATCH_BUFFER_END;
+	i915_gem_object_flush_map(obj);
+	i915_gem_object_unpin_map(obj);
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_obj;
+	}
+
+	rq->batch = i915_vma_get(vma);
+	i915_request_get(rq);
+
+	i915_vma_lock(vma);
+	err = i915_request_await_object(rq, vma->obj, false);
+	if (!err)
+		err = i915_vma_move_to_active(vma, rq, 0);
+	if (!err)
+		err = rq->engine->emit_bb_start(rq,
+						vma->node.start,
+						PAGE_SIZE, 0);
+	i915_vma_unlock(vma);
+	i915_request_add(rq);
+	if (err)
+		goto err_rq;
+
+	i915_gem_object_put(obj);
+	intel_context_put(ce);
+
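+	/* Chain the gang together through the otherwise unused mock.link */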
+	rq->mock.link.next = &(*prev)->mock.link;
+	*prev = rq;
+	return 0;
+
+err_rq:
+	i915_vma_put(rq->batch);
+	i915_request_put(rq);
+err_obj:
+	i915_gem_object_put(obj);
+err_ce:
+	intel_context_put(ce);
+	return err;
+}
+
+static int __live_preempt_ring(struct intel_engine_cs *engine,
+			       struct igt_spinner *spin,
+			       int queue_sz, int ring_sz)
+{
+	struct intel_context *ce[2] = {};
+	struct i915_request *rq;
+	struct igt_live_test t;
+	int err = 0;
+	int n;
+
+	if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
+		return -EIO;
+
+	for (n = 0; n < ARRAY_SIZE(ce); n++) {
+		struct intel_context *tmp;
+
+		tmp = intel_context_create(engine);
+		if (IS_ERR(tmp)) {
+			err = PTR_ERR(tmp);
+			goto err_ce;
+		}
+
+		tmp->ring = __intel_context_ring_size(ring_sz);
+
+		err = intel_context_pin(tmp);
+		if (err) {
+			intel_context_put(tmp);
+			goto err_ce;
+		}
+
+		memset32(tmp->ring->vaddr,
+			 0xdeadbeef, /* trigger a hang if executed */
+			 tmp->ring->vma->size / sizeof(u32));
+
+		ce[n] = tmp;
+	}
+
+	rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_ce;
+	}
+
+	i915_request_get(rq);
+	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+	i915_request_add(rq);
+
+	if (!igt_wait_for_spinner(spin, rq)) {
+		intel_gt_set_wedged(engine->gt);
+		i915_request_put(rq);
+		err = -ETIME;
+		goto err_ce;
+	}
+
+	/* Fill the ring until we cause a wrap */
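+	/*
+	 * Each nop request advances ce[0]'s ring tail; keep submitting until
+	 * the tail is queue_sz bytes past the spinner, so that the preemption
+	 * below has to unwind a large chunk of the ring (possibly wrapping).
+	 */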
+	n = 0;
+	while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
+		struct i915_request *tmp;
+
+		tmp = intel_context_create_request(ce[0]);
+		if (IS_ERR(tmp)) {
+			err = PTR_ERR(tmp);
+			i915_request_put(rq);
+			goto err_ce;
+		}
+
+		i915_request_add(tmp);
+		intel_engine_flush_submission(engine);
+		n++;
+	}
+	intel_engine_flush_submission(engine);
+	pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
+		 engine->name, queue_sz, n,
+		 ce[0]->ring->size,
+		 ce[0]->ring->tail,
+		 ce[0]->ring->emit,
+		 rq->tail);
+	i915_request_put(rq);
+
+	/* Create a second request to preempt the first ring */
+	rq = intel_context_create_request(ce[1]);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_ce;
+	}
+
+	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+	i915_request_get(rq);
+	i915_request_add(rq);
+
+	err = wait_for_submit(engine, rq, HZ / 2);
+	i915_request_put(rq);
+	if (err) {
+		pr_err("%s: preemption request was not submited\n",
+		       engine->name);
+		err = -ETIME;
+	}
+
+	pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
+		 engine->name,
+		 ce[0]->ring->tail, ce[0]->ring->emit,
+		 ce[1]->ring->tail, ce[1]->ring->emit);
+
+err_ce:
+	intel_engine_flush_submission(engine);
+	igt_spinner_end(spin);
+	for (n = 0; n < ARRAY_SIZE(ce); n++) {
+		if (IS_ERR_OR_NULL(ce[n]))
+			break;
+
+		intel_context_unpin(ce[n]);
+		intel_context_put(ce[n]);
+	}
+	if (igt_live_test_end(&t))
+		err = -EIO;
+	return err;
+}
+
+static int live_preempt_ring(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	struct igt_spinner spin;
+	enum intel_engine_id id;
+	int err = 0;
+
+	/*
+	 * Check that we roll back large chunks of a ring in order to do a
+	 * preemption event. Similar to live_unlite_ring, but looking at
+	 * ring size rather than the impact of intel_ring_direction().
+	 */
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	for_each_engine(engine, gt, id) {
+		int n;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		if (!intel_engine_can_store_dword(engine))
+			continue;
+
+		st_engine_heartbeat_disable(engine);
+
+		for (n = 0; n <= 3; n++) {
+			err = __live_preempt_ring(engine, &spin,
+						  n * SZ_4K / 4, SZ_4K);
+			if (err)
+				break;
+		}
+
+		st_engine_heartbeat_enable(engine);
+		if (err)
+			break;
+	}
+
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int live_preempt_gang(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/*
+	 * Build as long a chain of preempters as we can, with each
+	 * request higher priority than the last. Once we are ready, we release
+	 * the last batch, which then percolates down the chain, each releasing
+	 * the next oldest in turn. The intent is simply to push as hard as we
+	 * can with the number of preemptions, trying to exceed narrow HW
+	 * limits. At a minimum, we insist that we can sort all the user
+	 * high priority levels into execution order.
+	 */
+
+	for_each_engine(engine, gt, id) {
+		struct i915_request *rq = NULL;
+		struct igt_live_test t;
+		IGT_TIMEOUT(end_time);
+		int prio = 0;
+		int err = 0;
+		u32 *cs;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
+			return -EIO;
+
+		do {
+			struct i915_sched_attr attr = {
+				.priority = I915_USER_PRIORITY(prio++),
+			};
+
+			err = create_gang(engine, &rq);
+			if (err)
+				break;
+
+			/* Submit each spinner at increasing priority */
+			engine->schedule(rq, &attr);
+		} while (prio <= I915_PRIORITY_MAX &&
+			 !__igt_timeout(end_time, NULL));
+		pr_debug("%s: Preempt chain of %d requests\n",
+			 engine->name, prio);
+
+		/*
+		 * The last spinner in the gang has the highest priority and
+		 * should execute first. When that spinner completes, it
+		 * terminates the next lowest spinner, and so on, until there
+		 * are no more spinners and the gang is complete.
+		 */
+		cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
+		if (!IS_ERR(cs)) {
+			*cs = 0;
+			i915_gem_object_unpin_map(rq->batch->obj);
+		} else {
+			err = PTR_ERR(cs);
+			intel_gt_set_wedged(gt);
+		}
+
+		while (rq) { /* wait for each rq from highest to lowest prio */
+			struct i915_request *n = list_next_entry(rq, mock.link);
+
+			if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
+				struct drm_printer p =
+					drm_info_printer(engine->i915->drm.dev);
+
+				pr_err("Failed to flush chain of %d requests, at %d\n",
+				       prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
+				intel_engine_dump(engine, &p,
+						  "%s\n", engine->name);
+
+				err = -ETIME;
+			}
+
+			i915_vma_put(rq->batch);
+			i915_request_put(rq);
+			rq = n;
+		}
+
+		if (igt_live_test_end(&t))
+			err = -EIO;
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static struct i915_vma *
+create_gpr_user(struct intel_engine_cs *engine,
+		struct i915_vma *result,
+		unsigned int offset)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	u32 *cs;
+	int err;
+	int i;
+
+	obj = i915_gem_object_create_internal(engine->i915, 4096);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	vma = i915_vma_instance(obj, result->vm, NULL);
+	if (IS_ERR(vma)) {
+		i915_gem_object_put(obj);
+		return vma;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err) {
+		i915_vma_put(vma);
+		return ERR_PTR(err);
+	}
+
+	cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(cs)) {
+		i915_vma_put(vma);
+		return ERR_CAST(cs);
+	}
+
+	/* All GPR are clear for new contexts. We use GPR(0) as a constant */
+	*cs++ = MI_LOAD_REGISTER_IMM(1);
+	*cs++ = CS_GPR(engine, 0);
+	*cs++ = 1;
+
+	for (i = 1; i < NUM_GPR; i++) {
+		u64 addr;
+
+		/*
+		 * Perform: GPR[i]++
+		 *
+		 * As we read and write into the context saved GPR[i], if
+		 * we restart this batch buffer from an earlier point, we
+		 * will repeat the increment and store a value > 1.
+		 */
+		*cs++ = MI_MATH(4);
+		*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
+		*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
+		*cs++ = MI_MATH_ADD;
+		*cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
+
+		addr = result->node.start + offset + i * sizeof(*cs);
+		*cs++ = MI_STORE_REGISTER_MEM_GEN8;
+		*cs++ = CS_GPR(engine, 2 * i);
+		*cs++ = lower_32_bits(addr);
+		*cs++ = upper_32_bits(addr);
+
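+		/*
+		 * Stall after each increment until preempt_user() has bumped
+		 * the first dword of the result buffer to at least i, i.e.
+		 * each GPR step waits for another preemption pass.
+		 */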
+		*cs++ = MI_SEMAPHORE_WAIT |
+			MI_SEMAPHORE_POLL |
+			MI_SEMAPHORE_SAD_GTE_SDD;
+		*cs++ = i;
+		*cs++ = lower_32_bits(result->node.start);
+		*cs++ = upper_32_bits(result->node.start);
+	}
+
+	*cs++ = MI_BATCH_BUFFER_END;
+	i915_gem_object_flush_map(obj);
+	i915_gem_object_unpin_map(obj);
+
+	return vma;
+}
+
+static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int err;
+
+	obj = i915_gem_object_create_internal(gt->i915, sz);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+	if (IS_ERR(vma)) {
+		i915_gem_object_put(obj);
+		return vma;
+	}
+
+	err = i915_ggtt_pin(vma, NULL, 0, 0);
+	if (err) {
+		i915_vma_put(vma);
+		return ERR_PTR(err);
+	}
+
+	return vma;
+}
+
+static struct i915_request *
+create_gpr_client(struct intel_engine_cs *engine,
+		  struct i915_vma *global,
+		  unsigned int offset)
+{
+	struct i915_vma *batch, *vma;
+	struct intel_context *ce;
+	struct i915_request *rq;
+	int err;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return ERR_CAST(ce);
+
+	vma = i915_vma_instance(global->obj, ce->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto out_ce;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		goto out_ce;
+
+	batch = create_gpr_user(engine, vma, offset);
+	if (IS_ERR(batch)) {
+		err = PTR_ERR(batch);
+		goto out_vma;
+	}
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_batch;
+	}
+
+	i915_vma_lock(vma);
+	err = i915_request_await_object(rq, vma->obj, false);
+	if (!err)
+		err = i915_vma_move_to_active(vma, rq, 0);
+	i915_vma_unlock(vma);
+
+	i915_vma_lock(batch);
+	if (!err)
+		err = i915_request_await_object(rq, batch->obj, false);
+	if (!err)
+		err = i915_vma_move_to_active(batch, rq, 0);
+	if (!err)
+		err = rq->engine->emit_bb_start(rq,
+						batch->node.start,
+						PAGE_SIZE, 0);
+	i915_vma_unlock(batch);
+	i915_vma_unpin(batch);
+
+	if (!err)
+		i915_request_get(rq);
+	i915_request_add(rq);
+
+out_batch:
+	i915_vma_put(batch);
+out_vma:
+	i915_vma_unpin(vma);
+out_ce:
+	intel_context_put(ce);
+	return err ? ERR_PTR(err) : rq;
+}
+
+static int preempt_user(struct intel_engine_cs *engine,
+			struct i915_vma *global,
+			int id)
+{
+	struct i915_sched_attr attr = {
+		.priority = I915_PRIORITY_MAX
+	};
+	struct i915_request *rq;
+	int err = 0;
+	u32 *cs;
+
+	rq = intel_engine_create_kernel_request(engine);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	cs = intel_ring_begin(rq, 4);
+	if (IS_ERR(cs)) {
+		i915_request_add(rq);
+		return PTR_ERR(cs);
+	}
+
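+	/* Publish this preemption pass by writing id into global[0] */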
+	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+	*cs++ = i915_ggtt_offset(global);
+	*cs++ = 0;
+	*cs++ = id;
+
+	intel_ring_advance(rq, cs);
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+
+	engine->schedule(rq, &attr);
+
+	if (i915_request_wait(rq, 0, HZ / 2) < 0)
+		err = -ETIME;
+	i915_request_put(rq);
+
+	return err;
+}
+
+static int live_preempt_user(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	struct i915_vma *global;
+	enum intel_engine_id id;
+	u32 *result;
+	int err = 0;
+
+	/*
+	 * In our other tests, we look at preemption in carefully
+	 * controlled conditions in the ringbuffer. Since most of the
+	 * time is spent in user batches, most of our preemptions naturally
+	 * occur there. We want to verify that when we preempt inside a batch
+	 * we continue on from the current instruction and do not roll back
+	 * to the start, or another earlier arbitration point.
+	 *
+	 * To verify this, we create a batch which is a mixture of
+	 * MI_MATH (gpr++), MI_SRM (gpr) and preemption points. Then with
+	 * a few preempting contexts thrown into the mix, we look for any
+	 * repeated instructions (which show up as incorrect values).
+	 */
+
+	global = create_global(gt, 4096);
+	if (IS_ERR(global))
+		return PTR_ERR(global);
+
+	result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
+	if (IS_ERR(result)) {
+		i915_vma_unpin_and_release(&global, 0);
+		return PTR_ERR(result);
+	}
+
+	for_each_engine(engine, gt, id) {
+		struct i915_request *client[3] = {};
+		struct igt_live_test t;
+		int i;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
+			continue; /* we need per-context GPR */
+
+		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+			err = -EIO;
+			break;
+		}
+
+		memset(result, 0, 4096);
+
+		for (i = 0; i < ARRAY_SIZE(client); i++) {
+			struct i915_request *rq;
+
+			rq = create_gpr_client(engine, global,
+					       NUM_GPR * i * sizeof(u32));
+			if (IS_ERR(rq)) {
+				err = PTR_ERR(rq);
+				goto end_test;
+			}
+
+			client[i] = rq;
+		}
+
+		/* Continuously preempt the set of 3 running contexts */
+		for (i = 1; i <= NUM_GPR; i++) {
+			err = preempt_user(engine, global, i);
+			if (err)
+				goto end_test;
+		}
+
+		if (READ_ONCE(result[0]) != NUM_GPR) {
+			pr_err("%s: Failed to release semaphore\n",
+			       engine->name);
+			err = -EIO;
+			goto end_test;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(client); i++) {
+			int gpr;
+
+			if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
+				err = -ETIME;
+				goto end_test;
+			}
+
+			for (gpr = 1; gpr < NUM_GPR; gpr++) {
+				if (result[NUM_GPR * i + gpr] != 1) {
+					pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
+					       engine->name,
+					       i, gpr, result[NUM_GPR * i + gpr]);
+					err = -EINVAL;
+					goto end_test;
+				}
+			}
+		}
+
+end_test:
+		for (i = 0; i < ARRAY_SIZE(client); i++) {
+			if (!client[i])
+				break;
+
+			i915_request_put(client[i]);
+		}
+
+		/* Flush the semaphores on error */
+		smp_store_mb(result[0], -1);
+		if (igt_live_test_end(&t))
+			err = -EIO;
+		if (err)
+			break;
+	}
+
+	i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
+	return err;
+}
+
+static int live_preempt_timeout(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct i915_gem_context *ctx_hi, *ctx_lo;
+	struct igt_spinner spin_lo;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = -ENOMEM;
+
+	/*
+	 * Check that we force preemption to occur by cancelling the previous
+	 * context if it refuses to yield the GPU.
+	 */
+	if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+		return 0;
+
+	if (!intel_has_reset_engine(gt))
+		return 0;
+
+	if (igt_spinner_init(&spin_lo, gt))
+		return -ENOMEM;
+
+	ctx_hi = kernel_context(gt->i915);
+	if (!ctx_hi)
+		goto err_spin_lo;
+	ctx_hi->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+	ctx_lo = kernel_context(gt->i915);
+	if (!ctx_lo)
+		goto err_ctx_hi;
+	ctx_lo->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+	for_each_engine(engine, gt, id) {
+		unsigned long saved_timeout;
+		struct i915_request *rq;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+					    MI_NOOP); /* preemption disabled */
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_ctx_lo;
+		}
+
+		i915_request_add(rq);
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
+			intel_gt_set_wedged(gt);
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+
+		rq = igt_request_alloc(ctx_hi, engine);
+		if (IS_ERR(rq)) {
+			igt_spinner_end(&spin_lo);
+			err = PTR_ERR(rq);
+			goto err_ctx_lo;
+		}
+
+		/* Flush the previous CS ack before changing timeouts */
+		while (READ_ONCE(engine->execlists.pending[0]))
+			cpu_relax();
+
+		saved_timeout = engine->props.preempt_timeout_ms;
+		engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */
+
+		i915_request_get(rq);
+		i915_request_add(rq);
+
+		intel_engine_flush_submission(engine);
+		engine->props.preempt_timeout_ms = saved_timeout;
+
+		if (i915_request_wait(rq, 0, HZ / 10) < 0) {
+			intel_gt_set_wedged(gt);
+			i915_request_put(rq);
+			err = -ETIME;
+			goto err_ctx_lo;
+		}
+
+		igt_spinner_end(&spin_lo);
+		i915_request_put(rq);
+	}
+
+	err = 0;
+err_ctx_lo:
+	kernel_context_close(ctx_lo);
+err_ctx_hi:
+	kernel_context_close(ctx_hi);
+err_spin_lo:
+	igt_spinner_fini(&spin_lo);
+	return err;
+}
+
+static int random_range(struct rnd_state *rnd, int min, int max)
+{
+	return i915_prandom_u32_max_state(max - min, rnd) + min;
+}
+
+static int random_priority(struct rnd_state *rnd)
+{
+	return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
+}
+
+struct preempt_smoke {
+	struct intel_gt *gt;
+	struct i915_gem_context **contexts;
+	struct intel_engine_cs *engine;
+	struct drm_i915_gem_object *batch;
+	unsigned int ncontext;
+	struct rnd_state prng;
+	unsigned long count;
+};
+
+static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
+{
+	return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
+							  &smoke->prng)];
+}
+
+static int smoke_submit(struct preempt_smoke *smoke,
+			struct i915_gem_context *ctx, int prio,
+			struct drm_i915_gem_object *batch)
+{
+	struct i915_request *rq;
+	struct i915_vma *vma = NULL;
+	int err = 0;
+
+	if (batch) {
+		struct i915_address_space *vm;
+
+		vm = i915_gem_context_get_vm_rcu(ctx);
+		vma = i915_vma_instance(batch, vm, NULL);
+		i915_vm_put(vm);
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
+
+		err = i915_vma_pin(vma, 0, 0, PIN_USER);
+		if (err)
+			return err;
+	}
+
+	ctx->sched.priority = prio;
+
+	rq = igt_request_alloc(ctx, smoke->engine);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto unpin;
+	}
+
+	if (vma) {
+		i915_vma_lock(vma);
+		err = i915_request_await_object(rq, vma->obj, false);
+		if (!err)
+			err = i915_vma_move_to_active(vma, rq, 0);
+		if (!err)
+			err = rq->engine->emit_bb_start(rq,
+							vma->node.start,
+							PAGE_SIZE, 0);
+		i915_vma_unlock(vma);
+	}
+
+	i915_request_add(rq);
+
+unpin:
+	if (vma)
+		i915_vma_unpin(vma);
+
+	return err;
+}
+
+static int smoke_crescendo_thread(void *arg)
+{
+	struct preempt_smoke *smoke = arg;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		struct i915_gem_context *ctx = smoke_context(smoke);
+		int err;
+
+		err = smoke_submit(smoke,
+				   ctx, count % I915_PRIORITY_MAX,
+				   smoke->batch);
+		if (err)
+			return err;
+
+		count++;
+	} while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
+
+	smoke->count = count;
+	return 0;
+}
+
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
+{
+	struct task_struct *tsk[I915_NUM_ENGINES] = {};
+	struct preempt_smoke arg[I915_NUM_ENGINES];
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	unsigned long count;
+	int err = 0;
+
+	for_each_engine(engine, smoke->gt, id) {
+		arg[id] = *smoke;
+		arg[id].engine = engine;
+		if (!(flags & BATCH))
+			arg[id].batch = NULL;
+		arg[id].count = 0;
+
+		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
+				      "igt/smoke:%d", id);
+		if (IS_ERR(tsk[id])) {
+			err = PTR_ERR(tsk[id]);
+			break;
+		}
+		get_task_struct(tsk[id]);
+	}
+
+	yield(); /* start all threads before we kthread_stop() */
+
+	count = 0;
+	for_each_engine(engine, smoke->gt, id) {
+		int status;
+
+		if (IS_ERR_OR_NULL(tsk[id]))
+			continue;
+
+		status = kthread_stop(tsk[id]);
+		if (status && !err)
+			err = status;
+
+		count += arg[id].count;
+
+		put_task_struct(tsk[id]);
+	}
+
+	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+		count, flags, smoke->gt->info.num_engines, smoke->ncontext);
+	return err;
+}
+
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
+{
+	enum intel_engine_id id;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		for_each_engine(smoke->engine, smoke->gt, id) {
+			struct i915_gem_context *ctx = smoke_context(smoke);
+			int err;
+
+			err = smoke_submit(smoke,
+					   ctx, random_priority(&smoke->prng),
+					   flags & BATCH ? smoke->batch : NULL);
+			if (err)
+				return err;
+
+			count++;
+		}
+	} while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
+
+	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
+		count, flags, smoke->gt->info.num_engines, smoke->ncontext);
+	return 0;
+}
+
+static int live_preempt_smoke(void *arg)
+{
+	struct preempt_smoke smoke = {
+		.gt = arg,
+		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
+		.ncontext = 256,
+	};
+	const unsigned int phase[] = { 0, BATCH };
+	struct igt_live_test t;
+	int err = -ENOMEM;
+	u32 *cs;
+	int n;
+
+	smoke.contexts = kmalloc_array(smoke.ncontext,
+				       sizeof(*smoke.contexts),
+				       GFP_KERNEL);
+	if (!smoke.contexts)
+		return -ENOMEM;
+
+	smoke.batch =
+		i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
+	if (IS_ERR(smoke.batch)) {
+		err = PTR_ERR(smoke.batch);
+		goto err_free;
+	}
+
+	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err_batch;
+	}
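+	/* A page of MI_ARB_CHECKs: an arbitration point at every dword */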
+	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
+		cs[n] = MI_ARB_CHECK;
+	cs[n] = MI_BATCH_BUFFER_END;
+	i915_gem_object_flush_map(smoke.batch);
+	i915_gem_object_unpin_map(smoke.batch);
+
+	if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
+		err = -EIO;
+		goto err_batch;
+	}
+
+	for (n = 0; n < smoke.ncontext; n++) {
+		smoke.contexts[n] = kernel_context(smoke.gt->i915);
+		if (!smoke.contexts[n])
+			goto err_ctx;
+	}
+
+	for (n = 0; n < ARRAY_SIZE(phase); n++) {
+		err = smoke_crescendo(&smoke, phase[n]);
+		if (err)
+			goto err_ctx;
+
+		err = smoke_random(&smoke, phase[n]);
+		if (err)
+			goto err_ctx;
+	}
+
+err_ctx:
+	if (igt_live_test_end(&t))
+		err = -EIO;
+
+	for (n = 0; n < smoke.ncontext; n++) {
+		if (!smoke.contexts[n])
+			break;
+		kernel_context_close(smoke.contexts[n]);
+	}
+
+err_batch:
+	i915_gem_object_put(smoke.batch);
+err_free:
+	kfree(smoke.contexts);
+
+	return err;
+}
+
+static int nop_virtual_engine(struct intel_gt *gt,
+			      struct intel_engine_cs **siblings,
+			      unsigned int nsibling,
+			      unsigned int nctx,
+			      unsigned int flags)
+#define CHAIN BIT(0)
+{
+	IGT_TIMEOUT(end_time);
+	struct i915_request *request[16] = {};
+	struct intel_context *ve[16];
+	unsigned long n, prime, nc;
+	struct igt_live_test t;
+	ktime_t times[2] = {};
+	int err;
+
+	GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
+
+	for (n = 0; n < nctx; n++) {
+		ve[n] = intel_execlists_create_virtual(siblings, nsibling);
+		if (IS_ERR(ve[n])) {
+			err = PTR_ERR(ve[n]);
+			nctx = n;
+			goto out;
+		}
+
+		err = intel_context_pin(ve[n]);
+		if (err) {
+			intel_context_put(ve[n]);
+			nctx = n;
+			goto out;
+		}
+	}
+
+	err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
+	if (err)
+		goto out;
+
+	for_each_prime_number_from(prime, 1, 8192) {
+		times[1] = ktime_get_raw();
+
+		if (flags & CHAIN) {
+			for (nc = 0; nc < nctx; nc++) {
+				for (n = 0; n < prime; n++) {
+					struct i915_request *rq;
+
+					rq = i915_request_create(ve[nc]);
+					if (IS_ERR(rq)) {
+						err = PTR_ERR(rq);
+						goto out;
+					}
+
+					if (request[nc])
+						i915_request_put(request[nc]);
+					request[nc] = i915_request_get(rq);
+					i915_request_add(rq);
+				}
+			}
+		} else {
+			for (n = 0; n < prime; n++) {
+				for (nc = 0; nc < nctx; nc++) {
+					struct i915_request *rq;
+
+					rq = i915_request_create(ve[nc]);
+					if (IS_ERR(rq)) {
+						err = PTR_ERR(rq);
+						goto out;
+					}
+
+					if (request[nc])
+						i915_request_put(request[nc]);
+					request[nc] = i915_request_get(rq);
+					i915_request_add(rq);
+				}
+			}
+		}
+
+		for (nc = 0; nc < nctx; nc++) {
+			if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
+				pr_err("%s(%s): wait for %llx:%lld timed out\n",
+				       __func__, ve[0]->engine->name,
+				       request[nc]->fence.context,
+				       request[nc]->fence.seqno);
+
+				GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+					  __func__, ve[0]->engine->name,
+					  request[nc]->fence.context,
+					  request[nc]->fence.seqno);
+				GEM_TRACE_DUMP();
+				intel_gt_set_wedged(gt);
+				break;
+			}
+		}
+
+		times[1] = ktime_sub(ktime_get_raw(), times[1]);
+		if (prime == 1)
+			times[0] = times[1];
+
+		for (nc = 0; nc < nctx; nc++) {
+			i915_request_put(request[nc]);
+			request[nc] = NULL;
+		}
+
+		if (__igt_timeout(end_time, NULL))
+			break;
+	}
+
+	err = igt_live_test_end(&t);
+	if (err)
+		goto out;
+
+	pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
+		nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
+		prime, div64_u64(ktime_to_ns(times[1]), prime));
+
+out:
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+
+	for (nc = 0; nc < nctx; nc++) {
+		i915_request_put(request[nc]);
+		intel_context_unpin(ve[nc]);
+		intel_context_put(ve[nc]);
+	}
+	return err;
+}
+
+static unsigned int
+__select_siblings(struct intel_gt *gt,
+		  unsigned int class,
+		  struct intel_engine_cs **siblings,
+		  bool (*filter)(const struct intel_engine_cs *))
+{
+	unsigned int n = 0;
+	unsigned int inst;
+
+	for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+		if (!gt->engine_class[class][inst])
+			continue;
+
+		if (filter && !filter(gt->engine_class[class][inst]))
+			continue;
+
+		siblings[n++] = gt->engine_class[class][inst];
+	}
+
+	return n;
+}
+
+static unsigned int
+select_siblings(struct intel_gt *gt,
+		unsigned int class,
+		struct intel_engine_cs **siblings)
+{
+	return __select_siblings(gt, class, siblings, NULL);
+}
+
+static int live_virtual_engine(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	unsigned int class;
+	int err;
+
+	if (intel_uc_uses_guc_submission(&gt->uc))
+		return 0;
+
+	for_each_engine(engine, gt, id) {
+		err = nop_virtual_engine(gt, &engine, 1, 1, 0);
+		if (err) {
+			pr_err("Failed to wrap engine %s: err=%d\n",
+			       engine->name, err);
+			return err;
+		}
+	}
+
+	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+		int nsibling, n;
+
+		nsibling = select_siblings(gt, class, siblings);
+		if (nsibling < 2)
+			continue;
+
+		for (n = 1; n <= nsibling + 1; n++) {
+			err = nop_virtual_engine(gt, siblings, nsibling,
+						 n, 0);
+			if (err)
+				return err;
+		}
+
+		err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int mask_virtual_engine(struct intel_gt *gt,
+			       struct intel_engine_cs **siblings,
+			       unsigned int nsibling)
+{
+	struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
+	struct intel_context *ve;
+	struct igt_live_test t;
+	unsigned int n;
+	int err;
+
+	/*
+	 * Check that by setting the execution mask on a request, we can
+	 * restrict it to our desired engine within the virtual engine.
+	 */
+
+	ve = intel_execlists_create_virtual(siblings, nsibling);
+	if (IS_ERR(ve)) {
+		err = PTR_ERR(ve);
+		goto out_close;
+	}
+
+	err = intel_context_pin(ve);
+	if (err)
+		goto out_put;
+
+	err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+	if (err)
+		goto out_unpin;
+
+	for (n = 0; n < nsibling; n++) {
+		request[n] = i915_request_create(ve);
+		if (IS_ERR(request[n])) {
+			err = PTR_ERR(request[n]);
+			nsibling = n;
+			goto out;
+		}
+
+		/* Reverse order as it's more likely to be unnatural */
+		request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
+
+		i915_request_get(request[n]);
+		i915_request_add(request[n]);
+	}
+
+	for (n = 0; n < nsibling; n++) {
+		if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
+			pr_err("%s(%s): wait for %llx:%lld timed out\n",
+			       __func__, ve->engine->name,
+			       request[n]->fence.context,
+			       request[n]->fence.seqno);
+
+			GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+				  __func__, ve->engine->name,
+				  request[n]->fence.context,
+				  request[n]->fence.seqno);
+			GEM_TRACE_DUMP();
+			intel_gt_set_wedged(gt);
+			err = -EIO;
+			goto out;
+		}
+
+		if (request[n]->engine != siblings[nsibling - n - 1]) {
+			pr_err("Executed on wrong sibling '%s', expected '%s'\n",
+			       request[n]->engine->name,
+			       siblings[nsibling - n - 1]->name);
+			err = -EINVAL;
+			goto out;
+		}
+	}
+
+	err = igt_live_test_end(&t);
+out:
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+
+	for (n = 0; n < nsibling; n++)
+		i915_request_put(request[n]);
+
+out_unpin:
+	intel_context_unpin(ve);
+out_put:
+	intel_context_put(ve);
+out_close:
+	return err;
+}
+
+static int live_virtual_mask(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	unsigned int class;
+	int err;
+
+	if (intel_uc_uses_guc_submission(&gt->uc))
+		return 0;
+
+	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+		unsigned int nsibling;
+
+		nsibling = select_siblings(gt, class, siblings);
+		if (nsibling < 2)
+			continue;
+
+		err = mask_virtual_engine(gt, siblings, nsibling);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int slicein_virtual_engine(struct intel_gt *gt,
+				  struct intel_engine_cs **siblings,
+				  unsigned int nsibling)
+{
+	const long timeout = slice_timeout(siblings[0]);
+	struct intel_context *ce;
+	struct i915_request *rq;
+	struct igt_spinner spin;
+	unsigned int n;
+	int err = 0;
+
+	/*
+	 * Virtual requests must take part in timeslicing on the target engines.
+	 */
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
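+	/* Keep every sibling busy with a spinning request of its own */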
+	for (n = 0; n < nsibling; n++) {
+		ce = intel_context_create(siblings[n]);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto out;
+		}
+
+		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+		intel_context_put(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out;
+		}
+
+		i915_request_add(rq);
+	}
+
+	ce = intel_execlists_create_virtual(siblings, nsibling);
+	if (IS_ERR(ce)) {
+		err = PTR_ERR(ce);
+		goto out;
+	}
+
+	rq = intel_context_create_request(ce);
+	intel_context_put(ce);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out;
+	}
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (i915_request_wait(rq, 0, timeout) < 0) {
+		GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
+			      __func__, rq->engine->name);
+		GEM_TRACE_DUMP();
+		intel_gt_set_wedged(gt);
+		err = -EIO;
+	}
+	i915_request_put(rq);
+
+out:
+	igt_spinner_end(&spin);
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int sliceout_virtual_engine(struct intel_gt *gt,
+				   struct intel_engine_cs **siblings,
+				   unsigned int nsibling)
+{
+	const long timeout = slice_timeout(siblings[0]);
+	struct intel_context *ce;
+	struct i915_request *rq;
+	struct igt_spinner spin;
+	unsigned int n;
+	int err = 0;
+
+	/*
+	 * Virtual requests must allow others a fair timeslice.
+	 */
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	/* XXX We do not handle oversubscription and fairness with normal rq */
+	for (n = 0; n < nsibling; n++) {
+		ce = intel_execlists_create_virtual(siblings, nsibling);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto out;
+		}
+
+		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+		intel_context_put(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out;
+		}
+
+		i915_request_add(rq);
+	}
+
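+	/* Each sibling must still complete a normal request while the
+	 * virtual spinners occupy the engines.
+	 */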
+	for (n = 0; !err && n < nsibling; n++) {
+		ce = intel_context_create(siblings[n]);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto out;
+		}
+
+		rq = intel_context_create_request(ce);
+		intel_context_put(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out;
+		}
+
+		i915_request_get(rq);
+		i915_request_add(rq);
+		if (i915_request_wait(rq, 0, timeout) < 0) {
+			GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
+				      __func__, siblings[n]->name);
+			GEM_TRACE_DUMP();
+			intel_gt_set_wedged(gt);
+			err = -EIO;
+		}
+		i915_request_put(rq);
+	}
+
+out:
+	igt_spinner_end(&spin);
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int live_virtual_slice(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	unsigned int class;
+	int err;
+
+	if (intel_uc_uses_guc_submission(&gt->uc))
+		return 0;
+
+	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+		unsigned int nsibling;
+
+		nsibling = __select_siblings(gt, class, siblings,
+					     intel_engine_has_timeslices);
+		if (nsibling < 2)
+			continue;
+
+		err = slicein_virtual_engine(gt, siblings, nsibling);
+		if (err)
+			return err;
+
+		err = sliceout_virtual_engine(gt, siblings, nsibling);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int preserved_virtual_engine(struct intel_gt *gt,
+				    struct intel_engine_cs **siblings,
+				    unsigned int nsibling)
+{
+	struct i915_request *last = NULL;
+	struct intel_context *ve;
+	struct i915_vma *scratch;
+	struct igt_live_test t;
+	unsigned int n;
+	int err = 0;
+	u32 *cs;
+
+	scratch = __vm_create_scratch_for_read(&siblings[0]->gt->ggtt->vm,
+					       PAGE_SIZE);
+	if (IS_ERR(scratch))
+		return PTR_ERR(scratch);
+
+	err = i915_vma_sync(scratch);
+	if (err)
+		goto out_scratch;
+
+	ve = intel_execlists_create_virtual(siblings, nsibling);
+	if (IS_ERR(ve)) {
+		err = PTR_ERR(ve);
+		goto out_scratch;
+	}
+
+	err = intel_context_pin(ve);
+	if (err)
+		goto out_put;
+
+	err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+	if (err)
+		goto out_unpin;
+
+	for (n = 0; n < NUM_GPR_DW; n++) {
+		struct intel_engine_cs *engine = siblings[n % nsibling];
+		struct i915_request *rq;
+
+		rq = i915_request_create(ve);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out_end;
+		}
+
+		i915_request_put(last);
+		last = i915_request_get(rq);
+
+		cs = intel_ring_begin(rq, 8);
+		if (IS_ERR(cs)) {
+			i915_request_add(rq);
+			err = PTR_ERR(cs);
+			goto out_end;
+		}
+
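+		/* Write CS_GPR[n], as carried in the context image, to scratch slot n */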
+		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+		*cs++ = CS_GPR(engine, n);
+		*cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+		*cs++ = 0;
+
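+		/* Seed the next GPR with n + 1 for a later request (on another sibling) to store */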
+		*cs++ = MI_LOAD_REGISTER_IMM(1);
+		*cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
+		*cs++ = n + 1;
+
+		*cs++ = MI_NOOP;
+		intel_ring_advance(rq, cs);
+
+		/* Restrict this request to run on a particular engine */
+		rq->execution_mask = engine->mask;
+		i915_request_add(rq);
+	}
+
+	if (i915_request_wait(last, 0, HZ / 5) < 0) {
+		err = -ETIME;
+		goto out_end;
+	}
+
+	cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto out_end;
+	}
+
+	for (n = 0; n < NUM_GPR_DW; n++) {
+		if (cs[n] != n) {
+			pr_err("Incorrect value[%d] found for GPR[%d]\n",
+			       cs[n], n);
+			err = -EINVAL;
+			break;
+		}
+	}
+
+	i915_gem_object_unpin_map(scratch->obj);
+
+out_end:
+	if (igt_live_test_end(&t))
+		err = -EIO;
+	i915_request_put(last);
+out_unpin:
+	intel_context_unpin(ve);
+out_put:
+	intel_context_put(ve);
+out_scratch:
+	i915_vma_unpin_and_release(&scratch, 0);
+	return err;
+}
+
+static int live_virtual_preserved(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	unsigned int class;
+
+	/*
+	 * Check that the context image retains non-privileged (user) registers
+	 * from one engine to the next. For this we check that the CS_GPR
+	 * registers are preserved.
+	 */
+
+	if (intel_uc_uses_guc_submission(&gt->uc))
+		return 0;
+
+	/* As we use the CS_GPR registers, we cannot run before they existed on all engines. */
+	if (INTEL_GEN(gt->i915) < 9)
+		return 0;
+
+	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+		int nsibling, err;
+
+		nsibling = select_siblings(gt, class, siblings);
+		if (nsibling < 2)
+			continue;
+
+		err = preserved_virtual_engine(gt, siblings, nsibling);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int bond_virtual_engine(struct intel_gt *gt,
+			       unsigned int class,
+			       struct intel_engine_cs **siblings,
+			       unsigned int nsibling,
+			       unsigned int flags)
+#define BOND_SCHEDULE BIT(0)
+{
+	struct intel_engine_cs *master;
+	struct i915_request *rq[16];
+	enum intel_engine_id id;
+	struct igt_spinner spin;
+	unsigned long n;
+	int err;
+
+	/*
+	 * A set of bonded requests is intended to be run concurrently
+	 * across a number of engines. We use one request per engine
+	 * and a magic fence to schedule each of the bonded requests
+	 * at the same time. A consequence of our current scheduler is that
+	 * we only move requests to the HW ready queue when the request
+	 * becomes ready, that is when all of its prerequisite fences have
+	 * been signaled. As one of those fences is the master submit fence,
+	 * there is a delay on all secondary fences as the HW may be
+	 * currently busy. Equally, as all the requests are independent,
+	 * they may have other fences that delay individual request
+	 * submission to HW. Ergo, we do not guarantee that all requests are
+	 * immediately submitted to HW at the same time, just that if the
+	 * rules are abided by, they are ready at the same time as the
+	 * first is submitted. Userspace can embed semaphores in its batch
+	 * to ensure parallel execution of its phases as it requires.
+	 * Though naturally it has been suggested that the scheduler should
+	 * take care of parallel execution, even across preemption events on
+	 * different HW. (The proper answer is of course "lalalala".)
+	 *
+	 * With the submit-fence, we have identified three possible phases
+	 * of synchronisation depending on the master fence: queued (not
+	 * ready), executing, and signaled. The first two are quite simple
+	 * and checked below. However, the signaled master fence handling is
+	 * contentious. Currently we do not distinguish between a signaled
+	 * fence and an expired fence, as once signaled it does not convey
+	 * any information about the previous execution. It may even be freed
+	 * and hence, by the time we check, it may not exist at all. Ergo we currently
+	 * do not apply the bonding constraint for an already signaled fence,
+	 * as our expectation is that it should not constrain the secondaries
+	 * and is outside of the scope of the bonded request API (i.e. all
+	 * userspace requests are meant to be running in parallel). As
+	 * it imposes no constraint, and is effectively a no-op, we do not
+	 * check below as normal execution flows are checked extensively above.
+	 *
+	 * XXX Is the degenerate handling of signaled submit fences the
+	 * expected behaviour for userspace?
+	 */
+
+	GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	err = 0;
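+	/* The cleanup loops walk rq[] until the first IS_ERR() entry, so keep it seeded */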
+	rq[0] = ERR_PTR(-ENOMEM);
+	for_each_engine(master, gt, id) {
+		struct i915_sw_fence fence = {};
+		struct intel_context *ce;
+
+		if (master->class == class)
+			continue;
+
+		ce = intel_context_create(master);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto out;
+		}
+
+		memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
+
+		rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
+		intel_context_put(ce);
+		if (IS_ERR(rq[0])) {
+			err = PTR_ERR(rq[0]);
+			goto out;
+		}
+		i915_request_get(rq[0]);
+
+		if (flags & BOND_SCHEDULE) {
+			onstack_fence_init(&fence);
+			err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
+							       &fence,
+							       GFP_KERNEL);
+		}
+
+		i915_request_add(rq[0]);
+		if (err < 0)
+			goto out;
+
+		if (!(flags & BOND_SCHEDULE) &&
+		    !igt_wait_for_spinner(&spin, rq[0])) {
+			err = -EIO;
+			goto out;
+		}
+
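+		/* Couple one bonded virtual request to the master for each sibling */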
+		for (n = 0; n < nsibling; n++) {
+			struct intel_context *ve;
+
+			ve = intel_execlists_create_virtual(siblings, nsibling);
+			if (IS_ERR(ve)) {
+				err = PTR_ERR(ve);
+				onstack_fence_fini(&fence);
+				goto out;
+			}
+
+			err = intel_virtual_engine_attach_bond(ve->engine,
+							       master,
+							       siblings[n]);
+			if (err) {
+				intel_context_put(ve);
+				onstack_fence_fini(&fence);
+				goto out;
+			}
+
+			err = intel_context_pin(ve);
+			intel_context_put(ve);
+			if (err) {
+				onstack_fence_fini(&fence);
+				goto out;
+			}
+
+			rq[n + 1] = i915_request_create(ve);
+			intel_context_unpin(ve);
+			if (IS_ERR(rq[n + 1])) {
+				err = PTR_ERR(rq[n + 1]);
+				onstack_fence_fini(&fence);
+				goto out;
+			}
+			i915_request_get(rq[n + 1]);
+
+			err = i915_request_await_execution(rq[n + 1],
+							   &rq[0]->fence,
+							   ve->engine->bond_execute);
+			i915_request_add(rq[n + 1]);
+			if (err < 0) {
+				onstack_fence_fini(&fence);
+				goto out;
+			}
+		}
+		onstack_fence_fini(&fence);
+		intel_engine_flush_submission(master);
+		igt_spinner_end(&spin);
+
+		if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
+			pr_err("Master request did not execute (on %s)!\n",
+			       rq[0]->engine->name);
+			err = -EIO;
+			goto out;
+		}
+
+		for (n = 0; n < nsibling; n++) {
+			if (i915_request_wait(rq[n + 1], 0,
+					      MAX_SCHEDULE_TIMEOUT) < 0) {
+				err = -EIO;
+				goto out;
+			}
+
+			if (rq[n + 1]->engine != siblings[n]) {
+				pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
+				       siblings[n]->name,
+				       rq[n + 1]->engine->name,
+				       rq[0]->engine->name);
+				err = -EINVAL;
+				goto out;
+			}
+		}
+
+		for (n = 0; !IS_ERR(rq[n]); n++)
+			i915_request_put(rq[n]);
+		rq[0] = ERR_PTR(-ENOMEM);
+	}
+
+out:
+	for (n = 0; !IS_ERR(rq[n]); n++)
+		i915_request_put(rq[n]);
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int live_virtual_bond(void *arg)
+{
+	static const struct phase {
+		const char *name;
+		unsigned int flags;
+	} phases[] = {
+		{ "", 0 },
+		{ "schedule", BOND_SCHEDULE },
+		{ },
+	};
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	unsigned int class;
+	int err;
+
+	if (intel_uc_uses_guc_submission(&gt->uc))
+		return 0;
+
+	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+		const struct phase *p;
+		int nsibling;
+
+		nsibling = select_siblings(gt, class, siblings);
+		if (nsibling < 2)
+			continue;
+
+		for (p = phases; p->name; p++) {
+			err = bond_virtual_engine(gt,
+						  class, siblings, nsibling,
+						  p->flags);
+			if (err) {
+				pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
+				       __func__, p->name, class, nsibling, err);
+				return err;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int reset_virtual_engine(struct intel_gt *gt,
+				struct intel_engine_cs **siblings,
+				unsigned int nsibling)
+{
+	struct intel_engine_cs *engine;
+	struct intel_context *ve;
+	struct igt_spinner spin;
+	struct i915_request *rq;
+	unsigned int n;
+	int err = 0;
+
+	/*
+	 * In order to support offline error capture for fast preempt reset,
+	 * we need to decouple the guilty request and ensure that it and its
+	 * descendants are not executed while the capture is in progress.
+	 */
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	ve = intel_execlists_create_virtual(siblings, nsibling);
+	if (IS_ERR(ve)) {
+		err = PTR_ERR(ve);
+		goto out_spin;
+	}
+
+	for (n = 0; n < nsibling; n++)
+		st_engine_heartbeat_disable(siblings[n]);
+
+	rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_heartbeat;
+	}
+	i915_request_add(rq);
+
+	if (!igt_wait_for_spinner(&spin, rq)) {
+		intel_gt_set_wedged(gt);
+		err = -ETIME;
+		goto out_heartbeat;
+	}
+
+	engine = rq->engine;
+	GEM_BUG_ON(engine == ve->engine);
+
+	/* Take ownership of the reset and tasklet */
+	local_bh_disable();
+	if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+			     &gt->reset.flags)) {
+		local_bh_enable();
+		intel_gt_set_wedged(gt);
+		err = -EBUSY;
+		goto out_heartbeat;
+	}
+	tasklet_disable(&engine->execlists.tasklet);
+
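+	/* Run the submission tasklet by hand while it is disabled */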
+	engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+	GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+	/* Fake a preemption event; failed of course */
+	spin_lock_irq(&engine->active.lock);
+	__unwind_incomplete_requests(engine);
+	spin_unlock_irq(&engine->active.lock);
+	GEM_BUG_ON(rq->engine != engine);
+
+	/* Reset the engine while keeping our active request on hold */
+	execlists_hold(engine, rq);
+	GEM_BUG_ON(!i915_request_on_hold(rq));
+
+	__intel_engine_reset_bh(engine, NULL);
+	GEM_BUG_ON(rq->fence.error != -EIO);
+
+	/* Release our grasp on the engine, letting CS flow again */
+	tasklet_enable(&engine->execlists.tasklet);
+	clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
+	local_bh_enable();
+
+	/* Check that we do not resubmit the held request */
+	i915_request_get(rq);
+	if (!i915_request_wait(rq, 0, HZ / 5)) {
+		pr_err("%s: on hold request completed!\n",
+		       engine->name);
+		intel_gt_set_wedged(gt);
+		err = -EIO;
+		goto out_rq;
+	}
+	GEM_BUG_ON(!i915_request_on_hold(rq));
+
+	/* But is resubmitted on release */
+	execlists_unhold(engine, rq);
+	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+		pr_err("%s: held request did not complete!\n",
+		       engine->name);
+		intel_gt_set_wedged(gt);
+		err = -ETIME;
+	}
+
+out_rq:
+	i915_request_put(rq);
+out_heartbeat:
+	for (n = 0; n < nsibling; n++)
+		st_engine_heartbeat_enable(siblings[n]);
+
+	intel_context_put(ve);
+out_spin:
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int live_virtual_reset(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	unsigned int class;
+
+	/*
+	 * Check that we handle a reset event within a virtual engine.
+	 * Only the physical engine is reset, but we have to check the flow
+	 * of the virtual requests around the reset, and make sure it is not
+	 * forgotten.
+	 */
+
+	if (intel_uc_uses_guc_submission(&gt->uc))
+		return 0;
+
+	if (!intel_has_reset_engine(gt))
+		return 0;
+
+	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+		int nsibling, err;
+
+		nsibling = select_siblings(gt, class, siblings);
+		if (nsibling < 2)
+			continue;
+
+		err = reset_virtual_engine(gt, siblings, nsibling);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+int intel_execlists_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(live_sanitycheck),
+		SUBTEST(live_unlite_switch),
+		SUBTEST(live_unlite_preempt),
+		SUBTEST(live_unlite_ring),
+		SUBTEST(live_pin_rewind),
+		SUBTEST(live_hold_reset),
+		SUBTEST(live_error_interrupt),
+		SUBTEST(live_timeslice_preempt),
+		SUBTEST(live_timeslice_rewind),
+		SUBTEST(live_timeslice_queue),
+		SUBTEST(live_timeslice_nopreempt),
+		SUBTEST(live_busywait_preempt),
+		SUBTEST(live_preempt),
+		SUBTEST(live_late_preempt),
+		SUBTEST(live_nopreempt),
+		SUBTEST(live_preempt_cancel),
+		SUBTEST(live_suppress_self_preempt),
+		SUBTEST(live_chain_preempt),
+		SUBTEST(live_preempt_ring),
+		SUBTEST(live_preempt_gang),
+		SUBTEST(live_preempt_timeout),
+		SUBTEST(live_preempt_user),
+		SUBTEST(live_preempt_smoke),
+		SUBTEST(live_virtual_engine),
+		SUBTEST(live_virtual_mask),
+		SUBTEST(live_virtual_preserved),
+		SUBTEST(live_virtual_slice),
+		SUBTEST(live_virtual_bond),
+		SUBTEST(live_virtual_reset),
+	};
+
+	if (!HAS_EXECLISTS(i915))
+		return 0;
+
+	if (intel_gt_is_wedged(&i915->gt))
+		return 0;
+
+	return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 6180a47c1b5114850edd34a10c15e73f85330a40..5d911f724ebe69c92d1cbb780165878e07c0aeba 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -71,7 +71,7 @@ static int live_gt_clocks(void *arg)
 	enum intel_engine_id id;
 	int err = 0;
 
-	if (!RUNTIME_INFO(gt->i915)->cs_timestamp_frequency_hz) { /* unknown */
+	if (!gt->clock_frequency) { /* unknown */
 		pr_info("CS_TIMESTAMP frequency unknown\n");
 		return 0;
 	}
@@ -112,12 +112,12 @@ static int live_gt_clocks(void *arg)
 
 		measure_clocks(engine, &cycles, &dt);
 
-		time = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
-		expected = i915_cs_timestamp_ns_to_ticks(engine->i915, dt);
+		time = intel_gt_clock_interval_to_ns(engine->gt, cycles);
+		expected = intel_gt_ns_to_clock_interval(engine->gt, dt);
 
 		pr_info("%s: TIMESTAMP %d cycles [%lldns] in %lldns [%d cycles], using CS clock frequency of %uKHz\n",
 			engine->name, cycles, time, dt, expected,
-			RUNTIME_INFO(engine->i915)->cs_timestamp_frequency_hz / 1000);
+			engine->gt->clock_frequency / 1000);
 
 		if (9 * time < 8 * dt || 8 * time > 9 * dt) {
 			pr_err("%s: CS ticks did not match walltime!\n",
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index fb5ebf930ab2e4a39da8b9326cb76f08608d1876..463bb6a700c86fc6b3593506b8d911b14875a149 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -506,7 +506,8 @@ static int igt_reset_nop_engine(void *arg)
 			}
 			err = intel_engine_reset(engine, NULL);
 			if (err) {
-				pr_err("i915_reset_engine failed\n");
+				pr_err("intel_engine_reset(%s) failed, err:%d\n",
+				       engine->name, err);
 				break;
 			}
 
@@ -539,6 +540,152 @@ static int igt_reset_nop_engine(void *arg)
 	return 0;
 }
 
+static void force_reset_timeout(struct intel_engine_cs *engine)
+{
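+	/* Force the reset preparation to report a timeout (gen8+ only) */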
+	engine->reset_timeout.probability = 999;
+	atomic_set(&engine->reset_timeout.times, -1);
+}
+
+static void cancel_reset_timeout(struct intel_engine_cs *engine)
+{
+	memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
+}
+
+static int igt_reset_fail_engine(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/* Check that we can recover from engine-reset failures */
+
+	if (!intel_has_reset_engine(gt))
+		return 0;
+
+	for_each_engine(engine, gt, id) {
+		unsigned int count;
+		struct intel_context *ce;
+		IGT_TIMEOUT(end_time);
+		int err;
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce))
+			return PTR_ERR(ce);
+
+		st_engine_heartbeat_disable(engine);
+		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+
+		force_reset_timeout(engine);
+		err = intel_engine_reset(engine, NULL);
+		cancel_reset_timeout(engine);
+		if (err == 0) /* timeouts only generated on gen8+ */
+			goto skip;
+
+		count = 0;
+		do {
+			struct i915_request *last = NULL;
+			int i;
+
+			if (!wait_for_idle(engine)) {
+				pr_err("%s failed to idle before reset\n",
+				       engine->name);
+				err = -EIO;
+				break;
+			}
+
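+			/* Queue a variable-length burst of requests before each reset */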
+			for (i = 0; i < count % 15; i++) {
+				struct i915_request *rq;
+
+				rq = intel_context_create_request(ce);
+				if (IS_ERR(rq)) {
+					struct drm_printer p =
+						drm_info_printer(gt->i915->drm.dev);
+					intel_engine_dump(engine, &p,
+							  "%s(%s): failed to submit request\n",
+							  __func__,
+							  engine->name);
+
+					GEM_TRACE("%s(%s): failed to submit request\n",
+						  __func__,
+						  engine->name);
+					GEM_TRACE_DUMP();
+
+					intel_gt_set_wedged(gt);
+					if (last)
+						i915_request_put(last);
+
+					err = PTR_ERR(rq);
+					goto out;
+				}
+
+				if (last)
+					i915_request_put(last);
+				last = i915_request_get(rq);
+				i915_request_add(rq);
+			}
+
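+			/* Alternate between real resets and resets forced to time out */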
+			if (count & 1) {
+				err = intel_engine_reset(engine, NULL);
+				if (err) {
+					GEM_TRACE_ERR("intel_engine_reset(%s) failed, err:%d\n",
+						      engine->name, err);
+					GEM_TRACE_DUMP();
+					i915_request_put(last);
+					break;
+				}
+			} else {
+				force_reset_timeout(engine);
+				err = intel_engine_reset(engine, NULL);
+				cancel_reset_timeout(engine);
+				if (err != -ETIMEDOUT) {
+					pr_err("intel_engine_reset(%s) did not fail, err:%d\n",
+					       engine->name, err);
+					i915_request_put(last);
+					break;
+				}
+			}
+
+			err = 0;
+			if (last) {
+				if (i915_request_wait(last, 0, HZ / 2) < 0) {
+					struct drm_printer p =
+						drm_info_printer(gt->i915->drm.dev);
+
+					intel_engine_dump(engine, &p,
+							  "%s(%s): failed to complete request\n",
+							  __func__,
+							  engine->name);
+
+					GEM_TRACE("%s(%s): failed to complete request\n",
+						  __func__,
+						  engine->name);
+					GEM_TRACE_DUMP();
+
+					err = -EIO;
+				}
+				i915_request_put(last);
+			}
+			count++;
+		} while (err == 0 && time_before(jiffies, end_time));
+out:
+		pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+skip:
+		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+		st_engine_heartbeat_enable(engine);
+		intel_context_put(ce);
+
+		if (igt_flush_test(gt->i915))
+			err = -EIO;
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int __igt_reset_engine(struct intel_gt *gt, bool active)
 {
 	struct i915_gpu_error *global = &gt->i915->gpu_error;
@@ -560,6 +704,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 
 	for_each_engine(engine, gt, id) {
 		unsigned int reset_count, reset_engine_count;
+		unsigned long count;
 		IGT_TIMEOUT(end_time);
 
 		if (active && !intel_engine_can_store_dword(engine))
@@ -577,6 +722,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 
 		st_engine_heartbeat_disable(engine);
 		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+		count = 0;
 		do {
 			if (active) {
 				struct i915_request *rq;
@@ -608,7 +754,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 
 			err = intel_engine_reset(engine, NULL);
 			if (err) {
-				pr_err("i915_reset_engine failed\n");
+				pr_err("intel_engine_reset(%s) failed, err:%d\n",
+				       engine->name, err);
 				break;
 			}
 
@@ -625,9 +772,13 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 				err = -EINVAL;
 				break;
 			}
+
+			count++;
 		} while (time_before(jiffies, end_time));
 		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
 		st_engine_heartbeat_enable(engine);
+		pr_info("%s: Completed %lu %s resets\n",
+			engine->name, count, active ? "active" : "idle");
 
 		if (err)
 			break;
@@ -1478,7 +1629,8 @@ static int igt_reset_queue(void *arg)
 			prev = rq;
 			count++;
 		} while (time_before(jiffies, end_time));
-		pr_info("%s: Completed %d resets\n", engine->name, count);
+		pr_info("%s: Completed %d queued resets\n",
+			engine->name, count);
 
 		*h.batch = MI_BATCH_BUFFER_END;
 		intel_gt_chipset_flush(engine->gt);
@@ -1575,13 +1727,21 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
 	GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
 		  engine->name, mode, p->name);
 
-	tasklet_disable(t);
+	if (t->func)
+		tasklet_disable(t);
+	if (strcmp(p->name, "softirq"))
+		local_bh_disable();
 	p->critical_section_begin();
 
-	err = intel_engine_reset(engine, NULL);
+	err = __intel_engine_reset_bh(engine, NULL);
 
 	p->critical_section_end();
-	tasklet_enable(t);
+	if (strcmp(p->name, "softirq"))
+		local_bh_enable();
+	if (t->func) {
+		tasklet_enable(t);
+		tasklet_hi_schedule(t);
+	}
 
 	if (err)
 		pr_err("i915_reset_engine(%s:%s) failed under %s\n",
@@ -1687,6 +1847,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_reset_nop_engine),
 		SUBTEST(igt_reset_idle_engine),
 		SUBTEST(igt_reset_active_engine),
+		SUBTEST(igt_reset_fail_engine),
 		SUBTEST(igt_reset_engines),
 		SUBTEST(igt_reset_engines_atomic),
 		SUBTEST(igt_reset_queue),
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 95d41c01d0e04221f6f22733d3f1d5a65e2107a9..920979a894137c9755810a182ebdf667a97a3742 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1,4709 +1,73 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2018 Intel Corporation
  */
 
 #include <linux/prime_numbers.h>
 
-#include "gem/i915_gem_pm.h"
-#include "gt/intel_engine_heartbeat.h"
-#include "gt/intel_reset.h"
-#include "gt/selftest_engine_heartbeat.h"
-
-#include "i915_selftest.h"
-#include "selftests/i915_random.h"
-#include "selftests/igt_flush_test.h"
-#include "selftests/igt_live_test.h"
-#include "selftests/igt_spinner.h"
-#include "selftests/lib_sw_fence.h"
-
-#include "gem/selftests/igt_gem_utils.h"
-#include "gem/selftests/mock_context.h"
-
-#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
-#define NUM_GPR 16
-#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
-
-static struct i915_vma *create_scratch(struct intel_gt *gt)
-{
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	int err;
-
-	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
-
-	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
-
-	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
-	if (IS_ERR(vma)) {
-		i915_gem_object_put(obj);
-		return vma;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
-	if (err) {
-		i915_gem_object_put(obj);
-		return ERR_PTR(err);
-	}
-
-	return vma;
-}
-
-static bool is_active(struct i915_request *rq)
-{
-	if (i915_request_is_active(rq))
-		return true;
-
-	if (i915_request_on_hold(rq))
-		return true;
-
-	if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
-		return true;
-
-	return false;
-}
-
-static int wait_for_submit(struct intel_engine_cs *engine,
-			   struct i915_request *rq,
-			   unsigned long timeout)
-{
-	timeout += jiffies;
-	do {
-		bool done = time_after(jiffies, timeout);
-
-		if (i915_request_completed(rq)) /* that was quick! */
-			return 0;
-
-		/* Wait until the HW has acknowleged the submission (or err) */
-		intel_engine_flush_submission(engine);
-		if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
-			return 0;
-
-		if (done)
-			return -ETIME;
-
-		cond_resched();
-	} while (1);
-}
-
-static int wait_for_reset(struct intel_engine_cs *engine,
-			  struct i915_request *rq,
-			  unsigned long timeout)
-{
-	timeout += jiffies;
-
-	do {
-		cond_resched();
-		intel_engine_flush_submission(engine);
-
-		if (READ_ONCE(engine->execlists.pending[0]))
-			continue;
-
-		if (i915_request_completed(rq))
-			break;
-
-		if (READ_ONCE(rq->fence.error))
-			break;
-	} while (time_before(jiffies, timeout));
-
-	flush_scheduled_work();
-
-	if (rq->fence.error != -EIO) {
-		pr_err("%s: hanging request %llx:%lld not reset\n",
-		       engine->name,
-		       rq->fence.context,
-		       rq->fence.seqno);
-		return -EINVAL;
-	}
-
-	/* Give the request a jiffie to complete after flushing the worker */
-	if (i915_request_wait(rq, 0,
-			      max(0l, (long)(timeout - jiffies)) + 1) < 0) {
-		pr_err("%s: hanging request %llx:%lld did not complete\n",
-		       engine->name,
-		       rq->fence.context,
-		       rq->fence.seqno);
-		return -ETIME;
-	}
-
-	return 0;
-}
-
-static int live_sanitycheck(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	struct igt_spinner spin;
-	int err = 0;
-
-	if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
-		return 0;
-
-	if (igt_spinner_init(&spin, gt))
-		return -ENOMEM;
-
-	for_each_engine(engine, gt, id) {
-		struct intel_context *ce;
-		struct i915_request *rq;
-
-		ce = intel_context_create(engine);
-		if (IS_ERR(ce)) {
-			err = PTR_ERR(ce);
-			break;
-		}
-
-		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			goto out_ctx;
-		}
-
-		i915_request_add(rq);
-		if (!igt_wait_for_spinner(&spin, rq)) {
-			GEM_TRACE("spinner failed to start\n");
-			GEM_TRACE_DUMP();
-			intel_gt_set_wedged(gt);
-			err = -EIO;
-			goto out_ctx;
-		}
-
-		igt_spinner_end(&spin);
-		if (igt_flush_test(gt->i915)) {
-			err = -EIO;
-			goto out_ctx;
-		}
-
-out_ctx:
-		intel_context_put(ce);
-		if (err)
-			break;
-	}
-
-	igt_spinner_fini(&spin);
-	return err;
-}
-
-static int live_unlite_restore(struct intel_gt *gt, int prio)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	struct igt_spinner spin;
-	int err = -ENOMEM;
-
-	/*
-	 * Check that we can correctly context switch between 2 instances
-	 * on the same engine from the same parent context.
-	 */
-
-	if (igt_spinner_init(&spin, gt))
-		return err;
-
-	err = 0;
-	for_each_engine(engine, gt, id) {
-		struct intel_context *ce[2] = {};
-		struct i915_request *rq[2];
-		struct igt_live_test t;
-		int n;
-
-		if (prio && !intel_engine_has_preemption(engine))
-			continue;
-
-		if (!intel_engine_can_store_dword(engine))
-			continue;
-
-		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
-			err = -EIO;
-			break;
-		}
-		st_engine_heartbeat_disable(engine);
-
-		for (n = 0; n < ARRAY_SIZE(ce); n++) {
-			struct intel_context *tmp;
-
-			tmp = intel_context_create(engine);
-			if (IS_ERR(tmp)) {
-				err = PTR_ERR(tmp);
-				goto err_ce;
-			}
-
-			err = intel_context_pin(tmp);
-			if (err) {
-				intel_context_put(tmp);
-				goto err_ce;
-			}
-
-			/*
-			 * Setup the pair of contexts such that if we
-			 * lite-restore using the RING_TAIL from ce[1] it
-			 * will execute garbage from ce[0]->ring.
-			 */
-			memset(tmp->ring->vaddr,
-			       POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
-			       tmp->ring->vma->size);
-
-			ce[n] = tmp;
-		}
-		GEM_BUG_ON(!ce[1]->ring->size);
-		intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
-		__execlists_update_reg_state(ce[1], engine, ce[1]->ring->head);
-
-		rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
-		if (IS_ERR(rq[0])) {
-			err = PTR_ERR(rq[0]);
-			goto err_ce;
-		}
-
-		i915_request_get(rq[0]);
-		i915_request_add(rq[0]);
-		GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
-
-		if (!igt_wait_for_spinner(&spin, rq[0])) {
-			i915_request_put(rq[0]);
-			goto err_ce;
-		}
-
-		rq[1] = i915_request_create(ce[1]);
-		if (IS_ERR(rq[1])) {
-			err = PTR_ERR(rq[1]);
-			i915_request_put(rq[0]);
-			goto err_ce;
-		}
-
-		if (!prio) {
-			/*
-			 * Ensure we do the switch to ce[1] on completion.
-			 *
-			 * rq[0] is already submitted, so this should reduce
-			 * to a no-op (a wait on a request on the same engine
-			 * uses the submit fence, not the completion fence),
-			 * but it will install a dependency on rq[1] for rq[0]
-			 * that will prevent the pair being reordered by
-			 * timeslicing.
-			 */
-			i915_request_await_dma_fence(rq[1], &rq[0]->fence);
-		}
-
-		i915_request_get(rq[1]);
-		i915_request_add(rq[1]);
-		GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
-		i915_request_put(rq[0]);
-
-		if (prio) {
-			struct i915_sched_attr attr = {
-				.priority = prio,
-			};
-
-			/* Alternatively preempt the spinner with ce[1] */
-			engine->schedule(rq[1], &attr);
-		}
-
-		/* And switch back to ce[0] for good measure */
-		rq[0] = i915_request_create(ce[0]);
-		if (IS_ERR(rq[0])) {
-			err = PTR_ERR(rq[0]);
-			i915_request_put(rq[1]);
-			goto err_ce;
-		}
-
-		i915_request_await_dma_fence(rq[0], &rq[1]->fence);
-		i915_request_get(rq[0]);
-		i915_request_add(rq[0]);
-		GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
-		i915_request_put(rq[1]);
-		i915_request_put(rq[0]);
-
-err_ce:
-		intel_engine_flush_submission(engine);
-		igt_spinner_end(&spin);
-		for (n = 0; n < ARRAY_SIZE(ce); n++) {
-			if (IS_ERR_OR_NULL(ce[n]))
-				break;
-
-			intel_context_unpin(ce[n]);
-			intel_context_put(ce[n]);
-		}
-
-		st_engine_heartbeat_enable(engine);
-		if (igt_live_test_end(&t))
-			err = -EIO;
-		if (err)
-			break;
-	}
-
-	igt_spinner_fini(&spin);
-	return err;
-}
-
-static int live_unlite_switch(void *arg)
-{
-	return live_unlite_restore(arg, 0);
-}
-
-static int live_unlite_preempt(void *arg)
-{
-	return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
-}
-
-static int live_unlite_ring(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	struct igt_spinner spin;
-	enum intel_engine_id id;
-	int err = 0;
-
-	/*
-	 * Setup a preemption event that will cause almost the entire ring
-	 * to be unwound, potentially fooling our intel_ring_direction()
-	 * into emitting a forward lite-restore instead of the rollback.
-	 */
-
-	if (igt_spinner_init(&spin, gt))
-		return -ENOMEM;
-
-	for_each_engine(engine, gt, id) {
-		struct intel_context *ce[2] = {};
-		struct i915_request *rq;
-		struct igt_live_test t;
-		int n;
-
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		if (!intel_engine_can_store_dword(engine))
-			continue;
-
-		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
-			err = -EIO;
-			break;
-		}
-		st_engine_heartbeat_disable(engine);
-
-		for (n = 0; n < ARRAY_SIZE(ce); n++) {
-			struct intel_context *tmp;
-
-			tmp = intel_context_create(engine);
-			if (IS_ERR(tmp)) {
-				err = PTR_ERR(tmp);
-				goto err_ce;
-			}
-
-			err = intel_context_pin(tmp);
-			if (err) {
-				intel_context_put(tmp);
-				goto err_ce;
-			}
-
-			memset32(tmp->ring->vaddr,
-				 0xdeadbeef, /* trigger a hang if executed */
-				 tmp->ring->vma->size / sizeof(u32));
-
-			ce[n] = tmp;
-		}
-
-		/* Create max prio spinner, followed by N low prio nops */
-		rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			goto err_ce;
-		}
-
-		i915_request_get(rq);
-		rq->sched.attr.priority = I915_PRIORITY_BARRIER;
-		i915_request_add(rq);
-
-		if (!igt_wait_for_spinner(&spin, rq)) {
-			intel_gt_set_wedged(gt);
-			i915_request_put(rq);
-			err = -ETIME;
-			goto err_ce;
-		}
-
-		/* Fill the ring, until we will cause a wrap */
-		n = 0;
-		while (intel_ring_direction(ce[0]->ring,
-					    rq->wa_tail,
-					    ce[0]->ring->tail) <= 0) {
-			struct i915_request *tmp;
-
-			tmp = intel_context_create_request(ce[0]);
-			if (IS_ERR(tmp)) {
-				err = PTR_ERR(tmp);
-				i915_request_put(rq);
-				goto err_ce;
-			}
-
-			i915_request_add(tmp);
-			intel_engine_flush_submission(engine);
-			n++;
-		}
-		intel_engine_flush_submission(engine);
-		pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
-			 engine->name, n,
-			 ce[0]->ring->size,
-			 ce[0]->ring->tail,
-			 ce[0]->ring->emit,
-			 rq->tail);
-		GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
-						rq->tail,
-						ce[0]->ring->tail) <= 0);
-		i915_request_put(rq);
-
-		/* Create a second ring to preempt the first ring after rq[0] */
-		rq = intel_context_create_request(ce[1]);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			goto err_ce;
-		}
-
-		rq->sched.attr.priority = I915_PRIORITY_BARRIER;
-		i915_request_get(rq);
-		i915_request_add(rq);
-
-		err = wait_for_submit(engine, rq, HZ / 2);
-		i915_request_put(rq);
-		if (err) {
-			pr_err("%s: preemption request was not submitted\n",
-			       engine->name);
-			err = -ETIME;
-		}
-
-		pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
-			 engine->name,
-			 ce[0]->ring->tail, ce[0]->ring->emit,
-			 ce[1]->ring->tail, ce[1]->ring->emit);
-
-err_ce:
-		intel_engine_flush_submission(engine);
-		igt_spinner_end(&spin);
-		for (n = 0; n < ARRAY_SIZE(ce); n++) {
-			if (IS_ERR_OR_NULL(ce[n]))
-				break;
-
-			intel_context_unpin(ce[n]);
-			intel_context_put(ce[n]);
-		}
-		st_engine_heartbeat_enable(engine);
-		if (igt_live_test_end(&t))
-			err = -EIO;
-		if (err)
-			break;
-	}
-
-	igt_spinner_fini(&spin);
-	return err;
-}
-
-static int live_pin_rewind(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int err = 0;
-
-	/*
-	 * We have to be careful not to trust intel_ring too much, for example
-	 * ring->head is updated upon retire which is out of sync with pinning
-	 * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
-	 * or else we risk writing an older, stale value.
-	 *
-	 * To simulate this, let's apply a bit of deliberate sabotague.
-	 */
-
-	for_each_engine(engine, gt, id) {
-		struct intel_context *ce;
-		struct i915_request *rq;
-		struct intel_ring *ring;
-		struct igt_live_test t;
-
-		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
-			err = -EIO;
-			break;
-		}
-
-		ce = intel_context_create(engine);
-		if (IS_ERR(ce)) {
-			err = PTR_ERR(ce);
-			break;
-		}
-
-		err = intel_context_pin(ce);
-		if (err) {
-			intel_context_put(ce);
-			break;
-		}
-
-		/* Keep the context awake while we play games */
-		err = i915_active_acquire(&ce->active);
-		if (err) {
-			intel_context_unpin(ce);
-			intel_context_put(ce);
-			break;
-		}
-		ring = ce->ring;
-
-		/* Poison the ring, and offset the next request from HEAD */
-		memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
-		ring->emit = ring->size / 2;
-		ring->tail = ring->emit;
-		GEM_BUG_ON(ring->head);
-
-		intel_context_unpin(ce);
-
-		/* Submit a simple nop request */
-		GEM_BUG_ON(intel_context_is_pinned(ce));
-		rq = intel_context_create_request(ce);
-		i915_active_release(&ce->active); /* e.g. async retire */
-		intel_context_put(ce);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			break;
-		}
-		GEM_BUG_ON(!rq->head);
-		i915_request_add(rq);
-
-		/* Expect not to hang! */
-		if (igt_live_test_end(&t)) {
-			err = -EIO;
-			break;
-		}
-	}
-
-	return err;
-}
-
-static int live_hold_reset(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	struct igt_spinner spin;
-	int err = 0;
-
-	/*
-	 * In order to support offline error capture for fast preempt reset,
-	 * we need to decouple the guilty request and ensure that it and its
-	 * descendents are not executed while the capture is in progress.
-	 */
-
-	if (!intel_has_reset_engine(gt))
-		return 0;
-
-	if (igt_spinner_init(&spin, gt))
-		return -ENOMEM;
-
-	for_each_engine(engine, gt, id) {
-		struct intel_context *ce;
-		struct i915_request *rq;
-
-		ce = intel_context_create(engine);
-		if (IS_ERR(ce)) {
-			err = PTR_ERR(ce);
-			break;
-		}
-
-		st_engine_heartbeat_disable(engine);
-
-		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			goto out;
-		}
-		i915_request_add(rq);
-
-		if (!igt_wait_for_spinner(&spin, rq)) {
-			intel_gt_set_wedged(gt);
-			err = -ETIME;
-			goto out;
-		}
-
-		/* We have our request executing, now remove it and reset */
-
-		if (test_and_set_bit(I915_RESET_ENGINE + id,
-				     &gt->reset.flags)) {
-			intel_gt_set_wedged(gt);
-			err = -EBUSY;
-			goto out;
-		}
-		tasklet_disable(&engine->execlists.tasklet);
-
-		engine->execlists.tasklet.func(engine->execlists.tasklet.data);
-		GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
-
-		i915_request_get(rq);
-		execlists_hold(engine, rq);
-		GEM_BUG_ON(!i915_request_on_hold(rq));
-
-		intel_engine_reset(engine, NULL);
-		GEM_BUG_ON(rq->fence.error != -EIO);
-
-		tasklet_enable(&engine->execlists.tasklet);
-		clear_and_wake_up_bit(I915_RESET_ENGINE + id,
-				      &gt->reset.flags);
-
-		/* Check that we do not resubmit the held request */
-		if (!i915_request_wait(rq, 0, HZ / 5)) {
-			pr_err("%s: on hold request completed!\n",
-			       engine->name);
-			i915_request_put(rq);
-			err = -EIO;
-			goto out;
-		}
-		GEM_BUG_ON(!i915_request_on_hold(rq));
-
-		/* But is resubmitted on release */
-		execlists_unhold(engine, rq);
-		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
-			pr_err("%s: held request did not complete!\n",
-			       engine->name);
-			intel_gt_set_wedged(gt);
-			err = -ETIME;
-		}
-		i915_request_put(rq);
-
-out:
-		st_engine_heartbeat_enable(engine);
-		intel_context_put(ce);
-		if (err)
-			break;
-	}
-
-	igt_spinner_fini(&spin);
-	return err;
-}
-
-static const char *error_repr(int err)
-{
-	return err ? "bad" : "good";
-}
-
-static int live_error_interrupt(void *arg)
-{
-	static const struct error_phase {
-		enum { GOOD = 0, BAD = -EIO } error[2];
-	} phases[] = {
-		{ { BAD,  GOOD } },
-		{ { BAD,  BAD  } },
-		{ { BAD,  GOOD } },
-		{ { GOOD, GOOD } }, /* sentinel */
-	};
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	/*
-	 * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
-	 * of invalid commands in user batches that will cause a GPU hang.
-	 * This is a faster mechanism than using hangcheck/heartbeats, but
-	 * only detects problems the HW knows about -- it will not warn when
-	 * we kill the HW!
-	 *
-	 * To verify our detection and reset, we throw some invalid commands
-	 * at the HW and wait for the interrupt.
-	 */
-
-	if (!intel_has_reset_engine(gt))
-		return 0;
-
-	for_each_engine(engine, gt, id) {
-		const struct error_phase *p;
-		int err = 0;
-
-		st_engine_heartbeat_disable(engine);
-
-		for (p = phases; p->error[0] != GOOD; p++) {
-			struct i915_request *client[ARRAY_SIZE(phases->error)];
-			u32 *cs;
-			int i;
-
-			memset(client, 0, sizeof(*client));
-			for (i = 0; i < ARRAY_SIZE(client); i++) {
-				struct intel_context *ce;
-				struct i915_request *rq;
-
-				ce = intel_context_create(engine);
-				if (IS_ERR(ce)) {
-					err = PTR_ERR(ce);
-					goto out;
-				}
-
-				rq = intel_context_create_request(ce);
-				intel_context_put(ce);
-				if (IS_ERR(rq)) {
-					err = PTR_ERR(rq);
-					goto out;
-				}
-
-				if (rq->engine->emit_init_breadcrumb) {
-					err = rq->engine->emit_init_breadcrumb(rq);
-					if (err) {
-						i915_request_add(rq);
-						goto out;
-					}
-				}
-
-				cs = intel_ring_begin(rq, 2);
-				if (IS_ERR(cs)) {
-					i915_request_add(rq);
-					err = PTR_ERR(cs);
-					goto out;
-				}
-
-				if (p->error[i]) {
-					*cs++ = 0xdeadbeef;
-					*cs++ = 0xdeadbeef;
-				} else {
-					*cs++ = MI_NOOP;
-					*cs++ = MI_NOOP;
-				}
-
-				client[i] = i915_request_get(rq);
-				i915_request_add(rq);
-			}
-
-			err = wait_for_submit(engine, client[0], HZ / 2);
-			if (err) {
-				pr_err("%s: first request did not start within time!\n",
-				       engine->name);
-				err = -ETIME;
-				goto out;
-			}
-
-			for (i = 0; i < ARRAY_SIZE(client); i++) {
-				if (i915_request_wait(client[i], 0, HZ / 5) < 0)
-					pr_debug("%s: %s request incomplete!\n",
-						 engine->name,
-						 error_repr(p->error[i]));
-
-				if (!i915_request_started(client[i])) {
-					pr_err("%s: %s request not started!\n",
-					       engine->name,
-					       error_repr(p->error[i]));
-					err = -ETIME;
-					goto out;
-				}
-
-				/* Kick the tasklet to process the error */
-				intel_engine_flush_submission(engine);
-				if (client[i]->fence.error != p->error[i]) {
-					pr_err("%s: %s request (%s) with wrong error code: %d\n",
-					       engine->name,
-					       error_repr(p->error[i]),
-					       i915_request_completed(client[i]) ? "completed" : "running",
-					       client[i]->fence.error);
-					err = -EINVAL;
-					goto out;
-				}
-			}
-
-out:
-			for (i = 0; i < ARRAY_SIZE(client); i++)
-				if (client[i])
-					i915_request_put(client[i]);
-			if (err) {
-				pr_err("%s: failed at phase[%zd] { %d, %d }\n",
-				       engine->name, p - phases,
-				       p->error[0], p->error[1]);
-				break;
-			}
-		}
-
-		st_engine_heartbeat_enable(engine);
-		if (err) {
-			intel_gt_set_wedged(gt);
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-static int
-emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
-{
-	u32 *cs;
-
-	cs = intel_ring_begin(rq, 10);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
-
-	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
-	*cs++ = MI_SEMAPHORE_WAIT |
-		MI_SEMAPHORE_GLOBAL_GTT |
-		MI_SEMAPHORE_POLL |
-		MI_SEMAPHORE_SAD_NEQ_SDD;
-	*cs++ = 0;
-	*cs++ = i915_ggtt_offset(vma) + 4 * idx;
-	*cs++ = 0;
-
-	if (idx > 0) {
-		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
-		*cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
-		*cs++ = 0;
-		*cs++ = 1;
-	} else {
-		*cs++ = MI_NOOP;
-		*cs++ = MI_NOOP;
-		*cs++ = MI_NOOP;
-		*cs++ = MI_NOOP;
-	}
-
-	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
-
-	intel_ring_advance(rq, cs);
-	return 0;
-}
-
-static struct i915_request *
-semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
-{
-	struct intel_context *ce;
-	struct i915_request *rq;
-	int err;
-
-	ce = intel_context_create(engine);
-	if (IS_ERR(ce))
-		return ERR_CAST(ce);
-
-	rq = intel_context_create_request(ce);
-	if (IS_ERR(rq))
-		goto out_ce;
-
-	err = 0;
-	if (rq->engine->emit_init_breadcrumb)
-		err = rq->engine->emit_init_breadcrumb(rq);
-	if (err == 0)
-		err = emit_semaphore_chain(rq, vma, idx);
-	if (err == 0)
-		i915_request_get(rq);
-	i915_request_add(rq);
-	if (err)
-		rq = ERR_PTR(err);
-
-out_ce:
-	intel_context_put(ce);
-	return rq;
-}
-
-static int
-release_queue(struct intel_engine_cs *engine,
-	      struct i915_vma *vma,
-	      int idx, int prio)
-{
-	struct i915_sched_attr attr = {
-		.priority = prio,
-	};
-	struct i915_request *rq;
-	u32 *cs;
-
-	rq = intel_engine_create_kernel_request(engine);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
-
-	cs = intel_ring_begin(rq, 4);
-	if (IS_ERR(cs)) {
-		i915_request_add(rq);
-		return PTR_ERR(cs);
-	}
-
-	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
-	*cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
-	*cs++ = 0;
-	*cs++ = 1;
-
-	intel_ring_advance(rq, cs);
-
-	i915_request_get(rq);
-	i915_request_add(rq);
-
-	local_bh_disable();
-	engine->schedule(rq, &attr);
-	local_bh_enable(); /* kick tasklet */
-
-	i915_request_put(rq);
-
-	return 0;
-}
-
-static int
-slice_semaphore_queue(struct intel_engine_cs *outer,
-		      struct i915_vma *vma,
-		      int count)
-{
-	struct intel_engine_cs *engine;
-	struct i915_request *head;
-	enum intel_engine_id id;
-	int err, i, n = 0;
-
-	head = semaphore_queue(outer, vma, n++);
-	if (IS_ERR(head))
-		return PTR_ERR(head);
-
-	for_each_engine(engine, outer->gt, id) {
-		for (i = 0; i < count; i++) {
-			struct i915_request *rq;
-
-			rq = semaphore_queue(engine, vma, n++);
-			if (IS_ERR(rq)) {
-				err = PTR_ERR(rq);
-				goto out;
-			}
-
-			i915_request_put(rq);
-		}
-	}
-
-	err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
-	if (err)
-		goto out;
-
-	if (i915_request_wait(head, 0,
-			      2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
-		pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
-		       count, n);
-		GEM_TRACE_DUMP();
-		intel_gt_set_wedged(outer->gt);
-		err = -EIO;
-	}
-
-out:
-	i915_request_put(head);
-	return err;
-}
-
-static int live_timeslice_preempt(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	struct i915_vma *vma;
-	void *vaddr;
-	int err = 0;
-
-	/*
-	 * If a request takes too long, we would like to give other users
-	 * a fair go on the GPU. In particular, users may create batches
-	 * that wait upon external input, where that input may even be
-	 * supplied by another GPU job. To avoid blocking forever, we
-	 * need to preempt the current task and replace it with another
-	 * ready task.
-	 */
-	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
-		return 0;
-
-	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
-
-	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		goto err_obj;
-	}
-
-	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_obj;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
-	if (err)
-		goto err_map;
-
-	err = i915_vma_sync(vma);
-	if (err)
-		goto err_pin;
-
-	for_each_engine(engine, gt, id) {
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		memset(vaddr, 0, PAGE_SIZE);
-
-		st_engine_heartbeat_disable(engine);
-		err = slice_semaphore_queue(engine, vma, 5);
-		st_engine_heartbeat_enable(engine);
-		if (err)
-			goto err_pin;
-
-		if (igt_flush_test(gt->i915)) {
-			err = -EIO;
-			goto err_pin;
-		}
-	}
-
-err_pin:
-	i915_vma_unpin(vma);
-err_map:
-	i915_gem_object_unpin_map(obj);
-err_obj:
-	i915_gem_object_put(obj);
-	return err;
-}
-
-static struct i915_request *
-create_rewinder(struct intel_context *ce,
-		struct i915_request *wait,
-		void *slot, int idx)
-{
-	const u32 offset =
-		i915_ggtt_offset(ce->engine->status_page.vma) +
-		offset_in_page(slot);
-	struct i915_request *rq;
-	u32 *cs;
-	int err;
-
-	rq = intel_context_create_request(ce);
-	if (IS_ERR(rq))
-		return rq;
-
-	if (wait) {
-		err = i915_request_await_dma_fence(rq, &wait->fence);
-		if (err)
-			goto err;
-	}
-
-	cs = intel_ring_begin(rq, 14);
-	if (IS_ERR(cs)) {
-		err = PTR_ERR(cs);
-		goto err;
-	}
-
-	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-	*cs++ = MI_NOOP;
-
-	*cs++ = MI_SEMAPHORE_WAIT |
-		MI_SEMAPHORE_GLOBAL_GTT |
-		MI_SEMAPHORE_POLL |
-		MI_SEMAPHORE_SAD_GTE_SDD;
-	*cs++ = idx;
-	*cs++ = offset;
-	*cs++ = 0;
-
-	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
-	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
-	*cs++ = offset + idx * sizeof(u32);
-	*cs++ = 0;
-
-	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
-	*cs++ = offset;
-	*cs++ = 0;
-	*cs++ = idx + 1;
-
-	intel_ring_advance(rq, cs);
-
-	rq->sched.attr.priority = I915_PRIORITY_MASK;
-	err = 0;
-err:
-	i915_request_get(rq);
-	i915_request_add(rq);
-	if (err) {
-		i915_request_put(rq);
-		return ERR_PTR(err);
-	}
-
-	return rq;
-}
-
-static int live_timeslice_rewind(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	/*
-	 * The usual presumption on timeslice expiration is that we replace
-	 * the active context with another. However, given a chain of
-	 * dependencies we may end up with replacing the context with itself,
-	 * but only a few of those requests, forcing us to rewind the
-	 * RING_TAIL of the original request.
-	 */
-	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
-		return 0;
-
-	for_each_engine(engine, gt, id) {
-		enum { A1, A2, B1 };
-		enum { X = 1, Z, Y };
-		struct i915_request *rq[3] = {};
-		struct intel_context *ce;
-		unsigned long timeslice;
-		int i, err = 0;
-		u32 *slot;
-
-		if (!intel_engine_has_timeslices(engine))
-			continue;
-
-		/*
-		 * A:rq1 -- semaphore wait, timestamp X
-		 * A:rq2 -- write timestamp Y
-		 *
-		 * B:rq1 [await A:rq1] -- write timestamp Z
-		 *
-		 * Force timeslice, release semaphore.
-		 *
-		 * Expect execution/evaluation order XZY
-		 */
-
-		st_engine_heartbeat_disable(engine);
-		timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
-
-		slot = memset32(engine->status_page.addr + 1000, 0, 4);
-
-		ce = intel_context_create(engine);
-		if (IS_ERR(ce)) {
-			err = PTR_ERR(ce);
-			goto err;
-		}
-
-		rq[A1] = create_rewinder(ce, NULL, slot, X);
-		if (IS_ERR(rq[A1])) {
-			intel_context_put(ce);
-			goto err;
-		}
-
-		rq[A2] = create_rewinder(ce, NULL, slot, Y);
-		intel_context_put(ce);
-		if (IS_ERR(rq[A2]))
-			goto err;
-
-		err = wait_for_submit(engine, rq[A2], HZ / 2);
-		if (err) {
-			pr_err("%s: failed to submit first context\n",
-			       engine->name);
-			goto err;
-		}
-
-		ce = intel_context_create(engine);
-		if (IS_ERR(ce)) {
-			err = PTR_ERR(ce);
-			goto err;
-		}
-
-		rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
-		intel_context_put(ce);
-		if (IS_ERR(rq[2]))
-			goto err;
-
-		err = wait_for_submit(engine, rq[B1], HZ / 2);
-		if (err) {
-			pr_err("%s: failed to submit second context\n",
-			       engine->name);
-			goto err;
-		}
-
-		/* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
-		ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
-		if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */
-			/* Wait for the timeslice to kick in */
-			del_timer(&engine->execlists.timer);
-			tasklet_hi_schedule(&engine->execlists.tasklet);
-			intel_engine_flush_submission(engine);
-		}
-		/* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
-		GEM_BUG_ON(!i915_request_is_active(rq[A1]));
-		GEM_BUG_ON(!i915_request_is_active(rq[B1]));
-		GEM_BUG_ON(i915_request_is_active(rq[A2]));
-
-		/* Release the hounds! */
-		slot[0] = 1;
-		wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
-
-		for (i = 1; i <= 3; i++) {
-			unsigned long timeout = jiffies + HZ / 2;
-
-			while (!READ_ONCE(slot[i]) &&
-			       time_before(jiffies, timeout))
-				;
-
-			if (!time_before(jiffies, timeout)) {
-				pr_err("%s: rq[%d] timed out\n",
-				       engine->name, i - 1);
-				err = -ETIME;
-				goto err;
-			}
-
-			pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
-		}
-
-		/* XZY: XZ < XY */
-		if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
-			pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
-			       engine->name,
-			       slot[Z] - slot[X],
-			       slot[Y] - slot[X]);
-			err = -EINVAL;
-		}
-
-err:
-		memset32(&slot[0], -1, 4);
-		wmb();
-
-		engine->props.timeslice_duration_ms = timeslice;
-		st_engine_heartbeat_enable(engine);
-		for (i = 0; i < 3; i++)
-			i915_request_put(rq[i]);
-		if (igt_flush_test(gt->i915))
-			err = -EIO;
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-static struct i915_request *nop_request(struct intel_engine_cs *engine)
-{
-	struct i915_request *rq;
-
-	rq = intel_engine_create_kernel_request(engine);
-	if (IS_ERR(rq))
-		return rq;
-
-	i915_request_get(rq);
-	i915_request_add(rq);
-
-	return rq;
-}
-
-static long slice_timeout(struct intel_engine_cs *engine)
-{
-	long timeout;
-
-	/* Enough time for a timeslice to kick in, and kick out */
-	timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine));
-
-	/* Enough time for the nop request to complete */
-	timeout += HZ / 5;
-
-	return timeout + 1;
-}
-
-static int live_timeslice_queue(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	struct i915_vma *vma;
-	void *vaddr;
-	int err = 0;
-
-	/*
-	 * Make sure that even if ELSP[0] and ELSP[1] are filled with
-	 * timeslicing between them disabled, we *do* enable timeslicing
-	 * if the queue demands it. (Normally, we do not submit if
-	 * ELSP[1] is already occupied, so must rely on timeslicing to
-	 * eject ELSP[0] in favour of the queue.)
-	 */
-	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
-		return 0;
-
-	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
-
-	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		goto err_obj;
-	}
-
-	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_obj;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
-	if (err)
-		goto err_map;
-
-	err = i915_vma_sync(vma);
-	if (err)
-		goto err_pin;
-
-	for_each_engine(engine, gt, id) {
-		struct i915_sched_attr attr = {
-			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
-		};
-		struct i915_request *rq, *nop;
-
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		st_engine_heartbeat_disable(engine);
-		memset(vaddr, 0, PAGE_SIZE);
-
-		/* ELSP[0]: semaphore wait */
-		rq = semaphore_queue(engine, vma, 0);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			goto err_heartbeat;
-		}
-		engine->schedule(rq, &attr);
-		err = wait_for_submit(engine, rq, HZ / 2);
-		if (err) {
-			pr_err("%s: Timed out trying to submit semaphores\n",
-			       engine->name);
-			goto err_rq;
-		}
-
-		/* ELSP[1]: nop request */
-		nop = nop_request(engine);
-		if (IS_ERR(nop)) {
-			err = PTR_ERR(nop);
-			goto err_rq;
-		}
-		err = wait_for_submit(engine, nop, HZ / 2);
-		i915_request_put(nop);
-		if (err) {
-			pr_err("%s: Timed out trying to submit nop\n",
-			       engine->name);
-			goto err_rq;
-		}
-
-		GEM_BUG_ON(i915_request_completed(rq));
-		GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
-
-		/* Queue: semaphore signal, matching priority as semaphore */
-		err = release_queue(engine, vma, 1, effective_prio(rq));
-		if (err)
-			goto err_rq;
-
-		/* Wait until we ack the release_queue and start timeslicing */
-		do {
-			cond_resched();
-			intel_engine_flush_submission(engine);
-		} while (READ_ONCE(engine->execlists.pending[0]));
-
-		/* Timeslice every jiffy, so within 2 we should signal */
-		if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
-			struct drm_printer p =
-				drm_info_printer(gt->i915->drm.dev);
-
-			pr_err("%s: Failed to timeslice into queue\n",
-			       engine->name);
-			intel_engine_dump(engine, &p,
-					  "%s\n", engine->name);
-
-			memset(vaddr, 0xff, PAGE_SIZE);
-			err = -EIO;
-		}
-err_rq:
-		i915_request_put(rq);
-err_heartbeat:
-		st_engine_heartbeat_enable(engine);
-		if (err)
-			break;
-	}
-
-err_pin:
-	i915_vma_unpin(vma);
-err_map:
-	i915_gem_object_unpin_map(obj);
-err_obj:
-	i915_gem_object_put(obj);
-	return err;
-}
-
-static int live_timeslice_nopreempt(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	struct igt_spinner spin;
-	int err = 0;
-
-	/*
-	 * We should not timeslice into a request that is marked with
-	 * I915_FENCE_FLAG_NOPREEMPT.
-	 */
-	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
-		return 0;
-
-	if (igt_spinner_init(&spin, gt))
-		return -ENOMEM;
-
-	for_each_engine(engine, gt, id) {
-		struct intel_context *ce;
-		struct i915_request *rq;
-		unsigned long timeslice;
-
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		ce = intel_context_create(engine);
-		if (IS_ERR(ce)) {
-			err = PTR_ERR(ce);
-			break;
-		}
-
-		st_engine_heartbeat_disable(engine);
-		timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
-
-		/* Create an unpreemptible spinner */
-
-		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
-		intel_context_put(ce);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			goto out_heartbeat;
-		}
-
-		i915_request_get(rq);
-		i915_request_add(rq);
-
-		if (!igt_wait_for_spinner(&spin, rq)) {
-			i915_request_put(rq);
-			err = -ETIME;
-			goto out_spin;
-		}
-
-		set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
-		i915_request_put(rq);
-
-		/* Followed by a maximum priority barrier (heartbeat) */
-
-		ce = intel_context_create(engine);
-		if (IS_ERR(ce)) {
-			err = PTR_ERR(ce);
-			goto out_spin;
-		}
-
-		rq = intel_context_create_request(ce);
-		intel_context_put(ce);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			goto out_spin;
-		}
-
-		rq->sched.attr.priority = I915_PRIORITY_BARRIER;
-		i915_request_get(rq);
-		i915_request_add(rq);
-
-		/*
-		 * Wait until the barrier is in ELSP, and we know timeslicing
-		 * will have been activated.
-		 */
-		if (wait_for_submit(engine, rq, HZ / 2)) {
-			i915_request_put(rq);
-			err = -ETIME;
-			goto out_spin;
-		}
-
-		/*
-		 * Since the ELSP[0] request is unpreemptible, it should not
-		 * allow the maximum priority barrier through. Wait long
-		 * enough to see if it is timesliced in by mistake.
-		 */
-		if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
-			pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
-			       engine->name);
-			err = -EINVAL;
-		}
-		i915_request_put(rq);
-
-out_spin:
-		igt_spinner_end(&spin);
-out_heartbeat:
-		xchg(&engine->props.timeslice_duration_ms, timeslice);
-		st_engine_heartbeat_enable(engine);
-		if (err)
-			break;
-
-		if (igt_flush_test(gt->i915)) {
-			err = -EIO;
-			break;
-		}
-	}
-
-	igt_spinner_fini(&spin);
-	return err;
-}
-
-static int live_busywait_preempt(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct intel_engine_cs *engine;
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	enum intel_engine_id id;
-	int err = -ENOMEM;
-	u32 *map;
-
-	/*
-	 * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
-	 * preempt the busywaits used to synchronise between rings.
-	 */
-
-	ctx_hi = kernel_context(gt->i915);
-	if (!ctx_hi)
-		return -ENOMEM;
-	ctx_hi->sched.priority =
-		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
-	ctx_lo = kernel_context(gt->i915);
-	if (!ctx_lo)
-		goto err_ctx_hi;
-	ctx_lo->sched.priority =
-		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
-	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
-	if (IS_ERR(obj)) {
-		err = PTR_ERR(obj);
-		goto err_ctx_lo;
-	}
-
-	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
-	if (IS_ERR(map)) {
-		err = PTR_ERR(map);
-		goto err_obj;
-	}
-
-	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		goto err_map;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
-	if (err)
-		goto err_map;
-
-	err = i915_vma_sync(vma);
-	if (err)
-		goto err_vma;
-
-	for_each_engine(engine, gt, id) {
-		struct i915_request *lo, *hi;
-		struct igt_live_test t;
-		u32 *cs;
-
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		if (!intel_engine_can_store_dword(engine))
-			continue;
-
-		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
-			err = -EIO;
-			goto err_vma;
-		}
-
-		/*
-		 * We create two requests. The low priority request
-		 * busywaits on a semaphore (inside the ringbuffer where
-		 * it should be preemptible) and the high priority request
-		 * uses a MI_STORE_DWORD_IMM to update the semaphore value
-		 * allowing the first request to complete. If preemption
-		 * fails, we hang instead.
-		 */
-
-		lo = igt_request_alloc(ctx_lo, engine);
-		if (IS_ERR(lo)) {
-			err = PTR_ERR(lo);
-			goto err_vma;
-		}
-
-		cs = intel_ring_begin(lo, 8);
-		if (IS_ERR(cs)) {
-			err = PTR_ERR(cs);
-			i915_request_add(lo);
-			goto err_vma;
-		}
-
-		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
-		*cs++ = i915_ggtt_offset(vma);
-		*cs++ = 0;
-		*cs++ = 1;
-
-		/* XXX Do we need a flush + invalidate here? */
-
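-		/*
-		 * Busywait: poll the dword we just set to 1 until it reads
-		 * back as zero; only the high priority request clears it.
-		 */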
-		*cs++ = MI_SEMAPHORE_WAIT |
-			MI_SEMAPHORE_GLOBAL_GTT |
-			MI_SEMAPHORE_POLL |
-			MI_SEMAPHORE_SAD_EQ_SDD;
-		*cs++ = 0;
-		*cs++ = i915_ggtt_offset(vma);
-		*cs++ = 0;
-
-		intel_ring_advance(lo, cs);
-
-		i915_request_get(lo);
-		i915_request_add(lo);
-
-		if (wait_for(READ_ONCE(*map), 10)) {
-			i915_request_put(lo);
-			err = -ETIMEDOUT;
-			goto err_vma;
-		}
-
-		/* Low priority request should be busywaiting now */
-		if (i915_request_wait(lo, 0, 1) != -ETIME) {
-			i915_request_put(lo);
-			pr_err("%s: Busywaiting request did not!\n",
-			       engine->name);
-			err = -EIO;
-			goto err_vma;
-		}
-
-		hi = igt_request_alloc(ctx_hi, engine);
-		if (IS_ERR(hi)) {
-			err = PTR_ERR(hi);
-			i915_request_put(lo);
-			goto err_vma;
-		}
-
-		cs = intel_ring_begin(hi, 4);
-		if (IS_ERR(cs)) {
-			err = PTR_ERR(cs);
-			i915_request_add(hi);
-			i915_request_put(lo);
-			goto err_vma;
-		}
-
-		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
-		*cs++ = i915_ggtt_offset(vma);
-		*cs++ = 0;
-		*cs++ = 0;
-
-		intel_ring_advance(hi, cs);
-		i915_request_add(hi);
-
-		if (i915_request_wait(lo, 0, HZ / 5) < 0) {
-			struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
-
-			pr_err("%s: Failed to preempt semaphore busywait!\n",
-			       engine->name);
-
-			intel_engine_dump(engine, &p, "%s\n", engine->name);
-			GEM_TRACE_DUMP();
-
-			i915_request_put(lo);
-			intel_gt_set_wedged(gt);
-			err = -EIO;
-			goto err_vma;
-		}
-		GEM_BUG_ON(READ_ONCE(*map));
-		i915_request_put(lo);
-
-		if (igt_live_test_end(&t)) {
-			err = -EIO;
-			goto err_vma;
-		}
-	}
-
-	err = 0;
-err_vma:
-	i915_vma_unpin(vma);
-err_map:
-	i915_gem_object_unpin_map(obj);
-err_obj:
-	i915_gem_object_put(obj);
-err_ctx_lo:
-	kernel_context_close(ctx_lo);
-err_ctx_hi:
-	kernel_context_close(ctx_hi);
-	return err;
-}
-
-static struct i915_request *
-spinner_create_request(struct igt_spinner *spin,
-		       struct i915_gem_context *ctx,
-		       struct intel_engine_cs *engine,
-		       u32 arb)
-{
-	struct intel_context *ce;
-	struct i915_request *rq;
-
-	ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
-	if (IS_ERR(ce))
-		return ERR_CAST(ce);
-
-	rq = igt_spinner_create_request(spin, ce, arb);
-	intel_context_put(ce);
-	return rq;
-}
-
-static int live_preempt(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct igt_spinner spin_hi, spin_lo;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int err = -ENOMEM;
-
-	if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
-		return 0;
-
-	if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
-		pr_err("Logical preemption supported, but not exposed\n");
-
-	if (igt_spinner_init(&spin_hi, gt))
-		return -ENOMEM;
-
-	if (igt_spinner_init(&spin_lo, gt))
-		goto err_spin_hi;
-
-	ctx_hi = kernel_context(gt->i915);
-	if (!ctx_hi)
-		goto err_spin_lo;
-	ctx_hi->sched.priority =
-		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
-	ctx_lo = kernel_context(gt->i915);
-	if (!ctx_lo)
-		goto err_ctx_hi;
-	ctx_lo->sched.priority =
-		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
-	for_each_engine(engine, gt, id) {
-		struct igt_live_test t;
-		struct i915_request *rq;
-
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
-			err = -EIO;
-			goto err_ctx_lo;
-		}
-
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			goto err_ctx_lo;
-		}
-
-		i915_request_add(rq);
-		if (!igt_wait_for_spinner(&spin_lo, rq)) {
-			GEM_TRACE("lo spinner failed to start\n");
-			GEM_TRACE_DUMP();
-			intel_gt_set_wedged(gt);
-			err = -EIO;
-			goto err_ctx_lo;
-		}
-
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
-					    MI_ARB_CHECK);
-		if (IS_ERR(rq)) {
-			igt_spinner_end(&spin_lo);
-			err = PTR_ERR(rq);
-			goto err_ctx_lo;
-		}
-
-		i915_request_add(rq);
-		if (!igt_wait_for_spinner(&spin_hi, rq)) {
-			GEM_TRACE("hi spinner failed to start\n");
-			GEM_TRACE_DUMP();
-			intel_gt_set_wedged(gt);
-			err = -EIO;
-			goto err_ctx_lo;
-		}
-
-		igt_spinner_end(&spin_hi);
-		igt_spinner_end(&spin_lo);
-
-		if (igt_live_test_end(&t)) {
-			err = -EIO;
-			goto err_ctx_lo;
-		}
-	}
-
-	err = 0;
-err_ctx_lo:
-	kernel_context_close(ctx_lo);
-err_ctx_hi:
-	kernel_context_close(ctx_hi);
-err_spin_lo:
-	igt_spinner_fini(&spin_lo);
-err_spin_hi:
-	igt_spinner_fini(&spin_hi);
-	return err;
-}
-
-static int live_late_preempt(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct igt_spinner spin_hi, spin_lo;
-	struct intel_engine_cs *engine;
-	struct i915_sched_attr attr = {};
-	enum intel_engine_id id;
-	int err = -ENOMEM;
-
-	if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
-		return 0;
-
-	if (igt_spinner_init(&spin_hi, gt))
-		return -ENOMEM;
-
-	if (igt_spinner_init(&spin_lo, gt))
-		goto err_spin_hi;
-
-	ctx_hi = kernel_context(gt->i915);
-	if (!ctx_hi)
-		goto err_spin_lo;
-
-	ctx_lo = kernel_context(gt->i915);
-	if (!ctx_lo)
-		goto err_ctx_hi;
-
-	/* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
-	ctx_lo->sched.priority = I915_USER_PRIORITY(1);
-
-	for_each_engine(engine, gt, id) {
-		struct igt_live_test t;
-		struct i915_request *rq;
-
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
-			err = -EIO;
-			goto err_ctx_lo;
-		}
-
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			goto err_ctx_lo;
-		}
-
-		i915_request_add(rq);
-		if (!igt_wait_for_spinner(&spin_lo, rq)) {
-			pr_err("First context failed to start\n");
-			goto err_wedged;
-		}
-
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
-					    MI_NOOP);
-		if (IS_ERR(rq)) {
-			igt_spinner_end(&spin_lo);
-			err = PTR_ERR(rq);
-			goto err_ctx_lo;
-		}
-
-		i915_request_add(rq);
-		if (igt_wait_for_spinner(&spin_hi, rq)) {
-			pr_err("Second context overtook first?\n");
-			goto err_wedged;
-		}
-
-		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
-		engine->schedule(rq, &attr);
-
-		if (!igt_wait_for_spinner(&spin_hi, rq)) {
-			pr_err("High priority context failed to preempt the low priority context\n");
-			GEM_TRACE_DUMP();
-			goto err_wedged;
-		}
-
-		igt_spinner_end(&spin_hi);
-		igt_spinner_end(&spin_lo);
-
-		if (igt_live_test_end(&t)) {
-			err = -EIO;
-			goto err_ctx_lo;
-		}
-	}
-
-	err = 0;
-err_ctx_lo:
-	kernel_context_close(ctx_lo);
-err_ctx_hi:
-	kernel_context_close(ctx_hi);
-err_spin_lo:
-	igt_spinner_fini(&spin_lo);
-err_spin_hi:
-	igt_spinner_fini(&spin_hi);
-	return err;
-
-err_wedged:
-	igt_spinner_end(&spin_hi);
-	igt_spinner_end(&spin_lo);
-	intel_gt_set_wedged(gt);
-	err = -EIO;
-	goto err_ctx_lo;
-}
-
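-/*
- * A preempt_client bundles an igt_spinner with its own GEM context, so
- * each client's spinning requests are scheduled independently.
- */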
-struct preempt_client {
-	struct igt_spinner spin;
-	struct i915_gem_context *ctx;
-};
-
-static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
-{
-	c->ctx = kernel_context(gt->i915);
-	if (!c->ctx)
-		return -ENOMEM;
-
-	if (igt_spinner_init(&c->spin, gt))
-		goto err_ctx;
-
-	return 0;
-
-err_ctx:
-	kernel_context_close(c->ctx);
-	return -ENOMEM;
-}
-
-static void preempt_client_fini(struct preempt_client *c)
-{
-	igt_spinner_fini(&c->spin);
-	kernel_context_close(c->ctx);
-}
-
-static int live_nopreempt(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	struct preempt_client a, b;
-	enum intel_engine_id id;
-	int err = -ENOMEM;
-
-	/*
-	 * Verify that we can disable preemption for an individual request
-	 * that is being observed and does not want to be interrupted.
-	 */
-
-	if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
-		return 0;
-
-	if (preempt_client_init(gt, &a))
-		return -ENOMEM;
-	if (preempt_client_init(gt, &b))
-		goto err_client_a;
-	b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
-
-	for_each_engine(engine, gt, id) {
-		struct i915_request *rq_a, *rq_b;
-
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		engine->execlists.preempt_hang.count = 0;
-
-		rq_a = spinner_create_request(&a.spin,
-					      a.ctx, engine,
-					      MI_ARB_CHECK);
-		if (IS_ERR(rq_a)) {
-			err = PTR_ERR(rq_a);
-			goto err_client_b;
-		}
-
-		/* Low priority client, but unpreemptable! */
-		__set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
-
-		i915_request_add(rq_a);
-		if (!igt_wait_for_spinner(&a.spin, rq_a)) {
-			pr_err("First client failed to start\n");
-			goto err_wedged;
-		}
-
-		rq_b = spinner_create_request(&b.spin,
-					      b.ctx, engine,
-					      MI_ARB_CHECK);
-		if (IS_ERR(rq_b)) {
-			err = PTR_ERR(rq_b);
-			goto err_client_b;
-		}
-
-		i915_request_add(rq_b);
-
-		/* B is much more important than A! (But A is unpreemptable.) */
-		GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
-
-		/* Wait long enough for preemption and timeslicing */
-		if (igt_wait_for_spinner(&b.spin, rq_b)) {
-			pr_err("Second client started too early!\n");
-			goto err_wedged;
-		}
-
-		igt_spinner_end(&a.spin);
-
-		if (!igt_wait_for_spinner(&b.spin, rq_b)) {
-			pr_err("Second client failed to start\n");
-			goto err_wedged;
-		}
-
-		igt_spinner_end(&b.spin);
-
-		if (engine->execlists.preempt_hang.count) {
-			pr_err("Preemption recorded x%d; should have been suppressed!\n",
-			       engine->execlists.preempt_hang.count);
-			err = -EINVAL;
-			goto err_wedged;
-		}
-
-		if (igt_flush_test(gt->i915))
-			goto err_wedged;
-	}
-
-	err = 0;
-err_client_b:
-	preempt_client_fini(&b);
-err_client_a:
-	preempt_client_fini(&a);
-	return err;
-
-err_wedged:
-	igt_spinner_end(&b.spin);
-	igt_spinner_end(&a.spin);
-	intel_gt_set_wedged(gt);
-	err = -EIO;
-	goto err_client_b;
-}
-
-struct live_preempt_cancel {
-	struct intel_engine_cs *engine;
-	struct preempt_client a, b;
-};
-
-static int __cancel_active0(struct live_preempt_cancel *arg)
-{
-	struct i915_request *rq;
-	struct igt_live_test t;
-	int err;
-
-	/* Preempt cancel of ELSP0 */
-	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
-	if (igt_live_test_begin(&t, arg->engine->i915,
-				__func__, arg->engine->name))
-		return -EIO;
-
-	rq = spinner_create_request(&arg->a.spin,
-				    arg->a.ctx, arg->engine,
-				    MI_ARB_CHECK);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
-
-	clear_bit(CONTEXT_BANNED, &rq->context->flags);
-	i915_request_get(rq);
-	i915_request_add(rq);
-	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
-		err = -EIO;
-		goto out;
-	}
-
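-	/*
-	 * Ban the context and then prod the engine with a heartbeat pulse;
-	 * preempting a banned context should cancel it, resetting the
-	 * inflight request.
-	 */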
-	intel_context_set_banned(rq->context);
-	err = intel_engine_pulse(arg->engine);
-	if (err)
-		goto out;
-
-	err = wait_for_reset(arg->engine, rq, HZ / 2);
-	if (err) {
-		pr_err("Cancelled inflight0 request did not reset\n");
-		goto out;
-	}
-
-out:
-	i915_request_put(rq);
-	if (igt_live_test_end(&t))
-		err = -EIO;
-	return err;
-}
-
-static int __cancel_active1(struct live_preempt_cancel *arg)
-{
-	struct i915_request *rq[2] = {};
-	struct igt_live_test t;
-	int err;
-
-	/* Preempt cancel of ELSP1 */
-	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
-	if (igt_live_test_begin(&t, arg->engine->i915,
-				__func__, arg->engine->name))
-		return -EIO;
-
-	rq[0] = spinner_create_request(&arg->a.spin,
-				       arg->a.ctx, arg->engine,
-				       MI_NOOP); /* no preemption */
-	if (IS_ERR(rq[0]))
-		return PTR_ERR(rq[0]);
-
-	clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
-	i915_request_get(rq[0]);
-	i915_request_add(rq[0]);
-	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
-		err = -EIO;
-		goto out;
-	}
-
-	rq[1] = spinner_create_request(&arg->b.spin,
-				       arg->b.ctx, arg->engine,
-				       MI_ARB_CHECK);
-	if (IS_ERR(rq[1])) {
-		err = PTR_ERR(rq[1]);
-		goto out;
-	}
-
-	clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
-	i915_request_get(rq[1]);
-	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
-	i915_request_add(rq[1]);
-	if (err)
-		goto out;
-
-	intel_context_set_banned(rq[1]->context);
-	err = intel_engine_pulse(arg->engine);
-	if (err)
-		goto out;
-
-	igt_spinner_end(&arg->a.spin);
-	err = wait_for_reset(arg->engine, rq[1], HZ / 2);
-	if (err)
-		goto out;
-
-	if (rq[0]->fence.error != 0) {
-		pr_err("Normal inflight0 request did not complete\n");
-		err = -EINVAL;
-		goto out;
-	}
-
-	if (rq[1]->fence.error != -EIO) {
-		pr_err("Cancelled inflight1 request did not report -EIO\n");
-		err = -EINVAL;
-		goto out;
-	}
-
-out:
-	i915_request_put(rq[1]);
-	i915_request_put(rq[0]);
-	if (igt_live_test_end(&t))
-		err = -EIO;
-	return err;
-}
-
-static int __cancel_queued(struct live_preempt_cancel *arg)
-{
-	struct i915_request *rq[3] = {};
-	struct igt_live_test t;
-	int err;
-
-	/* Full ELSP and one in the wings */
-	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
-	if (igt_live_test_begin(&t, arg->engine->i915,
-				__func__, arg->engine->name))
-		return -EIO;
-
-	rq[0] = spinner_create_request(&arg->a.spin,
-				       arg->a.ctx, arg->engine,
-				       MI_ARB_CHECK);
-	if (IS_ERR(rq[0]))
-		return PTR_ERR(rq[0]);
-
-	clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
-	i915_request_get(rq[0]);
-	i915_request_add(rq[0]);
-	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
-		err = -EIO;
-		goto out;
-	}
-
-	rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
-	if (IS_ERR(rq[1])) {
-		err = PTR_ERR(rq[1]);
-		goto out;
-	}
-
-	clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
-	i915_request_get(rq[1]);
-	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
-	i915_request_add(rq[1]);
-	if (err)
-		goto out;
-
-	rq[2] = spinner_create_request(&arg->b.spin,
-				       arg->a.ctx, arg->engine,
-				       MI_ARB_CHECK);
-	if (IS_ERR(rq[2])) {
-		err = PTR_ERR(rq[2]);
-		goto out;
-	}
-
-	i915_request_get(rq[2]);
-	err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
-	i915_request_add(rq[2]);
-	if (err)
-		goto out;
-
-	intel_context_set_banned(rq[2]->context);
-	err = intel_engine_pulse(arg->engine);
-	if (err)
-		goto out;
-
-	err = wait_for_reset(arg->engine, rq[2], HZ / 2);
-	if (err)
-		goto out;
-
-	if (rq[0]->fence.error != -EIO) {
-		pr_err("Cancelled inflight0 request did not report -EIO\n");
-		err = -EINVAL;
-		goto out;
-	}
-
-	if (rq[1]->fence.error != 0) {
-		pr_err("Normal inflight1 request did not complete\n");
-		err = -EINVAL;
-		goto out;
-	}
-
-	if (rq[2]->fence.error != -EIO) {
-		pr_err("Cancelled queued request did not report -EIO\n");
-		err = -EINVAL;
-		goto out;
-	}
-
-out:
-	i915_request_put(rq[2]);
-	i915_request_put(rq[1]);
-	i915_request_put(rq[0]);
-	if (igt_live_test_end(&t))
-		err = -EIO;
-	return err;
-}
-
-static int __cancel_hostile(struct live_preempt_cancel *arg)
-{
-	struct i915_request *rq;
-	int err;
-
-	/* Preempt cancel non-preemptible spinner in ELSP0 */
-	if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
-		return 0;
-
-	if (!intel_has_reset_engine(arg->engine->gt))
-		return 0;
-
-	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
-	rq = spinner_create_request(&arg->a.spin,
-				    arg->a.ctx, arg->engine,
-				    MI_NOOP); /* preemption disabled */
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
-
-	clear_bit(CONTEXT_BANNED, &rq->context->flags);
-	i915_request_get(rq);
-	i915_request_add(rq);
-	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
-		err = -EIO;
-		goto out;
-	}
-
-	intel_context_set_banned(rq->context);
-	err = intel_engine_pulse(arg->engine); /* force reset */
-	if (err)
-		goto out;
-
-	err = wait_for_reset(arg->engine, rq, HZ / 2);
-	if (err) {
-		pr_err("Cancelled inflight0 request did not reset\n");
-		goto out;
-	}
-
-out:
-	i915_request_put(rq);
-	if (igt_flush_test(arg->engine->i915))
-		err = -EIO;
-	return err;
-}
-
-static int live_preempt_cancel(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct live_preempt_cancel data;
-	enum intel_engine_id id;
-	int err = -ENOMEM;
-
-	/*
-	 * To cancel an inflight context, we need to first remove it from the
-	 * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
-	 */
-
-	if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
-		return 0;
-
-	if (preempt_client_init(gt, &data.a))
-		return -ENOMEM;
-	if (preempt_client_init(gt, &data.b))
-		goto err_client_a;
-
-	for_each_engine(data.engine, gt, id) {
-		if (!intel_engine_has_preemption(data.engine))
-			continue;
-
-		err = __cancel_active0(&data);
-		if (err)
-			goto err_wedged;
-
-		err = __cancel_active1(&data);
-		if (err)
-			goto err_wedged;
-
-		err = __cancel_queued(&data);
-		if (err)
-			goto err_wedged;
-
-		err = __cancel_hostile(&data);
-		if (err)
-			goto err_wedged;
-	}
-
-	err = 0;
-err_client_b:
-	preempt_client_fini(&data.b);
-err_client_a:
-	preempt_client_fini(&data.a);
-	return err;
-
-err_wedged:
-	GEM_TRACE_DUMP();
-	igt_spinner_end(&data.b.spin);
-	igt_spinner_end(&data.a.spin);
-	intel_gt_set_wedged(gt);
-	goto err_client_b;
-}
-
-static int live_suppress_self_preempt(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	struct i915_sched_attr attr = {
-		.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
-	};
-	struct preempt_client a, b;
-	enum intel_engine_id id;
-	int err = -ENOMEM;
-
-	/*
-	 * Verify that if a preemption request does not cause a change in
-	 * the current execution order, the preempt-to-idle injection is
-	 * skipped and that we do not accidentally apply it after the CS
-	 * completion event.
-	 */
-
-	if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
-		return 0;
-
-	if (intel_uc_uses_guc_submission(&gt->uc))
-		return 0; /* presume a black box */
-
-	if (intel_vgpu_active(gt->i915))
-		return 0; /* GVT forces single port & request submission */
-
-	if (preempt_client_init(gt, &a))
-		return -ENOMEM;
-	if (preempt_client_init(gt, &b))
-		goto err_client_a;
-
-	for_each_engine(engine, gt, id) {
-		struct i915_request *rq_a, *rq_b;
-		int depth;
-
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		if (igt_flush_test(gt->i915))
-			goto err_wedged;
-
-		st_engine_heartbeat_disable(engine);
-		engine->execlists.preempt_hang.count = 0;
-
-		rq_a = spinner_create_request(&a.spin,
-					      a.ctx, engine,
-					      MI_NOOP);
-		if (IS_ERR(rq_a)) {
-			err = PTR_ERR(rq_a);
-			st_engine_heartbeat_enable(engine);
-			goto err_client_b;
-		}
-
-		i915_request_add(rq_a);
-		if (!igt_wait_for_spinner(&a.spin, rq_a)) {
-			pr_err("First client failed to start\n");
-			st_engine_heartbeat_enable(engine);
-			goto err_wedged;
-		}
-
-		/* Keep postponing the timer to avoid premature slicing */
-		mod_timer(&engine->execlists.timer, jiffies + HZ);
-		for (depth = 0; depth < 8; depth++) {
-			rq_b = spinner_create_request(&b.spin,
-						      b.ctx, engine,
-						      MI_NOOP);
-			if (IS_ERR(rq_b)) {
-				err = PTR_ERR(rq_b);
-				st_engine_heartbeat_enable(engine);
-				goto err_client_b;
-			}
-			i915_request_add(rq_b);
-
-			GEM_BUG_ON(i915_request_completed(rq_a));
-			engine->schedule(rq_a, &attr);
-			igt_spinner_end(&a.spin);
-
-			if (!igt_wait_for_spinner(&b.spin, rq_b)) {
-				pr_err("Second client failed to start\n");
-				st_engine_heartbeat_enable(engine);
-				goto err_wedged;
-			}
-
-			swap(a, b);
-			rq_a = rq_b;
-		}
-		igt_spinner_end(&a.spin);
-
-		if (engine->execlists.preempt_hang.count) {
-			pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
-			       engine->name,
-			       engine->execlists.preempt_hang.count,
-			       depth);
-			st_engine_heartbeat_enable(engine);
-			err = -EINVAL;
-			goto err_client_b;
-		}
-
-		st_engine_heartbeat_enable(engine);
-		if (igt_flush_test(gt->i915))
-			goto err_wedged;
-	}
-
-	err = 0;
-err_client_b:
-	preempt_client_fini(&b);
-err_client_a:
-	preempt_client_fini(&a);
-	return err;
-
-err_wedged:
-	igt_spinner_end(&b.spin);
-	igt_spinner_end(&a.spin);
-	intel_gt_set_wedged(gt);
-	err = -EIO;
-	goto err_client_b;
-}
-
-static int live_chain_preempt(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	struct preempt_client hi, lo;
-	enum intel_engine_id id;
-	int err = -ENOMEM;
-
-	/*
-	 * Build a chain AB...BA between two contexts (A, B) and request
-	 * preemption of the last request. It should then complete before
-	 * the previously submitted spinner in B.
-	 */
-
-	if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
-		return 0;
-
-	if (preempt_client_init(gt, &hi))
-		return -ENOMEM;
-
-	if (preempt_client_init(gt, &lo))
-		goto err_client_hi;
-
-	for_each_engine(engine, gt, id) {
-		struct i915_sched_attr attr = {
-			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
-		};
-		struct igt_live_test t;
-		struct i915_request *rq;
-		int ring_size, count, i;
-
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		rq = spinner_create_request(&lo.spin,
-					    lo.ctx, engine,
-					    MI_ARB_CHECK);
-		if (IS_ERR(rq))
-			goto err_wedged;
-
-		i915_request_get(rq);
-		i915_request_add(rq);
-
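-		/*
-		 * wa_tail - head is the space one spinner request occupies
-		 * in the ring (modulo a wrap), so dividing it into the ring
-		 * size estimates how many requests fit before wrapping.
-		 */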
-		ring_size = rq->wa_tail - rq->head;
-		if (ring_size < 0)
-			ring_size += rq->ring->size;
-		ring_size = rq->ring->size / ring_size;
-		pr_debug("%s(%s): Using maximum of %d requests\n",
-			 __func__, engine->name, ring_size);
-
-		igt_spinner_end(&lo.spin);
-		if (i915_request_wait(rq, 0, HZ / 2) < 0) {
-			pr_err("Timed out waiting to flush %s\n", engine->name);
-			i915_request_put(rq);
-			goto err_wedged;
-		}
-		i915_request_put(rq);
-
-		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
-			err = -EIO;
-			goto err_wedged;
-		}
-
-		for_each_prime_number_from(count, 1, ring_size) {
-			rq = spinner_create_request(&hi.spin,
-						    hi.ctx, engine,
-						    MI_ARB_CHECK);
-			if (IS_ERR(rq))
-				goto err_wedged;
-			i915_request_add(rq);
-			if (!igt_wait_for_spinner(&hi.spin, rq))
-				goto err_wedged;
-
-			rq = spinner_create_request(&lo.spin,
-						    lo.ctx, engine,
-						    MI_ARB_CHECK);
-			if (IS_ERR(rq))
-				goto err_wedged;
-			i915_request_add(rq);
-
-			for (i = 0; i < count; i++) {
-				rq = igt_request_alloc(lo.ctx, engine);
-				if (IS_ERR(rq))
-					goto err_wedged;
-				i915_request_add(rq);
-			}
-
-			rq = igt_request_alloc(hi.ctx, engine);
-			if (IS_ERR(rq))
-				goto err_wedged;
-
-			i915_request_get(rq);
-			i915_request_add(rq);
-			engine->schedule(rq, &attr);
-
-			igt_spinner_end(&hi.spin);
-			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
-				struct drm_printer p =
-					drm_info_printer(gt->i915->drm.dev);
-
-				pr_err("Failed to preempt over chain of %d\n",
-				       count);
-				intel_engine_dump(engine, &p,
-						  "%s\n", engine->name);
-				i915_request_put(rq);
-				goto err_wedged;
-			}
-			igt_spinner_end(&lo.spin);
-			i915_request_put(rq);
-
-			rq = igt_request_alloc(lo.ctx, engine);
-			if (IS_ERR(rq))
-				goto err_wedged;
-
-			i915_request_get(rq);
-			i915_request_add(rq);
-
-			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
-				struct drm_printer p =
-					drm_info_printer(gt->i915->drm.dev);
-
-				pr_err("Failed to flush low priority chain of %d requests\n",
-				       count);
-				intel_engine_dump(engine, &p,
-						  "%s\n", engine->name);
-
-				i915_request_put(rq);
-				goto err_wedged;
-			}
-			i915_request_put(rq);
-		}
-
-		if (igt_live_test_end(&t)) {
-			err = -EIO;
-			goto err_wedged;
-		}
-	}
-
-	err = 0;
-err_client_lo:
-	preempt_client_fini(&lo);
-err_client_hi:
-	preempt_client_fini(&hi);
-	return err;
-
-err_wedged:
-	igt_spinner_end(&hi.spin);
-	igt_spinner_end(&lo.spin);
-	intel_gt_set_wedged(gt);
-	err = -EIO;
-	goto err_client_lo;
-}
-
-static int create_gang(struct intel_engine_cs *engine,
-		       struct i915_request **prev)
-{
-	struct drm_i915_gem_object *obj;
-	struct intel_context *ce;
-	struct i915_request *rq;
-	struct i915_vma *vma;
-	u32 *cs;
-	int err;
-
-	ce = intel_context_create(engine);
-	if (IS_ERR(ce))
-		return PTR_ERR(ce);
-
-	obj = i915_gem_object_create_internal(engine->i915, 4096);
-	if (IS_ERR(obj)) {
-		err = PTR_ERR(obj);
-		goto err_ce;
-	}
-
-	vma = i915_vma_instance(obj, ce->vm, NULL);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		goto err_obj;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		goto err_obj;
-
-	cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
-	if (IS_ERR(cs)) {
-		err = PTR_ERR(cs);
-		goto err_obj;
-	}
-
-	/* Semaphore target: spin until zero */
-	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
-	*cs++ = MI_SEMAPHORE_WAIT |
-		MI_SEMAPHORE_POLL |
-		MI_SEMAPHORE_SAD_EQ_SDD;
-	*cs++ = 0;
-	*cs++ = lower_32_bits(vma->node.start);
-	*cs++ = upper_32_bits(vma->node.start);
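-	/*
-	 * The semaphore polls the first dword of this batch (initially the
-	 * non-zero MI_ARB_ON_OFF instruction) until it reads as zero; the
-	 * next, higher priority, gang member stores 0 there to release us.
-	 */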
-
-	if (*prev) {
-		u64 offset = (*prev)->batch->node.start;
-
-		/* Terminate the spinner in the next lower priority batch. */
-		*cs++ = MI_STORE_DWORD_IMM_GEN4;
-		*cs++ = lower_32_bits(offset);
-		*cs++ = upper_32_bits(offset);
-		*cs++ = 0;
-	}
-
-	*cs++ = MI_BATCH_BUFFER_END;
-	i915_gem_object_flush_map(obj);
-	i915_gem_object_unpin_map(obj);
-
-	rq = intel_context_create_request(ce);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto err_obj;
-	}
-
-	rq->batch = i915_vma_get(vma);
-	i915_request_get(rq);
-
-	i915_vma_lock(vma);
-	err = i915_request_await_object(rq, vma->obj, false);
-	if (!err)
-		err = i915_vma_move_to_active(vma, rq, 0);
-	if (!err)
-		err = rq->engine->emit_bb_start(rq,
-						vma->node.start,
-						PAGE_SIZE, 0);
-	i915_vma_unlock(vma);
-	i915_request_add(rq);
-	if (err)
-		goto err_rq;
-
-	i915_gem_object_put(obj);
-	intel_context_put(ce);
-
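-	/*
-	 * Borrow mock.link to chain the requests from newest to oldest; a
-	 * NULL *prev leaves a sentinel that makes the caller's
-	 * list_next_entry() walk terminate at NULL.
-	 */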
-	rq->mock.link.next = &(*prev)->mock.link;
-	*prev = rq;
-	return 0;
-
-err_rq:
-	i915_vma_put(rq->batch);
-	i915_request_put(rq);
-err_obj:
-	i915_gem_object_put(obj);
-err_ce:
-	intel_context_put(ce);
-	return err;
-}
-
-static int __live_preempt_ring(struct intel_engine_cs *engine,
-			       struct igt_spinner *spin,
-			       int queue_sz, int ring_sz)
-{
-	struct intel_context *ce[2] = {};
-	struct i915_request *rq;
-	struct igt_live_test t;
-	int err = 0;
-	int n;
-
-	if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
-		return -EIO;
-
-	for (n = 0; n < ARRAY_SIZE(ce); n++) {
-		struct intel_context *tmp;
-
-		tmp = intel_context_create(engine);
-		if (IS_ERR(tmp)) {
-			err = PTR_ERR(tmp);
-			goto err_ce;
-		}
-
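-		/*
-		 * Until the context is pinned, the ring pointer merely
-		 * encodes the desired ring size; a small ring lets a few
-		 * requests force the wrap we want to exercise.
-		 */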
-		tmp->ring = __intel_context_ring_size(ring_sz);
-
-		err = intel_context_pin(tmp);
-		if (err) {
-			intel_context_put(tmp);
-			goto err_ce;
-		}
-
-		memset32(tmp->ring->vaddr,
-			 0xdeadbeef, /* trigger a hang if executed */
-			 tmp->ring->vma->size / sizeof(u32));
-
-		ce[n] = tmp;
-	}
-
-	rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto err_ce;
-	}
-
-	i915_request_get(rq);
-	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
-	i915_request_add(rq);
-
-	if (!igt_wait_for_spinner(spin, rq)) {
-		intel_gt_set_wedged(engine->gt);
-		i915_request_put(rq);
-		err = -ETIME;
-		goto err_ce;
-	}
-
-	/* Fill the ring until we cause a wrap */
-	n = 0;
-	while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
-		struct i915_request *tmp;
-
-		tmp = intel_context_create_request(ce[0]);
-		if (IS_ERR(tmp)) {
-			err = PTR_ERR(tmp);
-			i915_request_put(rq);
-			goto err_ce;
-		}
-
-		i915_request_add(tmp);
-		intel_engine_flush_submission(engine);
-		n++;
-	}
-	intel_engine_flush_submission(engine);
-	pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
-		 engine->name, queue_sz, n,
-		 ce[0]->ring->size,
-		 ce[0]->ring->tail,
-		 ce[0]->ring->emit,
-		 rq->tail);
-	i915_request_put(rq);
-
-	/* Create a second request to preempt the first ring */
-	rq = intel_context_create_request(ce[1]);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto err_ce;
-	}
-
-	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
-	i915_request_get(rq);
-	i915_request_add(rq);
-
-	err = wait_for_submit(engine, rq, HZ / 2);
-	i915_request_put(rq);
-	if (err) {
-		pr_err("%s: preemption request was not submited\n",
-		       engine->name);
-		err = -ETIME;
-	}
-
-	pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
-		 engine->name,
-		 ce[0]->ring->tail, ce[0]->ring->emit,
-		 ce[1]->ring->tail, ce[1]->ring->emit);
-
-err_ce:
-	intel_engine_flush_submission(engine);
-	igt_spinner_end(spin);
-	for (n = 0; n < ARRAY_SIZE(ce); n++) {
-		if (IS_ERR_OR_NULL(ce[n]))
-			break;
-
-		intel_context_unpin(ce[n]);
-		intel_context_put(ce[n]);
-	}
-	if (igt_live_test_end(&t))
-		err = -EIO;
-	return err;
-}
-
-static int live_preempt_ring(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	struct igt_spinner spin;
-	enum intel_engine_id id;
-	int err = 0;
-
-	/*
-	 * Check that we rollback large chunks of a ring in order to do a
-	 * preemption event. Similar to live_unlite_ring, but looking at
-	 * ring size rather than the impact of intel_ring_direction().
-	 */
-
-	if (igt_spinner_init(&spin, gt))
-		return -ENOMEM;
-
-	for_each_engine(engine, gt, id) {
-		int n;
-
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		if (!intel_engine_can_store_dword(engine))
-			continue;
-
-		st_engine_heartbeat_disable(engine);
-
-		for (n = 0; n <= 3; n++) {
-			err = __live_preempt_ring(engine, &spin,
-						  n * SZ_4K / 4, SZ_4K);
-			if (err)
-				break;
-		}
-
-		st_engine_heartbeat_enable(engine);
-		if (err)
-			break;
-	}
-
-	igt_spinner_fini(&spin);
-	return err;
-}
-
-static int live_preempt_gang(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
-		return 0;
-
-	/*
-	 * Build as long a chain of preempters as we can, with each
-	 * request higher priority than the last. Once we are ready, we release
-	 * the last batch which then percolates down the chain, each releasing
-	 * the next oldest in turn. The intent is to simply push as hard as we
-	 * can with the number of preemptions, trying to exceed narrow HW
-	 * limits. At a minimum, we insist that we can sort all the user
-	 * high priority levels into execution order.
-	 */
-
-	for_each_engine(engine, gt, id) {
-		struct i915_request *rq = NULL;
-		struct igt_live_test t;
-		IGT_TIMEOUT(end_time);
-		int prio = 0;
-		int err = 0;
-		u32 *cs;
-
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
-			return -EIO;
-
-		do {
-			struct i915_sched_attr attr = {
-				.priority = I915_USER_PRIORITY(prio++),
-			};
-
-			err = create_gang(engine, &rq);
-			if (err)
-				break;
-
-			/* Submit each spinner at increasing priority */
-			engine->schedule(rq, &attr);
-		} while (prio <= I915_PRIORITY_MAX &&
-			 !__igt_timeout(end_time, NULL));
-		pr_debug("%s: Preempt chain of %d requests\n",
-			 engine->name, prio);
-
-		/*
-		 * The last spinner has the highest priority and should
-		 * execute first. When that spinner completes,
-		 * it will terminate the next lowest spinner until there
-		 * are no more spinners and the gang is complete.
-		 */
-		if (rq) { /* create_gang() may have failed on the first pass */
-			cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
-			if (!IS_ERR(cs)) {
-				*cs = 0;
-				i915_gem_object_unpin_map(rq->batch->obj);
-			} else {
-				err = PTR_ERR(cs);
-				intel_gt_set_wedged(gt);
-			}
-		}
-
-		while (rq) { /* wait for each rq from highest to lowest prio */
-			struct i915_request *n = list_next_entry(rq, mock.link);
-
-			if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
-				struct drm_printer p =
-					drm_info_printer(engine->i915->drm.dev);
-
-				pr_err("Failed to flush chain of %d requests, at %d\n",
-				       prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
-				intel_engine_dump(engine, &p,
-						  "%s\n", engine->name);
-
-				err = -ETIME;
-			}
-
-			i915_vma_put(rq->batch);
-			i915_request_put(rq);
-			rq = n;
-		}
-
-		if (igt_live_test_end(&t))
-			err = -EIO;
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-static struct i915_vma *
-create_gpr_user(struct intel_engine_cs *engine,
-		struct i915_vma *result,
-		unsigned int offset)
-{
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	u32 *cs;
-	int err;
-	int i;
-
-	obj = i915_gem_object_create_internal(engine->i915, 4096);
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
-
-	vma = i915_vma_instance(obj, result->vm, NULL);
-	if (IS_ERR(vma)) {
-		i915_gem_object_put(obj);
-		return vma;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err) {
-		i915_vma_put(vma);
-		return ERR_PTR(err);
-	}
-
-	cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
-	if (IS_ERR(cs)) {
-		i915_vma_put(vma);
-		return ERR_CAST(cs);
-	}
-
-	/* All GPR are clear for new contexts. We use GPR(0) as a constant */
-	*cs++ = MI_LOAD_REGISTER_IMM(1);
-	*cs++ = CS_GPR(engine, 0);
-	*cs++ = 1;
-
-	for (i = 1; i < NUM_GPR; i++) {
-		u64 addr;
-
-		/*
-		 * Perform: GPR[i]++
-		 *
-		 * As we read and write into the context saved GPR[i], if
-		 * we restart this batch buffer from an earlier point, we
-		 * will repeat the increment and store a value > 1.
-		 */
-		*cs++ = MI_MATH(4);
-		*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
-		*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
-		*cs++ = MI_MATH_ADD;
-		*cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
-
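-		/*
-		 * The GPR are 64b wide, stored as pairs of dwords, so
-		 * CS_GPR(engine, 2 * i) names the low dword of GPR[i],
-		 * which we dump into the result buffer for checking.
-		 */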
-		addr = result->node.start + offset + i * sizeof(*cs);
-		*cs++ = MI_STORE_REGISTER_MEM_GEN8;
-		*cs++ = CS_GPR(engine, 2 * i);
-		*cs++ = lower_32_bits(addr);
-		*cs++ = upper_32_bits(addr);
-
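-		/*
-		 * Stall until preempt_user() bumps the dword at the start
-		 * of the result buffer to >= i; each increment thus waits
-		 * for the next preemption event before continuing.
-		 */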
-		*cs++ = MI_SEMAPHORE_WAIT |
-			MI_SEMAPHORE_POLL |
-			MI_SEMAPHORE_SAD_GTE_SDD;
-		*cs++ = i;
-		*cs++ = lower_32_bits(result->node.start);
-		*cs++ = upper_32_bits(result->node.start);
-	}
-
-	*cs++ = MI_BATCH_BUFFER_END;
-	i915_gem_object_flush_map(obj);
-	i915_gem_object_unpin_map(obj);
-
-	return vma;
-}
-
-static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
-{
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	int err;
-
-	obj = i915_gem_object_create_internal(gt->i915, sz);
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
-
-	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
-	if (IS_ERR(vma)) {
-		i915_gem_object_put(obj);
-		return vma;
-	}
-
-	err = i915_ggtt_pin(vma, NULL, 0, 0);
-	if (err) {
-		i915_vma_put(vma);
-		return ERR_PTR(err);
-	}
-
-	return vma;
-}
-
-static struct i915_request *
-create_gpr_client(struct intel_engine_cs *engine,
-		  struct i915_vma *global,
-		  unsigned int offset)
-{
-	struct i915_vma *batch, *vma;
-	struct intel_context *ce;
-	struct i915_request *rq;
-	int err;
-
-	ce = intel_context_create(engine);
-	if (IS_ERR(ce))
-		return ERR_CAST(ce);
-
-	vma = i915_vma_instance(global->obj, ce->vm, NULL);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		goto out_ce;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		goto out_ce;
-
-	batch = create_gpr_user(engine, vma, offset);
-	if (IS_ERR(batch)) {
-		err = PTR_ERR(batch);
-		goto out_vma;
-	}
-
-	rq = intel_context_create_request(ce);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto out_batch;
-	}
-
-	i915_vma_lock(vma);
-	err = i915_request_await_object(rq, vma->obj, false);
-	if (!err)
-		err = i915_vma_move_to_active(vma, rq, 0);
-	i915_vma_unlock(vma);
-
-	i915_vma_lock(batch);
-	if (!err)
-		err = i915_request_await_object(rq, batch->obj, false);
-	if (!err)
-		err = i915_vma_move_to_active(batch, rq, 0);
-	if (!err)
-		err = rq->engine->emit_bb_start(rq,
-						batch->node.start,
-						PAGE_SIZE, 0);
-	i915_vma_unlock(batch);
-	i915_vma_unpin(batch);
-
-	if (!err)
-		i915_request_get(rq);
-	i915_request_add(rq);
-
-out_batch:
-	i915_vma_put(batch);
-out_vma:
-	i915_vma_unpin(vma);
-out_ce:
-	intel_context_put(ce);
-	return err ? ERR_PTR(err) : rq;
-}
-
-static int preempt_user(struct intel_engine_cs *engine,
-			struct i915_vma *global,
-			int id)
-{
-	struct i915_sched_attr attr = {
-		.priority = I915_PRIORITY_MAX
-	};
-	struct i915_request *rq;
-	int err = 0;
-	u32 *cs;
-
-	rq = intel_engine_create_kernel_request(engine);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
-
-	cs = intel_ring_begin(rq, 4);
-	if (IS_ERR(cs)) {
-		i915_request_add(rq);
-		return PTR_ERR(cs);
-	}
-
-	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
-	*cs++ = i915_ggtt_offset(global);
-	*cs++ = 0;
-	*cs++ = id;
-
-	intel_ring_advance(rq, cs);
-
-	i915_request_get(rq);
-	i915_request_add(rq);
-
-	engine->schedule(rq, &attr);
-
-	if (i915_request_wait(rq, 0, HZ / 2) < 0)
-		err = -ETIME;
-	i915_request_put(rq);
-
-	return err;
-}
-
-static int live_preempt_user(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	struct i915_vma *global;
-	enum intel_engine_id id;
-	u32 *result;
-	int err = 0;
-
-	if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
-		return 0;
-
-	/*
-	 * In our other tests, we look at preemption in carefully
-	 * controlled conditions in the ringbuffer. Since most of the
-	 * time is spent in user batches, most of our preemptions naturally
-	 * occur there. We want to verify that when we preempt inside a batch
-	 * we continue on from the current instruction and do not roll back
-	 * to the start, or another earlier arbitration point.
-	 *
-	 * To verify this, we create a batch which is a mixture of
-	 * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with
-	 * a few preempting contexts thrown into the mix, we look for any
-	 * repeated instructions (which show up as incorrect values).
-	 */
-
-	global = create_global(gt, 4096);
-	if (IS_ERR(global))
-		return PTR_ERR(global);
-
-	result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
-	if (IS_ERR(result)) {
-		i915_vma_unpin_and_release(&global, 0);
-		return PTR_ERR(result);
-	}
-
-	for_each_engine(engine, gt, id) {
-		struct i915_request *client[3] = {};
-		struct igt_live_test t;
-		int i;
-
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
-			continue; /* we need per-context GPR */
-
-		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
-			err = -EIO;
-			break;
-		}
-
-		memset(result, 0, 4096);
-
-		for (i = 0; i < ARRAY_SIZE(client); i++) {
-			struct i915_request *rq;
-
-			rq = create_gpr_client(engine, global,
-					       NUM_GPR * i * sizeof(u32));
-			if (IS_ERR(rq)) {
-				err = PTR_ERR(rq);
-				goto end_test;
-			}
-
-			client[i] = rq;
-		}
-
-		/* Continuously preempt the set of 3 running contexts */
-		for (i = 1; i <= NUM_GPR; i++) {
-			err = preempt_user(engine, global, i);
-			if (err)
-				goto end_test;
-		}
-
-		if (READ_ONCE(result[0]) != NUM_GPR) {
-			pr_err("%s: Failed to release semaphore\n",
-			       engine->name);
-			err = -EIO;
-			goto end_test;
-		}
-
-		for (i = 0; i < ARRAY_SIZE(client); i++) {
-			int gpr;
-
-			if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
-				err = -ETIME;
-				goto end_test;
-			}
-
-			for (gpr = 1; gpr < NUM_GPR; gpr++) {
-				if (result[NUM_GPR * i + gpr] != 1) {
-					pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
-					       engine->name,
-					       i, gpr, result[NUM_GPR * i + gpr]);
-					err = -EINVAL;
-					goto end_test;
-				}
-			}
-		}
-
-end_test:
-		for (i = 0; i < ARRAY_SIZE(client); i++) {
-			if (!client[i])
-				break;
-
-			i915_request_put(client[i]);
-		}
-
-		/* Flush the semaphores on error */
-		smp_store_mb(result[0], -1);
-		if (igt_live_test_end(&t))
-			err = -EIO;
-		if (err)
-			break;
-	}
-
-	i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
-	return err;
-}
-
-static int live_preempt_timeout(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct igt_spinner spin_lo;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int err = -ENOMEM;
-
-	/*
-	 * Check that we force preemption to occur by cancelling the previous
-	 * context if it refuses to yield the GPU.
-	 */
-	if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
-		return 0;
-
-	if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
-		return 0;
-
-	if (!intel_has_reset_engine(gt))
-		return 0;
-
-	if (igt_spinner_init(&spin_lo, gt))
-		return -ENOMEM;
-
-	ctx_hi = kernel_context(gt->i915);
-	if (!ctx_hi)
-		goto err_spin_lo;
-	ctx_hi->sched.priority =
-		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
-	ctx_lo = kernel_context(gt->i915);
-	if (!ctx_lo)
-		goto err_ctx_hi;
-	ctx_lo->sched.priority =
-		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
-	for_each_engine(engine, gt, id) {
-		unsigned long saved_timeout;
-		struct i915_request *rq;
-
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_NOOP); /* preemption disabled */
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			goto err_ctx_lo;
-		}
-
-		i915_request_add(rq);
-		if (!igt_wait_for_spinner(&spin_lo, rq)) {
-			intel_gt_set_wedged(gt);
-			err = -EIO;
-			goto err_ctx_lo;
-		}
-
-		rq = igt_request_alloc(ctx_hi, engine);
-		if (IS_ERR(rq)) {
-			igt_spinner_end(&spin_lo);
-			err = PTR_ERR(rq);
-			goto err_ctx_lo;
-		}
-
-		/* Flush the previous CS ack before changing timeouts */
-		while (READ_ONCE(engine->execlists.pending[0]))
-			cpu_relax();
-
-		saved_timeout = engine->props.preempt_timeout_ms;
-		engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */
-
-		i915_request_get(rq);
-		i915_request_add(rq);
-
-		intel_engine_flush_submission(engine);
-		engine->props.preempt_timeout_ms = saved_timeout;
-
-		if (i915_request_wait(rq, 0, HZ / 10) < 0) {
-			intel_gt_set_wedged(gt);
-			i915_request_put(rq);
-			err = -ETIME;
-			goto err_ctx_lo;
-		}
-
-		igt_spinner_end(&spin_lo);
-		i915_request_put(rq);
-	}
-
-	err = 0;
-err_ctx_lo:
-	kernel_context_close(ctx_lo);
-err_ctx_hi:
-	kernel_context_close(ctx_hi);
-err_spin_lo:
-	igt_spinner_fini(&spin_lo);
-	return err;
-}
-
-static int random_range(struct rnd_state *rnd, int min, int max)
-{
-	return i915_prandom_u32_max_state(max - min, rnd) + min;
-}
-
-static int random_priority(struct rnd_state *rnd)
-{
-	return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
-}
-
-struct preempt_smoke {
-	struct intel_gt *gt;
-	struct i915_gem_context **contexts;
-	struct intel_engine_cs *engine;
-	struct drm_i915_gem_object *batch;
-	unsigned int ncontext;
-	struct rnd_state prng;
-	unsigned long count;
-};
-
-static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
-{
-	return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
-							  &smoke->prng)];
-}
-
-static int smoke_submit(struct preempt_smoke *smoke,
-			struct i915_gem_context *ctx, int prio,
-			struct drm_i915_gem_object *batch)
-{
-	struct i915_request *rq;
-	struct i915_vma *vma = NULL;
-	int err = 0;
-
-	if (batch) {
-		struct i915_address_space *vm;
-
-		vm = i915_gem_context_get_vm_rcu(ctx);
-		vma = i915_vma_instance(batch, vm, NULL);
-		i915_vm_put(vm);
-		if (IS_ERR(vma))
-			return PTR_ERR(vma);
-
-		err = i915_vma_pin(vma, 0, 0, PIN_USER);
-		if (err)
-			return err;
-	}
-
-	ctx->sched.priority = prio;
-
-	rq = igt_request_alloc(ctx, smoke->engine);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto unpin;
-	}
-
-	if (vma) {
-		i915_vma_lock(vma);
-		err = i915_request_await_object(rq, vma->obj, false);
-		if (!err)
-			err = i915_vma_move_to_active(vma, rq, 0);
-		if (!err)
-			err = rq->engine->emit_bb_start(rq,
-							vma->node.start,
-							PAGE_SIZE, 0);
-		i915_vma_unlock(vma);
-	}
-
-	i915_request_add(rq);
-
-unpin:
-	if (vma)
-		i915_vma_unpin(vma);
-
-	return err;
-}
-
-static int smoke_crescendo_thread(void *arg)
-{
-	struct preempt_smoke *smoke = arg;
-	IGT_TIMEOUT(end_time);
-	unsigned long count;
-
-	count = 0;
-	do {
-		struct i915_gem_context *ctx = smoke_context(smoke);
-		int err;
-
-		err = smoke_submit(smoke,
-				   ctx, count % I915_PRIORITY_MAX,
-				   smoke->batch);
-		if (err)
-			return err;
-
-		count++;
-	} while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
-
-	smoke->count = count;
-	return 0;
-}
-
-static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
-#define BATCH BIT(0)
-{
-	struct task_struct *tsk[I915_NUM_ENGINES] = {};
-	struct preempt_smoke arg[I915_NUM_ENGINES];
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	unsigned long count;
-	int err = 0;
-
-	for_each_engine(engine, smoke->gt, id) {
-		arg[id] = *smoke;
-		arg[id].engine = engine;
-		if (!(flags & BATCH))
-			arg[id].batch = NULL;
-		arg[id].count = 0;
-
-		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
-				      "igt/smoke:%d", id);
-		if (IS_ERR(tsk[id])) {
-			err = PTR_ERR(tsk[id]);
-			break;
-		}
-		get_task_struct(tsk[id]);
-	}
-
-	yield(); /* start all threads before we kthread_stop() */
-
-	count = 0;
-	for_each_engine(engine, smoke->gt, id) {
-		int status;
-
-		if (IS_ERR_OR_NULL(tsk[id]))
-			continue;
-
-		status = kthread_stop(tsk[id]);
-		if (status && !err)
-			err = status;
-
-		count += arg[id].count;
-
-		put_task_struct(tsk[id]);
-	}
-
-	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
-		count, flags, smoke->gt->info.num_engines, smoke->ncontext);
-	return err;
-}
-
-static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
-{
-	enum intel_engine_id id;
-	IGT_TIMEOUT(end_time);
-	unsigned long count;
-
-	count = 0;
-	do {
-		for_each_engine(smoke->engine, smoke->gt, id) {
-			struct i915_gem_context *ctx = smoke_context(smoke);
-			int err;
-
-			err = smoke_submit(smoke,
-					   ctx, random_priority(&smoke->prng),
-					   flags & BATCH ? smoke->batch : NULL);
-			if (err)
-				return err;
-
-			count++;
-		}
-	} while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
-
-	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
-		count, flags, smoke->gt->info.num_engines, smoke->ncontext);
-	return 0;
-}
-
-static int live_preempt_smoke(void *arg)
-{
-	struct preempt_smoke smoke = {
-		.gt = arg,
-		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
-		.ncontext = 256,
-	};
-	const unsigned int phase[] = { 0, BATCH };
-	struct igt_live_test t;
-	int err = -ENOMEM;
-	u32 *cs;
-	int n;
-
-	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.gt->i915))
-		return 0;
-
-	smoke.contexts = kmalloc_array(smoke.ncontext,
-				       sizeof(*smoke.contexts),
-				       GFP_KERNEL);
-	if (!smoke.contexts)
-		return -ENOMEM;
-
-	smoke.batch =
-		i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
-	if (IS_ERR(smoke.batch)) {
-		err = PTR_ERR(smoke.batch);
-		goto err_free;
-	}
-
-	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
-	if (IS_ERR(cs)) {
-		err = PTR_ERR(cs);
-		goto err_batch;
-	}
-	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
-		cs[n] = MI_ARB_CHECK;
-	cs[n] = MI_BATCH_BUFFER_END;
-	i915_gem_object_flush_map(smoke.batch);
-	i915_gem_object_unpin_map(smoke.batch);
-
-	if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
-		err = -EIO;
-		goto err_batch;
-	}
-
-	for (n = 0; n < smoke.ncontext; n++) {
-		smoke.contexts[n] = kernel_context(smoke.gt->i915);
-		if (!smoke.contexts[n])
-			goto err_ctx;
-	}
-
-	for (n = 0; n < ARRAY_SIZE(phase); n++) {
-		err = smoke_crescendo(&smoke, phase[n]);
-		if (err)
-			goto err_ctx;
-
-		err = smoke_random(&smoke, phase[n]);
-		if (err)
-			goto err_ctx;
-	}
-
-err_ctx:
-	if (igt_live_test_end(&t))
-		err = -EIO;
-
-	for (n = 0; n < smoke.ncontext; n++) {
-		if (!smoke.contexts[n])
-			break;
-		kernel_context_close(smoke.contexts[n]);
-	}
-
-err_batch:
-	i915_gem_object_put(smoke.batch);
-err_free:
-	kfree(smoke.contexts);
-
-	return err;
-}
-
-static int nop_virtual_engine(struct intel_gt *gt,
-			      struct intel_engine_cs **siblings,
-			      unsigned int nsibling,
-			      unsigned int nctx,
-			      unsigned int flags)
-#define CHAIN BIT(0)
-{
-	IGT_TIMEOUT(end_time);
-	struct i915_request *request[16] = {};
-	struct intel_context *ve[16];
-	unsigned long n, prime, nc;
-	struct igt_live_test t;
-	ktime_t times[2] = {};
-	int err;
-
-	GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
-
-	for (n = 0; n < nctx; n++) {
-		ve[n] = intel_execlists_create_virtual(siblings, nsibling);
-		if (IS_ERR(ve[n])) {
-			err = PTR_ERR(ve[n]);
-			nctx = n;
-			goto out;
-		}
-
-		err = intel_context_pin(ve[n]);
-		if (err) {
-			intel_context_put(ve[n]);
-			nctx = n;
-			goto out;
-		}
-	}
-
-	err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
-	if (err)
-		goto out;
-
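-	/*
-	 * Step the batch size through the primes up to 8192 (or until
-	 * time runs out), measuring how long each batch takes to retire.
-	 */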
-	for_each_prime_number_from(prime, 1, 8192) {
-		times[1] = ktime_get_raw();
-
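-		/*
-		 * With CHAIN, each context submits its whole batch of
-		 * requests back-to-back; otherwise submission round-robins
-		 * across the virtual engines one request at a time.
-		 */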
-		if (flags & CHAIN) {
-			for (nc = 0; nc < nctx; nc++) {
-				for (n = 0; n < prime; n++) {
-					struct i915_request *rq;
-
-					rq = i915_request_create(ve[nc]);
-					if (IS_ERR(rq)) {
-						err = PTR_ERR(rq);
-						goto out;
-					}
-
-					if (request[nc])
-						i915_request_put(request[nc]);
-					request[nc] = i915_request_get(rq);
-					i915_request_add(rq);
-				}
-			}
-		} else {
-			for (n = 0; n < prime; n++) {
-				for (nc = 0; nc < nctx; nc++) {
-					struct i915_request *rq;
-
-					rq = i915_request_create(ve[nc]);
-					if (IS_ERR(rq)) {
-						err = PTR_ERR(rq);
-						goto out;
-					}
-
-					if (request[nc])
-						i915_request_put(request[nc]);
-					request[nc] = i915_request_get(rq);
-					i915_request_add(rq);
-				}
-			}
-		}
-
-		for (nc = 0; nc < nctx; nc++) {
-			if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
-				pr_err("%s(%s): wait for %llx:%lld timed out\n",
-				       __func__, ve[0]->engine->name,
-				       request[nc]->fence.context,
-				       request[nc]->fence.seqno);
-
-				GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
-					  __func__, ve[0]->engine->name,
-					  request[nc]->fence.context,
-					  request[nc]->fence.seqno);
-				GEM_TRACE_DUMP();
-				intel_gt_set_wedged(gt);
-				break;
-			}
-		}
-
-		times[1] = ktime_sub(ktime_get_raw(), times[1]);
-		if (prime == 1)
-			times[0] = times[1];
-
-		for (nc = 0; nc < nctx; nc++) {
-			i915_request_put(request[nc]);
-			request[nc] = NULL;
-		}
-
-		if (__igt_timeout(end_time, NULL))
-			break;
-	}
-
-	err = igt_live_test_end(&t);
-	if (err)
-		goto out;
-
-	pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
-		nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
-		prime, div64_u64(ktime_to_ns(times[1]), prime));
-
-out:
-	if (igt_flush_test(gt->i915))
-		err = -EIO;
-
-	for (nc = 0; nc < nctx; nc++) {
-		i915_request_put(request[nc]);
-		intel_context_unpin(ve[nc]);
-		intel_context_put(ve[nc]);
-	}
-	return err;
-}
-
-static unsigned int
-__select_siblings(struct intel_gt *gt,
-		  unsigned int class,
-		  struct intel_engine_cs **siblings,
-		  bool (*filter)(const struct intel_engine_cs *))
-{
-	unsigned int n = 0;
-	unsigned int inst;
-
-	for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
-		if (!gt->engine_class[class][inst])
-			continue;
-
-		if (filter && !filter(gt->engine_class[class][inst]))
-			continue;
-
-		siblings[n++] = gt->engine_class[class][inst];
-	}
-
-	return n;
-}
-
-static unsigned int
-select_siblings(struct intel_gt *gt,
-		unsigned int class,
-		struct intel_engine_cs **siblings)
-{
-	return __select_siblings(gt, class, siblings, NULL);
-}
-
-static int live_virtual_engine(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	unsigned int class;
-	int err;
-
-	if (intel_uc_uses_guc_submission(&gt->uc))
-		return 0;
-
-	for_each_engine(engine, gt, id) {
-		err = nop_virtual_engine(gt, &engine, 1, 1, 0);
-		if (err) {
-			pr_err("Failed to wrap engine %s: err=%d\n",
-			       engine->name, err);
-			return err;
-		}
-	}
-
-	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
-		int nsibling, n;
-
-		nsibling = select_siblings(gt, class, siblings);
-		if (nsibling < 2)
-			continue;
-
-		for (n = 1; n <= nsibling + 1; n++) {
-			err = nop_virtual_engine(gt, siblings, nsibling,
-						 n, 0);
-			if (err)
-				return err;
-		}
-
-		err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-static int mask_virtual_engine(struct intel_gt *gt,
-			       struct intel_engine_cs **siblings,
-			       unsigned int nsibling)
-{
-	struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
-	struct intel_context *ve;
-	struct igt_live_test t;
-	unsigned int n;
-	int err;
-
-	/*
-	 * Check that by setting the execution mask on a request, we can
-	 * restrict it to our desired engine within the virtual engine.
-	 */
-
-	ve = intel_execlists_create_virtual(siblings, nsibling);
-	if (IS_ERR(ve)) {
-		err = PTR_ERR(ve);
-		goto out_close;
-	}
-
-	err = intel_context_pin(ve);
-	if (err)
-		goto out_put;
-
-	err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
-	if (err)
-		goto out_unpin;
-
-	for (n = 0; n < nsibling; n++) {
-		request[n] = i915_request_create(ve);
-		if (IS_ERR(request[n])) {
-			err = PTR_ERR(request[n]);
-			nsibling = n;
-			goto out;
-		}
-
-		/* Reverse order as it's more likely to be unnatural */
-		request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
-
-		i915_request_get(request[n]);
-		i915_request_add(request[n]);
-	}
-
-	for (n = 0; n < nsibling; n++) {
-		if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
-			pr_err("%s(%s): wait for %llx:%lld timed out\n",
-			       __func__, ve->engine->name,
-			       request[n]->fence.context,
-			       request[n]->fence.seqno);
-
-			GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
-				  __func__, ve->engine->name,
-				  request[n]->fence.context,
-				  request[n]->fence.seqno);
-			GEM_TRACE_DUMP();
-			intel_gt_set_wedged(gt);
-			err = -EIO;
-			goto out;
-		}
-
-		if (request[n]->engine != siblings[nsibling - n - 1]) {
-			pr_err("Executed on wrong sibling '%s', expected '%s'\n",
-			       request[n]->engine->name,
-			       siblings[nsibling - n - 1]->name);
-			err = -EINVAL;
-			goto out;
-		}
-	}
-
-	err = igt_live_test_end(&t);
-out:
-	if (igt_flush_test(gt->i915))
-		err = -EIO;
-
-	for (n = 0; n < nsibling; n++)
-		i915_request_put(request[n]);
-
-out_unpin:
-	intel_context_unpin(ve);
-out_put:
-	intel_context_put(ve);
-out_close:
-	return err;
-}
-
-static int live_virtual_mask(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
-	unsigned int class;
-	int err;
-
-	if (intel_uc_uses_guc_submission(&gt->uc))
-		return 0;
-
-	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
-		unsigned int nsibling;
-
-		nsibling = select_siblings(gt, class, siblings);
-		if (nsibling < 2)
-			continue;
-
-		err = mask_virtual_engine(gt, siblings, nsibling);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-static int slicein_virtual_engine(struct intel_gt *gt,
-				  struct intel_engine_cs **siblings,
-				  unsigned int nsibling)
-{
-	const long timeout = slice_timeout(siblings[0]);
-	struct intel_context *ce;
-	struct i915_request *rq;
-	struct igt_spinner spin;
-	unsigned int n;
-	int err = 0;
-
-	/*
-	 * Virtual requests must take part in timeslicing on the target engines.
-	 */
-
-	if (igt_spinner_init(&spin, gt))
-		return -ENOMEM;
-
-	for (n = 0; n < nsibling; n++) {
-		ce = intel_context_create(siblings[n]);
-		if (IS_ERR(ce)) {
-			err = PTR_ERR(ce);
-			goto out;
-		}
-
-		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
-		intel_context_put(ce);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			goto out;
-		}
-
-		i915_request_add(rq);
-	}
-
-	ce = intel_execlists_create_virtual(siblings, nsibling);
-	if (IS_ERR(ce)) {
-		err = PTR_ERR(ce);
-		goto out;
-	}
-
-	rq = intel_context_create_request(ce);
-	intel_context_put(ce);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto out;
-	}
-
-	i915_request_get(rq);
-	i915_request_add(rq);
-	if (i915_request_wait(rq, 0, timeout) < 0) {
-		GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
-			      __func__, rq->engine->name);
-		GEM_TRACE_DUMP();
-		intel_gt_set_wedged(gt);
-		err = -EIO;
-	}
-	i915_request_put(rq);
-
-out:
-	igt_spinner_end(&spin);
-	if (igt_flush_test(gt->i915))
-		err = -EIO;
-	igt_spinner_fini(&spin);
-	return err;
-}
-
-static int sliceout_virtual_engine(struct intel_gt *gt,
-				   struct intel_engine_cs **siblings,
-				   unsigned int nsibling)
-{
-	const long timeout = slice_timeout(siblings[0]);
-	struct intel_context *ce;
-	struct i915_request *rq;
-	struct igt_spinner spin;
-	unsigned int n;
-	int err = 0;
-
-	/*
-	 * Virtual requests must allow others a fair timeslice.
-	 */
-
-	if (igt_spinner_init(&spin, gt))
-		return -ENOMEM;
-
-	/* XXX We do not handle oversubscription and fairness with normal rq */
-	for (n = 0; n < nsibling; n++) {
-		ce = intel_execlists_create_virtual(siblings, nsibling);
-		if (IS_ERR(ce)) {
-			err = PTR_ERR(ce);
-			goto out;
-		}
-
-		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
-		intel_context_put(ce);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			goto out;
-		}
-
-		i915_request_add(rq);
-	}
-
-	for (n = 0; !err && n < nsibling; n++) {
-		ce = intel_context_create(siblings[n]);
-		if (IS_ERR(ce)) {
-			err = PTR_ERR(ce);
-			goto out;
-		}
-
-		rq = intel_context_create_request(ce);
-		intel_context_put(ce);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			goto out;
-		}
-
-		i915_request_get(rq);
-		i915_request_add(rq);
-		if (i915_request_wait(rq, 0, timeout) < 0) {
-			GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
-				      __func__, siblings[n]->name);
-			GEM_TRACE_DUMP();
-			intel_gt_set_wedged(gt);
-			err = -EIO;
-		}
-		i915_request_put(rq);
-	}
-
-out:
-	igt_spinner_end(&spin);
-	if (igt_flush_test(gt->i915))
-		err = -EIO;
-	igt_spinner_fini(&spin);
-	return err;
-}
-
-static int live_virtual_slice(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
-	unsigned int class;
-	int err;
-
-	if (intel_uc_uses_guc_submission(&gt->uc))
-		return 0;
-
-	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
-		unsigned int nsibling;
-
-		nsibling = __select_siblings(gt, class, siblings,
-					     intel_engine_has_timeslices);
-		if (nsibling < 2)
-			continue;
-
-		err = slicein_virtual_engine(gt, siblings, nsibling);
-		if (err)
-			return err;
-
-		err = sliceout_virtual_engine(gt, siblings, nsibling);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-static int preserved_virtual_engine(struct intel_gt *gt,
-				    struct intel_engine_cs **siblings,
-				    unsigned int nsibling)
-{
-	struct i915_request *last = NULL;
-	struct intel_context *ve;
-	struct i915_vma *scratch;
-	struct igt_live_test t;
-	unsigned int n;
-	int err = 0;
-	u32 *cs;
-
-	scratch = create_scratch(siblings[0]->gt);
-	if (IS_ERR(scratch))
-		return PTR_ERR(scratch);
-
-	err = i915_vma_sync(scratch);
-	if (err)
-		goto out_scratch;
-
-	ve = intel_execlists_create_virtual(siblings, nsibling);
-	if (IS_ERR(ve)) {
-		err = PTR_ERR(ve);
-		goto out_scratch;
-	}
-
-	err = intel_context_pin(ve);
-	if (err)
-		goto out_put;
-
-	err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
-	if (err)
-		goto out_unpin;
-
-	for (n = 0; n < NUM_GPR_DW; n++) {
-		struct intel_engine_cs *engine = siblings[n % nsibling];
-		struct i915_request *rq;
-
-		rq = i915_request_create(ve);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			goto out_end;
-		}
-
-		i915_request_put(last);
-		last = i915_request_get(rq);
-
-		cs = intel_ring_begin(rq, 8);
-		if (IS_ERR(cs)) {
-			i915_request_add(rq);
-			err = PTR_ERR(cs);
-			goto out_end;
-		}
-
-		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
-		*cs++ = CS_GPR(engine, n);
-		*cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
-		*cs++ = 0;
-
-		*cs++ = MI_LOAD_REGISTER_IMM(1);
-		*cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
-		*cs++ = n + 1;
-
-		*cs++ = MI_NOOP;
-		intel_ring_advance(rq, cs);
-
-		/* Restrict this request to run on a particular engine */
-		rq->execution_mask = engine->mask;
-		i915_request_add(rq);
-	}
-
-	if (i915_request_wait(last, 0, HZ / 5) < 0) {
-		err = -ETIME;
-		goto out_end;
-	}
-
-	cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
-	if (IS_ERR(cs)) {
-		err = PTR_ERR(cs);
-		goto out_end;
-	}
-
-	for (n = 0; n < NUM_GPR_DW; n++) {
-		if (cs[n] != n) {
-			pr_err("Incorrect value[%d] found for GPR[%d]\n",
-			       cs[n], n);
-			err = -EINVAL;
-			break;
-		}
-	}
-
-	i915_gem_object_unpin_map(scratch->obj);
-
-out_end:
-	if (igt_live_test_end(&t))
-		err = -EIO;
-	i915_request_put(last);
-out_unpin:
-	intel_context_unpin(ve);
-out_put:
-	intel_context_put(ve);
-out_scratch:
-	i915_vma_unpin_and_release(&scratch, 0);
-	return err;
-}
-
-static int live_virtual_preserved(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
-	unsigned int class;
-
-	/*
-	 * Check that the context image retains non-privileged (user) registers
-	 * from one engine to the next. For this we check that the CS_GPR
-	 * are preserved.
-	 */
-
-	if (intel_uc_uses_guc_submission(&gt->uc))
-		return 0;
-
-	/* As we use CS_GPR we cannot run before they existed on all engines. */
-	if (INTEL_GEN(gt->i915) < 9)
-		return 0;
-
-	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
-		int nsibling, err;
-
-		nsibling = select_siblings(gt, class, siblings);
-		if (nsibling < 2)
-			continue;
-
-		err = preserved_virtual_engine(gt, siblings, nsibling);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-static int bond_virtual_engine(struct intel_gt *gt,
-			       unsigned int class,
-			       struct intel_engine_cs **siblings,
-			       unsigned int nsibling,
-			       unsigned int flags)
-#define BOND_SCHEDULE BIT(0)
-{
-	struct intel_engine_cs *master;
-	struct i915_request *rq[16];
-	enum intel_engine_id id;
-	struct igt_spinner spin;
-	unsigned long n;
-	int err;
-
-	/*
-	 * A set of bonded requests is intended to be run concurrently
-	 * across a number of engines. We use one request per-engine
-	 * and a magic fence to schedule each of the bonded requests
-	 * at the same time. A consequence of our current scheduler is that
-	 * we only move requests to the HW ready queue when the request
-	 * becomes ready, that is when all of its prerequisite fences have
-	 * been signaled. As one of those fences is the master submit fence,
-	 * there is a delay on all secondary fences as the HW may be
-	 * currently busy. Equally, as all the requests are independent,
-	 * they may have other fences that delay individual request
-	 * submission to HW. Ergo, we do not guarantee that all requests are
-	 * immediately submitted to HW at the same time, just that if the
-	 * rules are abided by, they are ready at the same time as the
-	 * first is submitted. Userspace can embed semaphores in its batch
-	 * to ensure parallel execution of its phases as it requires.
-	 * Though naturally it gets requested that perhaps the scheduler should
-	 * take care of parallel execution, even across preemption events on
-	 * different HW. (The proper answer is of course "lalalala".)
-	 *
-	 * With the submit-fence, we have identified three possible phases
-	 * of synchronisation depending on the master fence: queued (not
-	 * ready), executing, and signaled. The first two are quite simple
-	 * and checked below. However, the signaled master fence handling is
-	 * contentious. Currently we do not distinguish between a signaled
-	 * fence and an expired fence, as once signaled it does not convey
-	 * any information about the previous execution. It may even be freed
-	 * and hence checking later it may not exist at all. Ergo we currently
-	 * do not apply the bonding constraint for an already signaled fence,
-	 * as our expectation is that it should not constrain the secondaries
-	 * and is outside of the scope of the bonded request API (i.e. all
-	 * userspace requests are meant to be running in parallel). As
-	 * it imposes no constraint, and is effectively a no-op, we do not
-	 * check below as normal execution flows are checked extensively above.
-	 *
-	 * XXX Is the degenerate handling of signaled submit fences the
-	 * expected behaviour for userpace?
-	 */
-
-	GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
-
-	if (igt_spinner_init(&spin, gt))
-		return -ENOMEM;
-
-	err = 0;
-	rq[0] = ERR_PTR(-ENOMEM);
-	for_each_engine(master, gt, id) {
-		struct i915_sw_fence fence = {};
-		struct intel_context *ce;
-
-		if (master->class == class)
-			continue;
-
-		ce = intel_context_create(master);
-		if (IS_ERR(ce)) {
-			err = PTR_ERR(ce);
-			goto out;
-		}
-
-		memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
-
-		rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
-		intel_context_put(ce);
-		if (IS_ERR(rq[0])) {
-			err = PTR_ERR(rq[0]);
-			goto out;
-		}
-		i915_request_get(rq[0]);
-
-		if (flags & BOND_SCHEDULE) {
-			onstack_fence_init(&fence);
-			err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
-							       &fence,
-							       GFP_KERNEL);
-		}
-
-		i915_request_add(rq[0]);
-		if (err < 0)
-			goto out;
-
-		if (!(flags & BOND_SCHEDULE) &&
-		    !igt_wait_for_spinner(&spin, rq[0])) {
-			err = -EIO;
-			goto out;
-		}
-
-		for (n = 0; n < nsibling; n++) {
-			struct intel_context *ve;
-
-			ve = intel_execlists_create_virtual(siblings, nsibling);
-			if (IS_ERR(ve)) {
-				err = PTR_ERR(ve);
-				onstack_fence_fini(&fence);
-				goto out;
-			}
-
-			err = intel_virtual_engine_attach_bond(ve->engine,
-							       master,
-							       siblings[n]);
-			if (err) {
-				intel_context_put(ve);
-				onstack_fence_fini(&fence);
-				goto out;
-			}
-
-			err = intel_context_pin(ve);
-			intel_context_put(ve);
-			if (err) {
-				onstack_fence_fini(&fence);
-				goto out;
-			}
-
-			rq[n + 1] = i915_request_create(ve);
-			intel_context_unpin(ve);
-			if (IS_ERR(rq[n + 1])) {
-				err = PTR_ERR(rq[n + 1]);
-				onstack_fence_fini(&fence);
-				goto out;
-			}
-			i915_request_get(rq[n + 1]);
-
-			err = i915_request_await_execution(rq[n + 1],
-							   &rq[0]->fence,
-							   ve->engine->bond_execute);
-			i915_request_add(rq[n + 1]);
-			if (err < 0) {
-				onstack_fence_fini(&fence);
-				goto out;
-			}
-		}
-		onstack_fence_fini(&fence);
-		intel_engine_flush_submission(master);
-		igt_spinner_end(&spin);
-
-		if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
-			pr_err("Master request did not execute (on %s)!\n",
-			       rq[0]->engine->name);
-			err = -EIO;
-			goto out;
-		}
-
-		for (n = 0; n < nsibling; n++) {
-			if (i915_request_wait(rq[n + 1], 0,
-					      MAX_SCHEDULE_TIMEOUT) < 0) {
-				err = -EIO;
-				goto out;
-			}
-
-			if (rq[n + 1]->engine != siblings[n]) {
-				pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
-				       siblings[n]->name,
-				       rq[n + 1]->engine->name,
-				       rq[0]->engine->name);
-				err = -EINVAL;
-				goto out;
-			}
-		}
-
-		for (n = 0; !IS_ERR(rq[n]); n++)
-			i915_request_put(rq[n]);
-		rq[0] = ERR_PTR(-ENOMEM);
-	}
+#include "i915_selftest.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_reset.h"
+#include "intel_ring.h"
+#include "selftest_engine_heartbeat.h"
+#include "selftests/i915_random.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_live_test.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/lib_sw_fence.h"
+#include "shmem_utils.h"
 
-out:
-	for (n = 0; !IS_ERR(rq[n]); n++)
-		i915_request_put(rq[n]);
-	if (igt_flush_test(gt->i915))
-		err = -EIO;
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
 
-	igt_spinner_fini(&spin);
-	return err;
-}
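+/*
+ * The CS general-purpose registers live at mmio_base + 0x600; n indexes
+ * 32-bit slots, so each 64-bit GPR occupies two consecutive entries.
+ */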
+#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
+#define NUM_GPR 16
+#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
 
-static int live_virtual_bond(void *arg)
+static struct i915_vma *create_scratch(struct intel_gt *gt)
 {
-	static const struct phase {
-		const char *name;
-		unsigned int flags;
-	} phases[] = {
-		{ "", 0 },
-		{ "schedule", BOND_SCHEDULE },
-		{ },
-	};
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
-	unsigned int class;
-	int err;
-
-	if (intel_uc_uses_guc_submission(&gt->uc))
-		return 0;
-
-	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
-		const struct phase *p;
-		int nsibling;
-
-		nsibling = select_siblings(gt, class, siblings);
-		if (nsibling < 2)
-			continue;
-
-		for (p = phases; p->name; p++) {
-			err = bond_virtual_engine(gt,
-						  class, siblings, nsibling,
-						  p->flags);
-			if (err) {
-				pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
-				       __func__, p->name, class, nsibling, err);
-				return err;
-			}
-		}
-	}
-
-	return 0;
+	return __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
 }
 
-static int reset_virtual_engine(struct intel_gt *gt,
-				struct intel_engine_cs **siblings,
-				unsigned int nsibling)
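+/* Has the request reached the HW: executing, on hold, or past its breadcrumb? */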
+static bool is_active(struct i915_request *rq)
 {
-	struct intel_engine_cs *engine;
-	struct intel_context *ve;
-	struct igt_spinner spin;
-	struct i915_request *rq;
-	unsigned int n;
-	int err = 0;
-
-	/*
-	 * In order to support offline error capture for fast preempt reset,
-	 * we need to decouple the guilty request and ensure that it and its
-	 * descendents are not executed while the capture is in progress.
-	 */
-
-	if (igt_spinner_init(&spin, gt))
-		return -ENOMEM;
-
-	ve = intel_execlists_create_virtual(siblings, nsibling);
-	if (IS_ERR(ve)) {
-		err = PTR_ERR(ve);
-		goto out_spin;
-	}
-
-	for (n = 0; n < nsibling; n++)
-		st_engine_heartbeat_disable(siblings[n]);
-
-	rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto out_heartbeat;
-	}
-	i915_request_add(rq);
-
-	if (!igt_wait_for_spinner(&spin, rq)) {
-		intel_gt_set_wedged(gt);
-		err = -ETIME;
-		goto out_heartbeat;
-	}
-
-	engine = rq->engine;
-	GEM_BUG_ON(engine == ve->engine);
-
-	/* Take ownership of the reset and tasklet */
-	if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
-			     &gt->reset.flags)) {
-		intel_gt_set_wedged(gt);
-		err = -EBUSY;
-		goto out_heartbeat;
-	}
-	tasklet_disable(&engine->execlists.tasklet);
-
-	engine->execlists.tasklet.func(engine->execlists.tasklet.data);
-	GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
-
-	/* Fake a preemption event; failed of course */
-	spin_lock_irq(&engine->active.lock);
-	__unwind_incomplete_requests(engine);
-	spin_unlock_irq(&engine->active.lock);
-	GEM_BUG_ON(rq->engine != ve->engine);
-
-	/* Reset the engine while keeping our active request on hold */
-	execlists_hold(engine, rq);
-	GEM_BUG_ON(!i915_request_on_hold(rq));
-
-	intel_engine_reset(engine, NULL);
-	GEM_BUG_ON(rq->fence.error != -EIO);
-
-	/* Release our grasp on the engine, letting CS flow again */
-	tasklet_enable(&engine->execlists.tasklet);
-	clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
-
-	/* Check that we do not resubmit the held request */
-	i915_request_get(rq);
-	if (!i915_request_wait(rq, 0, HZ / 5)) {
-		pr_err("%s: on hold request completed!\n",
-		       engine->name);
-		intel_gt_set_wedged(gt);
-		err = -EIO;
-		goto out_rq;
-	}
-	GEM_BUG_ON(!i915_request_on_hold(rq));
+	if (i915_request_is_active(rq))
+		return true;
 
-	/* But is resubmitted on release */
-	execlists_unhold(engine, rq);
-	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
-		pr_err("%s: held request did not complete!\n",
-		       engine->name);
-		intel_gt_set_wedged(gt);
-		err = -ETIME;
-	}
+	if (i915_request_on_hold(rq))
+		return true;
 
-out_rq:
-	i915_request_put(rq);
-out_heartbeat:
-	for (n = 0; n < nsibling; n++)
-		st_engine_heartbeat_enable(siblings[n]);
+	if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
+		return true;
 
-	intel_context_put(ve);
-out_spin:
-	igt_spinner_fini(&spin);
-	return err;
+	return false;
 }
 
-static int live_virtual_reset(void *arg)
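+/* Kick the submission tasklet and poll until the request reaches HW, or time out */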
+static int wait_for_submit(struct intel_engine_cs *engine,
+			   struct i915_request *rq,
+			   unsigned long timeout)
 {
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
-	unsigned int class;
-
-	/*
-	 * Check that we handle a reset event within a virtual engine.
-	 * Only the physical engine is reset, but we have to check the flow
-	 * of the virtual requests around the reset, and make sure it is not
-	 * forgotten.
-	 */
-
-	if (intel_uc_uses_guc_submission(&gt->uc))
-		return 0;
-
-	if (!intel_has_reset_engine(gt))
-		return 0;
-
-	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
-		int nsibling, err;
-
-		nsibling = select_siblings(gt, class, siblings);
-		if (nsibling < 2)
-			continue;
-
-		err = reset_virtual_engine(gt, siblings, nsibling);
-		if (err)
-			return err;
-	}
+	/* Ignore our own attempts to suppress excess tasklets */
+	tasklet_hi_schedule(&engine->execlists.tasklet);
 
-	return 0;
-}
+	timeout += jiffies;
+	do {
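+		/* Sample the deadline first so we always make one final check */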
+		bool done = time_after(jiffies, timeout);
 
-int intel_execlists_live_selftests(struct drm_i915_private *i915)
-{
-	static const struct i915_subtest tests[] = {
-		SUBTEST(live_sanitycheck),
-		SUBTEST(live_unlite_switch),
-		SUBTEST(live_unlite_preempt),
-		SUBTEST(live_unlite_ring),
-		SUBTEST(live_pin_rewind),
-		SUBTEST(live_hold_reset),
-		SUBTEST(live_error_interrupt),
-		SUBTEST(live_timeslice_preempt),
-		SUBTEST(live_timeslice_rewind),
-		SUBTEST(live_timeslice_queue),
-		SUBTEST(live_timeslice_nopreempt),
-		SUBTEST(live_busywait_preempt),
-		SUBTEST(live_preempt),
-		SUBTEST(live_late_preempt),
-		SUBTEST(live_nopreempt),
-		SUBTEST(live_preempt_cancel),
-		SUBTEST(live_suppress_self_preempt),
-		SUBTEST(live_chain_preempt),
-		SUBTEST(live_preempt_ring),
-		SUBTEST(live_preempt_gang),
-		SUBTEST(live_preempt_timeout),
-		SUBTEST(live_preempt_user),
-		SUBTEST(live_preempt_smoke),
-		SUBTEST(live_virtual_engine),
-		SUBTEST(live_virtual_mask),
-		SUBTEST(live_virtual_preserved),
-		SUBTEST(live_virtual_slice),
-		SUBTEST(live_virtual_bond),
-		SUBTEST(live_virtual_reset),
-	};
+		if (i915_request_completed(rq)) /* that was quick! */
+			return 0;
 
-	if (!HAS_EXECLISTS(i915))
-		return 0;
+		/* Wait until the HW has acknowledged the submission (or err) */
+		intel_engine_flush_submission(engine);
+		if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
+			return 0;
 
-	if (intel_gt_is_wedged(&i915->gt))
-		return 0;
+		if (done)
+			return -ETIME;
 
-	return intel_gt_live_subtests(tests, &i915->gt);
+		cond_resched();
+	} while (1);
 }
 
 static int emit_semaphore_signal(struct intel_context *ce, void *slot)
@@ -4775,9 +139,10 @@ static int live_lrc_layout(void *arg)
 	 * match the layout saved by HW.
 	 */
 
-	lrc = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	lrc = (u32 *)__get_free_page(GFP_KERNEL); /* requires page alignment */
 	if (!lrc)
 		return -ENOMEM;
+	GEM_BUG_ON(offset_in_page(lrc));
 
 	err = 0;
 	for_each_engine(engine, gt, id) {
@@ -4794,15 +159,12 @@ static int live_lrc_layout(void *arg)
 		}
 		hw += LRC_STATE_OFFSET / sizeof(*hw);
 
-		execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE),
-					 engine->kernel_context,
-					 engine,
-					 engine->kernel_context->ring,
-					 true);
+		__lrc_init_regs(memset(lrc, POISON_INUSE, PAGE_SIZE),
+				engine->kernel_context, engine, true);
 
 		dw = 0;
 		do {
-			u32 lri = hw[dw];
+			u32 lri = READ_ONCE(hw[dw]);
 
 			if (lri == 0) {
 				dw++;
@@ -4835,9 +197,11 @@ static int live_lrc_layout(void *arg)
 			dw++;
 
 			while (lri) {
-				if (hw[dw] != lrc[dw]) {
+				u32 offset = READ_ONCE(hw[dw]);
+
+				if (offset != lrc[dw]) {
 					pr_err("%s: Different registers found at dword %d, expected %x, found %x\n",
-					       engine->name, dw, hw[dw], lrc[dw]);
+					       engine->name, dw, offset, lrc[dw]);
 					err = -EINVAL;
 					break;
 				}
@@ -4849,7 +213,7 @@ static int live_lrc_layout(void *arg)
 				dw += 2;
 				lri -= 2;
 			}
-		} while ((lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+		} while (!err && (lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
 
 		if (err) {
 			pr_info("%s: HW register image:\n", engine->name);
@@ -4864,7 +228,7 @@ static int live_lrc_layout(void *arg)
 			break;
 	}
 
-	kfree(lrc);
+	free_page((unsigned long)lrc);
 	return err;
 }
 
@@ -5249,6 +613,10 @@ static int __live_lrc_gpr(struct intel_engine_cs *engine,
 		err = emit_semaphore_signal(engine->kernel_context, slot);
 		if (err)
 			goto err_rq;
+
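+		/* Wait until the request has been submitted to the HW */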
+		err = wait_for_submit(engine, rq, HZ / 2);
+		if (err)
+			goto err_rq;
 	} else {
 		slot[0] = 1;
 		wmb();
@@ -6242,16 +1610,17 @@ static void garbage_reset(struct intel_engine_cs *engine,
 	const unsigned int bit = I915_RESET_ENGINE + engine->id;
 	unsigned long *lock = &engine->gt->reset.flags;
 
-	if (test_and_set_bit(bit, lock))
-		return;
-
-	tasklet_disable(&engine->execlists.tasklet);
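+	/* __intel_engine_reset_bh() requires softirqs to be disabled */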
+	local_bh_disable();
+	if (!test_and_set_bit(bit, lock)) {
+		tasklet_disable(&engine->execlists.tasklet);
 
-	if (!rq->fence.error)
-		intel_engine_reset(engine, NULL);
+		if (!rq->fence.error)
+			__intel_engine_reset_bh(engine, NULL);
 
-	tasklet_enable(&engine->execlists.tasklet);
-	clear_and_wake_up_bit(bit, lock);
+		tasklet_enable(&engine->execlists.tasklet);
+		clear_and_wake_up_bit(bit, lock);
+	}
+	local_bh_enable();
 }
 
 static struct i915_request *garbage(struct intel_context *ce,
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index b25eba50c88e0759614ef75cacd59919f802352f..cf373c72359e53ee95b00dcd4f0cafe473fe83cb 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -5,6 +5,7 @@
  */
 
 #include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
 #include "i915_selftest.h"
 
 #include "gem/selftests/mock_context.h"
@@ -56,33 +57,6 @@ static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
 	return err;
 }
 
-static struct i915_vma *create_scratch(struct intel_gt *gt)
-{
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	int err;
-
-	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
-
-	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
-
-	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
-	if (IS_ERR(vma)) {
-		i915_gem_object_put(obj);
-		return vma;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
-	if (err) {
-		i915_gem_object_put(obj);
-		return ERR_PTR(err);
-	}
-
-	return vma;
-}
-
 static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
 {
 	struct drm_i915_mocs_table table;
@@ -101,7 +75,7 @@ static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
 	if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
 		arg->mocs = table;
 
-	arg->scratch = create_scratch(gt);
+	arg->scratch = __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
 	if (IS_ERR(arg->scratch))
 		return PTR_ERR(arg->scratch);
 
@@ -125,7 +99,7 @@ static void live_mocs_fini(struct live_mocs *arg)
 
 static int read_regs(struct i915_request *rq,
 		     u32 addr, unsigned int count,
-		     uint32_t *offset)
+		     u32 *offset)
 {
 	unsigned int i;
 	u32 *cs;
@@ -153,7 +127,7 @@ static int read_regs(struct i915_request *rq,
 
 static int read_mocs_table(struct i915_request *rq,
 			   const struct drm_i915_mocs_table *table,
-			   uint32_t *offset)
+			   u32 *offset)
 {
 	u32 addr;
 
@@ -167,7 +141,7 @@ static int read_mocs_table(struct i915_request *rq,
 
 static int read_l3cc_table(struct i915_request *rq,
 			   const struct drm_i915_mocs_table *table,
-			   uint32_t *offset)
+			   u32 *offset)
 {
 	u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
 
@@ -176,7 +150,7 @@ static int read_l3cc_table(struct i915_request *rq,
 
 static int check_mocs_table(struct intel_engine_cs *engine,
 			    const struct drm_i915_mocs_table *table,
-			    uint32_t **vaddr)
+			    u32 **vaddr)
 {
 	unsigned int i;
 	u32 expect;
@@ -205,7 +179,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
 
 static int check_l3cc_table(struct intel_engine_cs *engine,
 			    const struct drm_i915_mocs_table *table,
-			    uint32_t **vaddr)
+			    u32 **vaddr)
 {
 	/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
 	u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
@@ -361,29 +335,34 @@ static int active_engine_reset(struct intel_context *ce,
 static int __live_mocs_reset(struct live_mocs *mocs,
 			     struct intel_context *ce)
 {
+	struct intel_gt *gt = ce->engine->gt;
 	int err;
 
-	err = intel_engine_reset(ce->engine, "mocs");
-	if (err)
-		return err;
+	if (intel_has_reset_engine(gt)) {
+		err = intel_engine_reset(ce->engine, "mocs");
+		if (err)
+			return err;
 
-	err = check_mocs_engine(mocs, ce);
-	if (err)
-		return err;
+		err = check_mocs_engine(mocs, ce);
+		if (err)
+			return err;
 
-	err = active_engine_reset(ce, "mocs");
-	if (err)
-		return err;
+		err = active_engine_reset(ce, "mocs");
+		if (err)
+			return err;
 
-	err = check_mocs_engine(mocs, ce);
-	if (err)
-		return err;
+		err = check_mocs_engine(mocs, ce);
+		if (err)
+			return err;
+	}
 
-	intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs");
+	if (intel_has_gpu_reset(gt)) {
+		intel_gt_reset(gt, ce->engine->mask, "mocs");
 
-	err = check_mocs_engine(mocs, ce);
-	if (err)
-		return err;
+		err = check_mocs_engine(mocs, ce);
+		if (err)
+			return err;
+	}
 
 	return 0;
 }
@@ -398,9 +377,6 @@ static int live_mocs_reset(void *arg)
 
 	/* Check the mocs setup is retained over per-engine and global resets */
 
-	if (!intel_has_reset_engine(gt))
-		return 0;
-
 	err = live_mocs_init(&mocs, gt);
 	if (err)
 		return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 64ef5ee5decf261c6c49076ff33be1dc58f5b2f8..61abc05566019e2fa5a17da266b2c612b12790fb 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -6,6 +6,7 @@
 
 #include "intel_context.h"
 #include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
 #include "intel_gt_requests.h"
 #include "intel_ring.h"
 #include "selftest_rc6.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index ef5aeebbeeb066fa7ee92622d0cb8d8560c48c90..8784257ec8085837be65557cd59a3038558a2313 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -9,6 +9,7 @@
 
 #include "i915_memcpy.h"
 #include "i915_selftest.h"
+#include "intel_gpu_commands.h"
 #include "selftests/igt_reset.h"
 #include "selftests/igt_atomic.h"
 #include "selftests/igt_spinner.h"
@@ -95,10 +96,10 @@ __igt_reset_stolen(struct intel_gt *gt,
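+		/* The stolen pages are mapped as __iomem, so use the io accessors */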
 		if (!__drm_mm_interval_first(&gt->i915->mm.stolen,
 					     page << PAGE_SHIFT,
 					     ((page + 1) << PAGE_SHIFT) - 1))
-			memset32(s, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+			memset_io(s, STACK_MAGIC, PAGE_SIZE);
 
-		in = s;
-		if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE))
+		in = (void __force *)s;
+		if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
 			in = tmp;
 		crc[page] = crc32_le(0, in, PAGE_SIZE);
 
@@ -133,8 +134,8 @@ __igt_reset_stolen(struct intel_gt *gt,
 				      ggtt->error_capture.start,
 				      PAGE_SIZE);
 
-		in = s;
-		if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE))
+		in = (void __force *)s;
+		if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
 			in = tmp;
 		x = crc32_le(0, in, PAGE_SIZE);
 
@@ -320,17 +321,25 @@ static int igt_atomic_engine_reset(void *arg)
 		goto out_unlock;
 
 	for_each_engine(engine, gt, id) {
-		tasklet_disable(&engine->execlists.tasklet);
+		struct tasklet_struct *t = &engine->execlists.tasklet;
+
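+		/* Skip if the tasklet was never initialised (t->func unset) */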
+		if (t->func)
+			tasklet_disable(t);
 		intel_engine_pm_get(engine);
 
 		for (p = igt_atomic_phases; p->name; p++) {
 			GEM_TRACE("intel_engine_reset(%s) under %s\n",
 				  engine->name, p->name);
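+			/* Outside of the softirq phase, disable bottom halves ourselves */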
+			if (strcmp(p->name, "softirq"))
+				local_bh_disable();
 
 			p->critical_section_begin();
-			err = intel_engine_reset(engine, NULL);
+			err = __intel_engine_reset_bh(engine, NULL);
 			p->critical_section_end();
 
+			if (strcmp(p->name, "softirq"))
+				local_bh_enable();
+
 			if (err) {
 				pr_err("intel_engine_reset(%s) failed under %s\n",
 				       engine->name, p->name);
@@ -339,7 +348,10 @@ static int igt_atomic_engine_reset(void *arg)
 		}
 
 		intel_engine_pm_put(engine);
-		tasklet_enable(&engine->execlists.tasklet);
+		if (t->func) {
+			tasklet_enable(t);
+			tasklet_hi_schedule(t);
+		}
 		if (err)
 			break;
 	}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index aa5675ecb5ccc81c4759d417eb817e0d451909c0..967641fee42a9305802f4c8e6ef82c7108b11dcf 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -185,7 +185,10 @@ static u8 rps_set_check(struct intel_rps *rps, u8 freq)
 {
 	mutex_lock(&rps->lock);
 	GEM_BUG_ON(!intel_rps_is_active(rps));
-	intel_rps_set(rps, freq);
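+	/* The frequency request may not take effect immediately; retry for up to 50ms */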
+	if (wait_for(!intel_rps_set(rps, freq), 50)) {
+		mutex_unlock(&rps->lock);
+		return 0;
+	}
 	GEM_BUG_ON(rps->last_freq != freq);
 	mutex_unlock(&rps->lock);
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 2edf2b15885f7c72e99ef5e71f457bdedbb54158..6f3a3687ef0fa34b733331bea17e207fdc735693 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -9,6 +9,7 @@
 #include "intel_context.h"
 #include "intel_engine_heartbeat.h"
 #include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
 #include "intel_gt.h"
 #include "intel_gt_requests.h"
 #include "intel_ring.h"
@@ -1090,12 +1091,6 @@ static int live_hwsp_read(void *arg)
 			}
 			count++;
 
-			if (8 * watcher[1].rq->ring->emit >
-			    3 * watcher[1].rq->ring->size) {
-				i915_request_put(rq);
-				break;
-			}
-
 			/* Flush the timeline before manually wrapping again */
 			if (i915_request_wait(rq,
 					      I915_WAIT_INTERRUPTIBLE,
@@ -1104,9 +1099,14 @@ static int live_hwsp_read(void *arg)
 				i915_request_put(rq);
 				goto out;
 			}
-
 			retire_requests(tl);
 			i915_request_put(rq);
+
+			/* Single requests are limited to half a ring at most */
+			if (8 * watcher[1].rq->ring->emit >
+			    3 * watcher[1].rq->ring->size)
+				break;
 		} while (!__igt_timeout(end_time, NULL));
 		WRITE_ONCE(*(u32 *)tl->hwsp_seqno, 0xdeadbeef);
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 61a0532d0f3d37a50ec355aac21888e2b6969e1e..2070b91cb6077a7814eea7c40587d76b4b186735 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -95,8 +95,9 @@ reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
 }
 
 static struct drm_i915_gem_object *
-read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+read_nonprivs(struct intel_context *ce)
 {
+	struct intel_engine_cs *engine = ce->engine;
 	const u32 base = engine->mmio_base;
 	struct drm_i915_gem_object *result;
 	struct i915_request *rq;
@@ -130,7 +131,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 	if (err)
 		goto err_obj;
 
-	rq = igt_request_alloc(ctx, engine);
+	rq = intel_context_create_request(ce);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_pin;
@@ -145,7 +146,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 		goto err_req;
 
 	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
-	if (INTEL_GEN(ctx->i915) >= 8)
+	if (INTEL_GEN(engine->i915) >= 8)
 		srm++;
 
 	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
@@ -200,16 +201,16 @@ print_results(const struct intel_engine_cs *engine, const u32 *results)
 	}
 }
 
-static int check_whitelist(struct i915_gem_context *ctx,
-			   struct intel_engine_cs *engine)
+static int check_whitelist(struct intel_context *ce)
 {
+	struct intel_engine_cs *engine = ce->engine;
 	struct drm_i915_gem_object *results;
 	struct intel_wedge_me wedge;
 	u32 *vaddr;
 	int err;
 	int i;
 
-	results = read_nonprivs(ctx, engine);
+	results = read_nonprivs(ce);
 	if (IS_ERR(results))
 		return PTR_ERR(results);
 
@@ -293,8 +294,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 					int (*reset)(struct intel_engine_cs *),
 					const char *name)
 {
-	struct drm_i915_private *i915 = engine->i915;
-	struct i915_gem_context *ctx, *tmp;
+	struct intel_context *ce, *tmp;
 	struct igt_spinner spin;
 	intel_wakeref_t wakeref;
 	int err;
@@ -302,15 +302,15 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
 		engine->whitelist.count, engine->name, name);
 
-	ctx = kernel_context(i915);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
 
 	err = igt_spinner_init(&spin, engine->gt);
 	if (err)
 		goto out_ctx;
 
-	err = check_whitelist(ctx, engine);
+	err = check_whitelist(ce);
 	if (err) {
 		pr_err("Invalid whitelist *before* %s reset!\n", name);
 		goto out_spin;
@@ -330,22 +330,22 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 		goto out_spin;
 	}
 
-	err = check_whitelist(ctx, engine);
+	err = check_whitelist(ce);
 	if (err) {
 		pr_err("Whitelist not preserved in context across %s reset!\n",
 		       name);
 		goto out_spin;
 	}
 
-	tmp = kernel_context(i915);
+	tmp = intel_context_create(engine);
 	if (IS_ERR(tmp)) {
 		err = PTR_ERR(tmp);
 		goto out_spin;
 	}
-	kernel_context_close(ctx);
-	ctx = tmp;
+	intel_context_put(ce);
+	ce = tmp;
 
-	err = check_whitelist(ctx, engine);
+	err = check_whitelist(ce);
 	if (err) {
 		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
 		       name);
@@ -355,7 +355,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 out_spin:
 	igt_spinner_fini(&spin);
 out_ctx:
-	kernel_context_close(ctx);
+	intel_context_put(ce);
 	return err;
 }
 
@@ -486,10 +486,11 @@ static int check_dirty_whitelist(struct intel_context *ce)
 	struct intel_engine_cs *engine = ce->engine;
 	struct i915_vma *scratch;
 	struct i915_vma *batch;
-	int err = 0, i, v;
+	int err = 0, i, v, sz;
 	u32 *cs, *results;
 
-	scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
+	sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
+	scratch = __vm_create_scratch_for_read(ce->vm, sz);
 	if (IS_ERR(scratch))
 		return PTR_ERR(scratch);
 
@@ -786,15 +787,15 @@ static int live_reset_whitelist(void *arg)
 	return err;
 }
 
-static int read_whitelisted_registers(struct i915_gem_context *ctx,
-				      struct intel_engine_cs *engine,
+static int read_whitelisted_registers(struct intel_context *ce,
 				      struct i915_vma *results)
 {
+	struct intel_engine_cs *engine = ce->engine;
 	struct i915_request *rq;
 	int i, err = 0;
 	u32 srm, *cs;
 
-	rq = igt_request_alloc(ctx, engine);
+	rq = intel_context_create_request(ce);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
@@ -807,7 +808,7 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
 		goto err_req;
 
 	srm = MI_STORE_REGISTER_MEM;
-	if (INTEL_GEN(ctx->i915) >= 8)
+	if (INTEL_GEN(engine->i915) >= 8)
 		srm++;
 
 	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
@@ -834,18 +835,15 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
 	return request_add_sync(rq, err);
 }
 
-static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
-				       struct intel_engine_cs *engine)
+static int scrub_whitelisted_registers(struct intel_context *ce)
 {
-	struct i915_address_space *vm;
+	struct intel_engine_cs *engine = ce->engine;
 	struct i915_request *rq;
 	struct i915_vma *batch;
 	int i, err = 0;
 	u32 *cs;
 
-	vm = i915_gem_context_get_vm_rcu(ctx);
-	batch = create_batch(vm);
-	i915_vm_put(vm);
+	batch = create_batch(ce->vm);
 	if (IS_ERR(batch))
 		return PTR_ERR(batch);
 
@@ -873,7 +871,7 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
 	i915_gem_object_flush_map(batch->obj);
 	intel_gt_chipset_flush(engine->gt);
 
-	rq = igt_request_alloc(ctx, engine);
+	rq = intel_context_create_request(ce);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_unpin;
@@ -1016,7 +1014,6 @@ static int live_isolated_whitelist(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct {
-		struct i915_gem_context *ctx;
 		struct i915_vma *scratch[2];
 	} client[2] = {};
 	struct intel_engine_cs *engine;
@@ -1032,61 +1029,57 @@ static int live_isolated_whitelist(void *arg)
 		return 0;
 
 	for (i = 0; i < ARRAY_SIZE(client); i++) {
-		struct i915_address_space *vm;
-		struct i915_gem_context *c;
-
-		c = kernel_context(gt->i915);
-		if (IS_ERR(c)) {
-			err = PTR_ERR(c);
-			goto err;
-		}
-
-		vm = i915_gem_context_get_vm_rcu(c);
-
-		client[i].scratch[0] = create_scratch(vm, 1024);
+		client[i].scratch[0] =
+			__vm_create_scratch_for_read(gt->vm, 4096);
 		if (IS_ERR(client[i].scratch[0])) {
 			err = PTR_ERR(client[i].scratch[0]);
-			i915_vm_put(vm);
-			kernel_context_close(c);
 			goto err;
 		}
 
-		client[i].scratch[1] = create_scratch(vm, 1024);
+		client[i].scratch[1] =
+			__vm_create_scratch_for_read(gt->vm, 4096);
 		if (IS_ERR(client[i].scratch[1])) {
 			err = PTR_ERR(client[i].scratch[1]);
 			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
-			i915_vm_put(vm);
-			kernel_context_close(c);
 			goto err;
 		}
-
-		client[i].ctx = c;
-		i915_vm_put(vm);
 	}
 
 	for_each_engine(engine, gt, id) {
+		struct intel_context *ce[2];
+
 		if (!engine->kernel_context->vm)
 			continue;
 
 		if (!whitelist_writable_count(engine))
 			continue;
 
+		ce[0] = intel_context_create(engine);
+		if (IS_ERR(ce[0])) {
+			err = PTR_ERR(ce[0]);
+			break;
+		}
+		ce[1] = intel_context_create(engine);
+		if (IS_ERR(ce[1])) {
+			err = PTR_ERR(ce[1]);
+			intel_context_put(ce[0]);
+			break;
+		}
+
 		/* Read default values */
-		err = read_whitelisted_registers(client[0].ctx, engine,
-						 client[0].scratch[0]);
+		err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
 		if (err)
-			goto err;
+			goto err_ce;
 
 		/* Try to overwrite registers (should only affect ctx0) */
-		err = scrub_whitelisted_registers(client[0].ctx, engine);
+		err = scrub_whitelisted_registers(ce[0]);
 		if (err)
-			goto err;
+			goto err_ce;
 
 		/* Read values from ctx1, we expect these to be defaults */
-		err = read_whitelisted_registers(client[1].ctx, engine,
-						 client[1].scratch[0]);
+		err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
 		if (err)
-			goto err;
+			goto err_ce;
 
 		/* Verify that both reads return the same default values */
 		err = check_whitelisted_registers(engine,
@@ -1094,31 +1087,29 @@ static int live_isolated_whitelist(void *arg)
 						  client[1].scratch[0],
 						  result_eq);
 		if (err)
-			goto err;
+			goto err_ce;
 
 		/* Read back the updated values in ctx0 */
-		err = read_whitelisted_registers(client[0].ctx, engine,
-						 client[0].scratch[1]);
+		err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
 		if (err)
-			goto err;
+			goto err_ce;
 
 		/* User should be granted privilege to overwrite regs */
 		err = check_whitelisted_registers(engine,
 						  client[0].scratch[0],
 						  client[0].scratch[1],
 						  result_neq);
+err_ce:
+		intel_context_put(ce[1]);
+		intel_context_put(ce[0]);
 		if (err)
-			goto err;
+			break;
 	}
 
 err:
 	for (i = 0; i < ARRAY_SIZE(client); i++) {
-		if (!client[i].ctx)
-			break;
-
 		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
 		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
-		kernel_context_close(client[i].ctx);
 	}
 
 	if (igt_flush_test(gt->i915))
@@ -1128,18 +1119,21 @@ static int live_isolated_whitelist(void *arg)
 }
 
 static bool
-verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
+verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
 		const char *str)
 {
-	struct drm_i915_private *i915 = ctx->i915;
-	struct i915_gem_engines_iter it;
-	struct intel_context *ce;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	bool ok = true;
 
-	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
+	ok &= wa_list_verify(gt->uncore, &lists->gt_wa_list, str);
+
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce;
 
-	for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
-		enum intel_engine_id id = ce->engine->id;
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce))
+			return false;
 
 		ok &= engine_wa_list_verify(ce,
 					    &lists->engine[id].wa_list,
@@ -1148,6 +1142,8 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
 		ok &= engine_wa_list_verify(ce,
 					    &lists->engine[id].ctx_wa_list,
 					    str) == 0;
+
+		intel_context_put(ce);
 	}
 
 	return ok;
@@ -1157,7 +1153,6 @@ static int
 live_gpu_reset_workarounds(void *arg)
 {
 	struct intel_gt *gt = arg;
-	struct i915_gem_context *ctx;
 	intel_wakeref_t wakeref;
 	struct wa_lists lists;
 	bool ok;
@@ -1165,12 +1160,6 @@ live_gpu_reset_workarounds(void *arg)
 	if (!intel_has_gpu_reset(gt))
 		return 0;
 
-	ctx = kernel_context(gt->i915);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
-
-	i915_gem_context_lock_engines(ctx);
-
 	pr_info("Verifying after GPU reset...\n");
 
 	igt_global_reset_lock(gt);
@@ -1178,17 +1167,15 @@ live_gpu_reset_workarounds(void *arg)
 
 	reference_lists_init(gt, &lists);
 
-	ok = verify_wa_lists(ctx, &lists, "before reset");
+	ok = verify_wa_lists(gt, &lists, "before reset");
 	if (!ok)
 		goto out;
 
 	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");
 
-	ok = verify_wa_lists(ctx, &lists, "after reset");
+	ok = verify_wa_lists(gt, &lists, "after reset");
 
 out:
-	i915_gem_context_unlock_engines(ctx);
-	kernel_context_close(ctx);
 	reference_lists_fini(gt, &lists);
 	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
 	igt_global_reset_unlock(gt);
@@ -1200,8 +1187,8 @@ static int
 live_engine_reset_workarounds(void *arg)
 {
 	struct intel_gt *gt = arg;
-	struct i915_gem_engines_iter it;
-	struct i915_gem_context *ctx;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	struct intel_context *ce;
 	struct igt_spinner spin;
 	struct i915_request *rq;
@@ -1212,30 +1199,30 @@ live_engine_reset_workarounds(void *arg)
 	if (!intel_has_reset_engine(gt))
 		return 0;
 
-	ctx = kernel_context(gt->i915);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
-
 	igt_global_reset_lock(gt);
 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
 
 	reference_lists_init(gt, &lists);
 
-	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
-		struct intel_engine_cs *engine = ce->engine;
+	for_each_engine(engine, gt, id) {
 		bool ok;
 
 		pr_info("Verifying after %s reset...\n", engine->name);
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			ret = PTR_ERR(ce);
+			break;
+		}
 
-		ok = verify_wa_lists(ctx, &lists, "before reset");
+		ok = verify_wa_lists(gt, &lists, "before reset");
 		if (!ok) {
 			ret = -ESRCH;
 			goto err;
 		}
 
-		intel_engine_reset(engine, "live_workarounds");
+		intel_engine_reset(engine, "live_workarounds:idle");
 
-		ok = verify_wa_lists(ctx, &lists, "after idle reset");
+		ok = verify_wa_lists(gt, &lists, "after idle reset");
 		if (!ok) {
 			ret = -ESRCH;
 			goto err;
@@ -1259,23 +1246,26 @@ live_engine_reset_workarounds(void *arg)
 			goto err;
 		}
 
-		intel_engine_reset(engine, "live_workarounds");
+		intel_engine_reset(engine, "live_workarounds:active");
 
 		igt_spinner_end(&spin);
 		igt_spinner_fini(&spin);
 
-		ok = verify_wa_lists(ctx, &lists, "after busy reset");
+		ok = verify_wa_lists(gt, &lists, "after busy reset");
 		if (!ok) {
 			ret = -ESRCH;
 			goto err;
 		}
-	}
+
 err:
-	i915_gem_context_unlock_engines(ctx);
+		intel_context_put(ce);
+		if (ret)
+			break;
+	}
+
 	reference_lists_fini(gt, &lists);
 	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
 	igt_global_reset_unlock(gt);
-	kernel_context_close(ctx);
 
 	igt_flush_test(gt->i915);
 
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 5982b62f913d1d953c09128647c5fa9e60aab810..a4d8fc9e2374cae3c8d17e61b65a6cbe2847b223 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -33,7 +33,7 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
 	struct file *file;
 	void *ptr;
 
-	if (obj->ops == &i915_gem_shmem_ops) {
+	if (i915_gem_object_is_shmem(obj)) {
 		file = obj->base.filp;
 		atomic_long_inc(&file->f_count);
 		return file;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 2a343a97798720cbdff68093df0a02a6b998a659..4545e90e3bf191749a5ce265a309903046e3f1f6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -579,20 +579,8 @@ int intel_guc_reset_engine(struct intel_guc *guc,
  */
 int intel_guc_resume(struct intel_guc *guc)
 {
-	u32 action[] = {
-		INTEL_GUC_ACTION_EXIT_S_STATE,
-		GUC_POWER_D0,
-	};
-
-	/*
-	 * If GuC communication is enabled but submission is not supported,
-	 * we do not need to resume the GuC but we do need to enable the
-	 * GuC communication on resume (above).
-	 */
-	if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
-		return 0;
-
-	return intel_guc_send(guc, action, ARRAY_SIZE(action));
+	/* XXX: to be implemented with submission interface rework */
+	return 0;
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index e84ab67b317d76aff9cbb676bcf948493b4a2510..bc2ba7d0626c617fc4153038a2538fe2b0f1dc2d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -47,13 +47,6 @@ struct intel_guc {
 	struct i915_vma *stage_desc_pool;
 	void *stage_desc_pool_vaddr;
 
-	struct i915_vma *workqueue;
-	void *workqueue_vaddr;
-	spinlock_t wq_lock;
-
-	struct i915_vma *proc_desc;
-	void *proc_desc_vaddr;
-
 	/* Control params for fw initialization */
 	u32 params[GUC_CTL_MAX_DWORDS];
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 5212ff844292adc538c92a01697931451343cc24..17526717368c4bdd0597f3cd31f5be7980999ea3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -4,6 +4,7 @@
  */
 
 #include "gt/intel_gt.h"
+#include "gt/intel_lrc.h"
 #include "intel_guc_ads.h"
 #include "intel_uc.h"
 #include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index f9d0907ea1a540433102e74b50e7ef6aa0d6770a..2270d6b3b27201a1ad5a9de4782cb1151c0ff8e4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -76,7 +76,6 @@ static inline bool guc_ready(struct intel_uncore *uncore, u32 *status)
 
 static int guc_wait_ucode(struct intel_uncore *uncore)
 {
-	struct drm_device *drm = &uncore->i915->drm;
 	u32 status;
 	int ret;
 
@@ -89,11 +88,11 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
 	 * attempt the ucode load again if this happens.)
 	 */
 	ret = wait_for(guc_ready(uncore, &status), 100);
-	DRM_DEBUG_DRIVER("GuC status %#x\n", status);
-
 	if (ret) {
-		drm_err(drm, "GuC load failed: status = 0x%08X\n", status);
-		drm_err(drm, "GuC load failed: status: Reset = %d, "
+		struct drm_device *drm = &uncore->i915->drm;
+
+		drm_dbg(drm, "GuC load failed: status = 0x%08X\n", status);
+		drm_dbg(drm, "GuC load failed: status: Reset = %d, "
 			"BootROM = 0x%02X, UKernel = 0x%02X, "
 			"MIA = 0x%02X, Auth = 0x%02X\n",
 			REG_FIELD_GET(GS_MIA_IN_RESET, status),
@@ -103,12 +102,12 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
 			REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
 
 		if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
-			drm_err(drm, "GuC firmware signature verification failed\n");
+			drm_dbg(drm, "GuC firmware signature verification failed\n");
 			ret = -ENOEXEC;
 		}
 
 		if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) {
-			drm_err(drm, "GuC firmware exception. EIP: %#x\n",
+			drm_dbg(drm, "GuC firmware exception. EIP: %#x\n",
 				intel_uncore_read(uncore, SOFT_SCRATCH(13)));
 			ret = -ENXIO;
 		}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index fdfeb4b9b0f54cdc18e6772fea40a467d7fc6eaa..23dc0aeaa0ab4b0e375d4b611710577d24ecf549 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -6,11 +6,14 @@
 #include <linux/circ_buf.h>
 
 #include "gem/i915_gem_context.h"
+#include "gt/gen8_engine_cs.h"
+#include "gt/intel_breadcrumbs.h"
 #include "gt/intel_context.h"
 #include "gt/intel_engine_pm.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
-#include "gt/intel_lrc_reg.h"
+#include "gt/intel_lrc.h"
+#include "gt/intel_mocs.h"
 #include "gt/intel_ring.h"
 
 #include "intel_guc_submission.h"
@@ -54,6 +57,8 @@
  *
  */
 
+#define GUC_REQUEST_SIZE 64 /* bytes */
+
 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 {
 	return rb_entry(rb, struct i915_priolist, node);
@@ -66,58 +71,6 @@ static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
 	return &base[id];
 }
 
-static int guc_workqueue_create(struct intel_guc *guc)
-{
-	return intel_guc_allocate_and_map_vma(guc, GUC_WQ_SIZE, &guc->workqueue,
-					      &guc->workqueue_vaddr);
-}
-
-static void guc_workqueue_destroy(struct intel_guc *guc)
-{
-	i915_vma_unpin_and_release(&guc->workqueue, I915_VMA_RELEASE_MAP);
-}
-
-/*
- * Initialise the process descriptor shared with the GuC firmware.
- */
-static int guc_proc_desc_create(struct intel_guc *guc)
-{
-	const u32 size = PAGE_ALIGN(sizeof(struct guc_process_desc));
-
-	return intel_guc_allocate_and_map_vma(guc, size, &guc->proc_desc,
-					      &guc->proc_desc_vaddr);
-}
-
-static void guc_proc_desc_destroy(struct intel_guc *guc)
-{
-	i915_vma_unpin_and_release(&guc->proc_desc, I915_VMA_RELEASE_MAP);
-}
-
-static void guc_proc_desc_init(struct intel_guc *guc)
-{
-	struct guc_process_desc *desc;
-
-	desc = memset(guc->proc_desc_vaddr, 0, sizeof(*desc));
-
-	/*
-	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
-	 * space for ring3 clients (set them as in mmap_ioctl) or kernel
-	 * space for kernel clients (map on demand instead? May make debug
-	 * easier to have it mapped).
-	 */
-	desc->wq_base_addr = 0;
-	desc->db_base_addr = 0;
-
-	desc->wq_size_bytes = GUC_WQ_SIZE;
-	desc->wq_status = WQ_STATUS_ACTIVE;
-	desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
-}
-
-static void guc_proc_desc_fini(struct intel_guc *guc)
-{
-	memset(guc->proc_desc_vaddr, 0, sizeof(struct guc_process_desc));
-}
-
 static int guc_stage_desc_pool_create(struct intel_guc *guc)
 {
 	u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) *
@@ -153,8 +106,6 @@ static void guc_stage_desc_init(struct intel_guc *guc)
 	desc->stage_id = 0;
 	desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
 
-	desc->process_desc = intel_guc_ggtt_offset(guc, guc->proc_desc);
-	desc->wq_addr = intel_guc_ggtt_offset(guc, guc->workqueue);
 	desc->wq_size = GUC_WQ_SIZE;
 }
 
@@ -166,62 +117,9 @@ static void guc_stage_desc_fini(struct intel_guc *guc)
 	memset(desc, 0, sizeof(*desc));
 }
 
-/* Construct a Work Item and append it to the GuC's Work Queue */
-static void guc_wq_item_append(struct intel_guc *guc,
-			       u32 target_engine, u32 context_desc,
-			       u32 ring_tail, u32 fence_id)
-{
-	/* wqi_len is in DWords, and does not include the one-word header */
-	const size_t wqi_size = sizeof(struct guc_wq_item);
-	const u32 wqi_len = wqi_size / sizeof(u32) - 1;
-	struct guc_process_desc *desc = guc->proc_desc_vaddr;
-	struct guc_wq_item *wqi;
-	u32 wq_off;
-
-	lockdep_assert_held(&guc->wq_lock);
-
-	/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
-	 * should not have the case where structure wqi is across page, neither
-	 * wrapped to the beginning. This simplifies the implementation below.
-	 *
-	 * XXX: if not the case, we need save data to a temp wqi and copy it to
-	 * workqueue buffer dw by dw.
-	 */
-	BUILD_BUG_ON(wqi_size != 16);
-
-	/* We expect the WQ to be active if we're appending items to it */
-	GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);
-
-	/* Free space is guaranteed. */
-	wq_off = READ_ONCE(desc->tail);
-	GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
-			      GUC_WQ_SIZE) < wqi_size);
-	GEM_BUG_ON(wq_off & (wqi_size - 1));
-
-	wqi = guc->workqueue_vaddr + wq_off;
-
-	/* Now fill in the 4-word work queue item */
-	wqi->header = WQ_TYPE_INORDER |
-		      (wqi_len << WQ_LEN_SHIFT) |
-		      (target_engine << WQ_TARGET_SHIFT) |
-		      WQ_NO_WCFLUSH_WAIT;
-	wqi->context_desc = context_desc;
-	wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
-	GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
-	wqi->fence_id = fence_id;
-
-	/* Make the update visible to GuC */
-	WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
-}
-
 static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
-	struct intel_engine_cs *engine = rq->engine;
-	u32 ctx_desc = rq->context->lrc.ccid;
-	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
-
-	guc_wq_item_append(guc, engine->guc_id, ctx_desc,
-			   ring_tail, rq->fence.seqno);
+	/* Left as a stub; this function will be filled in by future patches. */
 }
 
 /*
@@ -244,16 +142,12 @@ static void guc_submit(struct intel_engine_cs *engine,
 {
 	struct intel_guc *guc = &engine->gt->uc.guc;
 
-	spin_lock(&guc->wq_lock);
-
 	do {
 		struct i915_request *rq = *out++;
 
 		flush_ggtt_writes(rq->ring->vma);
 		guc_add_request(guc, rq);
 	} while (out != end);
-
-	spin_unlock(&guc->wq_lock);
 }
 
 static inline int rq_prio(const struct i915_request *rq)
@@ -388,17 +282,26 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
 	__tasklet_disable_sync_once(&execlists->tasklet);
 }
 
-static void
-cancel_port_requests(struct intel_engine_execlists * const execlists)
+static void guc_reset_state(struct intel_context *ce,
+			    struct intel_engine_cs *engine,
+			    u32 head,
+			    bool scrub)
 {
-	struct i915_request * const *port, *rq;
+	GEM_BUG_ON(!intel_context_is_pinned(ce));
 
-	/* Note we are only using the inflight and not the pending queue */
+	/*
+	 * We want a simple context + ring to execute the breadcrumb update.
+	 * We cannot rely on the context being intact across the GPU hang,
+	 * so clear it and rebuild just what we need for the breadcrumb.
+	 * All pending requests for this context will be zapped, and any
+	 * future request will be after userspace has had the opportunity
+	 * to recreate its own state.
+	 */
+	if (scrub)
+		lrc_init_regs(ce, engine, true);
 
-	for (port = execlists->active; (rq = *port); port++)
-		schedule_out(rq);
-	execlists->active =
-		memset(execlists->inflight, 0, sizeof(execlists->inflight));
+	/* Rerun the request; its payload has been neutered (if guilty). */
+	lrc_update_regs(ce, engine, head);
 }
 
 static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
@@ -409,8 +312,6 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 
 	spin_lock_irqsave(&engine->active.lock, flags);
 
-	cancel_port_requests(execlists);
-
 	/* Push back any incomplete requests for replay after the reset. */
 	rq = execlists_unwind_incomplete_requests(execlists);
 	if (!rq)
@@ -420,7 +321,7 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 		stalled = false;
 
 	__i915_request_reset(rq, stalled);
-	intel_lr_context_reset(engine, rq->context, rq->head, stalled);
+	guc_reset_state(rq->context, engine, rq->head, stalled);
 
 out_unlock:
 	spin_unlock_irqrestore(&engine->active.lock, flags);
@@ -451,9 +352,6 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
 	 */
 	spin_lock_irqsave(&engine->active.lock, flags);
 
-	/* Cancel the requests on the HW and clear the ELSP tracker. */
-	cancel_port_requests(execlists);
-
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->active.requests, sched.link) {
 		i915_request_set_error_once(rq, -EIO);
@@ -496,12 +394,6 @@ static void guc_reset_finish(struct intel_engine_cs *engine)
 		     atomic_read(&execlists->tasklet.count));
 }
 
-/*
- * Everything below here is concerned with setup & teardown, and is
- * therefore not part of the somewhat time-critical batch-submission
- * path of guc_submit() above.
- */
-
 /*
  * Set up the memory resources to be shared with the GuC (via the GGTT)
  * at firmware loading time.
@@ -522,30 +414,12 @@ int intel_guc_submission_init(struct intel_guc *guc)
 	 */
 	GEM_BUG_ON(!guc->stage_desc_pool);
 
-	ret = guc_workqueue_create(guc);
-	if (ret)
-		goto err_pool;
-
-	ret = guc_proc_desc_create(guc);
-	if (ret)
-		goto err_workqueue;
-
-	spin_lock_init(&guc->wq_lock);
-
 	return 0;
-
-err_workqueue:
-	guc_workqueue_destroy(guc);
-err_pool:
-	guc_stage_desc_pool_destroy(guc);
-	return ret;
 }
 
 void intel_guc_submission_fini(struct intel_guc *guc)
 {
 	if (guc->stage_desc_pool) {
-		guc_proc_desc_destroy(guc);
-		guc_workqueue_destroy(guc);
 		guc_stage_desc_pool_destroy(guc);
 	}
 }
@@ -576,33 +450,186 @@ static void guc_interrupts_release(struct intel_gt *gt)
 	intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask);
 }
 
-static void guc_set_default_submission(struct intel_engine_cs *engine)
+static int guc_context_alloc(struct intel_context *ce)
+{
+	return lrc_alloc(ce, ce->engine);
+}
+
+static int guc_context_pre_pin(struct intel_context *ce,
+			       struct i915_gem_ww_ctx *ww,
+			       void **vaddr)
+{
+	return lrc_pre_pin(ce, ce->engine, ww, vaddr);
+}
+
+static int guc_context_pin(struct intel_context *ce, void *vaddr)
 {
+	return lrc_pin(ce, ce->engine, vaddr);
+}
+
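+/*
+ * Context management for the GuC backend currently reuses the generic
+ * logical ring context (lrc) helpers.
+ */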
+static const struct intel_context_ops guc_context_ops = {
+	.alloc = guc_context_alloc,
+
+	.pre_pin = guc_context_pre_pin,
+	.pin = guc_context_pin,
+	.unpin = lrc_unpin,
+	.post_unpin = lrc_post_unpin,
+
+	.enter = intel_context_enter_engine,
+	.exit = intel_context_exit_engine,
+
+	.reset = lrc_reset,
+	.destroy = lrc_destroy,
+};
+
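+/*
+ * Prepare a request for submission: reserve ring space and invalidate
+ * GPU caches before any payload commands are emitted.
+ */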
+static int guc_request_alloc(struct i915_request *request)
+{
+	int ret;
+
+	GEM_BUG_ON(!intel_context_is_pinned(request->context));
+
 	/*
-	 * We inherit a bunch of functions from execlists that we'd like
-	 * to keep using:
-	 *
-	 *    engine->submit_request = execlists_submit_request;
-	 *    engine->cancel_requests = execlists_cancel_requests;
-	 *    engine->schedule = execlists_schedule;
+	 * Flush enough space to reduce the likelihood of waiting after
+	 * we start building the request - in which case we will just
+	 * have to repeat work.
+	 */
+	request->reserved_space += GUC_REQUEST_SIZE;
+
+	/*
+	 * Note that after this point, we have committed to using
+	 * this request as it is being used to both track the
+	 * state of engine initialisation and liveness of the
+	 * golden renderstate above. Think twice before you try
+	 * to cancel/unwind this request now.
+	 */
+
+	/* Unconditionally invalidate GPU caches and TLBs. */
+	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+	if (ret)
+		return ret;
+
+	request->reserved_space -= GUC_REQUEST_SIZE;
+	return 0;
+}
+
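+/*
+ * Add the request to the engine's priority queue; the caller must hold
+ * engine->active.lock.
+ */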
+static inline void queue_request(struct intel_engine_cs *engine,
+				 struct i915_request *rq,
+				 int prio)
+{
+	GEM_BUG_ON(!list_empty(&rq->sched.link));
+	list_add_tail(&rq->sched.link,
+		      i915_sched_lookup_priolist(engine, prio));
+	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
+static void guc_submit_request(struct i915_request *rq)
+{
+	struct intel_engine_cs *engine = rq->engine;
+	unsigned long flags;
+
+	/* Will be called from irq-context when using foreign fences. */
+	spin_lock_irqsave(&engine->active.lock, flags);
+
+	queue_request(engine, rq, rq_prio(rq));
+
+	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+	GEM_BUG_ON(list_empty(&rq->sched.link));
+
+	tasklet_hi_schedule(&engine->execlists.tasklet);
+
+	spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
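+/* Reset the seqno of every timeline tracked in the engine's status page. */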
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+	struct intel_timeline *tl;
+
+	list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+		intel_timeline_reset_seqno(tl);
+}
+
+static void guc_sanitize(struct intel_engine_cs *engine)
+{
+	/*
+	 * Poison residual state on resume, in case the suspend didn't!
 	 *
-	 * But we need to override the actual submission backend in order
-	 * to talk to the GuC.
+	 * We have to assume that across suspend/resume (or other loss
+	 * of control) that the contents of our pinned buffers has been
+	 * of control) that the contents of our pinned buffers have been
+	 * let's poison such state so that we more quickly spot when
+	 * we falsely assume it has been preserved.
 	 */
-	intel_execlists_set_default_submission(engine);
+	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
 
-	engine->execlists.tasklet.func = guc_submission_tasklet;
+	/*
+	 * The kernel_context HWSP is stored in the status_page. As above,
+	 * that may be lost on resume/initialisation, and so we need to
+	 * reset the value in the HWSP.
+	 */
+	sanitize_hwsp(engine);
 
-	/* do not use execlists park/unpark */
-	engine->park = engine->unpark = NULL;
+	/* And scrub the dirty cachelines for the HWSP */
+	clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
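+/*
+ * Program HWSTAM and point RING_HWS_PGA at the engine's status page in
+ * the GGTT.
+ */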
+static void setup_hwsp(struct intel_engine_cs *engine)
+{
+	intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
+
+	ENGINE_WRITE_FW(engine,
+			RING_HWS_PGA,
+			i915_ggtt_offset(engine->status_page.vma));
+}
+
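+/* Disable legacy ringbuffer mode and clear the STOP_RING bit. */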
+static void start_engine(struct intel_engine_cs *engine)
+{
+	ENGINE_WRITE_FW(engine,
+			RING_MODE_GEN7,
+			_MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+
+	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+	ENGINE_POSTING_READ(engine, RING_MI_MODE);
+}
+
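+/* Re-initialise an engine for GuC submission on resume. */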
+static int guc_resume(struct intel_engine_cs *engine)
+{
+	assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
+	intel_mocs_init_engine(engine);
+
+	intel_breadcrumbs_reset(engine->breadcrumbs);
+
+	setup_hwsp(engine);
+	start_engine(engine);
+
+	return 0;
+}
+
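+/* Install the GuC submission vfuncs and engine flags. */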
+static void guc_set_default_submission(struct intel_engine_cs *engine)
+{
+	engine->submit_request = guc_submit_request;
+	engine->schedule = i915_schedule;
+	engine->execlists.tasklet.func = guc_submission_tasklet;
 
 	engine->reset.prepare = guc_reset_prepare;
 	engine->reset.rewind = guc_reset_rewind;
 	engine->reset.cancel = guc_reset_cancel;
 	engine->reset.finish = guc_reset_finish;
 
-	engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
 	engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
+	engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+
+	/*
+	 * TODO: GuC supports timeslicing and semaphores as well, but they're
+	 * handled by the firmware so some minor tweaks are required before
+	 * enabling.
+	 *
+	 * engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+	 * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+	 */
+
+	engine->emit_bb_start = gen8_emit_bb_start;
 
 	/*
 	 * For the breadcrumb irq to work we need the interrupts to stay
@@ -613,35 +640,92 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
 	GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
 }
 
-void intel_guc_submission_enable(struct intel_guc *guc)
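+/* Engine release: stop the submission tasklet and free common engine state. */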
+static void guc_release(struct intel_engine_cs *engine)
 {
-	struct intel_gt *gt = guc_to_gt(guc);
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
+	engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
+	tasklet_kill(&engine->execlists.tasklet);
+
+	intel_engine_cleanup_common(engine);
+	lrc_fini_wa_ctx(engine);
+}
+
+static void guc_default_vfuncs(struct intel_engine_cs *engine)
+{
+	/* Default vfuncs which can be overridden by each engine. */
+
+	engine->resume = guc_resume;
+
+	engine->cops = &guc_context_ops;
+	engine->request_alloc = guc_request_alloc;
+
+	engine->emit_flush = gen8_emit_flush_xcs;
+	engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
+	engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
+	if (INTEL_GEN(engine->i915) >= 12) {
+		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
+		engine->emit_flush = gen12_emit_flush_xcs;
+	}
+	engine->set_default_submission = guc_set_default_submission;
+}
+
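+/* The render engine needs gen-specific flush and final breadcrumb emitters. */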
+static void rcs_submission_override(struct intel_engine_cs *engine)
+{
+	switch (INTEL_GEN(engine->i915)) {
+	case 12:
+		engine->emit_flush = gen12_emit_flush_rcs;
+		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
+		break;
+	case 11:
+		engine->emit_flush = gen11_emit_flush_rcs;
+		engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
+		break;
+	default:
+		engine->emit_flush = gen8_emit_flush_rcs;
+		engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+		break;
+	}
+}
+
+static inline void guc_default_irqs(struct intel_engine_cs *engine)
+{
+	engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
+}
+
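+/* Per-engine setup for GuC submission; pairs with guc_release() on teardown. */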
+int intel_guc_submission_setup(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *i915 = engine->i915;
 
 	/*
-	 * We're using GuC work items for submitting work through GuC. Since
-	 * we're coalescing multiple requests from a single context into a
-	 * single work item prior to assigning it to execlist_port, we can
-	 * never have more work items than the total number of ports (for all
-	 * engines). The GuC firmware is controlling the HEAD of work queue,
-	 * and it is guaranteed that it will remove the work item from the
-	 * queue before our request is completed.
+	 * The setup relies on several assumptions (e.g. irqs always enabled)
+	 * that are only valid on gen11+.
 	 */
-	BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) *
-		     sizeof(struct guc_wq_item) *
-		     I915_NUM_ENGINES > GUC_WQ_SIZE);
+	GEM_BUG_ON(INTEL_GEN(i915) < 11);
+
+	tasklet_init(&engine->execlists.tasklet,
+		     guc_submission_tasklet, (unsigned long)engine);
+
+	guc_default_vfuncs(engine);
+	guc_default_irqs(engine);
 
-	guc_proc_desc_init(guc);
+	if (engine->class == RENDER_CLASS)
+		rcs_submission_override(engine);
+
+	lrc_init_wa_ctx(engine);
+
+	/* Finally, take ownership and responsibility for cleanup! */
+	engine->sanitize = guc_sanitize;
+	engine->release = guc_release;
+
+	return 0;
+}
+
+void intel_guc_submission_enable(struct intel_guc *guc)
+{
 	guc_stage_desc_init(guc);
 
 	/* Take over from manual control of ELSP (execlists) */
-	guc_interrupts_capture(gt);
-
-	for_each_engine(engine, gt, id) {
-		engine->set_default_submission = guc_set_default_submission;
-		engine->set_default_submission(engine);
-	}
+	guc_interrupts_capture(guc_to_gt(guc));
 }
 
 void intel_guc_submission_disable(struct intel_guc *guc)
@@ -655,7 +739,6 @@ void intel_guc_submission_disable(struct intel_guc *guc)
 	guc_interrupts_release(gt);
 
 	guc_stage_desc_fini(guc);
-	guc_proc_desc_fini(guc);
 }
 
 static bool __guc_submission_selected(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 4cf9d3e5026372c87eb62c745e09948a66678adc..5f7b9e6347d04648956e688a6481976fe2448074 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -19,6 +19,7 @@ void intel_guc_submission_disable(struct intel_guc *guc);
 void intel_guc_submission_fini(struct intel_guc *guc);
 int intel_guc_preempt_work_create(struct intel_guc *guc);
 void intel_guc_preempt_work_destroy(struct intel_guc *guc);
+int intel_guc_submission_setup(struct intel_engine_cs *engine);
 bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
 
 static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 4e6070e95fe9c7be9626b56a9ca8327eb03f4423..6abb8f2dc33d6735137de03adda6652c843d6e41 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -15,6 +15,29 @@
 static const struct intel_uc_ops uc_ops_off;
 static const struct intel_uc_ops uc_ops_on;
 
+/* Resolve enable_guc=-1 ("auto") into an explicit platform default. */
+static void uc_expand_default_options(struct intel_uc *uc)
+{
+	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+
+	if (i915->params.enable_guc != -1)
+		return;
+
+	/* Don't enable GuC/HuC on pre-Gen12 */
+	if (INTEL_GEN(i915) < 12) {
+		i915->params.enable_guc = 0;
+		return;
+	}
+
+	/* Don't enable GuC/HuC on older Gen12 platforms */
+	if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
+		i915->params.enable_guc = 0;
+		return;
+	}
+
+	/* Default: enable HuC authentication only */
+	i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
+}
+
 /* Reset GuC providing us with fresh state for both GuC and HuC.
  */
 static int __intel_uc_reset_hw(struct intel_uc *uc)
@@ -52,9 +75,6 @@ static void __confirm_options(struct intel_uc *uc)
 		yesno(intel_uc_wants_guc_submission(uc)),
 		yesno(intel_uc_wants_huc(uc)));
 
-	if (i915->params.enable_guc == -1)
-		return;
-
 	if (i915->params.enable_guc == 0) {
 		GEM_BUG_ON(intel_uc_wants_guc(uc));
 		GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
@@ -79,8 +99,7 @@ static void __confirm_options(struct intel_uc *uc)
 			 "Incompatible option enable_guc=%d - %s\n",
 			 i915->params.enable_guc, "GuC submission is N/A");
 
-	if (i915->params.enable_guc & ~(ENABLE_GUC_SUBMISSION |
-					  ENABLE_GUC_LOAD_HUC))
+	if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
 		drm_info(&i915->drm,
 			 "Incompatible option enable_guc=%d - %s\n",
 			 i915->params.enable_guc, "undocumented flag");
@@ -88,6 +107,8 @@ static void __confirm_options(struct intel_uc *uc)
 
 void intel_uc_init_early(struct intel_uc *uc)
 {
+	uc_expand_default_options(uc);
+
 	intel_guc_init_early(&uc->guc);
 	intel_huc_init_early(&uc->huc);
 
@@ -175,19 +196,15 @@ static void guc_get_mmio_msg(struct intel_guc *guc)
 
 static void guc_handle_mmio_msg(struct intel_guc *guc)
 {
-	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
-
 	/* we need communication to be enabled to reply to GuC */
 	GEM_BUG_ON(!guc_communication_enabled(guc));
 
-	if (!guc->mmio_msg)
-		return;
-
-	spin_lock_irq(&i915->irq_lock);
-	intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
-	spin_unlock_irq(&i915->irq_lock);
-
-	guc->mmio_msg = 0;
+	spin_lock_irq(&guc->irq_lock);
+	if (guc->mmio_msg) {
+		intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
+		guc->mmio_msg = 0;
+	}
+	spin_unlock_irq(&guc->irq_lock);
 }
 
 static void guc_reset_interrupts(struct intel_guc *guc)
@@ -207,7 +224,8 @@ static void guc_disable_interrupts(struct intel_guc *guc)
 
 static int guc_enable_communication(struct intel_guc *guc)
 {
-	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+	struct intel_gt *gt = guc_to_gt(guc);
+	struct drm_i915_private *i915 = gt->i915;
 	int ret;
 
 	GEM_BUG_ON(guc_communication_enabled(guc));
@@ -227,9 +245,9 @@ static int guc_enable_communication(struct intel_guc *guc)
 	guc_enable_interrupts(guc);
 
 	/* check for CT messages received before we enabled interrupts */
-	spin_lock_irq(&i915->irq_lock);
+	spin_lock_irq(&gt->irq_lock);
 	intel_guc_ct_event_handler(&guc->ct);
-	spin_unlock_irq(&i915->irq_lock);
+	spin_unlock_irq(&gt->irq_lock);
 
 	drm_dbg(&i915->drm, "GuC communication enabled\n");
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 602f1a0bc587145849a85a726026f63eebbcfdbf..67b06fde1225957fe276db06e6ada66042eecfa6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -152,16 +152,11 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
 			uc_fw->path = NULL;
 		}
 	}
-
-	/* We don't want to enable GuC/HuC on pre-Gen11 by default */
-	if (i915->params.enable_guc == -1 && p < INTEL_ICELAKE)
-		uc_fw->path = NULL;
 }
 
 static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
 {
-	if (i915->params.enable_guc & (ENABLE_GUC_SUBMISSION |
-				       ENABLE_GUC_LOAD_HUC))
+	if (i915->params.enable_guc & ENABLE_GUC_MASK)
 		return i915->params.guc_firmware_path;
 	return "";
 }
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 16b582cb97ed708cd494440902b1f168bd45ba73..d54ea0e4681d69b2d6b834e4b2dbcf80e1b1ba0b 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -37,11 +37,18 @@
 #include <linux/slab.h>
 
 #include "i915_drv.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_lrc.h"
 #include "gt/intel_ring.h"
+#include "gt/intel_gt_requests.h"
 #include "gvt.h"
 #include "i915_pvinfo.h"
 #include "trace.h"
 
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_context.h"
+
 #define INVALID_OP    (~0U)
 
 #define OP_LEN_MI           9
@@ -454,6 +461,7 @@ enum {
 	RING_BUFFER_INSTRUCTION,
 	BATCH_BUFFER_INSTRUCTION,
 	BATCH_BUFFER_2ND_LEVEL,
+	RING_BUFFER_CTX,
 };
 
 enum {
@@ -495,6 +503,7 @@ struct parser_exec_state {
 	 */
 	int saved_buf_addr_type;
 	bool is_ctx_wa;
+	bool is_init_ctx;
 
 	const struct cmd_info *info;
 
@@ -708,6 +717,11 @@ static inline u32 cmd_val(struct parser_exec_state *s, int index)
 	return *cmd_ptr(s, index);
 }
 
+static inline bool is_init_ctx(struct parser_exec_state *s)
+{
+	return (s->buf_type == RING_BUFFER_CTX && s->is_init_ctx);
+}
+
 static void parser_exec_state_dump(struct parser_exec_state *s)
 {
 	int cnt = 0;
@@ -721,7 +735,8 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
 
 	gvt_dbg_cmd("  %s %s ip_gma(%08lx) ",
 			s->buf_type == RING_BUFFER_INSTRUCTION ?
-			"RING_BUFFER" : "BATCH_BUFFER",
+			"RING_BUFFER" : ((s->buf_type == RING_BUFFER_CTX) ?
+				"CTX_BUFFER" : "BATCH_BUFFER"),
 			s->buf_addr_type == GTT_BUFFER ?
 			"GTT" : "PPGTT", s->ip_gma);
 
@@ -756,7 +771,8 @@ static inline void update_ip_va(struct parser_exec_state *s)
 	if (WARN_ON(s->ring_head == s->ring_tail))
 		return;
 
-	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+	if (s->buf_type == RING_BUFFER_INSTRUCTION ||
+			s->buf_type == RING_BUFFER_CTX) {
 		unsigned long ring_top = s->ring_start + s->ring_size;
 
 		if (s->ring_head > s->ring_tail) {
@@ -820,68 +836,12 @@ static inline int cmd_length(struct parser_exec_state *s)
 	*addr = val; \
 } while (0)
 
-static bool is_shadowed_mmio(unsigned int offset)
-{
-	bool ret = false;
-
-	if ((offset == 0x2168) || /*BB current head register UDW */
-	    (offset == 0x2140) || /*BB current header register */
-	    (offset == 0x211c) || /*second BB header register UDW */
-	    (offset == 0x2114)) { /*second BB header register UDW */
-		ret = true;
-	}
-	return ret;
-}
-
-static inline bool is_force_nonpriv_mmio(unsigned int offset)
-{
-	return (offset >= 0x24d0 && offset < 0x2500);
-}
-
-static int force_nonpriv_reg_handler(struct parser_exec_state *s,
-		unsigned int offset, unsigned int index, char *cmd)
-{
-	struct intel_gvt *gvt = s->vgpu->gvt;
-	unsigned int data;
-	u32 ring_base;
-	u32 nopid;
-
-	if (!strcmp(cmd, "lri"))
-		data = cmd_val(s, index + 1);
-	else {
-		gvt_err("Unexpected forcenonpriv 0x%x write from cmd %s\n",
-			offset, cmd);
-		return -EINVAL;
-	}
-
-	ring_base = s->engine->mmio_base;
-	nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
-
-	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
-			data != nopid) {
-		gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
-			offset, data);
-		patch_value(s, cmd_ptr(s, index), nopid);
-		return 0;
-	}
-	return 0;
-}
-
 static inline bool is_mocs_mmio(unsigned int offset)
 {
 	return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
 		((offset >= 0xb020) && (offset <= 0xb0a0));
 }
 
-static int mocs_cmd_reg_handler(struct parser_exec_state *s,
-				unsigned int offset, unsigned int index)
-{
-	if (!is_mocs_mmio(offset))
-		return -EINVAL;
-	vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1);
-	return 0;
-}
-
 static int is_cmd_update_pdps(unsigned int offset,
 			      struct parser_exec_state *s)
 {
@@ -929,6 +889,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	struct intel_vgpu *vgpu = s->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
 	u32 ctx_sr_ctl;
+	u32 *vreg, vreg_old;
 
 	if (offset + 4 > gvt->device_info.mmio_size) {
 		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -936,34 +897,101 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 		return -EFAULT;
 	}
 
+	/* For an init context, just record which registers commands access. */
+	if (is_init_ctx(s)) {
+		struct intel_gvt_mmio_info *mmio_info;
+
+		intel_gvt_mmio_set_cmd_accessible(gvt, offset);
+		mmio_info = intel_gvt_find_mmio_info(gvt, offset);
+		if (mmio_info && mmio_info->write)
+			intel_gvt_mmio_set_cmd_write_patch(gvt, offset);
+		return 0;
+	}
+
 	if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
 		gvt_vgpu_err("%s access to non-render register (%x)\n",
 				cmd, offset);
 		return -EBADRQC;
 	}
 
-	if (is_shadowed_mmio(offset)) {
-		gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
-		return 0;
+	if (!strncmp(cmd, "srm", 3) ||
+			!strncmp(cmd, "lrm", 3)) {
+		if (offset != i915_mmio_reg_offset(GEN8_L3SQCREG4) &&
+				offset != 0x21f0) {
+			gvt_vgpu_err("%s access to register (%x)\n",
+					cmd, offset);
+			return -EPERM;
+		}
+		return 0;
 	}
 
-	if (is_mocs_mmio(offset) &&
-	    mocs_cmd_reg_handler(s, offset, index))
-		return -EINVAL;
+	if (!strncmp(cmd, "lrr-src", 7) ||
+			!strncmp(cmd, "lrr-dst", 7)) {
+		gvt_vgpu_err("not allowed cmd %s\n", cmd);
+		return -EPERM;
+	}
+
+	if (!strncmp(cmd, "pipe_ctrl", 9)) {
+		/* TODO: add LRI POST logic here */
+		return 0;
+	}
 
-	if (is_force_nonpriv_mmio(offset) &&
-		force_nonpriv_reg_handler(s, offset, index, cmd))
+	if (strncmp(cmd, "lri", 3))
 		return -EPERM;
 
+	/* Below are all the LRI handlers. */
+	vreg = &vgpu_vreg(s->vgpu, offset);
+	if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
+		gvt_vgpu_err("%s access to non-render register (%x)\n",
+				cmd, offset);
+		return -EBADRQC;
+	}
+
+	if (is_cmd_update_pdps(offset, s) &&
+	    cmd_pdp_mmio_update_handler(s, offset, index))
+		return -EINVAL;
+
 	if (offset == i915_mmio_reg_offset(DERRMR) ||
 		offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
 		/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
 		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
 	}
 
-	if (is_cmd_update_pdps(offset, s) &&
-	    cmd_pdp_mmio_update_handler(s, offset, index))
-		return -EINVAL;
+	if (is_mocs_mmio(offset))
+		*vreg = cmd_val(s, index + 1);
+
+	vreg_old = *vreg;
+
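+	/*
+	 * Run the tracked MMIO write handler on the LRI value and, if the
+	 * handler modified it, patch the command with the new value.
+	 */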
+	if (intel_gvt_mmio_is_cmd_write_patch(gvt, offset)) {
+		u32 cmdval_new, cmdval;
+		struct intel_gvt_mmio_info *mmio_info;
+
+		cmdval = cmd_val(s, index + 1);
+
+		mmio_info = intel_gvt_find_mmio_info(gvt, offset);
+		if (!mmio_info) {
+			cmdval_new = cmdval;
+		} else {
+			u64 ro_mask = mmio_info->ro_mask;
+			int ret;
+
+			if (likely(!ro_mask)) {
+				ret = mmio_info->write(s->vgpu, offset,
+						&cmdval, 4);
+			} else {
+				gvt_vgpu_err("try to write RO reg %x\n",
+						offset);
+				ret = -EBADRQC;
+			}
+			if (ret)
+				return ret;
+			cmdval_new = *vreg;
+		}
+		if (cmdval_new != cmdval)
+			patch_value(s, cmd_ptr(s, index + 1), cmdval_new);
+	}
+
+	/* Only patch the cmd; restore vreg if the write handler changed it. */
+	*vreg = vreg_old;
 
 	/* TODO
 	 * In order to let workload with inhibit context to generate
@@ -1215,6 +1243,8 @@ static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
 		s->buf_type = BATCH_BUFFER_INSTRUCTION;
 		ret = ip_gma_set(s, s->ret_ip_gma_bb);
 		s->buf_addr_type = s->saved_buf_addr_type;
+	} else if (s->buf_type == RING_BUFFER_CTX) {
+		ret = ip_gma_set(s, s->ring_tail);
 	} else {
 		s->buf_type = RING_BUFFER_INSTRUCTION;
 		s->buf_addr_type = GTT_BUFFER;
@@ -2763,7 +2793,8 @@ static int command_scan(struct parser_exec_state *s,
 	gma_bottom = rb_start +  rb_len;
 
 	while (s->ip_gma != gma_tail) {
-		if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+		if (s->buf_type == RING_BUFFER_INSTRUCTION ||
+				s->buf_type == RING_BUFFER_CTX) {
 			if (!(s->ip_gma >= rb_start) ||
 				!(s->ip_gma < gma_bottom)) {
 				gvt_vgpu_err("ip_gma %lx out of ring scope."
@@ -3056,6 +3087,172 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	return 0;
 }
 
+/* Generate dummy contexts by sending empty requests to the HW, and let
+ * the HW fill the Engine Contexts. These dummy contexts are used for
+ * initialization purposes (updating the reg whitelist), so they are
+ * referred to as init contexts here.
+ */
+void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_i915_private *dev_priv = gvt->gt->i915;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
+	struct i915_request *rq;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_request *requests[I915_NUM_ENGINES] = {};
+	bool is_ctx_pinned[I915_NUM_ENGINES] = {};
+	int ret = 0;
+
+	if (gvt->is_reg_whitelist_updated)
+		return;
+
+	for_each_engine(engine, &dev_priv->gt, id) {
+		ret = intel_context_pin(s->shadow[id]);
+		if (ret) {
+			gvt_vgpu_err("fail to pin shadow ctx\n");
+			goto out;
+		}
+		is_ctx_pinned[id] = true;
+
+		rq = i915_request_create(s->shadow[id]);
+		if (IS_ERR(rq)) {
+			gvt_vgpu_err("fail to alloc default request\n");
+			ret = -EIO;
+			goto out;
+		}
+		requests[id] = i915_request_get(rq);
+		i915_request_add(rq);
+	}
+
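+	/*
+	 * Wait for the empty requests to retire so that the HW has written
+	 * complete context images.
+	 */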
+	if (intel_gt_wait_for_idle(&dev_priv->gt,
+				I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+		ret = -EIO;
+		goto out;
+	}
+
+	/* scan init ctx to update cmd accessible list */
+	for_each_engine(engine, &dev_priv->gt, id) {
+		int size = engine->context_size - PAGE_SIZE;
+		void *vaddr;
+		struct parser_exec_state s;
+		struct drm_i915_gem_object *obj;
+		struct i915_request *rq;
+
+		rq = requests[id];
+		GEM_BUG_ON(!i915_request_completed(rq));
+		GEM_BUG_ON(!intel_context_is_pinned(rq->context));
+		obj = rq->context->state->obj;
+
+		if (!obj) {
+			ret = -EIO;
+			goto out;
+		}
+
+		i915_gem_object_set_cache_coherency(obj,
+						    I915_CACHE_LLC);
+
+		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+		if (IS_ERR(vaddr)) {
+			gvt_err("failed to pin init ctx obj, ring=%d, err=%lx\n",
+				id, PTR_ERR(vaddr));
+			ret = PTR_ERR(vaddr);
+			goto out;
+		}
+
+		s.buf_type = RING_BUFFER_CTX;
+		s.buf_addr_type = GTT_BUFFER;
+		s.vgpu = vgpu;
+		s.engine = engine;
+		s.ring_start = 0;
+		s.ring_size = size;
+		s.ring_head = 0;
+		s.ring_tail = size;
+		s.rb_va = vaddr + start;
+		s.workload = NULL;
+		s.is_ctx_wa = false;
+		s.is_init_ctx = true;
+
+		/* Skip the first RING_CTX_SIZE (0x50) dwords. */
+		ret = ip_gma_set(&s, RING_CTX_SIZE);
+		if (ret) {
+			i915_gem_object_unpin_map(obj);
+			goto out;
+		}
+
+		ret = command_scan(&s, 0, size, 0, size);
+		if (ret)
+			gvt_err("Scan init ctx error\n");
+
+		i915_gem_object_unpin_map(obj);
+	}
+
+out:
+	if (!ret)
+		gvt->is_reg_whitelist_updated = true;
+
+	for (id = 0; id < I915_NUM_ENGINES; id++) {
+		if (requests[id])
+			i915_request_put(requests[id]);
+
+		if (is_ctx_pinned[id])
+			intel_context_unpin(s->shadow[id]);
+	}
+}
+
+int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	unsigned long gma_head, gma_tail, gma_start, ctx_size;
+	struct parser_exec_state s;
+	int ring_id = workload->engine->id;
+	struct intel_context *ce = vgpu->submission.shadow[ring_id];
+	int ret;
+
+	GEM_BUG_ON(atomic_read(&ce->pin_count) < 0);
+
+	ctx_size = workload->engine->context_size - PAGE_SIZE;
+
+	/* Only the ring context is loaded to HW for an inhibit context, so
+	 * there is no need to scan the engine context.
+	 */
+	if (is_inhibit_context(ce))
+		return 0;
+
+	gma_start = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+	gma_head = 0;
+	gma_tail = ctx_size;
+
+	s.buf_type = RING_BUFFER_CTX;
+	s.buf_addr_type = GTT_BUFFER;
+	s.vgpu = workload->vgpu;
+	s.engine = workload->engine;
+	s.ring_start = gma_start;
+	s.ring_size = ctx_size;
+	s.ring_head = gma_start + gma_head;
+	s.ring_tail = gma_start + gma_tail;
+	s.rb_va = ce->lrc_reg_state;
+	s.workload = workload;
+	s.is_ctx_wa = false;
+	s.is_init_ctx = false;
+
+	/* Don't scan the first RING_CTX_SIZE (0x50) dwords, as they are the
+	 * ring context.
+	 */
+	ret = ip_gma_set(&s, gma_start + gma_head + RING_CTX_SIZE);
+	if (ret)
+		goto out;
+
+	ret = command_scan(&s, gma_head, gma_tail,
+		gma_start, ctx_size);
+out:
+	if (ret)
+		gvt_vgpu_err("scan shadow ctx error\n");
+
+	return ret;
+}
+
 static int init_cmd_table(struct intel_gvt *gvt)
 {
 	unsigned int gen_type = intel_gvt_get_device_type(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
index ab25d151932ac276cb7b29d24bc256d547cbf0d2..416d345e2816ff516fbc58bab1c8ac28d453d3dc 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -40,6 +40,7 @@
 
 struct intel_gvt;
 struct intel_shadow_wa_ctx;
+struct intel_vgpu;
 struct intel_vgpu_workload;
 
 void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
@@ -50,4 +51,8 @@ int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
 
 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
 
+void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu);
+
+int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload);
+
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index d62cd14605a32e333470eeafe945dedc13319b86..84ad74b37d667102bbacbe9e1ee53157221ef3a7 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -182,7 +182,4 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
 int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
 			       const struct intel_engine_cs *engine);
 
-void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
-			       intel_engine_mask_t engine_mask);
-
 #endif /*_GVT_EXECLIST_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index 67b6ede9e707c8111d8575fa96c9ecf1a9b71238..0daa3931aef739c2d70be185003433f692258916 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -38,6 +38,10 @@
 
 #include <linux/types.h>
 
+#include "display/intel_display.h"
+
+struct intel_vgpu;
+
 #define _PLANE_CTL_FORMAT_SHIFT		24
 #define _PLANE_CTL_TILED_SHIFT		10
 #define _PIPE_V_SRCSZ_SHIFT		0
@@ -98,8 +102,6 @@ enum DDI_PORT {
 	DDI_PORT_E	= 4
 };
 
-struct intel_gvt;
-
 /* color space conversion and gamma correction are not included */
 struct intel_vgpu_primary_plane_format {
 	u8	enabled;	/* plane is enabled */
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index b0e173f2d9904c8e9ec79b9fb8835d8c117a9660..3bf45672ef9870bcaf786f3b12edb74b40c6b06f 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -34,10 +34,19 @@
 #ifndef _GVT_GTT_H_
 #define _GVT_GTT_H_
 
-#define I915_GTT_PAGE_SHIFT         12
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+
+#include "gt/intel_gtt.h"
 
+struct intel_gvt;
+struct intel_vgpu;
 struct intel_vgpu_mm;
 
+#define I915_GTT_PAGE_SHIFT         12
+
 #define INTEL_GVT_INVALID_ADDR (~0UL)
 
 struct intel_gvt_gtt_entry {
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index cf3578e3f4ddae8a8439de15833c5fa202d08365..03c993d68f105a5d87a4b9cdf5de1383779a8fb0 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -33,6 +33,10 @@
 #ifndef _GVT_H_
 #define _GVT_H_
 
+#include <uapi/linux/pci_regs.h>
+
+#include "i915_drv.h"
+
 #include "debug.h"
 #include "hypercall.h"
 #include "mmio.h"
@@ -244,7 +248,7 @@ struct gvt_mmio_block {
 #define INTEL_GVT_MMIO_HASH_BITS 11
 
 struct intel_gvt_mmio {
-	u8 *mmio_attribute;
+	u16 *mmio_attribute;
 /* Register contains RO bits */
 #define F_RO		(1 << 0)
 /* Register contains graphics address */
@@ -263,6 +267,8 @@ struct intel_gvt_mmio {
  * logical context image
  */
 #define F_SR_IN_CTX	(1 << 7)
+/* Value of command write of this reg needs to be patched */
+#define F_CMD_WRITE_PATCH	(1 << 8)
 
 	struct gvt_mmio_block *mmio_block;
 	unsigned int num_mmio_block;
@@ -329,6 +335,7 @@ struct intel_gvt {
 		u32 *mocs_mmio_offset_list;
 		u32 mocs_mmio_offset_list_cnt;
 	} engine_mmio_list;
+	bool is_reg_whitelist_updated;
 
 	struct dentry *debugfs_root;
 };
@@ -412,6 +419,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
 #define vgpu_fence_base(vgpu) (vgpu->fence.base)
 #define vgpu_fence_sz(vgpu) (vgpu->fence.size)
 
+/* ring context size, i.e. the first 0x50 dwords */
+#define RING_CTX_SIZE 320
+
 struct intel_vgpu_creation_params {
 	__u64 handle;
 	__u64 low_gm_sz;  /* in MB */
@@ -683,6 +693,35 @@ static inline void intel_gvt_mmio_set_sr_in_ctx(
 }
 
 void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
+/**
+ * intel_gvt_mmio_set_cmd_write_patch - mark an MMIO whose cmd write needs
+ *				to be patched
+ * @gvt: a GVT device
+ * @offset: register offset
+ */
+static inline void intel_gvt_mmio_set_cmd_write_patch(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_WRITE_PATCH;
+}
+
+/**
+ * intel_gvt_mmio_is_cmd_write_patch - check if an mmio's cmd access needs to
+ * be patched
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if GPU command write to an MMIO should be patched
+ */
+static inline bool intel_gvt_mmio_is_cmd_write_patch(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
+}
+
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_debugfs_init(struct intel_gvt *gvt);
 void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index aa7e75cb3e6afb881af95610c8d9f19297ce750c..6eeaeecb7f852d58039b2c00c2978e4e0e0902f0 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -83,7 +83,7 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
 	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
 }
 
-static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
 						  unsigned int offset)
 {
 	struct intel_gvt_mmio_info *e;
@@ -96,7 +96,7 @@ static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
 }
 
 static int new_mmio_info(struct intel_gvt *gvt,
-		u32 offset, u8 flags, u32 size,
+		u32 offset, u16 flags, u32 size,
 		u32 addr_mask, u32 ro_mask, u32 device,
 		gvt_mmio_func read, gvt_mmio_func write)
 {
@@ -118,7 +118,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
 			return -ENOMEM;
 
 		info->offset = i;
-		p = find_mmio_info(gvt, info->offset);
+		p = intel_gvt_find_mmio_info(gvt, info->offset);
 		if (p) {
 			WARN(1, "dup mmio definition offset %x\n",
 				info->offset);
@@ -1651,7 +1651,7 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
 	return 0;
 }
 
-/**
+/*
  * FixMe:
  * If guest fills non-priv batch buffer on ApolloLake/Broxton as Mesa i965 did:
  * 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.)
@@ -1965,7 +1965,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
 	/* RING MODE */
 #define RING_REG(base) _MMIO((base) + 0x29c)
-	MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+	MMIO_RING_DFH(RING_REG, D_ALL,
+		F_MODE_MASK | F_CMD_ACCESS | F_CMD_WRITE_PATCH, NULL,
 		ring_mode_mmio_write);
 #undef RING_REG
 
@@ -2885,8 +2886,8 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
 	MMIO_D(_MMIO(0xb110), D_BDW);
 
-	MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
-		NULL, force_nonpriv_write);
+	MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
+		D_BDW_PLUS, NULL, force_nonpriv_write);
 
 	MMIO_D(_MMIO(0x44484), D_BDW_PLUS);
 	MMIO_D(_MMIO(0x4448c), D_BDW_PLUS);
@@ -3626,7 +3627,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 	/*
 	 * Normal tracked MMIOs.
 	 */
-	mmio_info = find_mmio_info(gvt, offset);
+	mmio_info = intel_gvt_find_mmio_info(gvt, offset);
 	if (!mmio_info) {
 		gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
 		goto default_rw;
@@ -3686,14 +3687,13 @@ void intel_gvt_restore_fence(struct intel_gvt *gvt)
 	}
 }
 
-static inline int mmio_pm_restore_handler(struct intel_gvt *gvt,
-					  u32 offset, void *data)
+static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data)
 {
 	struct intel_vgpu *vgpu = data;
 	struct drm_i915_private *dev_priv = gvt->gt->i915;
 
 	if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
-		I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+		intel_uncore_write(&dev_priv->uncore, _MMIO(offset), vgpu_vreg(vgpu, offset));
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
index fcd663811d3706e524d73c1afa4f23be93f71bb8..287cd142629ee1b1ac3e630732770c65356d5129 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.h
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -32,7 +32,10 @@
 #ifndef _GVT_INTERRUPT_H_
 #define _GVT_INTERRUPT_H_
 
-#include <linux/types.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+
+#include "i915_reg.h"
 
 enum intel_gvt_event_type {
 	RCS_MI_USER_INTERRUPT = 0,
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 9e862dc73579bb0ab848df67abbc8b3396a37309..7c26af39fbfc9fb9e33031358f92afe09be71e04 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -80,6 +80,9 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
 	int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
 	void *data);
 
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
+						  unsigned int offset);
+
 int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
 void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
 void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index afe574d6b3b52073c6bea40c4976898726da93a2..c9589e26af936b845854f556fe7b34f33e1e23c5 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -35,6 +35,7 @@
 
 #include "i915_drv.h"
 #include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
 #include "gt/intel_ring.h"
 #include "gvt.h"
 #include "trace.h"
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 3b25e7fe32f6fad5d8fa786c38b5e5ba28565932..b6b69777af4953fb2db84d54b990517d7881f7d3 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -36,6 +36,18 @@
 #ifndef __GVT_RENDER_H__
 #define __GVT_RENDER_H__
 
+#include <linux/types.h>
+
+#include "gt/intel_engine_types.h"
+#include "gt/intel_lrc_reg.h"
+#include "i915_reg.h"
+
+struct i915_request;
+struct intel_context;
+struct intel_engine_cs;
+struct intel_gvt;
+struct intel_vgpu;
+
 struct engine_mmio {
 	enum intel_engine_id id;
 	i915_reg_t reg;
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 6f92cde71971ea7aac72fe838f08e5b70009b5c9..550a456e936fc44ee69bad487cf31d4b7596f974 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -33,6 +33,8 @@
 #ifndef _GVT_MPT_H_
 #define _GVT_MPT_H_
 
+#include "gvt.h"
+
 /**
  * DOC: Hypervisor Service APIs for GVT-g Core Logic
  *
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index b58860dee970be01986f462edde7d4a9ff556ab3..244cc7320b541a1096481372461c9b6f677884f1 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -133,4 +133,6 @@
 #define RING_GFX_MODE(base)	_MMIO((base) + 0x29c)
 #define VF_GUARDBAND		_MMIO(0x83a4)
 
+/* Offset of the tile register value in the BCS ring context (dword 0x43). */
+#define BCS_TILE_REGISTER_VAL_OFFSET (0x43 * 4)
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index aed2ef6466a2db765db71bfa52696d698aa134ee..43f31c2eab14fd85b2f312fd9169d24f1015bdfa 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -37,6 +37,8 @@
 
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_context.h"
+#include "gt/intel_execlists_submission.h"
+#include "gt/intel_lrc.h"
 #include "gt/intel_ring.h"
 
 #include "i915_drv.h"
@@ -135,6 +137,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	int i;
 	bool skip = false;
 	int ring_id = workload->engine->id;
+	int ret;
 
 	GEM_BUG_ON(!intel_context_is_pinned(ctx));
 
@@ -161,16 +164,24 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 		COPY_REG(bb_per_ctx_ptr);
 		COPY_REG(rcs_indirect_ctx);
 		COPY_REG(rcs_indirect_ctx_offset);
-	}
+	} else if (workload->engine->id == BCS0) {
+		intel_gvt_hypervisor_read_gpa(vgpu,
+				workload->ring_context_gpa +
+				BCS_TILE_REGISTER_VAL_OFFSET,
+				(void *)shadow_ring_context +
+				BCS_TILE_REGISTER_VAL_OFFSET, 4);
+	}
 #undef COPY_REG
 #undef COPY_REG_MASKED
 
+	/* Don't copy the Ring Context (the first 0x50 dwords); only copy
+	 * the Engine Context part from the guest.
+	 */
 	intel_gvt_hypervisor_read_gpa(vgpu,
 			workload->ring_context_gpa +
-			sizeof(*shadow_ring_context),
+			RING_CTX_SIZE,
 			(void *)shadow_ring_context +
-			sizeof(*shadow_ring_context),
-			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+			RING_CTX_SIZE,
+			I915_GTT_PAGE_SIZE - RING_CTX_SIZE);
 
 	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
 
@@ -237,6 +248,11 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 		gpa_size = I915_GTT_PAGE_SIZE;
 		dst = context_base + (i << I915_GTT_PAGE_SHIFT);
 	}
+	ret = intel_gvt_scan_engine_context(workload);
+	if (ret) {
+		gvt_vgpu_err("invalid cmd found in guest context pages\n");
+		return ret;
+	}
 	s->last_ctx[ring_id].valid = true;
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 64e7a0b791c378e43827ddc9b72a7a6085b11b7b..7c86984a842f465f8fdaa75e0d548b796a95f58f 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -36,6 +36,11 @@
 #ifndef _GVT_SCHEDULER_H_
 #define _GVT_SCHEDULER_H_
 
+#include "gt/intel_engine_types.h"
+
+#include "execlist.h"
+#include "interrupt.h"
+
 struct intel_gvt_workload_scheduler {
 	struct intel_vgpu *current_vgpu;
 	struct intel_vgpu *next_vgpu;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index cbe5931906e0a8976aa7ecd4b0d820d05fd6ea42..6a16d0ca7cdac7bed326f55ede411f455efb0ed9 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -499,9 +499,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 
 	mutex_lock(&gvt->lock);
 	vgpu = __intel_gvt_create_vgpu(gvt, &param);
-	if (!IS_ERR(vgpu))
+	if (!IS_ERR(vgpu)) {
 		/* calculate left instance change for types */
 		intel_gvt_update_vgpu_types(gvt);
+		intel_gvt_update_reg_whitelist(vgpu);
+	}
 	mutex_unlock(&gvt->lock);
 
 	return vgpu;
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 9ed19b8bca60093a3ff9ab666ecf0f445dcf8183..3bc616cc1ad23145fbedf1c5b9643bc6f8b980e7 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -159,8 +159,7 @@ __active_retire(struct i915_active *ref)
 		GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);
 
 		/* Make the cached node available for reuse with any timeline */
-		if (IS_ENABLED(CONFIG_64BIT))
-			ref->cache->timeline = 0; /* needs cmpxchg(u64) */
+		ref->cache->timeline = 0; /* needs cmpxchg(u64) */
 	}
 
 	spin_unlock_irqrestore(&ref->tree_lock, flags);
@@ -256,7 +255,6 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
 		if (cached == idx)
 			return it;
 
-#ifdef CONFIG_64BIT /* for cmpxchg(u64) */
 		/*
 		 * An unclaimed cache [.timeline=0] can only be claimed once.
 		 *
@@ -267,9 +265,8 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
 		 * only the winner of that race will cmpxchg return the old
 		 * value of 0).
 		 */
-		if (!cached && !cmpxchg(&it->timeline, 0, idx))
+		if (!cached && !cmpxchg64(&it->timeline, 0, idx))
 			return it;
-#endif
 	}
 
 	BUILD_BUG_ON(offsetof(typeof(*it), node));
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index b0899b665e85207007de4c7f43f5b805523ac931..ced9a96d7c34be0bb922240db86eda36b3690965 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -26,6 +26,7 @@
  */
 
 #include "gt/intel_engine.h"
+#include "gt/intel_gpu_commands.h"
 
 #include "i915_drv.h"
 #include "i915_memcpy.h"
@@ -1142,7 +1143,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 	void *dst, *src;
 	int ret;
 
-	dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
+	dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
 	if (IS_ERR(dst))
 		return dst;
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 77e76b665098f2ca30d6a45c88a74c08a41a758c..88336ff4bf097a87a1636a92f8da003c5e116f24 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -45,6 +45,7 @@
 #include "i915_debugfs.h"
 #include "i915_debugfs_params.h"
 #include "i915_irq.h"
+#include "i915_scheduler.h"
 #include "i915_trace.h"
 #include "intel_pm.h"
 #include "intel_sideband.h"
@@ -209,7 +210,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	spin_unlock(&obj->vma.lock);
 
 	seq_printf(m, " (pinned x %d)", pin_count);
-	if (obj->stolen)
+	if (i915_gem_object_is_stolen(obj))
 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
 	if (i915_gem_object_is_framebuffer(obj))
 		seq_printf(m, " (fb)");
@@ -219,145 +220,6 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (%s)", engine->name);
 }
 
-struct file_stats {
-	struct i915_address_space *vm;
-	unsigned long count;
-	u64 total;
-	u64 active, inactive;
-	u64 closed;
-};
-
-static int per_file_stats(int id, void *ptr, void *data)
-{
-	struct drm_i915_gem_object *obj = ptr;
-	struct file_stats *stats = data;
-	struct i915_vma *vma;
-
-	if (IS_ERR_OR_NULL(obj) || !kref_get_unless_zero(&obj->base.refcount))
-		return 0;
-
-	stats->count++;
-	stats->total += obj->base.size;
-
-	spin_lock(&obj->vma.lock);
-	if (!stats->vm) {
-		for_each_ggtt_vma(vma, obj) {
-			if (!drm_mm_node_allocated(&vma->node))
-				continue;
-
-			if (i915_vma_is_active(vma))
-				stats->active += vma->node.size;
-			else
-				stats->inactive += vma->node.size;
-
-			if (i915_vma_is_closed(vma))
-				stats->closed += vma->node.size;
-		}
-	} else {
-		struct rb_node *p = obj->vma.tree.rb_node;
-
-		while (p) {
-			long cmp;
-
-			vma = rb_entry(p, typeof(*vma), obj_node);
-			cmp = i915_vma_compare(vma, stats->vm, NULL);
-			if (cmp == 0) {
-				if (drm_mm_node_allocated(&vma->node)) {
-					if (i915_vma_is_active(vma))
-						stats->active += vma->node.size;
-					else
-						stats->inactive += vma->node.size;
-
-					if (i915_vma_is_closed(vma))
-						stats->closed += vma->node.size;
-				}
-				break;
-			}
-			if (cmp < 0)
-				p = p->rb_right;
-			else
-				p = p->rb_left;
-		}
-	}
-	spin_unlock(&obj->vma.lock);
-
-	i915_gem_object_put(obj);
-	return 0;
-}
-
-#define print_file_stats(m, name, stats) do { \
-	if (stats.count) \
-		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \
-			   name, \
-			   stats.count, \
-			   stats.total, \
-			   stats.active, \
-			   stats.inactive, \
-			   stats.closed); \
-} while (0)
-
-static void print_context_stats(struct seq_file *m,
-				struct drm_i915_private *i915)
-{
-	struct file_stats kstats = {};
-	struct i915_gem_context *ctx, *cn;
-
-	spin_lock(&i915->gem.contexts.lock);
-	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
-		struct i915_gem_engines_iter it;
-		struct intel_context *ce;
-
-		if (!kref_get_unless_zero(&ctx->ref))
-			continue;
-
-		spin_unlock(&i915->gem.contexts.lock);
-
-		for_each_gem_engine(ce,
-				    i915_gem_context_lock_engines(ctx), it) {
-			if (intel_context_pin_if_active(ce)) {
-				rcu_read_lock();
-				if (ce->state)
-					per_file_stats(0,
-						       ce->state->obj, &kstats);
-				per_file_stats(0, ce->ring->vma->obj, &kstats);
-				rcu_read_unlock();
-				intel_context_unpin(ce);
-			}
-		}
-		i915_gem_context_unlock_engines(ctx);
-
-		mutex_lock(&ctx->mutex);
-		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
-			struct file_stats stats = {
-				.vm = rcu_access_pointer(ctx->vm),
-			};
-			struct drm_file *file = ctx->file_priv->file;
-			struct task_struct *task;
-			char name[80];
-
-			rcu_read_lock();
-			idr_for_each(&file->object_idr, per_file_stats, &stats);
-			rcu_read_unlock();
-
-			rcu_read_lock();
-			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
-			snprintf(name, sizeof(name), "%s",
-				 task ? task->comm : "<unknown>");
-			rcu_read_unlock();
-
-			print_file_stats(m, name, stats);
-		}
-		mutex_unlock(&ctx->mutex);
-
-		spin_lock(&i915->gem.contexts.lock);
-		list_safe_reset_next(ctx, cn, link);
-		i915_gem_context_put(ctx);
-	}
-	spin_unlock(&i915->gem.contexts.lock);
-
-	print_file_stats(m, "[k]contexts", kstats);
-}
-
 static int i915_gem_object_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -371,311 +233,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 	for_each_memory_region(mr, i915, id)
 		seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
 			   mr->name, &mr->total, &mr->avail);
-	seq_putc(m, '\n');
-
-	print_context_stats(m, i915);
-
-	return 0;
-}
-
-static void gen8_display_interrupt_info(struct seq_file *m)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	enum pipe pipe;
-
-	for_each_pipe(dev_priv, pipe) {
-		enum intel_display_power_domain power_domain;
-		intel_wakeref_t wakeref;
-
-		power_domain = POWER_DOMAIN_PIPE(pipe);
-		wakeref = intel_display_power_get_if_enabled(dev_priv,
-							     power_domain);
-		if (!wakeref) {
-			seq_printf(m, "Pipe %c power disabled\n",
-				   pipe_name(pipe));
-			continue;
-		}
-		seq_printf(m, "Pipe %c IMR:\t%08x\n",
-			   pipe_name(pipe),
-			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
-		seq_printf(m, "Pipe %c IIR:\t%08x\n",
-			   pipe_name(pipe),
-			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
-		seq_printf(m, "Pipe %c IER:\t%08x\n",
-			   pipe_name(pipe),
-			   I915_READ(GEN8_DE_PIPE_IER(pipe)));
-
-		intel_display_power_put(dev_priv, power_domain, wakeref);
-	}
-
-	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
-		   I915_READ(GEN8_DE_PORT_IMR));
-	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
-		   I915_READ(GEN8_DE_PORT_IIR));
-	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
-		   I915_READ(GEN8_DE_PORT_IER));
-
-	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
-		   I915_READ(GEN8_DE_MISC_IMR));
-	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
-		   I915_READ(GEN8_DE_MISC_IIR));
-	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
-		   I915_READ(GEN8_DE_MISC_IER));
-
-	seq_printf(m, "PCU interrupt mask:\t%08x\n",
-		   I915_READ(GEN8_PCU_IMR));
-	seq_printf(m, "PCU interrupt identity:\t%08x\n",
-		   I915_READ(GEN8_PCU_IIR));
-	seq_printf(m, "PCU interrupt enable:\t%08x\n",
-		   I915_READ(GEN8_PCU_IER));
-}
-
-static int i915_interrupt_info(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_engine_cs *engine;
-	intel_wakeref_t wakeref;
-	int i, pipe;
-
-	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
-	if (IS_CHERRYVIEW(dev_priv)) {
-		intel_wakeref_t pref;
-
-		seq_printf(m, "Master Interrupt Control:\t%08x\n",
-			   I915_READ(GEN8_MASTER_IRQ));
-
-		seq_printf(m, "Display IER:\t%08x\n",
-			   I915_READ(VLV_IER));
-		seq_printf(m, "Display IIR:\t%08x\n",
-			   I915_READ(VLV_IIR));
-		seq_printf(m, "Display IIR_RW:\t%08x\n",
-			   I915_READ(VLV_IIR_RW));
-		seq_printf(m, "Display IMR:\t%08x\n",
-			   I915_READ(VLV_IMR));
-		for_each_pipe(dev_priv, pipe) {
-			enum intel_display_power_domain power_domain;
-
-			power_domain = POWER_DOMAIN_PIPE(pipe);
-			pref = intel_display_power_get_if_enabled(dev_priv,
-								  power_domain);
-			if (!pref) {
-				seq_printf(m, "Pipe %c power disabled\n",
-					   pipe_name(pipe));
-				continue;
-			}
-
-			seq_printf(m, "Pipe %c stat:\t%08x\n",
-				   pipe_name(pipe),
-				   I915_READ(PIPESTAT(pipe)));
-
-			intel_display_power_put(dev_priv, power_domain, pref);
-		}
-
-		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-		seq_printf(m, "Port hotplug:\t%08x\n",
-			   I915_READ(PORT_HOTPLUG_EN));
-		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
-			   I915_READ(VLV_DPFLIPSTAT));
-		seq_printf(m, "DPINVGTT:\t%08x\n",
-			   I915_READ(DPINVGTT));
-		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
-
-		for (i = 0; i < 4; i++) {
-			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
-				   i, I915_READ(GEN8_GT_IMR(i)));
-			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
-				   i, I915_READ(GEN8_GT_IIR(i)));
-			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
-				   i, I915_READ(GEN8_GT_IER(i)));
-		}
-
-		seq_printf(m, "PCU interrupt mask:\t%08x\n",
-			   I915_READ(GEN8_PCU_IMR));
-		seq_printf(m, "PCU interrupt identity:\t%08x\n",
-			   I915_READ(GEN8_PCU_IIR));
-		seq_printf(m, "PCU interrupt enable:\t%08x\n",
-			   I915_READ(GEN8_PCU_IER));
-	} else if (INTEL_GEN(dev_priv) >= 11) {
-		if (HAS_MASTER_UNIT_IRQ(dev_priv))
-			seq_printf(m, "Master Unit Interrupt Control:  %08x\n",
-				   I915_READ(DG1_MSTR_UNIT_INTR));
-
-		seq_printf(m, "Master Interrupt Control:  %08x\n",
-			   I915_READ(GEN11_GFX_MSTR_IRQ));
-
-		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
-			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
-		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
-			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
-		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
-			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
-		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
-			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
-		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
-			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
-		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
-			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
-
-		seq_printf(m, "Display Interrupt Control:\t%08x\n",
-			   I915_READ(GEN11_DISPLAY_INT_CTL));
-
-		gen8_display_interrupt_info(m);
-	} else if (INTEL_GEN(dev_priv) >= 8) {
-		seq_printf(m, "Master Interrupt Control:\t%08x\n",
-			   I915_READ(GEN8_MASTER_IRQ));
-
-		for (i = 0; i < 4; i++) {
-			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
-				   i, I915_READ(GEN8_GT_IMR(i)));
-			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
-				   i, I915_READ(GEN8_GT_IIR(i)));
-			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
-				   i, I915_READ(GEN8_GT_IER(i)));
-		}
-
-		gen8_display_interrupt_info(m);
-	} else if (IS_VALLEYVIEW(dev_priv)) {
-		intel_wakeref_t pref;
-
-		seq_printf(m, "Display IER:\t%08x\n",
-			   I915_READ(VLV_IER));
-		seq_printf(m, "Display IIR:\t%08x\n",
-			   I915_READ(VLV_IIR));
-		seq_printf(m, "Display IIR_RW:\t%08x\n",
-			   I915_READ(VLV_IIR_RW));
-		seq_printf(m, "Display IMR:\t%08x\n",
-			   I915_READ(VLV_IMR));
-		for_each_pipe(dev_priv, pipe) {
-			enum intel_display_power_domain power_domain;
-
-			power_domain = POWER_DOMAIN_PIPE(pipe);
-			pref = intel_display_power_get_if_enabled(dev_priv,
-								  power_domain);
-			if (!pref) {
-				seq_printf(m, "Pipe %c power disabled\n",
-					   pipe_name(pipe));
-				continue;
-			}
-
-			seq_printf(m, "Pipe %c stat:\t%08x\n",
-				   pipe_name(pipe),
-				   I915_READ(PIPESTAT(pipe)));
-			intel_display_power_put(dev_priv, power_domain, pref);
-		}
-
-		seq_printf(m, "Master IER:\t%08x\n",
-			   I915_READ(VLV_MASTER_IER));
-
-		seq_printf(m, "Render IER:\t%08x\n",
-			   I915_READ(GTIER));
-		seq_printf(m, "Render IIR:\t%08x\n",
-			   I915_READ(GTIIR));
-		seq_printf(m, "Render IMR:\t%08x\n",
-			   I915_READ(GTIMR));
-
-		seq_printf(m, "PM IER:\t\t%08x\n",
-			   I915_READ(GEN6_PMIER));
-		seq_printf(m, "PM IIR:\t\t%08x\n",
-			   I915_READ(GEN6_PMIIR));
-		seq_printf(m, "PM IMR:\t\t%08x\n",
-			   I915_READ(GEN6_PMIMR));
-
-		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-		seq_printf(m, "Port hotplug:\t%08x\n",
-			   I915_READ(PORT_HOTPLUG_EN));
-		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
-			   I915_READ(VLV_DPFLIPSTAT));
-		seq_printf(m, "DPINVGTT:\t%08x\n",
-			   I915_READ(DPINVGTT));
-		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
-
-	} else if (!HAS_PCH_SPLIT(dev_priv)) {
-		seq_printf(m, "Interrupt enable:    %08x\n",
-			   I915_READ(GEN2_IER));
-		seq_printf(m, "Interrupt identity:  %08x\n",
-			   I915_READ(GEN2_IIR));
-		seq_printf(m, "Interrupt mask:      %08x\n",
-			   I915_READ(GEN2_IMR));
-		for_each_pipe(dev_priv, pipe)
-			seq_printf(m, "Pipe %c stat:         %08x\n",
-				   pipe_name(pipe),
-				   I915_READ(PIPESTAT(pipe)));
-	} else {
-		seq_printf(m, "North Display Interrupt enable:		%08x\n",
-			   I915_READ(DEIER));
-		seq_printf(m, "North Display Interrupt identity:	%08x\n",
-			   I915_READ(DEIIR));
-		seq_printf(m, "North Display Interrupt mask:		%08x\n",
-			   I915_READ(DEIMR));
-		seq_printf(m, "South Display Interrupt enable:		%08x\n",
-			   I915_READ(SDEIER));
-		seq_printf(m, "South Display Interrupt identity:	%08x\n",
-			   I915_READ(SDEIIR));
-		seq_printf(m, "South Display Interrupt mask:		%08x\n",
-			   I915_READ(SDEIMR));
-		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
-			   I915_READ(GTIER));
-		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
-			   I915_READ(GTIIR));
-		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
-			   I915_READ(GTIMR));
-	}
-
-	if (INTEL_GEN(dev_priv) >= 11) {
-		seq_printf(m, "RCS Intr Mask:\t %08x\n",
-			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
-		seq_printf(m, "BCS Intr Mask:\t %08x\n",
-			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
-		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
-			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
-		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
-			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
-		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
-			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
-		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
-			   I915_READ(GEN11_GUC_SG_INTR_MASK));
-		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
-			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
-		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
-			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
-		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
-			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
-
-	} else if (INTEL_GEN(dev_priv) >= 6) {
-		for_each_uabi_engine(engine, dev_priv) {
-			seq_printf(m,
-				   "Graphics Interrupt mask (%s):	%08x\n",
-				   engine->name, ENGINE_READ(engine, RING_IMR));
-		}
-	}
-
-	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
-	return 0;
-}
-
-static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *i915 = node_to_i915(m->private);
-	unsigned int i;
-
-	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
-
-	rcu_read_lock();
-	for (i = 0; i < i915->ggtt.num_fences; i++) {
-		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
-		struct i915_vma *vma = reg->vma;
-
-		seq_printf(m, "Fence %d, pin count = %d, object = ",
-			   i, atomic_read(&reg->pin_count));
-		if (!vma)
-			seq_puts(m, "unused");
-		else
-			i915_debugfs_describe_obj(m, vma->obj);
-		seq_putc(m, '\n');
-	}
-	rcu_read_unlock();
 
 	return 0;
 }
@@ -802,7 +359,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		u32 rpmodectl, freq_sts;
 
-		rpmodectl = I915_READ(GEN6_RP_CONTROL);
+		rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
 		seq_printf(m, "Video Turbo Mode: %s\n",
 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
 		seq_printf(m, "HW control enabled: %s\n",
@@ -847,19 +404,19 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
 		int max_freq;
 
-		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+		rp_state_limits = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_LIMITS);
 		if (IS_GEN9_LP(dev_priv)) {
-			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
-			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
+			rp_state_cap = intel_uncore_read(&dev_priv->uncore, BXT_RP_STATE_CAP);
+			gt_perf_status = intel_uncore_read(&dev_priv->uncore, BXT_GT_PERF_STATUS);
 		} else {
-			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+			rp_state_cap = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_CAP);
+			gt_perf_status = intel_uncore_read(&dev_priv->uncore, GEN6_GT_PERF_STATUS);
 		}
 
 		/* RPSTAT1 is in the GT power well */
 		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
-		reqf = I915_READ(GEN6_RPNSWREQ);
+		reqf = intel_uncore_read(&dev_priv->uncore, GEN6_RPNSWREQ);
 		if (INTEL_GEN(dev_priv) >= 9)
 			reqf >>= 23;
 		else {
@@ -871,24 +428,24 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		}
 		reqf = intel_gpu_freq(rps, reqf);
 
-		rpmodectl = I915_READ(GEN6_RP_CONTROL);
-		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
-		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
-
-		rpstat = I915_READ(GEN6_RPSTAT1);
-		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
-		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
-		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
-		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
-		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
-		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
+		rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
+		rpinclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_UP_THRESHOLD);
+		rpdeclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_DOWN_THRESHOLD);
+
+		rpstat = intel_uncore_read(&dev_priv->uncore, GEN6_RPSTAT1);
+		rpupei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+		rpcurup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
+		rpprevup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
+		rpdownei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+		rpcurdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
+		rpprevdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
 		cagf = intel_rps_read_actual_frequency(rps);
 
 		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
 		if (INTEL_GEN(dev_priv) >= 11) {
-			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
-			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
+			pm_ier = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+			pm_imr = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
 			/*
 			 * The equivalent to the PM ISR & IIR cannot be read
 			 * without affecting the current state of the system
@@ -896,17 +453,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 			pm_isr = 0;
 			pm_iir = 0;
 		} else if (INTEL_GEN(dev_priv) >= 8) {
-			pm_ier = I915_READ(GEN8_GT_IER(2));
-			pm_imr = I915_READ(GEN8_GT_IMR(2));
-			pm_isr = I915_READ(GEN8_GT_ISR(2));
-			pm_iir = I915_READ(GEN8_GT_IIR(2));
+			pm_ier = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IER(2));
+			pm_imr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IMR(2));
+			pm_isr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_ISR(2));
+			pm_iir = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IIR(2));
 		} else {
-			pm_ier = I915_READ(GEN6_PMIER);
-			pm_imr = I915_READ(GEN6_PMIMR);
-			pm_isr = I915_READ(GEN6_PMISR);
-			pm_iir = I915_READ(GEN6_PMIIR);
+			pm_ier = intel_uncore_read(&dev_priv->uncore, GEN6_PMIER);
+			pm_imr = intel_uncore_read(&dev_priv->uncore, GEN6_PMIMR);
+			pm_isr = intel_uncore_read(&dev_priv->uncore, GEN6_PMISR);
+			pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
 		}
-		pm_mask = I915_READ(GEN6_PMINTRMSK);
+		pm_mask = intel_uncore_read(&dev_priv->uncore, GEN6_PMINTRMSK);
 
 		seq_printf(m, "Video Turbo Mode: %s\n",
 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
@@ -936,27 +493,27 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
 		seq_printf(m, "CAGF: %dMHz\n", cagf);
-		seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+		seq_printf(m, "RP CUR UP EI: %d (%lldns)\n",
 			   rpupei,
 			   intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei));
-		seq_printf(m, "RP CUR UP: %d (%dun)\n",
+		seq_printf(m, "RP CUR UP: %d (%lldun)\n",
 			   rpcurup,
 			   intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup));
-		seq_printf(m, "RP PREV UP: %d (%dns)\n",
+		seq_printf(m, "RP PREV UP: %d (%lldns)\n",
 			   rpprevup,
 			   intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup));
 		seq_printf(m, "Up threshold: %d%%\n",
 			   rps->power.up_threshold);
 
-		seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+		seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n",
 			   rpdownei,
 			   intel_gt_pm_interval_to_ns(&dev_priv->gt,
 						      rpdownei));
-		seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+		seq_printf(m, "RP CUR DOWN: %d (%lldns)\n",
 			   rpcurdown,
 			   intel_gt_pm_interval_to_ns(&dev_priv->gt,
 						      rpcurdown));
-		seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+		seq_printf(m, "RP PREV DOWN: %d (%lldns)\n",
 			   rpprevdown,
 			   intel_gt_pm_interval_to_ns(&dev_priv->gt,
 						      rpprevdown));
@@ -1011,111 +568,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static int i915_ring_freq_table(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_rps *rps = &dev_priv->gt.rps;
-	unsigned int max_gpu_freq, min_gpu_freq;
-	intel_wakeref_t wakeref;
-	int gpu_freq, ia_freq;
-
-	if (!HAS_LLC(dev_priv))
-		return -ENODEV;
-
-	min_gpu_freq = rps->min_freq;
-	max_gpu_freq = rps->max_freq;
-	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
-		/* Convert GT frequency to 50 HZ units */
-		min_gpu_freq /= GEN9_FREQ_SCALER;
-		max_gpu_freq /= GEN9_FREQ_SCALER;
-	}
-
-	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
-
-	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
-		ia_freq = gpu_freq;
-		sandybridge_pcode_read(dev_priv,
-				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
-				       &ia_freq, NULL);
-		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
-			   intel_gpu_freq(rps,
-					  (gpu_freq *
-					   (IS_GEN9_BC(dev_priv) ||
-					    INTEL_GEN(dev_priv) >= 10 ?
-					    GEN9_FREQ_SCALER : 1))),
-			   ((ia_freq >> 0) & 0xff) * 100,
-			   ((ia_freq >> 8) & 0xff) * 100);
-	}
-	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
-	return 0;
-}
-
-static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
-{
-	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
-		   ring->space, ring->head, ring->tail, ring->emit);
-}
-
-static int i915_context_status(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *i915 = node_to_i915(m->private);
-	struct i915_gem_context *ctx, *cn;
-
-	spin_lock(&i915->gem.contexts.lock);
-	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
-		struct i915_gem_engines_iter it;
-		struct intel_context *ce;
-
-		if (!kref_get_unless_zero(&ctx->ref))
-			continue;
-
-		spin_unlock(&i915->gem.contexts.lock);
-
-		seq_puts(m, "HW context ");
-		if (ctx->pid) {
-			struct task_struct *task;
-
-			task = get_pid_task(ctx->pid, PIDTYPE_PID);
-			if (task) {
-				seq_printf(m, "(%s [%d]) ",
-					   task->comm, task->pid);
-				put_task_struct(task);
-			}
-		} else if (IS_ERR(ctx->file_priv)) {
-			seq_puts(m, "(deleted) ");
-		} else {
-			seq_puts(m, "(kernel) ");
-		}
-
-		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
-		seq_putc(m, '\n');
-
-		for_each_gem_engine(ce,
-				    i915_gem_context_lock_engines(ctx), it) {
-			if (intel_context_pin_if_active(ce)) {
-				seq_printf(m, "%s: ", ce->engine->name);
-				if (ce->state)
-					i915_debugfs_describe_obj(m, ce->state->obj);
-				describe_ctx_ring(m, ce->ring);
-				seq_putc(m, '\n');
-				intel_context_unpin(ce);
-			}
-		}
-		i915_gem_context_unlock_engines(ctx);
-
-		seq_putc(m, '\n');
-
-		spin_lock(&i915->gem.contexts.lock);
-		list_safe_reset_next(ctx, cn, link);
-		i915_gem_context_put(ctx);
-	}
-	spin_unlock(&i915->gem.contexts.lock);
-
-	return 0;
-}
-
 static const char *swizzle_string(unsigned swizzle)
 {
 	switch (swizzle) {
@@ -1193,20 +645,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static const char *rps_power_to_str(unsigned int power)
-{
-	static const char * const strings[] = {
-		[LOW_POWER] = "low power",
-		[BETWEEN] = "mixed",
-		[HIGH_POWER] = "high power",
-	};
-
-	if (power >= ARRAY_SIZE(strings) || !strings[power])
-		return "unknown";
-
-	return strings[power];
-}
-
 static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1231,42 +669,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 		   intel_gpu_freq(rps, rps->efficient_freq),
 		   intel_gpu_freq(rps, rps->boost_freq));
 
-	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
-
-	if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) {
-		u32 rpup, rpupei;
-		u32 rpdown, rpdownei;
-
-		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
-		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
-		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
-		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
-		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
-		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
-			   rps_power_to_str(rps->power.mode));
-		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
-			   rpup && rpupei ? 100 * rpup / rpupei : 0,
-			   rps->power.up_threshold);
-		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
-			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
-			   rps->power.down_threshold);
-	} else {
-		seq_puts(m, "\nRPS Autotuning inactive\n");
-	}
-
-	return 0;
-}
-
-static int i915_llc(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	const bool edram = INTEL_GEN(dev_priv) > 8;
-
-	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
-	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
-		   dev_priv->edram_size_mb);
+	seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
 
 	return 0;
 }
@@ -1280,7 +683,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 		seq_puts(m, "Runtime power management not supported\n");
 
 	seq_printf(m, "Runtime power status: %s\n",
-		   enableddisabled(!dev_priv->power_domains.wakeref));
+		   enableddisabled(!dev_priv->power_domains.init_wakeref));
 
 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
 	seq_printf(m, "IRQs disabled: %s\n",
@@ -1306,34 +709,28 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 
 static int i915_engine_info(struct seq_file *m, void *unused)
 {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
 	struct intel_engine_cs *engine;
 	intel_wakeref_t wakeref;
 	struct drm_printer p;
 
-	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
-	seq_printf(m, "GT awake? %s [%d]\n",
-		   yesno(dev_priv->gt.awake),
-		   atomic_read(&dev_priv->gt.wakeref.count));
-	seq_printf(m, "CS timestamp frequency: %u Hz\n",
-		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_hz);
+	seq_printf(m, "GT awake? %s [%d], %llums\n",
+		   yesno(i915->gt.awake),
+		   atomic_read(&i915->gt.wakeref.count),
+		   ktime_to_ms(intel_gt_get_awake_time(&i915->gt)));
+	seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
+		   i915->gt.clock_frequency,
+		   i915->gt.clock_period_ns);
 
 	p = drm_seq_file_printer(m);
-	for_each_uabi_engine(engine, dev_priv)
+	for_each_uabi_engine(engine, i915)
 		intel_engine_dump(engine, &p, "%s\n", engine->name);
 
-	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+	intel_gt_show_timelines(&i915->gt, &p, i915_request_show_with_schedule);
 
-	return 0;
-}
-
-static int i915_shrinker_info(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *i915 = node_to_i915(m->private);
-
-	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
-	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 
 	return 0;
 }
@@ -1411,7 +808,7 @@ i915_perf_noa_delay_set(void *data, u64 val)
 	 * This would lead to infinite waits as we're doing timestamp
 	 * difference on the CS with only 32bits.
 	 */
-	if (i915_cs_timestamp_ns_to_ticks(i915, val) > U32_MAX)
+	if (intel_gt_ns_to_clock_interval(&i915->gt, val) > U32_MAX)
 		return -EINVAL;
 
 	atomic64_set(&i915->perf.noa_programming_delay, val);
@@ -1529,55 +926,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
 			i915_drop_caches_get, i915_drop_caches_set,
 			"0x%08llx\n");
 
-static int
-i915_cache_sharing_get(void *data, u64 *val)
-{
-	struct drm_i915_private *dev_priv = data;
-	intel_wakeref_t wakeref;
-	u32 snpcr = 0;
-
-	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
-		return -ENODEV;
-
-	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
-		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
-
-	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
-
-	return 0;
-}
-
-static int
-i915_cache_sharing_set(void *data, u64 val)
-{
-	struct drm_i915_private *dev_priv = data;
-	intel_wakeref_t wakeref;
-
-	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
-		return -ENODEV;
-
-	if (val > 3)
-		return -EINVAL;
-
-	drm_dbg(&dev_priv->drm,
-		"Manually setting uncore sharing to %llu\n", val);
-	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
-		u32 snpcr;
-
-		/* Update the cache sharing policy here as well */
-		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
-		snpcr &= ~GEN6_MBC_SNPCR_MASK;
-		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
-		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
-	}
-
-	return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
-			i915_cache_sharing_get, i915_cache_sharing_set,
-			"%llu\n");
-
 static int i915_sseu_status(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -1621,16 +969,10 @@ static const struct file_operations i915_forcewake_fops = {
 static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
-	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
-	{"i915_gem_interrupt", i915_interrupt_info, 0},
 	{"i915_frequency_info", i915_frequency_info, 0},
-	{"i915_ring_freq_table", i915_ring_freq_table, 0},
-	{"i915_context_status", i915_context_status, 0},
 	{"i915_swizzle_info", i915_swizzle_info, 0},
-	{"i915_llc", i915_llc, 0},
 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
 	{"i915_engine_info", i915_engine_info, 0},
-	{"i915_shrinker_info", i915_shrinker_info, 0},
 	{"i915_wa_registers", i915_wa_registers, 0},
 	{"i915_sseu_status", i915_sseu_status, 0},
 	{"i915_rps_boost_info", i915_rps_boost_info, 0},
@@ -1643,7 +985,6 @@ static const struct i915_debugfs_files {
 } i915_debugfs_files[] = {
 	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
 	{"i915_wedged", &i915_wedged_fops},
-	{"i915_cache_sharing", &i915_cache_sharing_fops},
 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
 	{"i915_error_state", &i915_error_state_fops},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 99eb0d7bbc447fb7b4c58798ce66e6752189bc18..8e9cb44e66e530914801b1a43c5c5a454efe41ab 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,12 +58,14 @@
 #include "display/intel_hotplug.h"
 #include "display/intel_overlay.h"
 #include "display/intel_pipe_crc.h"
+#include "display/intel_pps.h"
 #include "display/intel_sprite.h"
 #include "display/intel_vga.h"
 
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_ioctls.h"
 #include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_pm.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_rc6.h"
@@ -410,6 +412,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
 	intel_setup_mchbar(dev_priv);
+	intel_device_info_runtime_init(dev_priv);
 
 	ret = intel_gt_init_mmio(&dev_priv->gt);
 	if (ret)
@@ -516,8 +519,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 	if (i915_inject_probe_failure(dev_priv))
 		return -ENODEV;
 
-	intel_device_info_runtime_init(dev_priv);
-
 	if (HAS_PPGTT(dev_priv)) {
 		if (intel_vgpu_active(dev_priv) &&
 		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
@@ -609,14 +610,15 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 		goto err_msi;
 
 	intel_opregion_setup(dev_priv);
+
+	intel_pcode_init(dev_priv);
+
 	/*
-	 * Fill the dram structure to get the system raw bandwidth and
-	 * dram info. This will be used for memory latency calculation.
+	 * Fill the dram structure to get the system dram info. This will be
+	 * used for memory latency calculation.
 	 */
 	intel_dram_detect(dev_priv);
 
-	intel_pcode_init(dev_priv);
-
 	intel_bw_init_hw(dev_priv);
 
 	return 0;
@@ -733,6 +735,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 	 * events.
 	 */
 	drm_kms_helper_poll_fini(&dev_priv->drm);
+	drm_atomic_helper_shutdown(&dev_priv->drm);
 
 	intel_gt_driver_unregister(&dev_priv->gt);
 	acpi_video_unregister();
@@ -935,8 +938,6 @@ void i915_driver_remove(struct drm_i915_private *i915)
 
 	i915_gem_suspend(i915);
 
-	drm_atomic_helper_shutdown(&i915->drm);
-
 	intel_gvt_driver_remove(i915);
 
 	intel_modeset_driver_remove(i915);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c6964f82a1bb68268d837d311dad592a37d33e92..26d69d06aa6d131809fb6be42cba048d344e6768 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -79,9 +79,9 @@
 #include "gem/i915_gem_shrinker.h"
 #include "gem/i915_gem_stolen.h"
 
-#include "gt/intel_lrc.h"
 #include "gt/intel_engine.h"
 #include "gt/intel_gt_types.h"
+#include "gt/intel_region_lmem.h"
 #include "gt/intel_workarounds.h"
 #include "gt/uc/intel_uc.h"
 
@@ -103,7 +103,6 @@
 #include "i915_vma.h"
 #include "i915_irq.h"
 
-#include "intel_region_lmem.h"
 
 /* General customization:
  */
@@ -416,6 +415,7 @@ struct intel_fbc {
 		u16 gen9_wa_cfb_stride;
 		u16 interval;
 		s8 fence_id;
+		bool psr2_active;
 	} state_cache;
 
 	/*
@@ -1122,23 +1122,11 @@ struct drm_i915_private {
 		 * crtc_state->wm.need_postvbl_update.
 		 */
 		struct mutex wm_mutex;
-
-		/*
-		 * Set during HW readout of watermarks/DDB.  Some platforms
-		 * need to know when we're still using BIOS-provided values
-		 * (which we don't fully trust).
-		 *
-		 * FIXME get rid of this.
-		 */
-		bool distrust_bios_wm;
 	} wm;
 
 	struct dram_info {
-		bool valid;
-		bool is_16gb_dimm;
+		bool wm_lv_0_adjust_needed;
 		u8 num_channels;
-		u8 ranks;
-		u32 bandwidth_kbps;
 		bool symmetric_memory;
 		enum intel_dram_type {
 			INTEL_DRAM_UNKNOWN,
@@ -1147,6 +1135,7 @@ struct drm_i915_private {
 			INTEL_DRAM_LPDDR3,
 			INTEL_DRAM_LPDDR4
 		} type;
+		u8 num_qgv_points;
 	} dram_info;
 
 	struct intel_bw_info {
@@ -1169,9 +1158,6 @@ struct drm_i915_private {
 		struct i915_gem_contexts {
 			spinlock_t lock; /* locks list */
 			struct list_head list;
-
-			struct llist_head free_list;
-			struct work_struct free_work;
 		} contexts;
 
 		/*
@@ -1185,6 +1171,8 @@ struct drm_i915_private {
 		struct file *mmap_singleton;
 	} gem;
 
+	u8 framestart_delay;
+
 	u8 pch_ssc_use;
 
 	/* For i915gm/i945gm vblank irq workaround */
@@ -1569,16 +1557,30 @@ enum {
 	TGL_REVID_D0,
 };
 
-extern const struct i915_rev_steppings tgl_uy_revids[];
-extern const struct i915_rev_steppings tgl_revids[];
+#define TGL_UY_REVIDS_SIZE	4
+#define TGL_REVIDS_SIZE		2
+
+extern const struct i915_rev_steppings tgl_uy_revids[TGL_UY_REVIDS_SIZE];
+extern const struct i915_rev_steppings tgl_revids[TGL_REVIDS_SIZE];
 
 static inline const struct i915_rev_steppings *
 tgl_revids_get(struct drm_i915_private *dev_priv)
 {
-	if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv))
-		return &tgl_uy_revids[INTEL_REVID(dev_priv)];
-	else
-		return &tgl_revids[INTEL_REVID(dev_priv)];
+	u8 revid = INTEL_REVID(dev_priv);
+	u8 size;
+	const struct i915_rev_steppings *tgl_revid_tbl;
+
+	if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
+		tgl_revid_tbl = tgl_uy_revids;
+		size = ARRAY_SIZE(tgl_uy_revids);
+	} else {
+		tgl_revid_tbl = tgl_revids;
+		size = ARRAY_SIZE(tgl_revids);
+	}
+
+	revid = min_t(u8, revid, size - 1);
+
+	return &tgl_revid_tbl[revid];
 }
 
 #define IS_TGL_DISP_REVID(p, since, until) \
@@ -1588,14 +1590,14 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
 
 #define IS_TGL_UY_GT_REVID(p, since, until) \
 	((IS_TGL_U(p) || IS_TGL_Y(p)) && \
-	 tgl_uy_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
-	 tgl_uy_revids[INTEL_REVID(p)].gt_stepping <= (until))
+	 tgl_revids_get(p)->gt_stepping >= (since) && \
+	 tgl_revids_get(p)->gt_stepping <= (until))
 
 #define IS_TGL_GT_REVID(p, since, until) \
 	(IS_TIGERLAKE(p) && \
 	 !(IS_TGL_U(p) || IS_TGL_Y(p)) && \
-	 tgl_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
-	 tgl_revids[INTEL_REVID(p)].gt_stepping <= (until))
+	 tgl_revids_get(p)->gt_stepping >= (since) && \
+	 tgl_revids_get(p)->gt_stepping <= (until))
 
 #define RKL_REVID_A0		0x0
 #define RKL_REVID_B0		0x1
@@ -1646,8 +1648,6 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
 		(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
 #define HAS_LOGICAL_RING_ELSQ(dev_priv) \
 		(INTEL_INFO(dev_priv)->has_logical_ring_elsq)
-#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
-		(INTEL_INFO(dev_priv)->has_logical_ring_preemption)
 
 #define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq)
 
@@ -1749,10 +1749,17 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
 
 #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
 
+#define HAS_VRR(i915)	(INTEL_GEN(i915) >= 12)
+
 /* Only valid when HAS_DISPLAY() is true */
 #define INTEL_DISPLAY_ENABLED(dev_priv) \
 	(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
 
+static inline bool run_as_guest(void)
+{
+	return !hypervisor_is_type(X86_HYPER_NATIVE);
+}
+
 static inline bool intel_vtd_active(void)
 {
 #ifdef CONFIG_INTEL_IOMMU
@@ -1761,7 +1768,7 @@ static inline bool intel_vtd_active(void)
 #endif
 
 	/* Running as a guest, we assume the host is enforcing VT'd */
-	return !hypervisor_is_type(X86_HYPER_NATIVE);
+	return run_as_guest();
 }
 
 static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
@@ -1793,8 +1800,6 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
 void i915_gem_init_early(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
-int i915_gem_freeze(struct drm_i915_private *dev_priv);
-int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
 
 struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915);
 
@@ -1967,43 +1972,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
 
-#define __I915_REG_OP(op__, dev_priv__, ...) \
-	intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
-
-#define I915_READ(reg__)	 __I915_REG_OP(read, dev_priv, (reg__))
-#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
-
-#define POSTING_READ(reg__)	__I915_REG_OP(posting_read, dev_priv, (reg__))
-
-/* These are untraced mmio-accessors that are only valid to be used inside
- * critical sections, such as inside IRQ handlers, where forcewake is explicitly
- * controlled.
- *
- * Think twice, and think again, before using these.
- *
- * As an example, these accessors can possibly be used between:
- *
- * spin_lock_irq(&dev_priv->uncore.lock);
- * intel_uncore_forcewake_get__locked();
- *
- * and
- *
- * intel_uncore_forcewake_put__locked();
- * spin_unlock_irq(&dev_priv->uncore.lock);
- *
- *
- * Note: some registers may not need forcewake held, so
- * intel_uncore_forcewake_{get,put} can be omitted, see
- * intel_uncore_forcewake_for_reg().
- *
- * Certain architectures will die if the same cacheline is concurrently accessed
- * by different clients (e.g. on Ivybridge). Access to registers should
- * therefore generally be serialised, by either the dev_priv->uncore.lock or
- * a more localised lock guarding all access to that bank of registers.
- */
-#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
-#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
-
 /* i915_mm.c */
 int remap_io_mapping(struct vm_area_struct *vma,
 		     unsigned long addr, unsigned long pfn, unsigned long size,
@@ -2026,16 +1994,4 @@ i915_coherent_map_type(struct drm_i915_private *i915)
 	return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
 }
 
-static inline u64 i915_cs_timestamp_ns_to_ticks(struct drm_i915_private *i915, u64 val)
-{
-	return DIV_ROUND_UP_ULL(val * RUNTIME_INFO(i915)->cs_timestamp_frequency_hz,
-				1000000000);
-}
-
-static inline u64 i915_cs_timestamp_ticks_to_ns(struct drm_i915_private *i915, u64 val)
-{
-	return div_u64(val * 1000000000,
-		       RUNTIME_INFO(i915)->cs_timestamp_frequency_hz);
-}
-
 #endif
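
tgl_revids_get() above now clamps the hardware-reported revid before indexing the stepping tables, so an unknown future revid resolves to the last known entry instead of reading past the array. A standalone sketch of the clamping idiom, with made-up table contents:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define min_t(t, a, b) ((t)(a) < (t)(b) ? (t)(a) : (t)(b))

struct i915_rev_steppings { uint8_t gt_stepping, disp_stepping; };

/* Made-up stepping values, just to exercise the lookup. */
static const struct i915_rev_steppings revids[] = {
	{ 0, 0 }, { 1, 2 },
};

static const struct i915_rev_steppings *revids_get(uint8_t revid)
{
	/* Clamp instead of trusting the hardware-reported index. */
	revid = min_t(uint8_t, revid, ARRAY_SIZE(revids) - 1);
	return &revids[revid];
}

int main(void)
{
	/* revid 7 is beyond the table; we get the last entry, not UB. */
	printf("gt_stepping for revid 7: %u\n",
	       (unsigned int)revids_get(7)->gt_stepping);
	return 0;
}
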
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 58276694c848d2e5e2e83e732dffec6538d4f4a7..aa4490934469408f3a54220c7ba1feaa019c7808 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -179,108 +179,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 	return ret;
 }
 
-static int
-i915_gem_create(struct drm_file *file,
-		struct intel_memory_region *mr,
-		u64 *size_p,
-		u32 *handle_p)
-{
-	struct drm_i915_gem_object *obj;
-	u32 handle;
-	u64 size;
-	int ret;
-
-	GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
-	size = round_up(*size_p, mr->min_page_size);
-	if (size == 0)
-		return -EINVAL;
-
-	/* For most of the ABI (e.g. mmap) we think in system pages */
-	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
-
-	/* Allocate the new object */
-	obj = i915_gem_object_create_region(mr, size, 0);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
-
-	ret = drm_gem_handle_create(file, &obj->base, &handle);
-	/* drop reference from allocate - handle holds it now */
-	i915_gem_object_put(obj);
-	if (ret)
-		return ret;
-
-	*handle_p = handle;
-	*size_p = size;
-	return 0;
-}
-
-int
-i915_gem_dumb_create(struct drm_file *file,
-		     struct drm_device *dev,
-		     struct drm_mode_create_dumb *args)
-{
-	enum intel_memory_type mem_type;
-	int cpp = DIV_ROUND_UP(args->bpp, 8);
-	u32 format;
-
-	switch (cpp) {
-	case 1:
-		format = DRM_FORMAT_C8;
-		break;
-	case 2:
-		format = DRM_FORMAT_RGB565;
-		break;
-	case 4:
-		format = DRM_FORMAT_XRGB8888;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* have to work out size/pitch and return them */
-	args->pitch = ALIGN(args->width * cpp, 64);
-
-	/* align stride to page size so that we can remap */
-	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
-						    DRM_FORMAT_MOD_LINEAR))
-		args->pitch = ALIGN(args->pitch, 4096);
-
-	if (args->pitch < args->width)
-		return -EINVAL;
-
-	args->size = mul_u32_u32(args->pitch, args->height);
-
-	mem_type = INTEL_MEMORY_SYSTEM;
-	if (HAS_LMEM(to_i915(dev)))
-		mem_type = INTEL_MEMORY_LOCAL;
-
-	return i915_gem_create(file,
-			       intel_memory_region_by_type(to_i915(dev),
-							   mem_type),
-			       &args->size, &args->handle);
-}
-
-/**
- * Creates a new mm object and returns a handle to it.
- * @dev: drm device pointer
- * @data: ioctl data blob
- * @file: drm file pointer
- */
-int
-i915_gem_create_ioctl(struct drm_device *dev, void *data,
-		      struct drm_file *file)
-{
-	struct drm_i915_private *i915 = to_i915(dev);
-	struct drm_i915_gem_create *args = data;
-
-	i915_gem_flush_free_objects(i915);
-
-	return i915_gem_create(file,
-			       intel_memory_region_by_type(i915,
-							   INTEL_MEMORY_SYSTEM),
-			       &args->size, &args->handle);
-}
-
 static int
 shmem_pread(struct page *page, int offset, int len, char __user *user_data,
 	    bool needs_clflush)
@@ -1059,14 +957,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	    i915_gem_object_is_tiled(obj) &&
 	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		if (obj->mm.madv == I915_MADV_WILLNEED) {
-			GEM_BUG_ON(!obj->mm.quirked);
-			__i915_gem_object_unpin_pages(obj);
-			obj->mm.quirked = false;
+			GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
+			i915_gem_object_clear_tiling_quirk(obj);
+			i915_gem_object_make_shrinkable(obj);
 		}
 		if (args->madv == I915_MADV_WILLNEED) {
-			GEM_BUG_ON(obj->mm.quirked);
-			__i915_gem_object_pin_pages(obj);
-			obj->mm.quirked = true;
+			GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+			i915_gem_object_make_unshrinkable(obj);
+			i915_gem_object_set_tiling_quirk(obj);
 		}
 	}
 
@@ -1207,8 +1105,6 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
 
 void i915_gem_driver_release(struct drm_i915_private *dev_priv)
 {
-	i915_gem_driver_release__contexts(dev_priv);
-
 	intel_gt_driver_release(&dev_priv->gt);
 
 	intel_wa_list_free(&dev_priv->gt_wa_list);
@@ -1249,53 +1145,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
 	drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
 }
 
-int i915_gem_freeze(struct drm_i915_private *dev_priv)
-{
-	/* Discard all purgeable objects, let userspace recover those as
-	 * required after resuming.
-	 */
-	i915_gem_shrink_all(dev_priv);
-
-	return 0;
-}
-
-int i915_gem_freeze_late(struct drm_i915_private *i915)
-{
-	struct drm_i915_gem_object *obj;
-	intel_wakeref_t wakeref;
-
-	/*
-	 * Called just before we write the hibernation image.
-	 *
-	 * We need to update the domain tracking to reflect that the CPU
-	 * will be accessing all the pages to create and restore from the
-	 * hibernation, and so upon restoration those pages will be in the
-	 * CPU domain.
-	 *
-	 * To make sure the hibernation image contains the latest state,
-	 * we update that state just before writing out the image.
-	 *
-	 * To try and reduce the hibernation image, we manually shrink
-	 * the objects as well, see i915_gem_freeze()
-	 */
-
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
-	i915_gem_shrink(i915, -1UL, NULL, ~0);
-	i915_gem_drain_freed_objects(i915);
-
-	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
-		i915_gem_object_lock(obj, NULL);
-		drm_WARN_ON(&i915->drm,
-			    i915_gem_object_set_to_cpu_domain(obj, true));
-		i915_gem_object_unlock(obj);
-	}
-
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-
-	return 0;
-}
-
 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv;
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index a4cad3f154caf189c4b5731e47b1a87ebd757182..e622aee6e4be90a599f29ae3496108714580f67a 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -38,11 +38,18 @@ struct drm_i915_private;
 
 #define GEM_SHOW_DEBUG() drm_debug_enabled(DRM_UT_DRIVER)
 
+#ifdef CONFIG_DRM_I915_DEBUG_GEM_ONCE
+#define __GEM_BUG(cond) BUG()
+#else
+#define __GEM_BUG(cond) \
+	WARN(1, "%s:%d GEM_BUG_ON(%s)\n", __func__, __LINE__, __stringify(cond))
+#endif
+
 #define GEM_BUG_ON(condition) do { if (unlikely((condition))) {	\
 		GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \
 			      __func__, __LINE__, __stringify(condition)); \
 		GEM_TRACE_DUMP(); \
-		BUG(); \
+		__GEM_BUG(condition); \
 		} \
 	} while(0)
 #define GEM_WARN_ON(expr) WARN_ON(expr)
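
The GEM_BUG_ON() rework above keeps the fatal BUG() only behind CONFIG_DRM_I915_DEBUG_GEM_ONCE and downgrades the assertion to a WARN() otherwise. A userspace approximation of the same compile-time switch, with abort() and fprintf() standing in for the kernel primitives:

#include <stdio.h>
#include <stdlib.h>

/* Build with -DDEBUG_GEM_ONCE to make assertions fatal (stand-in flag). */
#ifdef DEBUG_GEM_ONCE
#define __GEM_BUG(cond) abort()
#else
#define __GEM_BUG(cond) \
	fprintf(stderr, "%s:%d GEM_BUG_ON(%s)\n", __func__, __LINE__, #cond)
#endif

#define GEM_BUG_ON(condition) do { \
		if (condition) \
			__GEM_BUG(condition); \
	} while (0)

int main(void)
{
	GEM_BUG_ON(1 + 1 != 2);	/* passes silently */
	GEM_BUG_ON(2 + 2 == 4);	/* warns, or aborts with -DDEBUG_GEM_ONCE */
	return 0;
}
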
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e1a66c8245b859f4c4d710c6cf15eaff8b529472..4d2d59a9942bd01bd39612d155aa64750f016e54 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -61,6 +61,17 @@ mark_free(struct drm_mm_scan *scan,
 	return drm_mm_scan_add_block(scan, &vma->node);
 }
 
+static bool defer_evict(struct i915_vma *vma)
+{
+	if (i915_vma_is_active(vma))
+		return true;
+
+	if (i915_vma_is_scanout(vma))
+		return true;
+
+	return false;
+}
+
 /**
  * i915_gem_evict_something - Evict vmas to make room for binding a new one
  * @vm: address space to evict from
@@ -150,7 +161,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 		 * To notice when we complete one full cycle, we record the
 		 * first active element seen, before moving it to the tail.
 		 */
-		if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) {
+		if (active != ERR_PTR(-EAGAIN) && defer_evict(vma)) {
 			if (!active)
 				active = vma;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c5ee1567f3d1be4e3ca9d81e73a698b4fac74c2c..3ee2f682eff61cf2544b3e1ece0f59cd20ff3afe 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -55,22 +55,17 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 			       struct sg_table *pages)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct device *kdev = &dev_priv->drm.pdev->dev;
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
-	if (unlikely(ggtt->do_idle_maps)) {
-		/* XXX This does not prevent more requests being submitted! */
-		if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
-						     -MAX_SCHEDULE_TIMEOUT)) {
-			drm_err(&dev_priv->drm,
-				"Failed to wait for idle; VT'd may hang.\n");
-			/* Wait a bit, in hopes it avoids the hang */
-			udelay(10);
-		}
-	}
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &i915->ggtt;
+
+	/* XXX This does not prevent more requests being submitted! */
+	if (unlikely(ggtt->do_idle_maps))
+		/* Wait a bit, in the hope it avoids the hang */
+		usleep_range(100, 250);
 
-	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_sg(&i915->drm.pdev->dev,
+		     pages->sgl, pages->nents,
+		     PCI_DMA_BIDIRECTIONAL);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index f96032c60a12df1e4d687653134401c6e88094f2..75c3bfc2486e406f064f5b5634dbdd4cad84c762 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -154,7 +154,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
 			return -ENODEV;
 		break;
 	case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
-		value = RUNTIME_INFO(i915)->cs_timestamp_frequency_hz;
+		value = i915->gt.clock_frequency;
 		break;
 	case I915_PARAM_MMAP_GTT_COHERENT:
 		value = INTEL_INFO(i915)->has_coherent_ggtt;
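
The getparam hunk above reads the CS timestamp frequency from the per-GT gt.clock_frequency instead of RUNTIME_INFO(), and the earlier i915_perf_noa_delay_set() hunk rejects delays whose tick count would overflow the 32-bit CS timestamp. A standalone sketch of that ns-to-ticks conversion and guard; the 19.2 MHz frequency is illustrative:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_ULL(x, y) (((x) + (y) - 1) / (y))

/*
 * ns -> clock ticks, rounding up, as the removed helper did. This sketch
 * assumes the product fits in 64 bits; the kernel helper is more careful.
 */
static uint64_t ns_to_clock_interval(uint32_t clock_frequency_hz, uint64_t ns)
{
	return DIV_ROUND_UP_ULL(ns * clock_frequency_hz, 1000000000ULL);
}

int main(void)
{
	uint32_t freq = 19200000;	/* 19.2 MHz, a common CS clock */
	uint64_t delay_ns = 500ULL * 1000 * 1000 * 1000;	/* 500 s */
	uint64_t ticks = ns_to_clock_interval(freq, delay_ns);

	/* Mirrors the U32_MAX guard in i915_perf_noa_delay_set(). */
	if (ticks > UINT32_MAX)
		printf("reject: %llu ticks overflow the 32-bit timestamp\n",
		       (unsigned long long)ticks);
	else
		printf("ok: %llu ticks\n", (unsigned long long)ticks);
	return 0;
}
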
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d8cac4c5881fdb705d17e6dc9aa0d3e0b276301b..f962693404b79eaa63753be923980d03fd2db9ab 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -485,7 +485,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
 				const char *header,
 				const struct i915_gem_context_coredump *ctx)
 {
-	const u32 period = RUNTIME_INFO(m->i915)->cs_timestamp_period_ns;
+	const u32 period = m->i915->gt.clock_period_ns;
 
 	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
 		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
@@ -1051,7 +1051,9 @@ i915_vma_coredump_create(const struct intel_gt *gt,
 		for_each_sgt_daddr(dma, iter, vma->pages) {
 			void __iomem *s;
 
-			s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE);
+			s = io_mapping_map_wc(&mem->iomap,
+					      dma - mem->region.start,
+					      PAGE_SIZE);
 			ret = compress_page(compress,
 					    (void __force *)s, dst,
 					    true);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6cdb052e385050c1e74f45c0a0fd066649d6a3e2..1a701367a718187998aa89583c3a4c618bb62ea6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -327,10 +327,10 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
 	lockdep_assert_held(&dev_priv->irq_lock);
 	drm_WARN_ON(&dev_priv->drm, bits & ~mask);
 
-	val = I915_READ(PORT_HOTPLUG_EN);
+	val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
 	val &= ~mask;
 	val |= bits;
-	I915_WRITE(PORT_HOTPLUG_EN, val);
+	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
 }
 
 /**
@@ -376,8 +376,8 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
 	if (new_val != dev_priv->irq_mask &&
 	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
 		dev_priv->irq_mask = new_val;
-		I915_WRITE(DEIMR, dev_priv->irq_mask);
-		POSTING_READ(DEIMR);
+		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
+		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
 	}
 }
 
@@ -401,15 +401,15 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
 	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
 		return;
 
-	old_val = I915_READ(GEN8_DE_PORT_IMR);
+	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
 
 	new_val = old_val;
 	new_val &= ~interrupt_mask;
 	new_val |= (~enabled_irq_mask & interrupt_mask);
 
 	if (new_val != old_val) {
-		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
-		POSTING_READ(GEN8_DE_PORT_IMR);
+		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
+		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
 	}
 }
 
@@ -440,8 +440,8 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
 
 	if (new_val != dev_priv->de_irq_mask[pipe]) {
 		dev_priv->de_irq_mask[pipe] = new_val;
-		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
-		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
 	}
 }
 
@@ -455,7 +455,7 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 				  u32 interrupt_mask,
 				  u32 enabled_irq_mask)
 {
-	u32 sdeimr = I915_READ(SDEIMR);
+	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
 	sdeimr &= ~interrupt_mask;
 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
 
@@ -466,8 +466,8 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
 		return;
 
-	I915_WRITE(SDEIMR, sdeimr);
-	POSTING_READ(SDEIMR);
+	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
+	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
 }
 
 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
@@ -533,8 +533,8 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
 
-	I915_WRITE(reg, enable_mask | status_mask);
-	POSTING_READ(reg);
+	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
+	intel_uncore_posting_read(&dev_priv->uncore, reg);
 }
 
 void i915_disable_pipestat(struct drm_i915_private *dev_priv,
@@ -556,8 +556,8 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
 
-	I915_WRITE(reg, enable_mask | status_mask);
-	POSTING_READ(reg);
+	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
+	intel_uncore_posting_read(&dev_priv->uncore, reg);
 }
 
 static bool i915_has_asle(struct drm_i915_private *dev_priv)
@@ -715,28 +715,18 @@ u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
 	if (!vblank->max_vblank_count)
 		return 0;
 
-	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
+	return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
 }
 
-/*
- * On certain encoders on certain platforms, pipe
- * scanline register will not work to get the scanline,
- * since the timings are driven from the PORT or issues
- * with scanline register updates.
- * This function will use Framestamp and current
- * timestamp registers to calculate the scanline.
- */
-static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
+static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct drm_vblank_crtc *vblank =
 		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
 	const struct drm_display_mode *mode = &vblank->hwmode;
-	u32 vblank_start = mode->crtc_vblank_start;
-	u32 vtotal = mode->crtc_vtotal;
 	u32 htotal = mode->crtc_htotal;
 	u32 clock = mode->crtc_clock;
-	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
+	u32 scan_prev_time, scan_curr_time, scan_post_time;
 
 	/*
 	 * To avoid the race condition where we might cross into the
@@ -763,8 +753,28 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
 						  PIPE_FRMTMSTMP(crtc->pipe));
 	} while (scan_post_time != scan_prev_time);
 
-	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
-					clock), 1000 * htotal);
+	return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
+				   clock), 1000 * htotal);
+}
+
+/*
+ * On certain encoders on certain platforms, pipe
+ * scanline register will not work to get the scanline,
+ * since the timings are driven from the PORT, or there are
+ * issues with scanline register updates.
+ * This function will use Framestamp and current
+ * timestamp registers to calculate the scanline.
+ */
+static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
+{
+	struct drm_vblank_crtc *vblank =
+		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
+	const struct drm_display_mode *mode = &vblank->hwmode;
+	u32 vblank_start = mode->crtc_vblank_start;
+	u32 vtotal = mode->crtc_vtotal;
+	u32 scanline;
+
+	scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
 	scanline = min(scanline, vtotal - 1);
 	scanline = (scanline + vblank_start) % vtotal;
 
@@ -883,7 +893,20 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
 	if (stime)
 		*stime = ktime_get();
 
-	if (use_scanline_counter) {
+	if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
+		int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);
+
+		position = __intel_get_crtc_scanline(crtc);
+
+		/*
+		 * Already exiting vblank? If so, shift our position
+		 * so it looks like we're already apporaching the full
+		 * vblank end. This should make the generated timestamp
+		 * more or less match when the active portion will start.
+		 */
+		if (position >= vbl_start && scanlines < position)
+			position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
+	} else if (use_scanline_counter) {
 		/* No obvious pixelcount register. Only query vertical
 		 * scanout position from Display scan line register.
 		 */
@@ -1004,9 +1027,9 @@ static void ivb_parity_work(struct work_struct *work)
 	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
 		goto out;
 
-	misccpctl = I915_READ(GEN7_MISCCPCTL);
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
-	POSTING_READ(GEN7_MISCCPCTL);
+	misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
+	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);
 
 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
 		i915_reg_t reg;
@@ -1020,13 +1043,13 @@ static void ivb_parity_work(struct work_struct *work)
 
 		reg = GEN7_L3CDERRST1(slice);
 
-		error_status = I915_READ(reg);
+		error_status = intel_uncore_read(&dev_priv->uncore, reg);
 		row = GEN7_PARITY_ERROR_ROW(error_status);
 		bank = GEN7_PARITY_ERROR_BANK(error_status);
 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
 
-		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
-		POSTING_READ(reg);
+		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+		intel_uncore_posting_read(&dev_priv->uncore, reg);
 
 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
@@ -1047,7 +1070,7 @@ static void ivb_parity_work(struct work_struct *work)
 		kfree(parity_event[1]);
 	}
 
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
 
 out:
 	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
@@ -1062,17 +1085,12 @@ static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
 	switch (pin) {
 	case HPD_PORT_TC1:
-		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC1);
 	case HPD_PORT_TC2:
-		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC2);
 	case HPD_PORT_TC3:
-		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC3);
 	case HPD_PORT_TC4:
-		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC4);
 	case HPD_PORT_TC5:
-		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC5);
 	case HPD_PORT_TC6:
-		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC6);
+		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
 	default:
 		return false;
 	}
@@ -1096,13 +1114,10 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
 	switch (pin) {
 	case HPD_PORT_A:
-		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_A);
 	case HPD_PORT_B:
-		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_B);
 	case HPD_PORT_C:
-		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_C);
 	case HPD_PORT_D:
-		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_D);
+		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
 	default:
 		return false;
 	}
@@ -1112,17 +1127,12 @@ static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
 	switch (pin) {
 	case HPD_PORT_TC1:
-		return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC1);
 	case HPD_PORT_TC2:
-		return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC2);
 	case HPD_PORT_TC3:
-		return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC3);
 	case HPD_PORT_TC4:
-		return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC4);
 	case HPD_PORT_TC5:
-		return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC5);
 	case HPD_PORT_TC6:
-		return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC6);
+		return val & ICP_TC_HPD_LONG_DETECT(pin);
 	default:
 		return false;
 	}
@@ -1337,7 +1347,7 @@ static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 				     enum pipe pipe)
 {
 	display_pipe_crc_irq_handler(dev_priv, pipe,
-				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
 				     0, 0, 0, 0);
 }
 
@@ -1345,11 +1355,11 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 				     enum pipe pipe)
 {
 	display_pipe_crc_irq_handler(dev_priv, pipe,
-				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
-				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
-				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
-				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
-				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
+				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
+				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
+				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
+				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
+				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
 }
 
 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
@@ -1358,19 +1368,19 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 	u32 res1, res2;
 
 	if (INTEL_GEN(dev_priv) >= 3)
-		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
+		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
 	else
 		res1 = 0;
 
 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
-		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
+		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
 	else
 		res2 = 0;
 
 	display_pipe_crc_irq_handler(dev_priv, pipe,
-				     I915_READ(PIPE_CRC_RES_RED(pipe)),
-				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
-				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
+				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
+				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
+				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
 				     res1, res2);
 }
 
@@ -1379,7 +1389,7 @@ static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
 	enum pipe pipe;
 
 	for_each_pipe(dev_priv, pipe) {
-		I915_WRITE(PIPESTAT(pipe),
+		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
 			   PIPESTAT_INT_STATUS_MASK |
 			   PIPE_FIFO_UNDERRUN_STATUS);
 
@@ -1433,7 +1443,7 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
 			continue;
 
 		reg = PIPESTAT(pipe);
-		pipe_stats[pipe] = I915_READ(reg) & status_mask;
+		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
 		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
 
 		/*
@@ -1446,8 +1456,8 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
 		 * an interrupt is still pending.
 		 */
 		if (pipe_stats[pipe]) {
-			I915_WRITE(reg, pipe_stats[pipe]);
-			I915_WRITE(reg, enable_mask);
+			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
+			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
 		}
 	}
 	spin_unlock(&dev_priv->irq_lock);
@@ -1530,6 +1540,9 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
 			intel_handle_vblank(dev_priv, pipe);
 
+		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
+			flip_done_handler(dev_priv, pipe);
+
 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 
@@ -1563,18 +1576,18 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
 	 * bits can itself generate a new hotplug interrupt :(
 	 */
 	for (i = 0; i < 10; i++) {
-		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
+		u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
 
 		if (tmp == 0)
 			return hotplug_status;
 
 		hotplug_status |= tmp;
-		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
 	}
 
 	drm_WARN_ONCE(&dev_priv->drm, 1,
 		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
-		      I915_READ(PORT_HOTPLUG_STAT));
+		      intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
 
 	return hotplug_status;
 }
@@ -1623,9 +1636,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 		u32 hotplug_status = 0;
 		u32 ier = 0;
 
-		gt_iir = I915_READ(GTIIR);
-		pm_iir = I915_READ(GEN6_PMIIR);
-		iir = I915_READ(VLV_IIR);
+		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
+		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
+		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
 
 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
 			break;
@@ -1645,14 +1658,14 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
 		 * bits this time around.
 		 */
-		I915_WRITE(VLV_MASTER_IER, 0);
-		ier = I915_READ(VLV_IER);
-		I915_WRITE(VLV_IER, 0);
+		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
+		ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
+		intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
 
 		if (gt_iir)
-			I915_WRITE(GTIIR, gt_iir);
+			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
 		if (pm_iir)
-			I915_WRITE(GEN6_PMIIR, pm_iir);
+			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
 
 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
@@ -1670,10 +1683,10 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
 		 */
 		if (iir)
-			I915_WRITE(VLV_IIR, iir);
+			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
 
-		I915_WRITE(VLV_IER, ier);
-		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
+		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
 
 		if (gt_iir)
 			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
@@ -1710,8 +1723,8 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 		u32 hotplug_status = 0;
 		u32 ier = 0;
 
-		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
-		iir = I915_READ(VLV_IIR);
+		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
+		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
 
 		if (master_ctl == 0 && iir == 0)
 			break;
@@ -1731,9 +1744,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
 		 * bits this time around.
 		 */
-		I915_WRITE(GEN8_MASTER_IRQ, 0);
-		ier = I915_READ(VLV_IER);
-		I915_WRITE(VLV_IER, 0);
+		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
+		ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
+		intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
 
 		gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
 
@@ -1754,10 +1767,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
 		 */
 		if (iir)
-			I915_WRITE(VLV_IIR, iir);
+			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
 
-		I915_WRITE(VLV_IER, ier);
-		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
+		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
 
 		if (hotplug_status)
 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
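
Both interrupt loops above keep the same ack choreography while swapping accessors: disable the master interrupt and zero IER, snapshot and ack IIR, handle the events, then restore IER and the master enable in reverse order, so an event arriving mid-handler re-asserts a fresh edge instead of being lost. A toy single-pass model of that ordering, with plain variables standing in for the registers:

#include <stdint.h>
#include <stdio.h>

/* Plain variables stand in for VLV_MASTER_IER / VLV_IER / VLV_IIR. */
static uint32_t master_ier = 1, vlv_ier = 0xff, vlv_iir = 0x5;

static void irq_handler_pass(void)
{
	uint32_t ier, iir;

	master_ier = 0;		/* 1: stop new master edges */
	ier = vlv_ier;		/* 2: save and zero IER so no new */
	vlv_ier = 0;		/*    IIR bits latch while we work */

	iir = vlv_iir;		/* 3: snapshot ... */
	vlv_iir &= ~iir;	/*    ... and ack what we snapshotted */

	printf("handling iir=0x%x\n", iir);	/* 4: handle events */

	vlv_ier = ier;		/* 5: restore in reverse order */
	master_ier = 1;
}

int main(void)
{
	irq_handler_pass();
	return 0;
}
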
@@ -1783,7 +1796,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
 	 * errors.
 	 */
-	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
 	if (!hotplug_trigger) {
 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
 			PORTD_HOTPLUG_STATUS_MASK |
@@ -1792,7 +1805,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
 		dig_hotplug_reg &= ~mask;
 	}
 
-	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
 	if (!hotplug_trigger)
 		return;
 
@@ -1837,7 +1850,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 		for_each_pipe(dev_priv, pipe)
 			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
 				pipe_name(pipe),
-				I915_READ(FDI_RX_IIR(pipe)));
+				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
 	}
 
 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
@@ -1856,7 +1869,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 
 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
 {
-	u32 err_int = I915_READ(GEN7_ERR_INT);
+	u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
 	enum pipe pipe;
 
 	if (err_int & ERR_INT_POISON)
@@ -1874,12 +1887,12 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
 		}
 	}
 
-	I915_WRITE(GEN7_ERR_INT, err_int);
+	intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
 }
 
 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
 {
-	u32 serr_int = I915_READ(SERR_INT);
+	u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
 	enum pipe pipe;
 
 	if (serr_int & SERR_INT_POISON)
@@ -1889,7 +1902,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
 
-	I915_WRITE(SERR_INT, serr_int);
+	intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
 }
 
 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
@@ -1922,7 +1935,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 		for_each_pipe(dev_priv, pipe)
 			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
 				pipe_name(pipe),
-				I915_READ(FDI_RX_IIR(pipe)));
+				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
 	}
 
 	if (pch_iir & SDE_ERROR_CPT)
@@ -1938,8 +1951,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 	if (ddi_hotplug_trigger) {
 		u32 dig_hotplug_reg;
 
-		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
-		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
+		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
+		intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg);
 
 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 				   ddi_hotplug_trigger, dig_hotplug_reg,
@@ -1950,8 +1963,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 	if (tc_hotplug_trigger) {
 		u32 dig_hotplug_reg;
 
-		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
-		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
+		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
+		intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg);
 
 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 				   tc_hotplug_trigger, dig_hotplug_reg,
@@ -1976,8 +1989,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 	if (hotplug_trigger) {
 		u32 dig_hotplug_reg;
 
-		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
-		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
+		intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
 
 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 				   hotplug_trigger, dig_hotplug_reg,
@@ -1988,8 +2001,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 	if (hotplug2_trigger) {
 		u32 dig_hotplug_reg;
 
-		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
-		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
+		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
+		intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg);
 
 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 				   hotplug2_trigger, dig_hotplug_reg,
@@ -2009,8 +2022,8 @@ static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
 {
 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
-	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
-	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
+	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
+	intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
 
 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 			   hotplug_trigger, dig_hotplug_reg,
@@ -2042,6 +2055,9 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
 		if (de_iir & DE_PIPE_VBLANK(pipe))
 			intel_handle_vblank(dev_priv, pipe);
 
+		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
+			flip_done_handler(dev_priv, pipe);
+
 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 
@@ -2051,7 +2067,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
 
 	/* check event from PCH */
 	if (de_iir & DE_PCH_EVENT) {
-		u32 pch_iir = I915_READ(SDEIIR);
+		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
 
 		if (HAS_PCH_CPT(dev_priv))
 			cpt_irq_handler(dev_priv, pch_iir);
@@ -2059,7 +2075,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
 			ibx_irq_handler(dev_priv, pch_iir);
 
 		/* should clear PCH hotplug event before clearing CPU irq */
-		I915_WRITE(SDEIIR, pch_iir);
+		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
 	}
 
 	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
@@ -2079,10 +2095,10 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
 		ivb_err_int_handler(dev_priv);
 
 	if (de_iir & DE_EDP_PSR_INT_HSW) {
-		u32 psr_iir = I915_READ(EDP_PSR_IIR);
+		u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR);
 
 		intel_psr_irq_handler(dev_priv, psr_iir);
-		I915_WRITE(EDP_PSR_IIR, psr_iir);
+		intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir);
 	}
 
 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
@@ -2092,18 +2108,21 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
 		intel_opregion_asle_intr(dev_priv);
 
 	for_each_pipe(dev_priv, pipe) {
-		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
+		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
 			intel_handle_vblank(dev_priv, pipe);
+
+		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
+			flip_done_handler(dev_priv, pipe);
 	}
 
 	/* check event from PCH */
 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
-		u32 pch_iir = I915_READ(SDEIIR);
+		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
 
 		cpt_irq_handler(dev_priv, pch_iir);
 
 		/* clear PCH hotplug event before clearing CPU irq */
-		I915_WRITE(SDEIIR, pch_iir);
+		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
 	}
 }
 
@@ -2190,8 +2209,8 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
 {
 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
-	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
-	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
+	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
 
 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 			   hotplug_trigger, dig_hotplug_reg,
@@ -2210,8 +2229,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
 	if (trigger_tc) {
 		u32 dig_hotplug_reg;
 
-		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
-		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
+		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
+		intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
 
 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 				   trigger_tc, dig_hotplug_reg,
@@ -2222,8 +2241,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
 	if (trigger_tbt) {
 		u32 dig_hotplug_reg;
 
-		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
-		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
+		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
+		intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
 
 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 				   trigger_tbt, dig_hotplug_reg,
@@ -2300,8 +2319,8 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
 		else
 			iir_reg = EDP_PSR_IIR;
 
-		psr_iir = I915_READ(iir_reg);
-		I915_WRITE(iir_reg, psr_iir);
+		psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
+		intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
 
 		if (psr_iir)
 			found = true;
@@ -2325,7 +2344,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
 	 * In case of dual link, TE comes from DSI_1;
 	 * this is to check if dual link is enabled.
 	 */
-	val = I915_READ(TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
+	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
 	val &= PORT_SYNC_MODE_ENABLE;
 
 	/*
@@ -2337,7 +2356,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
 	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
 
 	/* Check if DSI configured in command mode */
-	val = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
+	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
 	val = val & OP_MODE_MASK;
 
 	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
@@ -2346,7 +2365,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
 	}
 
 	/* Get PIPE for handling VBLANK event */
-	val = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
 	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
 	case TRANS_DDI_EDP_INPUT_A_ON:
 		pipe = PIPE_A;
@@ -2366,8 +2385,16 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
 
 	/* clear TE in dsi IIR */
 	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
-	tmp = I915_READ(DSI_INTR_IDENT_REG(port));
-	I915_WRITE(DSI_INTR_IDENT_REG(port), tmp);
+	tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
+	intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
+}
+
+static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
+{
+	if (INTEL_GEN(i915) >= 9)
+		return GEN9_PIPE_PLANE1_FLIP_DONE;
+	else
+		return GEN8_PIPE_PRIMARY_FLIP_DONE;
 }
 
 static irqreturn_t
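
gen8_de_pipe_flip_done_mask() centralizes a generation check that the rest of the series reuses in the DE pipe handler, gen8_irq_power_well_post_enable() and gen8_de_irq_postinstall(), replacing three open-coded INTEL_GEN() >= 9 branches. The shape of the refactor, with placeholder bit values rather than the real register definitions:

#include <stdint.h>
#include <stdio.h>

/* Placeholder values; the real bits live in i915_reg.h. */
#define GEN9_PIPE_PLANE1_FLIP_DONE	(1u << 3)
#define GEN8_PIPE_PRIMARY_FLIP_DONE	(1u << 4)
#define GEN8_PIPE_VBLANK		(1u << 0)

static uint32_t flip_done_mask(int gen)
{
	return gen >= 9 ? GEN9_PIPE_PLANE1_FLIP_DONE
			: GEN8_PIPE_PRIMARY_FLIP_DONE;
}

int main(void)
{
	/* every caller just ORs the helper into its enable mask */
	uint32_t enables = GEN8_PIPE_VBLANK | flip_done_mask(8);

	printf("gen8 enables = 0x%08x\n", enables);
	return 0;
}
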
@@ -2378,9 +2405,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 	enum pipe pipe;
 
 	if (master_ctl & GEN8_DE_MISC_IRQ) {
-		iir = I915_READ(GEN8_DE_MISC_IIR);
+		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
 		if (iir) {
-			I915_WRITE(GEN8_DE_MISC_IIR, iir);
+			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
 			ret = IRQ_HANDLED;
 			gen8_de_misc_irq_handler(dev_priv, iir);
 		} else {
@@ -2390,9 +2417,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 	}
 
 	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
-		iir = I915_READ(GEN11_DE_HPD_IIR);
+		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
 		if (iir) {
-			I915_WRITE(GEN11_DE_HPD_IIR, iir);
+			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
 			ret = IRQ_HANDLED;
 			gen11_hpd_irq_handler(dev_priv, iir);
 		} else {
@@ -2402,11 +2429,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 	}
 
 	if (master_ctl & GEN8_DE_PORT_IRQ) {
-		iir = I915_READ(GEN8_DE_PORT_IIR);
+		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
 		if (iir) {
 			bool found = false;
 
-			I915_WRITE(GEN8_DE_PORT_IIR, iir);
+			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
 			ret = IRQ_HANDLED;
 
 			if (iir & gen8_de_port_aux_mask(dev_priv)) {
@@ -2459,7 +2486,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
 			continue;
 
-		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
 		if (!iir) {
 			drm_err(&dev_priv->drm,
 				"The master control interrupt lied (DE PIPE)!\n");
@@ -2467,12 +2494,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 		}
 
 		ret = IRQ_HANDLED;
-		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
+		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
 
 		if (iir & GEN8_PIPE_VBLANK)
 			intel_handle_vblank(dev_priv, pipe);
 
-		if (iir & GEN9_PIPE_PLANE1_FLIP_DONE)
+		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
 			flip_done_handler(dev_priv, pipe);
 
 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
@@ -2496,9 +2523,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 		 * scheme also closed the SDE interrupt handling race we've seen
 		 * on older pch-split platforms. But this needs testing.
 		 */
-		iir = I915_READ(SDEIIR);
+		iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
 		if (iir) {
-			I915_WRITE(SDEIIR, iir);
+			intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
 			ret = IRQ_HANDLED;
 
 			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
@@ -2741,7 +2768,7 @@ int i915gm_enable_vblank(struct drm_crtc *crtc)
 	 * only when vblank interrupts are actually enabled.
 	 */
 	if (dev_priv->vblank_enabled++ == 0)
-		I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
 
 	return i8xx_enable_vblank(crtc);
 }
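
SCPD0 is one of i915's "masked" registers: the high 16 bits of a write select which of the low 16 bits take effect, which is why _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE need no read-modify-write. Roughly, as a userspace re-derivation rather than the driver's exact macro text:

#include <stdint.h>
#include <stdio.h>

#define MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
#define MASKED_BIT_ENABLE(a)		MASKED_FIELD((a), (a))
#define MASKED_BIT_DISABLE(a)		MASKED_FIELD((a), 0)

int main(void)
{
	uint32_t bit = 1u << 2;	/* e.g. a clock-gate-disable bit */

	printf("enable:  0x%08x\n", MASKED_BIT_ENABLE(bit));
	printf("disable: 0x%08x\n", MASKED_BIT_DISABLE(bit));
	return 0;
}
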
@@ -2798,16 +2825,16 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
 	else
 		port = PORT_A;
 
-	tmp =  I915_READ(DSI_INTR_MASK_REG(port));
+	tmp =  intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port));
 	if (enable)
 		tmp &= ~DSI_TE_EVENT;
 	else
 		tmp |= DSI_TE_EVENT;
 
-	I915_WRITE(DSI_INTR_MASK_REG(port), tmp);
+	intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp);
 
-	tmp = I915_READ(DSI_INTR_IDENT_REG(port));
-	I915_WRITE(DSI_INTR_IDENT_REG(port), tmp);
+	tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
+	intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
 
 	return true;
 }
@@ -2835,19 +2862,6 @@ int bdw_enable_vblank(struct drm_crtc *crtc)
 	return 0;
 }
 
-void skl_enable_flip_done(struct intel_crtc *crtc)
-{
-	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-	enum pipe pipe = crtc->pipe;
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&i915->irq_lock, irqflags);
-
-	bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE);
-
-	spin_unlock_irqrestore(&i915->irq_lock, irqflags);
-}
-
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
@@ -2869,7 +2883,7 @@ void i915gm_disable_vblank(struct drm_crtc *crtc)
 	i8xx_disable_vblank(crtc);
 
 	if (--dev_priv->vblank_enabled == 0)
-		I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
 }
 
 void i965_disable_vblank(struct drm_crtc *crtc)
@@ -2912,19 +2926,6 @@ void bdw_disable_vblank(struct drm_crtc *crtc)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-void skl_disable_flip_done(struct intel_crtc *crtc)
-{
-	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-	enum pipe pipe = crtc->pipe;
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&i915->irq_lock, irqflags);
-
-	bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE);
-
-	spin_unlock_irqrestore(&i915->irq_lock, irqflags);
-}
-
 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;
@@ -2935,7 +2936,7 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv)
 	GEN3_IRQ_RESET(uncore, SDE);
 
 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
-		I915_WRITE(SERR_INT, 0xffffffff);
+		intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
 }
 
 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -2948,7 +2949,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
 
 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
-	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
 
 	i9xx_pipestat_irq_reset(dev_priv);
 
@@ -3011,8 +3012,8 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
 
 static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(VLV_MASTER_IER, 0);
-	POSTING_READ(VLV_MASTER_IER);
+	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
+	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
 
 	gen5_gt_irq_reset(&dev_priv->gt);
 
@@ -3117,13 +3118,10 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
 				     u8 pipe_mask)
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;
-
-	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
+	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
+		gen8_de_pipe_flip_done_mask(dev_priv);
 	enum pipe pipe;
 
-	if (INTEL_GEN(dev_priv) >= 9)
-		extra_ier |= GEN9_PIPE_PLANE1_FLIP_DONE;
-
 	spin_lock_irq(&dev_priv->irq_lock);
 
 	if (!intel_irqs_enabled(dev_priv)) {
@@ -3165,8 +3163,8 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;
 
-	I915_WRITE(GEN8_MASTER_IRQ, 0);
-	POSTING_READ(GEN8_MASTER_IRQ);
+	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
+	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
 
 	gen8_gt_irq_reset(&dev_priv->gt);
 
@@ -3212,7 +3210,7 @@ static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
 	 * duration to 2ms (which is the minimum in the Display Port spec).
 	 * The pulse duration bits are reserved on LPT+.
 	 */
-	hotplug = I915_READ(PCH_PORT_HOTPLUG);
+	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
 	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
 		     PORTB_HOTPLUG_ENABLE |
 		     PORTC_HOTPLUG_ENABLE |
@@ -3221,7 +3219,7 @@ static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
 		     PORTC_PULSE_DURATION_MASK |
 		     PORTD_PULSE_DURATION_MASK);
 	hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
-	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
 }
 
 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3270,20 +3268,20 @@ static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
 {
 	u32 hotplug;
 
-	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
+	hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
 	hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
 		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
 		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
 		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
 	hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
-	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
+	intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug);
 }
 
 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
 {
 	u32 hotplug;
 
-	hotplug = I915_READ(SHOTPLUG_CTL_TC);
+	hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
 	hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
 		     ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
 		     ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
@@ -3291,7 +3289,7 @@ static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
 		     ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
 		     ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
 	hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
-	I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
+	intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug);
 }
 
 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3302,7 +3300,7 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
 
 	if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
-		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
 
 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 
@@ -3330,12 +3328,12 @@ static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 
-	val = I915_READ(SOUTH_CHICKEN1);
+	val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
 	val |= (INVERT_DDIA_HPD |
 		INVERT_DDIB_HPD |
 		INVERT_DDIC_HPD |
 		INVERT_DDID_HPD);
-	I915_WRITE(SOUTH_CHICKEN1, val);
+	intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
 
 	icp_hpd_irq_setup(dev_priv);
 }
@@ -3344,7 +3342,7 @@ static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
 {
 	u32 hotplug;
 
-	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
+	hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
 	hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
@@ -3352,14 +3350,14 @@ static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
 	hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
-	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
+	intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug);
 }
 
 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
 {
 	u32 hotplug;
 
-	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+	hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
 	hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
@@ -3367,7 +3365,7 @@ static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
 	hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
-	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
+	intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug);
 }
 
 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3378,11 +3376,11 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
 
-	val = I915_READ(GEN11_DE_HPD_IMR);
+	val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
 	val &= ~hotplug_irqs;
 	val |= ~enabled_irqs & hotplug_irqs;
-	I915_WRITE(GEN11_DE_HPD_IMR, val);
-	POSTING_READ(GEN11_DE_HPD_IMR);
+	intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val);
+	intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
 
 	gen11_tc_hpd_detection_setup(dev_priv);
 	gen11_tbt_hpd_detection_setup(dev_priv);
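
The IMR arithmetic above is worth unpacking: within hotplug_irqs, a cleared mask bit means the interrupt is unmasked, so the update clears the whole hotplug field and then re-sets the mask bit for every hotplug source that is not in enabled_irqs. In isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t imr = 0xffffffff;		/* everything masked */
	uint32_t hotplug_irqs = 0x3f;		/* six TC pins, say */
	uint32_t enabled_irqs = 0x05;		/* pins 0 and 2 wanted */

	imr &= ~hotplug_irqs;			/* unmask the field */
	imr |= ~enabled_irqs & hotplug_irqs;	/* re-mask unwanted pins */

	printf("imr = 0x%08x\n", imr);		/* bits 0 and 2 stay 0 */
	return 0;
}
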
@@ -3425,25 +3423,25 @@ static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
 
 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
 	if (HAS_PCH_CNP(dev_priv)) {
-		val = I915_READ(SOUTH_CHICKEN1);
+		val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
 		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
 		val |= CHASSIS_CLK_REQ_DURATION(0xf);
-		I915_WRITE(SOUTH_CHICKEN1, val);
+		intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
 	}
 
 	/* Enable digital hotplug on the PCH */
-	hotplug = I915_READ(PCH_PORT_HOTPLUG);
+	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
 	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
 		     PORTB_HOTPLUG_ENABLE |
 		     PORTC_HOTPLUG_ENABLE |
 		     PORTD_HOTPLUG_ENABLE);
 	hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
-	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
 
-	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
+	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
 	hotplug &= ~PORTE_HOTPLUG_ENABLE;
 	hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
-	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
+	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug);
 }
 
 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3451,7 +3449,7 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
 	u32 hotplug_irqs, enabled_irqs;
 
 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
-		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
 
 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
@@ -3482,11 +3480,11 @@ static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
 	 * duration to 2ms (which is the minimum in the Display Port spec)
 	 * The pulse duration bits are reserved on HSW+.
 	 */
-	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
+	hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
 	hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
 		     DIGITAL_PORTA_PULSE_DURATION_MASK);
 	hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
-	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
+	intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
 }
 
 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3536,7 +3534,7 @@ static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
 {
 	u32 hotplug;
 
-	hotplug = I915_READ(PCH_PORT_HOTPLUG);
+	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
 	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
 		     PORTB_HOTPLUG_ENABLE |
 		     PORTC_HOTPLUG_ENABLE |
@@ -3544,7 +3542,7 @@ static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
 		     BXT_DDIB_HPD_INVERT |
 		     BXT_DDIC_HPD_INVERT);
 	hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
-	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
 }
 
 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3598,6 +3596,9 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
+			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
+			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
+			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
 			      DE_DP_A_HOTPLUG_IVB);
 	} else {
 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
@@ -3605,6 +3606,8 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
 				DE_PIPEA_CRC_DONE | DE_POISON);
 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+			      DE_PLANE_FLIP_DONE(PLANE_A) |
+			      DE_PLANE_FLIP_DONE(PLANE_B) |
 			      DE_DP_A_HOTPLUG);
 	}
 
@@ -3664,8 +3667,8 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
 		vlv_display_irq_postinstall(dev_priv);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
-	POSTING_READ(VLV_MASTER_IER);
+	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
 }
 
 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -3695,11 +3698,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 			de_port_masked |= DSI0_TE | DSI1_TE;
 	}
 
-	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
-					   GEN8_PIPE_FIFO_UNDERRUN;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		de_pipe_enables |= GEN9_PIPE_PLANE1_FLIP_DONE;
+	de_pipe_enables = de_pipe_masked |
+		GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
+		gen8_de_pipe_flip_done_mask(dev_priv);
 
 	de_port_enables = de_port_masked;
 	if (IS_GEN9_LP(dev_priv))
@@ -3778,14 +3779,14 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
 
 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
 
-	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
+	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
 
 	if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
 		dg1_master_intr_enable(uncore->regs);
-		POSTING_READ(DG1_MSTR_UNIT_INTR);
+		intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR);
 	} else {
 		gen11_master_intr_enable(uncore->regs);
-		POSTING_READ(GEN11_GFX_MSTR_IRQ);
+		intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
 	}
 }
 
@@ -3798,8 +3799,8 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
 		vlv_display_irq_postinstall(dev_priv);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
-	POSTING_READ(GEN8_MASTER_IRQ);
+	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
 }
 
 static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
@@ -3889,11 +3890,11 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
 {
 	u32 emr;
 
-	*eir = I915_READ(EIR);
+	*eir = intel_uncore_read(&dev_priv->uncore, EIR);
 
-	I915_WRITE(EIR, *eir);
+	intel_uncore_write(&dev_priv->uncore, EIR, *eir);
 
-	*eir_stuck = I915_READ(EIR);
+	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
 	if (*eir_stuck == 0)
 		return;
 
@@ -3907,9 +3908,9 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
 	 * (or by a GPU reset) so we mask any bit that
 	 * remains set.
 	 */
-	emr = I915_READ(EMR);
-	I915_WRITE(EMR, 0xffffffff);
-	I915_WRITE(EMR, emr | *eir_stuck);
+	emr = intel_uncore_read(&dev_priv->uncore, EMR);
+	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
+	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
 }
 
 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
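
The EIR sequence above (read, write back, read again) is a stuck-bit detector: error bits whose cause persists re-assert immediately after the write-to-clear, and whatever survives into the second read is ORed into EMR so the condition stops generating interrupts. A toy model:

#include <stdint.h>
#include <stdio.h>

static const uint32_t stuck = 0x4;	/* bit 2's cause never goes away */
static uint32_t eir = 0x5, emr = 0x0;

static void write_eir(uint32_t v)
{
	eir &= ~v;	/* write-to-clear ... */
	eir |= stuck;	/* ... but stuck causes re-assert at once */
}

int main(void)
{
	uint32_t snap = eir;
	uint32_t eir_stuck;

	write_eir(snap);		/* ack everything we saw */
	eir_stuck = eir;		/* anything left is stuck */
	if (eir_stuck)
		emr |= eir_stuck;	/* mask it to stop the storm */

	printf("eir_stuck=0x%x emr=0x%x\n", eir_stuck, emr);
	return 0;
}
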
@@ -3975,7 +3976,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
 
 	if (I915_HAS_HOTPLUG(dev_priv)) {
 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
-		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
 	}
 
 	i9xx_pipestat_irq_reset(dev_priv);
@@ -3989,7 +3990,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
 	struct intel_uncore *uncore = &dev_priv->uncore;
 	u32 enable_mask;
 
-	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
+	intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
 			  I915_ERROR_MEMORY_REFRESH));
 
 	/* Unmask the interrupts that we always want on. */
@@ -4042,7 +4043,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 		u32 hotplug_status = 0;
 		u32 iir;
 
-		iir = I915_READ(GEN2_IIR);
+		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
 		if (iir == 0)
 			break;
 
@@ -4059,7 +4060,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 		if (iir & I915_MASTER_ERROR_INTERRUPT)
 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
 
-		I915_WRITE(GEN2_IIR, iir);
+		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
 
 		if (iir & I915_USER_INTERRUPT)
 			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
@@ -4085,7 +4086,7 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv)
 	struct intel_uncore *uncore = &dev_priv->uncore;
 
 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
-	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
 
 	i9xx_pipestat_irq_reset(dev_priv);
 
@@ -4112,7 +4113,7 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
 		error_mask = ~(I915_ERROR_PAGE_TABLE |
 			       I915_ERROR_MEMORY_REFRESH);
 	}
-	I915_WRITE(EMR, error_mask);
+	intel_uncore_write(&dev_priv->uncore, EMR, error_mask);
 
 	/* Unmask the interrupts that we always want on. */
 	dev_priv->irq_mask =
@@ -4188,7 +4189,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 		u32 hotplug_status = 0;
 		u32 iir;
 
-		iir = I915_READ(GEN2_IIR);
+		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
 		if (iir == 0)
 			break;
 
@@ -4204,7 +4205,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 		if (iir & I915_MASTER_ERROR_INTERRUPT)
 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
 
-		I915_WRITE(GEN2_IIR, iir);
+		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
 
 		if (iir & I915_USER_INTERRUPT)
 			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 2efe609519ca23f3887f796358f4a6bd25047ade..25f25cd9581813286fd0adf7da346e15573e89f3 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -118,9 +118,6 @@ void i965_disable_vblank(struct drm_crtc *crtc);
 void ilk_disable_vblank(struct drm_crtc *crtc);
 void bdw_disable_vblank(struct drm_crtc *crtc);
 
-void skl_enable_flip_done(struct intel_crtc *crtc);
-void skl_disable_flip_done(struct intel_crtc *crtc);
-
 void gen2_irq_reset(struct intel_uncore *uncore);
 void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
 		    i915_reg_t iir, i915_reg_t ier);
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 43039dc8c607415577bede43c9d44206331bfb39..666808cb3a32609f78858205933b43e85a252576 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -62,7 +62,7 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data)
 {
 	struct remap_pfn *r = data;
 
-	if (GEM_WARN_ON(!r->sgt.pfn))
+	if (GEM_WARN_ON(!r->sgt.sgp))
 		return -EINVAL;
 
 	/* Special PTE are not associated with any struct page */
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 7f139ea4a90b205832791269a19b95569978dc41..6939634e56ed69fcfdcfd716bb1e7dc4d3f1007e 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -185,7 +185,7 @@ i915_param_named_unsafe(inject_probe_failure, uint, 0400,
 
 i915_param_named(enable_dpcd_backlight, int, 0400,
 	"Enable support for DPCD backlight control"
-	"(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enabled)");
+	"(-1=use per-VBT LFP backlight type setting [default], 0=disable, 1=enable, 2=force VESA interface, 3=force Intel interface)");
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT)
 i915_param_named(enable_gvt, bool, 0400,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 330c03e2b4f7051a2dae20e0912a4c930c997f1f..f031966af5b700edc59fa830b649cc75d1bc822a 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -32,6 +32,7 @@ struct drm_printer;
 
 #define ENABLE_GUC_SUBMISSION		BIT(0)
 #define ENABLE_GUC_LOAD_HUC		BIT(1)
+#define ENABLE_GUC_MASK			GENMASK(1, 0)
 
 /*
  * Invoke param, a function-like macro, for each i915 param, with arguments:
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 11fe790b19690b7897fce684f0dd559dbc6ba6a0..020b5f561f07dcaa886a053ebffaa1fac411102b 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -455,6 +455,7 @@ static const struct intel_device_info snb_m_gt2_info = {
 	.has_llc = 1, \
 	.has_rc6 = 1, \
 	.has_rc6p = 1, \
+	.has_reset_engine = true, \
 	.has_rps = true, \
 	.dma_mask_size = 40, \
 	.ppgtt_type = INTEL_PPGTT_ALIASING, \
@@ -513,6 +514,7 @@ static const struct intel_device_info vlv_info = {
 	.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
 	.has_runtime_pm = 1,
 	.has_rc6 = 1,
+	.has_reset_engine = true,
 	.has_rps = true,
 	.display.has_gmch = 1,
 	.display.has_hotplug = 1,
@@ -571,8 +573,7 @@ static const struct intel_device_info hsw_gt3_info = {
 	.dma_mask_size = 39, \
 	.ppgtt_type = INTEL_PPGTT_FULL, \
 	.ppgtt_size = 48, \
-	.has_64bit_reloc = 1, \
-	.has_reset_engine = 1
+	.has_64bit_reloc = 1
 
 #define BDW_PLATFORM \
 	GEN8_FEATURES, \
@@ -639,7 +640,6 @@ static const struct intel_device_info chv_info = {
 	GEN8_FEATURES, \
 	GEN(9), \
 	GEN9_DEFAULT_PAGE_SIZES, \
-	.has_logical_ring_preemption = 1, \
 	.display.has_csr = 1, \
 	.has_gt_uc = 1, \
 	.display.has_hdcp = 1, \
@@ -700,7 +700,6 @@ static const struct intel_device_info skl_gt4_info = {
 	.has_rps = true, \
 	.display.has_dp_mst = 1, \
 	.has_logical_ring_contexts = 1, \
-	.has_logical_ring_preemption = 1, \
 	.has_gt_uc = 1, \
 	.dma_mask_size = 39, \
 	.ppgtt_type = INTEL_PPGTT_FULL, \
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 649c26518d26dd6e811e56a11030051240549d00..112ba5f2ce90e9334e0733769dd533901ce9719d 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -198,8 +198,11 @@
 #include "gem/i915_gem_context.h"
 #include "gt/intel_engine_pm.h"
 #include "gt/intel_engine_user.h"
+#include "gt/intel_execlists_submission.h"
+#include "gt/intel_gpu_commands.h"
 #include "gt/intel_gt.h"
-#include "gt/intel_lrc_reg.h"
+#include "gt/intel_gt_clock_utils.h"
+#include "gt/intel_lrc.h"
 #include "gt/intel_ring.h"
 
 #include "i915_drv.h"
@@ -1635,7 +1638,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
 	struct drm_i915_gem_object *bo;
 	struct i915_vma *vma;
 	const u64 delay_ticks = 0xffffffffffffffff -
-		i915_cs_timestamp_ns_to_ticks(i915, atomic64_read(&stream->perf->noa_programming_delay));
+		intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt,
+					      atomic64_read(&stream->perf->noa_programming_delay));
 	const u32 base = stream->engine->mmio_base;
 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
 	u32 *batch, *ts0, *cs, *jump;
@@ -3516,7 +3520,8 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
 
 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
 {
-	return i915_cs_timestamp_ticks_to_ns(perf->i915, 2ULL << exponent);
+	return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt,
+					     2ULL << exponent);
 }
 
 /**
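
Both conversions above route the old i915_cs_timestamp_* helpers through the GT clock utilities, so the OA sampling period is expressed as (2 << exponent) command-streamer ticks and converted at the GT clock frequency. As arithmetic; the 19.2 MHz value is only an assumed example frequency:

#include <stdint.h>
#include <stdio.h>

static uint64_t clock_interval_to_ns(uint64_t freq_hz, uint64_t ticks)
{
	return ticks * 1000000000ull / freq_hz;
}

int main(void)
{
	const uint64_t freq_hz = 19200000;	/* assumed 19.2 MHz */

	for (int exp = 0; exp < 4; exp++)
		printf("OA exponent %d -> period %llu ns\n", exp,
		       (unsigned long long)
		       clock_interval_to_ns(freq_hz, 2ull << exp));
	return 0;
}
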
@@ -4370,11 +4375,11 @@ void i915_perf_init(struct drm_i915_private *i915)
 	if (perf->ops.enable_metric_set) {
 		mutex_init(&perf->lock);
 
-		oa_sample_rate_hard_limit =
-			RUNTIME_INFO(i915)->cs_timestamp_frequency_hz / 2;
+		/* Choose a representative limit */
+		oa_sample_rate_hard_limit = i915->gt.clock_frequency / 2;
 
 		mutex_init(&perf->metrics_lock);
-		idr_init(&perf->metrics_idr);
+		idr_init_base(&perf->metrics_idr, 1);
 
 		/* We set up some ratelimit state to potentially throttle any
 		 * _NOTES about spurious, invalid OA reports which we don't
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 9856479b56d8bf17ea4f2731601b7d0cc7177cb4..2b88c0baa1bf5e684faf4f409cc14a6967279372 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -26,8 +26,6 @@
 	 BIT(I915_SAMPLE_WAIT) | \
 	 BIT(I915_SAMPLE_SEMA))
 
-#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)
-
 static cpumask_t i915_pmu_cpumask;
 static unsigned int i915_pmu_target_cpu = -1;
 
@@ -56,17 +54,42 @@ static bool is_engine_config(u64 config)
 	return config < __I915_PMU_OTHER(0);
 }
 
-static unsigned int config_enabled_bit(u64 config)
+static unsigned int other_bit(const u64 config)
+{
+	unsigned int val;
+
+	switch (config) {
+	case I915_PMU_ACTUAL_FREQUENCY:
+		val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED;
+		break;
+	case I915_PMU_REQUESTED_FREQUENCY:
+		val = __I915_PMU_REQUESTED_FREQUENCY_ENABLED;
+		break;
+	case I915_PMU_RC6_RESIDENCY:
+		val = __I915_PMU_RC6_RESIDENCY_ENABLED;
+		break;
+	default:
+		/*
+		 * Events that do not require sampling, or tracking of state
+		 * transitions between enabled and disabled, can be ignored.
+		 */
+		return -1;
+	}
+
+	return I915_ENGINE_SAMPLE_COUNT + val;
+}
+
+static unsigned int config_bit(const u64 config)
 {
 	if (is_engine_config(config))
 		return engine_config_sample(config);
 	else
-		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
+		return other_bit(config);
 }
 
-static u64 config_enabled_mask(u64 config)
+static u64 config_mask(u64 config)
 {
-	return BIT_ULL(config_enabled_bit(config));
+	return BIT_ULL(config_bit(config));
 }
 
 static bool is_engine_event(struct perf_event *event)
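
other_bit() is the heart of the PMU rework: instead of deriving bit positions sparsely from uAPI config numbers (which needed a u64), the enable bitmask is packed densely as engine sampler bits followed by the few tracked non-engine events, and events that need no state tracking get no bit at all (-1). A compressed model of the layout, with illustrative counts:

#include <stdio.h>

#define ENGINE_SAMPLE_COUNT	3	/* busy, wait, sema */

enum tracked_event { ACT_FREQ, REQ_FREQ, RC6, TRACKED_EVENT_COUNT };

static int other_bit(int event)
{
	if (event < 0 || event >= TRACKED_EVENT_COUNT)
		return -1;	/* no transition tracking needed */
	return ENGINE_SAMPLE_COUNT + event;
}

int main(void)
{
	/* matches I915_PMU_MASK_BITS: samplers + tracked events */
	printf("mask bits: %d\n", ENGINE_SAMPLE_COUNT + TRACKED_EVENT_COUNT);
	printf("rc6 bit:   %d\n", other_bit(RC6));
	return 0;
}
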
@@ -74,15 +97,15 @@ static bool is_engine_event(struct perf_event *event)
 	return is_engine_config(event->attr.config);
 }
 
-static unsigned int event_enabled_bit(struct perf_event *event)
+static unsigned int event_bit(struct perf_event *event)
 {
-	return config_enabled_bit(event->attr.config);
+	return config_bit(event->attr.config);
 }
 
 static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
 {
 	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
-	u64 enable;
+	u32 enable;
 
 	/*
 	 * Only some counters need the sampling timer.
@@ -95,8 +118,8 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
 	 * Mask out all the ones which do not need the timer, or in
 	 * other words keep all the ones that could need the timer.
 	 */
-	enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
-		  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
+	enable &= config_mask(I915_PMU_ACTUAL_FREQUENCY) |
+		  config_mask(I915_PMU_REQUESTED_FREQUENCY) |
 		  ENGINE_SAMPLE_MASK;
 
 	/*
@@ -137,11 +160,9 @@ static u64 __get_rc6(struct intel_gt *gt)
 	return val;
 }
 
-#if IS_ENABLED(CONFIG_PM)
-
-static inline s64 ktime_since(const ktime_t kt)
+static inline s64 ktime_since_raw(const ktime_t kt)
 {
-	return ktime_to_ns(ktime_sub(ktime_get(), kt));
+	return ktime_to_ns(ktime_sub(ktime_get_raw(), kt));
 }
 
 static u64 get_rc6(struct intel_gt *gt)
@@ -170,7 +191,7 @@ static u64 get_rc6(struct intel_gt *gt)
 		 * on top of the last known real value, as the approximated RC6
 		 * counter value.
 		 */
-		val = ktime_since(pmu->sleep_last);
+		val = ktime_since_raw(pmu->sleep_last);
 		val += pmu->sample[__I915_SAMPLE_RC6].cur;
 	}
 
@@ -193,7 +214,7 @@ static void init_rc6(struct i915_pmu *pmu)
 		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
 		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
 					pmu->sample[__I915_SAMPLE_RC6].cur;
-		pmu->sleep_last = ktime_get();
+		pmu->sleep_last = ktime_get_raw();
 	}
 }
 
@@ -202,21 +223,9 @@ static void park_rc6(struct drm_i915_private *i915)
 	struct i915_pmu *pmu = &i915->pmu;
 
 	pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
-	pmu->sleep_last = ktime_get();
-}
-
-#else
-
-static u64 get_rc6(struct intel_gt *gt)
-{
-	return __get_rc6(gt);
+	pmu->sleep_last = ktime_get_raw();
 }
 
-static void init_rc6(struct i915_pmu *pmu) { }
-static void park_rc6(struct drm_i915_private *i915) {}
-
-#endif
-
 static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
 {
 	if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
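
Two things happen in the RC6 hunks above: the CONFIG_PM=n stubs are dropped (the estimation path is now unconditional), and the park timestamp moves from ktime_get() to ktime_get_raw(), a monotonic clock without NTP rate adjustment, so the software estimate advances at the same nominal rate as the hardware counter. The estimate itself is just last-reading-plus-elapsed, sketched under that assumption:

#include <stdint.h>
#include <stdio.h>

struct rc6_estimate {
	uint64_t last_hw_ns;	/* last real RC6 reading, at park */
	uint64_t park_raw_ns;	/* raw-clock timestamp of the park */
};

/* While parked the GPU sits in RC6, so the residency estimate grows
 * 1:1 with raw time since the park. */
static uint64_t get_rc6(const struct rc6_estimate *e, uint64_t now_raw_ns)
{
	return e->last_hw_ns + (now_raw_ns - e->park_raw_ns);
}

int main(void)
{
	struct rc6_estimate e = { .last_hw_ns = 1000, .park_raw_ns = 5000 };

	printf("estimated rc6 = %llu ns\n",
	       (unsigned long long)get_rc6(&e, 7500));
	return 0;
}
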
@@ -355,8 +364,8 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
 static bool frequency_sampling_enabled(struct i915_pmu *pmu)
 {
 	return pmu->enable &
-	       (config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
-		config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY));
+	       (config_mask(I915_PMU_ACTUAL_FREQUENCY) |
+		config_mask(I915_PMU_REQUESTED_FREQUENCY));
 }
 
 static void
@@ -374,7 +383,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
 	if (!intel_gt_pm_get_if_awake(gt))
 		return;
 
-	if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
+	if (pmu->enable & config_mask(I915_PMU_ACTUAL_FREQUENCY)) {
 		u32 val;
 
 		/*
@@ -396,7 +405,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
 				intel_gpu_freq(rps, val), period_ns / 1000);
 	}
 
-	if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
+	if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
 		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
 				intel_gpu_freq(rps, rps->cur_freq),
 				period_ns / 1000);
@@ -483,6 +492,8 @@ config_status(struct drm_i915_private *i915, u64 config)
 		if (!HAS_RC6(i915))
 			return -ENODEV;
 		break;
+	case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
+		break;
 	default:
 		return -ENOENT;
 	}
@@ -590,6 +601,9 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
 		case I915_PMU_RC6_RESIDENCY:
 			val = get_rc6(&i915->gt);
 			break;
+		case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
+			val = ktime_to_ns(intel_gt_get_awake_time(&i915->gt));
+			break;
 		}
 	}
 
@@ -622,9 +636,13 @@ static void i915_pmu_enable(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
 		container_of(event->pmu, typeof(*i915), pmu.base);
-	unsigned int bit = event_enabled_bit(event);
 	struct i915_pmu *pmu = &i915->pmu;
 	unsigned long flags;
+	unsigned int bit;
+
+	bit = event_bit(event);
+	if (bit == -1)
+		goto update;
 
 	spin_lock_irqsave(&pmu->lock, flags);
 
@@ -670,6 +688,7 @@ static void i915_pmu_enable(struct perf_event *event)
 
 	spin_unlock_irqrestore(&pmu->lock, flags);
 
+update:
 	/*
 	 * Store the current counter value so we can report the correct delta
 	 * for all listeners. Even when the event was already enabled and has
@@ -682,10 +701,13 @@ static void i915_pmu_disable(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
 		container_of(event->pmu, typeof(*i915), pmu.base);
-	unsigned int bit = event_enabled_bit(event);
+	unsigned int bit = event_bit(event);
 	struct i915_pmu *pmu = &i915->pmu;
 	unsigned long flags;
 
+	if (bit == -1)
+		return;
+
 	spin_lock_irqsave(&pmu->lock, flags);
 
 	if (is_engine_event(event)) {
@@ -882,6 +904,7 @@ create_event_attributes(struct i915_pmu *pmu)
 		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
 		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
 		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
+		__event(I915_PMU_SOFTWARE_GT_AWAKE_TIME, "software-gt-awake-time", "ns"),
 	};
 	static const struct {
 		enum drm_i915_pmu_engine_sample sample;
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 8405d6da5b9a65ef0a9a78a7044b9bb098c18b93..60f9595f902cdf651c81d4a426f26d28e161649a 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -14,6 +14,21 @@
 
 struct drm_i915_private;
 
+/**
+ * Non-engine events for which we need to track the enabled-disabled
+ * transition and current state.
+ */
+enum i915_pmu_tracked_events {
+	__I915_PMU_ACTUAL_FREQUENCY_ENABLED = 0,
+	__I915_PMU_REQUESTED_FREQUENCY_ENABLED,
+	__I915_PMU_RC6_RESIDENCY_ENABLED,
+	__I915_PMU_TRACKED_EVENT_COUNT, /* count marker */
+};
+
+/**
+ * Slots used by the sampling timer (non-engine events), with some extras for
+ * convenience.
+ */
 enum {
 	__I915_SAMPLE_FREQ_ACT = 0,
 	__I915_SAMPLE_FREQ_REQ,
@@ -28,8 +43,7 @@ enum {
  * It is also used to know the needed number of event reference counters.
  */
 #define I915_PMU_MASK_BITS \
-	((1 << I915_PMU_SAMPLE_BITS) + \
-	 (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
+	(I915_ENGINE_SAMPLE_COUNT + __I915_PMU_TRACKED_EVENT_COUNT)
 
 #define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1)
 
@@ -66,18 +80,17 @@ struct i915_pmu {
 	 */
 	struct hrtimer timer;
 	/**
-	 * @enable: Bitmask of all currently enabled events.
+	 * @enable: Bitmask of specific enabled events.
+	 *
+	 * For some events we need to track their state and do some internal
+	 * housekeeping.
 	 *
-	 * Bits are derived from uAPI event numbers in a way that low 16 bits
-	 * correspond to engine event _sample_ _type_ (I915_SAMPLE_QUEUED is
-	 * bit 0), and higher bits correspond to other events (for instance
-	 * I915_PMU_ACTUAL_FREQUENCY is bit 16 etc).
+	 * Each engine event sampler type and event listed in enum
+	 * i915_pmu_tracked_events gets a bit in this field.
 	 *
-	 * In other words, low 16 bits are not per engine but per engine
-	 * sampler type, while the upper bits are directly mapped to other
-	 * event types.
+	 * Low bits are engine samplers and other events continue from there.
 	 */
-	u64 enable;
+	u32 enable;
 
 	/**
 	 * @timer_last:
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5375b219cc3b7c6f7a45046e5e778edc8fe1c866..7146cd0f32560cc1ec248d347a267515899586b6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2928,8 +2928,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 #define HDPORT_STATE			_MMIO(0x45050)
 #define   HDPORT_DPLL_USED_MASK		REG_GENMASK(14, 12)
-#define   HDPORT_PHY_USED_DP(phy)	REG_BIT(2 * (phy) + 2)
-#define   HDPORT_PHY_USED_HDMI(phy)	REG_BIT(2 * (phy) + 1)
+#define   HDPORT_DDI_USED(phy)		REG_BIT(2 * (phy) + 1)
 #define   HDPORT_ENABLED		REG_BIT(0)
 
 /* Make render/texture TLB fetches lower priorty than associated data
@@ -4347,12 +4346,13 @@ enum {
 #define _TRANS_VRR_CTL_B		0x61420
 #define _TRANS_VRR_CTL_C		0x62420
 #define _TRANS_VRR_CTL_D		0x63420
-#define TRANS_VRR_CTL(trans)		_MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
-#define   VRR_CTL_VRR_ENABLE		REG_BIT(31)
-#define   VRR_CTL_IGN_MAX_SHIFT		REG_BIT(30)
-#define   VRR_CTL_FLIP_LINE_EN		REG_BIT(29)
-#define   VRR_CTL_LINE_COUNT_MASK	REG_GENMASK(10, 3)
-#define   VRR_CTL_SW_FULLLINE_COUNT	REG_BIT(0)
+#define TRANS_VRR_CTL(trans)			_MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
+#define   VRR_CTL_VRR_ENABLE			REG_BIT(31)
+#define   VRR_CTL_IGN_MAX_SHIFT			REG_BIT(30)
+#define   VRR_CTL_FLIP_LINE_EN			REG_BIT(29)
+#define   VRR_CTL_PIPELINE_FULL_MASK		REG_GENMASK(10, 3)
+#define   VRR_CTL_PIPELINE_FULL(x)		REG_FIELD_PREP(VRR_CTL_PIPELINE_FULL_MASK, (x))
+#define   VRR_CTL_PIPELINE_FULL_OVERRIDE	REG_BIT(0)
 
 #define _TRANS_VRR_VMAX_A		0x60424
 #define _TRANS_VRR_VMAX_B		0x61424
@@ -6578,6 +6578,7 @@ enum {
 #define TGL_CURSOR_D_OFFSET 0x73080
 
 /* Display A control */
+#define _DSPAADDR_VLV				0x7017C /* vlv/chv */
 #define _DSPACNTR				0x70180
 #define   DISPLAY_PLANE_ENABLE			(1 << 31)
 #define   DISPLAY_PLANE_DISABLE			0
@@ -6614,6 +6615,7 @@ enum {
 #define   DISPPLANE_ROTATE_180			(1 << 15)
 #define   DISPPLANE_TRICKLE_FEED_DISABLE	(1 << 14) /* Ironlake */
 #define   DISPPLANE_TILED			(1 << 10)
+#define   DISPPLANE_ASYNC_FLIP			(1 << 9) /* g4x+ */
 #define   DISPPLANE_MIRROR			(1 << 8) /* CHV pipe B */
 #define _DSPAADDR				0x70184
 #define _DSPASTRIDE				0x70188
@@ -6625,6 +6627,7 @@ enum {
 #define _DSPASURFLIVE				0x701AC
 #define _DSPAGAMC				0x701E0
 
+#define DSPADDR_VLV(plane)	_MMIO_PIPE2(plane, _DSPAADDR_VLV)
 #define DSPCNTR(plane)		_MMIO_PIPE2(plane, _DSPACNTR)
 #define DSPADDR(plane)		_MMIO_PIPE2(plane, _DSPAADDR)
 #define DSPSTRIDE(plane)	_MMIO_PIPE2(plane, _DSPASTRIDE)
@@ -7070,6 +7073,8 @@ enum {
 #define _PLANE_KEYMAX_1_A			0x701a0
 #define _PLANE_KEYMAX_2_A			0x702a0
 #define  PLANE_KEYMAX_ALPHA(a)			((a) << 24)
+#define _PLANE_CC_VAL_1_A			0x701b4
+#define _PLANE_CC_VAL_2_A			0x702b4
 #define _PLANE_AUX_DIST_1_A			0x701c0
 #define _PLANE_AUX_DIST_2_A			0x702c0
 #define _PLANE_AUX_OFFSET_1_A			0x701c4
@@ -7111,6 +7116,13 @@ enum {
 #define _PLANE_NV12_BUF_CFG_1_A		0x70278
 #define _PLANE_NV12_BUF_CFG_2_A		0x70378
 
+#define _PLANE_CC_VAL_1_B			0x711b4
+#define _PLANE_CC_VAL_2_B			0x712b4
+#define _PLANE_CC_VAL_1(pipe)	_PIPE(pipe, _PLANE_CC_VAL_1_A, _PLANE_CC_VAL_1_B)
+#define _PLANE_CC_VAL_2(pipe)	_PIPE(pipe, _PLANE_CC_VAL_2_A, _PLANE_CC_VAL_2_B)
+#define PLANE_CC_VAL(pipe, plane)	\
+	_MMIO_PLANE(plane, _PLANE_CC_VAL_1(pipe), _PLANE_CC_VAL_2(pipe))
+
 /* Input CSC Register Definitions */
 #define _PLANE_INPUT_CSC_RY_GY_1_A	0x701E0
 #define _PLANE_INPUT_CSC_RY_GY_2_A	0x702E0
@@ -8213,6 +8225,7 @@ enum {
 #define  GEN11_LQSC_CLEAN_EVICT_DISABLE		(1 << 6)
 #define  GEN8_LQSC_RO_PERF_DIS			(1 << 27)
 #define  GEN8_LQSC_FLUSH_COHERENT_LINES		(1 << 21)
+#define  GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22)
 
 /* GEN8 chicken */
 #define HDC_CHICKEN0				_MMIO(0x7300)
@@ -9872,6 +9885,7 @@ enum skl_power_gate {
 					  _PORTD_HDCP2_BASE, \
 					  _PORTE_HDCP2_BASE, \
 					  _PORTF_HDCP2_BASE) + (x))
+
 #define PORT_HDCP2_AUTH(port)		_PORT_HDCP2_BASE(port, 0x98)
 #define _TRANSA_HDCP2_AUTH		0x66498
 #define _TRANSB_HDCP2_AUTH		0x66598
@@ -9911,6 +9925,44 @@ enum skl_power_gate {
 					 TRANS_HDCP2_STATUS(trans) : \
 					 PORT_HDCP2_STATUS(port))
 
+#define _PIPEA_HDCP2_STREAM_STATUS	0x668C0
+#define _PIPEB_HDCP2_STREAM_STATUS	0x665C0
+#define _PIPEC_HDCP2_STREAM_STATUS	0x666C0
+#define _PIPED_HDCP2_STREAM_STATUS	0x667C0
+#define PIPE_HDCP2_STREAM_STATUS(pipe)		_MMIO(_PICK((pipe), \
+						      _PIPEA_HDCP2_STREAM_STATUS, \
+						      _PIPEB_HDCP2_STREAM_STATUS, \
+						      _PIPEC_HDCP2_STREAM_STATUS, \
+						      _PIPED_HDCP2_STREAM_STATUS))
+
+#define _TRANSA_HDCP2_STREAM_STATUS		0x664C0
+#define _TRANSB_HDCP2_STREAM_STATUS		0x665C0
+#define TRANS_HDCP2_STREAM_STATUS(trans)	_MMIO_TRANS(trans, \
+						    _TRANSA_HDCP2_STREAM_STATUS, \
+						    _TRANSB_HDCP2_STREAM_STATUS)
+#define   STREAM_ENCRYPTION_STATUS	BIT(31)
+#define   STREAM_TYPE_STATUS		BIT(30)
+#define HDCP2_STREAM_STATUS(dev_priv, trans, pipe) \
+					(INTEL_GEN(dev_priv) >= 12 ? \
+					 TRANS_HDCP2_STREAM_STATUS(trans) : \
+					 PIPE_HDCP2_STREAM_STATUS(pipe))
+
+#define _PORTA_HDCP2_AUTH_STREAM		0x66F00
+#define _PORTB_HDCP2_AUTH_STREAM		0x66F04
+#define PORT_HDCP2_AUTH_STREAM(port)	_MMIO_PORT(port, \
+						   _PORTA_HDCP2_AUTH_STREAM, \
+						   _PORTB_HDCP2_AUTH_STREAM)
+#define _TRANSA_HDCP2_AUTH_STREAM		0x66F00
+#define _TRANSB_HDCP2_AUTH_STREAM		0x66F04
+#define TRANS_HDCP2_AUTH_STREAM(trans)	_MMIO_TRANS(trans, \
+						    _TRANSA_HDCP2_AUTH_STREAM, \
+						    _TRANSB_HDCP2_AUTH_STREAM)
+#define   AUTH_STREAM_TYPE		BIT(31)
+#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
+					(INTEL_GEN(dev_priv) >= 12 ? \
+					 TRANS_HDCP2_AUTH_STREAM(trans) : \
+					 PORT_HDCP2_AUTH_STREAM(port))
+
 /* Per-pipe DDI Function Control */
 #define _TRANS_DDI_FUNC_CTL_A		0x60400
 #define _TRANS_DDI_FUNC_CTL_B		0x61400
@@ -9960,6 +10012,7 @@ enum skl_power_gate {
 #define  TRANS_DDI_DP_VC_PAYLOAD_ALLOC	(1 << 8)
 #define  TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
 #define  TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
+#define  TRANS_DDI_HDCP_SELECT		REG_BIT(5)
 #define  TRANS_DDI_BFI_ENABLE		(1 << 4)
 #define  TRANS_DDI_HIGH_TMDS_CHAR_RATE	(1 << 4)
 #define  TRANS_DDI_HDMI_SCRAMBLING	(1 << 0)
@@ -10851,8 +10904,10 @@ enum skl_power_gate {
 #define  CNL_DRAM_RANK_3			(0x2 << 9)
 #define  CNL_DRAM_RANK_4			(0x3 << 9)
 
-/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
- * since on HSW we can't write to it using I915_WRITE. */
+/*
+ * Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
+ * since on HSW we can't write to it using intel_uncore_write.
+ */
 #define D_COMP_HSW			_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
 #define D_COMP_BDW			_MMIO(0x138144)
 #define  D_COMP_RCOMP_IN_PROGRESS	(1 << 9)
@@ -12053,6 +12108,12 @@ enum skl_power_gate {
 #define __GEN11_VCS2_MOCS0	0x10000
 #define GEN11_MFX2_MOCS(i)	_MMIO(__GEN11_VCS2_MOCS0 + (i) * 4)
 
+#define GEN9_SCRATCH_LNCF1		_MMIO(0xb008)
+#define   GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(0)
+
+#define GEN9_SCRATCH1			_MMIO(0xb11c)
+#define   EVICTION_PERF_FIX_ENABLE	REG_BIT(8)
+
 #define GEN10_SCRATCH_LNCF2		_MMIO(0xb0a0)
 #define   PMFLUSHDONE_LNICRSDROP	(1 << 20)
 #define   PMFLUSH_GAPL3UNBLOCK		(1 << 21)
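
As a usage note for the new PIPELINE_FULL field helpers, an illustrative
round trip (values made up) shows the REG_FIELD_PREP/REG_FIELD_GET pairing:

/* Illustrative only: program, then decode, the pipeline-full field. */
u32 ctl = VRR_CTL_VRR_ENABLE |
	  VRR_CTL_PIPELINE_FULL(4) |	/* 4 placed into bits 10:3 */
	  VRR_CTL_PIPELINE_FULL_OVERRIDE;
u32 full = REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, ctl);	/* == 4 */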
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5385b081a37630a97fe6be25445b1f64528045d5..22e39d938f17fc20d7e01463265c0cbfa2dac5ec 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -33,6 +33,7 @@
 #include "gem/i915_gem_context.h"
 #include "gt/intel_breadcrumbs.h"
 #include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
 #include "gt/intel_ring.h"
 #include "gt/intel_rps.h"
 
@@ -275,7 +276,7 @@ static void remove_from_engine(struct i915_request *rq)
 
 bool i915_request_retire(struct i915_request *rq)
 {
-	if (!i915_request_completed(rq))
+	if (!__i915_request_is_complete(rq))
 		return false;
 
 	RQ_TRACE(rq, "\n");
@@ -306,10 +307,8 @@ bool i915_request_retire(struct i915_request *rq)
 		spin_unlock_irq(&rq->lock);
 	}
 
-	if (i915_request_has_waitboost(rq)) {
-		GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
+	if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
 		atomic_dec(&rq->engine->gt->rps.num_waiters);
-	}
 
 	/*
 	 * We only loosely track inflight requests across preemption,
@@ -321,7 +320,8 @@ bool i915_request_retire(struct i915_request *rq)
 	 * after removing the breadcrumb and signaling it, so that we do not
 	 * inadvertently attach the breadcrumb to a completed request.
 	 */
-	remove_from_engine(rq);
+	if (!list_empty(&rq->sched.link))
+		remove_from_engine(rq);
 	GEM_BUG_ON(!llist_empty(&rq->execute_cb));
 
 	__list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
@@ -342,8 +342,7 @@ void i915_request_retire_upto(struct i915_request *rq)
 	struct i915_request *tmp;
 
 	RQ_TRACE(rq, "\n");
-
-	GEM_BUG_ON(!i915_request_completed(rq));
+	GEM_BUG_ON(!__i915_request_is_complete(rq));
 
 	do {
 		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
@@ -488,6 +487,8 @@ void __i915_request_skip(struct i915_request *rq)
 	if (rq->infix == rq->postfix)
 		return;
 
+	RQ_TRACE(rq, "error: %d\n", rq->fence.error);
+
 	/*
 	 * As this request likely depends on state from the lost
 	 * context, clear out all the user operations leaving the
@@ -513,6 +514,17 @@ void i915_request_set_error_once(struct i915_request *rq, int error)
 	} while (!try_cmpxchg(&rq->fence.error, &old, error));
 }
 
+void i915_request_mark_eio(struct i915_request *rq)
+{
+	if (__i915_request_is_complete(rq))
+		return;
+
+	GEM_BUG_ON(i915_request_signaled(rq));
+
+	i915_request_set_error_once(rq, -EIO);
+	i915_request_mark_complete(rq);
+}
+
 bool __i915_request_submit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
@@ -539,12 +551,10 @@ bool __i915_request_submit(struct i915_request *request)
 	 * dropped upon retiring. (Otherwise if resubmit a *retired*
 	 * request, this would be a horrible use-after-free.)
 	 */
-	if (i915_request_completed(request))
-		goto xfer;
-
-	if (unlikely(intel_context_is_closed(request->context) &&
-		     !intel_engine_has_heartbeat(engine)))
-		intel_context_set_banned(request->context);
+	if (__i915_request_is_complete(request)) {
+		list_del_init(&request->sched.link);
+		goto active;
+	}
 
 	if (unlikely(intel_context_is_banned(request->context)))
 		i915_request_set_error_once(request, -EIO);
@@ -579,11 +589,11 @@ bool __i915_request_submit(struct i915_request *request)
 	engine->serial++;
 	result = true;
 
-xfer:
-	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
-		list_move_tail(&request->sched.link, &engine->active.requests);
-		clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
-	}
+	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+	list_move_tail(&request->sched.link, &engine->active.requests);
+active:
+	clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
+	set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
 
 	/*
 	 * XXX Rollback bonded-execution on __i915_request_unsubmit()?
@@ -643,7 +653,7 @@ void __i915_request_unsubmit(struct i915_request *request)
 		i915_request_cancel_breadcrumb(request);
 
 	/* We've already spun, don't charge on resubmitting. */
-	if (request->sched.semaphores && i915_request_started(request))
+	if (request->sched.semaphores && __i915_request_has_started(request))
 		request->sched.semaphores = 0;
 
 	/*
@@ -855,7 +865,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	RCU_INIT_POINTER(rq->timeline, tl);
 	RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
 	rq->hwsp_seqno = tl->hwsp_seqno;
-	GEM_BUG_ON(i915_request_completed(rq));
+	GEM_BUG_ON(__i915_request_is_complete(rq));
 
 	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
 
@@ -961,15 +971,22 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
 	if (i915_request_started(signal))
 		return 0;
 
+	/*
+	 * The caller holds a reference on @signal, but we do not serialise
+	 * against it being retired and removed from the lists.
+	 *
+	 * We do not hold a reference to the request before @signal, and
+	 * so must be very careful to ensure that it is not _recycled_ as
+	 * we follow the link backwards.
+	 */
 	fence = NULL;
 	rcu_read_lock();
-	spin_lock_irq(&signal->lock);
 	do {
 		struct list_head *pos = READ_ONCE(signal->link.prev);
 		struct i915_request *prev;
 
 		/* Confirm signal has not been retired, the link is valid */
-		if (unlikely(i915_request_started(signal)))
+		if (unlikely(__i915_request_has_started(signal)))
 			break;
 
 		/* Is signal the earliest request on its timeline? */
@@ -994,7 +1011,6 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
 
 		fence = &prev->fence;
 	} while (0);
-	spin_unlock_irq(&signal->lock);
 	rcu_read_unlock();
 	if (!fence)
 		return 0;
@@ -1511,7 +1527,7 @@ __i915_request_add_to_timeline(struct i915_request *rq)
 	 */
 	prev = to_request(__i915_active_fence_set(&timeline->last_request,
 						  &rq->fence));
-	if (prev && !i915_request_completed(prev)) {
+	if (prev && !__i915_request_is_complete(prev)) {
 		/*
 		 * The requests are supposed to be kept in order. However,
 		 * we need to be wary in case the timeline->last_request
@@ -1582,6 +1598,12 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
 	return __i915_request_add_to_timeline(rq);
 }
 
+void __i915_request_queue_bh(struct i915_request *rq)
+{
+	i915_sw_fence_commit(&rq->semaphore);
+	i915_sw_fence_commit(&rq->submit);
+}
+
 void __i915_request_queue(struct i915_request *rq,
 			  const struct i915_sched_attr *attr)
 {
@@ -1598,8 +1620,10 @@ void __i915_request_queue(struct i915_request *rq,
 	 */
 	if (attr && rq->engine->schedule)
 		rq->engine->schedule(rq, attr);
-	i915_sw_fence_commit(&rq->semaphore);
-	i915_sw_fence_commit(&rq->submit);
+
+	local_bh_disable();
+	__i915_request_queue_bh(rq);
+	local_bh_enable(); /* kick tasklets */
 }
 
 void i915_request_add(struct i915_request *rq)
@@ -1823,7 +1847,7 @@ long i915_request_wait(struct i915_request *rq,
 	 * for unhappy HW.
 	 */
 	if (i915_request_is_ready(rq))
-		intel_engine_flush_submission(rq->engine);
+		__intel_engine_flush_submission(rq->engine, false);
 
 	for (;;) {
 		set_current_state(state);
@@ -1855,6 +1879,106 @@ long i915_request_wait(struct i915_request *rq,
 	return timeout;
 }
 
+static int print_sched_attr(const struct i915_sched_attr *attr,
+			    char *buf, int x, int len)
+{
+	if (attr->priority == I915_PRIORITY_INVALID)
+		return x;
+
+	x += snprintf(buf + x, len - x,
+		      " prio=%d", attr->priority);
+
+	return x;
+}
+
+static char queue_status(const struct i915_request *rq)
+{
+	if (i915_request_is_active(rq))
+		return 'E';
+
+	if (i915_request_is_ready(rq))
+		return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';
+
+	return 'U';
+}
+
+static const char *run_status(const struct i915_request *rq)
+{
+	if (__i915_request_is_complete(rq))
+		return "!";
+
+	if (__i915_request_has_started(rq))
+		return "*";
+
+	if (!i915_sw_fence_signaled(&rq->semaphore))
+		return "&";
+
+	return "";
+}
+
+static const char *fence_status(const struct i915_request *rq)
+{
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+		return "+";
+
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
+		return "-";
+
+	return "";
+}
+
+void i915_request_show(struct drm_printer *m,
+		       const struct i915_request *rq,
+		       const char *prefix,
+		       int indent)
+{
+	const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
+	char buf[80] = "";
+	int x = 0;
+
+	/*
+	 * The prefix is used to show the queue status, for which we use
+	 * the following flags:
+	 *
+	 *  U [Unready]
+	 *    - initial status upon being submitted by the user
+	 *
+	 *    - the request is not ready for execution as it is waiting
+	 *      for external fences
+	 *
+	 *  R [Ready]
+	 *    - all fences the request was waiting on have been signaled,
+	 *      and the request is now ready for execution and will be
+	 *      in a backend queue
+	 *
+	 *    - a ready request may still need to wait on semaphores
+	 *      [internal fences]
+	 *
+	 *  V [Ready/virtual]
+	 *    - same as ready, but queued over multiple backends
+	 *
+	 *  E [Executing]
+	 *    - the request has been transferred from the backend queue and
+	 *      submitted for execution on HW
+	 *
+	 *    - a completed request may still be regarded as executing; its
+	 *      status may not be updated until it is retired and removed
+	 *      from the lists
+	 */
+
+	x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
+
+	drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n",
+		   prefix, indent, "                ",
+		   queue_status(rq),
+		   rq->fence.context, rq->fence.seqno,
+		   run_status(rq),
+		   fence_status(rq),
+		   buf,
+		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+		   name);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_request.c"
 #include "selftests/i915_request.c"
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 92adfee30c7c022def7ad74e29865a16221a9764..1bfe214a47e986071a510e3f8cf18f2e73828176 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -43,6 +43,7 @@
 
 struct drm_file;
 struct drm_i915_gem_object;
+struct drm_printer;
 struct i915_request;
 
 struct i915_capture_list {
@@ -308,12 +309,14 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp);
 struct i915_request * __must_check
 i915_request_create(struct intel_context *ce);
 
-void i915_request_set_error_once(struct i915_request *rq, int error);
 void __i915_request_skip(struct i915_request *rq);
+void i915_request_set_error_once(struct i915_request *rq, int error);
+void i915_request_mark_eio(struct i915_request *rq);
 
 struct i915_request *__i915_request_commit(struct i915_request *request);
 void __i915_request_queue(struct i915_request *rq,
 			  const struct i915_sched_attr *attr);
+void __i915_request_queue_bh(struct i915_request *rq);
 
 bool i915_request_retire(struct i915_request *rq);
 void i915_request_retire_upto(struct i915_request *rq);
@@ -371,6 +374,11 @@ long i915_request_wait(struct i915_request *rq,
 #define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
 #define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
 
+void i915_request_show(struct drm_printer *m,
+		       const struct i915_request *rq,
+		       const char *prefix,
+		       int indent);
+
 static inline bool i915_request_signaled(const struct i915_request *rq)
 {
 	/* The request may live longer than its HWSP, so check flags first! */
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index cbb880b10c650d26496826d1f8b578075bd46cfe..7144239f08df85e3a6ab312429bda8b54ec1ea05 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -458,14 +458,10 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
 	if (!dep)
 		return -ENOMEM;
 
-	local_bh_disable();
-
 	if (!__i915_sched_node_add_dependency(node, signal, dep,
 					      flags | I915_DEPENDENCY_ALLOC))
 		i915_dependency_free(dep);
 
-	local_bh_enable(); /* kick submission tasklet */
-
 	return 0;
 }
 
@@ -504,6 +500,34 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 	spin_unlock_irq(&schedule_lock);
 }
 
+void i915_request_show_with_schedule(struct drm_printer *m,
+				     const struct i915_request *rq,
+				     const char *prefix,
+				     int indent)
+{
+	struct i915_dependency *dep;
+
+	i915_request_show(m, rq, prefix, indent);
+	if (i915_request_completed(rq))
+		return;
+
+	rcu_read_lock();
+	for_each_signaler(dep, rq) {
+		const struct i915_request *signaler =
+			node_to_request(dep->signaler);
+
+		/* Dependencies along the same timeline are expected. */
+		if (signaler->timeline == rq->timeline)
+			continue;
+
+		if (__i915_request_is_complete(signaler))
+			continue;
+
+		i915_request_show(m, signaler, prefix, indent + 2);
+	}
+	rcu_read_unlock();
+}
+
 static void i915_global_scheduler_shrink(void)
 {
 	kmem_cache_shrink(global.slab_dependencies);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 6f0bf00fc5690d361800454029bbea703b393790..4501e5ac263732b231eea827c10fc097dd32b454 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -13,6 +13,8 @@
 
 #include "i915_scheduler_types.h"
 
+struct drm_printer;
+
 #define priolist_for_each_request(it, plist, idx) \
 	for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
 		list_for_each_entry(it, &(plist)->requests[idx], sched.link)
@@ -54,4 +56,9 @@ static inline void i915_priolist_free(struct i915_priolist *p)
 		__i915_priolist_free(p);
 }
 
+void i915_request_show_with_schedule(struct drm_printer *m,
+				     const struct i915_request *rq,
+				     const char *prefix,
+				     int indent);
+
 #endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index f72e6c397b088889ae8072a35c5236a0d58427e1..343ed44d5ed4144973f3a1196d12e4791a39e446 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -81,4 +81,14 @@ struct i915_dependency {
 #define I915_DEPENDENCY_WEAK		BIT(2)
 };
 
+#define for_each_waiter(p__, rq__) \
+	list_for_each_entry_lockless(p__, \
+				     &(rq__)->sched.waiters_list, \
+				     wait_link)
+
+#define for_each_signaler(p__, rq__) \
+	list_for_each_entry_rcu(p__, \
+				&(rq__)->sched.signalers_list, \
+				signal_link)
+
 #endif /* _I915_SCHEDULER_TYPES_H_ */
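
A minimal usage sketch for the new iterators (hypothetical caller; the
locking requirements follow from the list primitives each macro wraps):

/* Signalers are RCU-protected, so walks must hold rcu_read_lock(). */
static void example_count_blockers(const struct i915_request *rq)
{
	struct i915_dependency *dep;
	unsigned int n = 0;

	rcu_read_lock();
	for_each_signaler(dep, rq)
		if (!__i915_request_is_complete(node_to_request(dep->signaler)))
			n++;
	rcu_read_unlock();

	pr_debug("%u incomplete signalers\n", n);
}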
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index db2111fc809e16fda37537448005db34c14d686f..63212df33c9ecee552d3ce501ef5dbee2bb49790 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -24,6 +24,7 @@
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include "display/intel_de.h"
 #include "display/intel_fbc.h"
 #include "display/intel_gmbus.h"
 #include "display/intel_vga.h"
@@ -39,21 +40,21 @@ static void intel_save_swf(struct drm_i915_private *dev_priv)
 	/* Scratch space */
 	if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
 		for (i = 0; i < 7; i++) {
-			dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
-			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+			dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv, SWF0(i));
+			dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
 		}
 		for (i = 0; i < 3; i++)
-			dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
+			dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv, SWF3(i));
 	} else if (IS_GEN(dev_priv, 2)) {
 		for (i = 0; i < 7; i++)
-			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+			dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
 	} else if (HAS_GMCH(dev_priv)) {
 		for (i = 0; i < 16; i++) {
-			dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
-			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+			dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv, SWF0(i));
+			dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
 		}
 		for (i = 0; i < 3; i++)
-			dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
+			dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv, SWF3(i));
 	}
 }
 
@@ -64,21 +65,21 @@ static void intel_restore_swf(struct drm_i915_private *dev_priv)
 	/* Scratch space */
 	if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
 		for (i = 0; i < 7; i++) {
-			I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
-			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+			intel_de_write(dev_priv, SWF0(i), dev_priv->regfile.saveSWF0[i]);
+			intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
 		}
 		for (i = 0; i < 3; i++)
-			I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
+			intel_de_write(dev_priv, SWF3(i), dev_priv->regfile.saveSWF3[i]);
 	} else if (IS_GEN(dev_priv, 2)) {
 		for (i = 0; i < 7; i++)
-			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+			intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
 	} else if (HAS_GMCH(dev_priv)) {
 		for (i = 0; i < 16; i++) {
-			I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
-			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+			intel_de_write(dev_priv, SWF0(i), dev_priv->regfile.saveSWF0[i]);
+			intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
 		}
 		for (i = 0; i < 3; i++)
-			I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
+			intel_de_write(dev_priv, SWF3(i), dev_priv->regfile.saveSWF3[i]);
 	}
 }
 
@@ -88,7 +89,7 @@ void i915_save_display(struct drm_i915_private *dev_priv)
 
 	/* Display arbitration control */
 	if (INTEL_GEN(dev_priv) <= 4)
-		dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
+		dev_priv->regfile.saveDSPARB = intel_de_read(dev_priv, DSPARB);
 
 	if (IS_GEN(dev_priv, 4))
 		pci_read_config_word(pdev, GCDGMBUS,
@@ -109,7 +110,7 @@ void i915_restore_display(struct drm_i915_private *dev_priv)
 
 	/* Display arbitration */
 	if (INTEL_GEN(dev_priv) <= 4)
-		I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+		intel_de_write(dev_priv, DSPARB, dev_priv->regfile.saveDSPARB);
 
 	/* only restore FBC info on the platform that supports FBC */
 	intel_fbc_global_disable(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 038d4c6884c5b708dc102d1b09fa663a17cee701..2744558f3050757f6c90b411f26dbb729999ec4c 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -18,10 +18,15 @@
 #define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
 #endif
 
-#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
-
 static DEFINE_SPINLOCK(i915_sw_fence_lock);
 
+#define WQ_FLAG_BITS \
+	BITS_PER_TYPE(typeof_member(struct wait_queue_entry, flags))
+
+/* after WQ_FLAG_* for safety */
+#define I915_SW_FENCE_FLAG_FENCE BIT(WQ_FLAG_BITS - 1)
+#define I915_SW_FENCE_FLAG_ALLOC BIT(WQ_FLAG_BITS - 2)
+
 enum {
 	DEBUG_FENCE_IDLE = 0,
 	DEBUG_FENCE_NOTIFY,
@@ -154,10 +159,10 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
 	spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
 	if (continuation) {
 		list_for_each_entry_safe(pos, next, &x->head, entry) {
-			if (pos->func == autoremove_wake_function)
-				pos->func(pos, TASK_NORMAL, 0, continuation);
-			else
+			if (pos->flags & I915_SW_FENCE_FLAG_FENCE)
 				list_move_tail(&pos->entry, continuation);
+			else
+				pos->func(pos, TASK_NORMAL, 0, continuation);
 		}
 	} else {
 		LIST_HEAD(extra);
@@ -166,9 +171,9 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
 			list_for_each_entry_safe(pos, next, &x->head, entry) {
 				int wake_flags;
 
-				wake_flags = fence->error;
-				if (pos->func == autoremove_wake_function)
-					wake_flags = 0;
+				wake_flags = 0;
+				if (pos->flags & I915_SW_FENCE_FLAG_FENCE)
+					wake_flags = fence->error;
 
 				pos->func(pos, TASK_NORMAL, wake_flags, &extra);
 			}
@@ -332,8 +337,8 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 					  struct i915_sw_fence *signaler,
 					  wait_queue_entry_t *wq, gfp_t gfp)
 {
+	unsigned int pending;
 	unsigned long flags;
-	int pending;
 
 	debug_fence_assert(fence);
 	might_sleep_if(gfpflags_allow_blocking(gfp));
@@ -349,7 +354,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 	if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
 		return -EINVAL;
 
-	pending = 0;
+	pending = I915_SW_FENCE_FLAG_FENCE;
 	if (!wq) {
 		wq = kmalloc(sizeof(*wq), gfp);
 		if (!wq) {
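
The resulting wait-queue flag layout, assuming the usual 32-bit
wait_queue_entry::flags, is then:

/*
 *   bit 31: I915_SW_FENCE_FLAG_FENCE - wakeup continues another sw_fence,
 *           so it receives the fence error and may be moved to the
 *           continuation list
 *   bit 30: I915_SW_FENCE_FLAG_ALLOC - entry was kmalloc'ed and is freed
 *           on wakeup
 *   low bits: core WQ_FLAG_* (WQ_FLAG_EXCLUSIVE, WQ_FLAG_WOKEN, ...)
 */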
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 4c305d838016ef0483c4182ada51fff3cf6c7d20..f9e780dee9decd1cd1b178ac7372ce28585312dc 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -87,7 +87,7 @@ bool i915_error_injected(void)
 
 void cancel_timer(struct timer_list *t)
 {
-	if (!READ_ONCE(t->expires))
+	if (!timer_active(t))
 		return;
 
 	del_timer(t);
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 54773371e6bd4db859b352ea9593c486001268ec..abd4dcd9f79c78e995df85900f268013d84704c5 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -438,9 +438,14 @@ static inline void __add_taint_for_CI(unsigned int taint)
 void cancel_timer(struct timer_list *t);
 void set_timer_ms(struct timer_list *t, unsigned long timeout);
 
+static inline bool timer_active(const struct timer_list *t)
+{
+	return READ_ONCE(t->expires);
+}
+
 static inline bool timer_expired(const struct timer_list *t)
 {
-	return READ_ONCE(t->expires) && !timer_pending(t);
+	return timer_active(t) && !timer_pending(t);
 }
 
 /*
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 5b3a3c6534540620eef52098ac22102ba5986605..a64adc8c883b01315b41a64a73b8079f533c7191 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -363,6 +363,21 @@ i915_vma_unpin_fence(struct i915_vma *vma)
 
 void i915_vma_parked(struct intel_gt *gt);
 
+static inline bool i915_vma_is_scanout(const struct i915_vma *vma)
+{
+	return test_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
+static inline void i915_vma_mark_scanout(struct i915_vma *vma)
+{
+	set_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
+static inline void i915_vma_clear_scanout(struct i915_vma *vma)
+{
+	clear_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
 #define for_each_until(cond) if (cond) break; else
 
 /**
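
A usage sketch for the new scanout tag (hypothetical call sites; the real
pin/unpin paths are not part of this hunk):

/* When a framebuffer's vma is pinned for display: */
i915_vma_mark_scanout(vma);

/* ...and in paths that must treat display vmas specially: */
if (i915_vma_is_scanout(vma))
	i915_vma_clear_scanout(vma);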
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 9e9082dc8f4b9d5c9ecab1e8235577781708035e..f5cb848b7a7ebc1505f4a69fa9531836e7b2fd1a 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -249,6 +249,9 @@ struct i915_vma {
 #define I915_VMA_USERFAULT	((int)BIT(I915_VMA_USERFAULT_BIT))
 #define I915_VMA_GGTT_WRITE	((int)BIT(I915_VMA_GGTT_WRITE_BIT))
 
+#define I915_VMA_SCANOUT_BIT	18
+#define I915_VMA_SCANOUT	((int)BIT(I915_VMA_SCANOUT_BIT))
+
 	struct i915_active active;
 
 #define I915_VMA_PAGES_BIAS 24
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index e67cec8fa2aa4e06b47b676c91f7ee70aa3f097d..f2d5ae59081e48e20c4c3b743e3c16d8396a18a4 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -104,7 +104,7 @@ void intel_device_info_print_static(const struct intel_device_info *info,
 	drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
 	drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);
 
-#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name))
 	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
 #undef PRINT_FLAG
 
@@ -117,150 +117,6 @@ void intel_device_info_print_runtime(const struct intel_runtime_info *info,
 				     struct drm_printer *p)
 {
 	drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
-	drm_printf(p, "CS timestamp frequency: %u Hz\n",
-		   info->cs_timestamp_frequency_hz);
-}
-
-static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
-{
-	u32 ts_override = intel_uncore_read(&dev_priv->uncore,
-					    GEN9_TIMESTAMP_OVERRIDE);
-	u32 base_freq, frac_freq;
-
-	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
-		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
-	base_freq *= 1000000;
-
-	frac_freq = ((ts_override &
-		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
-		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
-	frac_freq = 1000000 / (frac_freq + 1);
-
-	return base_freq + frac_freq;
-}
-
-static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
-					u32 rpm_config_reg)
-{
-	u32 f19_2_mhz = 19200000;
-	u32 f24_mhz = 24000000;
-	u32 crystal_clock = (rpm_config_reg &
-			     GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
-			    GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
-	switch (crystal_clock) {
-	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
-		return f19_2_mhz;
-	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
-		return f24_mhz;
-	default:
-		MISSING_CASE(crystal_clock);
-		return 0;
-	}
-}
-
-static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
-					u32 rpm_config_reg)
-{
-	u32 f19_2_mhz = 19200000;
-	u32 f24_mhz = 24000000;
-	u32 f25_mhz = 25000000;
-	u32 f38_4_mhz = 38400000;
-	u32 crystal_clock = (rpm_config_reg &
-			     GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
-			    GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
-	switch (crystal_clock) {
-	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
-		return f24_mhz;
-	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
-		return f19_2_mhz;
-	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
-		return f38_4_mhz;
-	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
-		return f25_mhz;
-	default:
-		MISSING_CASE(crystal_clock);
-		return 0;
-	}
-}
-
-static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
-{
-	struct intel_uncore *uncore = &dev_priv->uncore;
-	u32 f12_5_mhz = 12500000;
-	u32 f19_2_mhz = 19200000;
-	u32 f24_mhz = 24000000;
-
-	if (INTEL_GEN(dev_priv) <= 4) {
-		/* PRMs say:
-		 *
-		 *     "The value in this register increments once every 16
-		 *      hclks." (through the “Clocking Configuration”
-		 *      (“CLKCFG”) MCHBAR register)
-		 */
-		return RUNTIME_INFO(dev_priv)->rawclk_freq * 1000 / 16;
-	} else if (INTEL_GEN(dev_priv) <= 8) {
-		/* PRMs say:
-		 *
-		 *     "The PCU TSC counts 10ns increments; this timestamp
-		 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
-		 *      rolling over every 1.5 hours).
-		 */
-		return f12_5_mhz;
-	} else if (INTEL_GEN(dev_priv) <= 9) {
-		u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
-		u32 freq = 0;
-
-		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
-			freq = read_reference_ts_freq(dev_priv);
-		} else {
-			freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;
-
-			/* Now figure out how the command stream's timestamp
-			 * register increments from this frequency (it might
-			 * increment only every few clock cycle).
-			 */
-			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
-				      CTC_SHIFT_PARAMETER_SHIFT);
-		}
-
-		return freq;
-	} else if (INTEL_GEN(dev_priv) <= 12) {
-		u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
-		u32 freq = 0;
-
-		/* First figure out the reference frequency. There are 2 ways
-		 * we can compute the frequency, either through the
-		 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
-		 * tells us which one we should use.
-		 */
-		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
-			freq = read_reference_ts_freq(dev_priv);
-		} else {
-			u32 rpm_config_reg = intel_uncore_read(uncore, RPM_CONFIG0);
-
-			if (INTEL_GEN(dev_priv) <= 10)
-				freq = gen10_get_crystal_clock_freq(dev_priv,
-								rpm_config_reg);
-			else
-				freq = gen11_get_crystal_clock_freq(dev_priv,
-								rpm_config_reg);
-
-			/* Now figure out how the command stream's timestamp
-			 * register increments from this frequency (it might
-			 * increment only every few clock cycle).
-			 */
-			freq >>= 3 - ((rpm_config_reg &
-				       GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
-				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
-		}
-
-		return freq;
-	}
-
-	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
-	return 0;
 }
 
 #undef INTEL_VGA_DEVICE
@@ -505,19 +361,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
 	runtime->rawclk_freq = intel_read_rawclk(dev_priv);
 	drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);
 
-	/* Initialize command stream timestamp frequency */
-	runtime->cs_timestamp_frequency_hz =
-		read_timestamp_frequency(dev_priv);
-	if (runtime->cs_timestamp_frequency_hz) {
-		runtime->cs_timestamp_period_ns =
-			i915_cs_timestamp_ticks_to_ns(dev_priv, 1);
-		drm_dbg(&dev_priv->drm,
-			"CS timestamp wraparound in %lldms\n",
-			div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns,
-					    S32_MAX),
-				USEC_PER_SEC));
-	}
-
 	if (!HAS_DISPLAY(dev_priv)) {
 		dev_priv->drm.driver_features &= ~(DRIVER_MODESET |
 						   DRIVER_ATOMIC);
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index d92fa041c70009598759092df4099138b29e61f3..cf2d528c6e9b0fcaba5caf5cd229f9aa65f3f9ad 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -123,7 +123,6 @@ enum intel_ppgtt_type {
 	func(has_llc); \
 	func(has_logical_ring_contexts); \
 	func(has_logical_ring_elsq); \
-	func(has_logical_ring_preemption); \
 	func(has_master_unit_irq); \
 	func(has_pooled_eu); \
 	func(has_rc6); \
@@ -224,9 +223,6 @@ struct intel_runtime_info {
 	u8 num_scalers[I915_MAX_PIPES];
 
 	u32 rawclk_freq;
-
-	u32 cs_timestamp_frequency_hz;
-	u32 cs_timestamp_period_ns;
 };
 
 struct intel_driver_caps {
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 4754296a250e6c9cb08a3dcbf336d0c23b5c8ee3..73d256fc6830677c0706e08aa8555e192fff07f6 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -5,6 +5,7 @@
 
 #include "i915_drv.h"
 #include "intel_dram.h"
+#include "intel_sideband.h"
 
 struct dram_dimm_info {
 	u16 size;
@@ -201,22 +202,12 @@ skl_dram_get_channels_info(struct drm_i915_private *i915)
 		return -EINVAL;
 	}
 
-	/*
-	 * If any of the channel is single rank channel, worst case output
-	 * will be same as if single rank memory, so consider single rank
-	 * memory.
-	 */
-	if (ch0.ranks == 1 || ch1.ranks == 1)
-		dram_info->ranks = 1;
-	else
-		dram_info->ranks = max(ch0.ranks, ch1.ranks);
-
-	if (dram_info->ranks == 0) {
+	if (ch0.ranks == 0 && ch1.ranks == 0) {
 		drm_info(&i915->drm, "couldn't get memory rank information\n");
 		return -EINVAL;
 	}
 
-	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
+	dram_info->wm_lv_0_adjust_needed = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
 
 	dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
 
@@ -269,16 +260,12 @@ skl_get_dram_info(struct drm_i915_private *i915)
 	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
 				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
 
-	dram_info->bandwidth_kbps = dram_info->num_channels *
-		mem_freq_khz * 8;
-
-	if (dram_info->bandwidth_kbps == 0) {
+	if (dram_info->num_channels * mem_freq_khz == 0) {
 		drm_info(&i915->drm,
 			 "Couldn't get system memory bandwidth\n");
 		return -EINVAL;
 	}
 
-	dram_info->valid = true;
 	return 0;
 }
 
@@ -365,7 +352,7 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
 	struct dram_info *dram_info = &i915->dram_info;
 	u32 dram_channels;
 	u32 mem_freq_khz, val;
-	u8 num_active_channels;
+	u8 num_active_channels, valid_ranks = 0;
 	int i;
 
 	val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
@@ -375,10 +362,7 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
 	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
 	num_active_channels = hweight32(dram_channels);
 
-	/* Each active bit represents 4-byte channel */
-	dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
-
-	if (dram_info->bandwidth_kbps == 0) {
+	if (mem_freq_khz * num_active_channels == 0) {
 		drm_info(&i915->drm,
 			 "Couldn't get system memory bandwidth\n");
 		return -EINVAL;
@@ -410,57 +394,125 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
 			    dimm.size, dimm.width, dimm.ranks,
 			    intel_dram_type_str(type));
 
-		/*
-		 * If any of the channel is single rank channel,
-		 * worst case output will be same as if single rank
-		 * memory, so consider single rank memory.
-		 */
-		if (dram_info->ranks == 0)
-			dram_info->ranks = dimm.ranks;
-		else if (dimm.ranks == 1)
-			dram_info->ranks = 1;
+		if (valid_ranks == 0)
+			valid_ranks = dimm.ranks;
 
 		if (type != INTEL_DRAM_UNKNOWN)
 			dram_info->type = type;
 	}
 
-	if (dram_info->type == INTEL_DRAM_UNKNOWN || dram_info->ranks == 0) {
+	if (dram_info->type == INTEL_DRAM_UNKNOWN || valid_ranks == 0) {
 		drm_info(&i915->drm, "couldn't get memory information\n");
 		return -EINVAL;
 	}
 
-	dram_info->valid = true;
+	return 0;
+}
+
+static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
+{
+	struct dram_info *dram_info = &dev_priv->dram_info;
+	u32 val = 0;
+	int ret;
+
+	ret = sandybridge_pcode_read(dev_priv,
+				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+				     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
+				     &val, NULL);
+	if (ret)
+		return ret;
+
+	if (IS_GEN(dev_priv, 12)) {
+		switch (val & 0xf) {
+		case 0:
+			dram_info->type = INTEL_DRAM_DDR4;
+			break;
+		case 3:
+			dram_info->type = INTEL_DRAM_LPDDR4;
+			break;
+		case 4:
+			dram_info->type = INTEL_DRAM_DDR3;
+			break;
+		case 5:
+			dram_info->type = INTEL_DRAM_LPDDR3;
+			break;
+		default:
+			MISSING_CASE(val & 0xf);
+			return -1;
+		}
+	} else {
+		switch (val & 0xf) {
+		case 0:
+			dram_info->type = INTEL_DRAM_DDR4;
+			break;
+		case 1:
+			dram_info->type = INTEL_DRAM_DDR3;
+			break;
+		case 2:
+			dram_info->type = INTEL_DRAM_LPDDR3;
+			break;
+		case 3:
+			dram_info->type = INTEL_DRAM_LPDDR4;
+			break;
+		default:
+			MISSING_CASE(val & 0xf);
+			return -1;
+		}
+	}
+
+	dram_info->num_channels = (val & 0xf0) >> 4;
+	dram_info->num_qgv_points = (val & 0xf00) >> 8;
 
 	return 0;
 }
 
+static int gen11_get_dram_info(struct drm_i915_private *i915)
+{
+	int ret = skl_get_dram_info(i915);
+
+	if (ret)
+		return ret;
+
+	return icl_pcode_read_mem_global_info(i915);
+}
+
+static int gen12_get_dram_info(struct drm_i915_private *i915)
+{
+	/* Always needed for GEN12+ */
+	i915->dram_info.wm_lv_0_adjust_needed = true;
+
+	return icl_pcode_read_mem_global_info(i915);
+}
+
 void intel_dram_detect(struct drm_i915_private *i915)
 {
 	struct dram_info *dram_info = &i915->dram_info;
 	int ret;
 
 	/*
-	 * Assume 16Gb DIMMs are present until proven otherwise.
-	 * This is only used for the level 0 watermark latency
-	 * w/a which does not apply to bxt/glk.
+	 * Assume the level 0 watermark latency adjustment is needed until
+	 * proven otherwise; this w/a is not needed by bxt/glk.
 	 */
-	dram_info->is_16gb_dimm = !IS_GEN9_LP(i915);
+	dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915);
 
 	if (INTEL_GEN(i915) < 9 || !HAS_DISPLAY(i915))
 		return;
 
-	if (IS_GEN9_LP(i915))
+	if (INTEL_GEN(i915) >= 12)
+		ret = gen12_get_dram_info(i915);
+	else if (INTEL_GEN(i915) >= 11)
+		ret = gen11_get_dram_info(i915);
+	else if (IS_GEN9_LP(i915))
 		ret = bxt_get_dram_info(i915);
 	else
 		ret = skl_get_dram_info(i915);
 	if (ret)
 		return;
 
-	drm_dbg_kms(&i915->drm, "DRAM bandwidth: %u kBps, channels: %u\n",
-		    dram_info->bandwidth_kbps, dram_info->num_channels);
+	drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
 
-	drm_dbg_kms(&i915->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n",
-		    dram_info->ranks, yesno(dram_info->is_16gb_dimm));
+	drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
+		    yesno(dram_info->wm_lv_0_adjust_needed));
 }
 
 static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
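
A worked decode of the pcode reply, using an illustrative value under the
pre-gen12 encoding:

u32 val = 0x253;	/* made-up reply */
/* DRAM type:  val & 0xf          == 0x3 -> INTEL_DRAM_LPDDR4 */
/* channels:   (val & 0xf0) >> 4  == 0x5 -> 5 channels        */
/* QGV points: (val & 0xf00) >> 8 == 0x2 -> 2 QGV points      */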
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index b326993a10266750cf6b3752d5658a9eb715ebf8..1bfcdd89b24163b6367160719e246ccbee42dfe0 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -10,7 +10,7 @@
 #define REGION_MAP(type, inst) \
 	BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst)
 
-const u32 intel_region_map[] = {
+static const u32 intel_region_map[] = {
 	[INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0),
 	[INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0),
 	[INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 232490d89a834ae17ffee88992af66da8b6ddf68..6ffc0673f0057ce798c82f6e76cdc4a6350bf069 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -51,21 +51,16 @@ enum intel_region_id {
 	for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \
 		for_each_if((mr) = (i915)->mm.regions[id])
 
-/**
- * Memory regions encoded as type | instance
- */
-extern const u32 intel_region_map[];
-
 struct intel_memory_region_ops {
 	unsigned int flags;
 
 	int (*init)(struct intel_memory_region *mem);
 	void (*release)(struct intel_memory_region *mem);
 
-	struct drm_i915_gem_object *
-	(*create_object)(struct intel_memory_region *mem,
-			 resource_size_t size,
-			 unsigned int flags);
+	int (*init_object)(struct intel_memory_region *mem,
+			   struct drm_i915_gem_object *obj,
+			   resource_size_t size,
+			   unsigned int flags);
 };
 
 struct intel_memory_region {
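
The ops change inverts object allocation; a sketch of the new contract
(simplified, error handling beyond the unwind elided):

/* Caller allocates, the region backend only initializes. */
obj = i915_gem_object_alloc();
if (!obj)
	return ERR_PTR(-ENOMEM);

err = mem->ops->init_object(mem, obj, size, flags);
if (err) {
	i915_gem_object_free(obj);	/* uniform unwind on failure */
	return ERR_PTR(err);
}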
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index f31c0dabd0ccd64e47e8e30017a949e9213157ea..ecaf314d60b6b286be01526268c88f21e4641ee6 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -143,8 +143,9 @@ static bool intel_is_virt_pch(unsigned short id,
 		 sdevice == PCI_SUBDEVICE_ID_QEMU));
 }
 
-static unsigned short
-intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
+static void
+intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
+		      unsigned short *pch_id, enum intel_pch *pch_type)
 {
 	unsigned short id = 0;
 
@@ -181,12 +182,21 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
 	else
 		drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n");
 
-	return id;
+	*pch_type = intel_pch_type(dev_priv, id);
+
+	/* Sanity check virtual PCH id */
+	if (drm_WARN_ON(&dev_priv->drm,
+			id && *pch_type == PCH_NONE))
+		id = 0;
+
+	*pch_id = id;
 }
 
 void intel_detect_pch(struct drm_i915_private *dev_priv)
 {
 	struct pci_dev *pch = NULL;
+	unsigned short id;
+	enum intel_pch pch_type;
 
 	/* DG1 has south engine display on the same PCI device */
 	if (IS_DG1(dev_priv)) {
@@ -206,9 +216,6 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
 	 * of only checking the first one.
 	 */
 	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
-		unsigned short id;
-		enum intel_pch pch_type;
-
 		if (pch->vendor != PCI_VENDOR_ID_INTEL)
 			continue;
 
@@ -221,14 +228,7 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
 			break;
 		} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
 					     pch->subsystem_device)) {
-			id = intel_virt_detect_pch(dev_priv);
-			pch_type = intel_pch_type(dev_priv, id);
-
-			/* Sanity check virtual PCH id */
-			if (drm_WARN_ON(&dev_priv->drm,
-					id && pch_type == PCH_NONE))
-				id = 0;
-
+			intel_virt_detect_pch(dev_priv, &id, &pch_type);
 			dev_priv->pch_type = pch_type;
 			dev_priv->pch_id = id;
 			break;
@@ -244,10 +244,15 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
 			    "Display disabled, reverting to NOP PCH\n");
 		dev_priv->pch_type = PCH_NOP;
 		dev_priv->pch_id = 0;
+	} else if (!pch) {
+		if (run_as_guest() && HAS_DISPLAY(dev_priv)) {
+			intel_virt_detect_pch(dev_priv, &id, &pch_type);
+			dev_priv->pch_type = pch_type;
+			dev_priv->pch_id = id;
+		} else {
+			drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
+		}
 	}
 
-	if (!pch)
-		drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
-
 	pci_dev_put(pch);
 }
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a20b5051f18c11b58e58570899f46a4c83b99a05..0c3e63f27c29d8f547bea447e18c1a9320e5b097 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -82,24 +82,24 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
 		 * Must match Sampler, Pixel Back End, and Media. See
 		 * WaCompressedResourceSamplerPbeMediaNewHashMode.
 		 */
-		I915_WRITE(CHICKEN_PAR1_1,
-			   I915_READ(CHICKEN_PAR1_1) |
+		intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
+			   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) |
 			   SKL_DE_COMPRESSED_HASH_MODE);
 	}
 
 	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
-	I915_WRITE(CHICKEN_PAR1_1,
-		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
+	intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
+		   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
 
 	/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
-	I915_WRITE(GEN8_CHICKEN_DCPR_1,
-		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+	intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
+		   intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
 
 	/*
 	 * WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl
 	 * Display WA #0859: skl,bxt,kbl,glk,cfl
 	 */
-	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
 		   DISP_FBC_MEMORY_WAKE);
 }
 
@@ -108,21 +108,21 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
 	gen9_init_clock_gating(dev_priv);
 
 	/* WaDisableSDEUnitClockGating:bxt */
-	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
 		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
 	/*
 	 * FIXME:
 	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
 	 */
-	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
 		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
 
 	/*
 	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
 	 * to stay fully on.
 	 */
-	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+	intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
 		   PWM1_GATING_DIS | PWM2_GATING_DIS);
 
 	/*
@@ -131,20 +131,20 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
 	 * is off and a MMIO access is attempted by any privilege
 	 * application, using batch buffers or any other means.
 	 */
-	I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
+	intel_uncore_write(&dev_priv->uncore, RM_TIMEOUT, MMIO_TIMEOUT_US(950));
 
 	/*
 	 * WaFbcTurnOffFbcWatermark:bxt
 	 * Display WA #0562: bxt
 	 */
-	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
 		   DISP_FBC_WM_DIS);
 
 	/*
 	 * WaFbcHighMemBwCorruptionAvoidance:bxt
 	 * Display WA #0883: bxt
 	 */
-	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
 		   ILK_DPFC_DISABLE_DUMMY0);
 }
 
@@ -157,7 +157,7 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
 	 * Backlight PWM may stop in the asserted state, causing backlight
 	 * to stay fully on.
 	 */
-	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+	intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
 		   PWM1_GATING_DIS | PWM2_GATING_DIS);
 }
 
@@ -165,7 +165,7 @@ static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
 {
 	u32 tmp;
 
-	tmp = I915_READ(CLKCFG);
+	tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
 
 	switch (tmp & CLKCFG_FSB_MASK) {
 	case CLKCFG_FSB_533:
@@ -195,7 +195,7 @@ static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
 	}
 
 	/* detect pineview DDR3 setting */
-	tmp = I915_READ(CSHRDDR3CTL);
+	tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
 	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
 }
 
@@ -366,39 +366,39 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
 	u32 val;
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
-		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
-		POSTING_READ(FW_BLC_SELF_VLV);
+		was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+		intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+		intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
 	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
-		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
-		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
-		POSTING_READ(FW_BLC_SELF);
+		was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+		intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+		intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
 	} else if (IS_PINEVIEW(dev_priv)) {
-		val = I915_READ(DSPFW3);
+		val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
 		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
 		if (enable)
 			val |= PINEVIEW_SELF_REFRESH_EN;
 		else
 			val &= ~PINEVIEW_SELF_REFRESH_EN;
-		I915_WRITE(DSPFW3, val);
-		POSTING_READ(DSPFW3);
+		intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
+		intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
 	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
-		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+		was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
 		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
 			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
-		I915_WRITE(FW_BLC_SELF, val);
-		POSTING_READ(FW_BLC_SELF);
+		intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
+		intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
 	} else if (IS_I915GM(dev_priv)) {
 		/*
 		 * FIXME can't find a bit like this for 915G, and
 		 * yet it does have the related watermark in
 		 * FW_BLC_SELF. What's going on?
 		 */
-		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+		was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
 		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
 			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
-		I915_WRITE(INSTPM, val);
-		POSTING_READ(INSTPM);
+		intel_uncore_write(&dev_priv->uncore, INSTPM, val);
+		intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
 	} else {
 		return false;
 	}
@@ -494,20 +494,20 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
 
 	switch (pipe) {
 	case PIPE_A:
-		dsparb = I915_READ(DSPARB);
-		dsparb2 = I915_READ(DSPARB2);
+		dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+		dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
 		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
 		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
 		break;
 	case PIPE_B:
-		dsparb = I915_READ(DSPARB);
-		dsparb2 = I915_READ(DSPARB2);
+		dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+		dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
 		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
 		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
 		break;
 	case PIPE_C:
-		dsparb2 = I915_READ(DSPARB2);
-		dsparb3 = I915_READ(DSPARB3);
+		dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+		dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
 		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
 		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
 		break;
@@ -525,7 +525,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
 static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
 			      enum i9xx_plane_id i9xx_plane)
 {
-	u32 dsparb = I915_READ(DSPARB);
+	u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
 	int size;
 
 	size = dsparb & 0x7f;
@@ -541,7 +541,7 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
 static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
 			      enum i9xx_plane_id i9xx_plane)
 {
-	u32 dsparb = I915_READ(DSPARB);
+	u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
 	int size;
 
 	size = dsparb & 0x1ff;
@@ -558,7 +558,7 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
 static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
 			      enum i9xx_plane_id i9xx_plane)
 {
-	u32 dsparb = I915_READ(DSPARB);
+	u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
 	int size;
 
 	size = dsparb & 0x7f;
@@ -911,38 +911,38 @@ static void pnv_update_wm(struct intel_crtc *unused_crtc)
 		wm = intel_calculate_wm(clock, &pnv_display_wm,
 					pnv_display_wm.fifo_size,
 					cpp, latency->display_sr);
-		reg = I915_READ(DSPFW1);
+		reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
 		reg &= ~DSPFW_SR_MASK;
 		reg |= FW_WM(wm, SR);
-		I915_WRITE(DSPFW1, reg);
+		intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
 		drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
 
 		/* cursor SR */
 		wm = intel_calculate_wm(clock, &pnv_cursor_wm,
 					pnv_display_wm.fifo_size,
 					4, latency->cursor_sr);
-		reg = I915_READ(DSPFW3);
+		reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
 		reg &= ~DSPFW_CURSOR_SR_MASK;
 		reg |= FW_WM(wm, CURSOR_SR);
-		I915_WRITE(DSPFW3, reg);
+		intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
 
 		/* Display HPLL off SR */
 		wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
 					pnv_display_hplloff_wm.fifo_size,
 					cpp, latency->display_hpll_disable);
-		reg = I915_READ(DSPFW3);
+		reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
 		reg &= ~DSPFW_HPLL_SR_MASK;
 		reg |= FW_WM(wm, HPLL_SR);
-		I915_WRITE(DSPFW3, reg);
+		intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
 
 		/* cursor HPLL off SR */
 		wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
 					pnv_display_hplloff_wm.fifo_size,
 					4, latency->cursor_hpll_disable);
-		reg = I915_READ(DSPFW3);
+		reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
 		reg &= ~DSPFW_HPLL_CURSOR_MASK;
 		reg |= FW_WM(wm, HPLL_CURSOR);
-		I915_WRITE(DSPFW3, reg);
+		intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
 		drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
 
 		intel_set_memory_cxsr(dev_priv, true);
@@ -976,25 +976,25 @@ static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
 	for_each_pipe(dev_priv, pipe)
 		trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
 
-	I915_WRITE(DSPFW1,
+	intel_uncore_write(&dev_priv->uncore, DSPFW1,
 		   FW_WM(wm->sr.plane, SR) |
 		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
 		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
 		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
-	I915_WRITE(DSPFW2,
+	intel_uncore_write(&dev_priv->uncore, DSPFW2,
 		   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
 		   FW_WM(wm->sr.fbc, FBC_SR) |
 		   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
 		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
 		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
 		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
-	I915_WRITE(DSPFW3,
+	intel_uncore_write(&dev_priv->uncore, DSPFW3,
 		   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
 		   FW_WM(wm->sr.cursor, CURSOR_SR) |
 		   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
 		   FW_WM(wm->hpll.plane, HPLL_SR));
 
-	POSTING_READ(DSPFW1);
+	intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
 }
 
 #define FW_WM_VLV(value, plane) \
@@ -1008,7 +1008,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
 	for_each_pipe(dev_priv, pipe) {
 		trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
 
-		I915_WRITE(VLV_DDL(pipe),
+		intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
 			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
 			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
 			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
@@ -1020,35 +1020,35 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
 	 * high order bits so that there are no out of bounds values
 	 * present in the registers during the reprogramming.
 	 */
-	I915_WRITE(DSPHOWM, 0);
-	I915_WRITE(DSPHOWM1, 0);
-	I915_WRITE(DSPFW4, 0);
-	I915_WRITE(DSPFW5, 0);
-	I915_WRITE(DSPFW6, 0);
+	intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
+	intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
+	intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
+	intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
+	intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
 
-	I915_WRITE(DSPFW1,
+	intel_uncore_write(&dev_priv->uncore, DSPFW1,
 		   FW_WM(wm->sr.plane, SR) |
 		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
 		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
 		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
-	I915_WRITE(DSPFW2,
+	intel_uncore_write(&dev_priv->uncore, DSPFW2,
 		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
 		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
 		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
-	I915_WRITE(DSPFW3,
+	intel_uncore_write(&dev_priv->uncore, DSPFW3,
 		   FW_WM(wm->sr.cursor, CURSOR_SR));
 
 	if (IS_CHERRYVIEW(dev_priv)) {
-		I915_WRITE(DSPFW7_CHV,
+		intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
 			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
 			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
-		I915_WRITE(DSPFW8_CHV,
+		intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
 			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
 			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
-		I915_WRITE(DSPFW9_CHV,
+		intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
 			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
 			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
-		I915_WRITE(DSPHOWM,
+		intel_uncore_write(&dev_priv->uncore, DSPHOWM,
 			   FW_WM(wm->sr.plane >> 9, SR_HI) |
 			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
 			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
@@ -1060,10 +1060,10 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
 			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
 			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
 	} else {
-		I915_WRITE(DSPFW7,
+		intel_uncore_write(&dev_priv->uncore, DSPFW7,
 			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
 			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
-		I915_WRITE(DSPHOWM,
+		intel_uncore_write(&dev_priv->uncore, DSPHOWM,
 			   FW_WM(wm->sr.plane >> 9, SR_HI) |
 			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
 			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
@@ -1073,7 +1073,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
 			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
 	}
 
-	POSTING_READ(DSPFW1);
+	intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
 }
 
 #undef FW_WM_VLV
@@ -2310,14 +2310,14 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
 		    srwm);
 
 	/* 965 has limitations... */
-	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
+	intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
 		   FW_WM(8, CURSORB) |
 		   FW_WM(8, PLANEB) |
 		   FW_WM(8, PLANEA));
-	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
+	intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
 		   FW_WM(8, PLANEC_OLD));
 	/* update cursor SR watermark */
-	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
+	intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
 
 	if (cxsr_enabled)
 		intel_set_memory_cxsr(dev_priv, true);
@@ -2447,10 +2447,10 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
 			srwm = 1;
 
 		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
-			I915_WRITE(FW_BLC_SELF,
+			intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
 				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
 		else
-			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+			intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
 	}
 
 	drm_dbg_kms(&dev_priv->drm,
@@ -2464,8 +2464,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
 	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
 	fwater_hi = fwater_hi | (1 << 8);
 
-	I915_WRITE(FW_BLC, fwater_lo);
-	I915_WRITE(FW_BLC2, fwater_hi);
+	intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+	intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
 
 	if (enabled)
 		intel_set_memory_cxsr(dev_priv, true);
@@ -2488,13 +2488,13 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
 				       &i845_wm_info,
 				       dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
 				       4, pessimal_latency_ns);
-	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+	fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
 	fwater_lo |= (3<<8) | planea_wm;
 
 	drm_dbg_kms(&dev_priv->drm,
 		    "Setting FIFO watermarks - A: %d\n", planea_wm);
 
-	I915_WRITE(FW_BLC, fwater_lo);
+	intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
 }
 
 /* latency must be in 0.1us units. */
@@ -2930,7 +2930,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 		 * any underrun. If we are unable to get DIMM info, assume a
 		 * 16GB DIMM to avoid any underrun.
 		 */
-		if (dev_priv->dram_info.is_16gb_dimm)
+		if (dev_priv->dram_info.wm_lv_0_adjust_needed)
 			wm[0] += 1;
 
 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3534,17 +3534,17 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
 
 	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
 		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
-		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
+		intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
 		changed = true;
 	}
 	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
 		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
-		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
+		intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
 		changed = true;
 	}
 	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
 		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
-		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
+		intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
 		changed = true;
 	}
 
@@ -3574,56 +3574,56 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
 	_ilk_disable_lp_wm(dev_priv, dirty);
 
 	if (dirty & WM_DIRTY_PIPE(PIPE_A))
-		I915_WRITE(WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
+		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
 	if (dirty & WM_DIRTY_PIPE(PIPE_B))
-		I915_WRITE(WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
+		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
 	if (dirty & WM_DIRTY_PIPE(PIPE_C))
-		I915_WRITE(WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
+		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
 
 	if (dirty & WM_DIRTY_DDB) {
 		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-			val = I915_READ(WM_MISC);
+			val = intel_uncore_read(&dev_priv->uncore, WM_MISC);
 			if (results->partitioning == INTEL_DDB_PART_1_2)
 				val &= ~WM_MISC_DATA_PARTITION_5_6;
 			else
 				val |= WM_MISC_DATA_PARTITION_5_6;
-			I915_WRITE(WM_MISC, val);
+			intel_uncore_write(&dev_priv->uncore, WM_MISC, val);
 		} else {
-			val = I915_READ(DISP_ARB_CTL2);
+			val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
 			if (results->partitioning == INTEL_DDB_PART_1_2)
 				val &= ~DISP_DATA_PARTITION_5_6;
 			else
 				val |= DISP_DATA_PARTITION_5_6;
-			I915_WRITE(DISP_ARB_CTL2, val);
+			intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
 		}
 	}
 
 	if (dirty & WM_DIRTY_FBC) {
-		val = I915_READ(DISP_ARB_CTL);
+		val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL);
 		if (results->enable_fbc_wm)
 			val &= ~DISP_FBC_WM_DIS;
 		else
 			val |= DISP_FBC_WM_DIS;
-		I915_WRITE(DISP_ARB_CTL, val);
+		intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, val);
 	}
 
 	if (dirty & WM_DIRTY_LP(1) &&
 	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
-		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
+		intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
 
 	if (INTEL_GEN(dev_priv) >= 7) {
 		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
-			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
+			intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
 		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
-			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
+			intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
 	}
 
 	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
-		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
+		intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
 	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
-		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
+		intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
 	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
-		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
+		intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
 
 	dev_priv->wm.hw = *results;
 }
@@ -3640,7 +3640,7 @@ u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
 	u8 enabled_slices_mask = 0;
 
 	for (i = 0; i < max_slices; i++) {
-		if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE)
+		if (intel_uncore_read(&dev_priv->uncore, DBUF_CTL_S(i)) & DBUF_POWER_STATE)
 			enabled_slices_mask |= BIT(i);
 	}
 
@@ -4017,43 +4017,48 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
 	return 0;
 }
 
-/*
- * Calculate initial DBuf slice offset, based on slice size
- * and mask(i.e if slice size is 1024 and second slice is enabled
- * offset would be 1024)
- */
-static unsigned int
-icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
-				u32 slice_size,
-				u32 ddb_size)
+static int intel_dbuf_size(struct drm_i915_private *dev_priv)
 {
-	unsigned int offset = 0;
+	int ddb_size = INTEL_INFO(dev_priv)->ddb_size;
 
-	if (!dbuf_slice_mask)
-		return 0;
+	drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
 
-	offset = (ffs(dbuf_slice_mask) - 1) * slice_size;
+	if (INTEL_GEN(dev_priv) < 11)
+		return ddb_size - 4; /* 4 blocks for bypass path allocation */
 
-	WARN_ON(offset >= ddb_size);
-	return offset;
+	return ddb_size;
 }
 
-u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
+static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
 {
-	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
-	drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
+	return intel_dbuf_size(dev_priv) /
+		INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+}
 
-	if (INTEL_GEN(dev_priv) < 11)
-		return ddb_size - 4; /* 4 blocks for bypass path allocation */
+static void
+skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask,
+			 struct skl_ddb_entry *ddb)
+{
+	int slice_size = intel_dbuf_slice_size(dev_priv);
 
-	return ddb_size;
+	if (!slice_mask) {
+		ddb->start = 0;
+		ddb->end = 0;
+		return;
+	}
+
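+	/* Span the range from the first to the last enabled slice */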
+	ddb->start = (ffs(slice_mask) - 1) * slice_size;
+	ddb->end = fls(slice_mask) * slice_size;
+
+	WARN_ON(ddb->start >= ddb->end);
+	WARN_ON(ddb->end > intel_dbuf_size(dev_priv));
 }
 
 u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
 			    const struct skl_ddb_entry *entry)
 {
 	u32 slice_mask = 0;
-	u16 ddb_size = intel_get_ddb_size(dev_priv);
+	u16 ddb_size = intel_dbuf_size(dev_priv);
 	u16 num_supported_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
 	u16 slice_size = ddb_size / num_supported_slices;
 	u16 start_slice;
@@ -4077,116 +4082,40 @@ u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
 	return slice_mask;
 }
 
-static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
-				  u8 active_pipes);
-
-static int
-skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
-				   const struct intel_crtc_state *crtc_state,
-				   const u64 total_data_rate,
-				   struct skl_ddb_entry *alloc, /* out */
-				   int *num_active /* out */)
-{
-	struct drm_atomic_state *state = crtc_state->uapi.state;
-	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
-	const struct intel_crtc *crtc;
-	u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
-	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
-	struct intel_dbuf_state *new_dbuf_state =
-		intel_atomic_get_new_dbuf_state(intel_state);
-	const struct intel_dbuf_state *old_dbuf_state =
-		intel_atomic_get_old_dbuf_state(intel_state);
-	u8 active_pipes = new_dbuf_state->active_pipes;
-	u16 ddb_size;
-	u32 ddb_range_size;
-	u32 i;
-	u32 dbuf_slice_mask;
-	u32 offset;
-	u32 slice_size;
-	u32 total_slice_mask;
-	u32 start, end;
-	int ret;
-
-	*num_active = hweight8(active_pipes);
-
-	if (!crtc_state->hw.active) {
-		alloc->start = 0;
-		alloc->end = 0;
-		return 0;
-	}
-
-	ddb_size = intel_get_ddb_size(dev_priv);
-
-	slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
+{
+	const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+	int hdisplay, vdisplay;
 
-	/*
-	 * If the state doesn't change the active CRTC's or there is no
-	 * modeset request, then there's no need to recalculate;
-	 * the existing pipe allocation limits should remain unchanged.
-	 * Note that we're safe from racing commits since any racing commit
-	 * that changes the active CRTC list or do modeset would need to
-	 * grab _all_ crtc locks, including the one we currently hold.
-	 */
-	if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes &&
-	    !dev_priv->wm.distrust_bios_wm) {
-		/*
-		 * alloc may be cleared by clear_intel_crtc_state,
-		 * copy from old state to be sure
-		 *
-		 * FIXME get rid of this mess
-		 */
-		*alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
+	if (!crtc_state->hw.active)
 		return 0;
-	}
-
-	/*
-	 * Get allowed DBuf slices for correspondent pipe and platform.
-	 */
-	dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes);
-
-	/*
-	 * Figure out at which DBuf slice we start, i.e if we start at Dbuf S2
-	 * and slice size is 1024, the offset would be 1024
-	 */
-	offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask,
-						 slice_size, ddb_size);
-
-	/*
-	 * Figure out total size of allowed DBuf slices, which is basically
-	 * a number of allowed slices for that pipe multiplied by slice size.
-	 * Inside of this
-	 * range ddb entries are still allocated in proportion to display width.
-	 */
-	ddb_range_size = hweight8(dbuf_slice_mask) * slice_size;
 
 	/*
 	 * Watermark/ddb requirement highly depends upon width of the
 	 * framebuffer, so instead of allocating DDB equally among pipes,
 	 * distribute DDB based on the resolution/width of the display.
 	 */
-	total_slice_mask = dbuf_slice_mask;
-	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
-		const struct drm_display_mode *pipe_mode =
-			&crtc_state->hw.pipe_mode;
-		enum pipe pipe = crtc->pipe;
-		int hdisplay, vdisplay;
-		u32 pipe_dbuf_slice_mask;
+	drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
 
-		if (!crtc_state->hw.active)
-			continue;
+	return hdisplay;
+}
 
-		pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state,
-							       active_pipes);
+static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
+				    enum pipe for_pipe,
+				    unsigned int *weight_start,
+				    unsigned int *weight_end,
+				    unsigned int *weight_total)
+{
+	struct drm_i915_private *dev_priv =
+		to_i915(dbuf_state->base.state->base.dev);
+	enum pipe pipe;
 
-		/*
-		 * According to BSpec pipe can share one dbuf slice with another
-		 * pipes or pipe can use multiple dbufs, in both cases we
-		 * account for other pipes only if they have exactly same mask.
-		 * However we need to account how many slices we should enable
-		 * in total.
-		 */
-		total_slice_mask |= pipe_dbuf_slice_mask;
+	*weight_start = 0;
+	*weight_end = 0;
+	*weight_total = 0;
+
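+	/*
+	 * Walk the pipes in order, accumulating weights: weight_start and
+	 * weight_end bracket for_pipe's share of the total weight of all
+	 * pipes using the same slice set.
+	 */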
+	for_each_pipe(dev_priv, pipe) {
+		int weight = dbuf_state->weight[pipe];
 
 		/*
 		 * Do not account pipes using other slice sets
@@ -4195,42 +4124,78 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 		 * i.e. no partial intersection), so it is enough to check for
 		 * equality for now.
 		 */
-		if (dbuf_slice_mask != pipe_dbuf_slice_mask)
+		if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
 			continue;
 
-		drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
+		*weight_total += weight;
+		if (pipe < for_pipe) {
+			*weight_start += weight;
+			*weight_end += weight;
+		} else if (pipe == for_pipe) {
+			*weight_end += weight;
+		}
+	}
+}
 
-		total_width_in_range += hdisplay;
+static int
+skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	unsigned int weight_total, weight_start, weight_end;
+	const struct intel_dbuf_state *old_dbuf_state =
+		intel_atomic_get_old_dbuf_state(state);
+	struct intel_dbuf_state *new_dbuf_state =
+		intel_atomic_get_new_dbuf_state(state);
+	struct intel_crtc_state *crtc_state;
+	struct skl_ddb_entry ddb_slices;
+	enum pipe pipe = crtc->pipe;
+	u32 ddb_range_size;
+	u32 dbuf_slice_mask;
+	u32 start, end;
+	int ret;
 
-		if (pipe < for_pipe)
-			width_before_pipe_in_range += hdisplay;
-		else if (pipe == for_pipe)
-			pipe_width = hdisplay;
+	if (new_dbuf_state->weight[pipe] == 0) {
+		new_dbuf_state->ddb[pipe].start = 0;
+		new_dbuf_state->ddb[pipe].end = 0;
+		goto out;
 	}
 
-	/*
-	 * FIXME: For now we always enable slice S1 as per
-	 * the Bspec display initialization sequence.
-	 */
-	new_dbuf_state->enabled_slices = total_slice_mask | BIT(DBUF_S1);
+	dbuf_slice_mask = new_dbuf_state->slices[pipe];
 
-	if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
-		ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
-		if (ret)
-			return ret;
-	}
+	skl_ddb_entry_for_slices(dev_priv, dbuf_slice_mask, &ddb_slices);
+	ddb_range_size = skl_ddb_entry_size(&ddb_slices);
+
+	intel_crtc_dbuf_weights(new_dbuf_state, pipe,
+				&weight_start, &weight_end, &weight_total);
+
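+	/* Carve this pipe's span out of the slice range, proportional to its weight */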
+	start = ddb_range_size * weight_start / weight_total;
+	end = ddb_range_size * weight_end / weight_total;
+
+	new_dbuf_state->ddb[pipe].start = ddb_slices.start + start;
+	new_dbuf_state->ddb[pipe].end = ddb_slices.start + end;
+
+out:
+	if (skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
+				&new_dbuf_state->ddb[pipe]))
+		return 0;
+
+	ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+	if (ret)
+		return ret;
 
-	start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
-	end = ddb_range_size *
-		(width_before_pipe_in_range + pipe_width) / total_width_in_range;
+	crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
 
-	alloc->start = offset + start;
-	alloc->end = offset + end;
+	crtc_state->wm.skl.ddb = new_dbuf_state->ddb[pipe];
 
 	drm_dbg_kms(&dev_priv->drm,
-		    "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",
-		    for_crtc->base.id, for_crtc->name,
-		    dbuf_slice_mask, alloc->start, alloc->end, active_pipes);
+		    "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
+		    crtc->base.base.id, crtc->base.name,
+		    old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
+		    old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
+		    new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
+		    old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
 
 	return 0;
 }
@@ -4300,12 +4265,12 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
 
 	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
 	if (plane_id == PLANE_CURSOR) {
-		val = I915_READ(CUR_BUF_CFG(pipe));
+		val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe));
 		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
 		return;
 	}
 
-	val = I915_READ(PLANE_CTL(pipe, plane_id));
+	val = intel_uncore_read(&dev_priv->uncore, PLANE_CTL(pipe, plane_id));
 
 	/* No DDB allocated for disabled planes */
 	if (val & PLANE_CTL_ENABLE)
@@ -4314,11 +4279,11 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
 					      val & PLANE_CTL_ALPHA_MASK);
 
 	if (INTEL_GEN(dev_priv) >= 11) {
-		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+		val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
 		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
 	} else {
-		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
-		val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
+		val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
+		val2 = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
 
 		if (fourcc &&
 		    drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
@@ -4632,10 +4597,8 @@ static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
 	return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
 }
 
-static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
-				  u8 active_pipes)
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
@@ -4798,55 +4761,30 @@ skl_plane_wm_level(const struct intel_crtc_state *crtc_state,
 }
 
 static int
-skl_allocate_pipe_ddb(struct intel_atomic_state *state,
-		      struct intel_crtc *crtc)
+skl_allocate_plane_ddb(struct intel_atomic_state *state,
+		       struct intel_crtc *crtc)
 {
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_crtc_state *crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
+	const struct intel_dbuf_state *dbuf_state =
+		intel_atomic_get_new_dbuf_state(state);
+	const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
+	int num_active = hweight8(dbuf_state->active_pipes);
 	u16 alloc_size, start = 0;
 	u16 total[I915_MAX_PLANES] = {};
 	u16 uv_total[I915_MAX_PLANES] = {};
 	u64 total_data_rate;
 	enum plane_id plane_id;
-	int num_active;
 	u32 blocks;
 	int level;
-	int ret;
 
 	/* Clear the partitioning for disabled planes. */
 	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
 	memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
 
-	if (!crtc_state->hw.active) {
-		struct intel_atomic_state *state =
-			to_intel_atomic_state(crtc_state->uapi.state);
-		struct intel_dbuf_state *new_dbuf_state =
-			intel_atomic_get_new_dbuf_state(state);
-		const struct intel_dbuf_state *old_dbuf_state =
-			intel_atomic_get_old_dbuf_state(state);
-
-		/*
-		 * FIXME hack to make sure we compute this sensibly when
-		 * turning off all the pipes. Otherwise we leave it at
-		 * whatever we had previously, and then runtime PM will
-		 * mess it up by turning off all but S1. Remove this
-		 * once the dbuf state computation flow becomes sane.
-		 */
-		if (new_dbuf_state->active_pipes == 0) {
-			new_dbuf_state->enabled_slices = BIT(DBUF_S1);
-
-			if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
-				ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
-				if (ret)
-					return ret;
-			}
-		}
-
-		alloc->start = alloc->end = 0;
+	if (!crtc_state->hw.active)
 		return 0;
-	}
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		total_data_rate =
@@ -4855,12 +4793,6 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
 		total_data_rate =
 			skl_get_total_relative_data_rate(state, crtc);
 
-	ret = skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state,
-						 total_data_rate,
-						 alloc, &num_active);
-	if (ret)
-		return ret;
-
 	alloc_size = skl_ddb_entry_size(alloc);
 	if (alloc_size == 0)
 		return 0;
@@ -5731,6 +5663,18 @@ static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
 	return a->start < b->end && b->start < a->end;
 }
 
+static void skl_ddb_entry_union(struct skl_ddb_entry *a,
+				const struct skl_ddb_entry *b)
+{
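+	/* An entry with end == 0 is considered empty */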
+	if (a->end && b->end) {
+		a->start = min(a->start, b->start);
+		a->end = max(a->end, b->end);
+	} else if (b->end) {
+		a->start = b->start;
+		a->end = b->end;
+	}
+}
+
 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
 				 const struct skl_ddb_entry *entries,
 				 int num_entries, int ignore_idx)
@@ -5775,39 +5719,114 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
 	return 0;
 }
 
+static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(dbuf_state->base.state->base.dev);
+	u8 enabled_slices;
+	enum pipe pipe;
+
+	/*
+	 * FIXME: For now we always enable slice S1 as per
+	 * the Bspec display initialization sequence.
+	 */
+	enabled_slices = BIT(DBUF_S1);
+
+	for_each_pipe(dev_priv, pipe)
+		enabled_slices |= dbuf_state->slices[pipe];
+
+	return enabled_slices;
+}
+
 static int
 skl_compute_ddb(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	const struct intel_dbuf_state *old_dbuf_state;
-	const struct intel_dbuf_state *new_dbuf_state;
+	struct intel_dbuf_state *new_dbuf_state = NULL;
 	const struct intel_crtc_state *old_crtc_state;
 	struct intel_crtc_state *new_crtc_state;
 	struct intel_crtc *crtc;
 	int ret, i;
 
-	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
-					    new_crtc_state, i) {
-		ret = skl_allocate_pipe_ddb(state, crtc);
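+	/* Grab the dbuf state only if at least one crtc is in the atomic state */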
+	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+		new_dbuf_state = intel_atomic_get_dbuf_state(state);
+		if (IS_ERR(new_dbuf_state))
+			return PTR_ERR(new_dbuf_state);
+
+		old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+		break;
+	}
+
+	if (!new_dbuf_state)
+		return 0;
+
+	new_dbuf_state->active_pipes =
+		intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
+
+	if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
+		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
 		if (ret)
 			return ret;
+	}
 
-		ret = skl_ddb_add_affected_planes(old_crtc_state,
-						  new_crtc_state);
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
+		enum pipe pipe = crtc->pipe;
+
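+		/* Recompute this pipe's slice assignment against the new active pipes */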
+		new_dbuf_state->slices[pipe] =
+			skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes);
+
+		if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
+			continue;
+
+		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
 		if (ret)
 			return ret;
 	}
 
-	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
-	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+	new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
+
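+	/* Changing the set of enabled slices serializes against all other commits */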
+	if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
+		ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
+		if (ret)
+			return ret;
 
-	if (new_dbuf_state &&
-	    new_dbuf_state->enabled_slices != old_dbuf_state->enabled_slices)
 		drm_dbg_kms(&dev_priv->drm,
 			    "Enabled dbuf slices 0x%x -> 0x%x (out of %d dbuf slices)\n",
 			    old_dbuf_state->enabled_slices,
 			    new_dbuf_state->enabled_slices,
 			    INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
+	}
+
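+	/* Update the ddb weights only for the crtcs in this state */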
+	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+		enum pipe pipe = crtc->pipe;
+
+		new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
+
+		if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
+			continue;
+
+		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+		if (ret)
+			return ret;
+	}
+
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
+		ret = skl_crtc_allocate_ddb(state, crtc);
+		if (ret)
+			return ret;
+	}
+
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		ret = skl_allocate_plane_ddb(state, crtc);
+		if (ret)
+			return ret;
+
+		ret = skl_ddb_add_affected_planes(old_crtc_state,
+						  new_crtc_state);
+		if (ret)
+			return ret;
+	}
 
 	return 0;
 }
@@ -5944,83 +5963,6 @@ skl_print_wm_changes(struct intel_atomic_state *state)
 	}
 }
 
-static int intel_add_affected_pipes(struct intel_atomic_state *state,
-				    u8 pipe_mask)
-{
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct intel_crtc *crtc;
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		struct intel_crtc_state *crtc_state;
-
-		if ((pipe_mask & BIT(crtc->pipe)) == 0)
-			continue;
-
-		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
-		if (IS_ERR(crtc_state))
-			return PTR_ERR(crtc_state);
-	}
-
-	return 0;
-}
-
-static int
-skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
-{
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct intel_crtc_state *crtc_state;
-	struct intel_crtc *crtc;
-	int i, ret;
-
-	if (dev_priv->wm.distrust_bios_wm) {
-		/*
-		 * skl_ddb_get_pipe_allocation_limits() currently requires
-		 * all active pipes to be included in the state so that
-		 * it can redistribute the dbuf among them, and it really
-		 * wants to recompute things when distrust_bios_wm is set
-		 * so we add all the pipes to the state.
-		 */
-		ret = intel_add_affected_pipes(state, ~0);
-		if (ret)
-			return ret;
-	}
-
-	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
-		struct intel_dbuf_state *new_dbuf_state;
-		const struct intel_dbuf_state *old_dbuf_state;
-
-		new_dbuf_state = intel_atomic_get_dbuf_state(state);
-		if (IS_ERR(new_dbuf_state))
-			return PTR_ERR(new_dbuf_state);
-
-		old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
-
-		new_dbuf_state->active_pipes =
-			intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
-
-		if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes)
-			break;
-
-		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
-		if (ret)
-			return ret;
-
-		/*
-		 * skl_ddb_get_pipe_allocation_limits() currently requires
-		 * all active pipes to be included in the state so that
-		 * it can redistribute the dbuf among them.
-		 */
-		ret = intel_add_affected_pipes(state,
-					       new_dbuf_state->active_pipes);
-		if (ret)
-			return ret;
-
-		break;
-	}
-
-	return 0;
-}
-
 /*
  * To make sure the cursor watermark registers are always consistent
  * with our computed state, the following scenario needs special
@@ -6088,15 +6030,6 @@ skl_compute_wm(struct intel_atomic_state *state)
 	struct intel_crtc_state *new_crtc_state;
 	int ret, i;
 
-	ret = skl_ddb_add_affected_pipes(state);
-	if (ret)
-		return ret;
-
-	/*
-	 * Calculate WM's for all pipes that are part of this transaction.
-	 * Note that skl_ddb_add_affected_pipes may have added more CRTC's that
-	 * weren't otherwise being modified if pipe allocations had to change.
-	 */
 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
 		ret = skl_build_pipe_wm(state, crtc);
 		if (ret)
@@ -6231,9 +6164,9 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
 
 		for (level = 0; level <= max_level; level++) {
 			if (plane_id != PLANE_CURSOR)
-				val = I915_READ(PLANE_WM(pipe, plane_id, level));
+				val = intel_uncore_read(&dev_priv->uncore, PLANE_WM(pipe, plane_id, level));
 			else
-				val = I915_READ(CUR_WM(pipe, level));
+				val = intel_uncore_read(&dev_priv->uncore, CUR_WM(pipe, level));
 
 			skl_wm_level_from_reg_val(val, &wm->wm[level]);
 		}
@@ -6242,9 +6175,9 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
 			wm->sagv_wm0 = wm->wm[0];
 
 		if (plane_id != PLANE_CURSOR)
-			val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
+			val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id));
 		else
-			val = I915_READ(CUR_WM_TRANS(pipe));
+			val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe));
 
 		skl_wm_level_from_reg_val(val, &wm->trans_wm);
 	}
@@ -6255,20 +6188,49 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
 
 void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
 {
+	struct intel_dbuf_state *dbuf_state =
+		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
 	struct intel_crtc *crtc;
-	struct intel_crtc_state *crtc_state;
 
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		crtc_state = to_intel_crtc_state(crtc->base.state);
+		struct intel_crtc_state *crtc_state =
+			to_intel_crtc_state(crtc->base.state);
+		enum pipe pipe = crtc->pipe;
+		enum plane_id plane_id;
 
 		skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
 		crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
-	}
 
-	if (dev_priv->active_pipes) {
-		/* Fully recompute DDB on first atomic commit */
-		dev_priv->wm.distrust_bios_wm = true;
+		memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
+
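+		/* The pipe's ddb span is the union of its planes' allocations */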
+		for_each_plane_id_on_crtc(crtc, plane_id) {
+			struct skl_ddb_entry *ddb_y =
+				&crtc_state->wm.skl.plane_ddb_y[plane_id];
+			struct skl_ddb_entry *ddb_uv =
+				&crtc_state->wm.skl.plane_ddb_uv[plane_id];
+
+			skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
+						   plane_id, ddb_y, ddb_uv);
+
+			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
+			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv);
+		}
+
+		dbuf_state->slices[pipe] =
+			skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes);
+
+		dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
+
+		crtc_state->wm.skl.ddb = dbuf_state->ddb[pipe];
+
+		drm_dbg_kms(&dev_priv->drm,
+			    "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",
+			    crtc->base.base.id, crtc->base.name,
+			    dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
+			    dbuf_state->ddb[pipe].end, dbuf_state->active_pipes);
 	}
+
+	dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
 }
 
 static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
@@ -6280,7 +6242,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
 	struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
 	enum pipe pipe = crtc->pipe;
 
-	hw->wm_pipe[pipe] = I915_READ(WM0_PIPE_ILK(pipe));
+	hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
 
 	memset(active, 0, sizeof(*active));
 
@@ -6324,13 +6286,13 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
 {
 	u32 tmp;
 
-	tmp = I915_READ(DSPFW1);
+	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
 	wm->sr.plane = _FW_WM(tmp, SR);
 	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
 	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
 	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
 
-	tmp = I915_READ(DSPFW2);
+	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
 	wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
 	wm->sr.fbc = _FW_WM(tmp, FBC_SR);
 	wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
@@ -6338,7 +6300,7 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
 	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
 	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
 
-	tmp = I915_READ(DSPFW3);
+	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
 	wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
 	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
 	wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
@@ -6352,7 +6314,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
 	u32 tmp;
 
 	for_each_pipe(dev_priv, pipe) {
-		tmp = I915_READ(VLV_DDL(pipe));
+		tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
 
 		wm->ddl[pipe].plane[PLANE_PRIMARY] =
 			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
@@ -6364,34 +6326,34 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
 			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
 	}
 
-	tmp = I915_READ(DSPFW1);
+	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
 	wm->sr.plane = _FW_WM(tmp, SR);
 	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
 	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
 	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
 
-	tmp = I915_READ(DSPFW2);
+	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
 	wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
 	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
 	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
 
-	tmp = I915_READ(DSPFW3);
+	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
 	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
 
 	if (IS_CHERRYVIEW(dev_priv)) {
-		tmp = I915_READ(DSPFW7_CHV);
+		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
 		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
 		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
 
-		tmp = I915_READ(DSPFW8_CHV);
+		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
 		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
 		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
 
-		tmp = I915_READ(DSPFW9_CHV);
+		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
 		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
 		wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
 
-		tmp = I915_READ(DSPHOWM);
+		tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
 		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
 		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
 		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
@@ -6403,11 +6365,11 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
 		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
 		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
 	} else {
-		tmp = I915_READ(DSPFW7);
+		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
 		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
 		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
 
-		tmp = I915_READ(DSPHOWM);
+		tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
 		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
 		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
 		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
@@ -6428,7 +6390,7 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
 
 	g4x_read_wm_values(dev_priv, wm);
 
-	wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+	wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
 
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
 		struct intel_crtc_state *crtc_state =
@@ -6572,7 +6534,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
 
 	vlv_read_wm_values(dev_priv, wm);
 
-	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+	wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
 	wm->level = VLV_WM_LEVEL_PM2;
 
 	if (IS_CHERRYVIEW(dev_priv)) {
@@ -6719,9 +6681,9 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
  */
 static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
-	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
-	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+	intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM1_LP_SR_EN);
+	intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM1_LP_SR_EN);
+	intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM1_LP_SR_EN);
 
 	/*
 	 * Don't touch WM1S_LP_EN here.
@@ -6739,25 +6701,25 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
 	for_each_intel_crtc(&dev_priv->drm, crtc)
 		ilk_pipe_wm_get_hw_state(crtc);
 
-	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
-	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
-	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
+	hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
+	hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
+	hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
 
-	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+	hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
 	if (INTEL_GEN(dev_priv) >= 7) {
-		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
-		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+		hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
+		hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
 	}
 
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+		hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
 			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
 	else if (IS_IVYBRIDGE(dev_priv))
-		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
+		hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
 			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
 
 	hw->enable_fbc_wm =
-		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+		!(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
 }
 
 /**
@@ -6808,14 +6770,14 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
 	if (!HAS_IPC(dev_priv))
 		return;
 
-	val = I915_READ(DISP_ARB_CTL2);
+	val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
 
 	if (dev_priv->ipc_enabled)
 		val |= DISP_IPC_ENABLE;
 	else
 		val &= ~DISP_IPC_ENABLE;
 
-	I915_WRITE(DISP_ARB_CTL2, val);
+	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
 }
 
 static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
@@ -6850,7 +6812,7 @@ static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
 	 * gating for the panel power sequencer or it will fail to
 	 * start up when no ports are active.
 	 */
-	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+	intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
 }
 
 static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
@@ -6858,12 +6820,12 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
 	enum pipe pipe;
 
 	for_each_pipe(dev_priv, pipe) {
-		I915_WRITE(DSPCNTR(pipe),
-			   I915_READ(DSPCNTR(pipe)) |
+		intel_uncore_write(&dev_priv->uncore, DSPCNTR(pipe),
+			   intel_uncore_read(&dev_priv->uncore, DSPCNTR(pipe)) |
 			   DISPPLANE_TRICKLE_FEED_DISABLE);
 
-		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
-		POSTING_READ(DSPSURF(pipe));
+		intel_uncore_write(&dev_priv->uncore, DSPSURF(pipe), intel_uncore_read(&dev_priv->uncore, DSPSURF(pipe)));
+		intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe));
 	}
 }
 
@@ -6879,10 +6841,10 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
 		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
 		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
 
-	I915_WRITE(PCH_3DCGDIS0,
+	intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS0,
 		   MARIUNIT_CLOCK_GATE_DISABLE |
 		   SVSMUNIT_CLOCK_GATE_DISABLE);
-	I915_WRITE(PCH_3DCGDIS1,
+	intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS1,
 		   VFMUNIT_CLOCK_GATE_DISABLE);
 
 	/*
@@ -6892,12 +6854,12 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
 	 * The bit 5 of 0x42020
 	 * The bit 15 of 0x45000
 	 */
-	I915_WRITE(ILK_DISPLAY_CHICKEN2,
-		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
+	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+		   (intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
 		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
 	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
-	I915_WRITE(DISP_ARB_CTL,
-		   (I915_READ(DISP_ARB_CTL) |
+	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL,
+		   (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
 		    DISP_FBC_WM_DIS));
 
 	/*
@@ -6909,18 +6871,18 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
 	 */
 	if (IS_IRONLAKE_M(dev_priv)) {
 		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
-		I915_WRITE(ILK_DISPLAY_CHICKEN1,
-			   I915_READ(ILK_DISPLAY_CHICKEN1) |
+		intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
+			   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
 			   ILK_FBCQ_DIS);
-		I915_WRITE(ILK_DISPLAY_CHICKEN2,
-			   I915_READ(ILK_DISPLAY_CHICKEN2) |
+		intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+			   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
 			   ILK_DPARB_GATE);
 	}
 
-	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
 
-	I915_WRITE(ILK_DISPLAY_CHICKEN2,
-		   I915_READ(ILK_DISPLAY_CHICKEN2) |
+	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+		   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
 		   ILK_ELPIN_409_SELECT);
 
 	g4x_disable_trickle_feed(dev_priv);
@@ -6938,27 +6900,27 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
 	 * gating for the panel power sequencer or it will fail to
 	 * start up when no ports are active.
 	 */
-	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+	intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
 		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
 		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
-	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+	intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN2, intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN2) |
 		   DPLS_EDP_PPS_FIX_DIS);
 	/* The below fixes the weird display corruption, a few pixels shifted
 	 * downward, on (only) LVDS of some HP laptops with IVY.
 	 */
 	for_each_pipe(dev_priv, pipe) {
-		val = I915_READ(TRANS_CHICKEN2(pipe));
+		val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe));
 		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
 		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
 		if (dev_priv->vbt.fdi_rx_polarity_inverted)
 			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
 		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
 		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
-		I915_WRITE(TRANS_CHICKEN2(pipe), val);
+		intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN2(pipe), val);
 	}
 	/* WADP0ClockGatingDisable */
 	for_each_pipe(dev_priv, pipe) {
-		I915_WRITE(TRANS_CHICKEN1(pipe),
+		intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(pipe),
 			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
 	}
 }
@@ -6967,7 +6929,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
 {
 	u32 tmp;
 
-	tmp = I915_READ(MCH_SSKPD);
+	tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD);
 	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
 		drm_dbg_kms(&dev_priv->drm,
 			    "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
@@ -6978,14 +6940,14 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
 {
 	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
-	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
 
-	I915_WRITE(ILK_DISPLAY_CHICKEN2,
-		   I915_READ(ILK_DISPLAY_CHICKEN2) |
+	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+		   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
 		   ILK_ELPIN_409_SELECT);
 
-	I915_WRITE(GEN6_UCGCTL1,
-		   I915_READ(GEN6_UCGCTL1) |
+	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
+		   intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
 		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
 		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
 
@@ -7002,7 +6964,7 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
 	 * WaDisableRCCUnitClockGating:snb
 	 * WaDisableRCPBUnitClockGating:snb
 	 */
-	I915_WRITE(GEN6_UCGCTL2,
+	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
 		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
 		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 
@@ -7017,14 +6979,14 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
 	 *
 	 * WaFbcAsynchFlipDisableFbcQueue:snb
 	 */
-	I915_WRITE(ILK_DISPLAY_CHICKEN1,
-		   I915_READ(ILK_DISPLAY_CHICKEN1) |
+	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
+		   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
 		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
-	I915_WRITE(ILK_DISPLAY_CHICKEN2,
-		   I915_READ(ILK_DISPLAY_CHICKEN2) |
+	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+		   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
 		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
-	I915_WRITE(ILK_DSPCLK_GATE_D,
-		   I915_READ(ILK_DSPCLK_GATE_D) |
+	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D,
+		   intel_uncore_read(&dev_priv->uncore, ILK_DSPCLK_GATE_D) |
 		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
 		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
 
@@ -7042,23 +7004,23 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
 	 * disabled when not needed anymore in order to save power.
 	 */
 	if (HAS_PCH_LPT_LP(dev_priv))
-		I915_WRITE(SOUTH_DSPCLK_GATE_D,
-			   I915_READ(SOUTH_DSPCLK_GATE_D) |
+		intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D,
+			   intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
 			   PCH_LP_PARTITION_LEVEL_DISABLE);
 
 	/* WADPOClockGatingDisable:hsw */
-	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
-		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
+	intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A),
+		   intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A)) |
 		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
 }
 
 static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
 {
 	if (HAS_PCH_LPT_LP(dev_priv)) {
-		u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);
+		u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D);
 
 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
-		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+		intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val);
 	}
 }
 
@@ -7070,60 +7032,62 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
 	u32 val;
 
 	/* WaTempDisableDOPClkGating:bdw */
-	misccpctl = I915_READ(GEN7_MISCCPCTL);
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+	misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
+	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
 
-	val = I915_READ(GEN8_L3SQCREG1);
+	val = intel_uncore_read(&dev_priv->uncore, GEN8_L3SQCREG1);
 	val &= ~L3_PRIO_CREDITS_MASK;
 	val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
 	val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
-	I915_WRITE(GEN8_L3SQCREG1, val);
+	intel_uncore_write(&dev_priv->uncore, GEN8_L3SQCREG1, val);
 
 	/*
 	 * Wait at least 100 clocks before re-enabling clock gating.
 	 * See the definition of L3SQCREG1 in BSpec.
 	 */
-	POSTING_READ(GEN8_L3SQCREG1);
+	intel_uncore_posting_read(&dev_priv->uncore, GEN8_L3SQCREG1);
 	udelay(1);
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
 }
 
 static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
 {
 	/* Wa_1409120013:icl,ehl */
-	I915_WRITE(ILK_DPFC_CHICKEN,
+	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
 		   ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
 
 	/* This is not a Wa. Enable to reduce Sampler power */
-	I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
-		   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
+	intel_uncore_write(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
+		   intel_uncore_read(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
 
 	/*Wa_14010594013:icl, ehl */
 	intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
 			 0, CNL_DELAY_PMRSP);
 }
 
-static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
+static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	/* Wa_1409120013:tgl */
-	I915_WRITE(ILK_DPFC_CHICKEN,
-		   ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+	/* Wa_1409120013:tgl,rkl,adl_s,dg1 */
+	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
+			   ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
 
 	/* Wa_1409825376:tgl (pre-prod)*/
 	if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1))
-		I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
+		intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
 			   TGL_VRH_GATING_DIS);
 
-	/* Wa_14011059788:tgl */
+	/* Wa_14011059788:tgl,rkl,adl_s,dg1 */
 	intel_uncore_rmw(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
 			 0, DFR_DISABLE);
 }
 
 static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
 {
+	gen12lp_init_clock_gating(dev_priv);
+
 	/* Wa_1409836686:dg1[a0] */
 	if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0))
-		I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
+		intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
 			   DPT_GATING_DIS);
 }
 
@@ -7133,7 +7097,7 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
 		return;
 
 	/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
-	I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
+	intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
 		   CNP_PWM_CGE_GATING_DISABLE);
 }
 
@@ -7143,35 +7107,35 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
 	cnp_init_clock_gating(dev_priv);
 
 	/* This is not a Wa. Enable for better image quality */
-	I915_WRITE(_3D_CHICKEN3,
+	intel_uncore_write(&dev_priv->uncore, _3D_CHICKEN3,
 		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
 
 	/* WaEnableChickenDCPR:cnl */
-	I915_WRITE(GEN8_CHICKEN_DCPR_1,
-		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+	intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
+		   intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
 
 	/*
 	 * WaFbcWakeMemOn:cnl
 	 * Display WA #0859: cnl
 	 */
-	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
 		   DISP_FBC_MEMORY_WAKE);
 
-	val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
+	val = intel_uncore_read(&dev_priv->uncore, SLICE_UNIT_LEVEL_CLKGATE);
 	/* ReadHitWriteOnlyDisable:cnl */
 	val |= RCCUNIT_CLKGATE_DIS;
-	I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
+	intel_uncore_write(&dev_priv->uncore, SLICE_UNIT_LEVEL_CLKGATE, val);
 
 	/* Wa_2201832410:cnl */
-	val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
+	val = intel_uncore_read(&dev_priv->uncore, SUBSLICE_UNIT_LEVEL_CLKGATE);
 	val |= GWUNIT_CLKGATE_DIS;
-	I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);
+	intel_uncore_write(&dev_priv->uncore, SUBSLICE_UNIT_LEVEL_CLKGATE, val);
 
 	/* WaDisableVFclkgate:cnl */
 	/* WaVFUnitClockGatingDisable:cnl */
-	val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
+	val = intel_uncore_read(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE);
 	val |= VFUNIT_CLKGATE_DIS;
-	I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
+	intel_uncore_write(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE, val);
 }
 
 static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7180,21 +7144,21 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
 	gen9_init_clock_gating(dev_priv);
 
 	/* WAC6entrylatency:cfl */
-	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+	intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
 		   FBC_LLC_FULLY_OPEN);
 
 	/*
 	 * WaFbcTurnOffFbcWatermark:cfl
 	 * Display WA #0562: cfl
 	 */
-	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
 		   DISP_FBC_WM_DIS);
 
 	/*
 	 * WaFbcNukeOnHostModify:cfl
 	 * Display WA #0873: cfl
 	 */
-	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
 		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
 }
 
@@ -7203,31 +7167,31 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
 	gen9_init_clock_gating(dev_priv);
 
 	/* WAC6entrylatency:kbl */
-	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+	intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
 		   FBC_LLC_FULLY_OPEN);
 
 	/* WaDisableSDEUnitClockGating:kbl */
 	if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
-		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+		intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
 			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaDisableGamClockGating:kbl */
 	if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
-		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+		intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
 			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
 
 	/*
 	 * WaFbcTurnOffFbcWatermark:kbl
 	 * Display WA #0562: kbl
 	 */
-	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
 		   DISP_FBC_WM_DIS);
 
 	/*
 	 * WaFbcNukeOnHostModify:kbl
 	 * Display WA #0873: kbl
 	 */
-	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
 		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
 }
 
@@ -7236,32 +7200,32 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
 	gen9_init_clock_gating(dev_priv);
 
 	/* WaDisableDopClockGating:skl */
-	I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL) &
+	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL) &
 		   ~GEN7_DOP_CLOCK_GATE_ENABLE);
 
 	/* WAC6entrylatency:skl */
-	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+	intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
 		   FBC_LLC_FULLY_OPEN);
 
 	/*
 	 * WaFbcTurnOffFbcWatermark:skl
 	 * Display WA #0562: skl
 	 */
-	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
 		   DISP_FBC_WM_DIS);
 
 	/*
 	 * WaFbcNukeOnHostModify:skl
 	 * Display WA #0873: skl
 	 */
-	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
 		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
 
 	/*
 	 * WaFbcHighMemBwCorruptionAvoidance:skl
 	 * Display WA #0883: skl
 	 */
-	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
 		   ILK_DPFC_DISABLE_DUMMY0);
 }
 
@@ -7270,42 +7234,42 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
 	enum pipe pipe;
 
 	/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
-	I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
-		   I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
+	intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
+		   intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
 		   HSW_FBCQ_DIS);
 
 	/* WaSwitchSolVfFArbitrationPriority:bdw */
-	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+	intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
 	/* WaPsrDPAMaskVBlankInSRD:bdw */
-	I915_WRITE(CHICKEN_PAR1_1,
-		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
+	intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
+		   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
 
 	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
 	for_each_pipe(dev_priv, pipe) {
-		I915_WRITE(CHICKEN_PIPESL_1(pipe),
-			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
+		intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
+			   intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) |
 			   BDW_DPRS_MASK_VBLANK_SRD);
 	}
 
 	/* WaVSRefCountFullforceMissDisable:bdw */
 	/* WaDSRefCountFullforceMissDisable:bdw */
-	I915_WRITE(GEN7_FF_THREAD_MODE,
-		   I915_READ(GEN7_FF_THREAD_MODE) &
+	intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
+		   intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
 		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
 
-	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+	intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL,
 		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
 
 	/* WaDisableSDEUnitClockGating:bdw */
-	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
 		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaProgramL3SqcReg1Default:bdw */
 	gen8_set_l3sqc_credits(dev_priv, 30, 2);
 
 	/* WaKVMNotificationOnConfigChange:bdw */
-	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
+	intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR2_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR2_1)
 		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
 
 	lpt_init_clock_gating(dev_priv);
@@ -7315,24 +7279,24 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
 	 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
 	 * clock gating.
 	 */
-	I915_WRITE(GEN6_UCGCTL1,
-		   I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
+	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
+		   intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
 }
 
 static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
 {
 	/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
-	I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
-		   I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
+	intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
+		   intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
 		   HSW_FBCQ_DIS);
 
 	/* This is required by WaCatErrorRejectionIssue:hsw */
-	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
-		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+	intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+		   intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
 		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
 	/* WaSwitchSolVfFArbitrationPriority:hsw */
-	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+	intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
 	lpt_init_clock_gating(dev_priv);
 }
@@ -7341,26 +7305,26 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
 {
 	u32 snpcr;
 
-	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaFbcAsynchFlipDisableFbcQueue:ivb */
-	I915_WRITE(ILK_DISPLAY_CHICKEN1,
-		   I915_READ(ILK_DISPLAY_CHICKEN1) |
+	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
+		   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
 		   ILK_FBCQ_DIS);
 
 	/* WaDisableBackToBackFlipFix:ivb */
-	I915_WRITE(IVB_CHICKEN3,
+	intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
 		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
 		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
 	if (IS_IVB_GT1(dev_priv))
-		I915_WRITE(GEN7_ROW_CHICKEN2,
+		intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 	else {
 		/* must write both registers */
-		I915_WRITE(GEN7_ROW_CHICKEN2,
+		intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
-		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+		intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2_GT2,
 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 	}
 
@@ -7368,20 +7332,20 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
 	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
 	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
 	 */
-	I915_WRITE(GEN6_UCGCTL2,
+	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
 		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 
 	/* This is required by WaCatErrorRejectionIssue:ivb */
-	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
-			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+	intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+			intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
 			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
 	g4x_disable_trickle_feed(dev_priv);
 
-	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+	snpcr = intel_uncore_read(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR);
 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
 	snpcr |= GEN6_MBC_SNPCR_MED;
-	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+	intel_uncore_write(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR, snpcr);
 
 	if (!HAS_PCH_NOP(dev_priv))
 		cpt_init_clock_gating(dev_priv);
@@ -7392,58 +7356,58 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
 static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
 {
 	/* WaDisableBackToBackFlipFix:vlv */
-	I915_WRITE(IVB_CHICKEN3,
+	intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
 		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
 		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
 	/* WaDisableDopClockGating:vlv */
-	I915_WRITE(GEN7_ROW_CHICKEN2,
+	intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
 		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 
 	/* This is required by WaCatErrorRejectionIssue:vlv */
-	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
-		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+	intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+		   intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
 		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
 	/*
 	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
 	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
 	 */
-	I915_WRITE(GEN6_UCGCTL2,
+	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
 		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaDisableL3Bank2xClockGate:vlv
 	 * Disabling L3 clock gating- MMIO 940c[25] = 1
 	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
-	I915_WRITE(GEN7_UCGCTL4,
-		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+	intel_uncore_write(&dev_priv->uncore, GEN7_UCGCTL4,
+		   intel_uncore_read(&dev_priv->uncore, GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
 
 	/*
 	 * WaDisableVLVClockGating_VBIIssue:vlv
 	 * Disable clock gating on the GCFG unit to prevent a delay
 	 * in the reporting of vblank events.
 	 */
-	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
+	intel_uncore_write(&dev_priv->uncore, VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
 }
 
 static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
 {
 	/* WaVSRefCountFullforceMissDisable:chv */
 	/* WaDSRefCountFullforceMissDisable:chv */
-	I915_WRITE(GEN7_FF_THREAD_MODE,
-		   I915_READ(GEN7_FF_THREAD_MODE) &
+	intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
+		   intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
 		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
 
 	/* WaDisableSemaphoreAndSyncFlipWait:chv */
-	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+	intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL,
 		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
 
 	/* WaDisableCSUnitClockGating:chv */
-	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
 		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaDisableSDEUnitClockGating:chv */
-	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
 		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
 	/*
@@ -7458,17 +7422,17 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
 {
 	u32 dspclk_gate;
 
-	I915_WRITE(RENCLK_GATE_D1, 0);
-	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+	intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, 0);
+	intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
 		   GS_UNIT_CLOCK_GATE_DISABLE |
 		   CL_UNIT_CLOCK_GATE_DISABLE);
-	I915_WRITE(RAMCLK_GATE_D, 0);
+	intel_uncore_write(&dev_priv->uncore, RAMCLK_GATE_D, 0);
 	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
 		OVRUNIT_CLOCK_GATE_DISABLE |
 		OVCUNIT_CLOCK_GATE_DISABLE;
 	if (IS_GM45(dev_priv))
 		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
-	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+	intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D, dspclk_gate);
 
 	g4x_disable_trickle_feed(dev_priv);
 }
@@ -7489,49 +7453,49 @@ static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
 
 static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+	intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
 		   I965_RCC_CLOCK_GATE_DISABLE |
 		   I965_RCPB_CLOCK_GATE_DISABLE |
 		   I965_ISC_CLOCK_GATE_DISABLE |
 		   I965_FBC_CLOCK_GATE_DISABLE);
-	I915_WRITE(RENCLK_GATE_D2, 0);
-	I915_WRITE(MI_ARB_STATE,
+	intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, 0);
+	intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
 		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
 }
 
 static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	u32 dstate = I915_READ(D_STATE);
+	u32 dstate = intel_uncore_read(&dev_priv->uncore, D_STATE);
 
 	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
 		DSTATE_DOT_CLOCK_GATING;
-	I915_WRITE(D_STATE, dstate);
+	intel_uncore_write(&dev_priv->uncore, D_STATE, dstate);
 
 	if (IS_PINEVIEW(dev_priv))
-		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+		intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
 
 	/* IIR "flip pending" means done if this bit is set */
-	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+	intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
 
 	/* interrupts should cause a wake up from C3 */
-	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
+	intel_uncore_write(&dev_priv->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
 
 	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
-	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+	intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
 
-	I915_WRITE(MI_ARB_STATE,
+	intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
 		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
 }
 
 static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+	intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
 
 	/* interrupts should cause a wake up from C3 */
-	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
+	intel_uncore_write(&dev_priv->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
 		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
 
-	I915_WRITE(MEM_MODE,
+	intel_uncore_write(&dev_priv->uncore, MEM_MODE,
 		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
 
 	/*
@@ -7541,13 +7505,13 @@ static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
 	 * absolutely nothing) would not allow FBC to recompress
 	 * until a 2D blit occurs.
 	 */
-	I915_WRITE(SCPD0,
+	intel_uncore_write(&dev_priv->uncore, SCPD0,
 		   _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
 }
 
 static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(MEM_MODE,
+	intel_uncore_write(&dev_priv->uncore, MEM_MODE,
 		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
 		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
 }
@@ -7583,7 +7547,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
 	if (IS_DG1(dev_priv))
 		dev_priv->display.init_clock_gating = dg1_init_clock_gating;
 	else if (IS_GEN(dev_priv, 12))
-		dev_priv->display.init_clock_gating = tgl_init_clock_gating;
+		dev_priv->display.init_clock_gating = gen12lp_init_clock_gating;
 	else if (IS_GEN(dev_priv, 11))
 		dev_priv->display.init_clock_gating = icl_init_clock_gating;
 	else if (IS_CANNONLAKE(dev_priv))
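The clock-gating hunks above are a single mechanical conversion: every implicit-dev_priv I915_READ()/I915_WRITE() becomes an explicit call on &dev_priv->uncore. A minimal sketch of the recurring set-bits shape, assuming only the intel_uncore_read/write/rmw API already used in the hunks (the helper names are hypothetical):

	/* Hypothetical helper: the read-OR-write pattern repeated above. */
	static void example_set_bits(struct intel_uncore *uncore,
				     i915_reg_t reg, u32 bits)
	{
		u32 val = intel_uncore_read(uncore, reg);

		intel_uncore_write(uncore, reg, val | bits);
	}

	/* Condensed equivalent, as used for Wa_14011059788 above,
	 * when no extra ordering around the write is needed.
	 */
	static void example_set_bits_rmw(struct intel_uncore *uncore,
					 i915_reg_t reg, u32 bits)
	{
		intel_uncore_rmw(uncore, reg, 0, bits);
	}
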
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index eab83e251dd55e76213ecfcb0ee52fde17026161..97550cf0b6df02a2627af800a693358119149f09 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -9,8 +9,10 @@
 #include <linux/types.h>
 
 #include "display/intel_bw.h"
+#include "display/intel_display.h"
 #include "display/intel_global_state.h"
 
+#include "i915_drv.h"
 #include "i915_reg.h"
 
 struct drm_device;
@@ -40,7 +42,6 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
 			       struct skl_ddb_entry *ddb_y,
 			       struct skl_ddb_entry *ddb_uv);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
-u16 intel_get_ddb_size(struct drm_i915_private *dev_priv);
 u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
 			    const struct skl_ddb_entry *entry);
 void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
@@ -69,6 +70,10 @@ bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
 struct intel_dbuf_state {
 	struct intel_global_state base;
 
+	struct skl_ddb_entry ddb[I915_MAX_PIPES];
+	unsigned int weight[I915_MAX_PIPES];
+	u8 slices[I915_MAX_PIPES];
+
 	u8 enabled_slices;
 	u8 active_pipes;
 };
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 02ebf5a04a9bc15fcd6e985e3b4c21e7df3a200e..0ec0cf19195554313c646c201c40aa576a203d9e 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -404,8 +404,8 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
 	lockdep_assert_held(&i915->sb_lock);
 
 	/*
-	 * GEN6_PCODE_* are outside of the forcewake domain, we can
-	 * use te fw I915_READ variants to reduce the amount of work
+	 * GEN6_PCODE_* are outside of the forcewake domain, we can use
+	 * intel_uncore_read/write_fw variants to reduce the amount of work
 	 * required when reading/writing.
 	 */
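The corrected comment refers to the raw _fw accessor family: GEN6_PCODE_* registers sit outside every forcewake domain, so the cheap accessors are safe here and only sb_lock serializes the mailbox. The kick sequence in this function has roughly this shape (a sketch of the pattern, not a quote of the surrounding code):

	/* Raw _fw accessors: no forcewake bookkeeping is needed for
	 * registers outside all forcewake domains.
	 */
	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
	intel_uncore_write_fw(uncore, GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
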
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 1c14a07eba7d6f651b7a66208f1f484b20a8f1e6..9ac501bcfdadb707cee272bd43bd0cbad44a6626 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -2070,7 +2070,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
  * This routine waits until the target register @reg contains the expected
  * @value after applying the @mask, i.e. it waits until ::
  *
- *     (I915_READ_FW(reg) & mask) == value
+ *     (intel_uncore_read_fw(uncore, reg) & mask) == value
  *
  * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
  * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
@@ -2126,7 +2126,7 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore,
  * This routine waits until the target register @reg contains the expected
  * @value after applying the @mask, i.e. it waits until ::
  *
- *     (I915_READ(reg) & mask) == value
+ *     (intel_uncore_read(uncore, reg) & mask) == value
  *
  * Otherwise, the wait will timeout after @timeout_ms milliseconds.
  *
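As a usage sketch of the polling contract documented above (the register and bit names are hypothetical):

	/* Wait up to 10 ms until
	 *     (intel_uncore_read(uncore, reg) & mask) == value
	 */
	err = intel_wait_for_register(uncore, EXAMPLE_STATUS_REG,
				      EXAMPLE_READY_BIT, EXAMPLE_READY_BIT,
				      10);
	if (err)
		return err;	/* -ETIMEDOUT if the bit never latched */
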
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index bd2467284295c95140ceb0a09720375f05953c11..59f0da8f1fbb1bfab10238e49c4f76ff075aef51 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -216,7 +216,7 @@ void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
 
 /*
  * Like above but the caller must manage the uncore.lock itself.
- * Must be used with I915_READ_FW and friends.
+ * Must be used with intel_uncore_read_fw() and friends.
  */
 void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
 					enum forcewake_domains domains);
@@ -318,8 +318,8 @@ __uncore_write(write_notrace, 32, l, false)
  * will be implemented using 2 32-bit writes in an arbitrary order with
  * an arbitrary delay between them. This can cause the hardware to
  * act upon the intermediate value, possibly leading to corruption and
- * machine death. For this reason we do not support I915_WRITE64, or
- * uncore->funcs.mmio_writeq.
+ * machine death. For this reason we do not support intel_uncore_write64,
+ * or uncore->funcs.mmio_writeq.
  *
  * When reading a 64-bit value as two 32-bit values, the delay may cause
  * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
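The rewritten comment keeps its point: a 64-bit MMIO write degrades into two 32-bit writes with a window in which the hardware can sample a half-updated value. The usual defence, sketched here with hypothetical register halves (this mirrors how i915 orders fence-register updates; it is not code from this patch), is to ensure no intermediate combination ever looks valid:

	/* The VALID bit is assumed to live in the low dword. */
	intel_uncore_write(uncore, EXAMPLE_REG_LO, 0);	/* drop VALID first */
	intel_uncore_write(uncore, EXAMPLE_REG_HI, upper_32_bits(val));
	intel_uncore_write(uncore, EXAMPLE_REG_LO, lower_32_bits(val)); /* VALID last */
	intel_uncore_posting_read(uncore, EXAMPLE_REG_LO);
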
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 412e21604a0508824faf356583e6d85f8ce6fef9..dc394fb7ccfaa663d7356a0000f5959174e2dfe7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -8,6 +8,7 @@
 
 #include "gem/selftests/igt_gem_utils.h"
 #include "gem/selftests/mock_context.h"
+#include "gem/i915_gem_pm.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index f88473d396f4b8ffdbd73c4d0d8d6bea2f2571fd..f99bb0113726a976ae45d5f68772630d82a290ed 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -38,8 +38,8 @@ static void quirk_add(struct drm_i915_gem_object *obj,
 		      struct list_head *objects)
 {
 	/* quirk is only for live tiled objects, use it to declare ownership */
-	GEM_BUG_ON(obj->mm.quirked);
-	obj->mm.quirked = true;
+	GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+	i915_gem_object_set_tiling_quirk(obj);
 	list_add(&obj->st_link, objects);
 }
 
@@ -85,7 +85,7 @@ static void unpin_ggtt(struct i915_ggtt *ggtt)
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
-		if (vma->obj->mm.quirked)
+		if (i915_gem_object_has_tiling_quirk(vma->obj))
 			i915_vma_unpin(vma);
 }
 
@@ -94,8 +94,8 @@ static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list)
 	struct drm_i915_gem_object *obj, *on;
 
 	list_for_each_entry_safe(obj, on, list, st_link) {
-		GEM_BUG_ON(!obj->mm.quirked);
-		obj->mm.quirked = false;
+		GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
+		i915_gem_object_clear_tiling_quirk(obj);
 		i915_gem_object_put(obj);
 	}
 
@@ -442,28 +442,22 @@ static int igt_evict_contexts(void *arg)
 	/* Overfill the GGTT with context objects and so try to evict one. */
 	for_each_engine(engine, gt, id) {
 		struct i915_sw_fence fence;
-		struct file *file;
-
-		file = mock_file(i915);
-		if (IS_ERR(file)) {
-			err = PTR_ERR(file);
-			break;
-		}
 
 		count = 0;
 		onstack_fence_init(&fence);
 		do {
+			struct intel_context *ce;
 			struct i915_request *rq;
-			struct i915_gem_context *ctx;
 
-			ctx = live_context(i915, file);
-			if (IS_ERR(ctx))
+			ce = intel_context_create(engine);
+			if (IS_ERR(ce))
 				break;
 
 			/* We will need some GGTT space for the rq's context */
 			igt_evict_ctl.fail_if_busy = true;
-			rq = igt_request_alloc(ctx, engine);
+			rq = intel_context_create_request(ce);
 			igt_evict_ctl.fail_if_busy = false;
+			intel_context_put(ce);
 
 			if (IS_ERR(rq)) {
 				/* When full, fail_if_busy will trigger EBUSY */
@@ -490,8 +484,6 @@ static int igt_evict_contexts(void *arg)
 		onstack_fence_fini(&fence);
 		pr_info("Submitted %lu contexts/requests on %s\n",
 			count, engine->name);
-
-		fput(file);
 		if (err)
 			break;
 	}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 713770fb2b92d4846acfd2b1e69fe413cb03b74e..c1adea8765a97cffbfe474ddbb1eafe697cac2b7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -28,6 +28,7 @@
 #include "gem/i915_gem_context.h"
 #include "gem/selftests/mock_context.h"
 #include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
 
 #include "i915_random.h"
 #include "i915_selftest.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index debbac660519e18a8e6c5b638b859d155f785257..e9d86dab867711d431f7d9684a48de5b1c7108aa 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -262,7 +262,7 @@ static int live_noa_delay(void *arg)
 
 	delay = intel_read_status_page(stream->engine, 0x102);
 	delay -= intel_read_status_page(stream->engine, 0x100);
-	delay = i915_cs_timestamp_ticks_to_ns(i915, delay);
+	delay = intel_gt_clock_interval_to_ns(stream->engine->gt, delay);
 	pr_info("GPU delay: %uns, expected %lluns\n",
 		delay, expected);
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index e424a6d1a68c9433c0ba7f646e95f8255780a307..d2a678a2497e4331326a0e72965b4861bc1833f8 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -33,6 +33,7 @@
 #include "gt/intel_engine_pm.h"
 #include "gt/intel_engine_user.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_clock_utils.h"
 #include "gt/intel_gt_requests.h"
 #include "gt/selftest_engine_heartbeat.h"
 
@@ -1560,7 +1561,7 @@ static u32 trifilter(u32 *a)
 
 static u64 cycles_to_ns(struct intel_engine_cs *engine, u32 cycles)
 {
-	u64 ns = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
+	u64 ns = intel_gt_clock_interval_to_ns(engine->gt, cycles);
 
 	return DIV_ROUND_CLOSEST(ns, 1 << TF_BIAS);
 }
@@ -1932,9 +1933,7 @@ static int measure_inter_request(struct intel_context *ce)
 		intel_ring_advance(rq, cs);
 		i915_request_add(rq);
 	}
-	local_bh_disable();
 	i915_sw_fence_commit(submit);
-	local_bh_enable();
 	intel_engine_flush_submission(ce->engine);
 	heap_fence_put(submit);
 
@@ -2220,11 +2219,9 @@ static int measure_completion(struct intel_context *ce)
 		intel_ring_advance(rq, cs);
 
 		dma_fence_add_callback(&rq->fence, &cb.base, signal_cb);
-
-		local_bh_disable();
 		i915_request_add(rq);
-		local_bh_enable();
 
+		intel_engine_flush_submission(ce->engine);
 		if (wait_for(READ_ONCE(sema[i]) == -1, 50)) {
 			err = -EIO;
 			goto err;
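live_noa_delay() and cycles_to_ns() above both switch from the per-device macro to the GT clock helper; the arithmetic is simply ns = ticks * NSEC_PER_SEC / clock_hz. As an illustration only (the 12 MHz clock is hypothetical):

	/* 1200 ticks * 1e9 / 12e6 = 100000 ns on a 12 MHz clock. */
	u64 ns = mul_u64_u32_div(ticks, NSEC_PER_SEC, 12000000);
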
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index ec0ecb4e4ca6a4853c1f829034876ad9a5b9b7cf..83f6e5f31fb397996ccdfb8c6ac6a546df6817d2 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -3,6 +3,7 @@
  *
  * Copyright © 2018 Intel Corporation
  */
+#include "gt/intel_gpu_commands.h"
 #include "gt/intel_gt.h"
 
 #include "gem/selftests/igt_gem_utils.h"
@@ -219,6 +220,9 @@ void igt_spinner_fini(struct igt_spinner *spin)
 
 bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
 {
+	if (i915_request_is_ready(rq))
+		intel_engine_flush_submission(rq->engine);
+
 	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
 					       rq->fence.seqno),
 			     100) &&
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 0aeba8e3af28daecde5424c9458683f8faf5d4db..ce7adfa3bca064cc475c3d39d74e1810bc62ca65 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -129,6 +129,21 @@ static void igt_object_release(struct drm_i915_gem_object *obj)
 	i915_gem_object_put(obj);
 }
 
+static bool is_contiguous(struct drm_i915_gem_object *obj)
+{
+	struct scatterlist *sg;
+	dma_addr_t addr = -1;
+
+	for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+		if (addr != -1 && sg_dma_address(sg) != addr)
+			return false;
+
+		addr = sg_dma_address(sg) + sg_dma_len(sg);
+	}
+
+	return true;
+}
+
 static int igt_mock_contiguous(void *arg)
 {
 	struct intel_memory_region *mem = arg;
@@ -150,8 +165,8 @@ static int igt_mock_contiguous(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	if (obj->mm.pages->nents != 1) {
-		pr_err("%s min object spans multiple sg entries\n", __func__);
+	if (!is_contiguous(obj)) {
+		pr_err("%s min object spans disjoint sg entries\n", __func__);
 		err = -EINVAL;
 		goto err_close_objects;
 	}
@@ -163,8 +178,8 @@ static int igt_mock_contiguous(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	if (obj->mm.pages->nents != 1) {
-		pr_err("%s max object spans multiple sg entries\n", __func__);
+	if (!is_contiguous(obj)) {
+		pr_err("%s max object spans disjoint sg entries\n", __func__);
 		err = -EINVAL;
 		goto err_close_objects;
 	}
@@ -189,8 +204,8 @@ static int igt_mock_contiguous(void *arg)
 		goto err_close_objects;
 	}
 
-	if (obj->mm.pages->nents != 1) {
-		pr_err("%s object spans multiple sg entries\n", __func__);
+	if (!is_contiguous(obj)) {
+		pr_err("%s object spans disjoint sg entries\n", __func__);
 		err = -EINVAL;
 		goto err_close_objects;
 	}
@@ -337,6 +352,68 @@ static int igt_mock_splintered_region(void *arg)
 	return err;
 }
 
+#ifndef SZ_8G
+#define SZ_8G BIT_ULL(33)
+#endif
+
+static int igt_mock_max_segment(void *arg)
+{
+	const unsigned int max_segment = i915_sg_segment_size();
+	struct intel_memory_region *mem = arg;
+	struct drm_i915_private *i915 = mem->i915;
+	struct drm_i915_gem_object *obj;
+	struct i915_buddy_block *block;
+	struct scatterlist *sg;
+	LIST_HEAD(objects);
+	u64 size;
+	int err = 0;
+
+	/*
+	 * While we may create very large contiguous blocks, we may need
+	 * to break those down for consumption elsewhere. In particular,
+	 * dma-mapping with scatterlist elements has an implicit limit of
+	 * UINT_MAX on each element.
+	 */
+
+	size = SZ_8G;
+	mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
+	if (IS_ERR(mem))
+		return PTR_ERR(mem);
+
+	obj = igt_object_create(mem, &objects, size, 0);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto out_put;
+	}
+
+	size = 0;
+	list_for_each_entry(block, &obj->mm.blocks, link) {
+		if (i915_buddy_block_size(&mem->mm, block) > size)
+			size = i915_buddy_block_size(&mem->mm, block);
+	}
+	if (size < max_segment) {
+		pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
+		       __func__, max_segment, size);
+		err = -EINVAL;
+		goto out_close;
+	}
+
+	for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+		if (sg->length > max_segment) {
+			pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
+			       __func__, sg->length, max_segment);
+			err = -EINVAL;
+			goto out_close;
+		}
+	}
+
+out_close:
+	close_objects(mem, &objects);
+out_put:
+	intel_memory_region_put(mem);
+	return err;
+}
+
 static int igt_gpu_write_dw(struct intel_context *ce,
 			    struct i915_vma *vma,
 			    u32 dword,
@@ -775,14 +852,22 @@ static int _perf_memcpy(struct intel_memory_region *src_mr,
 		}
 
 		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+		if (t[0] <= 0) {
+			/* ignore the impossible to protect our sanity */
+			pr_debug("Skipping %s src(%s, %s) -> dst(%s, %s) %14s %4lluKiB copy, unstable measurement [%lld, %lld]\n",
+				 __func__,
+				 src_mr->name, repr_type(src_type),
+				 dst_mr->name, repr_type(dst_type),
+				 tests[i].name, size >> 10,
+				 t[0], t[4]);
+			continue;
+		}
+
 		pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
 			__func__,
-			src_mr->name,
-			repr_type(src_type),
-			dst_mr->name,
-			repr_type(dst_type),
-			tests[i].name,
-			size >> 10,
+			src_mr->name, repr_type(src_type),
+			dst_mr->name, repr_type(dst_type),
+			tests[i].name, size >> 10,
 			div64_u64(mul_u32_u32(4 * size,
 					      1000 * 1000 * 1000),
 				  t[1] + 2 * t[2] + t[3]) >> 20);
@@ -848,6 +933,7 @@ int intel_memory_region_mock_selftests(void)
 		SUBTEST(igt_mock_fill),
 		SUBTEST(igt_mock_contiguous),
 		SUBTEST(igt_mock_splintered_region),
+		SUBTEST(igt_mock_max_segment),
 	};
 	struct intel_memory_region *mem;
 	struct drm_i915_private *i915;
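igt_mock_max_segment() enforces the invariant its comment states: however large the contiguous block, no single scatterlist entry may exceed i915_sg_segment_size(). A rough sketch of the splitting loop the test implies (hypothetical packing code, not the driver's):

	/* Chop a contiguous [addr, addr + total) range into entries no
	 * larger than max_segment -- the property verified above.
	 */
	while (total) {
		unsigned int len = min_t(u64, total, max_segment);

		sg->length = len;
		sg_dma_len(sg) = len;
		sg_dma_address(sg) = addr;

		addr += len;
		total -= len;
		sg = sg_next(sg);
	}
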
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index e946bd2087d879d8f41d060008844ca931e45946..0188f877cab2c8e262635c14501d3fb3a971d9ac 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -64,8 +64,6 @@ static void mock_device_release(struct drm_device *dev)
 	mock_device_flush(i915);
 	intel_gt_driver_remove(&i915->gt);
 
-	i915_gem_driver_release__contexts(i915);
-
 	i915_gem_drain_workqueue(i915);
 	i915_gem_drain_freed_objects(i915);
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 979d96f27c43a7c81f965b1f505caad062c44c50..3c6021415274879875fb38daccdac0bd7100d810 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -15,21 +15,16 @@ static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
 	.release = i915_gem_object_release_memory_region,
 };
 
-static struct drm_i915_gem_object *
-mock_object_create(struct intel_memory_region *mem,
-		   resource_size_t size,
-		   unsigned int flags)
+static int mock_object_init(struct intel_memory_region *mem,
+			    struct drm_i915_gem_object *obj,
+			    resource_size_t size,
+			    unsigned int flags)
 {
 	static struct lock_class_key lock_class;
 	struct drm_i915_private *i915 = mem->i915;
-	struct drm_i915_gem_object *obj;
 
 	if (size > mem->mm.size)
-		return ERR_PTR(-E2BIG);
-
-	obj = i915_gem_object_alloc();
-	if (!obj)
-		return ERR_PTR(-ENOMEM);
+		return -E2BIG;
 
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 	i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class);
@@ -40,13 +35,13 @@ mock_object_create(struct intel_memory_region *mem,
 
 	i915_gem_object_init_memory_region(obj, mem, flags);
 
-	return obj;
+	return 0;
 }
 
 static const struct intel_memory_region_ops mock_region_ops = {
 	.init = intel_memory_region_init_buddy,
 	.release = intel_memory_region_release_buddy,
-	.create_object = mock_object_create,
+	.init_object = mock_object_init,
 };
 
 struct intel_memory_region *
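mock_region_ops now implements .init_object instead of .create_object: the core allocates the object and the region hook only initializes it, so every region backend shares one allocation and error path. A hedged sketch of the core-side flow this implies (not quoted from the patch):

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	err = mem->ops->init_object(mem, obj, size, flags);
	if (err) {
		i915_gem_object_free(obj);
		return ERR_PTR(err);
	}

	return obj;
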
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 6231048aa5aaa05faf1e10697a63753999fae798..b5fa0e45a839d621b6a83d33ae20825d8c6fc3f6 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -28,6 +28,7 @@ config DRM_IMX_TVE
 config DRM_IMX_LDB
 	tristate "Support for LVDS displays"
 	depends on DRM_IMX && MFD_SYSCON
+	depends on COMMON_CLK
 	select DRM_PANEL
 	help
 	  Choose this to enable the internal LVDS Display Bridge (LDB)
@@ -36,7 +37,7 @@ config DRM_IMX_LDB
 config DRM_IMX_HDMI
 	tristate "Freescale i.MX DRM HDMI"
 	select DRM_DW_HDMI
-	depends on DRM_IMX
+	depends on DRM_IMX && OF
 	help
 	  Choose this if you want to use HDMI on i.MX6.
 
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index d07b39b8afd21a0a7c43b45760d750bd1e4ae997..87428fb23d9ffa5e53d8b20a76c3d71402265a77 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -15,23 +15,32 @@
 
 #include <drm/bridge/dw_hdmi.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_encoder.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_of.h>
 #include <drm/drm_simple_kms_helper.h>
 
 #include "imx-drm.h"
 
+struct imx_hdmi;
+
+struct imx_hdmi_encoder {
+	struct drm_encoder encoder;
+	struct imx_hdmi *hdmi;
+};
+
 struct imx_hdmi {
 	struct device *dev;
-	struct drm_encoder encoder;
+	struct drm_bridge *bridge;
 	struct dw_hdmi *hdmi;
 	struct regmap *regmap;
 };
 
 static inline struct imx_hdmi *enc_to_imx_hdmi(struct drm_encoder *e)
 {
-	return container_of(e, struct imx_hdmi, encoder);
+	return container_of(e, struct imx_hdmi_encoder, encoder)->hdmi;
 }
 
 static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = {
@@ -98,19 +107,6 @@ static const struct dw_hdmi_phy_config imx_phy_config[] = {
 	{ ~0UL,      0x0000, 0x0000, 0x0000}
 };
 
-static int dw_hdmi_imx_parse_dt(struct imx_hdmi *hdmi)
-{
-	struct device_node *np = hdmi->dev->of_node;
-
-	hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
-	if (IS_ERR(hdmi->regmap)) {
-		dev_err(hdmi->dev, "Unable to get gpr\n");
-		return PTR_ERR(hdmi->regmap);
-	}
-
-	return 0;
-}
-
 static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder)
 {
 	struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder);
@@ -195,65 +191,36 @@ MODULE_DEVICE_TABLE(of, dw_hdmi_imx_dt_ids);
 static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
 			    void *data)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	const struct dw_hdmi_plat_data *plat_data;
-	const struct of_device_id *match;
 	struct drm_device *drm = data;
+	struct imx_hdmi_encoder *hdmi_encoder;
 	struct drm_encoder *encoder;
-	struct imx_hdmi *hdmi;
 	int ret;
 
-	if (!pdev->dev.of_node)
-		return -ENODEV;
+	hdmi_encoder = drmm_simple_encoder_alloc(drm, struct imx_hdmi_encoder,
+						 encoder, DRM_MODE_ENCODER_TMDS);
+	if (IS_ERR(hdmi_encoder))
+		return PTR_ERR(hdmi_encoder);
 
-	hdmi = dev_get_drvdata(dev);
-	memset(hdmi, 0, sizeof(*hdmi));
-
-	match = of_match_node(dw_hdmi_imx_dt_ids, pdev->dev.of_node);
-	plat_data = match->data;
-	hdmi->dev = &pdev->dev;
-	encoder = &hdmi->encoder;
+	hdmi_encoder->hdmi = dev_get_drvdata(dev);
+	encoder = &hdmi_encoder->encoder;
 
 	ret = imx_drm_encoder_parse_of(drm, encoder, dev->of_node);
 	if (ret)
 		return ret;
 
-	ret = dw_hdmi_imx_parse_dt(hdmi);
-	if (ret < 0)
-		return ret;
-
 	drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs);
-	drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
-
-	hdmi->hdmi = dw_hdmi_bind(pdev, encoder, plat_data);
 
-	/*
-	 * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
-	 * which would have called the encoder cleanup.  Do it manually.
-	 */
-	if (IS_ERR(hdmi->hdmi)) {
-		ret = PTR_ERR(hdmi->hdmi);
-		drm_encoder_cleanup(encoder);
-	}
-
-	return ret;
-}
-
-static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
-			       void *data)
-{
-	struct imx_hdmi *hdmi = dev_get_drvdata(dev);
-
-	dw_hdmi_unbind(hdmi->hdmi);
+	return drm_bridge_attach(encoder, hdmi_encoder->hdmi->bridge, NULL, 0);
 }
 
 static const struct component_ops dw_hdmi_imx_ops = {
 	.bind	= dw_hdmi_imx_bind,
-	.unbind	= dw_hdmi_imx_unbind,
 };
 
 static int dw_hdmi_imx_probe(struct platform_device *pdev)
 {
+	struct device_node *np = pdev->dev.of_node;
+	const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np);
 	struct imx_hdmi *hdmi;
 
 	hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
@@ -261,13 +228,33 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	platform_set_drvdata(pdev, hdmi);
+	hdmi->dev = &pdev->dev;
+
+	hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
+	if (IS_ERR(hdmi->regmap)) {
+		dev_err(hdmi->dev, "Unable to get gpr\n");
+		return PTR_ERR(hdmi->regmap);
+	}
+
+	hdmi->hdmi = dw_hdmi_probe(pdev, match->data);
+	if (IS_ERR(hdmi->hdmi))
+		return PTR_ERR(hdmi->hdmi);
+
+	hdmi->bridge = of_drm_find_bridge(np);
+	if (!hdmi->bridge) {
+		dev_err(hdmi->dev, "Unable to find bridge\n");
+		return -ENODEV;
+	}
 
 	return component_add(&pdev->dev, &dw_hdmi_imx_ops);
 }
 
 static int dw_hdmi_imx_remove(struct platform_device *pdev)
 {
+	struct imx_hdmi *hdmi = platform_get_drvdata(pdev);
+
 	component_del(&pdev->dev, &dw_hdmi_imx_ops);
+	dw_hdmi_remove(hdmi->hdmi);
 
 	return 0;
 }
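The bind path above shows the pattern repeated across the imx drivers in this series: the drm_encoder moves into a small wrapper allocated with drmm_simple_encoder_alloc(), so its teardown is tied to the drm_device and the removed manual drm_encoder_cleanup()/unbind dance becomes unnecessary. A minimal sketch, assuming only the drmm API used in the hunks (the driver names are hypothetical):

	struct example_encoder {
		struct drm_encoder encoder;
		struct example_hdmi *hdmi;	/* hypothetical driver state */
	};

	static int example_bind(struct device *dev, struct drm_device *drm)
	{
		struct example_encoder *enc;

		enc = drmm_simple_encoder_alloc(drm, struct example_encoder,
						encoder, DRM_MODE_ENCODER_TMDS);
		if (IS_ERR(enc))
			return PTR_ERR(enc);

		enc->hdmi = dev_get_drvdata(dev);

		/* later failures need no drm_encoder_cleanup(): drmm owns it */
		return 0;
	}
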
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 41e2978cb1ebf278fa93ac803d6ea74d06ad5ecd..dbfe39e2f7f614151795f82e6b5b626ae2214b1c 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -22,6 +22,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
@@ -47,12 +48,18 @@
 #define LDB_DI1_VS_POL_ACT_LOW		(1 << 10)
 #define LDB_BGREF_RMODE_INT		(1 << 15)
 
+struct imx_ldb_channel;
+
+struct imx_ldb_encoder {
+	struct drm_connector connector;
+	struct drm_encoder encoder;
+	struct imx_ldb_channel *channel;
+};
+
 struct imx_ldb;
 
 struct imx_ldb_channel {
 	struct imx_ldb *ldb;
-	struct drm_connector connector;
-	struct drm_encoder encoder;
 
 	/* Defines what is connected to the ldb, only one at a time */
 	struct drm_panel *panel;
@@ -70,12 +77,12 @@ struct imx_ldb_channel {
 
 static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c)
 {
-	return container_of(c, struct imx_ldb_channel, connector);
+	return container_of(c, struct imx_ldb_encoder, connector)->channel;
 }
 
 static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e)
 {
-	return container_of(e, struct imx_ldb_channel, encoder);
+	return container_of(e, struct imx_ldb_encoder, encoder)->channel;
 }
 
 struct bus_mux {
@@ -411,9 +418,20 @@ static int imx_ldb_register(struct drm_device *drm,
 	struct imx_ldb_channel *imx_ldb_ch)
 {
 	struct imx_ldb *ldb = imx_ldb_ch->ldb;
-	struct drm_encoder *encoder = &imx_ldb_ch->encoder;
+	struct imx_ldb_encoder *ldb_encoder;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
 	int ret;
 
+	ldb_encoder = drmm_simple_encoder_alloc(drm, struct imx_ldb_encoder,
+						encoder, DRM_MODE_ENCODER_LVDS);
+	if (IS_ERR(ldb_encoder))
+		return PTR_ERR(ldb_encoder);
+
+	ldb_encoder->channel = imx_ldb_ch;
+	connector = &ldb_encoder->connector;
+	encoder = &ldb_encoder->encoder;
+
 	ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child);
 	if (ret)
 		return ret;
@@ -429,11 +447,9 @@ static int imx_ldb_register(struct drm_device *drm,
 	}
 
 	drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
-	drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_LVDS);
 
 	if (imx_ldb_ch->bridge) {
-		ret = drm_bridge_attach(&imx_ldb_ch->encoder,
-					imx_ldb_ch->bridge, NULL, 0);
+		ret = drm_bridge_attach(encoder, imx_ldb_ch->bridge, NULL, 0);
 		if (ret) {
 			DRM_ERROR("Failed to initialize bridge with drm\n");
 			return ret;
@@ -445,13 +461,13 @@ static int imx_ldb_register(struct drm_device *drm,
 		 * historical reasons, the ldb driver can also work without
 		 * a panel.
 		 */
-		drm_connector_helper_add(&imx_ldb_ch->connector,
-				&imx_ldb_connector_helper_funcs);
-		drm_connector_init_with_ddc(drm, &imx_ldb_ch->connector,
+		drm_connector_helper_add(connector,
+					 &imx_ldb_connector_helper_funcs);
+		drm_connector_init_with_ddc(drm, connector,
 					    &imx_ldb_connector_funcs,
 					    DRM_MODE_CONNECTOR_LVDS,
 					    imx_ldb_ch->ddc);
-		drm_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
+		drm_connector_attach_encoder(connector, encoder);
 	}
 
 	return 0;
@@ -559,17 +575,42 @@ static int imx_ldb_panel_ddc(struct device *dev,
 static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 {
 	struct drm_device *drm = data;
+	struct imx_ldb *imx_ldb = dev_get_drvdata(dev);
+	int ret;
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		struct imx_ldb_channel *channel = &imx_ldb->channel[i];
+
+		if (!channel->ldb)
+			break;
+
+		ret = imx_ldb_register(drm, channel);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static const struct component_ops imx_ldb_ops = {
+	.bind	= imx_ldb_bind,
+};
+
+static int imx_ldb_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
-	const struct of_device_id *of_id =
-			of_match_device(imx_ldb_dt_ids, dev);
+	const struct of_device_id *of_id = of_match_device(imx_ldb_dt_ids, dev);
 	struct device_node *child;
 	struct imx_ldb *imx_ldb;
 	int dual;
 	int ret;
 	int i;
 
-	imx_ldb = dev_get_drvdata(dev);
-	memset(imx_ldb, 0, sizeof(*imx_ldb));
+	imx_ldb = devm_kzalloc(dev, sizeof(*imx_ldb), GFP_KERNEL);
+	if (!imx_ldb)
+		return -ENOMEM;
 
 	imx_ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
 	if (IS_ERR(imx_ldb->regmap)) {
@@ -669,25 +710,20 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 		}
 		channel->bus_format = bus_format;
 		channel->child = child;
-
-		ret = imx_ldb_register(drm, channel);
-		if (ret) {
-			channel->child = NULL;
-			goto free_child;
-		}
 	}
 
-	return 0;
+	platform_set_drvdata(pdev, imx_ldb);
+
+	return component_add(&pdev->dev, &imx_ldb_ops);
 
 free_child:
 	of_node_put(child);
 	return ret;
 }
 
-static void imx_ldb_unbind(struct device *dev, struct device *master,
-	void *data)
+static int imx_ldb_remove(struct platform_device *pdev)
 {
-	struct imx_ldb *imx_ldb = dev_get_drvdata(dev);
+	struct imx_ldb *imx_ldb = platform_get_drvdata(pdev);
 	int i;
 
 	for (i = 0; i < 2; i++) {
@@ -696,28 +732,7 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
 		kfree(channel->edid);
 		i2c_put_adapter(channel->ddc);
 	}
-}
-
-static const struct component_ops imx_ldb_ops = {
-	.bind	= imx_ldb_bind,
-	.unbind	= imx_ldb_unbind,
-};
-
-static int imx_ldb_probe(struct platform_device *pdev)
-{
-	struct imx_ldb *imx_ldb;
-
-	imx_ldb = devm_kzalloc(&pdev->dev, sizeof(*imx_ldb), GFP_KERNEL);
-	if (!imx_ldb)
-		return -ENOMEM;
-
-	platform_set_drvdata(pdev, imx_ldb);
-
-	return component_add(&pdev->dev, &imx_ldb_ops);
-}
 
-static int imx_ldb_remove(struct platform_device *pdev)
-{
 	component_del(&pdev->dev, &imx_ldb_ops);
 	return 0;
 }
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 2a8d2e32e7b42678850cf9758019653cc4957882..bc8c3f802a1529a21eda3dc9ad65513d64fec068 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -19,6 +19,7 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
 
@@ -99,9 +100,13 @@ enum {
 	TVE_MODE_VGA,
 };
 
-struct imx_tve {
+struct imx_tve_encoder {
 	struct drm_connector connector;
 	struct drm_encoder encoder;
+	struct imx_tve *tve;
+};
+
+struct imx_tve {
 	struct device *dev;
 	int mode;
 	int di_hsync_pin;
@@ -118,12 +123,12 @@ struct imx_tve {
 
 static inline struct imx_tve *con_to_tve(struct drm_connector *c)
 {
-	return container_of(c, struct imx_tve, connector);
+	return container_of(c, struct imx_tve_encoder, connector)->tve;
 }
 
 static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
 {
-	return container_of(e, struct imx_tve, encoder);
+	return container_of(e, struct imx_tve_encoder, encoder)->tve;
 }
 
 static void tve_enable(struct imx_tve *tve)
@@ -418,7 +423,7 @@ static int tve_clk_init(struct imx_tve *tve, void __iomem *base)
 	init.parent_names = (const char **)&tve_di_parent;
 
 	tve->clk_hw_di.init = &init;
-	tve->di_clk = clk_register(tve->dev, &tve->clk_hw_di);
+	tve->di_clk = devm_clk_register(tve->dev, &tve->clk_hw_di);
 	if (IS_ERR(tve->di_clk)) {
 		dev_err(tve->dev, "failed to register TVE output clock: %ld\n",
 			PTR_ERR(tve->di_clk));
@@ -428,33 +433,6 @@ static int tve_clk_init(struct imx_tve *tve, void __iomem *base)
 	return 0;
 }
 
-static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
-{
-	int encoder_type;
-	int ret;
-
-	encoder_type = tve->mode == TVE_MODE_VGA ?
-				DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
-
-	ret = imx_drm_encoder_parse_of(drm, &tve->encoder, tve->dev->of_node);
-	if (ret)
-		return ret;
-
-	drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs);
-	drm_simple_encoder_init(drm, &tve->encoder, encoder_type);
-
-	drm_connector_helper_add(&tve->connector,
-			&imx_tve_connector_helper_funcs);
-	drm_connector_init_with_ddc(drm, &tve->connector,
-				    &imx_tve_connector_funcs,
-				    DRM_MODE_CONNECTOR_VGA,
-				    tve->ddc);
-
-	drm_connector_attach_encoder(&tve->connector, &tve->encoder);
-
-	return 0;
-}
-
 static void imx_tve_disable_regulator(void *data)
 {
 	struct imx_tve *tve = data;
@@ -502,8 +480,49 @@ static int of_get_tve_mode(struct device_node *np)
 
 static int imx_tve_bind(struct device *dev, struct device *master, void *data)
 {
-	struct platform_device *pdev = to_platform_device(dev);
 	struct drm_device *drm = data;
+	struct imx_tve *tve = dev_get_drvdata(dev);
+	struct imx_tve_encoder *tvee;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	int encoder_type;
+	int ret;
+
+	encoder_type = tve->mode == TVE_MODE_VGA ?
+		       DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
+
+	tvee = drmm_simple_encoder_alloc(drm, struct imx_tve_encoder, encoder,
+					 encoder_type);
+	if (IS_ERR(tvee))
+		return PTR_ERR(tvee);
+
+	tvee->tve = tve;
+	encoder = &tvee->encoder;
+	connector = &tvee->connector;
+
+	ret = imx_drm_encoder_parse_of(drm, encoder, tve->dev->of_node);
+	if (ret)
+		return ret;
+
+	drm_encoder_helper_add(encoder, &imx_tve_encoder_helper_funcs);
+
+	drm_connector_helper_add(connector, &imx_tve_connector_helper_funcs);
+	ret = drm_connector_init_with_ddc(drm, connector,
+					  &imx_tve_connector_funcs,
+					  DRM_MODE_CONNECTOR_VGA, tve->ddc);
+	if (ret)
+		return ret;
+
+	return drm_connector_attach_encoder(connector, encoder);
+}
+
+static const struct component_ops imx_tve_ops = {
+	.bind	= imx_tve_bind,
+};
+
+static int imx_tve_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
 	struct device_node *ddc_node;
 	struct imx_tve *tve;
@@ -513,8 +532,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
 	int irq;
 	int ret;
 
-	tve = dev_get_drvdata(dev);
-	memset(tve, 0, sizeof(*tve));
+	tve = devm_kzalloc(dev, sizeof(*tve), GFP_KERNEL);
+	if (!tve)
+		return -ENOMEM;
 
 	tve->dev = dev;
 
@@ -621,28 +641,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
 	if (ret)
 		return ret;
 
-	ret = imx_tve_register(drm, tve);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static const struct component_ops imx_tve_ops = {
-	.bind	= imx_tve_bind,
-};
-
-static int imx_tve_probe(struct platform_device *pdev)
-{
-	struct imx_tve *tve;
-
-	tve = devm_kzalloc(&pdev->dev, sizeof(*tve), GFP_KERNEL);
-	if (!tve)
-		return -ENOMEM;
-
 	platform_set_drvdata(pdev, tve);
 
-	return component_add(&pdev->dev, &imx_tve_ops);
+	return component_add(dev, &imx_tve_ops);
 }
 
 static int imx_tve_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 7ebd99ee3240728a1dac170da421db18348c06dc..e6431a227feb17f7cce698593afa7fd2f9ca908e 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -20,6 +20,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
 
@@ -163,7 +164,6 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
 
 static const struct drm_crtc_funcs ipu_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
-	.destroy = drm_crtc_cleanup,
 	.page_flip = drm_atomic_helper_page_flip,
 	.reset = imx_drm_crtc_reset,
 	.atomic_duplicate_state = imx_drm_crtc_duplicate_state,
@@ -322,73 +322,73 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
 	.atomic_enable = ipu_crtc_atomic_enable,
 };
 
-static void ipu_put_resources(struct ipu_crtc *ipu_crtc)
+static void ipu_put_resources(struct drm_device *dev, void *ptr)
 {
+	struct ipu_crtc *ipu_crtc = ptr;
+
 	if (!IS_ERR_OR_NULL(ipu_crtc->dc))
 		ipu_dc_put(ipu_crtc->dc);
 	if (!IS_ERR_OR_NULL(ipu_crtc->di))
 		ipu_di_put(ipu_crtc->di);
 }
 
-static int ipu_get_resources(struct ipu_crtc *ipu_crtc,
-		struct ipu_client_platformdata *pdata)
+static int ipu_get_resources(struct drm_device *dev, struct ipu_crtc *ipu_crtc,
+			     struct ipu_client_platformdata *pdata)
 {
 	struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
 	int ret;
 
 	ipu_crtc->dc = ipu_dc_get(ipu, pdata->dc);
-	if (IS_ERR(ipu_crtc->dc)) {
-		ret = PTR_ERR(ipu_crtc->dc);
-		goto err_out;
-	}
+	if (IS_ERR(ipu_crtc->dc))
+		return PTR_ERR(ipu_crtc->dc);
+
+	ret = drmm_add_action_or_reset(dev, ipu_put_resources, ipu_crtc);
+	if (ret)
+		return ret;
 
 	ipu_crtc->di = ipu_di_get(ipu, pdata->di);
-	if (IS_ERR(ipu_crtc->di)) {
-		ret = PTR_ERR(ipu_crtc->di);
-		goto err_out;
-	}
+	if (IS_ERR(ipu_crtc->di))
+		return PTR_ERR(ipu_crtc->di);
 
 	return 0;
-err_out:
-	ipu_put_resources(ipu_crtc);
-
-	return ret;
 }
 
-static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
-	struct ipu_client_platformdata *pdata, struct drm_device *drm)
+static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
 {
-	struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
-	struct drm_crtc *crtc = &ipu_crtc->base;
+	struct ipu_client_platformdata *pdata = dev->platform_data;
+	struct ipu_soc *ipu = dev_get_drvdata(dev->parent);
+	struct drm_device *drm = data;
+	struct ipu_plane *primary_plane;
+	struct ipu_crtc *ipu_crtc;
+	struct drm_crtc *crtc;
 	int dp = -EINVAL;
 	int ret;
 
-	ret = ipu_get_resources(ipu_crtc, pdata);
-	if (ret) {
-		dev_err(ipu_crtc->dev, "getting resources failed with %d.\n",
-				ret);
-		return ret;
-	}
-
 	if (pdata->dp >= 0)
 		dp = IPU_DP_FLOW_SYNC_BG;
-	ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
-					    DRM_PLANE_TYPE_PRIMARY);
-	if (IS_ERR(ipu_crtc->plane[0])) {
-		ret = PTR_ERR(ipu_crtc->plane[0]);
-		goto err_put_resources;
-	}
+	primary_plane = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
+				       DRM_PLANE_TYPE_PRIMARY);
+	if (IS_ERR(primary_plane))
+		return PTR_ERR(primary_plane);
+
+	ipu_crtc = drmm_crtc_alloc_with_planes(drm, struct ipu_crtc, base,
+					       &primary_plane->base, NULL,
+					       &ipu_crtc_funcs, NULL);
+	if (IS_ERR(ipu_crtc))
+		return PTR_ERR(ipu_crtc);
 
+	ipu_crtc->dev = dev;
+	ipu_crtc->plane[0] = primary_plane;
+
+	crtc = &ipu_crtc->base;
 	crtc->port = pdata->of_node;
 	drm_crtc_helper_add(crtc, &ipu_helper_funcs);
-	drm_crtc_init_with_planes(drm, crtc, &ipu_crtc->plane[0]->base, NULL,
-				  &ipu_crtc_funcs, NULL);
 
-	ret = ipu_plane_get_resources(ipu_crtc->plane[0]);
+	ret = ipu_get_resources(drm, ipu_crtc, pdata);
 	if (ret) {
-		dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n",
+		dev_err(ipu_crtc->dev, "getting resources failed with %d.\n",
 			ret);
-		goto err_put_resources;
+		return ret;
 	}
 
 	/* If this crtc is using the DP, add an overlay plane */
@@ -397,16 +397,8 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
 						IPU_DP_FLOW_SYNC_FG,
 						drm_crtc_mask(&ipu_crtc->base),
 						DRM_PLANE_TYPE_OVERLAY);
-		if (IS_ERR(ipu_crtc->plane[1])) {
+		if (IS_ERR(ipu_crtc->plane[1]))
 			ipu_crtc->plane[1] = NULL;
-		} else {
-			ret = ipu_plane_get_resources(ipu_crtc->plane[1]);
-			if (ret) {
-				dev_err(ipu_crtc->dev, "getting plane 1 "
-					"resources failed with %d.\n", ret);
-				goto err_put_plane0_res;
-			}
-		}
 	}
 
 	ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
@@ -414,58 +406,21 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
 			"imx_drm", ipu_crtc);
 	if (ret < 0) {
 		dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
-		goto err_put_plane1_res;
+		return ret;
 	}
 	/* Only enable IRQ when we actually need it to trigger work. */
 	disable_irq(ipu_crtc->irq);
 
 	return 0;
-
-err_put_plane1_res:
-	if (ipu_crtc->plane[1])
-		ipu_plane_put_resources(ipu_crtc->plane[1]);
-err_put_plane0_res:
-	ipu_plane_put_resources(ipu_crtc->plane[0]);
-err_put_resources:
-	ipu_put_resources(ipu_crtc);
-
-	return ret;
-}
-
-static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
-{
-	struct ipu_client_platformdata *pdata = dev->platform_data;
-	struct drm_device *drm = data;
-	struct ipu_crtc *ipu_crtc;
-
-	ipu_crtc = dev_get_drvdata(dev);
-	memset(ipu_crtc, 0, sizeof(*ipu_crtc));
-
-	ipu_crtc->dev = dev;
-
-	return ipu_crtc_init(ipu_crtc, pdata, drm);
-}
-
-static void ipu_drm_unbind(struct device *dev, struct device *master,
-	void *data)
-{
-	struct ipu_crtc *ipu_crtc = dev_get_drvdata(dev);
-
-	ipu_put_resources(ipu_crtc);
-	if (ipu_crtc->plane[1])
-		ipu_plane_put_resources(ipu_crtc->plane[1]);
-	ipu_plane_put_resources(ipu_crtc->plane[0]);
 }
 
 static const struct component_ops ipu_crtc_ops = {
 	.bind = ipu_drm_bind,
-	.unbind = ipu_drm_unbind,
 };
 
 static int ipu_drm_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct ipu_crtc *ipu_crtc;
 	int ret;
 
 	if (!dev->platform_data)
@@ -475,12 +430,6 @@ static int ipu_drm_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL);
-	if (!ipu_crtc)
-		return -ENOMEM;
-
-	dev_set_drvdata(dev, ipu_crtc);
-
 	return component_add(dev, &ipu_crtc_ops);
 }
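
The conversion above hinges on drmm_add_action_or_reset(): a release action is registered immediately after each resource is acquired, and the DRM core either runs it automatically when the drm_device is released or runs it right away if the registration itself fails. A minimal sketch of that acquisition pattern, with hypothetical acquire_thing()/release_thing() helpers standing in for pairs such as ipu_dc_get()/ipu_dc_put():

#include <linux/err.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

/* Sketch of the drm-managed acquisition pattern used throughout the patch. */
static void put_thing(struct drm_device *drm, void *ptr)
{
	release_thing(ptr);			/* hypothetical release helper */
}

static int get_thing_managed(struct drm_device *drm, struct thing **out)
{
	struct thing *t = acquire_thing();	/* hypothetical acquire helper */
	int ret;

	if (IS_ERR(t))
		return PTR_ERR(t);

	/* Runs put_thing() at drm_device release, or immediately on failure. */
	ret = drmm_add_action_or_reset(drm, put_thing, t);
	if (ret)
		return ret;

	*out = t;
	return 0;
}
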
 
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 8a4235d9d9f1e7bf622f286242c6d19798dfc211..075508051b5fb71214477ccef2a86a84c19f58f5 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -11,6 +11,7 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_plane_helper.h>
 
 #include <video/imx-ipu-v3.h>
@@ -142,8 +143,10 @@ drm_plane_state_to_vbo(struct drm_plane_state *state)
 	       fb->format->cpp[2] * x - eba;
 }
 
-void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
+static void ipu_plane_put_resources(struct drm_device *dev, void *ptr)
 {
+	struct ipu_plane *ipu_plane = ptr;
+
 	if (!IS_ERR_OR_NULL(ipu_plane->dp))
 		ipu_dp_put(ipu_plane->dp);
 	if (!IS_ERR_OR_NULL(ipu_plane->dmfc))
@@ -154,7 +157,8 @@ void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
 		ipu_idmac_put(ipu_plane->alpha_ch);
 }
 
-int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
+static int ipu_plane_get_resources(struct drm_device *dev,
+				   struct ipu_plane *ipu_plane)
 {
 	int ret;
 	int alpha_ch;
@@ -166,6 +170,10 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
 		return ret;
 	}
 
+	ret = drmm_add_action_or_reset(dev, ipu_plane_put_resources, ipu_plane);
+	if (ret)
+		return ret;
+
 	alpha_ch = ipu_channel_alpha_channel(ipu_plane->dma);
 	if (alpha_ch >= 0) {
 		ipu_plane->alpha_ch = ipu_idmac_get(ipu_plane->ipu, alpha_ch);
@@ -181,7 +189,7 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
 	if (IS_ERR(ipu_plane->dmfc)) {
 		ret = PTR_ERR(ipu_plane->dmfc);
 		DRM_ERROR("failed to get dmfc: ret %d\n", ret);
-		goto err_out;
+		return ret;
 	}
 
 	if (ipu_plane->dp_flow >= 0) {
@@ -189,15 +197,11 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
 		if (IS_ERR(ipu_plane->dp)) {
 			ret = PTR_ERR(ipu_plane->dp);
 			DRM_ERROR("failed to get dp flow: %d\n", ret);
-			goto err_out;
+			return ret;
 		}
 	}
 
 	return 0;
-err_out:
-	ipu_plane_put_resources(ipu_plane);
-
-	return ret;
 }
 
 static bool ipu_plane_separate_alpha(struct ipu_plane *ipu_plane)
@@ -262,16 +266,6 @@ void ipu_plane_disable_deferred(struct drm_plane *plane)
 }
 EXPORT_SYMBOL_GPL(ipu_plane_disable_deferred);
 
-static void ipu_plane_destroy(struct drm_plane *plane)
-{
-	struct ipu_plane *ipu_plane = to_ipu_plane(plane);
-
-	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
-	drm_plane_cleanup(plane);
-	kfree(ipu_plane);
-}
-
 static void ipu_plane_state_reset(struct drm_plane *plane)
 {
 	unsigned int zpos = (plane->type == DRM_PLANE_TYPE_PRIMARY) ? 0 : 1;
@@ -336,7 +330,6 @@ static bool ipu_plane_format_mod_supported(struct drm_plane *plane,
 static const struct drm_plane_funcs ipu_plane_funcs = {
 	.update_plane	= drm_atomic_helper_update_plane,
 	.disable_plane	= drm_atomic_helper_disable_plane,
-	.destroy	= ipu_plane_destroy,
 	.reset		= ipu_plane_state_reset,
 	.atomic_duplicate_state	= ipu_plane_duplicate_state,
 	.atomic_destroy_state	= ipu_plane_destroy_state,
@@ -834,10 +827,15 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
 	DRM_DEBUG_KMS("channel %d, dp flow %d, possible_crtcs=0x%x\n",
 		      dma, dp, possible_crtcs);
 
-	ipu_plane = kzalloc(sizeof(*ipu_plane), GFP_KERNEL);
-	if (!ipu_plane) {
-		DRM_ERROR("failed to allocate plane\n");
-		return ERR_PTR(-ENOMEM);
+	if (ipu_prg_present(ipu))
+		modifiers = pre_format_modifiers;
+
+	ipu_plane = drmm_universal_plane_alloc(dev, struct ipu_plane, base,
+					       possible_crtcs, &ipu_plane_funcs,
+					       ipu_plane_formats,
+					       ARRAY_SIZE(ipu_plane_formats),
+					       modifiers, type, NULL);
+	if (IS_ERR(ipu_plane)) {
+		DRM_ERROR("failed to allocate and initialize %s plane\n",
+			  zpos ? "overlay" : "primary");
+		return ipu_plane;
 	}
 
 	ipu_plane->ipu = ipu;
@@ -847,22 +845,23 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
-	if (ipu_prg_present(ipu))
-		modifiers = pre_format_modifiers;
 
-	ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs,
-				       &ipu_plane_funcs, ipu_plane_formats,
-				       ARRAY_SIZE(ipu_plane_formats),
-				       modifiers, type, NULL);
-	if (ret) {
-		DRM_ERROR("failed to initialize plane\n");
-		kfree(ipu_plane);
-		return ERR_PTR(ret);
-	}
-
 	drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
 
 	if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG)
-		drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0, 1);
+		ret = drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0,
+						     1);
 	else
-		drm_plane_create_zpos_immutable_property(&ipu_plane->base, 0);
+		ret = drm_plane_create_zpos_immutable_property(&ipu_plane->base,
+							       0);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = ipu_plane_get_resources(dev, ipu_plane);
+	if (ret) {
+		DRM_ERROR("failed to get %s plane resources: %pe\n",
+			  zpos ? "overlay" : "primary", ERR_PTR(ret));
+		return ERR_PTR(ret);
+	}
 
 	return ipu_plane;
 }
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index ffacbcdd2f98e59ba47ab3a4c3b064e7065440f6..6d544e6ce63fea02c25fb928d5e566059453c5a6 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -41,9 +41,6 @@ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
 		       uint32_t src_x, uint32_t src_y, uint32_t src_w,
 		       uint32_t src_h, bool interlaced);
 
-int ipu_plane_get_resources(struct ipu_plane *plane);
-void ipu_plane_put_resources(struct ipu_plane *plane);
-
 int ipu_plane_irq(struct ipu_plane *plane);
 
 void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 2eb8df4697dfac6c0bd1e6a2098a62a296e0498c..e0412e694fd9bfa1f133af44e7e4fd8c78fbfe53 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -15,6 +15,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
@@ -22,10 +23,14 @@
 
 #include "imx-drm.h"
 
-struct imx_parallel_display {
+struct imx_parallel_display_encoder {
 	struct drm_connector connector;
 	struct drm_encoder encoder;
 	struct drm_bridge bridge;
+	struct imx_parallel_display *pd;
+};
+
+struct imx_parallel_display {
 	struct device *dev;
 	void *edid;
 	u32 bus_format;
@@ -37,12 +42,12 @@ struct imx_parallel_display {
 
 static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
 {
-	return container_of(c, struct imx_parallel_display, connector);
+	return container_of(c, struct imx_parallel_display_encoder, connector)->pd;
 }
 
 static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
 {
-	return container_of(b, struct imx_parallel_display, bridge);
+	return container_of(b, struct imx_parallel_display_encoder, bridge)->pd;
 }
 
 static int imx_pd_connector_get_modes(struct drm_connector *connector)
@@ -74,7 +79,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
 			return ret;
 
 		drm_mode_copy(mode, &imxpd->mode);
-		mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+		mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
 		drm_mode_probed_add(connector, mode);
 		num_modes++;
 	}
@@ -253,12 +258,26 @@ static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
 	.atomic_get_output_bus_fmts = imx_pd_bridge_atomic_get_output_bus_fmts,
 };
 
-static int imx_pd_register(struct drm_device *drm,
-	struct imx_parallel_display *imxpd)
+static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 {
-	struct drm_encoder *encoder = &imxpd->encoder;
+	struct drm_device *drm = data;
+	struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
+	struct imx_parallel_display_encoder *imxpd_encoder;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_bridge *bridge;
 	int ret;
 
+	imxpd_encoder = drmm_simple_encoder_alloc(drm, struct imx_parallel_display_encoder,
+						  encoder, DRM_MODE_ENCODER_NONE);
+	if (IS_ERR(imxpd_encoder))
+		return PTR_ERR(imxpd_encoder);
+
+	imxpd_encoder->pd = imxpd;
+	connector = &imxpd_encoder->connector;
+	encoder = &imxpd_encoder->encoder;
+	bridge = &imxpd_encoder->bridge;
+
 	ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
 	if (ret)
 		return ret;
@@ -268,39 +287,37 @@ static int imx_pd_register(struct drm_device *drm,
 	 * immediately since the current state is ON
 	 * at this point.
 	 */
-	imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
-
-	drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
-
-	imxpd->bridge.funcs = &imx_pd_bridge_funcs;
-	drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0);
+	connector->dpms = DRM_MODE_DPMS_OFF;
 
-	if (!imxpd->next_bridge) {
-		drm_connector_helper_add(&imxpd->connector,
-				&imx_pd_connector_helper_funcs);
-		drm_connector_init(drm, &imxpd->connector,
-				   &imx_pd_connector_funcs,
-				   DRM_MODE_CONNECTOR_DPI);
-	}
+	bridge->funcs = &imx_pd_bridge_funcs;
+	drm_bridge_attach(encoder, bridge, NULL, 0);
 
 	if (imxpd->next_bridge) {
-		ret = drm_bridge_attach(encoder, imxpd->next_bridge,
-					&imxpd->bridge, 0);
+		ret = drm_bridge_attach(encoder, imxpd->next_bridge, bridge, 0);
 		if (ret < 0) {
 			dev_err(imxpd->dev, "failed to attach bridge: %d\n",
 				ret);
 			return ret;
 		}
 	} else {
-		drm_connector_attach_encoder(&imxpd->connector, encoder);
+		drm_connector_helper_add(connector,
+					 &imx_pd_connector_helper_funcs);
+		drm_connector_init(drm, connector, &imx_pd_connector_funcs,
+				   DRM_MODE_CONNECTOR_DPI);
+
+		drm_connector_attach_encoder(connector, encoder);
 	}
 
 	return 0;
 }
 
-static int imx_pd_bind(struct device *dev, struct device *master, void *data)
+static const struct component_ops imx_pd_ops = {
+	.bind	= imx_pd_bind,
+};
+
+static int imx_pd_probe(struct platform_device *pdev)
 {
-	struct drm_device *drm = data;
+	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
 	const u8 *edidp;
 	struct imx_parallel_display *imxpd;
@@ -309,8 +326,9 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 	u32 bus_format = 0;
 	const char *fmt;
 
-	imxpd = dev_get_drvdata(dev);
-	memset(imxpd, 0, sizeof(*imxpd));
+	imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
+	if (!imxpd)
+		return -ENOMEM;
 
 	/* port@1 is the output port */
 	ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel,
@@ -337,28 +355,9 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 
 	imxpd->dev = dev;
 
-	ret = imx_pd_register(drm, imxpd);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static const struct component_ops imx_pd_ops = {
-	.bind	= imx_pd_bind,
-};
-
-static int imx_pd_probe(struct platform_device *pdev)
-{
-	struct imx_parallel_display *imxpd;
-
-	imxpd = devm_kzalloc(&pdev->dev, sizeof(*imxpd), GFP_KERNEL);
-	if (!imxpd)
-		return -ENOMEM;
-
 	platform_set_drvdata(pdev, imxpd);
 
-	return component_add(&pdev->dev, &imx_pd_ops);
+	return component_add(dev, &imx_pd_ops);
 }
 
 static int imx_pd_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index 477d5387e43eaf3c8387444a968c616d6fd34fee..3b57f8be007c4aa7d64cd03f396b77a3af5c3c72 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -4,6 +4,7 @@ config DRM_INGENIC
 	depends on DRM
 	depends on CMA
 	depends on OF
+	depends on COMMON_CLK
 	select DRM_BRIDGE
 	select DRM_PANEL_BRIDGE
 	select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 368bfef8b34035a974ef1c5f3fd103e89b596d05..7bb31fbee29dac4818d39ed86f6714fdbf1158bb 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -14,6 +14,7 @@
 #include <linux/of_device.h>
 #include <linux/of_reserved_mem.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 #include <linux/regmap.h>
 
 #include <drm/drm_atomic.h>
@@ -190,15 +191,15 @@ static void ingenic_drm_crtc_update_timings(struct ingenic_drm *priv,
 {
 	unsigned int vpe, vds, vde, vt, hpe, hds, hde, ht;
 
-	vpe = mode->vsync_end - mode->vsync_start;
-	vds = mode->vtotal - mode->vsync_start;
-	vde = vds + mode->vdisplay;
-	vt = vde + mode->vsync_start - mode->vdisplay;
+	vpe = mode->crtc_vsync_end - mode->crtc_vsync_start;
+	vds = mode->crtc_vtotal - mode->crtc_vsync_start;
+	vde = vds + mode->crtc_vdisplay;
+	vt = vde + mode->crtc_vsync_start - mode->crtc_vdisplay;
 
-	hpe = mode->hsync_end - mode->hsync_start;
-	hds = mode->htotal - mode->hsync_start;
-	hde = hds + mode->hdisplay;
-	ht = hde + mode->hsync_start - mode->hdisplay;
+	hpe = mode->crtc_hsync_end - mode->crtc_hsync_start;
+	hds = mode->crtc_htotal - mode->crtc_hsync_start;
+	hde = hds + mode->crtc_hdisplay;
+	ht = hde + mode->crtc_hsync_start - mode->crtc_hdisplay;
 
 	regmap_write(priv->map, JZ_REG_LCD_VSYNC,
 		     0 << JZ_LCD_VSYNC_VPS_OFFSET |
@@ -333,7 +334,7 @@ static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc,
 	struct drm_pending_vblank_event *event = crtc_state->event;
 
 	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
-		ingenic_drm_crtc_update_timings(priv, &crtc_state->mode);
+		ingenic_drm_crtc_update_timings(priv, &crtc_state->adjusted_mode);
 		priv->update_clk_rate = true;
 	}
 
@@ -589,7 +590,7 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
 	struct drm_display_mode *mode = &crtc_state->adjusted_mode;
 	struct drm_connector *conn = conn_state->connector;
 	struct drm_display_info *info = &conn->display_info;
-	unsigned int cfg;
+	unsigned int cfg, rgbcfg = 0;
 
 	priv->panel_is_sharp = info->bus_flags & DRM_BUS_FLAG_SHARP_SIGNALS;
 
@@ -626,6 +627,9 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
 			case MEDIA_BUS_FMT_RGB888_1X24:
 				cfg |= JZ_LCD_CFG_MODE_GENERIC_24BIT;
 				break;
+			case MEDIA_BUS_FMT_RGB888_3X8_DELTA:
+				rgbcfg = JZ_LCD_RGBC_EVEN_GBR | JZ_LCD_RGBC_ODD_RGB;
+				fallthrough;
 			case MEDIA_BUS_FMT_RGB888_3X8:
 				cfg |= JZ_LCD_CFG_MODE_8BIT_SERIAL;
 				break;
@@ -636,6 +640,7 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
 	}
 
 	regmap_write(priv->map, JZ_REG_LCD_CFG, cfg);
+	regmap_write(priv->map, JZ_REG_LCD_RGBC, rgbcfg);
 }
 
 static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
@@ -643,6 +648,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
 					    struct drm_connector_state *conn_state)
 {
 	struct drm_display_info *info = &conn_state->connector->display_info;
+	struct drm_display_mode *mode = &crtc_state->adjusted_mode;
 
 	if (info->num_bus_formats != 1)
 		return -EINVAL;
@@ -651,10 +657,23 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
 		return 0;
 
 	switch (*info->bus_formats) {
+	case MEDIA_BUS_FMT_RGB888_3X8:
+	case MEDIA_BUS_FMT_RGB888_3X8_DELTA:
+		/*
+		 * The LCD controller expects timing values in dot-clock ticks,
+		 * which is 3x the timing values in pixels when using a 3x8-bit
+		 * display; but it will count the display area size in pixels
+		 * either way. Go figure.
+		 */
+		mode->crtc_clock = mode->clock * 3;
+		mode->crtc_hsync_start = mode->hsync_start * 3 - mode->hdisplay * 2;
+		mode->crtc_hsync_end = mode->hsync_end * 3 - mode->hdisplay * 2;
+		mode->crtc_hdisplay = mode->hdisplay;
+		mode->crtc_htotal = mode->htotal * 3 - mode->hdisplay * 2;
+		return 0;
 	case MEDIA_BUS_FMT_RGB565_1X16:
 	case MEDIA_BUS_FMT_RGB666_1X18:
 	case MEDIA_BUS_FMT_RGB888_1X24:
-	case MEDIA_BUS_FMT_RGB888_3X8:
 		return 0;
 	default:
 		return -EINVAL;
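
The 3x8-bit cases above rescale only the fields the controller counts in dot-clock ticks: sync and blanking widths triple (three ticks per pixel), while subtracting hdisplay * 2 keeps the active width in pixels inside the scaled totals. A small standalone check of that arithmetic, using made-up 320x240 panel timings:

#include <stdio.h>

int main(void)
{
	/* Hypothetical 320x240 panel timings (pixels, clock in kHz). */
	int clock = 6500, hdisplay = 320, hsync_start = 328,
	    hsync_end = 336, htotal = 344;

	/* Same transformation as the 3x8-bit case in atomic_check above. */
	int crtc_clock = clock * 3;
	int crtc_hdisplay = hdisplay;
	int crtc_hsync_start = hsync_start * 3 - hdisplay * 2;
	int crtc_hsync_end = hsync_end * 3 - hdisplay * 2;
	int crtc_htotal = htotal * 3 - hdisplay * 2;

	/* Blanking widths triple; the active width does not. */
	printf("front porch %d -> %d\n",
	       hsync_start - hdisplay, crtc_hsync_start - crtc_hdisplay); /* 8 -> 24 */
	printf("sync width %d -> %d\n",
	       hsync_end - hsync_start, crtc_hsync_end - crtc_hsync_start); /* 8 -> 24 */
	printf("htotal %d -> %d, clock %d -> %d kHz\n",
	       htotal, crtc_htotal, clock, crtc_clock); /* 344 -> 392 */
	return 0;
}
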
@@ -755,8 +774,6 @@ static const struct drm_crtc_funcs ingenic_drm_crtc_funcs = {
 
 	.enable_vblank		= ingenic_drm_enable_vblank,
 	.disable_vblank		= ingenic_drm_disable_vblank,
-
-	.gamma_set		= drm_atomic_helper_legacy_gamma_set,
 };
 
 static const struct drm_plane_helper_funcs ingenic_drm_plane_helper_funcs = {
@@ -1167,6 +1184,22 @@ static int ingenic_drm_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static int __maybe_unused ingenic_drm_suspend(struct device *dev)
+{
+	struct ingenic_drm *priv = dev_get_drvdata(dev);
+
+	return drm_mode_config_helper_suspend(&priv->drm);
+}
+
+static int __maybe_unused ingenic_drm_resume(struct device *dev)
+{
+	struct ingenic_drm *priv = dev_get_drvdata(dev);
+
+	return drm_mode_config_helper_resume(&priv->drm);
+}
+
+static SIMPLE_DEV_PM_OPS(ingenic_drm_pm_ops, ingenic_drm_suspend, ingenic_drm_resume);
+
 static const u32 jz4740_formats[] = {
 	DRM_FORMAT_XRGB1555,
 	DRM_FORMAT_RGB565,
@@ -1246,6 +1279,7 @@ MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);
 static struct platform_driver ingenic_drm_driver = {
 	.driver = {
 		.name = "ingenic-drm",
+		.pm = pm_ptr(&ingenic_drm_pm_ops),
 		.of_match_table = of_match_ptr(ingenic_drm_of_match),
 	},
 	.probe = ingenic_drm_probe,
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.h b/drivers/gpu/drm/ingenic/ingenic-drm.h
index 9b48ce02803dd76e395831a80b914b066c88be30..1b4347f7f084cb0549de4d16d1d0bb8d64bace1a 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.h
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.h
@@ -31,6 +31,7 @@
 #define JZ_REG_LCD_SA1				0x54
 #define JZ_REG_LCD_FID1				0x58
 #define JZ_REG_LCD_CMD1				0x5C
+#define JZ_REG_LCD_RGBC				0x90
 #define JZ_REG_LCD_OSDC				0x100
 #define JZ_REG_LCD_OSDCTRL			0x104
 #define JZ_REG_LCD_OSDS				0x108
@@ -138,6 +139,19 @@
 #define JZ_LCD_STATE_SOF_IRQ			BIT(4)
 #define JZ_LCD_STATE_DISABLED			BIT(0)
 
+#define JZ_LCD_RGBC_ODD_RGB			(0x0 << 4)
+#define JZ_LCD_RGBC_ODD_RBG			(0x1 << 4)
+#define JZ_LCD_RGBC_ODD_GRB			(0x2 << 4)
+#define JZ_LCD_RGBC_ODD_GBR			(0x3 << 4)
+#define JZ_LCD_RGBC_ODD_BRG			(0x4 << 4)
+#define JZ_LCD_RGBC_ODD_BGR			(0x5 << 4)
+#define JZ_LCD_RGBC_EVEN_RGB			(0x0 << 0)
+#define JZ_LCD_RGBC_EVEN_RBG			(0x1 << 0)
+#define JZ_LCD_RGBC_EVEN_GRB			(0x2 << 0)
+#define JZ_LCD_RGBC_EVEN_GBR			(0x3 << 0)
+#define JZ_LCD_RGBC_EVEN_BRG			(0x4 << 0)
+#define JZ_LCD_RGBC_EVEN_BGR			(0x5 << 0)
+
 #define JZ_LCD_OSDC_OSDEN			BIT(0)
 #define JZ_LCD_OSDC_F0EN			BIT(3)
 #define JZ_LCD_OSDC_F1EN			BIT(4)
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index a31a840ce63491078a4ebf888e327ff32810ed4a..f64e06e1067dd8d373d270b3c2ee9697cd7aa40d 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -400,7 +400,7 @@ static void kmb_irq_reset(struct drm_device *drm)
 
 DEFINE_DRM_GEM_CMA_FOPS(fops);
 
-static struct drm_driver kmb_driver = {
+static const struct drm_driver kmb_driver = {
 	.driver_features = DRIVER_GEM |
 	    DRIVER_MODESET | DRIVER_ATOMIC,
 	.irq_handler = kmb_isr,
@@ -556,7 +556,7 @@ MODULE_DEVICE_TABLE(of, kmb_of_match);
 static int __maybe_unused kmb_pm_suspend(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
-	struct kmb_drm_private *kmb = drm ? to_kmb(drm) : NULL;
+	struct kmb_drm_private *kmb = to_kmb(drm);
 
 	drm_kms_helper_poll_disable(drm);
 
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index 8448d1edb553eadbab388c6063c5745207bbd122..be8eea3830c1bf4cd6697ae1168d05ea9956189d 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -114,6 +114,9 @@ static void kmb_plane_atomic_disable(struct drm_plane *plane,
 
 	kmb = to_kmb(plane->dev);
 
+	if (WARN_ON(plane_id >= KMB_MAX_PLANES))
+		return;
+
 	switch (plane_id) {
 	case LAYER_0:
 		kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL1_ENABLE;
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 63b4c5643f9cdb075f2f928670d756f936819f78..5cc20b403a252b94a295ab19c8dfdb60eb58ea21 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -201,7 +201,7 @@ static int lima_pm_busy(struct lima_device *ldev)
 	int ret;
 
 	/* resume GPU if it has been suspended by runtime PM */
-	ret = pm_runtime_get_sync(ldev->dev);
+	ret = pm_runtime_resume_and_get(ldev->dev);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index a892edec5563e89dcb8d83c4c74a5bf0fe925b33..dc54a7a69005409f127f9b32d6f2f56a081f2086 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -1,10 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0
 
-mediatek-drm-y := mtk_disp_color.o \
+mediatek-drm-y := mtk_disp_ccorr.o \
+		  mtk_disp_color.o \
+		  mtk_disp_gamma.o \
 		  mtk_disp_ovl.o \
 		  mtk_disp_rdma.o \
 		  mtk_drm_crtc.o \
-		  mtk_drm_ddp.o \
 		  mtk_drm_ddp_comp.o \
 		  mtk_drm_drv.o \
 		  mtk_drm_gem.o \
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
new file mode 100644
index 0000000000000000000000000000000000000000..141cb36b9c07b7407cc3e907f08ed2e96a66d2ce
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
+#include "mtk_disp_drv.h"
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_CCORR_EN				0x0000
+#define CCORR_EN					BIT(0)
+#define DISP_CCORR_CFG				0x0020
+#define CCORR_RELAY_MODE				BIT(0)
+#define CCORR_ENGINE_EN					BIT(1)
+#define CCORR_GAMMA_OFF					BIT(2)
+#define CCORR_WGAMUT_SRC_CLIP				BIT(3)
+#define DISP_CCORR_SIZE				0x0030
+#define DISP_CCORR_COEF_0			0x0080
+#define DISP_CCORR_COEF_1			0x0084
+#define DISP_CCORR_COEF_2			0x0088
+#define DISP_CCORR_COEF_3			0x008C
+#define DISP_CCORR_COEF_4			0x0090
+
+struct mtk_disp_ccorr_data {
+	u32 matrix_bits;
+};
+
+/**
+ * struct mtk_disp_ccorr - DISP_CCORR driver structure
+ * @clk: clock of the DISP_CCORR block
+ * @regs: base of the memory-mapped registers
+ * @cmdq_reg: CMDQ setting of the DISP_CCORR block
+ * @data: platform data describing the coefficient matrix width
+ */
+struct mtk_disp_ccorr {
+	struct clk *clk;
+	void __iomem *regs;
+	struct cmdq_client_reg cmdq_reg;
+	const struct mtk_disp_ccorr_data	*data;
+};
+
+int mtk_ccorr_clk_enable(struct device *dev)
+{
+	struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+
+	return clk_prepare_enable(ccorr->clk);
+}
+
+void mtk_ccorr_clk_disable(struct device *dev)
+{
+	struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(ccorr->clk);
+}
+
+void mtk_ccorr_config(struct device *dev, unsigned int w,
+		      unsigned int h, unsigned int vrefresh,
+		      unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+{
+	struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+
+	mtk_ddp_write(cmdq_pkt, w << 16 | h, &ccorr->cmdq_reg, ccorr->regs,
+		      DISP_CCORR_SIZE);
+	mtk_ddp_write(cmdq_pkt, CCORR_ENGINE_EN, &ccorr->cmdq_reg, ccorr->regs,
+		      DISP_CCORR_CFG);
+}
+
+void mtk_ccorr_start(struct device *dev)
+{
+	struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+
+	writel(CCORR_EN, ccorr->regs + DISP_CCORR_EN);
+}
+
+void mtk_ccorr_stop(struct device *dev)
+{
+	struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+
+	writel_relaxed(0x0, ccorr->regs + DISP_CCORR_EN);
+}
+
+/* Converts a DRM S31.32 value to the HW S1.n format. */
+static u16 mtk_ctm_s31_32_to_s1_n(u64 in, u32 n)
+{
+	u16 r;
+
+	/* Sign bit. */
+	r = in & BIT_ULL(63) ? BIT(n + 1) : 0;
+
+	if ((in & GENMASK_ULL(62, 33)) > 0) {
+		/*
+		 * The identity value 0x100000000 maps to 0x400 on mt8183
+		 * and to 0x800 on mt8192; clamp anything larger to the
+		 * maximum representable magnitude.
+		 */
+		r |= GENMASK(n, 0);
+	} else {
+		/* Take the n+1 most significant bits. */
+		r |= (in >> (32 - n)) & GENMASK(n, 0);
+	}
+
+	return r;
+}
+
+void mtk_ccorr_ctm_set(struct device *dev, struct drm_crtc_state *state)
+{
+	struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+	struct drm_property_blob *blob = state->ctm;
+	struct drm_color_ctm *ctm;
+	const u64 *input;
+	uint16_t coeffs[9] = { 0 };
+	int i;
+	struct cmdq_pkt *cmdq_pkt = NULL;
+	u32 matrix_bits = ccorr->data->matrix_bits;
+
+	if (!blob)
+		return;
+
+	ctm = (struct drm_color_ctm *)blob->data;
+	input = ctm->matrix;
+
+	for (i = 0; i < ARRAY_SIZE(coeffs); i++)
+		coeffs[i] = mtk_ctm_s31_32_to_s1_n(input[i], matrix_bits);
+
+	mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1],
+		      &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_0);
+	mtk_ddp_write(cmdq_pkt, coeffs[2] << 16 | coeffs[3],
+		      &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_1);
+	mtk_ddp_write(cmdq_pkt, coeffs[4] << 16 | coeffs[5],
+		      &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_2);
+	mtk_ddp_write(cmdq_pkt, coeffs[6] << 16 | coeffs[7],
+		      &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_3);
+	mtk_ddp_write(cmdq_pkt, coeffs[8] << 16,
+		      &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_4);
+}
+
+static int mtk_disp_ccorr_bind(struct device *dev, struct device *master,
+			       void *data)
+{
+	return 0;
+}
+
+static void mtk_disp_ccorr_unbind(struct device *dev, struct device *master,
+				  void *data)
+{
+}
+
+static const struct component_ops mtk_disp_ccorr_component_ops = {
+	.bind	= mtk_disp_ccorr_bind,
+	.unbind	= mtk_disp_ccorr_unbind,
+};
+
+static int mtk_disp_ccorr_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_disp_ccorr *priv;
+	struct resource *res;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		dev_err(dev, "failed to get ccorr clk\n");
+		return PTR_ERR(priv->clk);
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->regs)) {
+		dev_err(dev, "failed to ioremap ccorr\n");
+		return PTR_ERR(priv->regs);
+	}
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+	if (ret)
+		dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+#endif
+
+	priv->data = of_device_get_match_data(dev);
+	platform_set_drvdata(pdev, priv);
+
+	ret = component_add(dev, &mtk_disp_ccorr_component_ops);
+	if (ret)
+		dev_err(dev, "Failed to add component: %d\n", ret);
+
+	return ret;
+}
+
+static int mtk_disp_ccorr_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &mtk_disp_ccorr_component_ops);
+
+	return 0;
+}
+
+static const struct mtk_disp_ccorr_data mt8183_ccorr_driver_data = {
+	.matrix_bits = 10,
+};
+
+static const struct of_device_id mtk_disp_ccorr_driver_dt_match[] = {
+	{ .compatible = "mediatek,mt8183-disp-ccorr",
+	  .data = &mt8183_ccorr_driver_data},
+	{},
+};
+MODULE_DEVICE_TABLE(of, mtk_disp_ccorr_driver_dt_match);
+
+struct platform_driver mtk_disp_ccorr_driver = {
+	.probe		= mtk_disp_ccorr_probe,
+	.remove		= mtk_disp_ccorr_remove,
+	.driver		= {
+		.name	= "mediatek-disp-ccorr",
+		.owner	= THIS_MODULE,
+		.of_match_table = mtk_disp_ccorr_driver_dt_match,
+	},
+};
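
mtk_ctm_s31_32_to_s1_n() above keeps the sign in bit n+1, clamps any magnitude of 2.0 or more (any of bits 33-62 set) to the all-ones coefficient, and otherwise takes the top n+1 bits of the S1.32 value. A userspace re-implementation with plain masks can sanity-check the identity coefficient mentioned in the comments:

#include <stdint.h>
#include <stdio.h>

/* Userspace copy of the driver's S31.32 -> S1.n conversion logic. */
static uint16_t s31_32_to_s1_n(uint64_t in, uint32_t n)
{
	uint16_t r = (in & (1ULL << 63)) ? (1U << (n + 1)) : 0;

	if (in & (((1ULL << 30) - 1) << 33))	/* |value| >= 2.0: clamp */
		r |= (1U << (n + 1)) - 1;
	else					/* top n+1 bits of S1.32 */
		r |= (in >> (32 - n)) & ((1U << (n + 1)) - 1);

	return r;
}

int main(void)
{
	/* 1.0 in S31.32 is 0x100000000. */
	printf("0x%x\n", (unsigned)s31_32_to_s1_n(0x100000000ULL, 10)); /* 0x400 (mt8183) */
	printf("0x%x\n", (unsigned)s31_32_to_s1_n(0x100000000ULL, 11)); /* 0x800 (mt8192) */
	return 0;
}
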
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 6048cbc9f0ec63da9c96d49230bc75d09444912d..63f411ab393b70358d41f9f9f3389b1726d372df 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 
+#include "mtk_disp_drv.h"
 #include "mtk_drm_crtc.h"
 #include "mtk_drm_ddp_comp.h"
 
@@ -36,64 +37,55 @@ struct mtk_disp_color_data {
  * @data: platform colour driver data
  */
 struct mtk_disp_color {
-	struct mtk_ddp_comp			ddp_comp;
 	struct drm_crtc				*crtc;
+	struct clk				*clk;
+	void __iomem				*regs;
+	struct cmdq_client_reg			cmdq_reg;
 	const struct mtk_disp_color_data	*data;
 };
 
-static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
+int mtk_color_clk_enable(struct device *dev)
 {
-	return container_of(comp, struct mtk_disp_color, ddp_comp);
+	struct mtk_disp_color *color = dev_get_drvdata(dev);
+
+	return clk_prepare_enable(color->clk);
 }
 
-static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
-			     unsigned int h, unsigned int vrefresh,
-			     unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+void mtk_color_clk_disable(struct device *dev)
 {
-	struct mtk_disp_color *color = comp_to_color(comp);
+	struct mtk_disp_color *color = dev_get_drvdata(dev);
 
-	mtk_ddp_write(cmdq_pkt, w, comp, DISP_COLOR_WIDTH(color));
-	mtk_ddp_write(cmdq_pkt, h, comp, DISP_COLOR_HEIGHT(color));
+	clk_disable_unprepare(color->clk);
 }
 
-static void mtk_color_start(struct mtk_ddp_comp *comp)
+void mtk_color_config(struct device *dev, unsigned int w,
+		      unsigned int h, unsigned int vrefresh,
+		      unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
 {
-	struct mtk_disp_color *color = comp_to_color(comp);
+	struct mtk_disp_color *color = dev_get_drvdata(dev);
 
-	writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
-	       comp->regs + DISP_COLOR_CFG_MAIN);
-	writel(0x1, comp->regs + DISP_COLOR_START(color));
+	mtk_ddp_write(cmdq_pkt, w, &color->cmdq_reg, color->regs, DISP_COLOR_WIDTH(color));
+	mtk_ddp_write(cmdq_pkt, h, &color->cmdq_reg, color->regs, DISP_COLOR_HEIGHT(color));
 }
 
-static const struct mtk_ddp_comp_funcs mtk_disp_color_funcs = {
-	.config = mtk_color_config,
-	.start = mtk_color_start,
-};
+void mtk_color_start(struct device *dev)
+{
+	struct mtk_disp_color *color = dev_get_drvdata(dev);
+
+	writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
+	       color->regs + DISP_COLOR_CFG_MAIN);
+	writel(0x1, color->regs + DISP_COLOR_START(color));
+}
 
 static int mtk_disp_color_bind(struct device *dev, struct device *master,
 			       void *data)
 {
-	struct mtk_disp_color *priv = dev_get_drvdata(dev);
-	struct drm_device *drm_dev = data;
-	int ret;
-
-	ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
-	if (ret < 0) {
-		dev_err(dev, "Failed to register component %pOF: %d\n",
-			dev->of_node, ret);
-		return ret;
-	}
-
 	return 0;
 }
 
 static void mtk_disp_color_unbind(struct device *dev, struct device *master,
 				  void *data)
 {
-	struct mtk_disp_color *priv = dev_get_drvdata(dev);
-	struct drm_device *drm_dev = data;
-
-	mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
 }
 
 static const struct component_ops mtk_disp_color_component_ops = {
@@ -105,31 +97,32 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct mtk_disp_color *priv;
-	int comp_id;
+	struct resource *res;
 	int ret;
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_COLOR);
-	if (comp_id < 0) {
-		dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
-		return comp_id;
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		dev_err(dev, "failed to get color clk\n");
+		return PTR_ERR(priv->clk);
 	}
 
-	ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
-				&mtk_disp_color_funcs);
-	if (ret) {
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "Failed to initialize component: %d\n",
-				ret);
-
-		return ret;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->regs)) {
+		dev_err(dev, "failed to ioremap color\n");
+		return PTR_ERR(priv->regs);
 	}
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+	if (ret)
+		dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+#endif
 
 	priv->data = of_device_get_match_data(dev);
-
 	platform_set_drvdata(pdev, priv);
 
 	ret = component_add(dev, &mtk_disp_color_component_ops);
@@ -141,8 +134,6 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
 
 static int mtk_disp_color_remove(struct platform_device *pdev)
 {
-	component_del(&pdev->dev, &mtk_disp_color_component_ops);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
new file mode 100644
index 0000000000000000000000000000000000000000..cafd9df2d63bb06f8289e7cb4b2d816a7a0891fe
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef _MTK_DISP_DRV_H_
+#define _MTK_DISP_DRV_H_
+
+#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_drm_plane.h"
+
+void mtk_ccorr_ctm_set(struct device *dev, struct drm_crtc_state *state);
+int mtk_ccorr_clk_enable(struct device *dev);
+void mtk_ccorr_clk_disable(struct device *dev);
+void mtk_ccorr_config(struct device *dev, unsigned int w,
+		      unsigned int h, unsigned int vrefresh,
+		      unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+void mtk_ccorr_start(struct device *dev);
+void mtk_ccorr_stop(struct device *dev);
+
+void mtk_color_bypass_shadow(struct device *dev);
+int mtk_color_clk_enable(struct device *dev);
+void mtk_color_clk_disable(struct device *dev);
+void mtk_color_config(struct device *dev, unsigned int w,
+		      unsigned int h, unsigned int vrefresh,
+		      unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+void mtk_color_start(struct device *dev);
+
+void mtk_dither_set_common(void __iomem *regs, struct cmdq_client_reg *cmdq_reg,
+			   unsigned int bpc, unsigned int cfg,
+			   unsigned int dither_en, struct cmdq_pkt *cmdq_pkt);
+
+void mtk_dpi_start(struct device *dev);
+void mtk_dpi_stop(struct device *dev);
+
+void mtk_dsi_ddp_start(struct device *dev);
+void mtk_dsi_ddp_stop(struct device *dev);
+
+int mtk_gamma_clk_enable(struct device *dev);
+void mtk_gamma_clk_disable(struct device *dev);
+void mtk_gamma_config(struct device *dev, unsigned int w,
+		      unsigned int h, unsigned int vrefresh,
+		      unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+void mtk_gamma_set(struct device *dev, struct drm_crtc_state *state);
+void mtk_gamma_set_common(void __iomem *regs, struct drm_crtc_state *state);
+void mtk_gamma_start(struct device *dev);
+void mtk_gamma_stop(struct device *dev);
+
+void mtk_ovl_bgclr_in_on(struct device *dev);
+void mtk_ovl_bgclr_in_off(struct device *dev);
+void mtk_ovl_bypass_shadow(struct device *dev);
+int mtk_ovl_clk_enable(struct device *dev);
+void mtk_ovl_clk_disable(struct device *dev);
+void mtk_ovl_config(struct device *dev, unsigned int w,
+		    unsigned int h, unsigned int vrefresh,
+		    unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+int mtk_ovl_layer_check(struct device *dev, unsigned int idx,
+			struct mtk_plane_state *mtk_state);
+void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
+			  struct mtk_plane_state *state,
+			  struct cmdq_pkt *cmdq_pkt);
+unsigned int mtk_ovl_layer_nr(struct device *dev);
+void mtk_ovl_layer_on(struct device *dev, unsigned int idx,
+		      struct cmdq_pkt *cmdq_pkt);
+void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
+		       struct cmdq_pkt *cmdq_pkt);
+void mtk_ovl_start(struct device *dev);
+void mtk_ovl_stop(struct device *dev);
+unsigned int mtk_ovl_supported_rotations(struct device *dev);
+void mtk_ovl_enable_vblank(struct device *dev,
+			   void (*vblank_cb)(void *),
+			   void *vblank_cb_data);
+void mtk_ovl_disable_vblank(struct device *dev);
+
+void mtk_rdma_bypass_shadow(struct device *dev);
+int mtk_rdma_clk_enable(struct device *dev);
+void mtk_rdma_clk_disable(struct device *dev);
+void mtk_rdma_config(struct device *dev, unsigned int width,
+		     unsigned int height, unsigned int vrefresh,
+		     unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+unsigned int mtk_rdma_layer_nr(struct device *dev);
+void mtk_rdma_layer_config(struct device *dev, unsigned int idx,
+			   struct mtk_plane_state *state,
+			   struct cmdq_pkt *cmdq_pkt);
+void mtk_rdma_start(struct device *dev);
+void mtk_rdma_stop(struct device *dev);
+void mtk_rdma_enable_vblank(struct device *dev,
+			    void (*vblank_cb)(void *),
+			    void *vblank_cb_data);
+void mtk_rdma_disable_vblank(struct device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
new file mode 100644
index 0000000000000000000000000000000000000000..3ebf91e0ab4127db617bb28ea8bb8c394fb58c3c
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
+#include "mtk_disp_drv.h"
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_GAMMA_EN				0x0000
+#define GAMMA_EN					BIT(0)
+#define DISP_GAMMA_CFG				0x0020
+#define GAMMA_LUT_EN					BIT(1)
+#define GAMMA_DITHERING					BIT(2)
+#define DISP_GAMMA_SIZE				0x0030
+#define DISP_GAMMA_LUT				0x0700
+
+#define LUT_10BIT_MASK				0x03ff
+
+struct mtk_disp_gamma_data {
+	bool has_dither;
+};
+
+/**
+ * struct mtk_disp_gamma - DISP_GAMMA driver structure
+ * @clk: clock of the DISP_GAMMA block
+ * @regs: base of the memory-mapped registers
+ * @cmdq_reg: CMDQ setting of the DISP_GAMMA block
+ * @data: platform data selecting the dithering capability
+ */
+struct mtk_disp_gamma {
+	struct clk *clk;
+	void __iomem *regs;
+	struct cmdq_client_reg cmdq_reg;
+	const struct mtk_disp_gamma_data *data;
+};
+
+int mtk_gamma_clk_enable(struct device *dev)
+{
+	struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+	return clk_prepare_enable(gamma->clk);
+}
+
+void mtk_gamma_clk_disable(struct device *dev)
+{
+	struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(gamma->clk);
+}
+
+void mtk_gamma_set_common(void __iomem *regs, struct drm_crtc_state *state)
+{
+	unsigned int i, reg;
+	struct drm_color_lut *lut;
+	void __iomem *lut_base;
+	u32 word;
+
+	if (state->gamma_lut) {
+		reg = readl(regs + DISP_GAMMA_CFG);
+		reg = reg | GAMMA_LUT_EN;
+		writel(reg, regs + DISP_GAMMA_CFG);
+		lut_base = regs + DISP_GAMMA_LUT;
+		lut = (struct drm_color_lut *)state->gamma_lut->data;
+		for (i = 0; i < MTK_LUT_SIZE; i++) {
+			word = (((lut[i].red >> 6) & LUT_10BIT_MASK) << 20) +
+				(((lut[i].green >> 6) & LUT_10BIT_MASK) << 10) +
+				((lut[i].blue >> 6) & LUT_10BIT_MASK);
+			writel(word, (lut_base + i * 4));
+		}
+	}
+}
+
+void mtk_gamma_set(struct device *dev, struct drm_crtc_state *state)
+{
+	struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+	mtk_gamma_set_common(gamma->regs, state);
+}
+
+void mtk_gamma_config(struct device *dev, unsigned int w,
+		      unsigned int h, unsigned int vrefresh,
+		      unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+{
+	struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+	mtk_ddp_write(cmdq_pkt, h << 16 | w, &gamma->cmdq_reg, gamma->regs,
+		      DISP_GAMMA_SIZE);
+	if (gamma->data && gamma->data->has_dither)
+		mtk_dither_set_common(gamma->regs, &gamma->cmdq_reg, bpc,
+				      DISP_GAMMA_CFG, GAMMA_DITHERING, cmdq_pkt);
+}
+
+void mtk_gamma_start(struct device *dev)
+{
+	struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+	writel(GAMMA_EN, gamma->regs + DISP_GAMMA_EN);
+}
+
+void mtk_gamma_stop(struct device *dev)
+{
+	struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+	writel_relaxed(0x0, gamma->regs + DISP_GAMMA_EN);
+}
+
+static int mtk_disp_gamma_bind(struct device *dev, struct device *master,
+			       void *data)
+{
+	return 0;
+}
+
+static void mtk_disp_gamma_unbind(struct device *dev, struct device *master,
+				  void *data)
+{
+}
+
+static const struct component_ops mtk_disp_gamma_component_ops = {
+	.bind	= mtk_disp_gamma_bind,
+	.unbind = mtk_disp_gamma_unbind,
+};
+
+static int mtk_disp_gamma_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_disp_gamma *priv;
+	struct resource *res;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		dev_err(dev, "failed to get gamma clk\n");
+		return PTR_ERR(priv->clk);
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->regs)) {
+		dev_err(dev, "failed to ioremap gamma\n");
+		return PTR_ERR(priv->regs);
+	}
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+	if (ret)
+		dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+#endif
+
+	priv->data = of_device_get_match_data(dev);
+	platform_set_drvdata(pdev, priv);
+
+	ret = component_add(dev, &mtk_disp_gamma_component_ops);
+	if (ret)
+		dev_err(dev, "Failed to add component: %d\n", ret);
+
+	return ret;
+}
+
+static int mtk_disp_gamma_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &mtk_disp_gamma_component_ops);
+
+	return 0;
+}
+
+static const struct mtk_disp_gamma_data mt8173_gamma_driver_data = {
+	.has_dither = true,
+};
+
+static const struct of_device_id mtk_disp_gamma_driver_dt_match[] = {
+	{ .compatible = "mediatek,mt8173-disp-gamma",
+	  .data = &mt8173_gamma_driver_data},
+	{ .compatible = "mediatek,mt8183-disp-gamma"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, mtk_disp_gamma_driver_dt_match);
+
+struct platform_driver mtk_disp_gamma_driver = {
+	.probe		= mtk_disp_gamma_probe,
+	.remove		= mtk_disp_gamma_remove,
+	.driver		= {
+		.name	= "mediatek-disp-gamma",
+		.owner	= THIS_MODULE,
+		.of_match_table = mtk_disp_gamma_driver_dt_match,
+	},
+};
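
mtk_gamma_set_common() above truncates each 16-bit DRM LUT channel to its top 10 bits and packs R, G and B into one 30-bit register word. The packing step can be checked in isolation (| replaces the driver's +, which is equivalent here because the fields never overlap):

#include <stdint.h>
#include <stdio.h>

#define LUT_10BIT_MASK 0x03ff

/* Same packing as the LUT loop in mtk_gamma_set_common(). */
static uint32_t pack_lut_word(uint16_t red, uint16_t green, uint16_t blue)
{
	return (((uint32_t)(red >> 6) & LUT_10BIT_MASK) << 20) |
	       (((uint32_t)(green >> 6) & LUT_10BIT_MASK) << 10) |
	       ((uint32_t)(blue >> 6) & LUT_10BIT_MASK);
}

int main(void)
{
	/* Full-scale white saturates every channel at 0x3ff. */
	printf("0x%08x\n", (unsigned)pack_lut_word(0xffff, 0xffff, 0xffff)); /* 0x3fffffff */
	return 0;
}
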
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 74ef6fc0528b6e783c3134164ae32048a498f7cf..961f87f8d4d156f873bc0ad4167e7f27b4b116fc 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -13,6 +13,7 @@
 #include <linux/platform_device.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 
+#include "mtk_disp_drv.h"
 #include "mtk_drm_crtc.h"
 #include "mtk_drm_ddp_comp.h"
 
@@ -23,6 +24,7 @@
 #define DISP_REG_OVL_RST			0x0014
 #define DISP_REG_OVL_ROI_SIZE			0x0020
 #define DISP_REG_OVL_DATAPATH_CON		0x0024
+#define OVL_LAYER_SMI_ID_EN				BIT(0)
 #define OVL_BGCLR_SEL_IN				BIT(2)
 #define DISP_REG_OVL_ROI_BGCLR			0x0028
 #define DISP_REG_OVL_SRC_CON			0x002c
@@ -61,6 +63,7 @@ struct mtk_disp_ovl_data {
 	unsigned int gmc_bits;
 	unsigned int layer_nr;
 	bool fmt_rgb565_is_0;
+	bool smi_id_en;
 };
 
 /**
@@ -70,88 +73,124 @@ struct mtk_disp_ovl_data {
  * @data: platform data
  */
 struct mtk_disp_ovl {
-	struct mtk_ddp_comp		ddp_comp;
 	struct drm_crtc			*crtc;
+	struct clk			*clk;
+	void __iomem			*regs;
+	struct cmdq_client_reg		cmdq_reg;
 	const struct mtk_disp_ovl_data	*data;
+	void				(*vblank_cb)(void *data);
+	void				*vblank_cb_data;
 };
 
-static inline struct mtk_disp_ovl *comp_to_ovl(struct mtk_ddp_comp *comp)
-{
-	return container_of(comp, struct mtk_disp_ovl, ddp_comp);
-}
-
 static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id)
 {
 	struct mtk_disp_ovl *priv = dev_id;
-	struct mtk_ddp_comp *ovl = &priv->ddp_comp;
 
 	/* Clear frame completion interrupt */
-	writel(0x0, ovl->regs + DISP_REG_OVL_INTSTA);
+	writel(0x0, priv->regs + DISP_REG_OVL_INTSTA);
 
-	if (!priv->crtc)
+	if (!priv->vblank_cb)
 		return IRQ_NONE;
 
-	mtk_crtc_ddp_irq(priv->crtc, ovl);
+	priv->vblank_cb(priv->vblank_cb_data);
 
 	return IRQ_HANDLED;
 }
 
-static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp,
-				  struct drm_crtc *crtc)
+void mtk_ovl_enable_vblank(struct device *dev,
+			   void (*vblank_cb)(void *),
+			   void *vblank_cb_data)
+{
+	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+	ovl->vblank_cb = vblank_cb;
+	ovl->vblank_cb_data = vblank_cb_data;
+	writel(0x0, ovl->regs + DISP_REG_OVL_INTSTA);
+	writel_relaxed(OVL_FME_CPL_INT, ovl->regs + DISP_REG_OVL_INTEN);
+}
+
+void mtk_ovl_disable_vblank(struct device *dev)
+{
+	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+	ovl->vblank_cb = NULL;
+	ovl->vblank_cb_data = NULL;
+	writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_INTEN);
+}
+
+int mtk_ovl_clk_enable(struct device *dev)
 {
-	struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
 
-	ovl->crtc = crtc;
-	writel(0x0, comp->regs + DISP_REG_OVL_INTSTA);
-	writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN);
+	return clk_prepare_enable(ovl->clk);
 }
 
-static void mtk_ovl_disable_vblank(struct mtk_ddp_comp *comp)
+void mtk_ovl_clk_disable(struct device *dev)
 {
-	struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
 
-	ovl->crtc = NULL;
-	writel_relaxed(0x0, comp->regs + DISP_REG_OVL_INTEN);
+	clk_disable_unprepare(ovl->clk);
 }
 
-static void mtk_ovl_start(struct mtk_ddp_comp *comp)
+void mtk_ovl_start(struct device *dev)
 {
-	writel_relaxed(0x1, comp->regs + DISP_REG_OVL_EN);
+	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+	if (ovl->data->smi_id_en) {
+		unsigned int reg;
+
+		reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
+		reg = reg | OVL_LAYER_SMI_ID_EN;
+		writel_relaxed(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
+	}
+	writel_relaxed(0x1, ovl->regs + DISP_REG_OVL_EN);
 }
 
-static void mtk_ovl_stop(struct mtk_ddp_comp *comp)
+void mtk_ovl_stop(struct device *dev)
 {
-	writel_relaxed(0x0, comp->regs + DISP_REG_OVL_EN);
+	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+	writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_EN);
+	if (ovl->data->smi_id_en) {
+		unsigned int reg;
+
+		reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
+		reg = reg & ~OVL_LAYER_SMI_ID_EN;
+		writel_relaxed(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
+	}
 }
 
-static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
-			   unsigned int h, unsigned int vrefresh,
-			   unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+void mtk_ovl_config(struct device *dev, unsigned int w,
+		    unsigned int h, unsigned int vrefresh,
+		    unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
 {
+	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
 	if (w != 0 && h != 0)
-		mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, comp,
+		mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, &ovl->cmdq_reg, ovl->regs,
 				      DISP_REG_OVL_ROI_SIZE);
-	mtk_ddp_write_relaxed(cmdq_pkt, 0x0, comp, DISP_REG_OVL_ROI_BGCLR);
+	mtk_ddp_write_relaxed(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_ROI_BGCLR);
 
-	mtk_ddp_write(cmdq_pkt, 0x1, comp, DISP_REG_OVL_RST);
-	mtk_ddp_write(cmdq_pkt, 0x0, comp, DISP_REG_OVL_RST);
+	mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST);
+	mtk_ddp_write(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST);
 }
 
-static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
+unsigned int mtk_ovl_layer_nr(struct device *dev)
 {
-	struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
 
 	return ovl->data->layer_nr;
 }
 
-static unsigned int mtk_ovl_supported_rotations(struct mtk_ddp_comp *comp)
+unsigned int mtk_ovl_supported_rotations(struct device *dev)
 {
 	return DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
 	       DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
 }
 
-static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx,
-			       struct mtk_plane_state *mtk_state)
+int mtk_ovl_layer_check(struct device *dev, unsigned int idx,
+			struct mtk_plane_state *mtk_state)
 {
 	struct drm_plane_state *state = &mtk_state->base;
 	unsigned int rotation = 0;
@@ -178,15 +217,15 @@ static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx,
 	return 0;
 }
 
-static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx,
-			     struct cmdq_pkt *cmdq_pkt)
+void mtk_ovl_layer_on(struct device *dev, unsigned int idx,
+		      struct cmdq_pkt *cmdq_pkt)
 {
 	unsigned int gmc_thrshd_l;
 	unsigned int gmc_thrshd_h;
 	unsigned int gmc_value;
-	struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
 
-	mtk_ddp_write(cmdq_pkt, 0x1, comp,
+	mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs,
 		      DISP_REG_OVL_RDMA_CTRL(idx));
 	gmc_thrshd_l = GMC_THRESHOLD_LOW >>
 		      (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
@@ -198,17 +237,19 @@ static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx,
 		gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
 			    gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
 	mtk_ddp_write(cmdq_pkt, gmc_value,
-		      comp, DISP_REG_OVL_RDMA_GMC(idx));
-	mtk_ddp_write_mask(cmdq_pkt, BIT(idx), comp,
+		      &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RDMA_GMC(idx));
+	mtk_ddp_write_mask(cmdq_pkt, BIT(idx), &ovl->cmdq_reg, ovl->regs,
 			   DISP_REG_OVL_SRC_CON, BIT(idx));
 }
 
-static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx,
-			      struct cmdq_pkt *cmdq_pkt)
+void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
+		       struct cmdq_pkt *cmdq_pkt)
 {
-	mtk_ddp_write_mask(cmdq_pkt, 0, comp,
+	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+	mtk_ddp_write_mask(cmdq_pkt, 0, &ovl->cmdq_reg, ovl->regs,
 			   DISP_REG_OVL_SRC_CON, BIT(idx));
-	mtk_ddp_write(cmdq_pkt, 0, comp,
+	mtk_ddp_write(cmdq_pkt, 0, &ovl->cmdq_reg, ovl->regs,
 		      DISP_REG_OVL_RDMA_CTRL(idx));
 }
 
@@ -248,11 +289,11 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
 	}
 }
 
-static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
-				 struct mtk_plane_state *state,
-				 struct cmdq_pkt *cmdq_pkt)
+void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
+			  struct mtk_plane_state *state,
+			  struct cmdq_pkt *cmdq_pkt)
 {
-	struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
 	struct mtk_plane_pending_state *pending = &state->pending;
 	unsigned int addr = pending->addr;
 	unsigned int pitch = pending->pitch & 0xffff;
@@ -262,12 +303,12 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
 	unsigned int con;
 
 	if (!pending->enable) {
-		mtk_ovl_layer_off(comp, idx, cmdq_pkt);
+		mtk_ovl_layer_off(dev, idx, cmdq_pkt);
 		return;
 	}
 
 	con = ovl_fmt_convert(ovl, fmt);
-	if (state->base.fb->format->has_alpha)
+	if (state->base.fb && state->base.fb->format->has_alpha)
 		con |= OVL_CON_AEN | OVL_CON_ALPHA;
 
 	if (pending->rotation & DRM_MODE_REFLECT_Y) {
@@ -280,76 +321,49 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
 		addr += pending->pitch - 1;
 	}
 
-	mtk_ddp_write_relaxed(cmdq_pkt, con, comp,
+	mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs,
 			      DISP_REG_OVL_CON(idx));
-	mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp,
+	mtk_ddp_write_relaxed(cmdq_pkt, pitch, &ovl->cmdq_reg, ovl->regs,
 			      DISP_REG_OVL_PITCH(idx));
-	mtk_ddp_write_relaxed(cmdq_pkt, src_size, comp,
+	mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs,
 			      DISP_REG_OVL_SRC_SIZE(idx));
-	mtk_ddp_write_relaxed(cmdq_pkt, offset, comp,
+	mtk_ddp_write_relaxed(cmdq_pkt, offset, &ovl->cmdq_reg, ovl->regs,
 			      DISP_REG_OVL_OFFSET(idx));
-	mtk_ddp_write_relaxed(cmdq_pkt, addr, comp,
+	mtk_ddp_write_relaxed(cmdq_pkt, addr, &ovl->cmdq_reg, ovl->regs,
 			      DISP_REG_OVL_ADDR(ovl, idx));
 
-	mtk_ovl_layer_on(comp, idx, cmdq_pkt);
+	mtk_ovl_layer_on(dev, idx, cmdq_pkt);
 }
 
-static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp)
+void mtk_ovl_bgclr_in_on(struct device *dev)
 {
+	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
 	unsigned int reg;
 
-	reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+	reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
 	reg = reg | OVL_BGCLR_SEL_IN;
-	writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+	writel(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
 }
 
-static void mtk_ovl_bgclr_in_off(struct mtk_ddp_comp *comp)
+void mtk_ovl_bgclr_in_off(struct device *dev)
 {
+	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
 	unsigned int reg;
 
-	reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+	reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
 	reg = reg & ~OVL_BGCLR_SEL_IN;
-	writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+	writel(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
 }
 
-static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
-	.config = mtk_ovl_config,
-	.start = mtk_ovl_start,
-	.stop = mtk_ovl_stop,
-	.enable_vblank = mtk_ovl_enable_vblank,
-	.disable_vblank = mtk_ovl_disable_vblank,
-	.supported_rotations = mtk_ovl_supported_rotations,
-	.layer_nr = mtk_ovl_layer_nr,
-	.layer_check = mtk_ovl_layer_check,
-	.layer_config = mtk_ovl_layer_config,
-	.bgclr_in_on = mtk_ovl_bgclr_in_on,
-	.bgclr_in_off = mtk_ovl_bgclr_in_off,
-};
-
 static int mtk_disp_ovl_bind(struct device *dev, struct device *master,
 			     void *data)
 {
-	struct mtk_disp_ovl *priv = dev_get_drvdata(dev);
-	struct drm_device *drm_dev = data;
-	int ret;
-
-	ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
-	if (ret < 0) {
-		dev_err(dev, "Failed to register component %pOF: %d\n",
-			dev->of_node, ret);
-		return ret;
-	}
-
 	return 0;
 }
 
 static void mtk_disp_ovl_unbind(struct device *dev, struct device *master,
 				void *data)
 {
-	struct mtk_disp_ovl *priv = dev_get_drvdata(dev);
-	struct drm_device *drm_dev = data;
-
-	mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
 }
 
 static const struct component_ops mtk_disp_ovl_component_ops = {
@@ -361,7 +375,7 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct mtk_disp_ovl *priv;
-	int comp_id;
+	struct resource *res;
 	int irq;
 	int ret;
 
@@ -373,27 +387,25 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
 	if (irq < 0)
 		return irq;
 
-	priv->data = of_device_get_match_data(dev);
-
-	comp_id = mtk_ddp_comp_get_id(dev->of_node,
-				      priv->data->layer_nr == 4 ?
-				      MTK_DISP_OVL :
-				      MTK_DISP_OVL_2L);
-	if (comp_id < 0) {
-		dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
-		return comp_id;
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		dev_err(dev, "failed to get ovl clk\n");
+		return PTR_ERR(priv->clk);
 	}
 
-	ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
-				&mtk_disp_ovl_funcs);
-	if (ret) {
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "Failed to initialize component: %d\n",
-				ret);
-
-		return ret;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->regs)) {
+		dev_err(dev, "failed to ioremap ovl\n");
+		return PTR_ERR(priv->regs);
 	}
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+	if (ret)
+		dev_dbg(dev, "failed to get mediatek,gce-client-reg\n");
+#endif
 
+	priv->data = of_device_get_match_data(dev);
 	platform_set_drvdata(pdev, priv);
 
 	ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
@@ -412,8 +424,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
 
 static int mtk_disp_ovl_remove(struct platform_device *pdev)
 {
-	component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
-
 	return 0;
 }
 
@@ -431,11 +441,29 @@ static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
 	.fmt_rgb565_is_0 = true,
 };
 
+static const struct mtk_disp_ovl_data mt8183_ovl_driver_data = {
+	.addr = DISP_REG_OVL_ADDR_MT8173,
+	.gmc_bits = 10,
+	.layer_nr = 4,
+	.fmt_rgb565_is_0 = true,
+};
+
+static const struct mtk_disp_ovl_data mt8183_ovl_2l_driver_data = {
+	.addr = DISP_REG_OVL_ADDR_MT8173,
+	.gmc_bits = 10,
+	.layer_nr = 2,
+	.fmt_rgb565_is_0 = true,
+};
+
 static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
 	{ .compatible = "mediatek,mt2701-disp-ovl",
 	  .data = &mt2701_ovl_driver_data},
 	{ .compatible = "mediatek,mt8173-disp-ovl",
 	  .data = &mt8173_ovl_driver_data},
+	{ .compatible = "mediatek,mt8183-disp-ovl",
+	  .data = &mt8183_ovl_driver_data},
+	{ .compatible = "mediatek,mt8183-disp-ovl-2l",
+	  .data = &mt8183_ovl_2l_driver_data},
 	{},
 };
 MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
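
The OVL conversion above follows one pattern throughout: every hardware op loses its
struct mtk_ddp_comp * parameter in favour of a plain struct device *, and the
driver-private state is recovered with dev_get_drvdata() after probe has stored it.
A minimal sketch of that pattern under hypothetical names (struct my_comp,
my_comp_start), not the driver's actual code:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

struct my_comp {
	void __iomem *regs;		/* MMIO base mapped at probe time */
};

static int my_comp_probe(struct platform_device *pdev)
{
	struct my_comp *priv;
	struct resource *res;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	/* Make the state reachable from the bare struct device. */
	platform_set_drvdata(pdev, priv);
	return 0;
}

/* An op exported to the DRM side now needs only the device pointer. */
void my_comp_start(struct device *dev)
{
	struct my_comp *priv = dev_get_drvdata(dev);

	writel(1, priv->regs);		/* hypothetical enable register */
}
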
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index d46b8ae1d08008bb9dd968f8774ca74ed6afbd24..728aaadfea8cfcc130a628105ec3f427e90bf490 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 
+#include "mtk_disp_drv.h"
 #include "mtk_drm_crtc.h"
 #include "mtk_drm_ddp_comp.h"
 
@@ -61,83 +62,105 @@ struct mtk_disp_rdma_data {
  * @data: local driver data
  */
 struct mtk_disp_rdma {
-	struct mtk_ddp_comp		ddp_comp;
-	struct drm_crtc			*crtc;
+	struct clk			*clk;
+	void __iomem			*regs;
+	struct cmdq_client_reg		cmdq_reg;
 	const struct mtk_disp_rdma_data	*data;
+	void				(*vblank_cb)(void *data);
+	void				*vblank_cb_data;
+	u32				fifo_size;
 };
 
-static inline struct mtk_disp_rdma *comp_to_rdma(struct mtk_ddp_comp *comp)
-{
-	return container_of(comp, struct mtk_disp_rdma, ddp_comp);
-}
-
 static irqreturn_t mtk_disp_rdma_irq_handler(int irq, void *dev_id)
 {
 	struct mtk_disp_rdma *priv = dev_id;
-	struct mtk_ddp_comp *rdma = &priv->ddp_comp;
 
 	/* Clear frame completion interrupt */
-	writel(0x0, rdma->regs + DISP_REG_RDMA_INT_STATUS);
+	writel(0x0, priv->regs + DISP_REG_RDMA_INT_STATUS);
 
-	if (!priv->crtc)
+	if (!priv->vblank_cb)
 		return IRQ_NONE;
 
-	mtk_crtc_ddp_irq(priv->crtc, rdma);
+	priv->vblank_cb(priv->vblank_cb_data);
 
 	return IRQ_HANDLED;
 }
 
-static void rdma_update_bits(struct mtk_ddp_comp *comp, unsigned int reg,
+static void rdma_update_bits(struct device *dev, unsigned int reg,
 			     unsigned int mask, unsigned int val)
 {
-	unsigned int tmp = readl(comp->regs + reg);
+	struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
+	unsigned int tmp = readl(rdma->regs + reg);
 
 	tmp = (tmp & ~mask) | (val & mask);
-	writel(tmp, comp->regs + reg);
+	writel(tmp, rdma->regs + reg);
 }
 
-static void mtk_rdma_enable_vblank(struct mtk_ddp_comp *comp,
-				   struct drm_crtc *crtc)
+void mtk_rdma_enable_vblank(struct device *dev,
+			    void (*vblank_cb)(void *),
+			    void *vblank_cb_data)
 {
-	struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+	struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
 
-	rdma->crtc = crtc;
-	rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT,
+	rdma->vblank_cb = vblank_cb;
+	rdma->vblank_cb_data = vblank_cb_data;
+	rdma_update_bits(dev, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT,
 			 RDMA_FRAME_END_INT);
 }
 
-static void mtk_rdma_disable_vblank(struct mtk_ddp_comp *comp)
+void mtk_rdma_disable_vblank(struct device *dev)
+{
+	struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
+
+	rdma->vblank_cb = NULL;
+	rdma->vblank_cb_data = NULL;
+	rdma_update_bits(dev, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 0);
+}
+
+int mtk_rdma_clk_enable(struct device *dev)
 {
-	struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+	struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
 
-	rdma->crtc = NULL;
-	rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 0);
+	return clk_prepare_enable(rdma->clk);
 }
 
-static void mtk_rdma_start(struct mtk_ddp_comp *comp)
+void mtk_rdma_clk_disable(struct device *dev)
 {
-	rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN,
+	struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(rdma->clk);
+}
+
+void mtk_rdma_start(struct device *dev)
+{
+	rdma_update_bits(dev, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN,
 			 RDMA_ENGINE_EN);
 }
 
-static void mtk_rdma_stop(struct mtk_ddp_comp *comp)
+void mtk_rdma_stop(struct device *dev)
 {
-	rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN, 0);
+	rdma_update_bits(dev, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN, 0);
 }
 
-static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
-			    unsigned int height, unsigned int vrefresh,
-			    unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+void mtk_rdma_config(struct device *dev, unsigned int width,
+		     unsigned int height, unsigned int vrefresh,
+		     unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
 {
 	unsigned int threshold;
 	unsigned int reg;
-	struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+	struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
+	u32 rdma_fifo_size;
 
-	mtk_ddp_write_mask(cmdq_pkt, width, comp,
+	mtk_ddp_write_mask(cmdq_pkt, width, &rdma->cmdq_reg, rdma->regs,
 			   DISP_REG_RDMA_SIZE_CON_0, 0xfff);
-	mtk_ddp_write_mask(cmdq_pkt, height, comp,
+	mtk_ddp_write_mask(cmdq_pkt, height, &rdma->cmdq_reg, rdma->regs,
 			   DISP_REG_RDMA_SIZE_CON_1, 0xfffff);
 
+	if (rdma->fifo_size)
+		rdma_fifo_size = rdma->fifo_size;
+	else
+		rdma_fifo_size = RDMA_FIFO_SIZE(rdma);
+
 	/*
 	 * Enable FIFO underflow since DSI and DPI can't be blocked.
 	 * Keep the FIFO pseudo size reset default of 8 KiB. Set the
@@ -146,9 +169,9 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
 	 */
 	threshold = width * height * vrefresh * 4 * 7 / 1000000;
 	reg = RDMA_FIFO_UNDERFLOW_EN |
-	      RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) |
+	      RDMA_FIFO_PSEUDO_SIZE(rdma_fifo_size) |
 	      RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold);
-	mtk_ddp_write(cmdq_pkt, reg, comp, DISP_REG_RDMA_FIFO_CON);
+	mtk_ddp_write(cmdq_pkt, reg, &rdma->cmdq_reg, rdma->regs, DISP_REG_RDMA_FIFO_CON);
 }
 
 static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
@@ -188,16 +211,16 @@ static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
 	}
 }
 
-static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp)
+unsigned int mtk_rdma_layer_nr(struct device *dev)
 {
 	return 1;
 }
 
-static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
-				  struct mtk_plane_state *state,
-				  struct cmdq_pkt *cmdq_pkt)
+void mtk_rdma_layer_config(struct device *dev, unsigned int idx,
+			   struct mtk_plane_state *state,
+			   struct cmdq_pkt *cmdq_pkt)
 {
-	struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+	struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
 	struct mtk_plane_pending_state *pending = &state->pending;
 	unsigned int addr = pending->addr;
 	unsigned int pitch = pending->pitch & 0xffff;
@@ -205,53 +228,34 @@ static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
 	unsigned int con;
 
 	con = rdma_fmt_convert(rdma, fmt);
-	mtk_ddp_write_relaxed(cmdq_pkt, con, comp, DISP_RDMA_MEM_CON);
+	mtk_ddp_write_relaxed(cmdq_pkt, con, &rdma->cmdq_reg, rdma->regs, DISP_RDMA_MEM_CON);
 
 	if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
-		mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_ENABLE, comp,
+		mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_ENABLE, &rdma->cmdq_reg, rdma->regs,
 				   DISP_REG_RDMA_SIZE_CON_0,
 				   RDMA_MATRIX_ENABLE);
 		mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_INT_MTX_BT601_to_RGB,
-				   comp, DISP_REG_RDMA_SIZE_CON_0,
+				   &rdma->cmdq_reg, rdma->regs, DISP_REG_RDMA_SIZE_CON_0,
 				   RDMA_MATRIX_INT_MTX_SEL);
 	} else {
-		mtk_ddp_write_mask(cmdq_pkt, 0, comp,
+		mtk_ddp_write_mask(cmdq_pkt, 0, &rdma->cmdq_reg, rdma->regs,
 				   DISP_REG_RDMA_SIZE_CON_0,
 				   RDMA_MATRIX_ENABLE);
 	}
-	mtk_ddp_write_relaxed(cmdq_pkt, addr, comp, DISP_RDMA_MEM_START_ADDR);
-	mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp, DISP_RDMA_MEM_SRC_PITCH);
-	mtk_ddp_write(cmdq_pkt, RDMA_MEM_GMC, comp,
+	mtk_ddp_write_relaxed(cmdq_pkt, addr, &rdma->cmdq_reg, rdma->regs,
+			      DISP_RDMA_MEM_START_ADDR);
+	mtk_ddp_write_relaxed(cmdq_pkt, pitch, &rdma->cmdq_reg, rdma->regs,
+			      DISP_RDMA_MEM_SRC_PITCH);
+	mtk_ddp_write(cmdq_pkt, RDMA_MEM_GMC, &rdma->cmdq_reg, rdma->regs,
 		      DISP_RDMA_MEM_GMC_SETTING_0);
-	mtk_ddp_write_mask(cmdq_pkt, RDMA_MODE_MEMORY, comp,
+	mtk_ddp_write_mask(cmdq_pkt, RDMA_MODE_MEMORY, &rdma->cmdq_reg, rdma->regs,
 			   DISP_REG_RDMA_GLOBAL_CON, RDMA_MODE_MEMORY);
 
 }
 
-static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
-	.config = mtk_rdma_config,
-	.start = mtk_rdma_start,
-	.stop = mtk_rdma_stop,
-	.enable_vblank = mtk_rdma_enable_vblank,
-	.disable_vblank = mtk_rdma_disable_vblank,
-	.layer_nr = mtk_rdma_layer_nr,
-	.layer_config = mtk_rdma_layer_config,
-};
-
 static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
 			      void *data)
 {
-	struct mtk_disp_rdma *priv = dev_get_drvdata(dev);
-	struct drm_device *drm_dev = data;
-	int ret;
-
-	ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
-	if (ret < 0) {
-		dev_err(dev, "Failed to register component %pOF: %d\n",
-			dev->of_node, ret);
-		return ret;
-	}
-
 	return 0;
 
 }
@@ -259,10 +263,6 @@ static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
 static void mtk_disp_rdma_unbind(struct device *dev, struct device *master,
 				 void *data)
 {
-	struct mtk_disp_rdma *priv = dev_get_drvdata(dev);
-	struct drm_device *drm_dev = data;
-
-	mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
 }
 
 static const struct component_ops mtk_disp_rdma_component_ops = {
@@ -274,7 +274,7 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct mtk_disp_rdma *priv;
-	int comp_id;
+	struct resource *res;
 	int irq;
 	int ret;
 
@@ -286,25 +286,37 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
 	if (irq < 0)
 		return irq;
 
-	comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_RDMA);
-	if (comp_id < 0) {
-		dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
-		return comp_id;
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		dev_err(dev, "failed to get rdma clk\n");
+		return PTR_ERR(priv->clk);
 	}
 
-	ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
-				&mtk_disp_rdma_funcs);
-	if (ret) {
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "Failed to initialize component: %d\n",
-				ret);
-
-		return ret;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->regs)) {
+		dev_err(dev, "failed to ioremap rdma\n");
+		return PTR_ERR(priv->regs);
+	}
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+	if (ret)
+		dev_dbg(dev, "failed to get mediatek,gce-client-reg\n");
+#endif
+
+	if (of_find_property(dev->of_node, "mediatek,rdma-fifo-size", &ret)) {
+		ret = of_property_read_u32(dev->of_node,
+					   "mediatek,rdma-fifo-size",
+					   &priv->fifo_size);
+		if (ret) {
+			dev_err(dev, "Failed to get rdma fifo size\n");
+			return ret;
+		}
 	}
 
 	/* Disable and clear pending interrupts */
-	writel(0x0, priv->ddp_comp.regs + DISP_REG_RDMA_INT_ENABLE);
-	writel(0x0, priv->ddp_comp.regs + DISP_REG_RDMA_INT_STATUS);
+	writel(0x0, priv->regs + DISP_REG_RDMA_INT_ENABLE);
+	writel(0x0, priv->regs + DISP_REG_RDMA_INT_STATUS);
 
 	ret = devm_request_irq(dev, irq, mtk_disp_rdma_irq_handler,
 			       IRQF_TRIGGER_NONE, dev_name(dev), priv);
@@ -339,11 +351,17 @@ static const struct mtk_disp_rdma_data mt8173_rdma_driver_data = {
 	.fifo_size = SZ_8K,
 };
 
+static const struct mtk_disp_rdma_data mt8183_rdma_driver_data = {
+	.fifo_size = 5 * SZ_1K,
+};
+
 static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
 	{ .compatible = "mediatek,mt2701-disp-rdma",
 	  .data = &mt2701_rdma_driver_data},
 	{ .compatible = "mediatek,mt8173-disp-rdma",
 	  .data = &mt8173_rdma_driver_data},
+	{ .compatible = "mediatek,mt8183-disp-rdma",
+	  .data = &mt8183_rdma_driver_data},
 	{},
 };
 MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
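
Besides the same drvdata conversion, the RDMA hunks above remove the driver's
knowledge of DRM CRTCs: mtk_rdma_enable_vblank() now records an opaque callback plus
context, and the interrupt handler fires it blindly. A reduced sketch of that
decoupling, again under hypothetical names:

#include <linux/interrupt.h>

struct my_rdma {
	void (*vblank_cb)(void *data);	/* registered by the KMS side */
	void *vblank_cb_data;
};

void my_rdma_enable_vblank(struct my_rdma *rdma,
			   void (*cb)(void *data), void *data)
{
	rdma->vblank_cb = cb;
	rdma->vblank_cb_data = data;
	/* ...then unmask the frame-end interrupt in hardware... */
}

static irqreturn_t my_rdma_irq_handler(int irq, void *dev_id)
{
	struct my_rdma *rdma = dev_id;

	/* ...acknowledge the interrupt in hardware first... */
	if (!rdma->vblank_cb)
		return IRQ_NONE;

	/* No DRM types are needed here any more. */
	rdma->vblank_cb(rdma->vblank_cb_data);
	return IRQ_HANDLED;
}
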
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 52f11a63a3304a616352b7d56d8b1fd1f6602c24..b05f900d9322ef16977b3d0faa529c26044475d4 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -20,10 +20,12 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_of.h>
 #include <drm/drm_simple_kms_helper.h>
 
+#include "mtk_disp_drv.h"
 #include "mtk_dpi_regs.h"
 #include "mtk_drm_ddp_comp.h"
 
@@ -62,10 +64,10 @@ enum mtk_dpi_out_color_format {
 };
 
 struct mtk_dpi {
-	struct mtk_ddp_comp ddp_comp;
 	struct drm_encoder encoder;
 	struct drm_bridge bridge;
 	struct drm_bridge *next_bridge;
+	struct drm_connector *connector;
 	void __iomem *regs;
 	struct device *dev;
 	struct clk *engine_clk;
@@ -562,53 +564,50 @@ static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = {
 	.enable = mtk_dpi_bridge_enable,
 };
 
-static void mtk_dpi_start(struct mtk_ddp_comp *comp)
+void mtk_dpi_start(struct device *dev)
 {
-	struct mtk_dpi *dpi = container_of(comp, struct mtk_dpi, ddp_comp);
+	struct mtk_dpi *dpi = dev_get_drvdata(dev);
 
 	mtk_dpi_power_on(dpi);
 }
 
-static void mtk_dpi_stop(struct mtk_ddp_comp *comp)
+void mtk_dpi_stop(struct device *dev)
 {
-	struct mtk_dpi *dpi = container_of(comp, struct mtk_dpi, ddp_comp);
+	struct mtk_dpi *dpi = dev_get_drvdata(dev);
 
 	mtk_dpi_power_off(dpi);
 }
 
-static const struct mtk_ddp_comp_funcs mtk_dpi_funcs = {
-	.start = mtk_dpi_start,
-	.stop = mtk_dpi_stop,
-};
-
 static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
 {
 	struct mtk_dpi *dpi = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
 	int ret;
 
-	ret = mtk_ddp_comp_register(drm_dev, &dpi->ddp_comp);
-	if (ret < 0) {
-		dev_err(dev, "Failed to register component %pOF: %d\n",
-			dev->of_node, ret);
-		return ret;
-	}
-
 	ret = drm_simple_encoder_init(drm_dev, &dpi->encoder,
 				      DRM_MODE_ENCODER_TMDS);
 	if (ret) {
 		dev_err(dev, "Failed to initialize encoder: %d\n", ret);
-		goto err_unregister;
+		return ret;
 	}
 
-	dpi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm_dev, dpi->ddp_comp);
+	dpi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm_dev, dpi->dev);
 
-	ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL, 0);
+	ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL,
+				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 	if (ret) {
 		dev_err(dev, "Failed to attach bridge: %d\n", ret);
 		goto err_cleanup;
 	}
 
+	dpi->connector = drm_bridge_connector_init(drm_dev, &dpi->encoder);
+	if (IS_ERR(dpi->connector)) {
+		dev_err(dev, "Unable to create bridge connector\n");
+		ret = PTR_ERR(dpi->connector);
+		goto err_cleanup;
+	}
+	drm_connector_attach_encoder(dpi->connector, &dpi->encoder);
+
 	dpi->bit_num = MTK_DPI_OUT_BIT_NUM_8BITS;
 	dpi->channel_swap = MTK_DPI_OUT_CHANNEL_SWAP_RGB;
 	dpi->yc_map = MTK_DPI_OUT_YC_MAP_RGB;
@@ -618,8 +617,6 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
 
 err_cleanup:
 	drm_encoder_cleanup(&dpi->encoder);
-err_unregister:
-	mtk_ddp_comp_unregister(drm_dev, &dpi->ddp_comp);
 	return ret;
 }
 
@@ -627,10 +624,8 @@ static void mtk_dpi_unbind(struct device *dev, struct device *master,
 			   void *data)
 {
 	struct mtk_dpi *dpi = dev_get_drvdata(dev);
-	struct drm_device *drm_dev = data;
 
 	drm_encoder_cleanup(&dpi->encoder);
-	mtk_ddp_comp_unregister(drm_dev, &dpi->ddp_comp);
 }
 
 static const struct component_ops mtk_dpi_component_ops = {
@@ -691,7 +686,6 @@ static int mtk_dpi_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct mtk_dpi *dpi;
 	struct resource *mem;
-	int comp_id;
 	int ret;
 
 	dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
@@ -769,19 +763,6 @@ static int mtk_dpi_probe(struct platform_device *pdev)
 
 	dev_info(dev, "Found bridge node: %pOF\n", dpi->next_bridge->of_node);
 
-	comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DPI);
-	if (comp_id < 0) {
-		dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
-		return comp_id;
-	}
-
-	ret = mtk_ddp_comp_init(dev, dev->of_node, &dpi->ddp_comp, comp_id,
-				&mtk_dpi_funcs);
-	if (ret) {
-		dev_err(dev, "Failed to initialize component: %d\n", ret);
-		return ret;
-	}
-
 	platform_set_drvdata(pdev, dpi);
 
 	dpi->bridge.funcs = &mtk_dpi_bridge_funcs;
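
The DPI bind path now opts out of bridge-created connectors: with
DRM_BRIDGE_ATTACH_NO_CONNECTOR the encoder driver is responsible for building a
connector from the bridge chain itself. Condensed from the hunks above, with error
handling elided, the resulting sequence is:

	/* Attach the chain without letting any bridge create a connector. */
	ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);

	/* Build one connector that fronts the whole bridge chain... */
	dpi->connector = drm_bridge_connector_init(drm_dev, &dpi->encoder);

	/* ...and wire it to the encoder. */
	drm_connector_attach_encoder(dpi->connector, &dpi->encoder);
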
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index bdd37eadecd5eebfb982325a36cf626c7900dcdf..8b0de90156c6c76546eea48515a3b06ef93a5130 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -7,6 +7,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 #include <linux/soc/mediatek/mtk-mmsys.h>
+#include <linux/soc/mediatek/mtk-mutex.h>
 
 #include <asm/barrier.h>
 #include <soc/mediatek/smi.h>
@@ -19,7 +20,6 @@
 
 #include "mtk_drm_drv.h"
 #include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp.h"
 #include "mtk_drm_ddp_comp.h"
 #include "mtk_drm_gem.h"
 #include "mtk_drm_plane.h"
@@ -55,7 +55,7 @@ struct mtk_drm_crtc {
 #endif
 
 	struct device			*mmsys_dev;
-	struct mtk_disp_mutex		*mutex;
+	struct mtk_mutex		*mutex;
 	unsigned int			ddp_comp_nr;
 	struct mtk_ddp_comp		**ddp_comp;
 
@@ -107,7 +107,7 @@ static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
 
-	mtk_disp_mutex_put(mtk_crtc->mutex);
+	mtk_mutex_put(mtk_crtc->mutex);
 
 	drm_crtc_cleanup(crtc);
 }
@@ -169,31 +169,13 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 	state->pending_config = true;
 }
 
-static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
-{
-	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
-
-	mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base);
-
-	return 0;
-}
-
-static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
-{
-	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
-
-	mtk_ddp_comp_disable_vblank(comp);
-}
-
 static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
 {
 	int ret;
 	int i;
 
 	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
-		ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk);
+		ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[i]);
 		if (ret) {
 			DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
 			goto err;
@@ -203,7 +185,7 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
 	return 0;
 err:
 	while (--i >= 0)
-		clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
+		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
 	return ret;
 }
 
@@ -212,7 +194,7 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
 	int i;
 
 	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
-		clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
+		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
 }
 
 static
@@ -283,7 +265,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
 		return ret;
 	}
 
-	ret = mtk_disp_mutex_prepare(mtk_crtc->mutex);
+	ret = mtk_mutex_prepare(mtk_crtc->mutex);
 	if (ret < 0) {
 		DRM_ERROR("Failed to enable mutex clock: %d\n", ret);
 		goto err_pm_runtime_put;
@@ -299,11 +281,11 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
 		mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
 				      mtk_crtc->ddp_comp[i]->id,
 				      mtk_crtc->ddp_comp[i + 1]->id);
-		mtk_disp_mutex_add_comp(mtk_crtc->mutex,
+		mtk_mutex_add_comp(mtk_crtc->mutex,
 					mtk_crtc->ddp_comp[i]->id);
 	}
-	mtk_disp_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
-	mtk_disp_mutex_enable(mtk_crtc->mutex);
+	mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
+	mtk_mutex_enable(mtk_crtc->mutex);
 
 	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
 		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];
@@ -332,7 +314,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
 	return 0;
 
 err_mutex_unprepare:
-	mtk_disp_mutex_unprepare(mtk_crtc->mutex);
+	mtk_mutex_unprepare(mtk_crtc->mutex);
 err_pm_runtime_put:
 	pm_runtime_put(crtc->dev->dev);
 	return ret;
@@ -351,19 +333,19 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
 	}
 
 	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
-		mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
+		mtk_mutex_remove_comp(mtk_crtc->mutex,
 					   mtk_crtc->ddp_comp[i]->id);
-	mtk_disp_mutex_disable(mtk_crtc->mutex);
+	mtk_mutex_disable(mtk_crtc->mutex);
 	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
 		mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
 					 mtk_crtc->ddp_comp[i]->id,
 					 mtk_crtc->ddp_comp[i + 1]->id);
-		mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
+		mtk_mutex_remove_comp(mtk_crtc->mutex,
 					   mtk_crtc->ddp_comp[i]->id);
 	}
-	mtk_disp_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
+	mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
 	mtk_crtc_ddp_clk_disable(mtk_crtc);
-	mtk_disp_mutex_unprepare(mtk_crtc->mutex);
+	mtk_mutex_unprepare(mtk_crtc->mutex);
 
 	pm_runtime_put(drm->dev);
 
@@ -475,9 +457,9 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
 		mtk_crtc->pending_async_planes = true;
 
 	if (priv->data->shadow_register) {
-		mtk_disp_mutex_acquire(mtk_crtc->mutex);
+		mtk_mutex_acquire(mtk_crtc->mutex);
 		mtk_crtc_ddp_config(crtc, NULL);
-		mtk_disp_mutex_release(mtk_crtc->mutex);
+		mtk_mutex_release(mtk_crtc->mutex);
 	}
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
 	if (mtk_crtc->cmdq_client) {
@@ -493,6 +475,40 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
 	mutex_unlock(&mtk_crtc->hw_lock);
 }
 
+static void mtk_crtc_ddp_irq(void *data)
+{
+	struct drm_crtc *crtc = data;
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+	struct mtk_drm_private *priv = crtc->dev->dev_private;
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
+#else
+	if (!priv->data->shadow_register)
+#endif
+		mtk_crtc_ddp_config(crtc, NULL);
+
+	mtk_drm_finish_page_flip(mtk_crtc);
+}
+
+static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
+
+	mtk_ddp_comp_enable_vblank(comp, mtk_crtc_ddp_irq, &mtk_crtc->base);
+
+	return 0;
+}
+
+static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
+
+	mtk_ddp_comp_disable_vblank(comp);
+}
+
 int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
 			     struct mtk_plane_state *state)
 {
@@ -619,7 +635,6 @@ static const struct drm_crtc_funcs mtk_crtc_funcs = {
 	.reset			= mtk_drm_crtc_reset,
 	.atomic_duplicate_state	= mtk_drm_crtc_duplicate_state,
 	.atomic_destroy_state	= mtk_drm_crtc_destroy_state,
-	.gamma_set		= drm_atomic_helper_legacy_gamma_set,
 	.enable_vblank		= mtk_drm_crtc_enable_vblank,
 	.disable_vblank		= mtk_drm_crtc_disable_vblank,
 };
@@ -662,21 +677,6 @@ static int mtk_drm_crtc_init(struct drm_device *drm,
 	return ret;
 }
 
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
-{
-	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-	struct mtk_drm_private *priv = crtc->dev->dev_private;
-
-#if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
-#else
-	if (!priv->data->shadow_register)
-#endif
-		mtk_crtc_ddp_config(crtc, NULL);
-
-	mtk_drm_finish_page_flip(mtk_crtc);
-}
-
 static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
 					int comp_idx)
 {
@@ -772,7 +772,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 	if (!mtk_crtc->ddp_comp)
 		return -ENOMEM;
 
-	mtk_crtc->mutex = mtk_disp_mutex_get(priv->mutex_dev, pipe);
+	mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev);
 	if (IS_ERR(mtk_crtc->mutex)) {
 		ret = PTR_ERR(mtk_crtc->mutex);
 		dev_err(dev, "Failed to get mutex: %d\n", ret);
@@ -785,7 +785,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 		struct device_node *node;
 
 		node = priv->comp_node[comp_id];
-		comp = priv->ddp_comp[comp_id];
+		comp = &priv->ddp_comp[comp_id];
 		if (!comp) {
 			dev_err(dev, "Component %pOF not initialized\n", node);
 			ret = -ENODEV;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index a2b4677a451cf42d7cffd8904eabef0b1e404534..45cfd0a032decb2dba30b9fd9439b962fde372f3 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -15,7 +15,6 @@
 #define MTK_MIN_BPC	3
 
 void mtk_drm_crtc_commit(struct drm_crtc *crtc);
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
 int mtk_drm_crtc_create(struct drm_device *drm_dev,
 			const enum mtk_ddp_comp_id *path,
 			unsigned int path_len);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
deleted file mode 100644
index 6b691a57be4a9366d92f11afe9f1a0682b15d59d..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015 MediaTek Inc.
- */
-
-#ifndef MTK_DRM_DDP_H
-#define MTK_DRM_DDP_H
-
-#include "mtk_drm_ddp_comp.h"
-
-struct regmap;
-struct device;
-struct mtk_disp_mutex;
-
-struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id);
-int mtk_disp_mutex_prepare(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
-			     enum mtk_ddp_comp_id id);
-void mtk_disp_mutex_enable(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_disable(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
-				enum mtk_ddp_comp_id id);
-void mtk_disp_mutex_unprepare(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_put(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_acquire(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_release(struct mtk_disp_mutex *mutex);
-
-#endif /* MTK_DRM_DDP_H */
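
Callers of the deleted header switch to <linux/soc/mediatek/mtk-mutex.h>, as the
mtk_drm_crtc.c hunks above show; the mapping is one-to-one except that
mtk_mutex_get() no longer takes a pipe index. A hypothetical setup path under the
new names:

#include <linux/err.h>
#include <linux/soc/mediatek/mtk-mmsys.h>	/* enum mtk_ddp_comp_id */
#include <linux/soc/mediatek/mtk-mutex.h>	/* replaces "mtk_drm_ddp.h" */

static int example_mutex_setup(struct device *mmsys_dev,
			       enum mtk_ddp_comp_id id)
{
	struct mtk_mutex *mutex;
	int ret;

	mutex = mtk_mutex_get(mmsys_dev);	/* no pipe argument any more */
	if (IS_ERR(mutex))
		return PTR_ERR(mutex);

	ret = mtk_mutex_prepare(mutex);
	if (ret < 0) {
		mtk_mutex_put(mutex);
		return ret;
	}

	mtk_mutex_add_comp(mutex, id);		/* attach a DDP component */
	mtk_mutex_enable(mutex);
	return 0;
}
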
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 3064eac1a75079fab05acd62ec6fb6ba393740ae..75bc00e17fc49302e8097084d66b1ceece3a93d7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -9,12 +9,12 @@
 #include <linux/clk.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 #include <drm/drm_print.h>
 
+#include "mtk_disp_drv.h"
 #include "mtk_drm_drv.h"
 #include "mtk_drm_plane.h"
 #include "mtk_drm_ddp_comp.h"
@@ -35,31 +35,13 @@
 #define DISP_AAL_EN				0x0000
 #define DISP_AAL_SIZE				0x0030
 
-#define DISP_CCORR_EN				0x0000
-#define CCORR_EN				BIT(0)
-#define DISP_CCORR_CFG				0x0020
-#define CCORR_RELAY_MODE			BIT(0)
-#define CCORR_ENGINE_EN				BIT(1)
-#define CCORR_GAMMA_OFF				BIT(2)
-#define CCORR_WGAMUT_SRC_CLIP			BIT(3)
-#define DISP_CCORR_SIZE				0x0030
-#define DISP_CCORR_COEF_0			0x0080
-#define DISP_CCORR_COEF_1			0x0084
-#define DISP_CCORR_COEF_2			0x0088
-#define DISP_CCORR_COEF_3			0x008C
-#define DISP_CCORR_COEF_4			0x0090
-
 #define DISP_DITHER_EN				0x0000
 #define DITHER_EN				BIT(0)
 #define DISP_DITHER_CFG				0x0020
 #define DITHER_RELAY_MODE			BIT(0)
+#define DITHER_ENGINE_EN			BIT(1)
 #define DISP_DITHER_SIZE			0x0030
 
-#define DISP_GAMMA_EN				0x0000
-#define DISP_GAMMA_CFG				0x0020
-#define DISP_GAMMA_SIZE				0x0030
-#define DISP_GAMMA_LUT				0x0700
-
 #define LUT_10BIT_MASK				0x03ff
 
 #define OD_RELAYMODE				BIT(0)
@@ -68,9 +50,6 @@
 
 #define AAL_EN					BIT(0)
 
-#define GAMMA_EN				BIT(0)
-#define GAMMA_LUT_EN				BIT(1)
-
 #define DISP_DITHERING				BIT(2)
 #define DITHER_LSB_ERR_SHIFT_R(x)		(((x) & 0x7) << 28)
 #define DITHER_OVFLW_BIT_R(x)			(((x) & 0x7) << 24)
@@ -86,262 +65,233 @@
 #define DITHER_ADD_LSHIFT_G(x)			(((x) & 0x7) << 4)
 #define DITHER_ADD_RSHIFT_G(x)			(((x) & 0x7) << 0)
 
+struct mtk_ddp_comp_dev {
+	struct clk *clk;
+	void __iomem *regs;
+	struct cmdq_client_reg cmdq_reg;
+};
+
 void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
-		   struct mtk_ddp_comp *comp, unsigned int offset)
+		   struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
+		   unsigned int offset)
 {
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
 	if (cmdq_pkt)
-		cmdq_pkt_write(cmdq_pkt, comp->subsys,
-			       comp->regs_pa + offset, value);
+		cmdq_pkt_write(cmdq_pkt, cmdq_reg->subsys,
+			       cmdq_reg->offset + offset, value);
 	else
 #endif
-		writel(value, comp->regs + offset);
+		writel(value, regs + offset);
 }
 
 void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
-			   struct mtk_ddp_comp *comp,
+			   struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
 			   unsigned int offset)
 {
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
 	if (cmdq_pkt)
-		cmdq_pkt_write(cmdq_pkt, comp->subsys,
-			       comp->regs_pa + offset, value);
+		cmdq_pkt_write(cmdq_pkt, cmdq_reg->subsys,
+			       cmdq_reg->offset + offset, value);
 	else
 #endif
-		writel_relaxed(value, comp->regs + offset);
+		writel_relaxed(value, regs + offset);
 }
 
-void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt,
-			unsigned int value,
-			struct mtk_ddp_comp *comp,
-			unsigned int offset,
-			unsigned int mask)
+void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+			struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
+			unsigned int offset, unsigned int mask)
 {
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
 	if (cmdq_pkt) {
-		cmdq_pkt_write_mask(cmdq_pkt, comp->subsys,
-				    comp->regs_pa + offset, value, mask);
+		cmdq_pkt_write_mask(cmdq_pkt, cmdq_reg->subsys,
+				    cmdq_reg->offset + offset, value, mask);
 	} else {
 #endif
-		u32 tmp = readl(comp->regs + offset);
+		u32 tmp = readl(regs + offset);
 
 		tmp = (tmp & ~mask) | (value & mask);
-		writel(tmp, comp->regs + offset);
+		writel(tmp, regs + offset);
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
 	}
 #endif
 }
 
-void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
-		    unsigned int CFG, struct cmdq_pkt *cmdq_pkt)
+static int mtk_ddp_clk_enable(struct device *dev)
+{
+	struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+	return clk_prepare_enable(priv->clk);
+}
+
+static void mtk_ddp_clk_disable(struct device *dev)
+{
+	struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(priv->clk);
+}
+
+void mtk_dither_set_common(void __iomem *regs, struct cmdq_client_reg *cmdq_reg,
+			   unsigned int bpc, unsigned int cfg,
+			   unsigned int dither_en, struct cmdq_pkt *cmdq_pkt)
 {
 	/* If bpc equals 0, dithering is not enabled */
 	if (bpc == 0)
 		return;
 
 	if (bpc >= MTK_MIN_BPC) {
-		mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_5);
-		mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_7);
+		mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_DITHER_5);
+		mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_DITHER_7);
 		mtk_ddp_write(cmdq_pkt,
 			      DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
 			      DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
 			      DITHER_NEW_BIT_MODE,
-			      comp, DISP_DITHER_15);
+			      cmdq_reg, regs, DISP_DITHER_15);
 		mtk_ddp_write(cmdq_pkt,
 			      DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
 			      DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
 			      DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
 			      DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
-			      comp, DISP_DITHER_16);
-		mtk_ddp_write(cmdq_pkt, DISP_DITHERING, comp, CFG);
+			      cmdq_reg, regs, DISP_DITHER_16);
+		mtk_ddp_write(cmdq_pkt, dither_en, cmdq_reg, regs, cfg);
 	}
 }
 
-static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
-			  unsigned int h, unsigned int vrefresh,
-			  unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+static void mtk_dither_set(struct device *dev, unsigned int bpc,
+			   unsigned int cfg, struct cmdq_pkt *cmdq_pkt)
 {
-	mtk_ddp_write(cmdq_pkt, w << 16 | h, comp, DISP_OD_SIZE);
-	mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, comp, DISP_OD_CFG);
-	mtk_dither_set(comp, bpc, DISP_OD_CFG, cmdq_pkt);
-}
+	struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
 
-static void mtk_od_start(struct mtk_ddp_comp *comp)
-{
-	writel(1, comp->regs + DISP_OD_EN);
+	mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, cfg,
+			      DISP_DITHERING, cmdq_pkt);
 }
 
-static void mtk_ufoe_start(struct mtk_ddp_comp *comp)
+static void mtk_od_config(struct device *dev, unsigned int w,
+			  unsigned int h, unsigned int vrefresh,
+			  unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
 {
-	writel(UFO_BYPASS, comp->regs + DISP_REG_UFO_START);
-}
+	struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
 
-static void mtk_aal_config(struct mtk_ddp_comp *comp, unsigned int w,
-			   unsigned int h, unsigned int vrefresh,
-			   unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
-{
-	mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_AAL_SIZE);
+	mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_OD_SIZE);
+	mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_OD_CFG);
+	mtk_dither_set(dev, bpc, DISP_OD_CFG, cmdq_pkt);
 }
 
-static void mtk_aal_start(struct mtk_ddp_comp *comp)
+static void mtk_od_start(struct device *dev)
 {
-	writel(AAL_EN, comp->regs + DISP_AAL_EN);
-}
+	struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
 
-static void mtk_aal_stop(struct mtk_ddp_comp *comp)
-{
-	writel_relaxed(0x0, comp->regs + DISP_AAL_EN);
+	writel(1, priv->regs + DISP_OD_EN);
 }
 
-static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w,
-			     unsigned int h, unsigned int vrefresh,
-			     unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+static void mtk_ufoe_start(struct device *dev)
 {
-	mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_CCORR_SIZE);
-	mtk_ddp_write(cmdq_pkt, CCORR_ENGINE_EN, comp, DISP_CCORR_CFG);
-}
+	struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
 
-static void mtk_ccorr_start(struct mtk_ddp_comp *comp)
-{
-	writel(CCORR_EN, comp->regs + DISP_CCORR_EN);
+	writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START);
 }
 
-static void mtk_ccorr_stop(struct mtk_ddp_comp *comp)
+static void mtk_aal_config(struct device *dev, unsigned int w,
+			   unsigned int h, unsigned int vrefresh,
+			   unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
 {
-	writel_relaxed(0x0, comp->regs + DISP_CCORR_EN);
+	struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+	mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_SIZE);
 }
 
-/* Converts a DRM S31.32 value to the HW S1.10 format. */
-static u16 mtk_ctm_s31_32_to_s1_10(u64 in)
+static void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state)
 {
-	u16 r;
+	struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
 
-	/* Sign bit. */
-	r = in & BIT_ULL(63) ? BIT(11) : 0;
+	mtk_gamma_set_common(priv->regs, state);
+}
 
-	if ((in & GENMASK_ULL(62, 33)) > 0) {
-		/* identity value 0x100000000 -> 0x400, */
-		/* if bigger this, set it to max 0x7ff. */
-		r |= GENMASK(10, 0);
-	} else {
-		/* take the 11 most important bits. */
-		r |= (in >> 22) & GENMASK(10, 0);
-	}
+static void mtk_aal_start(struct device *dev)
+{
+	struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
 
-	return r;
+	writel(AAL_EN, priv->regs + DISP_AAL_EN);
 }
 
-static void mtk_ccorr_ctm_set(struct mtk_ddp_comp *comp,
-			      struct drm_crtc_state *state)
+static void mtk_aal_stop(struct device *dev)
 {
-	struct drm_property_blob *blob = state->ctm;
-	struct drm_color_ctm *ctm;
-	const u64 *input;
-	uint16_t coeffs[9] = { 0 };
-	int i;
-	struct cmdq_pkt *cmdq_pkt = NULL;
-
-	if (!blob)
-		return;
+	struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
 
-	ctm = (struct drm_color_ctm *)blob->data;
-	input = ctm->matrix;
-
-	for (i = 0; i < ARRAY_SIZE(coeffs); i++)
-		coeffs[i] = mtk_ctm_s31_32_to_s1_10(input[i]);
-
-	mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1],
-		      comp, DISP_CCORR_COEF_0);
-	mtk_ddp_write(cmdq_pkt, coeffs[2] << 16 | coeffs[3],
-		      comp, DISP_CCORR_COEF_1);
-	mtk_ddp_write(cmdq_pkt, coeffs[4] << 16 | coeffs[5],
-		      comp, DISP_CCORR_COEF_2);
-	mtk_ddp_write(cmdq_pkt, coeffs[6] << 16 | coeffs[7],
-		      comp, DISP_CCORR_COEF_3);
-	mtk_ddp_write(cmdq_pkt, coeffs[8] << 16,
-		      comp, DISP_CCORR_COEF_4);
+	writel_relaxed(0x0, priv->regs + DISP_AAL_EN);
 }
 
-static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w,
+static void mtk_dither_config(struct device *dev, unsigned int w,
 			      unsigned int h, unsigned int vrefresh,
 			      unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
 {
-	mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_DITHER_SIZE);
-	mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, comp, DISP_DITHER_CFG);
-}
-
-static void mtk_dither_start(struct mtk_ddp_comp *comp)
-{
-	writel(DITHER_EN, comp->regs + DISP_DITHER_EN);
-}
+	struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
 
-static void mtk_dither_stop(struct mtk_ddp_comp *comp)
-{
-	writel_relaxed(0x0, comp->regs + DISP_DITHER_EN);
+	mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_DITHER_SIZE);
+	mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs, DISP_DITHER_CFG);
+	mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_DITHER_CFG,
+			      DITHER_ENGINE_EN, cmdq_pkt);
 }
 
-static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w,
-			     unsigned int h, unsigned int vrefresh,
-			     unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+static void mtk_dither_start(struct device *dev)
 {
-	mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_GAMMA_SIZE);
-	mtk_dither_set(comp, bpc, DISP_GAMMA_CFG, cmdq_pkt);
-}
+	struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
 
-static void mtk_gamma_start(struct mtk_ddp_comp *comp)
-{
-	writel(GAMMA_EN, comp->regs  + DISP_GAMMA_EN);
+	writel(DITHER_EN, priv->regs + DISP_DITHER_EN);
 }
 
-static void mtk_gamma_stop(struct mtk_ddp_comp *comp)
+static void mtk_dither_stop(struct device *dev)
 {
-	writel_relaxed(0x0, comp->regs  + DISP_GAMMA_EN);
-}
+	struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
 
-static void mtk_gamma_set(struct mtk_ddp_comp *comp,
-			  struct drm_crtc_state *state)
-{
-	unsigned int i, reg;
-	struct drm_color_lut *lut;
-	void __iomem *lut_base;
-	u32 word;
-
-	if (state->gamma_lut) {
-		reg = readl(comp->regs + DISP_GAMMA_CFG);
-		reg = reg | GAMMA_LUT_EN;
-		writel(reg, comp->regs + DISP_GAMMA_CFG);
-		lut_base = comp->regs + DISP_GAMMA_LUT;
-		lut = (struct drm_color_lut *)state->gamma_lut->data;
-		for (i = 0; i < MTK_LUT_SIZE; i++) {
-			word = (((lut[i].red >> 6) & LUT_10BIT_MASK) << 20) +
-				(((lut[i].green >> 6) & LUT_10BIT_MASK) << 10) +
-				((lut[i].blue >> 6) & LUT_10BIT_MASK);
-			writel(word, (lut_base + i * 4));
-		}
-	}
+	writel_relaxed(0x0, priv->regs + DISP_DITHER_EN);
 }
 
 static const struct mtk_ddp_comp_funcs ddp_aal = {
-	.gamma_set = mtk_gamma_set,
+	.clk_enable = mtk_ddp_clk_enable,
+	.clk_disable = mtk_ddp_clk_disable,
+	.gamma_set = mtk_aal_gamma_set,
 	.config = mtk_aal_config,
 	.start = mtk_aal_start,
 	.stop = mtk_aal_stop,
 };
 
 static const struct mtk_ddp_comp_funcs ddp_ccorr = {
+	.clk_enable = mtk_ccorr_clk_enable,
+	.clk_disable = mtk_ccorr_clk_disable,
 	.config = mtk_ccorr_config,
 	.start = mtk_ccorr_start,
 	.stop = mtk_ccorr_stop,
 	.ctm_set = mtk_ccorr_ctm_set,
 };
 
+static const struct mtk_ddp_comp_funcs ddp_color = {
+	.clk_enable = mtk_color_clk_enable,
+	.clk_disable = mtk_color_clk_disable,
+	.config = mtk_color_config,
+	.start = mtk_color_start,
+};
+
 static const struct mtk_ddp_comp_funcs ddp_dither = {
+	.clk_enable = mtk_ddp_clk_enable,
+	.clk_disable = mtk_ddp_clk_disable,
 	.config = mtk_dither_config,
 	.start = mtk_dither_start,
 	.stop = mtk_dither_stop,
 };
 
+static const struct mtk_ddp_comp_funcs ddp_dpi = {
+	.start = mtk_dpi_start,
+	.stop = mtk_dpi_stop,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_dsi = {
+	.start = mtk_dsi_ddp_start,
+	.stop = mtk_dsi_ddp_stop,
+};
+
 static const struct mtk_ddp_comp_funcs ddp_gamma = {
+	.clk_enable = mtk_gamma_clk_enable,
+	.clk_disable = mtk_gamma_clk_disable,
 	.gamma_set = mtk_gamma_set,
 	.config = mtk_gamma_config,
 	.start = mtk_gamma_start,
@@ -349,11 +299,43 @@ static const struct mtk_ddp_comp_funcs ddp_gamma = {
 };
 
 static const struct mtk_ddp_comp_funcs ddp_od = {
+	.clk_enable = mtk_ddp_clk_enable,
+	.clk_disable = mtk_ddp_clk_disable,
 	.config = mtk_od_config,
 	.start = mtk_od_start,
 };
 
+static const struct mtk_ddp_comp_funcs ddp_ovl = {
+	.clk_enable = mtk_ovl_clk_enable,
+	.clk_disable = mtk_ovl_clk_disable,
+	.config = mtk_ovl_config,
+	.start = mtk_ovl_start,
+	.stop = mtk_ovl_stop,
+	.enable_vblank = mtk_ovl_enable_vblank,
+	.disable_vblank = mtk_ovl_disable_vblank,
+	.supported_rotations = mtk_ovl_supported_rotations,
+	.layer_nr = mtk_ovl_layer_nr,
+	.layer_check = mtk_ovl_layer_check,
+	.layer_config = mtk_ovl_layer_config,
+	.bgclr_in_on = mtk_ovl_bgclr_in_on,
+	.bgclr_in_off = mtk_ovl_bgclr_in_off,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_rdma = {
+	.clk_enable = mtk_rdma_clk_enable,
+	.clk_disable = mtk_rdma_clk_disable,
+	.config = mtk_rdma_config,
+	.start = mtk_rdma_start,
+	.stop = mtk_rdma_stop,
+	.enable_vblank = mtk_rdma_enable_vblank,
+	.disable_vblank = mtk_rdma_disable_vblank,
+	.layer_nr = mtk_rdma_layer_nr,
+	.layer_config = mtk_rdma_layer_config,
+};
+
 static const struct mtk_ddp_comp_funcs ddp_ufoe = {
+	.clk_enable = mtk_ddp_clk_enable,
+	.clk_disable = mtk_ddp_clk_disable,
 	.start = mtk_ufoe_start,
 };
 
@@ -387,36 +369,37 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
 	[DDP_COMPONENT_AAL1]	= { MTK_DISP_AAL,	1, &ddp_aal },
 	[DDP_COMPONENT_BLS]	= { MTK_DISP_BLS,	0, NULL },
 	[DDP_COMPONENT_CCORR]	= { MTK_DISP_CCORR,	0, &ddp_ccorr },
-	[DDP_COMPONENT_COLOR0]	= { MTK_DISP_COLOR,	0, NULL },
-	[DDP_COMPONENT_COLOR1]	= { MTK_DISP_COLOR,	1, NULL },
+	[DDP_COMPONENT_COLOR0]	= { MTK_DISP_COLOR,	0, &ddp_color },
+	[DDP_COMPONENT_COLOR1]	= { MTK_DISP_COLOR,	1, &ddp_color },
 	[DDP_COMPONENT_DITHER]	= { MTK_DISP_DITHER,	0, &ddp_dither },
-	[DDP_COMPONENT_DPI0]	= { MTK_DPI,		0, NULL },
-	[DDP_COMPONENT_DPI1]	= { MTK_DPI,		1, NULL },
-	[DDP_COMPONENT_DSI0]	= { MTK_DSI,		0, NULL },
-	[DDP_COMPONENT_DSI1]	= { MTK_DSI,		1, NULL },
-	[DDP_COMPONENT_DSI2]	= { MTK_DSI,		2, NULL },
-	[DDP_COMPONENT_DSI3]	= { MTK_DSI,		3, NULL },
+	[DDP_COMPONENT_DPI0]	= { MTK_DPI,		0, &ddp_dpi },
+	[DDP_COMPONENT_DPI1]	= { MTK_DPI,		1, &ddp_dpi },
+	[DDP_COMPONENT_DSI0]	= { MTK_DSI,		0, &ddp_dsi },
+	[DDP_COMPONENT_DSI1]	= { MTK_DSI,		1, &ddp_dsi },
+	[DDP_COMPONENT_DSI2]	= { MTK_DSI,		2, &ddp_dsi },
+	[DDP_COMPONENT_DSI3]	= { MTK_DSI,		3, &ddp_dsi },
 	[DDP_COMPONENT_GAMMA]	= { MTK_DISP_GAMMA,	0, &ddp_gamma },
 	[DDP_COMPONENT_OD0]	= { MTK_DISP_OD,	0, &ddp_od },
 	[DDP_COMPONENT_OD1]	= { MTK_DISP_OD,	1, &ddp_od },
-	[DDP_COMPONENT_OVL0]	= { MTK_DISP_OVL,	0, NULL },
-	[DDP_COMPONENT_OVL1]	= { MTK_DISP_OVL,	1, NULL },
-	[DDP_COMPONENT_OVL_2L0]	= { MTK_DISP_OVL_2L,	0, NULL },
-	[DDP_COMPONENT_OVL_2L1]	= { MTK_DISP_OVL_2L,	1, NULL },
+	[DDP_COMPONENT_OVL0]	= { MTK_DISP_OVL,	0, &ddp_ovl },
+	[DDP_COMPONENT_OVL1]	= { MTK_DISP_OVL,	1, &ddp_ovl },
+	[DDP_COMPONENT_OVL_2L0]	= { MTK_DISP_OVL_2L,	0, &ddp_ovl },
+	[DDP_COMPONENT_OVL_2L1]	= { MTK_DISP_OVL_2L,	1, &ddp_ovl },
 	[DDP_COMPONENT_PWM0]	= { MTK_DISP_PWM,	0, NULL },
 	[DDP_COMPONENT_PWM1]	= { MTK_DISP_PWM,	1, NULL },
 	[DDP_COMPONENT_PWM2]	= { MTK_DISP_PWM,	2, NULL },
-	[DDP_COMPONENT_RDMA0]	= { MTK_DISP_RDMA,	0, NULL },
-	[DDP_COMPONENT_RDMA1]	= { MTK_DISP_RDMA,	1, NULL },
-	[DDP_COMPONENT_RDMA2]	= { MTK_DISP_RDMA,	2, NULL },
+	[DDP_COMPONENT_RDMA0]	= { MTK_DISP_RDMA,	0, &ddp_rdma },
+	[DDP_COMPONENT_RDMA1]	= { MTK_DISP_RDMA,	1, &ddp_rdma },
+	[DDP_COMPONENT_RDMA2]	= { MTK_DISP_RDMA,	2, &ddp_rdma },
 	[DDP_COMPONENT_UFOE]	= { MTK_DISP_UFOE,	0, &ddp_ufoe },
 	[DDP_COMPONENT_WDMA0]	= { MTK_DISP_WDMA,	0, NULL },
 	[DDP_COMPONENT_WDMA1]	= { MTK_DISP_WDMA,	1, NULL },
 };
 
-static bool mtk_drm_find_comp_in_ddp(struct mtk_ddp_comp ddp_comp,
+static bool mtk_drm_find_comp_in_ddp(struct device *dev,
 				     const enum mtk_ddp_comp_id *path,
-				     unsigned int path_len)
+				     unsigned int path_len,
+				     struct mtk_ddp_comp *ddp_comp)
 {
 	unsigned int i;
 
@@ -424,7 +407,7 @@ static bool mtk_drm_find_comp_in_ddp(struct mtk_ddp_comp ddp_comp,
 		return false;
 
 	for (i = 0U; i < path_len; i++)
-		if (ddp_comp.id == path[i])
+		if (dev == ddp_comp[path[i]].dev)
 			return true;
 
 	return false;
@@ -446,18 +429,19 @@ int mtk_ddp_comp_get_id(struct device_node *node,
 }
 
 unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
-						struct mtk_ddp_comp ddp_comp)
+						struct device *dev)
 {
 	struct mtk_drm_private *private = drm->dev_private;
 	unsigned int ret = 0;
 
-	if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->main_path, private->data->main_len))
+	if (mtk_drm_find_comp_in_ddp(dev, private->data->main_path, private->data->main_len,
+				     private->ddp_comp))
 		ret = BIT(0);
-	else if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->ext_path,
-					  private->data->ext_len))
+	else if (mtk_drm_find_comp_in_ddp(dev, private->data->ext_path,
+					  private->data->ext_len, private->ddp_comp))
 		ret = BIT(1);
-	else if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->third_path,
-					  private->data->third_len))
+	else if (mtk_drm_find_comp_in_ddp(dev, private->data->third_path,
+					  private->data->third_len, private->ddp_comp))
 		ret = BIT(2);
 	else
 		DRM_INFO("Failed to find comp in ddp table\n");
@@ -465,59 +449,15 @@ unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
 	return ret;
 }
 
-int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
-		      struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id,
-		      const struct mtk_ddp_comp_funcs *funcs)
+static int mtk_ddp_get_larb_dev(struct device_node *node, struct mtk_ddp_comp *comp,
+				struct device *dev)
 {
-	enum mtk_ddp_comp_type type;
 	struct device_node *larb_node;
 	struct platform_device *larb_pdev;
-#if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	struct resource res;
-	struct cmdq_client_reg cmdq_reg;
-	int ret;
-#endif
-
-	if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
-		return -EINVAL;
-
-	type = mtk_ddp_matches[comp_id].type;
-
-	comp->id = comp_id;
-	comp->funcs = funcs ?: mtk_ddp_matches[comp_id].funcs;
-
-	if (comp_id == DDP_COMPONENT_BLS ||
-	    comp_id == DDP_COMPONENT_DPI0 ||
-	    comp_id == DDP_COMPONENT_DPI1 ||
-	    comp_id == DDP_COMPONENT_DSI0 ||
-	    comp_id == DDP_COMPONENT_DSI1 ||
-	    comp_id == DDP_COMPONENT_DSI2 ||
-	    comp_id == DDP_COMPONENT_DSI3 ||
-	    comp_id == DDP_COMPONENT_PWM0) {
-		comp->regs = NULL;
-		comp->clk = NULL;
-		comp->irq = 0;
-		return 0;
-	}
-
-	comp->regs = of_iomap(node, 0);
-	comp->irq = of_irq_get(node, 0);
-	comp->clk = of_clk_get(node, 0);
-	if (IS_ERR(comp->clk))
-		return PTR_ERR(comp->clk);
-
-	/* Only DMA capable components need the LARB property */
-	comp->larb_dev = NULL;
-	if (type != MTK_DISP_OVL &&
-	    type != MTK_DISP_OVL_2L &&
-	    type != MTK_DISP_RDMA &&
-	    type != MTK_DISP_WDMA)
-		return 0;
 
 	larb_node = of_parse_phandle(node, "mediatek,larb", 0);
 	if (!larb_node) {
-		dev_err(dev,
-			"Missing mediadek,larb phandle in %pOF node\n", node);
+		dev_err(dev, "Missing mediatek,larb phandle in %pOF node\n", node);
 		return -EINVAL;
 	}
 
@@ -528,40 +468,71 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
 		return -EPROBE_DEFER;
 	}
 	of_node_put(larb_node);
-
 	comp->larb_dev = &larb_pdev->dev;
 
-#if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	if (of_address_to_resource(node, 0, &res) != 0) {
-		dev_err(dev, "Missing reg in %s node\n", node->full_name);
-		put_device(&larb_pdev->dev);
-		return -EINVAL;
-	}
-	comp->regs_pa = res.start;
-
-	ret = cmdq_dev_get_client_reg(dev, &cmdq_reg, 0);
-	if (ret)
-		dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
-	else
-		comp->subsys = cmdq_reg.subsys;
-#endif
 	return 0;
 }
 
-int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp)
+int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
+		      enum mtk_ddp_comp_id comp_id)
 {
-	struct mtk_drm_private *private = drm->dev_private;
+	struct platform_device *comp_pdev;
+	enum mtk_ddp_comp_type type;
+	struct mtk_ddp_comp_dev *priv;
+	int ret;
 
-	if (private->ddp_comp[comp->id])
-		return -EBUSY;
+	if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
+		return -EINVAL;
 
-	private->ddp_comp[comp->id] = comp;
-	return 0;
-}
+	type = mtk_ddp_matches[comp_id].type;
 
-void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp)
-{
-	struct mtk_drm_private *private = drm->dev_private;
+	comp->id = comp_id;
+	comp->funcs = mtk_ddp_matches[comp_id].funcs;
+	comp_pdev = of_find_device_by_node(node);
+	if (!comp_pdev) {
+		DRM_INFO("Waiting for device %s\n", node->full_name);
+		return -EPROBE_DEFER;
+	}
+	comp->dev = &comp_pdev->dev;
 
-	private->ddp_comp[comp->id] = NULL;
+	/* Only DMA capable components need the LARB property */
+	if (type == MTK_DISP_OVL ||
+	    type == MTK_DISP_OVL_2L ||
+	    type == MTK_DISP_RDMA ||
+	    type == MTK_DISP_WDMA) {
+		ret = mtk_ddp_get_larb_dev(node, comp, comp->dev);
+		if (ret)
+			return ret;
+	}
+
+	if (type == MTK_DISP_BLS ||
+	    type == MTK_DISP_CCORR ||
+	    type == MTK_DISP_COLOR ||
+	    type == MTK_DISP_GAMMA ||
+	    type == MTK_DPI ||
+	    type == MTK_DSI ||
+	    type == MTK_DISP_OVL ||
+	    type == MTK_DISP_OVL_2L ||
+	    type == MTK_DISP_PWM ||
+	    type == MTK_DISP_RDMA)
+		return 0;
+
+	priv = devm_kzalloc(comp->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->regs = of_iomap(node, 0);
+	priv->clk = of_clk_get(node, 0);
+	if (IS_ERR(priv->clk))
+		return PTR_ERR(priv->clk);
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	ret = cmdq_dev_get_client_reg(comp->dev, &priv->cmdq_reg, 0);
+	if (ret)
+		dev_dbg(comp->dev, "failed to get mediatek,gce-client-reg\n");
+#endif
+
+	platform_set_drvdata(comp_pdev, priv);
+
+	return 0;
 }
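
With regs_pa and subsys gone from struct mtk_ddp_comp, the write helpers take the
caller's own struct cmdq_client_reg and MMIO base, so they work against any
driver-private state. A hypothetical caller, following the same pattern as the OVL
and RDMA hunks earlier:

#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_ddp_comp.h"			/* mtk_ddp_write() */

struct my_comp {
	void __iomem *regs;			/* MMIO base */
	struct cmdq_client_reg cmdq_reg;	/* GCE subsys/offset */
};

#define MY_COMP_SIZE	0x0030			/* hypothetical register offset */

static void my_comp_config(struct my_comp *priv, unsigned int w,
			   unsigned int h, struct cmdq_pkt *cmdq_pkt)
{
	/*
	 * A non-NULL cmdq_pkt queues the write to the GCE using the
	 * caller's client register; NULL falls back to plain writel().
	 */
	mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs,
		      MY_COMP_SIZE);
}
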
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 5aa52b7afeecbed8b3abe0d7d741c5bbde2fb684..bb914d976cf5d3a73233621e1838cf420a3fd53d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -7,6 +7,7 @@
 #define MTK_DRM_DDP_COMP_H
 
 #include <linux/io.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
 #include <linux/soc/mediatek/mtk-mmsys.h>
 
 struct device;
@@ -39,79 +40,95 @@ enum mtk_ddp_comp_type {
 struct mtk_ddp_comp;
 struct cmdq_pkt;
 struct mtk_ddp_comp_funcs {
-	void (*config)(struct mtk_ddp_comp *comp, unsigned int w,
+	int (*clk_enable)(struct device *dev);
+	void (*clk_disable)(struct device *dev);
+	void (*config)(struct device *dev, unsigned int w,
 		       unsigned int h, unsigned int vrefresh,
 		       unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
-	void (*start)(struct mtk_ddp_comp *comp);
-	void (*stop)(struct mtk_ddp_comp *comp);
-	void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
-	void (*disable_vblank)(struct mtk_ddp_comp *comp);
-	unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp);
-	unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
-	int (*layer_check)(struct mtk_ddp_comp *comp,
+	void (*start)(struct device *dev);
+	void (*stop)(struct device *dev);
+	void (*enable_vblank)(struct device *dev,
+			      void (*vblank_cb)(void *),
+			      void *vblank_cb_data);
+	void (*disable_vblank)(struct device *dev);
+	unsigned int (*supported_rotations)(struct device *dev);
+	unsigned int (*layer_nr)(struct device *dev);
+	int (*layer_check)(struct device *dev,
 			   unsigned int idx,
 			   struct mtk_plane_state *state);
-	void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
+	void (*layer_config)(struct device *dev, unsigned int idx,
 			     struct mtk_plane_state *state,
 			     struct cmdq_pkt *cmdq_pkt);
-	void (*gamma_set)(struct mtk_ddp_comp *comp,
+	void (*gamma_set)(struct device *dev,
 			  struct drm_crtc_state *state);
-	void (*bgclr_in_on)(struct mtk_ddp_comp *comp);
-	void (*bgclr_in_off)(struct mtk_ddp_comp *comp);
-	void (*ctm_set)(struct mtk_ddp_comp *comp,
+	void (*bgclr_in_on)(struct device *dev);
+	void (*bgclr_in_off)(struct device *dev);
+	void (*ctm_set)(struct device *dev,
 			struct drm_crtc_state *state);
 };
 
 struct mtk_ddp_comp {
-	struct clk *clk;
-	void __iomem *regs;
+	struct device *dev;
 	int irq;
 	struct device *larb_dev;
 	enum mtk_ddp_comp_id id;
 	const struct mtk_ddp_comp_funcs *funcs;
-	resource_size_t regs_pa;
-	u8 subsys;
 };
 
+static inline int mtk_ddp_comp_clk_enable(struct mtk_ddp_comp *comp)
+{
+	if (comp->funcs && comp->funcs->clk_enable)
+		return comp->funcs->clk_enable(comp->dev);
+
+	return 0;
+}
+
+static inline void mtk_ddp_comp_clk_disable(struct mtk_ddp_comp *comp)
+{
+	if (comp->funcs && comp->funcs->clk_disable)
+		comp->funcs->clk_disable(comp->dev);
+}
+
 static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp,
 				       unsigned int w, unsigned int h,
 				       unsigned int vrefresh, unsigned int bpc,
 				       struct cmdq_pkt *cmdq_pkt)
 {
 	if (comp->funcs && comp->funcs->config)
-		comp->funcs->config(comp, w, h, vrefresh, bpc, cmdq_pkt);
+		comp->funcs->config(comp->dev, w, h, vrefresh, bpc, cmdq_pkt);
 }
 
 static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp)
 {
 	if (comp->funcs && comp->funcs->start)
-		comp->funcs->start(comp);
+		comp->funcs->start(comp->dev);
 }
 
 static inline void mtk_ddp_comp_stop(struct mtk_ddp_comp *comp)
 {
 	if (comp->funcs && comp->funcs->stop)
-		comp->funcs->stop(comp);
+		comp->funcs->stop(comp->dev);
 }
 
 static inline void mtk_ddp_comp_enable_vblank(struct mtk_ddp_comp *comp,
-					      struct drm_crtc *crtc)
+					      void (*vblank_cb)(void *),
+					      void *vblank_cb_data)
 {
 	if (comp->funcs && comp->funcs->enable_vblank)
-		comp->funcs->enable_vblank(comp, crtc);
+		comp->funcs->enable_vblank(comp->dev, vblank_cb, vblank_cb_data);
 }
 
 static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
 {
 	if (comp->funcs && comp->funcs->disable_vblank)
-		comp->funcs->disable_vblank(comp);
+		comp->funcs->disable_vblank(comp->dev);
 }
 
 static inline
 unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp)
 {
 	if (comp->funcs && comp->funcs->supported_rotations)
-		return comp->funcs->supported_rotations(comp);
+		return comp->funcs->supported_rotations(comp->dev);
 
 	return 0;
 }
@@ -119,7 +136,7 @@ unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp)
 static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
 {
 	if (comp->funcs && comp->funcs->layer_nr)
-		return comp->funcs->layer_nr(comp);
+		return comp->funcs->layer_nr(comp->dev);
 
 	return 0;
 }
@@ -129,7 +146,7 @@ static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp,
 					   struct mtk_plane_state *state)
 {
 	if (comp->funcs && comp->funcs->layer_check)
-		return comp->funcs->layer_check(comp, idx, state);
+		return comp->funcs->layer_check(comp->dev, idx, state);
 	return 0;
 }
 
@@ -139,52 +156,49 @@ static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp,
 					     struct cmdq_pkt *cmdq_pkt)
 {
 	if (comp->funcs && comp->funcs->layer_config)
-		comp->funcs->layer_config(comp, idx, state, cmdq_pkt);
+		comp->funcs->layer_config(comp->dev, idx, state, cmdq_pkt);
 }
 
 static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp,
 				     struct drm_crtc_state *state)
 {
 	if (comp->funcs && comp->funcs->gamma_set)
-		comp->funcs->gamma_set(comp, state);
+		comp->funcs->gamma_set(comp->dev, state);
 }
 
 static inline void mtk_ddp_comp_bgclr_in_on(struct mtk_ddp_comp *comp)
 {
 	if (comp->funcs && comp->funcs->bgclr_in_on)
-		comp->funcs->bgclr_in_on(comp);
+		comp->funcs->bgclr_in_on(comp->dev);
 }
 
 static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp)
 {
 	if (comp->funcs && comp->funcs->bgclr_in_off)
-		comp->funcs->bgclr_in_off(comp);
+		comp->funcs->bgclr_in_off(comp->dev);
 }
 
 static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp,
 				   struct drm_crtc_state *state)
 {
 	if (comp->funcs && comp->funcs->ctm_set)
-		comp->funcs->ctm_set(comp, state);
+		comp->funcs->ctm_set(comp->dev, state);
 }
 
 int mtk_ddp_comp_get_id(struct device_node *node,
 			enum mtk_ddp_comp_type comp_type);
 unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
-						struct mtk_ddp_comp ddp_comp);
-int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
-		      struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id,
-		      const struct mtk_ddp_comp_funcs *funcs);
-int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp);
-void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp);
-void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
-		    unsigned int CFG, struct cmdq_pkt *cmdq_pkt);
+						struct device *dev);
+int mtk_ddp_comp_init(struct device_node *comp_node, struct mtk_ddp_comp *comp,
+		      enum mtk_ddp_comp_id comp_id);
 enum mtk_ddp_comp_type mtk_ddp_comp_get_type(enum mtk_ddp_comp_id comp_id);
 void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
-		   struct mtk_ddp_comp *comp, unsigned int offset);
+		   struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
+		   unsigned int offset);
 void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
-			   struct mtk_ddp_comp *comp, unsigned int offset);
+			   struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
+			   unsigned int offset);
 void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value,
-			struct mtk_ddp_comp *comp, unsigned int offset,
-			unsigned int mask);
+			struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
+			unsigned int offset, unsigned int mask);
 #endif /* MTK_DRM_DDP_COMP_H */
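[Annotation] The ops table above now takes a bare struct device * instead of the mtk_ddp_comp wrapper, so each component driver recovers its private state with dev_get_drvdata(). A minimal sketch of that convention, assuming a hypothetical mtk_foo component (names and fields are illustrative, not from this series):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	struct mtk_foo {
		void __iomem *regs;	/* mapped by the component's own probe */
	};

	static void mtk_foo_start(struct device *dev)
	{
		/* Recover the state stashed by probe via the bare device pointer. */
		struct mtk_foo *foo = dev_get_drvdata(dev);

		writel(1, foo->regs);	/* illustrative register write */
	}

	static int mtk_foo_probe(struct platform_device *pdev)
	{
		struct mtk_foo *foo;

		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return -ENOMEM;

		foo->regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(foo->regs))
			return PTR_ERR(foo->regs);

		platform_set_drvdata(pdev, foo);	/* what dev_get_drvdata() reads back */
		return 0;
	}

The funcs table stays per component type while the device pointer carries per-instance state, which is what lets the inline wrappers above stay thin.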
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 2f717df28a77a4ef81022f5a9a5d8e8b86ff3421..b013d56d2777367bb08062e4d55519474089ad6f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -10,7 +10,6 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/pm_runtime.h>
-#include <linux/soc/mediatek/mtk-mmsys.h>
 #include <linux/dma-mapping.h>
 
 #include <drm/drm_atomic.h>
@@ -26,7 +25,6 @@
 #include <drm/drm_vblank.h>
 
 #include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp.h"
 #include "mtk_drm_ddp_comp.h"
 #include "mtk_drm_drv.h"
 #include "mtk_drm_gem.h"
@@ -131,6 +129,24 @@ static const enum mtk_ddp_comp_id mt8173_mtk_ddp_ext[] = {
 	DDP_COMPONENT_DPI0,
 };
 
+static const enum mtk_ddp_comp_id mt8183_mtk_ddp_main[] = {
+	DDP_COMPONENT_OVL0,
+	DDP_COMPONENT_OVL_2L0,
+	DDP_COMPONENT_RDMA0,
+	DDP_COMPONENT_COLOR0,
+	DDP_COMPONENT_CCORR,
+	DDP_COMPONENT_AAL0,
+	DDP_COMPONENT_GAMMA,
+	DDP_COMPONENT_DITHER,
+	DDP_COMPONENT_DSI0,
+};
+
+static const enum mtk_ddp_comp_id mt8183_mtk_ddp_ext[] = {
+	DDP_COMPONENT_OVL_2L1,
+	DDP_COMPONENT_RDMA1,
+	DDP_COMPONENT_DPI0,
+};
+
 static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
 	.main_path = mt2701_mtk_ddp_main,
 	.main_len = ARRAY_SIZE(mt2701_mtk_ddp_main),
@@ -163,6 +179,13 @@ static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
 	.ext_len = ARRAY_SIZE(mt8173_mtk_ddp_ext),
 };
 
+static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
+	.main_path = mt8183_mtk_ddp_main,
+	.main_len = ARRAY_SIZE(mt8183_mtk_ddp_main),
+	.ext_path = mt8183_mtk_ddp_ext,
+	.ext_len = ARRAY_SIZE(mt8183_mtk_ddp_ext),
+};
+
 static int mtk_drm_kms_init(struct drm_device *drm)
 {
 	struct mtk_drm_private *private = drm->dev_private;
@@ -377,12 +400,20 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
 	  .data = (void *)MTK_DISP_OVL },
 	{ .compatible = "mediatek,mt8173-disp-ovl",
 	  .data = (void *)MTK_DISP_OVL },
+	{ .compatible = "mediatek,mt8183-disp-ovl",
+	  .data = (void *)MTK_DISP_OVL },
+	{ .compatible = "mediatek,mt8183-disp-ovl-2l",
+	  .data = (void *)MTK_DISP_OVL_2L },
 	{ .compatible = "mediatek,mt2701-disp-rdma",
 	  .data = (void *)MTK_DISP_RDMA },
 	{ .compatible = "mediatek,mt8173-disp-rdma",
 	  .data = (void *)MTK_DISP_RDMA },
+	{ .compatible = "mediatek,mt8183-disp-rdma",
+	  .data = (void *)MTK_DISP_RDMA },
 	{ .compatible = "mediatek,mt8173-disp-wdma",
 	  .data = (void *)MTK_DISP_WDMA },
+	{ .compatible = "mediatek,mt8183-disp-ccorr",
+	  .data = (void *)MTK_DISP_CCORR },
 	{ .compatible = "mediatek,mt2701-disp-color",
 	  .data = (void *)MTK_DISP_COLOR },
 	{ .compatible = "mediatek,mt8173-disp-color",
@@ -391,22 +422,32 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
 	  .data = (void *)MTK_DISP_AAL},
 	{ .compatible = "mediatek,mt8173-disp-gamma",
 	  .data = (void *)MTK_DISP_GAMMA, },
+	{ .compatible = "mediatek,mt8183-disp-gamma",
+	  .data = (void *)MTK_DISP_GAMMA, },
+	{ .compatible = "mediatek,mt8183-disp-dither",
+	  .data = (void *)MTK_DISP_DITHER },
 	{ .compatible = "mediatek,mt8173-disp-ufoe",
 	  .data = (void *)MTK_DISP_UFOE },
 	{ .compatible = "mediatek,mt2701-dsi",
 	  .data = (void *)MTK_DSI },
 	{ .compatible = "mediatek,mt8173-dsi",
 	  .data = (void *)MTK_DSI },
+	{ .compatible = "mediatek,mt8183-dsi",
+	  .data = (void *)MTK_DSI },
 	{ .compatible = "mediatek,mt2701-dpi",
 	  .data = (void *)MTK_DPI },
 	{ .compatible = "mediatek,mt8173-dpi",
 	  .data = (void *)MTK_DPI },
+	{ .compatible = "mediatek,mt8183-dpi",
+	  .data = (void *)MTK_DPI },
 	{ .compatible = "mediatek,mt2701-disp-mutex",
 	  .data = (void *)MTK_DISP_MUTEX },
 	{ .compatible = "mediatek,mt2712-disp-mutex",
 	  .data = (void *)MTK_DISP_MUTEX },
 	{ .compatible = "mediatek,mt8173-disp-mutex",
 	  .data = (void *)MTK_DISP_MUTEX },
+	{ .compatible = "mediatek,mt8183-disp-mutex",
+	  .data = (void *)MTK_DISP_MUTEX },
 	{ .compatible = "mediatek,mt2701-disp-pwm",
 	  .data = (void *)MTK_DISP_BLS },
 	{ .compatible = "mediatek,mt8173-disp-pwm",
@@ -425,6 +466,8 @@ static const struct of_device_id mtk_drm_of_ids[] = {
 	  .data = &mt2712_mmsys_driver_data},
 	{ .compatible = "mediatek,mt8173-mmsys",
 	  .data = &mt8173_mmsys_driver_data},
+	{ .compatible = "mediatek,mt8183-mmsys",
+	  .data = &mt8183_mmsys_driver_data},
 	{ }
 };
 
@@ -488,11 +531,13 @@ static int mtk_drm_probe(struct platform_device *pdev)
 		private->comp_node[comp_id] = of_node_get(node);
 
 		/*
-		 * Currently only the COLOR, OVL, RDMA, DSI, and DPI blocks have
-		 * separate component platform drivers and initialize their own
+		 * Currently only the CCORR, COLOR, GAMMA, OVL, RDMA, DSI, and DPI
+		 * blocks have separate component platform drivers and initialize their own
 		 * DDP component structure. The others are initialized here.
 		 */
-		if (comp_type == MTK_DISP_COLOR ||
+		if (comp_type == MTK_DISP_CCORR ||
+		    comp_type == MTK_DISP_COLOR ||
+		    comp_type == MTK_DISP_GAMMA ||
 		    comp_type == MTK_DISP_OVL ||
 		    comp_type == MTK_DISP_OVL_2L ||
 		    comp_type == MTK_DISP_RDMA ||
@@ -502,24 +547,12 @@ static int mtk_drm_probe(struct platform_device *pdev)
 				 node);
 			drm_of_component_match_add(dev, &match, compare_of,
 						   node);
-		} else {
-			struct mtk_ddp_comp *comp;
-
-			comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
-			if (!comp) {
-				ret = -ENOMEM;
-				of_node_put(node);
-				goto err_node;
-			}
-
-			ret = mtk_ddp_comp_init(dev->parent, node, comp,
-						comp_id, NULL);
-			if (ret) {
-				of_node_put(node);
-				goto err_node;
-			}
-
-			private->ddp_comp[comp_id] = comp;
+		}
+
+		ret = mtk_ddp_comp_init(node, &private->ddp_comp[comp_id], comp_id);
+		if (ret) {
+			of_node_put(node);
+			goto err_node;
 		}
 	}
 
@@ -545,10 +578,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
 	of_node_put(private->mutex_node);
 	for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) {
 		of_node_put(private->comp_node[i]);
-		if (private->ddp_comp[i]) {
-			put_device(private->ddp_comp[i]->larb_dev);
-			private->ddp_comp[i] = NULL;
-		}
+		if (private->ddp_comp[i].larb_dev)
+			put_device(private->ddp_comp[i].larb_dev);
 	}
 	return ret;
 }
@@ -604,8 +635,9 @@ static struct platform_driver mtk_drm_platform_driver = {
 };
 
 static struct platform_driver * const mtk_drm_drivers[] = {
-	&mtk_ddp_driver,
+	&mtk_disp_ccorr_driver,
 	&mtk_disp_color_driver,
+	&mtk_disp_gamma_driver,
 	&mtk_disp_ovl_driver,
 	&mtk_disp_rdma_driver,
 	&mtk_dpi_driver,
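[Annotation] Note the ownership change that enables the simpler unwind above: mtk_drm_private now embeds the mtk_ddp_comp array by value, so the entries are zeroed along with the parent allocation and the error path only releases what a successful mtk_ddp_comp_init() actually acquired. A reduced sketch of that cleanup idiom (structure and size are illustrative):

	#include <linux/device.h>

	#define N_COMP 8

	struct comp {
		struct device *larb_dev;	/* NULL until init succeeds */
	};

	struct priv {
		struct comp comp[N_COMP];	/* embedded: no per-entry kzalloc */
	};

	static void unwind(struct priv *p)
	{
		int i;

		for (i = 0; i < N_COMP; i++)
			if (p->comp[i].larb_dev)
				put_device(p->comp[i].larb_dev);
	}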
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 5d771cf0bf25ea6de655b36ee19ff9ab48b8cdf2..637f5669e895452029b2eee4350f480309e0aa35 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -41,13 +41,14 @@ struct mtk_drm_private {
 	struct device *mutex_dev;
 	struct device *mmsys_dev;
 	struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
-	struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
+	struct mtk_ddp_comp ddp_comp[DDP_COMPONENT_ID_MAX];
 	const struct mtk_mmsys_driver_data *data;
 	struct drm_atomic_state *suspend_state;
 };
 
-extern struct platform_driver mtk_ddp_driver;
+extern struct platform_driver mtk_disp_ccorr_driver;
 extern struct platform_driver mtk_disp_color_driver;
+extern struct platform_driver mtk_disp_gamma_driver;
 extern struct platform_driver mtk_disp_ovl_driver;
 extern struct platform_driver mtk_disp_rdma_driver;
 extern struct platform_driver mtk_dpi_driver;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 28a2ee1336ef7a479e4ad3373fbb9b41b7b21304..280ea0d5e84003a6e920a500df2a9503ca4c923a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -260,7 +260,7 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
 		return -ENOMEM;
 	}
 
-	drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages);
+	drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages);
 
 	mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
 			       pgprot_writecombine(PAGE_KERNEL));
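[Annotation] drm_prime_sg_to_page_addr_arrays() was split into single-purpose helpers; the call sites here and in msm below only need the struct page * half, now drm_prime_sg_to_page_array(). A hedged sketch of the import-and-vmap pattern both hunks rely on (the wrapper function is hypothetical, and the failure behaviour reflects my reading of the helper):

	#include <drm/drm_prime.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	static void *vmap_imported(struct sg_table *sgt, unsigned int npages)
	{
		struct page **pages;
		void *vaddr;

		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;

		/* Fails (nonzero) if sgt describes more pages than npages. */
		if (drm_prime_sg_to_page_array(sgt, pages, npages)) {
			kfree(pages);
			return NULL;
		}

		vaddr = vmap(pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		kfree(pages);	/* the array is only needed while building the mapping */
		return vaddr;
	}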
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 65fd99c528af2446119bb1109f9636627619003d..a1ff152ef46836cdead7cf061488c04ada49b926 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -25,6 +25,7 @@
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
 
+#include "mtk_disp_drv.h"
 #include "mtk_drm_ddp_comp.h"
 
 #define DSI_START		0x00
@@ -178,7 +179,6 @@ struct mtk_dsi_driver_data {
 };
 
 struct mtk_dsi {
-	struct mtk_ddp_comp ddp_comp;
 	struct device *dev;
 	struct mipi_dsi_host host;
 	struct drm_encoder encoder;
@@ -767,25 +767,20 @@ static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
 	.mode_set = mtk_dsi_bridge_mode_set,
 };
 
-static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
+void mtk_dsi_ddp_start(struct device *dev)
 {
-	struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);
+	struct mtk_dsi *dsi = dev_get_drvdata(dev);
 
 	mtk_dsi_poweron(dsi);
 }
 
-static void mtk_dsi_ddp_stop(struct mtk_ddp_comp *comp)
+void mtk_dsi_ddp_stop(struct device *dev)
 {
-	struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);
+	struct mtk_dsi *dsi = dev_get_drvdata(dev);
 
 	mtk_dsi_poweroff(dsi);
 }
 
-static const struct mtk_ddp_comp_funcs mtk_dsi_funcs = {
-	.start = mtk_dsi_ddp_start,
-	.stop = mtk_dsi_ddp_stop,
-};
-
 static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
 			       struct mipi_dsi_device *device)
 {
@@ -952,7 +947,7 @@ static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
 		return ret;
 	}
 
-	dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->ddp_comp);
+	dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
 
 	ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
 				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
@@ -980,32 +975,17 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
 	struct drm_device *drm = data;
 	struct mtk_dsi *dsi = dev_get_drvdata(dev);
 
-	ret = mtk_ddp_comp_register(drm, &dsi->ddp_comp);
-	if (ret < 0) {
-		dev_err(dev, "Failed to register component %pOF: %d\n",
-			dev->of_node, ret);
-		return ret;
-	}
-
 	ret = mtk_dsi_encoder_init(drm, dsi);
-	if (ret)
-		goto err_unregister;
 
-	return 0;
-
-err_unregister:
-	mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
 	return ret;
 }
 
 static void mtk_dsi_unbind(struct device *dev, struct device *master,
 			   void *data)
 {
-	struct drm_device *drm = data;
 	struct mtk_dsi *dsi = dev_get_drvdata(dev);
 
 	drm_encoder_cleanup(&dsi->encoder);
-	mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
 }
 
 static const struct component_ops mtk_dsi_component_ops = {
@@ -1020,7 +1000,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
 	struct drm_panel *panel;
 	struct resource *regs;
 	int irq_num;
-	int comp_id;
 	int ret;
 
 	dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
@@ -1090,20 +1069,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
 		goto err_unregister_host;
 	}
 
-	comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI);
-	if (comp_id < 0) {
-		dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
-		ret = comp_id;
-		goto err_unregister_host;
-	}
-
-	ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id,
-				&mtk_dsi_funcs);
-	if (ret) {
-		dev_err(dev, "Failed to initialize component: %d\n", ret);
-		goto err_unregister_host;
-	}
-
 	irq_num = platform_get_irq(pdev, 0);
 	if (irq_num < 0) {
 		dev_err(&pdev->dev, "failed to get dsi irq_num: %d\n", irq_num);
@@ -1111,9 +1076,8 @@ static int mtk_dsi_probe(struct platform_device *pdev)
 		goto err_unregister_host;
 	}
 
-	irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
 	ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
-			       IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
+			       IRQF_TRIGGER_NONE, dev_name(&pdev->dev), dsi);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
 		goto err_unregister_host;
[Annotation] The IRQ hunk above drops the hard-coded level-low trigger: requesting with IRQF_TRIGGER_NONE leaves the trigger type to whatever the interrupt controller already programmed from the devicetree interrupt specifier, instead of overriding it in the driver.
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 6ccd270789c6b9ebde599a812116cf4be6bddef1..4fd4de16cd328b602ea1b76f50f89e4cf239cecb 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -1,4 +1,4 @@
-/**
+/*
  * \file mga_ioc32.c
  *
  * 32-bit ioctl compatibility routines for the MGA DRM.
@@ -159,13 +159,13 @@ static struct {
 };
 
 /**
- * Called whenever a 32-bit process running under a 64-bit kernel
- * performs an ioctl on /dev/dri/card<n>.
+ * mga_compat_ioctl - Called whenever a 32-bit process running under
+ *                    a 64-bit kernel performs an ioctl on /dev/dri/card<n>.
  *
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
+ * @filp: file pointer.
+ * @cmd:  command.
+ * @arg:  user argument.
+ * Return: zero on success or negative number on failure.
  */
 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
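[Annotation] The hunk above converts doxygen markup (\param, \return) to kernel-doc. For reference, the canonical kernel-doc shape, as a generic template rather than code from this series (kernel-doc expects the capitalized "Return:" section, adjusted above):

	/**
	 * my_func - One-line summary of the function.
	 * @filp: file pointer.
	 * @cmd: command.
	 * @arg: user argument.
	 *
	 * Optional longer description.
	 *
	 * Return: zero on success or a negative errno on failure.
	 */
	long my_func(struct file *filp, unsigned int cmd, unsigned long arg);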
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index a977c9f49719ba7097274aab820509cf5f786e07..4e4c105f9a50865d62139440bf6722b17ed80bb5 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -47,10 +47,11 @@ static const struct drm_driver mgag200_driver = {
 static bool mgag200_has_sgram(struct mga_device *mdev)
 {
 	struct drm_device *dev = &mdev->base;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	u32 option;
 	int ret;
 
-	ret = pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
+	ret = pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
 	if (drm_WARN(dev, ret, "failed to read PCI config dword: %d\n", ret))
 		return false;
 
@@ -60,6 +61,7 @@ static bool mgag200_has_sgram(struct mga_device *mdev)
 static int mgag200_regs_init(struct mga_device *mdev)
 {
 	struct drm_device *dev = &mdev->base;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	u32 option, option2;
 	u8 crtcext3;
 
@@ -99,13 +101,13 @@ static int mgag200_regs_init(struct mga_device *mdev)
 	}
 
 	if (option)
-		pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
+		pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
 	if (option2)
-		pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2);
+		pci_write_config_dword(pdev, PCI_MGA_OPTION2, option2);
 
 	/* BAR 1 contains registers */
-	mdev->rmmio_base = pci_resource_start(dev->pdev, 1);
-	mdev->rmmio_size = pci_resource_len(dev->pdev, 1);
+	mdev->rmmio_base = pci_resource_start(pdev, 1);
+	mdev->rmmio_size = pci_resource_len(pdev, 1);
 
 	if (!devm_request_mem_region(dev->dev, mdev->rmmio_base,
 				     mdev->rmmio_size, "mgadrmfb_mmio")) {
@@ -113,7 +115,7 @@ static int mgag200_regs_init(struct mga_device *mdev)
 		return -ENOMEM;
 	}
 
-	mdev->rmmio = pcim_iomap(dev->pdev, 1, 0);
+	mdev->rmmio = pcim_iomap(pdev, 1, 0);
 	if (mdev->rmmio == NULL)
 		return -ENOMEM;
 
@@ -218,6 +220,7 @@ static void mgag200_g200_interpret_bios(struct mga_device *mdev,
 static void mgag200_g200_init_refclk(struct mga_device *mdev)
 {
 	struct drm_device *dev = &mdev->base;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	unsigned char __iomem *rom;
 	unsigned char *bios;
 	size_t size;
@@ -226,7 +229,7 @@ static void mgag200_g200_init_refclk(struct mga_device *mdev)
 	mdev->model.g200.pclk_max = 230000;
 	mdev->model.g200.ref_clk = 27050;
 
-	rom = pci_map_rom(dev->pdev, &size);
+	rom = pci_map_rom(pdev, &size);
 	if (!rom)
 		return;
 
@@ -244,7 +247,7 @@ static void mgag200_g200_init_refclk(struct mga_device *mdev)
 
 	vfree(bios);
 out:
-	pci_unmap_rom(dev->pdev, rom);
+	pci_unmap_rom(pdev, rom);
 }
 
 static void mgag200_g200se_init_unique_id(struct mga_device *mdev)
@@ -301,7 +304,6 @@ mgag200_device_create(struct pci_dev *pdev, unsigned long flags)
 		return mdev;
 	dev = &mdev->base;
 
-	dev->pdev = pdev;
 	pci_set_drvdata(pdev, dev);
 
 	ret = mgag200_device_init(mdev, flags);
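[Annotation] All of this driver's dev->pdev accesses become to_pci_dev(dev->dev). to_pci_dev() is a container_of() upcast, so it is only valid because the mgag200 drm_device is devres-allocated against &pdev->dev. A minimal sketch:

	#include <linux/pci.h>

	#include <drm/drm_device.h>

	static struct pci_dev *drm_to_pci(struct drm_device *drm)
	{
		/* container_of(drm->dev, struct pci_dev, dev); PCI-backed devices only */
		return to_pci_dev(drm->dev);
	}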
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 09731e614e46d72b487ed2aca62b3509a74c5f40..ac8e34eef5138b036b5248f48bcad828719d0bad 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -126,7 +126,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
 	i2c->clock = clock;
 	i2c->adapter.owner = THIS_MODULE;
 	i2c->adapter.class = I2C_CLASS_DDC;
-	i2c->adapter.dev.parent = &dev->pdev->dev;
+	i2c->adapter.dev.parent = dev->dev;
 	i2c->dev = dev;
 	i2c_set_adapdata(&i2c->adapter, i2c);
 	snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "mga i2c");
diff --git a/drivers/gpu/drm/mgag200/mgag200_mm.c b/drivers/gpu/drm/mgag200/mgag200_mm.c
index 641f1aa992be33aa951a407a511afde12e4473fc..b667371b69a4a24546ffdf5f1187064536a7c1fe 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mm.c
@@ -78,11 +78,12 @@ static size_t mgag200_probe_vram(struct mga_device *mdev, void __iomem *mem,
 static void mgag200_mm_release(struct drm_device *dev, void *ptr)
 {
 	struct mga_device *mdev = to_mga_device(dev);
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	mdev->vram_fb_available = 0;
 	iounmap(mdev->vram);
-	arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
-				pci_resource_len(dev->pdev, 0));
+	arch_io_free_memtype_wc(pci_resource_start(pdev, 0),
+				pci_resource_len(pdev, 0));
 	arch_phys_wc_del(mdev->fb_mtrr);
 	mdev->fb_mtrr = 0;
 }
@@ -90,6 +91,7 @@ static void mgag200_mm_release(struct drm_device *dev, void *ptr)
 int mgag200_mm_init(struct mga_device *mdev)
 {
 	struct drm_device *dev = &mdev->base;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	u8 misc;
 	resource_size_t start, len;
 	int ret;
@@ -102,8 +104,8 @@ int mgag200_mm_init(struct mga_device *mdev)
 	WREG8(MGA_MISC_OUT, misc);
 
 	/* BAR 0 is VRAM */
-	start = pci_resource_start(dev->pdev, 0);
-	len = pci_resource_len(dev->pdev, 0);
+	start = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
 
 	if (!devm_request_mem_region(dev->dev, start, len, "mgadrmfb_vram")) {
 		drm_err(dev, "can't reserve VRAM\n");
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index e3462f5d96d7554c075b195c4f6f5c4ace535bd4..36b39c381b3fa728ece3a70377b8d92af3b48989 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1420,16 +1420,14 @@ void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
 static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
 {
 	u8 *dpcd = ctrl->panel->dpcd;
-	u32 edid_quirks = 0;
 
-	edid_quirks = drm_dp_get_edid_quirks(ctrl->panel->edid);
 	/*
 	 * For better interop experience, used a fixed NVID=0x8000
 	 * whenever connected to a VGA dongle downstream.
 	 */
 	if (drm_dp_is_branch(dpcd))
-		return (drm_dp_has_quirk(&ctrl->panel->desc, edid_quirks,
-				DP_DPCD_QUIRK_CONSTANT_N));
+		return (drm_dp_has_quirk(&ctrl->panel->desc,
+					 DP_DPCD_QUIRK_CONSTANT_N));
 
 	return false;
 }
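[Annotation] With the EDID-based quirk table gone, drm_dp_has_quirk() keys off the DPCD descriptor alone. A sketch of the resulting check (the panel wrapper type is hypothetical):

	#include <drm/drm_dp_helper.h>

	struct my_panel {
		u8 dpcd[DP_RECEIVER_CAP_SIZE];
		struct drm_dp_desc desc;
	};

	static bool needs_constant_n(const struct my_panel *panel)
	{
		return drm_dp_is_branch(panel->dpcd) &&
		       drm_dp_has_quirk(&panel->desc, DP_DPCD_QUIRK_CONSTANT_N);
	}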
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 9d10739c4eb2dab23cb27257e2f8a0addc31ec68..a588b0388d271bbeac7b76e71813fcab5c0f8303 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1216,7 +1216,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		goto fail;
 	}
 
-	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
+	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
 	if (ret) {
 		msm_gem_unlock(obj);
 		goto fail;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 9d4a2d97507e03660b432a410497717a497c4a43..1d3542d6006b597828d7bfdb8fbd179804103ffe 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -200,16 +200,17 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
 	int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
 	int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
 	uint32_t cfg1 = nvif_rd32(device, NV04_PFB_CFG1);
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	sim_data.pclk_khz = VClk;
 	sim_data.mclk_khz = MClk;
 	sim_data.nvclk_khz = NVClk;
 	sim_data.bpp = bpp;
 	sim_data.two_heads = nv_two_heads(dev);
-	if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
-	    (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
+	if ((pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
+	    (pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
 		uint32_t type;
-		int domain = pci_domain_nr(dev->pdev->bus);
+		int domain = pci_domain_nr(pdev->bus);
 
 		pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 1),
 				      0x7c, &type);
@@ -251,11 +252,12 @@ void
 nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN)
 		nv04_update_arb(dev, vclk, bpp, burst, lwm);
-	else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
-		 (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
+	else if ((pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
+		 (pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
 		*burst = 128;
 		*lwm = 0x0480;
 	} else
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 42687ea2a4ca32fb3a30b37d5bd115c7218f12de..ce3d8c6ef000a4623dca525820f5b46f6401e8b2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -488,12 +488,13 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
 #ifdef __powerpc__
 	struct drm_device *dev = encoder->dev;
 	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	/* BIOS scripts usually take care of the backlight, thanks
 	 * Apple for your consistency.
 	 */
-	if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
-	    dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
+	if (pdev->device == 0x0174 || pdev->device == 0x0179 ||
+	    pdev->device == 0x0189 || pdev->device == 0x0329) {
 		if (mode == DRM_MODE_DPMS_ON) {
 			nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
 			nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 5ace5e906949a02c5b4fc9653182a19f87212b0f..f0a24126641abb999945e450188ac3ba6e4d9e4d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -130,7 +130,7 @@ static inline bool
 nv_two_heads(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	const int impl = dev->pdev->device & 0x0ff0;
+	const int impl = to_pci_dev(dev->dev)->device & 0x0ff0;
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 &&
 	    impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
@@ -142,14 +142,14 @@ nv_two_heads(struct drm_device *dev)
 static inline bool
 nv_gf4_disp_arch(struct drm_device *dev)
 {
-	return nv_two_heads(dev) && (dev->pdev->device & 0x0ff0) != 0x0110;
+	return nv_two_heads(dev) && (to_pci_dev(dev->dev)->device & 0x0ff0) != 0x0110;
 }
 
 static inline bool
 nv_two_reg_pll(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	const int impl = dev->pdev->device & 0x0ff0;
+	const int impl = to_pci_dev(dev->dev)->device & 0x0ff0;
 
 	if (impl == 0x0310 || impl == 0x0340 || drm->client.device.info.family >= NV_DEVICE_INFO_V0_CURIE)
 		return true;
@@ -160,9 +160,11 @@ static inline bool
 nv_match_device(struct drm_device *dev, unsigned device,
 		unsigned sub_vendor, unsigned sub_device)
 {
-	return dev->pdev->device == device &&
-		dev->pdev->subsystem_vendor == sub_vendor &&
-		dev->pdev->subsystem_device == sub_device;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+	return pdev->device == device &&
+		pdev->subsystem_vendor == sub_vendor &&
+		pdev->subsystem_device == sub_device;
 }
 
 #include <subdev/bios/init.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index b674d68ef28ab8bc1efb2538530a05773ed16359..f7d35657aa64c270d6fa6debc4492af36682803a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -214,14 +214,15 @@ nouveau_hw_pllvals_to_clk(struct nvkm_pll_vals *pv)
 int
 nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
 {
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct nvkm_pll_vals pllvals;
 	int ret;
 	int domain;
 
-	domain = pci_domain_nr(dev->pdev->bus);
+	domain = pci_domain_nr(pdev->bus);
 
 	if (plltype == PLL_MEMORY &&
-	    (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
+	    (pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
 		uint32_t mpllP;
 		pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 3),
 				      0x6c, &mpllP);
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
 		return 400000 / mpllP;
 	} else
 	if (plltype == PLL_MEMORY &&
-	    (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) {
+	    (pdev->device & 0xff0) == CHIPSET_NFORCE2) {
 		uint32_t clock;
 
 		pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 5),
@@ -309,6 +310,7 @@ void
 nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	uint8_t misc, gr4, gr5, gr6, seq2, seq4;
 	bool graphicsmode;
 	unsigned plane;
@@ -327,7 +329,7 @@ nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
 	NV_INFO(drm, "%sing VGA fonts\n", save ? "Sav" : "Restor");
 
 	/* map first 64KiB of VRAM, holds VGA fonts etc */
-	iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
+	iovram = ioremap(pci_resource_start(pdev, 1), 65536);
 	if (!iovram) {
 		NV_ERROR(drm, "Failed to map VRAM, "
 					"cannot save/restore VGA fonts.\n");
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index e6f16a7750f07c2090c9acb2259b02701b020619..1a1d806e0b01a061f936b93a4664ed7aecfc7f35 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -36,7 +36,7 @@ core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
 	struct nvif_push *push = core->chan.push;
 	int ret;
 
-	if ((ret = PUSH_WAIT(push, 5)))
+	if ((ret = PUSH_WAIT(push, (ntfy ? 2 : 0) + 3)))
 		return ret;
 
 	if (ntfy) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 9035d3ab062c4619e10714f03cb83d1d0d6980bb..42f877f2ced2770812bf7b051086d5b04d3917ed 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -54,7 +54,7 @@ corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
 	struct nvif_push *push = core->chan.push;
 	int ret;
 
-	if ((ret = PUSH_WAIT(push, 9)))
+	if ((ret = PUSH_WAIT(push, (ntfy ? 2 * 2 : 0) + 5)))
 		return ret;
 
 	if (ntfy) {
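[Annotation] Both PUSH_WAIT() changes above size the reservation to exactly what each path emits rather than a fixed worst case: the notifier methods are only pushed when ntfy is set, costing 2 dwords on 507d and 2 * 2 on c37d, with the remaining constant covering the unconditional tail. A generic sketch of the idiom, with illustrative dword costs (not the real NVD method sizes):

	/* Sketch: count what each branch will actually emit. */
	int emit_update(struct nvif_push *push, bool ntfy)
	{
		int ret, dwords = 3;		/* unconditional tail, e.g. UPDATE */

		if (ntfy)
			dwords += 2;		/* method header + payload dword */

		ret = PUSH_WAIT(push, dwords);	/* reserve or fail, never overflow */
		if (ret)
			return ret;

		/* ...PUSH_MTHD() calls totalling exactly `dwords` dwords... */
		return 0;
	}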
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 5f4f09a601d4c38ca5d1dbebb46e052807cd8af6..196612addfd615ef5c8b324f9df94bdd595d8e87 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -220,6 +220,10 @@ nv50_dmac_wait(struct nvif_push *push, u32 size)
 	return 0;
 }
 
+MODULE_PARM_DESC(kms_vram_pushbuf, "Place EVO/NVD push buffers in VRAM (default: auto)");
+static int nv50_dmac_vram_pushbuf = -1;
+module_param_named(kms_vram_pushbuf, nv50_dmac_vram_pushbuf, int, 0400);
+
 int
 nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 		 const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
@@ -241,7 +245,8 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 	 *
 	 * This appears to match NVIDIA's behaviour on Pascal.
 	 */
-	if (device->info.family == NV_DEVICE_INFO_V0_PASCAL)
+	if ((nv50_dmac_vram_pushbuf > 0) ||
+	    (nv50_dmac_vram_pushbuf < 0 && device->info.family == NV_DEVICE_INFO_V0_PASCAL))
 		type |= NVIF_MEM_VRAM;
 
 	ret = nvif_mem_ctor_map(&cli->mmu, "kmsChanPush", type, 0x1000,
@@ -304,6 +309,14 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 /******************************************************************************
  * Output path helpers
  *****************************************************************************/
+static void
+nv50_outp_dump_caps(struct nouveau_drm *drm,
+		    struct nouveau_encoder *outp)
+{
+	NV_DEBUG(drm, "%s caps: dp_interlace=%d\n",
+		 outp->base.base.name, outp->caps.dp_interlace);
+}
+
 static void
 nv50_outp_release(struct nouveau_encoder *nv_encoder)
 {
@@ -419,8 +432,7 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
 }
 
 struct nouveau_connector *
-nv50_outp_get_new_connector(struct nouveau_encoder *outp,
-			    struct drm_atomic_state *state)
+nv50_outp_get_new_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
 {
 	struct drm_connector *connector;
 	struct drm_connector_state *connector_state;
@@ -436,8 +448,7 @@ nv50_outp_get_new_connector(struct nouveau_encoder *outp,
 }
 
 struct nouveau_connector *
-nv50_outp_get_old_connector(struct nouveau_encoder *outp,
-			    struct drm_atomic_state *state)
+nv50_outp_get_old_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
 {
 	struct drm_connector *connector;
 	struct drm_connector_state *connector_state;
@@ -452,27 +463,44 @@ nv50_outp_get_old_connector(struct nouveau_encoder *outp,
 	return NULL;
 }
 
+static struct nouveau_crtc *
+nv50_outp_get_new_crtc(const struct drm_atomic_state *state, const struct nouveau_encoder *outp)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	const u32 mask = drm_encoder_mask(&outp->base.base);
+	int i;
+
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+		if (crtc_state->encoder_mask & mask)
+			return nouveau_crtc(crtc);
+	}
+
+	return NULL;
+}
+
 /******************************************************************************
  * DAC
  *****************************************************************************/
 static void
-nv50_dac_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_dac_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nv50_core *core = nv50_disp(encoder->dev)->core;
 	const u32 ctrl = NVDEF(NV507D, DAC_SET_CONTROL, OWNER, NONE);
-	if (nv_encoder->crtc)
-		core->func->dac->ctrl(core, nv_encoder->or, ctrl, NULL);
+
+	core->func->dac->ctrl(core, nv_encoder->or, ctrl, NULL);
 	nv_encoder->crtc = NULL;
 	nv50_outp_release(nv_encoder);
 }
 
 static void
-nv50_dac_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_dac_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-	struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
+	struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
+	struct nv50_head_atom *asyh =
+		nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
 	struct nv50_core *core = nv50_disp(encoder->dev)->core;
 	u32 ctrl = 0;
 
@@ -493,7 +521,7 @@ nv50_dac_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 	core->func->dac->ctrl(core, nv_encoder->or, ctrl, asyh);
 	asyh->or.depth = 0;
 
-	nv_encoder->crtc = encoder->crtc;
+	nv_encoder->crtc = &nv_crtc->base;
 }
 
 static enum drm_connector_status
@@ -526,8 +554,8 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 static const struct drm_encoder_helper_funcs
 nv50_dac_help = {
 	.atomic_check = nv50_outp_atomic_check,
-	.atomic_enable = nv50_dac_enable,
-	.atomic_disable = nv50_dac_disable,
+	.atomic_enable = nv50_dac_atomic_enable,
+	.atomic_disable = nv50_dac_atomic_disable,
 	.detect = nv50_dac_detect
 };
 
@@ -593,34 +621,27 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
 	struct nouveau_drm *drm = nouveau_drm(drm_dev);
 	struct drm_encoder *encoder;
 	struct nouveau_encoder *nv_encoder;
-	struct drm_connector *connector;
 	struct nouveau_crtc *nv_crtc;
-	struct drm_connector_list_iter conn_iter;
 	int ret = 0;
 
 	*enabled = false;
 
+	mutex_lock(&drm->audio.lock);
+
 	drm_for_each_encoder(encoder, drm->dev) {
 		struct nouveau_connector *nv_connector = NULL;
 
+		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST)
+			continue; /* TODO */
+
 		nv_encoder = nouveau_encoder(encoder);
+		nv_connector = nouveau_connector(nv_encoder->audio.connector);
+		nv_crtc = nouveau_crtc(nv_encoder->crtc);
 
-		drm_connector_list_iter_begin(drm_dev, &conn_iter);
-		drm_for_each_connector_iter(connector, &conn_iter) {
-			if (connector->state->best_encoder == encoder) {
-				nv_connector = nouveau_connector(connector);
-				break;
-			}
-		}
-		drm_connector_list_iter_end(&conn_iter);
-		if (!nv_connector)
+		if (!nv_crtc || nv_encoder->or != port || nv_crtc->index != dev_id)
 			continue;
 
-		nv_crtc = nouveau_crtc(encoder->crtc);
-		if (!nv_crtc || nv_encoder->or != port ||
-		    nv_crtc->index != dev_id)
-			continue;
-		*enabled = nv_encoder->audio;
+		*enabled = nv_encoder->audio.enabled;
 		if (*enabled) {
 			ret = drm_eld_size(nv_connector->base.eld);
 			memcpy(buf, nv_connector->base.eld,
@@ -629,6 +650,8 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
 		break;
 	}
 
+	mutex_unlock(&drm->audio.lock);
+
 	return ret;
 }
 
@@ -678,17 +701,22 @@ static const struct component_ops nv50_audio_component_bind_ops = {
 static void
 nv50_audio_component_init(struct nouveau_drm *drm)
 {
-	if (!component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
-		drm->audio.component_registered = true;
+	if (component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
+		return;
+
+	drm->audio.component_registered = true;
+	mutex_init(&drm->audio.lock);
 }
 
 static void
 nv50_audio_component_fini(struct nouveau_drm *drm)
 {
-	if (drm->audio.component_registered) {
-		component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
-		drm->audio.component_registered = false;
-	}
+	if (!drm->audio.component_registered)
+		return;
+
+	component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
+	drm->audio.component_registered = false;
+	mutex_destroy(&drm->audio.lock);
 }
 
 /******************************************************************************
@@ -711,24 +739,25 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
 				(0x0100 << nv_crtc->index),
 	};
 
-	if (!nv_encoder->audio)
-		return;
-
-	nv_encoder->audio = false;
-	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
+	mutex_lock(&drm->audio.lock);
+	if (nv_encoder->audio.enabled) {
+		nv_encoder->audio.enabled = false;
+		nv_encoder->audio.connector = NULL;
+		nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
+	}
+	mutex_unlock(&drm->audio.lock);
 
 	nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
 					nv_crtc->index);
 }
 
 static void
-nv50_audio_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
+nv50_audio_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
+		  struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
 		  struct drm_display_mode *mode)
 {
 	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-	struct nouveau_connector *nv_connector;
 	struct nv50_disp *disp = nv50_disp(encoder->dev);
 	struct __packed {
 		struct {
@@ -744,15 +773,19 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
 				     (0x0100 << nv_crtc->index),
 	};
 
-	nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
 	if (!drm_detect_monitor_audio(nv_connector->edid))
 		return;
 
+	mutex_lock(&drm->audio.lock);
+
 	memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
 
 	nvif_mthd(&disp->disp->object, 0, &args,
 		  sizeof(args.base) + drm_eld_size(args.data));
-	nv_encoder->audio = true;
+	nv_encoder->audio.enabled = true;
+	nv_encoder->audio.connector = &nv_connector->base;
+
+	mutex_unlock(&drm->audio.lock);
 
 	nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
 					nv_crtc->index);
@@ -781,12 +814,12 @@ nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
 }
 
 static void
-nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
+nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
+		 struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
 		 struct drm_display_mode *mode)
 {
 	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nv50_disp *disp = nv50_disp(encoder->dev);
 	struct {
 		struct nv50_disp_mthd_v1 base;
@@ -801,7 +834,6 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
 		.pwr.state = 1,
 		.pwr.rekey = 56, /* binary driver, and tegra, constant */
 	};
-	struct nouveau_connector *nv_connector;
 	struct drm_hdmi_info *hdmi;
 	u32 max_ac_packet;
 	union hdmi_infoframe avi_frame;
@@ -811,7 +843,6 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
 	int ret;
 	int size;
 
-	nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
 	if (!drm_detect_hdmi_monitor(nv_connector->edid))
 		return;
 
@@ -857,7 +888,7 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
 		+ args.pwr.vendor_infoframe_length;
 	nvif_mthd(&disp->disp->object, 0, &args, size);
 
-	nv50_audio_enable(encoder, state, mode);
+	nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
 
 	/* If SCDC is supported by the downstream monitor, update
 	 * divider / scrambling settings to what we programmed above.
@@ -898,6 +929,7 @@ struct nv50_mstc {
 struct nv50_msto {
 	struct drm_encoder encoder;
 
+	/* head is statically assigned on msto creation */
 	struct nv50_head *head;
 	struct nv50_mstc *mstc;
 	bool disabled;
@@ -1056,11 +1088,12 @@ nv50_dp_bpc_to_depth(unsigned int bpc)
 }
 
 static void
-nv50_msto_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 {
-	struct nv50_head *head = nv50_head(encoder->crtc);
-	struct nv50_head_atom *armh = nv50_head_atom(head->base.base.state);
 	struct nv50_msto *msto = nv50_msto(encoder);
+	struct nv50_head *head = msto->head;
+	struct nv50_head_atom *asyh =
+		nv50_head_atom(drm_atomic_get_new_crtc_state(state, &head->base.base));
 	struct nv50_mstc *mstc = NULL;
 	struct nv50_mstm *mstm = NULL;
 	struct drm_connector *connector;
@@ -1081,8 +1114,7 @@ nv50_msto_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 	if (WARN_ON(!mstc))
 		return;
 
-	r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, armh->dp.pbn,
-				     armh->dp.tu);
+	r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, asyh->dp.pbn, asyh->dp.tu);
 	if (!r)
 		DRM_DEBUG_KMS("Failed to allocate VCPI\n");
 
@@ -1094,15 +1126,15 @@ nv50_msto_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 	else
 		proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B;
 
-	mstm->outp->update(mstm->outp, head->base.index, armh, proto,
-			   nv50_dp_bpc_to_depth(armh->or.bpc));
+	mstm->outp->update(mstm->outp, head->base.index, asyh, proto,
+			   nv50_dp_bpc_to_depth(asyh->or.bpc));
 
 	msto->mstc = mstc;
 	mstm->modified = true;
 }
 
 static void
-nv50_msto_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_msto_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 {
 	struct nv50_msto *msto = nv50_msto(encoder);
 	struct nv50_mstc *mstc = msto->mstc;
@@ -1119,8 +1151,8 @@ nv50_msto_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 
 static const struct drm_encoder_helper_funcs
 nv50_msto_help = {
-	.atomic_disable = nv50_msto_disable,
-	.atomic_enable = nv50_msto_enable,
+	.atomic_disable = nv50_msto_atomic_disable,
+	.atomic_enable = nv50_msto_atomic_enable,
 	.atomic_check = nv50_msto_atomic_check,
 };
 
@@ -1616,43 +1648,38 @@ nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
 }
 
 static void
-nv50_sor_disable(struct drm_encoder *encoder,
-		 struct drm_atomic_state *state)
+nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
-	struct nouveau_connector *nv_connector =
-		nv50_outp_get_old_connector(nv_encoder, state);
-
-	nv_encoder->crtc = NULL;
-
-	if (nv_crtc) {
-		struct drm_dp_aux *aux = &nv_connector->aux;
-		u8 pwr;
+	struct nouveau_connector *nv_connector = nv50_outp_get_old_connector(state, nv_encoder);
+	struct drm_dp_aux *aux = &nv_connector->aux;
+	u8 pwr;
 
-		if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-			int ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
+	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
+		int ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
 
-			if (ret == 0) {
-				pwr &= ~DP_SET_POWER_MASK;
-				pwr |=  DP_SET_POWER_D3;
-				drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
-			}
+		if (ret == 0) {
+			pwr &= ~DP_SET_POWER_MASK;
+			pwr |=  DP_SET_POWER_D3;
+			drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
 		}
-
-		nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
-		nv50_audio_disable(encoder, nv_crtc);
-		nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
-		nv50_outp_release(nv_encoder);
 	}
+
+	nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
+	nv50_audio_disable(encoder, nv_crtc);
+	nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
+	nv50_outp_release(nv_encoder);
+	nv_encoder->crtc = NULL;
 }
 
 static void
-nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-	struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
+	struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
+	struct nv50_head_atom *asyh =
+		nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
 	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
 	struct {
 		struct nv50_disp_mthd_v1 base;
@@ -1672,8 +1699,8 @@ nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 	u8 proto = NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM;
 	u8 depth = NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT;
 
-	nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
-	nv_encoder->crtc = encoder->crtc;
+	nv_connector = nv50_outp_get_new_connector(state, nv_encoder);
+	nv_encoder->crtc = &nv_crtc->base;
 
 	if ((disp->disp->object.oclass == GT214_DISP ||
 	     disp->disp->object.oclass >= GF110_DISP) &&
@@ -1699,7 +1726,7 @@ nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 			proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B;
 		}
 
-		nv50_hdmi_enable(&nv_encoder->base.base, state, mode);
+		nv50_hdmi_enable(&nv_encoder->base.base, nv_crtc, nv_connector, state, mode);
 		break;
 	case DCB_OUTPUT_LVDS:
 		proto = NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM;
@@ -1740,7 +1767,7 @@ nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 		else
 			proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B;
 
-		nv50_audio_enable(encoder, state, mode);
+		nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
 		break;
 	default:
 		BUG();
@@ -1753,8 +1780,8 @@ nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 static const struct drm_encoder_helper_funcs
 nv50_sor_help = {
 	.atomic_check = nv50_outp_atomic_check,
-	.atomic_enable = nv50_sor_enable,
-	.atomic_disable = nv50_sor_disable,
+	.atomic_enable = nv50_sor_atomic_enable,
+	.atomic_disable = nv50_sor_atomic_disable,
 };
 
 static void
@@ -1821,6 +1848,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 	drm_connector_attach_encoder(connector, encoder);
 
 	disp->core->func->sor->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+	nv50_outp_dump_caps(drm, nv_encoder);
 
 	if (dcbe->type == DCB_OUTPUT_DP) {
 		struct nvkm_i2c_aux *aux =
@@ -1875,23 +1903,24 @@ nv50_pior_atomic_check(struct drm_encoder *encoder,
 }
 
 static void
-nv50_pior_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_pior_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nv50_core *core = nv50_disp(encoder->dev)->core;
 	const u32 ctrl = NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, NONE);
-	if (nv_encoder->crtc)
-		core->func->pior->ctrl(core, nv_encoder->or, ctrl, NULL);
+
+	core->func->pior->ctrl(core, nv_encoder->or, ctrl, NULL);
 	nv_encoder->crtc = NULL;
 	nv50_outp_release(nv_encoder);
 }
 
 static void
-nv50_pior_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_pior_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-	struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
+	struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
+	struct nv50_head_atom *asyh =
+		nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
 	struct nv50_core *core = nv50_disp(encoder->dev)->core;
 	u32 ctrl = 0;
 
@@ -1929,8 +1958,8 @@ nv50_pior_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 static const struct drm_encoder_helper_funcs
 nv50_pior_help = {
 	.atomic_check = nv50_pior_atomic_check,
-	.atomic_enable = nv50_pior_enable,
-	.atomic_disable = nv50_pior_disable,
+	.atomic_enable = nv50_pior_atomic_enable,
+	.atomic_disable = nv50_pior_atomic_disable,
 };
 
 static void
@@ -1991,6 +2020,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
 	drm_connector_attach_encoder(connector, encoder);
 
 	disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+	nv50_outp_dump_caps(drm, nv_encoder);
 
 	return 0;
 }
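[Annotation] kms_vram_pushbuf is the usual tri-state override: the default -1 keeps the old behaviour (VRAM push buffers only on Pascal), 0 forces host memory, and a positive value forces VRAM, e.g. booting with nouveau.kms_vram_pushbuf=1. The placement decision above reduces to this sketch:

	/* Tri-state module option pattern: -1 auto, 0 off, >0 on. */
	static int vram_pushbuf = -1;

	static bool want_vram_pushbuf(bool is_pascal)
	{
		return vram_pushbuf > 0 ||
		       (vram_pushbuf < 0 && is_pascal);	/* auto: Pascal only */
	}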
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 537c1ef2e4646e88518c6f3ed73e905c566c274b..ec361d17e900bec9df47f826e034f17561344cf3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -503,7 +503,6 @@ nv50_head_destroy(struct drm_crtc *crtc)
 static const struct drm_crtc_funcs
 nv50_head_func = {
 	.reset = nv50_head_reset,
-	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 	.destroy = nv50_head_destroy,
 	.set_config = drm_atomic_helper_set_config,
 	.page_flip = drm_atomic_helper_page_flip,
@@ -518,7 +517,6 @@ nv50_head_func = {
 static const struct drm_crtc_funcs
 nvd9_head_func = {
 	.reset = nv50_head_reset,
-	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 	.destroy = nv50_head_destroy,
 	.set_config = drm_atomic_helper_set_config,
 	.page_flip = drm_atomic_helper_page_flip,
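[Annotation] Dropping .gamma_set works because the atomic helper core now services the legacy gamma ioctl through the GAMMA_LUT property whenever the CRTC exposes one, so the per-driver callback is redundant. Nouveau already advertises the LUT at head creation, along the lines of this sketch (sizes illustrative):

	/* Legacy gamma keeps working as long as a GAMMA_LUT is advertised. */
	drm_mode_crtc_set_gamma_size(&head->base.base, 256);
	drm_crtc_enable_color_mgmt(&head->base.base, 0, false, 256);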
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index 8f860e9c52247c8a178393820148a09d4b9074b7..85648d790743f02a69c8bff402116e8cfd765b04 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -322,7 +322,7 @@ head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 	const int i = head->base.index;
 	int ret;
 
-	if ((ret = PUSH_WAIT(push, 14)))
+	if ((ret = PUSH_WAIT(push, 13)))
 		return ret;
 
 	PUSH_MTHD(push, NV907D, HEAD_SET_OVERSCAN_COLOR(i),
@@ -353,14 +353,7 @@ head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 	PUSH_MTHD(push, NV907D, HEAD_SET_DEFAULT_BASE_COLOR(i),
 		  NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, RED, 0) |
 		  NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, GREEN, 0) |
-		  NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, BLUE, 0),
-
-				HEAD_SET_CRC_CONTROL(i),
-		  NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
-		  NVDEF(NV907D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
-		  NVDEF(NV907D, HEAD_SET_CRC_CONTROL, TIMESTAMP_MODE, FALSE) |
-		  NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, NONE) |
-		  NVDEF(NV907D, HEAD_SET_CRC_CONTROL, SECONDARY_OUTPUT, NONE));
+		  NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, BLUE, 0));
 
 	PUSH_MTHD(push, NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
 		  NVVAL(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000) |
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 57d4f457a7d4ad03b4bd998a1ef1777923ebab83..0b86c44878e0c85c32bf11b20ab518642775e5c6 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -60,37 +60,33 @@ struct nv_device_time_v0 {
 
 #define NV_DEVICE_INFO_UNIT                               (0xffffffffULL << 32)
 #define NV_DEVICE_INFO(n)                          ((n) | (0x00000000ULL << 32))
-#define NV_DEVICE_FIFO(n)                          ((n) | (0x00000001ULL << 32))
+#define NV_DEVICE_HOST(n)                          ((n) | (0x00000001ULL << 32))
 
-/* This will be returned for unsupported queries. */
+/* This will be returned in the mthd field for unsupported queries. */
 #define NV_DEVICE_INFO_INVALID                                           ~0ULL
 
-/* These return a mask of available engines of particular type. */
-#define NV_DEVICE_INFO_ENGINE_SW                     NV_DEVICE_INFO(0x00000000)
-#define NV_DEVICE_INFO_ENGINE_GR                     NV_DEVICE_INFO(0x00000001)
-#define NV_DEVICE_INFO_ENGINE_MPEG                   NV_DEVICE_INFO(0x00000002)
-#define NV_DEVICE_INFO_ENGINE_ME                     NV_DEVICE_INFO(0x00000003)
-#define NV_DEVICE_INFO_ENGINE_CIPHER                 NV_DEVICE_INFO(0x00000004)
-#define NV_DEVICE_INFO_ENGINE_BSP                    NV_DEVICE_INFO(0x00000005)
-#define NV_DEVICE_INFO_ENGINE_VP                     NV_DEVICE_INFO(0x00000006)
-#define NV_DEVICE_INFO_ENGINE_CE                     NV_DEVICE_INFO(0x00000007)
-#define NV_DEVICE_INFO_ENGINE_SEC                    NV_DEVICE_INFO(0x00000008)
-#define NV_DEVICE_INFO_ENGINE_MSVLD                  NV_DEVICE_INFO(0x00000009)
-#define NV_DEVICE_INFO_ENGINE_MSPDEC                 NV_DEVICE_INFO(0x0000000a)
-#define NV_DEVICE_INFO_ENGINE_MSPPP                  NV_DEVICE_INFO(0x0000000b)
-#define NV_DEVICE_INFO_ENGINE_MSENC                  NV_DEVICE_INFO(0x0000000c)
-#define NV_DEVICE_INFO_ENGINE_VIC                    NV_DEVICE_INFO(0x0000000d)
-#define NV_DEVICE_INFO_ENGINE_SEC2                   NV_DEVICE_INFO(0x0000000e)
-#define NV_DEVICE_INFO_ENGINE_NVDEC                  NV_DEVICE_INFO(0x0000000f)
-#define NV_DEVICE_INFO_ENGINE_NVENC                  NV_DEVICE_INFO(0x00000010)
-
+/* Returns the number of available runlists. */
+#define NV_DEVICE_HOST_RUNLISTS                       NV_DEVICE_HOST(0x00000000)
 /* Returns the number of available channels. */
-#define NV_DEVICE_FIFO_CHANNELS                      NV_DEVICE_FIFO(0x00000000)
-
-/* Returns a mask of available runlists. */
-#define NV_DEVICE_FIFO_RUNLISTS                      NV_DEVICE_FIFO(0x00000001)
+#define NV_DEVICE_HOST_CHANNELS                       NV_DEVICE_HOST(0x00000001)
 
-/* These return a mask of engines available on a particular runlist. */
-#define NV_DEVICE_FIFO_RUNLIST_ENGINES(n)     ((n) + NV_DEVICE_FIFO(0x00000010))
-#define NV_DEVICE_FIFO_RUNLIST_ENGINES__SIZE                                64
+/* Returns a mask of available engine types on runlist(data). */
+#define NV_DEVICE_HOST_RUNLIST_ENGINES                NV_DEVICE_HOST(0x00000100)
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_SW                            0x00000001
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_GR                            0x00000002
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_MPEG                          0x00000004
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_ME                            0x00000008
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_CIPHER                        0x00000010
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_BSP                           0x00000020
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_VP                            0x00000040
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_CE                            0x00000080
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_SEC                           0x00000100
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD                         0x00000200
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC                        0x00000400
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP                         0x00000800
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_MSENC                         0x00001000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_VIC                           0x00002000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_SEC2                          0x00004000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_NVDEC                         0x00008000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_NVENC                         0x00010000
 #endif
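
For context on how these HOST queries are consumed: a client fills the mthd field of an nv_device_info_v1 entry and reads the answer back from data. A minimal sketch, assuming the nv_device_info_v1/nv_device_info_v1_data definitions earlier in this header and the generic nvif_object_mthd() call (an illustration, not code from this series):

    struct {
            struct nv_device_info_v1 m;
            struct nv_device_info_v1_data data[2];
    } args = {
            .m.version = 1,
            .m.count = 2,
            /* How many runlists does HOST expose? */
            .data[0].mthd = NV_DEVICE_HOST_RUNLISTS,
            /* Which engine types sit on runlist 0? (index goes in data) */
            .data[1].mthd = NV_DEVICE_HOST_RUNLIST_ENGINES,
            .data[1].data = 0,
    };
    int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_INFO,
                               &args, sizeof(args));

    /* Unsupported queries come back with mthd == NV_DEVICE_INFO_INVALID. */
    if (ret == 0 && args.data[1].mthd != NV_DEVICE_INFO_INVALID &&
        (args.data[1].data & NV_DEVICE_HOST_RUNLIST_ENGINES_CE))
            ; /* runlist 0 can service copy-engine work */
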
diff --git a/drivers/gpu/drm/nouveau/include/nvif/fifo.h b/drivers/gpu/drm/nouveau/include/nvif/fifo.h
index e9468c9f9abfddaba36057fc93baa34d2b69cdc6..d351ac890ca1a6c8edc060ab6ae289dad95df328 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/fifo.h
@@ -2,15 +2,15 @@
 #define __NVIF_FIFO_H__
 #include <nvif/device.h>
 
-/* Returns mask of runlists that support a NV_DEVICE_INFO_ENGINE_* type. */
+/* Returns mask of runlists that support a NV_DEVICE_HOST_RUNLIST_ENGINES_* type. */
 u64 nvif_fifo_runlist(struct nvif_device *, u64 engine);
 
 /* CE-supporting runlists (excluding GRCE, if others exist). */
 static inline u64
 nvif_fifo_runlist_ce(struct nvif_device *device)
 {
-	u64 runmgr = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);
-	u64 runmce = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_CE);
+	u64 runmgr = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
+	u64 runmce = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_CE);
 	if (runmce && !(runmce &= ~runmgr))
 		runmce = runmgr;
 	return runmce;
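
To unpack the mask arithmetic above: runmce starts as the mask of every CE-capable runlist, and runmce &= ~runmgr strips the runlists that also carry GR (i.e. the GRCE copy engines paired with graphics). If, say, runmgr is 0x1 and runmce is 0x5, the result 0x4 selects the dedicated copy runlist; if runmce were also 0x1, stripping would leave nothing, and the function falls back to the GRCE runlist mask rather than returning no runlist at all.
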
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index c920939a1467946c1c2281a1798d92dda5dc8623..a18b6cfda07e4a605d0b6e32d798b69b667a3e64 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -3,79 +3,7 @@
 #define __NVKM_DEVICE_H__
 #include <core/oclass.h>
 #include <core/event.h>
-
-enum nvkm_devidx {
-	NVKM_SUBDEV_PCI,
-	NVKM_SUBDEV_VBIOS,
-	NVKM_SUBDEV_DEVINIT,
-	NVKM_SUBDEV_TOP,
-	NVKM_SUBDEV_IBUS,
-	NVKM_SUBDEV_GPIO,
-	NVKM_SUBDEV_I2C,
-	NVKM_SUBDEV_FUSE,
-	NVKM_SUBDEV_MXM,
-	NVKM_SUBDEV_MC,
-	NVKM_SUBDEV_BUS,
-	NVKM_SUBDEV_TIMER,
-	NVKM_SUBDEV_INSTMEM,
-	NVKM_SUBDEV_FB,
-	NVKM_SUBDEV_LTC,
-	NVKM_SUBDEV_MMU,
-	NVKM_SUBDEV_BAR,
-	NVKM_SUBDEV_FAULT,
-	NVKM_SUBDEV_ACR,
-	NVKM_SUBDEV_PMU,
-	NVKM_SUBDEV_VOLT,
-	NVKM_SUBDEV_ICCSENSE,
-	NVKM_SUBDEV_THERM,
-	NVKM_SUBDEV_CLK,
-	NVKM_SUBDEV_GSP,
-
-	NVKM_ENGINE_BSP,
-
-	NVKM_ENGINE_CE0,
-	NVKM_ENGINE_CE1,
-	NVKM_ENGINE_CE2,
-	NVKM_ENGINE_CE3,
-	NVKM_ENGINE_CE4,
-	NVKM_ENGINE_CE5,
-	NVKM_ENGINE_CE6,
-	NVKM_ENGINE_CE7,
-	NVKM_ENGINE_CE8,
-	NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE8,
-
-	NVKM_ENGINE_CIPHER,
-	NVKM_ENGINE_DISP,
-	NVKM_ENGINE_DMAOBJ,
-	NVKM_ENGINE_FIFO,
-	NVKM_ENGINE_GR,
-	NVKM_ENGINE_IFB,
-	NVKM_ENGINE_ME,
-	NVKM_ENGINE_MPEG,
-	NVKM_ENGINE_MSENC,
-	NVKM_ENGINE_MSPDEC,
-	NVKM_ENGINE_MSPPP,
-	NVKM_ENGINE_MSVLD,
-
-	NVKM_ENGINE_NVENC0,
-	NVKM_ENGINE_NVENC1,
-	NVKM_ENGINE_NVENC2,
-	NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2,
-
-	NVKM_ENGINE_NVDEC0,
-	NVKM_ENGINE_NVDEC1,
-	NVKM_ENGINE_NVDEC2,
-	NVKM_ENGINE_NVDEC_LAST = NVKM_ENGINE_NVDEC2,
-
-	NVKM_ENGINE_PM,
-	NVKM_ENGINE_SEC,
-	NVKM_ENGINE_SEC2,
-	NVKM_ENGINE_SW,
-	NVKM_ENGINE_VIC,
-	NVKM_ENGINE_VP,
-
-	NVKM_SUBDEV_NR
-};
+enum nvkm_subdev_type;
 
 enum nvkm_device_type {
 	NVKM_DEVICE_PCI,
@@ -102,7 +30,6 @@ struct nvkm_device {
 
 	struct nvkm_event event;
 
-	u64 disable_mask;
 	u32 debug;
 
 	const struct nvkm_device_chip *chip;
@@ -130,58 +57,16 @@ struct nvkm_device {
 		struct notifier_block nb;
 	} acpi;
 
-	struct nvkm_acr *acr;
-	struct nvkm_bar *bar;
-	struct nvkm_bios *bios;
-	struct nvkm_bus *bus;
-	struct nvkm_clk *clk;
-	struct nvkm_devinit *devinit;
-	struct nvkm_fault *fault;
-	struct nvkm_fb *fb;
-	struct nvkm_fuse *fuse;
-	struct nvkm_gpio *gpio;
-	struct nvkm_gsp *gsp;
-	struct nvkm_i2c *i2c;
-	struct nvkm_subdev *ibus;
-	struct nvkm_iccsense *iccsense;
-	struct nvkm_instmem *imem;
-	struct nvkm_ltc *ltc;
-	struct nvkm_mc *mc;
-	struct nvkm_mmu *mmu;
-	struct nvkm_subdev *mxm;
-	struct nvkm_pci *pci;
-	struct nvkm_pmu *pmu;
-	struct nvkm_therm *therm;
-	struct nvkm_timer *timer;
-	struct nvkm_top *top;
-	struct nvkm_volt *volt;
-
-	struct nvkm_engine *bsp;
-	struct nvkm_engine *ce[9];
-	struct nvkm_engine *cipher;
-	struct nvkm_disp *disp;
-	struct nvkm_dma *dma;
-	struct nvkm_fifo *fifo;
-	struct nvkm_gr *gr;
-	struct nvkm_engine *ifb;
-	struct nvkm_engine *me;
-	struct nvkm_engine *mpeg;
-	struct nvkm_engine *msenc;
-	struct nvkm_engine *mspdec;
-	struct nvkm_engine *msppp;
-	struct nvkm_engine *msvld;
-	struct nvkm_nvenc *nvenc[3];
-	struct nvkm_nvdec *nvdec[3];
-	struct nvkm_pm *pm;
-	struct nvkm_engine *sec;
-	struct nvkm_sec2 *sec2;
-	struct nvkm_sw *sw;
-	struct nvkm_engine *vic;
-	struct nvkm_engine *vp;
+#define NVKM_LAYOUT_ONCE(type,data,ptr) data *ptr;
+#define NVKM_LAYOUT_INST(type,data,ptr,cnt) data *ptr[cnt];
+#include <core/layout.h>
+#undef NVKM_LAYOUT_INST
+#undef NVKM_LAYOUT_ONCE
+	struct list_head subdev;
 };
 
-struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int index);
-struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int index);
+struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int type, int inst);
+struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int type, int inst);
 
 struct nvkm_device_func {
 	struct nvkm_device_pci *(*pci)(struct nvkm_device *);
@@ -202,55 +87,15 @@ struct nvkm_device_quirk {
 
 struct nvkm_device_chip {
 	const char *name;
-
-	int (*acr     )(struct nvkm_device *, int idx, struct nvkm_acr **);
-	int (*bar     )(struct nvkm_device *, int idx, struct nvkm_bar **);
-	int (*bios    )(struct nvkm_device *, int idx, struct nvkm_bios **);
-	int (*bus     )(struct nvkm_device *, int idx, struct nvkm_bus **);
-	int (*clk     )(struct nvkm_device *, int idx, struct nvkm_clk **);
-	int (*devinit )(struct nvkm_device *, int idx, struct nvkm_devinit **);
-	int (*fault   )(struct nvkm_device *, int idx, struct nvkm_fault **);
-	int (*fb      )(struct nvkm_device *, int idx, struct nvkm_fb **);
-	int (*fuse    )(struct nvkm_device *, int idx, struct nvkm_fuse **);
-	int (*gpio    )(struct nvkm_device *, int idx, struct nvkm_gpio **);
-	int (*gsp     )(struct nvkm_device *, int idx, struct nvkm_gsp **);
-	int (*i2c     )(struct nvkm_device *, int idx, struct nvkm_i2c **);
-	int (*ibus    )(struct nvkm_device *, int idx, struct nvkm_subdev **);
-	int (*iccsense)(struct nvkm_device *, int idx, struct nvkm_iccsense **);
-	int (*imem    )(struct nvkm_device *, int idx, struct nvkm_instmem **);
-	int (*ltc     )(struct nvkm_device *, int idx, struct nvkm_ltc **);
-	int (*mc      )(struct nvkm_device *, int idx, struct nvkm_mc **);
-	int (*mmu     )(struct nvkm_device *, int idx, struct nvkm_mmu **);
-	int (*mxm     )(struct nvkm_device *, int idx, struct nvkm_subdev **);
-	int (*pci     )(struct nvkm_device *, int idx, struct nvkm_pci **);
-	int (*pmu     )(struct nvkm_device *, int idx, struct nvkm_pmu **);
-	int (*therm   )(struct nvkm_device *, int idx, struct nvkm_therm **);
-	int (*timer   )(struct nvkm_device *, int idx, struct nvkm_timer **);
-	int (*top     )(struct nvkm_device *, int idx, struct nvkm_top **);
-	int (*volt    )(struct nvkm_device *, int idx, struct nvkm_volt **);
-
-	int (*bsp     )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*ce[9]   )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*cipher  )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*disp    )(struct nvkm_device *, int idx, struct nvkm_disp **);
-	int (*dma     )(struct nvkm_device *, int idx, struct nvkm_dma **);
-	int (*fifo    )(struct nvkm_device *, int idx, struct nvkm_fifo **);
-	int (*gr      )(struct nvkm_device *, int idx, struct nvkm_gr **);
-	int (*ifb     )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*me      )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*mpeg    )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*msenc   )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*mspdec  )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*msppp   )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*msvld   )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_nvenc **);
-	int (*nvdec[3])(struct nvkm_device *, int idx, struct nvkm_nvdec **);
-	int (*pm      )(struct nvkm_device *, int idx, struct nvkm_pm **);
-	int (*sec     )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*sec2    )(struct nvkm_device *, int idx, struct nvkm_sec2 **);
-	int (*sw      )(struct nvkm_device *, int idx, struct nvkm_sw **);
-	int (*vic     )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*vp      )(struct nvkm_device *, int idx, struct nvkm_engine **);
+#define NVKM_LAYOUT_ONCE(type,data,ptr,...)                                                  \
+	struct {                                                                             \
+		u32 inst;                                                                    \
+		int (*ctor)(struct nvkm_device *, enum nvkm_subdev_type, int inst, data **); \
+	} ptr;
+#define NVKM_LAYOUT_INST(A...) NVKM_LAYOUT_ONCE(A)
+#include <core/layout.h>
+#undef NVKM_LAYOUT_INST
+#undef NVKM_LAYOUT_ONCE
 };
 
 struct nvkm_device *nvkm_device_find(u64 name);
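
To make the generated layout concrete: with the NVKM_LAYOUT_* definitions above, including <core/layout.h> stamps out the same members the hand-written lists used to declare, one per line of the layout file. For the struct nvkm_device body, for example:

    /* NVKM_LAYOUT_ONCE(NVKM_SUBDEV_PCI, struct nvkm_pci, pci) expands to: */
    struct nvkm_pci *pci;
    /* NVKM_LAYOUT_INST(NVKM_ENGINE_CE, struct nvkm_engine, ce, 10) expands to: */
    struct nvkm_engine *ce[10];

In struct nvkm_device_chip the same lines instead become { u32 inst; int (*ctor)(...); } entries, so the device members, the per-chip constructor table, and the enum nvkm_subdev_type values (generated in subdev.h below) all stay in lockstep from a single list. Note the nvkm_device_subdev()/nvkm_device_engine() prototypes take a plain int for the type, presumably because enum nvkm_subdev_type is only forward-declared at this point in the header.
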
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index c6b401a6ea236e1a66366a5c26e0fffdfcf54f04..e58923b67d7435abe9134cc76744d97d04c35581 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -6,12 +6,18 @@
 struct nvkm_fifo_chan;
 struct nvkm_fb_tile;
 
+extern const struct nvkm_subdev_func nvkm_engine;
+
 struct nvkm_engine {
 	const struct nvkm_engine_func *func;
 	struct nvkm_subdev subdev;
 	spinlock_t lock;
 
-	int usecount;
+	struct {
+		refcount_t refcount;
+		struct mutex mutex;
+		bool enabled;
+	} use;
 };
 
 struct nvkm_engine_func {
@@ -42,9 +48,10 @@ struct nvkm_engine_func {
 };
 
 int nvkm_engine_ctor(const struct nvkm_engine_func *, struct nvkm_device *,
-		     int index, bool enable, struct nvkm_engine *);
+		     enum nvkm_subdev_type, int inst, bool enable, struct nvkm_engine *);
 int nvkm_engine_new_(const struct nvkm_engine_func *, struct nvkm_device *,
-		     int index, bool enable, struct nvkm_engine **);
+		     enum nvkm_subdev_type, int inst, bool enable, struct nvkm_engine **);
+
 struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *);
 void nvkm_engine_unref(struct nvkm_engine **);
 void nvkm_engine_tile(struct nvkm_engine *, int region);
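
The new use block replaces the bare usecount with a refcount plus a mutex that serialises the enable/disable transitions. A sketch of the usual refcount idiom this enables (an illustration of the pattern, not necessarily this series' exact nvkm_engine_ref() body):

    /* Fast path is lock-free; only the 0->1 transition takes the mutex. */
    if (!refcount_inc_not_zero(&engine->use.refcount)) {
            mutex_lock(&engine->use.mutex);
            if (!refcount_inc_not_zero(&engine->use.refcount)) {
                    engine->use.enabled = true; /* one-time init goes here */
                    refcount_set(&engine->use.refcount, 1);
            }
            mutex_unlock(&engine->use.mutex);
    }

refcount_dec_and_mutex_lock() gives the matching last-reference path, so teardown cannot race a concurrent first user; the separate enabled flag records whether the one-time initialisation has already run.
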
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
index ce98efd4b209f5f0e814599c25c8e1ea2ca2b2e1..070462be35d9668381935df55fcc058d1d88ee4c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
@@ -8,6 +8,7 @@ struct nvkm_enum {
 	const char *name;
 	const void *data;
 	u32 data2;
+	int inst;
 };
 
 const struct nvkm_enum *nvkm_enum_find(const struct nvkm_enum *, u32 value);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
index 3981cb106aae7db6b7920806d899e41c6be2d8ea..fd9a3f9a518e8dae1ef0c2272679032b4e60f9dd 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
@@ -21,11 +21,11 @@ void nvkm_falcon_v1_disable(struct nvkm_falcon *);
 void gp102_sec2_flcn_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
 int gp102_sec2_flcn_enable(struct nvkm_falcon *);
 
-#define FLCN_PRINTK(t,f,fmt,a...) do {                                         \
-	if (nvkm_subdev_name[(f)->owner->index] != (f)->name)                  \
-		nvkm_##t((f)->owner, "%s: "fmt"\n", (f)->name, ##a);           \
-	else                                                                   \
-		nvkm_##t((f)->owner, fmt"\n", ##a);                            \
+#define FLCN_PRINTK(t,f,fmt,a...) do {                               \
+	if ((f)->owner->name != (f)->name)                           \
+		nvkm_##t((f)->owner, "%s: "fmt"\n", (f)->name, ##a); \
+	else                                                         \
+		nvkm_##t((f)->owner, fmt"\n", ##a);                  \
 } while(0)
 #define FLCN_DBG(f,fmt,a...) FLCN_PRINTK(debug, (f), fmt, ##a)
 #define FLCN_ERR(f,fmt,a...) FLCN_PRINTK(error, (f), fmt, ##a)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
new file mode 100644
index 0000000000000000000000000000000000000000..7afe1579b20f433b26771791095f06d3c9bc4c69
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_PCI     , struct nvkm_pci     ,      pci)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VBIOS   , struct nvkm_bios    ,     bios)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_DEVINIT , struct nvkm_devinit ,  devinit)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TOP     , struct nvkm_top     ,      top)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_PRIVRING, struct nvkm_subdev  , privring)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_GPIO    , struct nvkm_gpio    ,     gpio)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_I2C     , struct nvkm_i2c     ,      i2c)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FUSE    , struct nvkm_fuse    ,     fuse)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_MXM     , struct nvkm_subdev  ,      mxm)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_MC      , struct nvkm_mc      ,       mc)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_BUS     , struct nvkm_bus     ,      bus)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TIMER   , struct nvkm_timer   ,    timer)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_INSTMEM , struct nvkm_instmem ,     imem)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FB      , struct nvkm_fb      ,       fb)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_LTC     , struct nvkm_ltc     ,      ltc)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_MMU     , struct nvkm_mmu     ,      mmu)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_BAR     , struct nvkm_bar     ,      bar)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FAULT   , struct nvkm_fault   ,    fault)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_ACR     , struct nvkm_acr     ,      acr)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_PMU     , struct nvkm_pmu     ,      pmu)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VOLT    , struct nvkm_volt    ,     volt)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_ICCSENSE, struct nvkm_iccsense, iccsense)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_THERM   , struct nvkm_therm   ,    therm)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_CLK     , struct nvkm_clk     ,      clk)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_GSP     , struct nvkm_gsp     ,      gsp)
+NVKM_LAYOUT_INST(NVKM_SUBDEV_IOCTRL  , struct nvkm_subdev  ,   ioctrl, 3)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FLA     , struct nvkm_subdev  ,      fla)
+
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_BSP     , struct nvkm_engine  ,      bsp)
+NVKM_LAYOUT_INST(NVKM_ENGINE_CE      , struct nvkm_engine  ,       ce, 10)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_CIPHER  , struct nvkm_engine  ,   cipher)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_DISP    , struct nvkm_disp    ,     disp)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_DMAOBJ  , struct nvkm_dma     ,      dma)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_FIFO    , struct nvkm_fifo    ,     fifo)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_GR      , struct nvkm_gr      ,       gr)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_IFB     , struct nvkm_engine  ,      ifb)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_ME      , struct nvkm_engine  ,       me)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_MPEG    , struct nvkm_engine  ,     mpeg)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSENC   , struct nvkm_engine  ,    msenc)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPDEC  , struct nvkm_engine  ,   mspdec)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPPP   , struct nvkm_engine  ,    msppp)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSVLD   , struct nvkm_engine  ,    msvld)
+NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC   , struct nvkm_nvdec   ,    nvdec, 5)
+NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC   , struct nvkm_nvenc   ,    nvenc, 3)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_NVJPG   , struct nvkm_engine  ,    nvjpg)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_OFA     , struct nvkm_engine  ,      ofa)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_PM      , struct nvkm_pm      ,       pm)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC     , struct nvkm_engine  ,      sec)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC2    , struct nvkm_sec2    ,     sec2)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_SW      , struct nvkm_sw      ,       sw)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_VIC     , struct nvkm_engine  ,      vic)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_VP      , struct nvkm_engine  ,       vp)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 76288c682e9eaed70332e5ce0a19bf47b0faace2..1665738948fb474127ec67d3b9ea64653c94b829 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -3,13 +3,25 @@
 #define __NVKM_SUBDEV_H__
 #include <core/device.h>
 
+enum nvkm_subdev_type {
+#define NVKM_LAYOUT_ONCE(t,s,p,...) t,
+#define NVKM_LAYOUT_INST NVKM_LAYOUT_ONCE
+#include <core/layout.h>
+#undef NVKM_LAYOUT_INST
+#undef NVKM_LAYOUT_ONCE
+	NVKM_SUBDEV_NR
+};
+
 struct nvkm_subdev {
 	const struct nvkm_subdev_func *func;
 	struct nvkm_device *device;
-	enum nvkm_devidx index;
-	struct mutex mutex;
+	enum nvkm_subdev_type type;
+	int inst;
+	char name[16];
 	u32 debug;
+	struct list_head head;
 
+	void **pself;
 	bool oneinit;
 };
 
@@ -23,11 +35,12 @@ struct nvkm_subdev_func {
 	void (*intr)(struct nvkm_subdev *);
 };
 
-extern const char *nvkm_subdev_name[NVKM_SUBDEV_NR];
-int nvkm_subdev_new_(const struct nvkm_subdev_func *, struct nvkm_device *,
-		     int index, struct nvkm_subdev **);
+extern const char *nvkm_subdev_type[NVKM_SUBDEV_NR];
+int nvkm_subdev_new_(const struct nvkm_subdev_func *, struct nvkm_device *, enum nvkm_subdev_type,
+		     int inst, struct nvkm_subdev **);
 void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
-		      int index, struct nvkm_subdev *);
+		      enum nvkm_subdev_type, int inst, struct nvkm_subdev *);
+void nvkm_subdev_disable(struct nvkm_device *, enum nvkm_subdev_type, int inst);
 void nvkm_subdev_del(struct nvkm_subdev **);
 int  nvkm_subdev_preinit(struct nvkm_subdev *);
 int  nvkm_subdev_init(struct nvkm_subdev *);
@@ -38,10 +51,8 @@ void nvkm_subdev_intr(struct nvkm_subdev *);
 /* subdev logging */
 #define nvkm_printk_(s,l,p,f,a...) do {                                        \
 	const struct nvkm_subdev *_subdev = (s);                               \
-	if (CONFIG_NOUVEAU_DEBUG >= (l) && _subdev->debug >= (l)) {            \
-		dev_##p(_subdev->device->dev, "%s: "f,                         \
-			nvkm_subdev_name[_subdev->index], ##a);                \
-	}                                                                      \
+	if (CONFIG_NOUVEAU_DEBUG >= (l) && _subdev->debug >= (l))              \
+		dev_##p(_subdev->device->dev, "%s: "f, _subdev->name, ##a);    \
 } while(0)
 #define nvkm_printk(s,l,p,f,a...) nvkm_printk_((s), NV_DBG_##l, p, f, ##a)
 #define nvkm_fatal(s,f,a...) nvkm_printk((s), FATAL,   crit, f, ##a)
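
With the name stored in the subdev itself, the logging macro above no longer indexes a global table at print time (and FLCN_PRINTK earlier in this series compares names the same way). A plausible sketch of how the name is composed at construction, assuming nvkm_subdev_ctor() combines the nvkm_subdev_type[] string with the instance number (hypothetical detail, shown for clarity):

    if (subdev->inst >= 0)
            snprintf(subdev->name, sizeof(subdev->name), "%s%d",
                     nvkm_subdev_type[subdev->type], subdev->inst);
    else
            snprintf(subdev->name, sizeof(subdev->name), "%s",
                     nvkm_subdev_type[subdev->type]);

A second copy engine thus logs as "ce1" without needing a distinct NVKM_ENGINE_CE1 enumerant, which is what lets the enum collapse to one entry per type.
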
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
index f938f024db8107e95ddc3d6925bc13231943d616..d5530faf025e2306d720dc6ac2b7160c4a6a4fdc 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
@@ -2,5 +2,5 @@
 #ifndef __NVKM_BSP_H__
 #define __NVKM_BSP_H__
 #include <engine/xtensa.h>
-int g84_bsp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g84_bsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index 86f420f4630b28320f17960000ff616eab9399fc..cfd2da8e66fed60efd4a136bfedbe54d452aa5b1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -3,13 +3,13 @@
 #define __NVKM_CE_H__
 #include <engine/falcon.h>
 
-int gt215_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gv100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int tu102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gt215_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gf100_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gk104_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gm107_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gm200_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gp100_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gp102_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gv100_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int tu102_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
index 66c5c5e275208f51157e411cf30047dd6d5309d5..9da9176bbbbf23c3659ebedd4d7b8f79d705c0f8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
@@ -2,5 +2,5 @@
 #ifndef __NVKM_CIPHER_H__
 #define __NVKM_CIPHER_H__
 #include <core/engine.h>
-int g84_cipher_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g84_cipher_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index 0f6fa6631a197aaa7f26479e5bf24192678a0091..d08d3337ba0d0ea1cc8aded5bdf3f8e948250001 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -17,25 +17,28 @@ struct nvkm_disp {
 	struct nvkm_event hpd;
 	struct nvkm_event vblank;
 
-	struct nvkm_oproxy *client;
+	struct {
+		spinlock_t lock;
+		struct nvkm_oproxy *object;
+	} client;
 };
 
-int nv04_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int nv50_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int g84_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gt200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int g94_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int mcp77_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gt215_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int mcp89_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gf119_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int tu102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int ga102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int nv04_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int nv50_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int g84_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gt200_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int g94_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int mcp77_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gt215_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int mcp89_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gf119_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gk104_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gk110_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gm107_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gm200_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gp100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gp102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gv100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int tu102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int ga102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
index 2e12cdb6bb9357f0a77d84563a7003b07d8b38c9..a003da39fd13d270dc28153cef35446cca8b8a0a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -23,9 +23,9 @@ struct nvkm_dma {
 
 struct nvkm_dmaobj *nvkm_dmaobj_search(struct nvkm_client *, u64 object);
 
-int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
-int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
-int gf100_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
-int gf119_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
-int gv100_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+int nv04_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **);
+int nv50_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **);
+int gf100_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **);
+int gf119_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **);
+int gv100_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index 27c1f868552cdb4380d0904cfda06eab792c725c..306125d17ecedc019e8c24bbcc8c5b52cd224e28 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -64,7 +64,7 @@ int nvkm_falcon_get(struct nvkm_falcon *, const struct nvkm_subdev *);
 void nvkm_falcon_put(struct nvkm_falcon *, const struct nvkm_subdev *);
 
 int nvkm_falcon_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
-		     int index, bool enable, u32 addr, struct nvkm_engine **);
+		     enum nvkm_subdev_type, int inst, bool enable, u32 addr, struct nvkm_engine **);
 
 struct nvkm_falcon_func {
 	struct {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index b335f3a1e66da0810378c486b0d76230dee6d213..54fab7cc36c1b84ba925ffa78a846fe0918a02bc 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -7,6 +7,7 @@
 struct nvkm_fault_data;
 
 #define NVKM_FIFO_CHID_NR 4096
+#define NVKM_FIFO_ENGN_NR 16
 
 struct nvkm_fifo_engn {
 	struct nvkm_object *object;
@@ -17,7 +18,7 @@ struct nvkm_fifo_engn {
 struct nvkm_fifo_chan {
 	const struct nvkm_fifo_chan_func *func;
 	struct nvkm_fifo *fifo;
-	u64 engines;
+	u32 engm;
 	struct nvkm_object object;
 
 	struct list_head head;
@@ -29,7 +30,7 @@ struct nvkm_fifo_chan {
 	u64 addr;
 	u32 size;
 
-	struct nvkm_fifo_engn engn[NVKM_SUBDEV_NR];
+	struct nvkm_fifo_engn engn[NVKM_FIFO_ENGN_NR];
 };
 
 struct nvkm_fifo {
@@ -40,6 +41,7 @@ struct nvkm_fifo {
 	int nr;
 	struct list_head chan;
 	spinlock_t lock;
+	struct mutex mutex;
 
 	struct nvkm_event uevent; /* async user trigger */
 	struct nvkm_event cevent; /* channel creation event */
@@ -57,22 +59,22 @@ nvkm_fifo_chan_inst(struct nvkm_fifo *, u64 inst, unsigned long *flags);
 struct nvkm_fifo_chan *
 nvkm_fifo_chan_chid(struct nvkm_fifo *, int chid, unsigned long *flags);
 
-int nv04_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int nv10_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int nv17_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int nv40_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int nv50_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int g84_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gf100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gk104_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gk110_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gk208_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gp10b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gv100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int tu102_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv04_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int nv10_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int nv17_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int nv40_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int nv50_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int g84_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gf100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gk104_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gk110_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gk208_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gk20a_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gm107_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gm200_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gm20b_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gp100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gp10b_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gv100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int tu102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 1530c81f86a28548b0db6eea8092faf40b4c7e42..b28b752ffaa255db083aacf9ae500e183b22983e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -14,44 +14,44 @@ int nvkm_gr_ctxsw_pause(struct nvkm_device *);
 int nvkm_gr_ctxsw_resume(struct nvkm_device *);
 u32 nvkm_gr_ctxsw_inst(struct nvkm_device *);
 
-int nv04_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv10_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv15_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv17_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv20_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv25_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv2a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv30_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv34_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv35_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv40_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv44_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv50_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int g84_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gt200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int mcp79_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gt215_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int mcp89_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf108_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf110_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf117_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf119_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gk104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gk110_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gk110b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gk208_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp108_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gv100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int tu102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv04_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv10_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv15_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv17_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv20_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv25_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv2a_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv30_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv34_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv35_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv40_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv44_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv50_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int g84_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gt200_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int mcp79_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gt215_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int mcp89_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf104_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf108_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf110_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf117_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf119_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gk104_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gk110_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gk110b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gk208_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gk20a_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gm107_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gm200_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gm20b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp104_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp107_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp108_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp10b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gv100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int tu102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
index 8585a31f59432725feced87d248021192fa1be71..f137f277935f5be81e3c522134fa23493ac83e76 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
@@ -2,9 +2,9 @@
 #ifndef __NVKM_MPEG_H__
 #define __NVKM_MPEG_H__
 #include <core/engine.h>
-int nv31_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
-int nv40_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
-int nv44_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
-int nv50_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
-int g84_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int nv31_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int nv40_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int nv44_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int nv50_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int g84_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
index 83bb2fcb2cbf5831a04a71d4a10661e922d290c0..ac8f08ce183ce3782c28d3fef6811c6f9f2bc530 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
@@ -2,8 +2,8 @@
 #ifndef __NVKM_MSPDEC_H__
 #define __NVKM_MSPDEC_H__
 #include <engine/falcon.h>
-int g98_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gt215_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gf100_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gk104_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g98_mspdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gt215_mspdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gf100_mspdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gk104_mspdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
index 69e09fd96e0cbb35ab8f56d1c08cc2afac338157..81c2b6f0ad845547ce4107c9a608c929d4c7af59 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
@@ -2,7 +2,7 @@
 #ifndef __NVKM_MSPPP_H__
 #define __NVKM_MSPPP_H__
 #include <engine/falcon.h>
-int g98_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gt215_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gf100_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g98_msppp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gt215_msppp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gf100_msppp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
index 9e11cefc9649d28095d3aa53b62935d89f8377f5..2d5fa961ba663f2094042f17362c47dd31f6c05c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
@@ -2,9 +2,9 @@
 #ifndef __NVKM_MSVLD_H__
 #define __NVKM_MSVLD_H__
 #include <engine/falcon.h>
-int g98_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gt215_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
-int mcp89_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gf100_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gk104_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g98_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gt215_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int mcp89_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gf100_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gk104_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index 1b3183e31606823a7d5c35f0926b6e13d1843a8d..97bd3092f68a6c520c6c13258e53726fbd757f3d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -11,5 +11,5 @@ struct nvkm_nvdec {
 	struct nvkm_falcon falcon;
 };
 
-int gm107_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **);
+int gm107_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
index 33e6ba8adc8dc7ab0972349b004657ddf70f207d..1a259c5c9a7140e12be1e5c130cc255fe9ea074e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -11,5 +11,5 @@ struct nvkm_nvenc {
 	struct nvkm_falcon falcon;
 };
 
-int gm107_nvenc_new(struct nvkm_device *, int, struct nvkm_nvenc **);
+int gm107_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
index 4d754e7650d9c2263b38e077e1d045c620836a5b..af89d46ea3603ad3423bda116b86d0585e8e014f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
@@ -7,20 +7,23 @@ struct nvkm_pm {
 	const struct nvkm_pm_func *func;
 	struct nvkm_engine engine;
 
-	struct nvkm_object *perfmon;
+	struct {
+		spinlock_t lock;
+		struct nvkm_object *object;
+	} client;
 
 	struct list_head domains;
 	struct list_head sources;
 	u32 sequence;
 };
 
-int nv40_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int nv50_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int g84_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gt200_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gt215_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gf100_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gf108_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gf117_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gk104_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int nv40_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int nv50_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int g84_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gt200_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gt215_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gf100_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gf108_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gf117_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gk104_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
index f14e98a8a0cafdbd611af97159f52ec198b78d86..37ed7ab8d050ae346de152cf58937d119ffb07a0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
@@ -2,5 +2,5 @@
 #ifndef __NVKM_SEC_H__
 #define __NVKM_SEC_H__
 #include <engine/falcon.h>
-int g98_sec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g98_sec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
index 34dc765648d56f371b024f8ca4448701b85fcbea..06264c840eae6a55ca7063d40fcbef3b66a2bf28 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
@@ -18,7 +18,7 @@ struct nvkm_sec2 {
 	bool initmsg_received;
 };
 
-int gp102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
-int gp108_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
-int tu102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
+int gp102_sec2_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sec2 **);
+int gp108_sec2_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sec2 **);
+int tu102_sec2_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sec2 **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
index 2e91769e3ee29f7779c4cb667e42a3ce4175e6ac..b1a53ffbfdef6afbf2a5e4cc3916074c883f3c90 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
@@ -12,8 +12,8 @@ struct nvkm_sw {
 
 bool nvkm_sw_mthd(struct nvkm_sw *sw, int chid, int subc, u32 mthd, u32 data);
 
-int nv04_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
-int nv10_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
-int nv50_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
-int gf100_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
+int nv04_sw_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sw **);
+int nv10_sw_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sw **);
+int nv50_sw_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sw **);
+int gf100_sw_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sw **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
index 8984415b2a3d5ed24417265fbf9abaebc1862281..1bab268585381fce7d3ce23661e894105fd0dd3a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
@@ -2,5 +2,5 @@
 #ifndef __NVKM_VP_H__
 #define __NVKM_VP_H__
 #include <engine/xtensa.h>
-int g84_vp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g84_vp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
index fbf27b2293a9f9c1200bf3b217bc51ecf661e7b7..3083a5866a5575ba43d5f87023bc7b8394e1fbd0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
@@ -13,7 +13,7 @@ struct nvkm_xtensa {
 };
 
 int nvkm_xtensa_new_(const struct nvkm_xtensa_func *, struct nvkm_device *,
-		     int index, bool enable, u32 addr, struct nvkm_engine **);
+		     enum nvkm_subdev_type, int inst, bool enable, u32 addr, struct nvkm_engine **);
 
 struct nvkm_xtensa_func {
 	u32 fifo_val;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
index 836d8b93282296718f120df97ef2a60070db1d5e..c0b254f7f0b5b4b828cc23340f00da465c7f46b3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
@@ -59,12 +59,12 @@ struct nvkm_acr {
 bool nvkm_acr_managed_falcon(struct nvkm_device *, enum nvkm_acr_lsf_id);
 int nvkm_acr_bootstrap_falcons(struct nvkm_device *, unsigned long mask);
 
-int gm200_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
-int gm20b_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
-int gp102_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
-int gp108_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
-int gp10b_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
-int tu102_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gm200_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
+int gm20b_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
+int gp102_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
+int gp108_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
+int gp10b_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
+int tu102_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
 
 struct nvkm_acr_lsfw {
 	const struct nvkm_acr_lsf_func *func;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index 14b09f7e46a579a6940b0efc7a518653f39e07bc..4f07836ab984e74636d8943b3f9e1700f054e2fb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -23,11 +23,11 @@ void nvkm_bar_bar2_reset(struct nvkm_device *);
 struct nvkm_vmm *nvkm_bar_bar2_vmm(struct nvkm_device *);
 void nvkm_bar_flush(struct nvkm_bar *);
 
-int nv50_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int g84_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int gm107_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int gm20b_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int tu102_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int nv50_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int g84_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int gf100_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int gk20a_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int gm107_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int gm20b_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int tu102_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
index f2860f8e0c2e111f3e2a12c5f77933479ef74897..b61cfb077533ac5c77a0cb9a3136db89e4ee138f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
@@ -30,5 +30,5 @@ u8  nvbios_rd08(struct nvkm_bios *, u32 addr);
 u16 nvbios_rd16(struct nvkm_bios *, u32 addr);
 u32 nvbios_rd32(struct nvkm_bios *, u32 addr);
 
-int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **);
+int nvkm_bios_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bios **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
index f5f59261ea819f694b3999029f0e1068cbb99c63..d1beaad0c82b624bc7e909ca49483b07e02cf64b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
@@ -14,6 +14,7 @@ enum dcb_connector_type {
 	DCB_CONNECTOR_LVDS_SPWG = 0x41,
 	DCB_CONNECTOR_DP = 0x46,
 	DCB_CONNECTOR_eDP = 0x47,
+	DCB_CONNECTOR_mDP = 0x48,
 	DCB_CONNECTOR_HDMI_0 = 0x60,
 	DCB_CONNECTOR_HDMI_1 = 0x61,
 	DCB_CONNECTOR_HDMI_C = 0x63,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
index ae9ad6c034fbddf9c82f4214daad600754e687d7..2ac03bbc6133d5beb2c0bfc245eb6e456c7b0856 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
@@ -18,9 +18,9 @@ void nvkm_hwsq_wait(struct nvkm_hwsq *, u8 flag, u8 data);
 void nvkm_hwsq_wait_vblank(struct nvkm_hwsq *);
 void nvkm_hwsq_nsec(struct nvkm_hwsq *, u32 nsec);
 
-int nv04_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
-int nv31_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
-int nv50_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
-int g94_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
-int gf100_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int nv04_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **);
+int nv31_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **);
+int nv50_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **);
+int g94_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **);
+int gf100_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index bf937e7dfd779ba423aa79b12f85dd89af535996..05b99c9e9a26abe1ecc1da2e79a95aedf62730b1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -125,14 +125,14 @@ int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
 int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
 int nvkm_clk_tstate(struct nvkm_clk *, u8 temperature);
 
-int nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int nv50_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int g84_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int mcp77_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int gt215_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int gf100_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int gk104_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int gk20a_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int gm20b_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int nv04_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int nv40_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int nv50_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int g84_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int mcp77_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gt215_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gf100_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gk104_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gk20a_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gm20b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 50cc7c05eac49c64587bc3c90b24994e82505566..848b5d9ce7055f4340960f92a1f252828d966474 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -14,23 +14,22 @@ struct nvkm_devinit {
 u32 nvkm_devinit_mmio(struct nvkm_devinit *, u32 addr);
 int nvkm_devinit_pll_set(struct nvkm_devinit *, u32 type, u32 khz);
 void nvkm_devinit_meminit(struct nvkm_devinit *);
-u64 nvkm_devinit_disable(struct nvkm_devinit *);
-int nvkm_devinit_post(struct nvkm_devinit *, u64 *disable);
+int nvkm_devinit_post(struct nvkm_devinit *);
 
-int nv04_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int nv05_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int nv10_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int nv1a_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int nv20_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int nv50_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int g84_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int g98_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gt215_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int mcp89_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int tu102_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int ga100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv04_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int nv05_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int nv10_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int nv1a_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int nv20_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int nv50_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int g84_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int g98_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int gt215_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int mcp89_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int gf100_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int gm107_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int gm200_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int gv100_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int tu102_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int ga100_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
index a513c16ab10569a595eb573e60ee9687aac26b65..581458ad38e064b6e9ffaf9fe40d7c7e40305ae2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -30,8 +30,8 @@ struct nvkm_fault_data {
 	u8 reason;
 };
 
-int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
-int gp10b_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
-int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
-int tu102_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
+int gp100_fault_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fault **);
+int gp10b_fault_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fault **);
+int gv100_fault_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fault **);
+int tu102_fault_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fault **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 2ecd52aec1d121d050844e1e2e8b425c64c54aee..ef6a6297148c13200ca3f80b7b9fe33340c28c38 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -36,7 +36,11 @@ struct nvkm_fb {
 	struct nvkm_blob vpr_scrubber;
 
 	struct nvkm_ram *ram;
-	struct nvkm_mm tags;
+
+	struct {
+		struct mutex mutex; /* protects mm and nvkm_memory::tags */
+		struct nvkm_mm mm;
+	} tags;
 
 	struct {
 		struct nvkm_fb_tile region[16];
@@ -54,40 +58,40 @@ void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
 void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
 void nvkm_fb_tile_prog(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
 
-int nv04_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv10_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv1a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv20_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv25_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv30_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv35_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv36_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv40_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv41_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv44_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv46_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv47_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv49_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv4e_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv50_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int g84_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gt215_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int mcp77_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gf108_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gk110_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gm20b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gv100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int ga100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int ga102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv04_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv10_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv1a_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv20_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv25_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv30_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv35_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv36_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv40_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv41_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv44_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv46_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv47_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv49_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv4e_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv50_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int g84_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gt215_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int mcp77_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int mcp89_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gf100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gf108_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gk104_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gk110_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gk20a_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gm107_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gm200_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gm20b_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gp100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gp102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gp10b_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gv100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int ga100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int ga102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
 
 #include <subdev/bios.h>
 #include <subdev/bios/ramcfg.h>
@@ -128,6 +132,7 @@ struct nvkm_ram {
 #define NVKM_RAM_MM_MIXED  (NVKM_MM_HEAP_ANY + 3)
 	struct nvkm_mm vram;
 	u64 stolen;
+	struct mutex mutex;
 
 	int ranks;
 	int parts;
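
nvkm_fb now pairs its comptag allocator with the mutex that guards it (and
nvkm_memory::tags), instead of exposing a bare nvkm_mm.  A hedged sketch of
what a locked allocation might look like, assuming the stock nvkm_mm_head()
helper from nvkm_mm.h -- not the patch's own implementation:

	/* Illustrative sketch only: allocate 'count' comptags under
	 * the new fb->tags.mutex.
	 */
	static int
	example_tags_alloc(struct nvkm_fb *fb, u32 count,
			   struct nvkm_mm_node **pnode)
	{
		int ret;

		mutex_lock(&fb->tags.mutex);
		ret = nvkm_mm_head(&fb->tags.mm, 0, 1, count, count, 1, pnode);
		mutex_unlock(&fb->tags.mutex);
		return ret;
	}
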
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
index 00111c34311e7794a5967d93b9537047e19e53b3..dabbef0ac96cbad423d57d92987d1b259c0692c6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
@@ -11,7 +11,7 @@ struct nvkm_fuse {
 
 u32 nvkm_fuse_read(struct nvkm_fuse *, u32 addr);
 
-int nv50_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
-int gf100_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
-int gm107_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
+int nv50_fuse_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fuse **);
+int gf100_fuse_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fuse **);
+int gm107_fuse_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fuse **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
index cdcce5ece6ff5973426f791cb4f02fc27537a904..0e46ea1fe97291296bb73ab66d508b3674e13077 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
@@ -32,10 +32,10 @@ int nvkm_gpio_find(struct nvkm_gpio *, int idx, u8 tag, u8 line,
 int nvkm_gpio_set(struct nvkm_gpio *, int idx, u8 tag, u8 line, int state);
 int nvkm_gpio_get(struct nvkm_gpio *, int idx, u8 tag, u8 line);
 
-int nv10_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
-int nv50_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
-int g94_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
-int gf119_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
-int gk104_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
-int ga102_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int nv10_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
+int nv50_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
+int g94_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
+int gf119_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
+int gk104_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
+int ga102_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 06db67610a5002d90fbed4a962d215203b045f99..cf42a59d4e58f66264e7c4429d00bd2cc2819d4a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -9,5 +9,5 @@ struct nvkm_gsp {
 	struct nvkm_falcon falcon;
 };
 
-int gv100_gsp_new(struct nvkm_device *, int, struct nvkm_gsp **);
+int gv100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index 640f649ce497e4a9c93f8fc5ad1940f4ba205125..146e13292203df767794823272d5849e4a5a53c8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -85,15 +85,15 @@ struct nvkm_i2c {
 struct nvkm_i2c_bus *nvkm_i2c_bus_find(struct nvkm_i2c *, int);
 struct nvkm_i2c_aux *nvkm_i2c_aux_find(struct nvkm_i2c *, int);
 
-int nv04_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int nv4e_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int nv50_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int gk110_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int gm200_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int nv04_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int nv4e_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int nv50_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int g94_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int gf117_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int gf119_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int gk104_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int gk110_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int gm200_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
 
 static inline int
 nvkm_rdi2cr(struct i2c_adapter *adap, u8 addr, u8 reg)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
deleted file mode 100644
index db791411eaa8bfd2e7b91045d73a1633809bcd13..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_IBUS_H__
-#define __NVKM_IBUS_H__
-#include <core/subdev.h>
-
-int gf100_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gf117_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gm200_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gp10b_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
index f483dcd7cd1c9405e6d984e39e28afc35a2a2978..7400d62dcbec49086a5176b27acdcf1c6ff7011a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
@@ -14,6 +14,6 @@ struct nvkm_iccsense {
 	u32 power_w_crit;
 };
 
-int gf100_iccsense_new(struct nvkm_device *, int index, struct nvkm_iccsense **);
+int gf100_iccsense_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_iccsense **);
 int nvkm_iccsense_read_all(struct nvkm_iccsense *iccsense);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index c74ab7c31d052a1f80afec4c16963272ce7d750b..f967b97d163c0a444bd4813fd41ce388aa3feafc 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -13,6 +13,11 @@ struct nvkm_instmem {
 	struct list_head boot;
 	u32 reserved;
 
+	/* <=nv4x: protects NV_PRAMIN/BAR2 MM
+	 * >=nv50: protects BAR2 MM & LRU
+	 */
+	struct mutex mutex;
+
 	struct nvkm_memory *vbios;
 	struct nvkm_ramht  *ramht;
 	struct nvkm_memory *ramro;
@@ -25,8 +30,8 @@ int nvkm_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero,
 		     struct nvkm_memory **);
 
 
-int nv04_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
-int nv40_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
-int nv50_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
-int gk20a_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
+int nv04_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
+int nv40_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
+int nv50_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
+int gk20a_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index d76f60d7d29a6924b32910211d02ca1c46b1c547..d32a326a9290740c26015654a7610a61f4c72848 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -13,6 +13,7 @@ struct nvkm_ltc {
 	u32 ltc_nr;
 	u32 lts_nr;
 
+	struct mutex mutex; /* serialises CBC operations */
 	u32 num_tags;
 	u32 tag_base;
 	struct nvkm_memory *tag_ram;
@@ -33,12 +34,11 @@ int nvkm_ltc_zbc_stencil_get(struct nvkm_ltc *, int index, const u32);
 void nvkm_ltc_invalidate(struct nvkm_ltc *);
 void nvkm_ltc_flush(struct nvkm_ltc *);
 
-int gf100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gp102_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gp10b_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gf100_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gk104_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gm107_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gm200_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gp100_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gp102_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gp10b_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index e45ca458396709be315edb7cf88c36604db96dc4..cb86a56e68d4f86f82537c2a9662cfd7ce4b3f16 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -8,29 +8,29 @@ struct nvkm_mc {
 	struct nvkm_subdev subdev;
 };
 
-void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx);
-void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx);
-bool nvkm_mc_enabled(struct nvkm_device *, enum nvkm_devidx);
-void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_enable(struct nvkm_device *, enum nvkm_subdev_type, int);
+void nvkm_mc_disable(struct nvkm_device *, enum nvkm_subdev_type, int);
+bool nvkm_mc_enabled(struct nvkm_device *, enum nvkm_subdev_type, int);
+void nvkm_mc_reset(struct nvkm_device *, enum nvkm_subdev_type, int);
 void nvkm_mc_intr(struct nvkm_device *, bool *handled);
 void nvkm_mc_intr_unarm(struct nvkm_device *);
 void nvkm_mc_intr_rearm(struct nvkm_device *);
-void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_devidx, bool enable);
+void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_subdev_type, int, bool enable);
 void nvkm_mc_unk260(struct nvkm_device *, u32 data);
 
-int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int nv11_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int nv17_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int nv44_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int nv50_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int g84_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int g98_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gt215_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int tu102_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int ga100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int nv04_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int nv11_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int nv17_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int nv44_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int nv50_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int g84_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int g98_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gt215_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gf100_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gk104_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gk20a_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gp100_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gp10b_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int tu102_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int ga100_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
 #endif
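
The MC entry points drop enum nvkm_devidx in favour of the same
(type, instance) addressing used throughout the series.  An illustrative
call, assuming NVKM_ENGINE_GR is the nvkm_subdev_type value for graphics:

	/* Illustrative only, not part of the patch: reset the first GR
	 * engine instance under the new interface; the old call took a
	 * single nvkm_devidx argument instead.
	 */
	static void
	example_reset_gr(struct nvkm_device *device)
	{
		nvkm_mc_reset(device, NVKM_ENGINE_GR, 0);
	}
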
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 54cdcb017518660b5509604524db932eee391690..0911e73f742464a4d23e1c13c87339699b75ec1b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -117,22 +117,24 @@ struct nvkm_mmu {
 		struct list_head list;
 	} ptc, ptp;
 
+	struct mutex mutex; /* serialises mmu invalidations */
+
 	struct nvkm_device_oclass user;
 };
 
-int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gv100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int tu102_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int nv04_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int nv41_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int nv44_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int nv50_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int g84_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int mcp77_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gf100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gk104_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gk20a_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gm200_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gm20b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gp100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gp10b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gv100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int tu102_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
index 78df1e9def05b098cd571ec0da2e2115a5766bd8..7d4132a17d0fe4cd453b7f7fedf2002e5a7c393a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
@@ -3,5 +3,5 @@
 #define __NVKM_MXM_H__
 #include <core/subdev.h>
 
-int nv50_mxm_new(struct nvkm_device *, int, struct nvkm_subdev **);
+int nv50_mxm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index 4803a4fad4a27dfd5003f46744bc2407a2d0f3a2..74c19bdfb757c3112ee53d3dea7beffdda4889d6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -39,17 +39,17 @@ void nvkm_pci_wr32(struct nvkm_pci *, u16 addr, u32 data);
 u32 nvkm_pci_mask(struct nvkm_pci *, u16 addr, u32 mask, u32 value);
 void nvkm_pci_rom_shadow(struct nvkm_pci *, bool shadow);
 
-int nv04_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int nv40_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int nv46_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int g84_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int g92_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int gk104_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int gp100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int nv04_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int nv40_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int nv46_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int nv4c_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int g84_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int g92_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int g94_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int gf100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int gf106_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int gk104_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int gp100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
 
 /* pcie functions */
 int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 5ff6d1f8985a438bb6daf496363ba384a9081304..f57a3a5a288d303274e357055c17a51319fa5cf3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -18,6 +18,7 @@ struct nvkm_pmu {
 	struct completion wpr_ready;
 
 	struct {
+		struct mutex mutex;
 		u32 base;
 		u32 size;
 	} send;
@@ -39,18 +40,18 @@ int nvkm_pmu_send(struct nvkm_pmu *, u32 reply[2], u32 process,
 void nvkm_pmu_pgob(struct nvkm_pmu *, bool enable);
 bool nvkm_pmu_fan_controlled(struct nvkm_device *);
 
-int gt215_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gf100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gf119_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gk104_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gm200_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gp10b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gt215_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gf100_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gf119_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gk104_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gk110_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gk208_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gk20a_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gm107_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gm200_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gm20b_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gp102_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gp10b_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
 
 /* interface to MEMX process running on PMU */
 struct nvkm_memx;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/privring.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/privring.h
new file mode 100644
index 0000000000000000000000000000000000000000..e1399f8a90ad96da0912dd7acb79379e3287d1fa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/privring.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_PRIVRING_H__
+#define __NVKM_PRIVRING_H__
+#include <core/subdev.h>
+
+int gf100_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+int gf117_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+int gk104_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+int gk20a_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+int gm200_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+int gp10b_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index 62c34f98c93079fb0c91bfe21f8a9029606a6e83..bd04f49272a696ff3ef5a54b3bdba91043adaef6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -107,13 +107,13 @@ void nvkm_therm_clkgate_init(struct nvkm_therm *,
 void nvkm_therm_clkgate_enable(struct nvkm_therm *);
 void nvkm_therm_clkgate_fini(struct nvkm_therm *, bool);
 
-int nv40_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int nv50_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int g84_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gk104_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gm200_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gp100_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int nv40_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int nv50_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int g84_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gt215_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gf119_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gk104_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gm107_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gm200_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gp100_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index d06dcbe1faa6e77b7c8ab7c2ef52f3d5b1b714eb..439a3f72b0d7d4ba79f8b671cb35469b1b9a5cd3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -76,8 +76,8 @@ s64 nvkm_timer_wait_test(struct nvkm_timer_wait *);
 #define nvkm_wait_msec(d,m,addr,mask,data)                                     \
 	nvkm_wait_usec((d), (m) * 1000, (addr), (mask), (data))
 
-int nv04_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
-int nv40_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
-int nv41_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
-int gk20a_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
+int nv04_timer_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_timer **);
+int nv40_timer_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_timer **);
+int nv41_timer_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_timer **);
+int gk20a_timer_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_timer **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
index 7be0e7e7bd77fa7c16d91774d2c3c29f3073ed9f..ee75c5524c436888eae67e15bfe90350872c4151 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
@@ -9,13 +9,24 @@ struct nvkm_top {
 	struct list_head device;
 };
 
-u32 nvkm_top_addr(struct nvkm_device *, enum nvkm_devidx);
-u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx);
-u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs);
-u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx);
-int nvkm_top_fault_id(struct nvkm_device *, enum nvkm_devidx);
-enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault);
-enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn);
+struct nvkm_top_device {
+	enum nvkm_subdev_type type;
+	int inst;
+	u32 addr;
+	int fault;
+	int engine;
+	int runlist;
+	int reset;
+	int intr;
+	struct list_head head;
+};
+
+u32 nvkm_top_addr(struct nvkm_device *, enum nvkm_subdev_type, int);
+u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_subdev_type, int);
+u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_subdev_type, int);
+int nvkm_top_fault_id(struct nvkm_device *, enum nvkm_subdev_type, int);
+struct nvkm_subdev *nvkm_top_fault(struct nvkm_device *, int fault);
 
-int gk104_top_new(struct nvkm_device *, int, struct nvkm_top **);
+int gk104_top_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_top **);
+int ga100_top_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_top **);
 #endif
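
TOP parsing now records one nvkm_top_device per discovered unit, keyed by
(type, inst), and nvkm_top_fault() hands back the owning nvkm_subdev
directly.  A sketch of the kind of lookup the new struct enables --
illustrative, not the patch's implementation:

	/* Illustrative sketch: walk the TOP device list for the entry
	 * matching a given (type, instance) pair and return its address.
	 */
	static u32
	example_top_addr(struct nvkm_top *top,
			 enum nvkm_subdev_type type, int inst)
	{
		struct nvkm_top_device *info;

		list_for_each_entry(info, &top->device, head) {
			if (info->type == type && info->inst == inst)
				return info->addr;
		}
		return 0;
	}
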
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index 45053a2809309b7e1507c5ee9eb3f3ecf60986a4..0be86d5f0158e2447d1c3739c688b85a508eba0b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -36,10 +36,10 @@ int nvkm_volt_get(struct nvkm_volt *);
 int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp,
 		     int condition);
 
-int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
-int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
-int gf117_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
-int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
-int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
-int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+int nv40_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
+int gf100_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
+int gf117_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
+int gk104_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
+int gk20a_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
+int gm20b_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 9a5be6f3242496013779cd78c9f509a36e95bb8b..0a9334deffe20296770ce590eb427c9286bad4ee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -181,6 +181,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 	struct nvif_device *device = &drm->client.device;
 	struct nvkm_gr *gr = nvxx_gr(device);
 	struct drm_nouveau_getparam *getparam = data;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	switch (getparam->param) {
 	case NOUVEAU_GETPARAM_CHIPSET_ID:
@@ -188,13 +189,13 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		break;
 	case NOUVEAU_GETPARAM_PCI_VENDOR:
 		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
-			getparam->value = dev->pdev->vendor;
+			getparam->value = pdev->vendor;
 		else
 			getparam->value = 0;
 		break;
 	case NOUVEAU_GETPARAM_PCI_DEVICE:
 		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
-			getparam->value = dev->pdev->device;
+			getparam->value = pdev->device;
 		else
 			getparam->value = 0;
 		break;
@@ -205,7 +206,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
 		case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
 		case NV_DEVICE_INFO_V0_IGP :
-			if (!pci_is_pcie(dev->pdev))
+			if (!pci_is_pcie(pdev))
 				getparam->value = 1;
 			else
 				getparam->value = 2;
@@ -268,19 +269,19 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
 		if (init->fb_ctxdma_handle == ~0) {
 			switch (init->tt_ctxdma_handle) {
-			case 0x01: engine = NV_DEVICE_INFO_ENGINE_GR    ; break;
-			case 0x02: engine = NV_DEVICE_INFO_ENGINE_MSPDEC; break;
-			case 0x04: engine = NV_DEVICE_INFO_ENGINE_MSPPP ; break;
-			case 0x08: engine = NV_DEVICE_INFO_ENGINE_MSVLD ; break;
-			case 0x30: engine = NV_DEVICE_INFO_ENGINE_CE    ; break;
+			case 0x01: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR    ; break;
+			case 0x02: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC; break;
+			case 0x04: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP ; break;
+			case 0x08: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD ; break;
+			case 0x30: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE    ; break;
 			default:
 				return nouveau_abi16_put(abi16, -ENOSYS);
 			}
 		} else {
-			engine = NV_DEVICE_INFO_ENGINE_GR;
+			engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
 		}
 
-		if (engine != NV_DEVICE_INFO_ENGINE_CE)
+		if (engine != NV_DEVICE_HOST_RUNLIST_ENGINES_CE)
 			engine = nvif_fifo_runlist(device, engine);
 		else
 			engine = nvif_fifo_runlist_ce(device);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 69a84d0197d0af2ce4544b564f9c9605e9da1e8c..7c15f64484281ede2a8ca79ebcc5267a4271b9fb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -377,7 +377,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
 		return NULL;
 	}
 
-	handle = ACPI_HANDLE(&dev->pdev->dev);
+	handle = ACPI_HANDLE(dev->dev);
 	if (!handle)
 		return NULL;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index d204ea8a5618edf2d42a63cad34696d7667dfdc7..e8c445eb110047c54c7b6fd920d3bfd5b0b4148a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -110,6 +110,9 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp
 	struct nvbios *bios = &drm->vbios;
 	uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & DCB_OUTPUT_C ? 1 : 0);
 	uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
+#ifdef __powerpc__
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+#endif
 
 	if (!bios->fp.xlated_entry || !sub || !scriptofs)
 		return -EINVAL;
@@ -123,8 +126,8 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp
 #ifdef __powerpc__
 	/* Powerbook specific quirks */
 	if (script == LVDS_RESET &&
-	    (dev->pdev->device == 0x0179 || dev->pdev->device == 0x0189 ||
-	     dev->pdev->device == 0x0329))
+	    (pdev->device == 0x0179 || pdev->device == 0x0189 ||
+	     pdev->device == 0x0329))
 		nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
 #endif
 
@@ -2083,7 +2086,7 @@ nouveau_bios_init(struct drm_device *dev)
 	int ret;
 
 	/* only relevant for PCI devices */
-	if (!dev->pdev)
+	if (!dev_is_pci(dev->dev))
 		return 0;
 
 	if (!NVInitVBIOS(dev))
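
These source-file changes all follow from the removal of drm_device.pdev:
PCI-only paths now gate on dev_is_pci() and derive the pci_dev from the
generic struct device.  A minimal sketch of the recurring pattern -- an
illustrative helper, not something the patch itself adds:

	/* Illustrative helper: the replacement for the old dev->pdev
	 * dereference.  Returns NULL for non-PCI (e.g. Tegra platform)
	 * devices.
	 */
	static struct pci_dev *
	example_nouveau_pdev(struct drm_device *dev)
	{
		if (!dev_is_pci(dev->dev))
			return NULL;
		return to_pci_dev(dev->dev);
	}
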
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7ea367a5444dd3e26d96a601200e3670ff663f72..2375711877cf3ac6ebe4570b3541f25627659f1a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -473,10 +473,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		drm->gem.vram_available -= bo->mem.size;
+		drm->gem.vram_available -= bo->base.size;
 		break;
 	case TTM_PL_TT:
-		drm->gem.gart_available -= bo->mem.size;
+		drm->gem.gart_available -= bo->base.size;
 		break;
 	default:
 		break;
@@ -504,10 +504,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 	if (!nvbo->bo.pin_count) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
-			drm->gem.vram_available += bo->mem.size;
+			drm->gem.vram_available += bo->base.size;
 			break;
 		case TTM_PL_TT:
-			drm->gem.gart_available += bo->mem.size;
+			drm->gem.gart_available += bo->base.size;
 			break;
 		default:
 			break;
@@ -797,7 +797,10 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
 			return ret;
 	}
 
-	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
+	if (drm_drv_uses_atomic_modeset(drm->dev))
+		mutex_lock(&cli->mutex);
+	else
+		mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
 	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
 	if (ret == 0) {
 		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
@@ -933,7 +936,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
 		return 0;
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
-		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
+		*new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size,
 					       nvbo->mode, nvbo->zeta);
 	}
 
@@ -1255,9 +1258,8 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
 		return 0;
 
 	if (slave && ttm->sg) {
-		/* make userspace faulting work */
-		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
-						 ttm_dma->dma_address, ttm->num_pages);
+		drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address,
+					       ttm->num_pages);
 		return 0;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 5d191e58edf113bf83335c1741ca678fe9f87ed7..7cfac265fd4527c1af5e26b696c19afa08f65065 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -533,6 +533,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
 	if (ret) {
 		NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
 		nouveau_channel_del(pchan);
+		goto done;
 	}
 
 	ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
@@ -555,7 +556,7 @@ nouveau_channels_init(struct nouveau_drm *drm)
 	} args = {
 		.m.version = 1,
 		.m.count = sizeof(args.v) / sizeof(args.v.channels),
-		.v.channels.mthd = NV_DEVICE_FIFO_CHANNELS,
+		.v.channels.mthd = NV_DEVICE_HOST_CHANNELS,
 	};
 	struct nvif_object *device = &drm->client.device.object;
 	int ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 8b4b3688c7ae3a608f0e76d5aec9d54571d94da0..61e6d7412505a39464c27c65cdbfabcfc5757457 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -411,6 +411,7 @@ static struct nouveau_encoder *
 nouveau_connector_ddc_detect(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
 	struct drm_encoder *encoder;
 	int ret;
@@ -438,11 +439,11 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
 				break;
 
 			if (switcheroo_ddc)
-				vga_switcheroo_lock_ddc(dev->pdev);
+				vga_switcheroo_lock_ddc(pdev);
 			if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
 				found = nv_encoder;
 			if (switcheroo_ddc)
-				vga_switcheroo_unlock_ddc(dev->pdev);
+				vga_switcheroo_unlock_ddc(pdev);
 
 			break;
 		}
@@ -490,6 +491,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
 	struct drm_device *dev = connector->dev;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	if (nv_connector->detected_encoder == nv_encoder)
 		return;
@@ -511,8 +513,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
 		connector->doublescan_allowed = true;
 		if (drm->client.device.info.family == NV_DEVICE_INFO_V0_KELVIN ||
 		    (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
-		     (dev->pdev->device & 0x0ff0) != 0x0100 &&
-		     (dev->pdev->device & 0x0ff0) != 0x0150))
+		     (pdev->device & 0x0ff0) != 0x0100 &&
+		     (pdev->device & 0x0ff0) != 0x0150))
 			/* HW is broken */
 			connector->interlace_allowed = false;
 		else
@@ -1210,6 +1212,7 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
 	case DCB_CONNECTOR_DMS59_DP0:
 	case DCB_CONNECTOR_DMS59_DP1:
 	case DCB_CONNECTOR_DP       :
+	case DCB_CONNECTOR_mDP      :
 	case DCB_CONNECTOR_USB_C    : return DRM_MODE_CONNECTOR_DisplayPort;
 	case DCB_CONNECTOR_eDP      : return DRM_MODE_CONNECTOR_eDP;
 	case DCB_CONNECTOR_HDMI_0   :
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index bceb48a2dfca6ba30c89fe28716250d7284c7b6d..17831ee897ea46d1bc88234017fc89bd12ddf2b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -286,11 +286,11 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
 
 	bl_size = bw * bh * (1 << tile_mode) * gob_size;
 
-	DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n",
+	DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n",
 		      offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
-		      nvbo->bo.mem.size);
+		      nvbo->bo.base.size);
 
-	if (bl_size + offset > nvbo->bo.mem.size)
+	if (bl_size + offset > nvbo->bo.base.size)
 		return -ERANGE;
 
 	return 0;
@@ -363,7 +363,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
 		} else {
 			uint32_t size = mode_cmd->pitches[i] * height;
 
-			if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size)
+			if (size + mode_cmd->offsets[i] > nvbo->bo.base.size)
 				return -ERANGE;
 		}
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index d141a5f004afc9f729d2defef3739641b98f60b9..885815ea917f89341b7c1b3fb21015bf84d46908 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -115,8 +115,8 @@ nouveau_platform_name(struct platform_device *platformdev)
 static u64
 nouveau_name(struct drm_device *dev)
 {
-	if (dev->pdev)
-		return nouveau_pci_name(dev->pdev);
+	if (dev_is_pci(dev->dev))
+		return nouveau_pci_name(to_pci_dev(dev->dev));
 	else
 		return nouveau_platform_name(to_platform_device(dev->dev));
 }
@@ -344,7 +344,7 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
 
 	/* Allocate channel that has access to the graphics engine. */
 	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
-		arg0 = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);
+		arg0 = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
 		arg1 = 1;
 	} else {
 		arg0 = NvDmaFB;
@@ -760,7 +760,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
 	if (ret)
 		goto fail_drm;
 
-	drm_dev->pdev = pdev;
 	pci_set_drvdata(pdev, drm_dev);
 
 	ret = nouveau_drm_device_init(drm_dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9d04d1b364343538e4b7a97f6f084ea72aedcef1..d28ee68442453b4bd472e05e135c9dfca661c86e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -55,7 +55,6 @@
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
 
 #include <drm/drm_audio_component.h>
 
@@ -222,6 +221,7 @@ struct nouveau_drm {
 
 	struct {
 		struct drm_audio_component *component;
+		struct mutex lock;
 		bool component_registered;
 	} audio;
 };
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 21937f1c7dd90e8ced809e081755a2898db770af..1ffcc0a491fd22b8811ad06558cb6d511e4362e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -53,7 +53,12 @@ struct nouveau_encoder {
 	 * actually programmed on the hw, not the proposed crtc */
 	struct drm_crtc *crtc;
 	u32 ctrl;
-	bool audio;
+
+	/* Protected by nouveau_drm.audio.lock */
+	struct {
+		bool enabled;
+		struct drm_connector *connector;
+	} audio;
 
 	struct drm_display_mode mode;
 	int last_dpms;
@@ -141,11 +146,9 @@ enum drm_mode_status nv50_dp_mode_valid(struct drm_connector *,
 					unsigned *clock);
 
 struct nouveau_connector *
-nv50_outp_get_new_connector(struct nouveau_encoder *outp,
-			    struct drm_atomic_state *state);
+nv50_outp_get_new_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp);
 struct nouveau_connector *
-nv50_outp_get_old_connector(struct nouveau_encoder *outp,
-			    struct drm_atomic_state *state);
+nv50_outp_get_old_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp);
 
 int nv50_mstm_detect(struct nouveau_encoder *encoder);
 void nv50_mstm_remove(struct nv50_mstm *mstm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 24ec5339efb46880bda75eae5eb41631b0a6b02b..4fc0fa696461f417f199ccae33f0e85a5bceab05 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -396,7 +396,9 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 	NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
 		fb->width, fb->height, nvbo->offset, nvbo);
 
-	vga_switcheroo_client_fb_set(dev->pdev, info);
+	if (dev_is_pci(dev->dev))
+		vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), info);
+
 	return 0;
 
 out_unlock:
@@ -548,7 +550,7 @@ nouveau_fbcon_init(struct drm_device *dev)
 	int ret;
 
 	if (!dev->mode_config.num_crtc ||
-	    (dev->pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+	    (to_pci_dev(dev->dev)->class >> 8) != PCI_CLASS_DISPLAY_VGA)
 		return 0;
 
 	fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 2f16b52492836532385e6047c843f6707afc3a58..347488685f745a2cd80a88b431769daa59006371 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -30,9 +30,9 @@
 struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
 	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
-	int npages = nvbo->bo.num_pages;
 
-	return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages);
+	return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages,
+				     nvbo->bo.ttm->num_pages);
 }
 
 struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index a2e23fd4906ce58edaee011af20f197f8cdae461..1cf52635ea74c541cd3f51b7bca882b7c8c33bc5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -84,7 +84,7 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
 	if (!nvbe)
 		return NULL;
 
-	if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags, caching)) {
+	if (ttm_sg_tt_init(&nvbe->ttm, bo, page_flags, caching)) {
 		kfree(nvbe);
 		return NULL;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index c85dd8afa3c3df7c65c3141488e3fba7deec468d..7c4b374b3eca32fbbf296184bcee078f32b00594 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -87,18 +87,20 @@ nouveau_vga_init(struct nouveau_drm *drm)
 {
 	struct drm_device *dev = drm->dev;
 	bool runtime = nouveau_pmops_runtime();
+	struct pci_dev *pdev;
 
 	/* only relevant for PCI devices */
-	if (!dev->pdev)
+	if (!dev_is_pci(dev->dev))
 		return;
+	pdev = to_pci_dev(dev->dev);
 
-	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
+	vga_client_register(pdev, dev, NULL, nouveau_vga_set_decode);
 
 	/* don't register Thunderbolt eGPU with vga_switcheroo */
-	if (pci_is_thunderbolt_attached(dev->pdev))
+	if (pci_is_thunderbolt_attached(pdev))
 		return;
 
-	vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
+	vga_switcheroo_register_client(pdev, &nouveau_switcheroo_ops, runtime);
 
 	if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
 		vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain);
@@ -109,17 +111,19 @@ nouveau_vga_fini(struct nouveau_drm *drm)
 {
 	struct drm_device *dev = drm->dev;
 	bool runtime = nouveau_pmops_runtime();
+	struct pci_dev *pdev;
 
 	/* only relevant for PCI devices */
-	if (!dev->pdev)
+	if (!dev_is_pci(dev->dev))
 		return;
+	pdev = to_pci_dev(dev->dev);
 
-	vga_client_register(dev->pdev, NULL, NULL, NULL);
+	vga_client_register(pdev, NULL, NULL, NULL);
 
-	if (pci_is_thunderbolt_attached(dev->pdev))
+	if (pci_is_thunderbolt_attached(pdev))
 		return;
 
-	vga_switcheroo_unregister_client(dev->pdev);
+	vga_switcheroo_unregister_client(pdev);
 	if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
 		vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
 }
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 1253fdec712d40eb9c7ef67e82f38ee8a99d2b88..b1cd8d7dd87d4248607aaa4e7b335bef4725ad0e 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -80,7 +80,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
 	struct nv10_fence_chan *fctx;
 	struct ttm_resource *reg = &priv->bo->bo.mem;
 	u32 start = reg->start * PAGE_SIZE;
-	u32 limit = start + reg->size - 1;
+	u32 limit = start + priv->bo->bo.base.size - 1;
 	int ret = 0;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 447238e3cbe776d7f7ab30c666511e4bc0f08bc2..1625826505f698dc004617b44cbffc784474b43f 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -39,7 +39,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 	struct nv10_fence_chan *fctx;
 	struct ttm_resource *reg = &priv->bo->bo.mem;
 	u32 start = reg->start * PAGE_SIZE;
-	u32 limit = start + reg->size - 1;
+	u32 limit = start + priv->bo->bo.base.size - 1;
 	int ret;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nvif/fifo.c b/drivers/gpu/drm/nouveau/nvif/fifo.c
index e84a2e2ff043152b058c519eadfceee4741286e5..a463289962b2507cc439a8958c1c157be0f29052 100644
--- a/drivers/gpu/drm/nouveau/nvif/fifo.c
+++ b/drivers/gpu/drm/nouveau/nvif/fifo.c
@@ -41,9 +41,11 @@ nvif_fifo_runlists(struct nvif_device *device)
 		return -ENOMEM;
 	a->m.version = 1;
 	a->m.count = sizeof(a->v) / sizeof(a->v.runlists);
-	a->v.runlists.mthd = NV_DEVICE_FIFO_RUNLISTS;
-	for (i = 0; i < ARRAY_SIZE(a->v.runlist); i++)
-		a->v.runlist[i].mthd = NV_DEVICE_FIFO_RUNLIST_ENGINES(i);
+	a->v.runlists.mthd = NV_DEVICE_HOST_RUNLISTS;
+	for (i = 0; i < ARRAY_SIZE(a->v.runlist); i++) {
+		a->v.runlist[i].mthd = NV_DEVICE_HOST_RUNLIST_ENGINES;
+		a->v.runlist[i].data = i;
+	}
 
 	ret = nvif_object_mthd(object, NV_DEVICE_V0_INFO, a, sizeof(*a));
 	if (ret)
@@ -58,7 +60,7 @@ nvif_fifo_runlists(struct nvif_device *device)
 	}
 
 	for (i = 0; i < device->runlists; i++) {
-		if (a->v.runlists.data & BIT_ULL(i))
+		if (a->v.runlist[i].mthd != NV_DEVICE_INFO_INVALID)
 			device->runlist[i].engines = a->v.runlist[i].data;
 	}
 
@@ -70,29 +72,15 @@ nvif_fifo_runlists(struct nvif_device *device)
 u64
 nvif_fifo_runlist(struct nvif_device *device, u64 engine)
 {
-	struct nvif_object *object = &device->object;
-	struct {
-		struct nv_device_info_v1 m;
-		struct {
-			struct nv_device_info_v1_data engine;
-		} v;
-	} a = {
-		.m.version = 1,
-		.m.count = sizeof(a.v) / sizeof(a.v.engine),
-		.v.engine.mthd = engine,
-	};
 	u64 runm = 0;
 	int ret, i;
 
 	if ((ret = nvif_fifo_runlists(device)))
 		return runm;
 
-	ret = nvif_object_mthd(object, NV_DEVICE_V0_INFO, &a, sizeof(a));
-	if (ret == 0) {
-		for (i = 0; i < device->runlists; i++) {
-			if (device->runlist[i].engines & a.v.engine.data)
-				runm |= BIT_ULL(i);
-		}
+	for (i = 0; i < device->runlists; i++) {
+		if (device->runlist[i].engines & engine)
+			runm |= BIT_ULL(i);
 	}
 
 	return runm;
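
Aside (editor's sketch): with the per-runlist engine masks cached by
nvif_fifo_runlists(), the runlist selection above reduces to a pure bitmask
fold, roughly as below (names are illustrative):

#include <linux/bits.h>

/* Select every runlist whose cached engine mask intersects the
 * requested engine mask. */
static u64 example_runlist_mask(const u64 *engines, int nr, u64 engine)
{
	u64 runm = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (engines[i] & engine)
			runm |= BIT_ULL(i);
	}
	return runm;
}

For example, engines[] = { GR | CE, CE } with engine = GR yields BIT_ULL(0).
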
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index 1a47c40e171b36e0e2ee519ea30851357ca99f05..e41a39ae1597aac09ed5bc9c78e7613ca9026507 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -40,10 +40,11 @@ nvkm_engine_unref(struct nvkm_engine **pengine)
 {
 	struct nvkm_engine *engine = *pengine;
 	if (engine) {
-		mutex_lock(&engine->subdev.mutex);
-		if (--engine->usecount == 0)
+		if (refcount_dec_and_mutex_lock(&engine->use.refcount, &engine->use.mutex)) {
 			nvkm_subdev_fini(&engine->subdev, false);
-		mutex_unlock(&engine->subdev.mutex);
+			engine->use.enabled = false;
+			mutex_unlock(&engine->use.mutex);
+		}
 		*pengine = NULL;
 	}
 }
@@ -51,17 +52,21 @@ nvkm_engine_unref(struct nvkm_engine **pengine)
 struct nvkm_engine *
 nvkm_engine_ref(struct nvkm_engine *engine)
 {
+	int ret;
 	if (engine) {
-		mutex_lock(&engine->subdev.mutex);
-		if (++engine->usecount == 1) {
-			int ret = nvkm_subdev_init(&engine->subdev);
-			if (ret) {
-				engine->usecount--;
-				mutex_unlock(&engine->subdev.mutex);
-				return ERR_PTR(ret);
+		if (!refcount_inc_not_zero(&engine->use.refcount)) {
+			mutex_lock(&engine->use.mutex);
+			if (!refcount_inc_not_zero(&engine->use.refcount)) {
+				engine->use.enabled = true;
+				if ((ret = nvkm_subdev_init(&engine->subdev))) {
+					engine->use.enabled = false;
+					mutex_unlock(&engine->use.mutex);
+					return ERR_PTR(ret);
+				}
+				refcount_set(&engine->use.refcount, 1);
 			}
+			mutex_unlock(&engine->use.mutex);
 		}
-		mutex_unlock(&engine->subdev.mutex);
 	}
 	return engine;
 }
@@ -114,7 +119,7 @@ nvkm_engine_init(struct nvkm_subdev *subdev)
 	int ret = 0, i;
 	s64 time;
 
-	if (!engine->usecount) {
+	if (!engine->use.enabled) {
 		nvkm_trace(subdev, "init skipped, engine has no users\n");
 		return ret;
 	}
@@ -156,11 +161,12 @@ nvkm_engine_dtor(struct nvkm_subdev *subdev)
 	struct nvkm_engine *engine = nvkm_engine(subdev);
 	if (engine->func->dtor)
 		return engine->func->dtor(engine);
+	mutex_destroy(&engine->use.mutex);
 	return engine;
 }
 
-static const struct nvkm_subdev_func
-nvkm_engine_func = {
+const struct nvkm_subdev_func
+nvkm_engine = {
 	.dtor = nvkm_engine_dtor,
 	.preinit = nvkm_engine_preinit,
 	.init = nvkm_engine_init,
@@ -170,14 +176,15 @@ nvkm_engine_func = {
 };
 
 int
-nvkm_engine_ctor(const struct nvkm_engine_func *func,
-		 struct nvkm_device *device, int index, bool enable,
-		 struct nvkm_engine *engine)
+nvkm_engine_ctor(const struct nvkm_engine_func *func, struct nvkm_device *device,
+		 enum nvkm_subdev_type type, int inst, bool enable, struct nvkm_engine *engine)
 {
-	nvkm_subdev_ctor(&nvkm_engine_func, device, index, &engine->subdev);
+	nvkm_subdev_ctor(&nvkm_engine, device, type, inst, &engine->subdev);
 	engine->func = func;
+	refcount_set(&engine->use.refcount, 0);
+	mutex_init(&engine->use.mutex);
 
-	if (!nvkm_boolopt(device->cfgopt, nvkm_subdev_name[index], enable)) {
+	if (!nvkm_boolopt(device->cfgopt, engine->subdev.name, enable)) {
 		nvkm_debug(&engine->subdev, "disabled\n");
 		return -ENODEV;
 	}
@@ -187,11 +194,11 @@ nvkm_engine_ctor(const struct nvkm_engine_func *func,
 }
 
 int
-nvkm_engine_new_(const struct nvkm_engine_func *func,
-		 struct nvkm_device *device, int index, bool enable,
+nvkm_engine_new_(const struct nvkm_engine_func *func, struct nvkm_device *device,
+		 enum nvkm_subdev_type type, int inst, bool enable,
 		 struct nvkm_engine **pengine)
 {
 	if (!(*pengine = kzalloc(sizeof(**pengine), GFP_KERNEL)))
 		return -ENOMEM;
-	return nvkm_engine_ctor(func, device, index, enable, *pengine);
+	return nvkm_engine_ctor(func, device, type, inst, enable, *pengine);
 }
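
Aside (editor's sketch): the ref/unref rework above is the standard
lockless-fast-path idiom built from refcount_inc_not_zero() and
refcount_dec_and_mutex_lock(). Stripped of the nvkm specifics (names
illustrative, error handling omitted, lock assumed initialised with
mutex_init() at creation as in nvkm_engine_ctor()):

#include <linux/refcount.h>
#include <linux/mutex.h>

struct example_obj {
	refcount_t refcount;
	struct mutex lock;
	bool enabled;
};

/* Fast path: refcount_inc_not_zero() succeeds without the mutex when
 * the object is already live.  Slow path: take the mutex, re-check,
 * perform one-time init, then publish with refcount_set(..., 1). */
static void example_get(struct example_obj *obj)
{
	if (refcount_inc_not_zero(&obj->refcount))
		return;				/* already initialised */

	mutex_lock(&obj->lock);
	if (!refcount_inc_not_zero(&obj->refcount)) {
		obj->enabled = true;		/* one-time init here */
		refcount_set(&obj->refcount, 1);
	}
	mutex_unlock(&obj->lock);
}

/* Teardown: refcount_dec_and_mutex_lock() only acquires the mutex when
 * the count hits zero, serialising fini against a racing example_get(). */
static void example_put(struct example_obj *obj)
{
	if (refcount_dec_and_mutex_lock(&obj->refcount, &obj->lock)) {
		obj->enabled = false;		/* one-time fini here */
		mutex_unlock(&obj->lock);
	}
}
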
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
index 38130ef272d6f1023458f18cdf122fe1b16f3329..c69daac9bac7b0761e99815fc3dce7f51798e836 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -33,13 +33,13 @@ nvkm_memory_tags_put(struct nvkm_memory *memory, struct nvkm_device *device,
 	struct nvkm_fb *fb = device->fb;
 	struct nvkm_tags *tags = *ptags;
 	if (tags) {
-		mutex_lock(&fb->subdev.mutex);
+		mutex_lock(&fb->tags.mutex);
 		if (refcount_dec_and_test(&tags->refcount)) {
-			nvkm_mm_free(&fb->tags, &tags->mn);
+			nvkm_mm_free(&fb->tags.mm, &tags->mn);
 			kfree(memory->tags);
 			memory->tags = NULL;
 		}
-		mutex_unlock(&fb->subdev.mutex);
+		mutex_unlock(&fb->tags.mutex);
 		*ptags = NULL;
 	}
 }
@@ -52,29 +52,29 @@ nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
 	struct nvkm_fb *fb = device->fb;
 	struct nvkm_tags *tags;
 
-	mutex_lock(&fb->subdev.mutex);
+	mutex_lock(&fb->tags.mutex);
 	if ((tags = memory->tags)) {
 		/* If comptags exist for the memory, but a different amount
 		 * than requested, the buffer is being mapped with settings
 		 * that are incompatible with existing mappings.
 		 */
 		if (tags->mn && tags->mn->length != nr) {
-			mutex_unlock(&fb->subdev.mutex);
+			mutex_unlock(&fb->tags.mutex);
 			return -EINVAL;
 		}
 
 		refcount_inc(&tags->refcount);
-		mutex_unlock(&fb->subdev.mutex);
+		mutex_unlock(&fb->tags.mutex);
 		*ptags = tags;
 		return 0;
 	}
 
 	if (!(tags = kmalloc(sizeof(*tags), GFP_KERNEL))) {
-		mutex_unlock(&fb->subdev.mutex);
+		mutex_unlock(&fb->tags.mutex);
 		return -ENOMEM;
 	}
 
-	if (!nvkm_mm_head(&fb->tags, 0, 1, nr, nr, 1, &tags->mn)) {
+	if (!nvkm_mm_head(&fb->tags.mm, 0, 1, nr, nr, 1, &tags->mn)) {
 		if (clr)
 			clr(device, tags->mn->offset, tags->mn->length);
 	} else {
@@ -92,7 +92,7 @@ nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
 
 	refcount_set(&tags->refcount, 1);
 	*ptags = memory->tags = tags;
-	mutex_unlock(&fb->subdev.mutex);
+	mutex_unlock(&fb->tags.mutex);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 49d468b45d3f4def458e7d1e9051e79434e72340..a74b7acb6832e55540d2229c0dd16f9288af8a08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -26,69 +26,13 @@
 #include <core/option.h>
 #include <subdev/mc.h>
 
-static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR];
-
 const char *
-nvkm_subdev_name[NVKM_SUBDEV_NR] = {
-	[NVKM_SUBDEV_ACR     ] = "acr",
-	[NVKM_SUBDEV_BAR     ] = "bar",
-	[NVKM_SUBDEV_VBIOS   ] = "bios",
-	[NVKM_SUBDEV_BUS     ] = "bus",
-	[NVKM_SUBDEV_CLK     ] = "clk",
-	[NVKM_SUBDEV_DEVINIT ] = "devinit",
-	[NVKM_SUBDEV_FAULT   ] = "fault",
-	[NVKM_SUBDEV_FB      ] = "fb",
-	[NVKM_SUBDEV_FUSE    ] = "fuse",
-	[NVKM_SUBDEV_GPIO    ] = "gpio",
-	[NVKM_SUBDEV_GSP     ] = "gsp",
-	[NVKM_SUBDEV_I2C     ] = "i2c",
-	[NVKM_SUBDEV_IBUS    ] = "priv",
-	[NVKM_SUBDEV_ICCSENSE] = "iccsense",
-	[NVKM_SUBDEV_INSTMEM ] = "imem",
-	[NVKM_SUBDEV_LTC     ] = "ltc",
-	[NVKM_SUBDEV_MC      ] = "mc",
-	[NVKM_SUBDEV_MMU     ] = "mmu",
-	[NVKM_SUBDEV_MXM     ] = "mxm",
-	[NVKM_SUBDEV_PCI     ] = "pci",
-	[NVKM_SUBDEV_PMU     ] = "pmu",
-	[NVKM_SUBDEV_THERM   ] = "therm",
-	[NVKM_SUBDEV_TIMER   ] = "tmr",
-	[NVKM_SUBDEV_TOP     ] = "top",
-	[NVKM_SUBDEV_VOLT    ] = "volt",
-	[NVKM_ENGINE_BSP     ] = "bsp",
-	[NVKM_ENGINE_CE0     ] = "ce0",
-	[NVKM_ENGINE_CE1     ] = "ce1",
-	[NVKM_ENGINE_CE2     ] = "ce2",
-	[NVKM_ENGINE_CE3     ] = "ce3",
-	[NVKM_ENGINE_CE4     ] = "ce4",
-	[NVKM_ENGINE_CE5     ] = "ce5",
-	[NVKM_ENGINE_CE6     ] = "ce6",
-	[NVKM_ENGINE_CE7     ] = "ce7",
-	[NVKM_ENGINE_CE8     ] = "ce8",
-	[NVKM_ENGINE_CIPHER  ] = "cipher",
-	[NVKM_ENGINE_DISP    ] = "disp",
-	[NVKM_ENGINE_DMAOBJ  ] = "dma",
-	[NVKM_ENGINE_FIFO    ] = "fifo",
-	[NVKM_ENGINE_GR      ] = "gr",
-	[NVKM_ENGINE_IFB     ] = "ifb",
-	[NVKM_ENGINE_ME      ] = "me",
-	[NVKM_ENGINE_MPEG    ] = "mpeg",
-	[NVKM_ENGINE_MSENC   ] = "msenc",
-	[NVKM_ENGINE_MSPDEC  ] = "mspdec",
-	[NVKM_ENGINE_MSPPP   ] = "msppp",
-	[NVKM_ENGINE_MSVLD   ] = "msvld",
-	[NVKM_ENGINE_NVENC0  ] = "nvenc0",
-	[NVKM_ENGINE_NVENC1  ] = "nvenc1",
-	[NVKM_ENGINE_NVENC2  ] = "nvenc2",
-	[NVKM_ENGINE_NVDEC0  ] = "nvdec0",
-	[NVKM_ENGINE_NVDEC1  ] = "nvdec1",
-	[NVKM_ENGINE_NVDEC2  ] = "nvdec2",
-	[NVKM_ENGINE_PM      ] = "pm",
-	[NVKM_ENGINE_SEC     ] = "sec",
-	[NVKM_ENGINE_SEC2    ] = "sec2",
-	[NVKM_ENGINE_SW      ] = "sw",
-	[NVKM_ENGINE_VIC     ] = "vic",
-	[NVKM_ENGINE_VP      ] = "vp",
+nvkm_subdev_type[NVKM_SUBDEV_NR] = {
+#define NVKM_LAYOUT_ONCE(type,data,ptr,...) [type] = #ptr,
+#define NVKM_LAYOUT_INST(A...) NVKM_LAYOUT_ONCE(A)
+#include <core/layout.h>
+#undef NVKM_LAYOUT_ONCE
+#undef NVKM_LAYOUT_INST
 };
 
 void
@@ -125,7 +69,7 @@ nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
 		}
 	}
 
-	nvkm_mc_reset(device, subdev->index);
+	nvkm_mc_reset(device, subdev->type, subdev->inst);
 
 	time = ktime_to_us(ktime_get()) - time;
 	nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
@@ -199,6 +143,7 @@ nvkm_subdev_del(struct nvkm_subdev **psubdev)
 	if (subdev && !WARN_ON(!subdev->func)) {
 		nvkm_trace(subdev, "destroy running...\n");
 		time = ktime_to_us(ktime_get());
+		list_del(&subdev->head);
 		if (subdev->func->dtor)
 			*psubdev = subdev->func->dtor(subdev);
 		time = ktime_to_us(ktime_get()) - time;
@@ -209,26 +154,41 @@ nvkm_subdev_del(struct nvkm_subdev **psubdev)
 }
 
 void
-nvkm_subdev_ctor(const struct nvkm_subdev_func *func,
-		 struct nvkm_device *device, int index,
-		 struct nvkm_subdev *subdev)
+nvkm_subdev_disable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
+{
+	struct nvkm_subdev *subdev;
+	list_for_each_entry(subdev, &device->subdev, head) {
+		if (subdev->type == type && subdev->inst == inst) {
+			*subdev->pself = NULL;
+			nvkm_subdev_del(&subdev);
+			break;
+		}
+	}
+}
+
+void
+nvkm_subdev_ctor(const struct nvkm_subdev_func *func, struct nvkm_device *device,
+		 enum nvkm_subdev_type type, int inst, struct nvkm_subdev *subdev)
 {
-	const char *name = nvkm_subdev_name[index];
 	subdev->func = func;
 	subdev->device = device;
-	subdev->index = index;
-
-	__mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]);
-	subdev->debug = nvkm_dbgopt(device->dbgopt, name);
+	subdev->type = type;
+	subdev->inst = inst < 0 ? 0 : inst;
+
+	if (inst >= 0)
+		snprintf(subdev->name, sizeof(subdev->name), "%s%d", nvkm_subdev_type[type], inst);
+	else
+		strscpy(subdev->name, nvkm_subdev_type[type], sizeof(subdev->name));
+	subdev->debug = nvkm_dbgopt(device->dbgopt, subdev->name);
+	list_add_tail(&subdev->head, &device->subdev);
 }
 
 int
-nvkm_subdev_new_(const struct nvkm_subdev_func *func,
-		 struct nvkm_device *device, int index,
-		 struct nvkm_subdev **psubdev)
+nvkm_subdev_new_(const struct nvkm_subdev_func *func, struct nvkm_device *device,
+		 enum nvkm_subdev_type type, int inst, struct nvkm_subdev **psubdev)
 {
 	if (!(*psubdev = kzalloc(sizeof(**psubdev), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(func, device, index, *psubdev);
+	nvkm_subdev_ctor(func, device, type, inst, *psubdev);
 	return 0;
 }
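
Aside (editor's sketch): nvkm_subdev_type[] above is generated with the
X-macro technique; the same <core/layout.h> expansion also produces the
subdev enum elsewhere, so the name table and the enum can never drift
apart. Generic shape of the trick (the header name here is hypothetical):

/* example_layout.h would contain one line per unit, e.g.:
 *	EXAMPLE_ONCE(EX_FOO, foo)
 *	EXAMPLE_ONCE(EX_BAR, bar)
 */
enum example_type {
#define EXAMPLE_ONCE(type, name) type,
#include "example_layout.h"
#undef EXAMPLE_ONCE
	EX_NR
};

static const char *example_name[EX_NR] = {
#define EXAMPLE_ONCE(type, name) [type] = #name,
#include "example_layout.h"
#undef EXAMPLE_ONCE
};
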
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index 44e116f7880dd02e6754d3d328f1d909a0a7041a..39f6db269c7a5f25a126ed32d81f3617983020f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -36,8 +36,9 @@ g84_bsp = {
 };
 
 int
-g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
+g84_bsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	    struct nvkm_engine **pengine)
 {
-	return nvkm_xtensa_new_(&g84_bsp, device, index,
+	return nvkm_xtensa_new_(&g84_bsp, device, type, inst,
 				device->chipset != 0x92, 0x103000, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
index ad9f855c9a407223c63b505e151e34b6953a1f16..b9cc3956598570fe89db1c416ed06ec1cfb98c6e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
@@ -29,9 +29,7 @@
 static void
 gf100_ce_init(struct nvkm_falcon *ce)
 {
-	struct nvkm_device *device = ce->engine.subdev.device;
-	const int index = ce->engine.subdev.index - NVKM_ENGINE_CE0;
-	nvkm_wr32(device, ce->addr + 0x084, index);
+	nvkm_wr32(ce->engine.subdev.device, ce->addr + 0x084, ce->engine.subdev.inst);
 }
 
 static const struct nvkm_falcon_func
@@ -63,16 +61,9 @@ gf100_ce1 = {
 };
 
 int
-gf100_ce_new(struct nvkm_device *device, int index,
+gf100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	     struct nvkm_engine **pengine)
 {
-	if (index == NVKM_ENGINE_CE0) {
-		return nvkm_falcon_new_(&gf100_ce0, device, index, true,
-					0x104000, pengine);
-	} else
-	if (index == NVKM_ENGINE_CE1) {
-		return nvkm_falcon_new_(&gf100_ce1, device, index, true,
-					0x105000, pengine);
-	}
-	return -ENODEV;
+	return nvkm_falcon_new_(inst ? &gf100_ce1 : &gf100_ce0, device, type, inst, true,
+				0x104000 + (inst * 0x1000), pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
index 9e0b53a10f773bf648f046cc8e151427a88afc94..27f29eb0494da1212add0523ef51128ce8a5e0a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
@@ -58,9 +58,9 @@ gk104_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
 void
 gk104_ce_intr(struct nvkm_engine *ce)
 {
-	const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x1000;
 	struct nvkm_subdev *subdev = &ce->subdev;
 	struct nvkm_device *device = subdev->device;
+	const u32 base = subdev->inst * 0x1000;
 	u32 mask = nvkm_rd32(device, 0x104904 + base);
 	u32 intr = nvkm_rd32(device, 0x104908 + base) & mask;
 	if (intr & 0x00000001) {
@@ -94,8 +94,8 @@ gk104_ce = {
 };
 
 int
-gk104_ce_new(struct nvkm_device *device, int index,
+gk104_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	     struct nvkm_engine **pengine)
 {
-	return nvkm_engine_new_(&gk104_ce, device, index, true, pengine);
+	return nvkm_engine_new_(&gk104_ce, device, type, inst, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
index c0df7daa85e274760e13cde68d4530fce514f7cf..c3c476592c4343031970104ad753581cfd78c088 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
@@ -36,8 +36,8 @@ gm107_ce = {
 };
 
 int
-gm107_ce_new(struct nvkm_device *device, int index,
+gm107_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	     struct nvkm_engine **pengine)
 {
-	return nvkm_engine_new_(&gm107_ce, device, index, true, pengine);
+	return nvkm_engine_new_(&gm107_ce, device, type, inst, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
index c6fa8b20737e690cdc6d5d9e94bdd23b91b1129e..d2db61865371534b0b70b6781246be58787d20c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
@@ -35,8 +35,8 @@ gm200_ce = {
 };
 
 int
-gm200_ce_new(struct nvkm_device *device, int index,
+gm200_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	     struct nvkm_engine **pengine)
 {
-	return nvkm_engine_new_(&gm200_ce, device, index, true, pengine);
+	return nvkm_engine_new_(&gm200_ce, device, type, inst, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
index c7710456bc309d2c013a37711a1585b8d6a57784..a4f08a4472c900a942ec59722020ee4c8421e2a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
@@ -59,9 +59,9 @@ gp100_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
 void
 gp100_ce_intr(struct nvkm_engine *ce)
 {
-	const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x80;
 	struct nvkm_subdev *subdev = &ce->subdev;
 	struct nvkm_device *device = subdev->device;
+	const u32 base = subdev->inst * 0x80;
 	u32 mask = nvkm_rd32(device, 0x10440c + base);
 	u32 intr = nvkm_rd32(device, 0x104410 + base) & mask;
 	if (intr & 0x00000001) { //XXX: guess
@@ -95,8 +95,8 @@ gp100_ce = {
 };
 
 int
-gp100_ce_new(struct nvkm_device *device, int index,
+gp100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	     struct nvkm_engine **pengine)
 {
-	return nvkm_engine_new_(&gp100_ce, device, index, true, pengine);
+	return nvkm_engine_new_(&gp100_ce, device, type, inst, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c
index 985c8f653874d2a066ad683634ea776d390ac6ec..180d497a95eb0322dc0f792e0c4b500179ca40de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c
@@ -37,8 +37,8 @@ gp102_ce = {
 };
 
 int
-gp102_ce_new(struct nvkm_device *device, int index,
+gp102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	     struct nvkm_engine **pengine)
 {
-	return nvkm_engine_new_(&gp102_ce, device, index, true, pengine);
+	return nvkm_engine_new_(&gp102_ce, device, type, inst, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
index 63ac51a54fd3c981d105e9b1cdb0525ac7016be9..704df0f2d1f16c39fc5cbe897b80ffdc76fe3461 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
@@ -44,7 +44,7 @@ gt215_ce_intr(struct nvkm_falcon *ce, struct nvkm_fifo_chan *chan)
 {
 	struct nvkm_subdev *subdev = &ce->engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	const u32 base = (subdev->index - NVKM_ENGINE_CE0) * 0x1000;
+	const u32 base = subdev->inst * 0x1000;
 	u32 ssta = nvkm_rd32(device, 0x104040 + base) & 0x0000ffff;
 	u32 addr = nvkm_rd32(device, 0x104040 + base) >> 16;
 	u32 mthd = (addr & 0x07ff) << 2;
@@ -75,9 +75,9 @@ gt215_ce = {
 };
 
 int
-gt215_ce_new(struct nvkm_device *device, int index,
+gt215_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	     struct nvkm_engine **pengine)
 {
-	return nvkm_falcon_new_(&gt215_ce, device, index,
+	return nvkm_falcon_new_(&gt215_ce, device, type, inst,
 				(device->chipset != 0xaf), 0x104000, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
index fcda3de45857583be80a84bdaa02a0713200b1fb..cd5e9cdca1cf9609f3d88fcfc4d2de06ce93a527 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
@@ -33,8 +33,8 @@ gv100_ce = {
 };
 
 int
-gv100_ce_new(struct nvkm_device *device, int index,
+gv100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	     struct nvkm_engine **pengine)
 {
-	return nvkm_engine_new_(&gv100_ce, device, index, true, pengine);
+	return nvkm_engine_new_(&gv100_ce, device, type, inst, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
index b4308e2d8c759962fbd91e8deecea968a7b6d6e4..e5ff92d9364cb0e8ebabe7b4dead56cfb02b96af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
@@ -33,8 +33,8 @@ tu102_ce = {
 };
 
 int
-tu102_ce_new(struct nvkm_device *device, int index,
+tu102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	     struct nvkm_engine **pengine)
 {
-	return nvkm_engine_new_(&tu102_ce, device, index, true, pengine);
+	return nvkm_engine_new_(&tu102_ce, device, type, inst, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
index 68ffb520531e0f784b485e663f3f637b99528253..be2a7181dc159a41739f8e0caedae6d58cdef8fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
@@ -127,8 +127,8 @@ g84_cipher = {
 };
 
 int
-g84_cipher_new(struct nvkm_device *device, int index,
+g84_cipher_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	       struct nvkm_engine **pengine)
 {
-	return nvkm_engine_new_(&g84_cipher, device, index, true, pengine);
+	return nvkm_engine_new_(&g84_cipher, device, type, inst, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index cdcc851e06f9b5d8e82341048f8e77e53f07363c..b930f539feec727c98640c72f0249a94e3eead60 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -71,2640 +71,2557 @@ nvkm_device_list(u64 *name, int size)
 static const struct nvkm_device_chip
 null_chipset = {
 	.name = "NULL",
-	.bios = nvkm_bios_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
 };
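
Aside (editor's sketch): each chipset entry below now pairs an instance
bitmask with a constructor. Bit n set means instance n of that unit
exists, so single-instance subdevs all carry 0x00000001, while a GPU
with, say, three copy engines would use 0x00000007 for .ce. Roughly
(field names illustrative; real constructors take device/type/inst):

struct example_chip_entry {
	u32 inst;		/* bitmask: bit n set => instance n present */
	int (*ctor)(void);	/* per-type constructor; signature varies */
};
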
 
 static const struct nvkm_device_chip
 nv4_chipset = {
 	.name = "NV04",
-	.bios = nvkm_bios_new,
-	.bus = nv04_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv04_devinit_new,
-	.fb = nv04_fb_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv04_fifo_new,
-	.gr = nv04_gr_new,
-	.sw = nv04_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv04_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv04_devinit_new },
+	.fb       = { 0x00000001, nv04_fb_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv04_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv04_fifo_new },
+	.gr       = { 0x00000001, nv04_gr_new },
+	.sw       = { 0x00000001, nv04_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv5_chipset = {
 	.name = "NV05",
-	.bios = nvkm_bios_new,
-	.bus = nv04_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv05_devinit_new,
-	.fb = nv04_fb_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv04_fifo_new,
-	.gr = nv04_gr_new,
-	.sw = nv04_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv04_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv05_devinit_new },
+	.fb       = { 0x00000001, nv04_fb_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv04_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv04_fifo_new },
+	.gr       = { 0x00000001, nv04_gr_new },
+	.sw       = { 0x00000001, nv04_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv10_chipset = {
 	.name = "NV10",
-	.bios = nvkm_bios_new,
-	.bus = nv04_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv10_devinit_new,
-	.fb = nv10_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.gr = nv10_gr_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv04_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv10_devinit_new },
+	.fb       = { 0x00000001, nv10_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv04_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.gr       = { 0x00000001, nv10_gr_new },
 };
 
 static const struct nvkm_device_chip
 nv11_chipset = {
 	.name = "NV11",
-	.bios = nvkm_bios_new,
-	.bus = nv04_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv10_devinit_new,
-	.fb = nv10_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv11_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv10_fifo_new,
-	.gr = nv15_gr_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv04_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv10_devinit_new },
+	.fb       = { 0x00000001, nv10_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv11_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv10_fifo_new },
+	.gr       = { 0x00000001, nv15_gr_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv15_chipset = {
 	.name = "NV15",
-	.bios = nvkm_bios_new,
-	.bus = nv04_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv10_devinit_new,
-	.fb = nv10_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv10_fifo_new,
-	.gr = nv15_gr_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv04_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv10_devinit_new },
+	.fb       = { 0x00000001, nv10_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv04_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv10_fifo_new },
+	.gr       = { 0x00000001, nv15_gr_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv17_chipset = {
 	.name = "NV17",
-	.bios = nvkm_bios_new,
-	.bus = nv04_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv10_devinit_new,
-	.fb = nv10_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv17_fifo_new,
-	.gr = nv17_gr_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv04_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv10_devinit_new },
+	.fb       = { 0x00000001, nv10_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv17_fifo_new },
+	.gr       = { 0x00000001, nv17_gr_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv18_chipset = {
 	.name = "NV18",
-	.bios = nvkm_bios_new,
-	.bus = nv04_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv10_devinit_new,
-	.fb = nv10_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv17_fifo_new,
-	.gr = nv17_gr_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv04_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv10_devinit_new },
+	.fb       = { 0x00000001, nv10_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv17_fifo_new },
+	.gr       = { 0x00000001, nv17_gr_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv1a_chipset = {
 	.name = "nForce",
-	.bios = nvkm_bios_new,
-	.bus = nv04_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv1a_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv10_fifo_new,
-	.gr = nv15_gr_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv04_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv1a_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv04_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv10_fifo_new },
+	.gr       = { 0x00000001, nv15_gr_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv1f_chipset = {
 	.name = "nForce2",
-	.bios = nvkm_bios_new,
-	.bus = nv04_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv1a_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv17_fifo_new,
-	.gr = nv17_gr_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv04_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv1a_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv17_fifo_new },
+	.gr       = { 0x00000001, nv17_gr_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv20_chipset = {
 	.name = "NV20",
-	.bios = nvkm_bios_new,
-	.bus = nv04_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv20_devinit_new,
-	.fb = nv20_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv17_fifo_new,
-	.gr = nv20_gr_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv04_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv20_devinit_new },
+	.fb       = { 0x00000001, nv20_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv17_fifo_new },
+	.gr       = { 0x00000001, nv20_gr_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv25_chipset = {
 	.name = "NV25",
-	.bios = nvkm_bios_new,
-	.bus = nv04_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv20_devinit_new,
-	.fb = nv25_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv17_fifo_new,
-	.gr = nv25_gr_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv04_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv20_devinit_new },
+	.fb       = { 0x00000001, nv25_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv17_fifo_new },
+	.gr       = { 0x00000001, nv25_gr_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv28_chipset = {
 	.name = "NV28",
-	.bios = nvkm_bios_new,
-	.bus = nv04_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv20_devinit_new,
-	.fb = nv25_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv17_fifo_new,
-	.gr = nv25_gr_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv04_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv20_devinit_new },
+	.fb       = { 0x00000001, nv25_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv17_fifo_new },
+	.gr       = { 0x00000001, nv25_gr_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv2a_chipset = {
 	.name = "NV2A",
-	.bios = nvkm_bios_new,
-	.bus = nv04_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv20_devinit_new,
-	.fb = nv25_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv17_fifo_new,
-	.gr = nv2a_gr_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv04_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv20_devinit_new },
+	.fb       = { 0x00000001, nv25_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv17_fifo_new },
+	.gr       = { 0x00000001, nv2a_gr_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv30_chipset = {
 	.name = "NV30",
-	.bios = nvkm_bios_new,
-	.bus = nv04_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv20_devinit_new,
-	.fb = nv30_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv17_fifo_new,
-	.gr = nv30_gr_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv04_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv20_devinit_new },
+	.fb       = { 0x00000001, nv30_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv17_fifo_new },
+	.gr       = { 0x00000001, nv30_gr_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv31_chipset = {
 	.name = "NV31",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv20_devinit_new,
-	.fb = nv30_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv17_fifo_new,
-	.gr = nv30_gr_new,
-	.mpeg = nv31_mpeg_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv20_devinit_new },
+	.fb       = { 0x00000001, nv30_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv17_fifo_new },
+	.gr       = { 0x00000001, nv30_gr_new },
+	.mpeg     = { 0x00000001, nv31_mpeg_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv34_chipset = {
 	.name = "NV34",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv10_devinit_new,
-	.fb = nv10_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv17_fifo_new,
-	.gr = nv34_gr_new,
-	.mpeg = nv31_mpeg_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv10_devinit_new },
+	.fb       = { 0x00000001, nv10_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv17_fifo_new },
+	.gr       = { 0x00000001, nv34_gr_new },
+	.mpeg     = { 0x00000001, nv31_mpeg_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv35_chipset = {
 	.name = "NV35",
-	.bios = nvkm_bios_new,
-	.bus = nv04_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv20_devinit_new,
-	.fb = nv35_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv17_fifo_new,
-	.gr = nv35_gr_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv04_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv20_devinit_new },
+	.fb       = { 0x00000001, nv35_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv17_fifo_new },
+	.gr       = { 0x00000001, nv35_gr_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv36_chipset = {
 	.name = "NV36",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv04_clk_new,
-	.devinit = nv20_devinit_new,
-	.fb = nv36_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv04_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv04_pci_new,
-	.timer = nv04_timer_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv17_fifo_new,
-	.gr = nv35_gr_new,
-	.mpeg = nv31_mpeg_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv04_clk_new },
+	.devinit  = { 0x00000001, nv20_devinit_new },
+	.fb       = { 0x00000001, nv36_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv04_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv04_pci_new },
+	.timer    = { 0x00000001, nv04_timer_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv17_fifo_new },
+	.gr       = { 0x00000001, nv35_gr_new },
+	.mpeg     = { 0x00000001, nv31_mpeg_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv40_chipset = {
 	.name = "NV40",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv40_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv40_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv40_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv40_pci_new,
-	.therm = nv40_therm_new,
-	.timer = nv40_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv40_fifo_new,
-	.gr = nv40_gr_new,
-	.mpeg = nv40_mpeg_new,
-	.pm = nv40_pm_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv40_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv40_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv40_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv40_pci_new },
+	.therm    = { 0x00000001, nv40_therm_new },
+	.timer    = { 0x00000001, nv40_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv40_fifo_new },
+	.gr       = { 0x00000001, nv40_gr_new },
+	.mpeg     = { 0x00000001, nv40_mpeg_new },
+	.pm       = { 0x00000001, nv40_pm_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv41_chipset = {
 	.name = "NV41",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv40_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv41_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv40_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv41_mmu_new,
-	.pci = nv40_pci_new,
-	.therm = nv40_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv40_fifo_new,
-	.gr = nv40_gr_new,
-	.mpeg = nv40_mpeg_new,
-	.pm = nv40_pm_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv40_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv41_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv40_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv41_mmu_new },
+	.pci      = { 0x00000001, nv40_pci_new },
+	.therm    = { 0x00000001, nv40_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv40_fifo_new },
+	.gr       = { 0x00000001, nv40_gr_new },
+	.mpeg     = { 0x00000001, nv40_mpeg_new },
+	.pm       = { 0x00000001, nv40_pm_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv42_chipset = {
 	.name = "NV42",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv40_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv41_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv40_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv41_mmu_new,
-	.pci = nv40_pci_new,
-	.therm = nv40_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv40_fifo_new,
-	.gr = nv40_gr_new,
-	.mpeg = nv40_mpeg_new,
-	.pm = nv40_pm_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv40_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv41_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv40_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv41_mmu_new },
+	.pci      = { 0x00000001, nv40_pci_new },
+	.therm    = { 0x00000001, nv40_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv40_fifo_new },
+	.gr       = { 0x00000001, nv40_gr_new },
+	.mpeg     = { 0x00000001, nv40_mpeg_new },
+	.pm       = { 0x00000001, nv40_pm_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv43_chipset = {
 	.name = "NV43",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv40_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv41_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv40_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv41_mmu_new,
-	.pci = nv40_pci_new,
-	.therm = nv40_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv40_fifo_new,
-	.gr = nv40_gr_new,
-	.mpeg = nv40_mpeg_new,
-	.pm = nv40_pm_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv40_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv41_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv40_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv41_mmu_new },
+	.pci      = { 0x00000001, nv40_pci_new },
+	.therm    = { 0x00000001, nv40_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv40_fifo_new },
+	.gr       = { 0x00000001, nv40_gr_new },
+	.mpeg     = { 0x00000001, nv40_mpeg_new },
+	.pm       = { 0x00000001, nv40_pm_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv44_chipset = {
 	.name = "NV44",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv40_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv44_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv40_instmem_new,
-	.mc = nv44_mc_new,
-	.mmu = nv44_mmu_new,
-	.pci = nv40_pci_new,
-	.therm = nv40_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv40_fifo_new,
-	.gr = nv44_gr_new,
-	.mpeg = nv44_mpeg_new,
-	.pm = nv40_pm_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv40_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv44_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv40_instmem_new },
+	.mc       = { 0x00000001, nv44_mc_new },
+	.mmu      = { 0x00000001, nv44_mmu_new },
+	.pci      = { 0x00000001, nv40_pci_new },
+	.therm    = { 0x00000001, nv40_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv40_fifo_new },
+	.gr       = { 0x00000001, nv44_gr_new },
+	.mpeg     = { 0x00000001, nv44_mpeg_new },
+	.pm       = { 0x00000001, nv40_pm_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv45_chipset = {
 	.name = "NV45",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv40_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv40_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv40_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv40_pci_new,
-	.therm = nv40_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv40_fifo_new,
-	.gr = nv40_gr_new,
-	.mpeg = nv44_mpeg_new,
-	.pm = nv40_pm_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv40_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv40_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv40_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv40_pci_new },
+	.therm    = { 0x00000001, nv40_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv40_fifo_new },
+	.gr       = { 0x00000001, nv40_gr_new },
+	.mpeg     = { 0x00000001, nv44_mpeg_new },
+	.pm       = { 0x00000001, nv40_pm_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv46_chipset = {
 	.name = "G72",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv40_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv46_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv40_instmem_new,
-	.mc = nv44_mc_new,
-	.mmu = nv44_mmu_new,
-	.pci = nv46_pci_new,
-	.therm = nv40_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv40_fifo_new,
-	.gr = nv44_gr_new,
-	.mpeg = nv44_mpeg_new,
-	.pm = nv40_pm_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv40_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv46_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv40_instmem_new },
+	.mc       = { 0x00000001, nv44_mc_new },
+	.mmu      = { 0x00000001, nv44_mmu_new },
+	.pci      = { 0x00000001, nv46_pci_new },
+	.therm    = { 0x00000001, nv40_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv40_fifo_new },
+	.gr       = { 0x00000001, nv44_gr_new },
+	.mpeg     = { 0x00000001, nv44_mpeg_new },
+	.pm       = { 0x00000001, nv40_pm_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv47_chipset = {
 	.name = "G70",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv40_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv47_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv40_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv41_mmu_new,
-	.pci = nv40_pci_new,
-	.therm = nv40_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv40_fifo_new,
-	.gr = nv40_gr_new,
-	.mpeg = nv44_mpeg_new,
-	.pm = nv40_pm_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv40_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv47_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv40_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv41_mmu_new },
+	.pci      = { 0x00000001, nv40_pci_new },
+	.therm    = { 0x00000001, nv40_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv40_fifo_new },
+	.gr       = { 0x00000001, nv40_gr_new },
+	.mpeg     = { 0x00000001, nv44_mpeg_new },
+	.pm       = { 0x00000001, nv40_pm_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv49_chipset = {
 	.name = "G71",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv40_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv49_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv40_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv41_mmu_new,
-	.pci = nv40_pci_new,
-	.therm = nv40_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv40_fifo_new,
-	.gr = nv40_gr_new,
-	.mpeg = nv44_mpeg_new,
-	.pm = nv40_pm_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv40_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv49_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv40_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv41_mmu_new },
+	.pci      = { 0x00000001, nv40_pci_new },
+	.therm    = { 0x00000001, nv40_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv40_fifo_new },
+	.gr       = { 0x00000001, nv40_gr_new },
+	.mpeg     = { 0x00000001, nv44_mpeg_new },
+	.pm       = { 0x00000001, nv40_pm_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv4a_chipset = {
 	.name = "NV44A",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv40_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv44_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv40_instmem_new,
-	.mc = nv44_mc_new,
-	.mmu = nv04_mmu_new,
-	.pci = nv40_pci_new,
-	.therm = nv40_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv40_fifo_new,
-	.gr = nv44_gr_new,
-	.mpeg = nv44_mpeg_new,
-	.pm = nv40_pm_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv40_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv44_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv40_instmem_new },
+	.mc       = { 0x00000001, nv44_mc_new },
+	.mmu      = { 0x00000001, nv04_mmu_new },
+	.pci      = { 0x00000001, nv40_pci_new },
+	.therm    = { 0x00000001, nv40_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv40_fifo_new },
+	.gr       = { 0x00000001, nv44_gr_new },
+	.mpeg     = { 0x00000001, nv44_mpeg_new },
+	.pm       = { 0x00000001, nv40_pm_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv4b_chipset = {
 	.name = "G73",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv40_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv49_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv40_instmem_new,
-	.mc = nv17_mc_new,
-	.mmu = nv41_mmu_new,
-	.pci = nv40_pci_new,
-	.therm = nv40_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv40_fifo_new,
-	.gr = nv40_gr_new,
-	.mpeg = nv44_mpeg_new,
-	.pm = nv40_pm_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv40_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv49_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv40_instmem_new },
+	.mc       = { 0x00000001, nv17_mc_new },
+	.mmu      = { 0x00000001, nv41_mmu_new },
+	.pci      = { 0x00000001, nv40_pci_new },
+	.therm    = { 0x00000001, nv40_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv40_fifo_new },
+	.gr       = { 0x00000001, nv40_gr_new },
+	.mpeg     = { 0x00000001, nv44_mpeg_new },
+	.pm       = { 0x00000001, nv40_pm_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv4c_chipset = {
 	.name = "C61",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv40_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv46_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv40_instmem_new,
-	.mc = nv44_mc_new,
-	.mmu = nv44_mmu_new,
-	.pci = nv4c_pci_new,
-	.therm = nv40_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv40_fifo_new,
-	.gr = nv44_gr_new,
-	.mpeg = nv44_mpeg_new,
-	.pm = nv40_pm_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv40_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv46_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv40_instmem_new },
+	.mc       = { 0x00000001, nv44_mc_new },
+	.mmu      = { 0x00000001, nv44_mmu_new },
+	.pci      = { 0x00000001, nv4c_pci_new },
+	.therm    = { 0x00000001, nv40_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv40_fifo_new },
+	.gr       = { 0x00000001, nv44_gr_new },
+	.mpeg     = { 0x00000001, nv44_mpeg_new },
+	.pm       = { 0x00000001, nv40_pm_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv4e_chipset = {
 	.name = "C51",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv40_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv4e_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv4e_i2c_new,
-	.imem = nv40_instmem_new,
-	.mc = nv44_mc_new,
-	.mmu = nv44_mmu_new,
-	.pci = nv4c_pci_new,
-	.therm = nv40_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv40_fifo_new,
-	.gr = nv44_gr_new,
-	.mpeg = nv44_mpeg_new,
-	.pm = nv40_pm_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv40_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv4e_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv4e_i2c_new },
+	.imem     = { 0x00000001, nv40_instmem_new },
+	.mc       = { 0x00000001, nv44_mc_new },
+	.mmu      = { 0x00000001, nv44_mmu_new },
+	.pci      = { 0x00000001, nv4c_pci_new },
+	.therm    = { 0x00000001, nv40_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv40_fifo_new },
+	.gr       = { 0x00000001, nv44_gr_new },
+	.mpeg     = { 0x00000001, nv44_mpeg_new },
+	.pm       = { 0x00000001, nv40_pm_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv50_chipset = {
 	.name = "G80",
-	.bar = nv50_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = nv50_bus_new,
-	.clk = nv50_clk_new,
-	.devinit = nv50_devinit_new,
-	.fb = nv50_fb_new,
-	.fuse = nv50_fuse_new,
-	.gpio = nv50_gpio_new,
-	.i2c = nv50_i2c_new,
-	.imem = nv50_instmem_new,
-	.mc = nv50_mc_new,
-	.mmu = nv50_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = nv46_pci_new,
-	.therm = nv50_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv50_disp_new,
-	.dma = nv50_dma_new,
-	.fifo = nv50_fifo_new,
-	.gr = nv50_gr_new,
-	.mpeg = nv50_mpeg_new,
-	.pm = nv50_pm_new,
-	.sw = nv50_sw_new,
+	.bar      = { 0x00000001, nv50_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv50_bus_new },
+	.clk      = { 0x00000001, nv50_clk_new },
+	.devinit  = { 0x00000001, nv50_devinit_new },
+	.fb       = { 0x00000001, nv50_fb_new },
+	.fuse     = { 0x00000001, nv50_fuse_new },
+	.gpio     = { 0x00000001, nv50_gpio_new },
+	.i2c      = { 0x00000001, nv50_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, nv50_mc_new },
+	.mmu      = { 0x00000001, nv50_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, nv46_pci_new },
+	.therm    = { 0x00000001, nv50_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv50_disp_new },
+	.dma      = { 0x00000001, nv50_dma_new },
+	.fifo     = { 0x00000001, nv50_fifo_new },
+	.gr       = { 0x00000001, nv50_gr_new },
+	.mpeg     = { 0x00000001, nv50_mpeg_new },
+	.pm       = { 0x00000001, nv50_pm_new },
+	.sw       = { 0x00000001, nv50_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv63_chipset = {
 	.name = "C73",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv40_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv46_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv40_instmem_new,
-	.mc = nv44_mc_new,
-	.mmu = nv44_mmu_new,
-	.pci = nv4c_pci_new,
-	.therm = nv40_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv40_fifo_new,
-	.gr = nv44_gr_new,
-	.mpeg = nv44_mpeg_new,
-	.pm = nv40_pm_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv40_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv46_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv40_instmem_new },
+	.mc       = { 0x00000001, nv44_mc_new },
+	.mmu      = { 0x00000001, nv44_mmu_new },
+	.pci      = { 0x00000001, nv4c_pci_new },
+	.therm    = { 0x00000001, nv40_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv40_fifo_new },
+	.gr       = { 0x00000001, nv44_gr_new },
+	.mpeg     = { 0x00000001, nv44_mpeg_new },
+	.pm       = { 0x00000001, nv40_pm_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv67_chipset = {
 	.name = "C67",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv40_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv46_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv40_instmem_new,
-	.mc = nv44_mc_new,
-	.mmu = nv44_mmu_new,
-	.pci = nv4c_pci_new,
-	.therm = nv40_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv40_fifo_new,
-	.gr = nv44_gr_new,
-	.mpeg = nv44_mpeg_new,
-	.pm = nv40_pm_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv40_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv46_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv40_instmem_new },
+	.mc       = { 0x00000001, nv44_mc_new },
+	.mmu      = { 0x00000001, nv44_mmu_new },
+	.pci      = { 0x00000001, nv4c_pci_new },
+	.therm    = { 0x00000001, nv40_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv40_fifo_new },
+	.gr       = { 0x00000001, nv44_gr_new },
+	.mpeg     = { 0x00000001, nv44_mpeg_new },
+	.pm       = { 0x00000001, nv40_pm_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv68_chipset = {
 	.name = "C68",
-	.bios = nvkm_bios_new,
-	.bus = nv31_bus_new,
-	.clk = nv40_clk_new,
-	.devinit = nv1a_devinit_new,
-	.fb = nv46_fb_new,
-	.gpio = nv10_gpio_new,
-	.i2c = nv04_i2c_new,
-	.imem = nv40_instmem_new,
-	.mc = nv44_mc_new,
-	.mmu = nv44_mmu_new,
-	.pci = nv4c_pci_new,
-	.therm = nv40_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = nv04_disp_new,
-	.dma = nv04_dma_new,
-	.fifo = nv40_fifo_new,
-	.gr = nv44_gr_new,
-	.mpeg = nv44_mpeg_new,
-	.pm = nv40_pm_new,
-	.sw = nv10_sw_new,
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv31_bus_new },
+	.clk      = { 0x00000001, nv40_clk_new },
+	.devinit  = { 0x00000001, nv1a_devinit_new },
+	.fb       = { 0x00000001, nv46_fb_new },
+	.gpio     = { 0x00000001, nv10_gpio_new },
+	.i2c      = { 0x00000001, nv04_i2c_new },
+	.imem     = { 0x00000001, nv40_instmem_new },
+	.mc       = { 0x00000001, nv44_mc_new },
+	.mmu      = { 0x00000001, nv44_mmu_new },
+	.pci      = { 0x00000001, nv4c_pci_new },
+	.therm    = { 0x00000001, nv40_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, nv04_disp_new },
+	.dma      = { 0x00000001, nv04_dma_new },
+	.fifo     = { 0x00000001, nv40_fifo_new },
+	.gr       = { 0x00000001, nv44_gr_new },
+	.mpeg     = { 0x00000001, nv44_mpeg_new },
+	.pm       = { 0x00000001, nv40_pm_new },
+	.sw       = { 0x00000001, nv10_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv84_chipset = {
 	.name = "G84",
-	.bar = g84_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = nv50_bus_new,
-	.clk = g84_clk_new,
-	.devinit = g84_devinit_new,
-	.fb = g84_fb_new,
-	.fuse = nv50_fuse_new,
-	.gpio = nv50_gpio_new,
-	.i2c = nv50_i2c_new,
-	.imem = nv50_instmem_new,
-	.mc = g84_mc_new,
-	.mmu = g84_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = g84_pci_new,
-	.therm = g84_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.bsp = g84_bsp_new,
-	.cipher = g84_cipher_new,
-	.disp = g84_disp_new,
-	.dma = nv50_dma_new,
-	.fifo = g84_fifo_new,
-	.gr = g84_gr_new,
-	.mpeg = g84_mpeg_new,
-	.pm = g84_pm_new,
-	.sw = nv50_sw_new,
-	.vp = g84_vp_new,
+	.bar      = { 0x00000001, g84_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv50_bus_new },
+	.clk      = { 0x00000001, g84_clk_new },
+	.devinit  = { 0x00000001, g84_devinit_new },
+	.fb       = { 0x00000001, g84_fb_new },
+	.fuse     = { 0x00000001, nv50_fuse_new },
+	.gpio     = { 0x00000001, nv50_gpio_new },
+	.i2c      = { 0x00000001, nv50_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, g84_mc_new },
+	.mmu      = { 0x00000001, g84_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, g84_pci_new },
+	.therm    = { 0x00000001, g84_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.bsp      = { 0x00000001, g84_bsp_new },
+	.cipher   = { 0x00000001, g84_cipher_new },
+	.disp     = { 0x00000001, g84_disp_new },
+	.dma      = { 0x00000001, nv50_dma_new },
+	.fifo     = { 0x00000001, g84_fifo_new },
+	.gr       = { 0x00000001, g84_gr_new },
+	.mpeg     = { 0x00000001, g84_mpeg_new },
+	.pm       = { 0x00000001, g84_pm_new },
+	.sw       = { 0x00000001, nv50_sw_new },
+	.vp       = { 0x00000001, g84_vp_new },
 };
 
 static const struct nvkm_device_chip
 nv86_chipset = {
 	.name = "G86",
-	.bar = g84_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = nv50_bus_new,
-	.clk = g84_clk_new,
-	.devinit = g84_devinit_new,
-	.fb = g84_fb_new,
-	.fuse = nv50_fuse_new,
-	.gpio = nv50_gpio_new,
-	.i2c = nv50_i2c_new,
-	.imem = nv50_instmem_new,
-	.mc = g84_mc_new,
-	.mmu = g84_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = g84_pci_new,
-	.therm = g84_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.bsp = g84_bsp_new,
-	.cipher = g84_cipher_new,
-	.disp = g84_disp_new,
-	.dma = nv50_dma_new,
-	.fifo = g84_fifo_new,
-	.gr = g84_gr_new,
-	.mpeg = g84_mpeg_new,
-	.pm = g84_pm_new,
-	.sw = nv50_sw_new,
-	.vp = g84_vp_new,
+	.bar      = { 0x00000001, g84_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv50_bus_new },
+	.clk      = { 0x00000001, g84_clk_new },
+	.devinit  = { 0x00000001, g84_devinit_new },
+	.fb       = { 0x00000001, g84_fb_new },
+	.fuse     = { 0x00000001, nv50_fuse_new },
+	.gpio     = { 0x00000001, nv50_gpio_new },
+	.i2c      = { 0x00000001, nv50_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, g84_mc_new },
+	.mmu      = { 0x00000001, g84_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, g84_pci_new },
+	.therm    = { 0x00000001, g84_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.bsp      = { 0x00000001, g84_bsp_new },
+	.cipher   = { 0x00000001, g84_cipher_new },
+	.disp     = { 0x00000001, g84_disp_new },
+	.dma      = { 0x00000001, nv50_dma_new },
+	.fifo     = { 0x00000001, g84_fifo_new },
+	.gr       = { 0x00000001, g84_gr_new },
+	.mpeg     = { 0x00000001, g84_mpeg_new },
+	.pm       = { 0x00000001, g84_pm_new },
+	.sw       = { 0x00000001, nv50_sw_new },
+	.vp       = { 0x00000001, g84_vp_new },
 };
 
 static const struct nvkm_device_chip
 nv92_chipset = {
 	.name = "G92",
-	.bar = g84_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = nv50_bus_new,
-	.clk = g84_clk_new,
-	.devinit = g84_devinit_new,
-	.fb = g84_fb_new,
-	.fuse = nv50_fuse_new,
-	.gpio = nv50_gpio_new,
-	.i2c = nv50_i2c_new,
-	.imem = nv50_instmem_new,
-	.mc = g84_mc_new,
-	.mmu = g84_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = g92_pci_new,
-	.therm = g84_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.bsp = g84_bsp_new,
-	.cipher = g84_cipher_new,
-	.disp = g84_disp_new,
-	.dma = nv50_dma_new,
-	.fifo = g84_fifo_new,
-	.gr = g84_gr_new,
-	.mpeg = g84_mpeg_new,
-	.pm = g84_pm_new,
-	.sw = nv50_sw_new,
-	.vp = g84_vp_new,
+	.bar      = { 0x00000001, g84_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, nv50_bus_new },
+	.clk      = { 0x00000001, g84_clk_new },
+	.devinit  = { 0x00000001, g84_devinit_new },
+	.fb       = { 0x00000001, g84_fb_new },
+	.fuse     = { 0x00000001, nv50_fuse_new },
+	.gpio     = { 0x00000001, nv50_gpio_new },
+	.i2c      = { 0x00000001, nv50_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, g84_mc_new },
+	.mmu      = { 0x00000001, g84_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, g92_pci_new },
+	.therm    = { 0x00000001, g84_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.bsp      = { 0x00000001, g84_bsp_new },
+	.cipher   = { 0x00000001, g84_cipher_new },
+	.disp     = { 0x00000001, g84_disp_new },
+	.dma      = { 0x00000001, nv50_dma_new },
+	.fifo     = { 0x00000001, g84_fifo_new },
+	.gr       = { 0x00000001, g84_gr_new },
+	.mpeg     = { 0x00000001, g84_mpeg_new },
+	.pm       = { 0x00000001, g84_pm_new },
+	.sw       = { 0x00000001, nv50_sw_new },
+	.vp       = { 0x00000001, g84_vp_new },
 };
 
 static const struct nvkm_device_chip
 nv94_chipset = {
 	.name = "G94",
-	.bar = g84_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = g94_bus_new,
-	.clk = g84_clk_new,
-	.devinit = g84_devinit_new,
-	.fb = g84_fb_new,
-	.fuse = nv50_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = g94_i2c_new,
-	.imem = nv50_instmem_new,
-	.mc = g84_mc_new,
-	.mmu = g84_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = g94_pci_new,
-	.therm = g84_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.bsp = g84_bsp_new,
-	.cipher = g84_cipher_new,
-	.disp = g94_disp_new,
-	.dma = nv50_dma_new,
-	.fifo = g84_fifo_new,
-	.gr = g84_gr_new,
-	.mpeg = g84_mpeg_new,
-	.pm = g84_pm_new,
-	.sw = nv50_sw_new,
-	.vp = g84_vp_new,
+	.bar      = { 0x00000001, g84_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, g94_bus_new },
+	.clk      = { 0x00000001, g84_clk_new },
+	.devinit  = { 0x00000001, g84_devinit_new },
+	.fb       = { 0x00000001, g84_fb_new },
+	.fuse     = { 0x00000001, nv50_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, g94_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, g84_mc_new },
+	.mmu      = { 0x00000001, g84_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, g94_pci_new },
+	.therm    = { 0x00000001, g84_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.bsp      = { 0x00000001, g84_bsp_new },
+	.cipher   = { 0x00000001, g84_cipher_new },
+	.disp     = { 0x00000001, g94_disp_new },
+	.dma      = { 0x00000001, nv50_dma_new },
+	.fifo     = { 0x00000001, g84_fifo_new },
+	.gr       = { 0x00000001, g84_gr_new },
+	.mpeg     = { 0x00000001, g84_mpeg_new },
+	.pm       = { 0x00000001, g84_pm_new },
+	.sw       = { 0x00000001, nv50_sw_new },
+	.vp       = { 0x00000001, g84_vp_new },
 };
 
 static const struct nvkm_device_chip
 nv96_chipset = {
 	.name = "G96",
-	.bar = g84_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = g94_bus_new,
-	.clk = g84_clk_new,
-	.devinit = g84_devinit_new,
-	.fb = g84_fb_new,
-	.fuse = nv50_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = g94_i2c_new,
-	.imem = nv50_instmem_new,
-	.mc = g84_mc_new,
-	.mmu = g84_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = g94_pci_new,
-	.therm = g84_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.bsp = g84_bsp_new,
-	.cipher = g84_cipher_new,
-	.disp = g94_disp_new,
-	.dma = nv50_dma_new,
-	.fifo = g84_fifo_new,
-	.gr = g84_gr_new,
-	.mpeg = g84_mpeg_new,
-	.pm = g84_pm_new,
-	.sw = nv50_sw_new,
-	.vp = g84_vp_new,
+	.bar      = { 0x00000001, g84_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, g94_bus_new },
+	.clk      = { 0x00000001, g84_clk_new },
+	.devinit  = { 0x00000001, g84_devinit_new },
+	.fb       = { 0x00000001, g84_fb_new },
+	.fuse     = { 0x00000001, nv50_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, g94_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, g84_mc_new },
+	.mmu      = { 0x00000001, g84_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, g94_pci_new },
+	.therm    = { 0x00000001, g84_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.bsp      = { 0x00000001, g84_bsp_new },
+	.cipher   = { 0x00000001, g84_cipher_new },
+	.disp     = { 0x00000001, g94_disp_new },
+	.dma      = { 0x00000001, nv50_dma_new },
+	.fifo     = { 0x00000001, g84_fifo_new },
+	.gr       = { 0x00000001, g84_gr_new },
+	.mpeg     = { 0x00000001, g84_mpeg_new },
+	.pm       = { 0x00000001, g84_pm_new },
+	.sw       = { 0x00000001, nv50_sw_new },
+	.vp       = { 0x00000001, g84_vp_new },
 };
 
 static const struct nvkm_device_chip
 nv98_chipset = {
 	.name = "G98",
-	.bar = g84_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = g94_bus_new,
-	.clk = g84_clk_new,
-	.devinit = g98_devinit_new,
-	.fb = g84_fb_new,
-	.fuse = nv50_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = g94_i2c_new,
-	.imem = nv50_instmem_new,
-	.mc = g98_mc_new,
-	.mmu = g84_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = g94_pci_new,
-	.therm = g84_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = g94_disp_new,
-	.dma = nv50_dma_new,
-	.fifo = g84_fifo_new,
-	.gr = g84_gr_new,
-	.mspdec = g98_mspdec_new,
-	.msppp = g98_msppp_new,
-	.msvld = g98_msvld_new,
-	.pm = g84_pm_new,
-	.sec = g98_sec_new,
-	.sw = nv50_sw_new,
+	.bar      = { 0x00000001, g84_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, g94_bus_new },
+	.clk      = { 0x00000001, g84_clk_new },
+	.devinit  = { 0x00000001, g98_devinit_new },
+	.fb       = { 0x00000001, g84_fb_new },
+	.fuse     = { 0x00000001, nv50_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, g94_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, g98_mc_new },
+	.mmu      = { 0x00000001, g84_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, g94_pci_new },
+	.therm    = { 0x00000001, g84_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, g94_disp_new },
+	.dma      = { 0x00000001, nv50_dma_new },
+	.fifo     = { 0x00000001, g84_fifo_new },
+	.gr       = { 0x00000001, g84_gr_new },
+	.mspdec   = { 0x00000001, g98_mspdec_new },
+	.msppp    = { 0x00000001, g98_msppp_new },
+	.msvld    = { 0x00000001, g98_msvld_new },
+	.pm       = { 0x00000001, g84_pm_new },
+	.sec      = { 0x00000001, g98_sec_new },
+	.sw       = { 0x00000001, nv50_sw_new },
 };
 
 static const struct nvkm_device_chip
 nva0_chipset = {
 	.name = "GT200",
-	.bar = g84_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = g94_bus_new,
-	.clk = g84_clk_new,
-	.devinit = g84_devinit_new,
-	.fb = g84_fb_new,
-	.fuse = nv50_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = nv50_i2c_new,
-	.imem = nv50_instmem_new,
-	.mc = g84_mc_new,
-	.mmu = g84_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = g94_pci_new,
-	.therm = g84_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.bsp = g84_bsp_new,
-	.cipher = g84_cipher_new,
-	.disp = gt200_disp_new,
-	.dma = nv50_dma_new,
-	.fifo = g84_fifo_new,
-	.gr = gt200_gr_new,
-	.mpeg = g84_mpeg_new,
-	.pm = gt200_pm_new,
-	.sw = nv50_sw_new,
-	.vp = g84_vp_new,
+	.bar      = { 0x00000001, g84_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, g94_bus_new },
+	.clk      = { 0x00000001, g84_clk_new },
+	.devinit  = { 0x00000001, g84_devinit_new },
+	.fb       = { 0x00000001, g84_fb_new },
+	.fuse     = { 0x00000001, nv50_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, nv50_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, g84_mc_new },
+	.mmu      = { 0x00000001, g84_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, g94_pci_new },
+	.therm    = { 0x00000001, g84_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.bsp      = { 0x00000001, g84_bsp_new },
+	.cipher   = { 0x00000001, g84_cipher_new },
+	.disp     = { 0x00000001, gt200_disp_new },
+	.dma      = { 0x00000001, nv50_dma_new },
+	.fifo     = { 0x00000001, g84_fifo_new },
+	.gr       = { 0x00000001, gt200_gr_new },
+	.mpeg     = { 0x00000001, g84_mpeg_new },
+	.pm       = { 0x00000001, gt200_pm_new },
+	.sw       = { 0x00000001, nv50_sw_new },
+	.vp       = { 0x00000001, g84_vp_new },
 };
 
 static const struct nvkm_device_chip
 nva3_chipset = {
 	.name = "GT215",
-	.bar = g84_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = g94_bus_new,
-	.clk = gt215_clk_new,
-	.devinit = gt215_devinit_new,
-	.fb = gt215_fb_new,
-	.fuse = nv50_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = g94_i2c_new,
-	.imem = nv50_instmem_new,
-	.mc = gt215_mc_new,
-	.mmu = g84_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = g94_pci_new,
-	.pmu = gt215_pmu_new,
-	.therm = gt215_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.ce[0] = gt215_ce_new,
-	.disp = gt215_disp_new,
-	.dma = nv50_dma_new,
-	.fifo = g84_fifo_new,
-	.gr = gt215_gr_new,
-	.mpeg = g84_mpeg_new,
-	.mspdec = gt215_mspdec_new,
-	.msppp = gt215_msppp_new,
-	.msvld = gt215_msvld_new,
-	.pm = gt215_pm_new,
-	.sw = nv50_sw_new,
+	.bar      = { 0x00000001, g84_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, g94_bus_new },
+	.clk      = { 0x00000001, gt215_clk_new },
+	.devinit  = { 0x00000001, gt215_devinit_new },
+	.fb       = { 0x00000001, gt215_fb_new },
+	.fuse     = { 0x00000001, nv50_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, g94_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, gt215_mc_new },
+	.mmu      = { 0x00000001, g84_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, g94_pci_new },
+	.pmu      = { 0x00000001, gt215_pmu_new },
+	.therm    = { 0x00000001, gt215_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
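+	/* formerly .ce[0]; bit 0 of the mask selects the single copy engine */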
+	.ce       = { 0x00000001, gt215_ce_new },
+	.disp     = { 0x00000001, gt215_disp_new },
+	.dma      = { 0x00000001, nv50_dma_new },
+	.fifo     = { 0x00000001, g84_fifo_new },
+	.gr       = { 0x00000001, gt215_gr_new },
+	.mpeg     = { 0x00000001, g84_mpeg_new },
+	.mspdec   = { 0x00000001, gt215_mspdec_new },
+	.msppp    = { 0x00000001, gt215_msppp_new },
+	.msvld    = { 0x00000001, gt215_msvld_new },
+	.pm       = { 0x00000001, gt215_pm_new },
+	.sw       = { 0x00000001, nv50_sw_new },
 };
 
 static const struct nvkm_device_chip
 nva5_chipset = {
 	.name = "GT216",
-	.bar = g84_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = g94_bus_new,
-	.clk = gt215_clk_new,
-	.devinit = gt215_devinit_new,
-	.fb = gt215_fb_new,
-	.fuse = nv50_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = g94_i2c_new,
-	.imem = nv50_instmem_new,
-	.mc = gt215_mc_new,
-	.mmu = g84_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = g94_pci_new,
-	.pmu = gt215_pmu_new,
-	.therm = gt215_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.ce[0] = gt215_ce_new,
-	.disp = gt215_disp_new,
-	.dma = nv50_dma_new,
-	.fifo = g84_fifo_new,
-	.gr = gt215_gr_new,
-	.mspdec = gt215_mspdec_new,
-	.msppp = gt215_msppp_new,
-	.msvld = gt215_msvld_new,
-	.pm = gt215_pm_new,
-	.sw = nv50_sw_new,
+	.bar      = { 0x00000001, g84_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, g94_bus_new },
+	.clk      = { 0x00000001, gt215_clk_new },
+	.devinit  = { 0x00000001, gt215_devinit_new },
+	.fb       = { 0x00000001, gt215_fb_new },
+	.fuse     = { 0x00000001, nv50_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, g94_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, gt215_mc_new },
+	.mmu      = { 0x00000001, g84_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, g94_pci_new },
+	.pmu      = { 0x00000001, gt215_pmu_new },
+	.therm    = { 0x00000001, gt215_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.ce       = { 0x00000001, gt215_ce_new },
+	.disp     = { 0x00000001, gt215_disp_new },
+	.dma      = { 0x00000001, nv50_dma_new },
+	.fifo     = { 0x00000001, g84_fifo_new },
+	.gr       = { 0x00000001, gt215_gr_new },
+	.mspdec   = { 0x00000001, gt215_mspdec_new },
+	.msppp    = { 0x00000001, gt215_msppp_new },
+	.msvld    = { 0x00000001, gt215_msvld_new },
+	.pm       = { 0x00000001, gt215_pm_new },
+	.sw       = { 0x00000001, nv50_sw_new },
 };
 
 static const struct nvkm_device_chip
 nva8_chipset = {
 	.name = "GT218",
-	.bar = g84_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = g94_bus_new,
-	.clk = gt215_clk_new,
-	.devinit = gt215_devinit_new,
-	.fb = gt215_fb_new,
-	.fuse = nv50_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = g94_i2c_new,
-	.imem = nv50_instmem_new,
-	.mc = gt215_mc_new,
-	.mmu = g84_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = g94_pci_new,
-	.pmu = gt215_pmu_new,
-	.therm = gt215_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.ce[0] = gt215_ce_new,
-	.disp = gt215_disp_new,
-	.dma = nv50_dma_new,
-	.fifo = g84_fifo_new,
-	.gr = gt215_gr_new,
-	.mspdec = gt215_mspdec_new,
-	.msppp = gt215_msppp_new,
-	.msvld = gt215_msvld_new,
-	.pm = gt215_pm_new,
-	.sw = nv50_sw_new,
+	.bar      = { 0x00000001, g84_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, g94_bus_new },
+	.clk      = { 0x00000001, gt215_clk_new },
+	.devinit  = { 0x00000001, gt215_devinit_new },
+	.fb       = { 0x00000001, gt215_fb_new },
+	.fuse     = { 0x00000001, nv50_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, g94_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, gt215_mc_new },
+	.mmu      = { 0x00000001, g84_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, g94_pci_new },
+	.pmu      = { 0x00000001, gt215_pmu_new },
+	.therm    = { 0x00000001, gt215_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.ce       = { 0x00000001, gt215_ce_new },
+	.disp     = { 0x00000001, gt215_disp_new },
+	.dma      = { 0x00000001, nv50_dma_new },
+	.fifo     = { 0x00000001, g84_fifo_new },
+	.gr       = { 0x00000001, gt215_gr_new },
+	.mspdec   = { 0x00000001, gt215_mspdec_new },
+	.msppp    = { 0x00000001, gt215_msppp_new },
+	.msvld    = { 0x00000001, gt215_msvld_new },
+	.pm       = { 0x00000001, gt215_pm_new },
+	.sw       = { 0x00000001, nv50_sw_new },
 };
 
 static const struct nvkm_device_chip
 nvaa_chipset = {
 	.name = "MCP77/MCP78",
-	.bar = g84_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = g94_bus_new,
-	.clk = mcp77_clk_new,
-	.devinit = g98_devinit_new,
-	.fb = mcp77_fb_new,
-	.fuse = nv50_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = g94_i2c_new,
-	.imem = nv50_instmem_new,
-	.mc = g98_mc_new,
-	.mmu = mcp77_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = g94_pci_new,
-	.therm = g84_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = mcp77_disp_new,
-	.dma = nv50_dma_new,
-	.fifo = g84_fifo_new,
-	.gr = gt200_gr_new,
-	.mspdec = g98_mspdec_new,
-	.msppp = g98_msppp_new,
-	.msvld = g98_msvld_new,
-	.pm = g84_pm_new,
-	.sec = g98_sec_new,
-	.sw = nv50_sw_new,
+	.bar      = { 0x00000001, g84_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, g94_bus_new },
+	.clk      = { 0x00000001, mcp77_clk_new },
+	.devinit  = { 0x00000001, g98_devinit_new },
+	.fb       = { 0x00000001, mcp77_fb_new },
+	.fuse     = { 0x00000001, nv50_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, g94_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, g98_mc_new },
+	.mmu      = { 0x00000001, mcp77_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, g94_pci_new },
+	.therm    = { 0x00000001, g84_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, mcp77_disp_new },
+	.dma      = { 0x00000001, nv50_dma_new },
+	.fifo     = { 0x00000001, g84_fifo_new },
+	.gr       = { 0x00000001, gt200_gr_new },
+	.mspdec   = { 0x00000001, g98_mspdec_new },
+	.msppp    = { 0x00000001, g98_msppp_new },
+	.msvld    = { 0x00000001, g98_msvld_new },
+	.pm       = { 0x00000001, g84_pm_new },
+	.sec      = { 0x00000001, g98_sec_new },
+	.sw       = { 0x00000001, nv50_sw_new },
 };
 
 static const struct nvkm_device_chip
 nvac_chipset = {
 	.name = "MCP79/MCP7A",
-	.bar = g84_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = g94_bus_new,
-	.clk = mcp77_clk_new,
-	.devinit = g98_devinit_new,
-	.fb = mcp77_fb_new,
-	.fuse = nv50_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = g94_i2c_new,
-	.imem = nv50_instmem_new,
-	.mc = g98_mc_new,
-	.mmu = mcp77_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = g94_pci_new,
-	.therm = g84_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.disp = mcp77_disp_new,
-	.dma = nv50_dma_new,
-	.fifo = g84_fifo_new,
-	.gr = mcp79_gr_new,
-	.mspdec = g98_mspdec_new,
-	.msppp = g98_msppp_new,
-	.msvld = g98_msvld_new,
-	.pm = g84_pm_new,
-	.sec = g98_sec_new,
-	.sw = nv50_sw_new,
+	.bar      = { 0x00000001, g84_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, g94_bus_new },
+	.clk      = { 0x00000001, mcp77_clk_new },
+	.devinit  = { 0x00000001, g98_devinit_new },
+	.fb       = { 0x00000001, mcp77_fb_new },
+	.fuse     = { 0x00000001, nv50_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, g94_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, g98_mc_new },
+	.mmu      = { 0x00000001, mcp77_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, g94_pci_new },
+	.therm    = { 0x00000001, g84_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.disp     = { 0x00000001, mcp77_disp_new },
+	.dma      = { 0x00000001, nv50_dma_new },
+	.fifo     = { 0x00000001, g84_fifo_new },
+	.gr       = { 0x00000001, mcp79_gr_new },
+	.mspdec   = { 0x00000001, g98_mspdec_new },
+	.msppp    = { 0x00000001, g98_msppp_new },
+	.msvld    = { 0x00000001, g98_msvld_new },
+	.pm       = { 0x00000001, g84_pm_new },
+	.sec      = { 0x00000001, g98_sec_new },
+	.sw       = { 0x00000001, nv50_sw_new },
 };
 
 static const struct nvkm_device_chip
 nvaf_chipset = {
 	.name = "MCP89",
-	.bar = g84_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = g94_bus_new,
-	.clk = gt215_clk_new,
-	.devinit = mcp89_devinit_new,
-	.fb = mcp89_fb_new,
-	.fuse = nv50_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = g94_i2c_new,
-	.imem = nv50_instmem_new,
-	.mc = gt215_mc_new,
-	.mmu = mcp77_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = g94_pci_new,
-	.pmu = gt215_pmu_new,
-	.therm = gt215_therm_new,
-	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
-	.ce[0] = gt215_ce_new,
-	.disp = mcp89_disp_new,
-	.dma = nv50_dma_new,
-	.fifo = g84_fifo_new,
-	.gr = mcp89_gr_new,
-	.mspdec = gt215_mspdec_new,
-	.msppp = gt215_msppp_new,
-	.msvld = mcp89_msvld_new,
-	.pm = gt215_pm_new,
-	.sw = nv50_sw_new,
+	.bar      = { 0x00000001, g84_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, g94_bus_new },
+	.clk      = { 0x00000001, gt215_clk_new },
+	.devinit  = { 0x00000001, mcp89_devinit_new },
+	.fb       = { 0x00000001, mcp89_fb_new },
+	.fuse     = { 0x00000001, nv50_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, g94_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, gt215_mc_new },
+	.mmu      = { 0x00000001, mcp77_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, g94_pci_new },
+	.pmu      = { 0x00000001, gt215_pmu_new },
+	.therm    = { 0x00000001, gt215_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, nv40_volt_new },
+	.ce       = { 0x00000001, gt215_ce_new },
+	.disp     = { 0x00000001, mcp89_disp_new },
+	.dma      = { 0x00000001, nv50_dma_new },
+	.fifo     = { 0x00000001, g84_fifo_new },
+	.gr       = { 0x00000001, mcp89_gr_new },
+	.mspdec   = { 0x00000001, gt215_mspdec_new },
+	.msppp    = { 0x00000001, gt215_msppp_new },
+	.msvld    = { 0x00000001, mcp89_msvld_new },
+	.pm       = { 0x00000001, gt215_pm_new },
+	.sw       = { 0x00000001, nv50_sw_new },
 };
 
 static const struct nvkm_device_chip
 nvc0_chipset = {
 	.name = "GF100",
-	.bar = gf100_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gf100_clk_new,
-	.devinit = gf100_devinit_new,
-	.fb = gf100_fb_new,
-	.fuse = gf100_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = g94_i2c_new,
-	.ibus = gf100_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gf100_ltc_new,
-	.mc = gf100_mc_new,
-	.mmu = gf100_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gf100_pci_new,
-	.pmu = gf100_pmu_new,
-	.therm = gt215_therm_new,
-	.timer = nv41_timer_new,
-	.volt = gf100_volt_new,
-	.ce[0] = gf100_ce_new,
-	.ce[1] = gf100_ce_new,
-	.disp = gt215_disp_new,
-	.dma = gf100_dma_new,
-	.fifo = gf100_fifo_new,
-	.gr = gf100_gr_new,
-	.mspdec = gf100_mspdec_new,
-	.msppp = gf100_msppp_new,
-	.msvld = gf100_msvld_new,
-	.pm = gf100_pm_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gf100_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gf100_clk_new },
+	.devinit  = { 0x00000001, gf100_devinit_new },
+	.fb       = { 0x00000001, gf100_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, g94_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gf100_ltc_new },
+	.mc       = { 0x00000001, gf100_mc_new },
+	.mmu      = { 0x00000001, gf100_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gf100_pci_new },
+	.pmu      = { 0x00000001, gf100_pmu_new },
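+	/* privring takes over the slot previously held by .ibus */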
+	.privring = { 0x00000001, gf100_privring_new },
+	.therm    = { 0x00000001, gt215_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, gf100_volt_new },
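+	/* mask 0x00000003 merges the old .ce[0] and .ce[1] entries: two instances, one ctor */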
+	.ce       = { 0x00000003, gf100_ce_new },
+	.disp     = { 0x00000001, gt215_disp_new },
+	.dma      = { 0x00000001, gf100_dma_new },
+	.fifo     = { 0x00000001, gf100_fifo_new },
+	.gr       = { 0x00000001, gf100_gr_new },
+	.mspdec   = { 0x00000001, gf100_mspdec_new },
+	.msppp    = { 0x00000001, gf100_msppp_new },
+	.msvld    = { 0x00000001, gf100_msvld_new },
+	.pm       = { 0x00000001, gf100_pm_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nvc1_chipset = {
 	.name = "GF108",
-	.bar = gf100_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gf100_clk_new,
-	.devinit = gf100_devinit_new,
-	.fb = gf108_fb_new,
-	.fuse = gf100_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = g94_i2c_new,
-	.ibus = gf100_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gf100_ltc_new,
-	.mc = gf100_mc_new,
-	.mmu = gf100_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gf106_pci_new,
-	.pmu = gf100_pmu_new,
-	.therm = gt215_therm_new,
-	.timer = nv41_timer_new,
-	.volt = gf100_volt_new,
-	.ce[0] = gf100_ce_new,
-	.disp = gt215_disp_new,
-	.dma = gf100_dma_new,
-	.fifo = gf100_fifo_new,
-	.gr = gf108_gr_new,
-	.mspdec = gf100_mspdec_new,
-	.msppp = gf100_msppp_new,
-	.msvld = gf100_msvld_new,
-	.pm = gf108_pm_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gf100_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gf100_clk_new },
+	.devinit  = { 0x00000001, gf100_devinit_new },
+	.fb       = { 0x00000001, gf108_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, g94_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gf100_ltc_new },
+	.mc       = { 0x00000001, gf100_mc_new },
+	.mmu      = { 0x00000001, gf100_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gf106_pci_new },
+	.pmu      = { 0x00000001, gf100_pmu_new },
+	.privring = { 0x00000001, gf100_privring_new },
+	.therm    = { 0x00000001, gt215_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, gf100_volt_new },
+	.ce       = { 0x00000001, gf100_ce_new },
+	.disp     = { 0x00000001, gt215_disp_new },
+	.dma      = { 0x00000001, gf100_dma_new },
+	.fifo     = { 0x00000001, gf100_fifo_new },
+	.gr       = { 0x00000001, gf108_gr_new },
+	.mspdec   = { 0x00000001, gf100_mspdec_new },
+	.msppp    = { 0x00000001, gf100_msppp_new },
+	.msvld    = { 0x00000001, gf100_msvld_new },
+	.pm       = { 0x00000001, gf108_pm_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nvc3_chipset = {
 	.name = "GF106",
-	.bar = gf100_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gf100_clk_new,
-	.devinit = gf100_devinit_new,
-	.fb = gf100_fb_new,
-	.fuse = gf100_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = g94_i2c_new,
-	.ibus = gf100_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gf100_ltc_new,
-	.mc = gf100_mc_new,
-	.mmu = gf100_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gf106_pci_new,
-	.pmu = gf100_pmu_new,
-	.therm = gt215_therm_new,
-	.timer = nv41_timer_new,
-	.volt = gf100_volt_new,
-	.ce[0] = gf100_ce_new,
-	.disp = gt215_disp_new,
-	.dma = gf100_dma_new,
-	.fifo = gf100_fifo_new,
-	.gr = gf104_gr_new,
-	.mspdec = gf100_mspdec_new,
-	.msppp = gf100_msppp_new,
-	.msvld = gf100_msvld_new,
-	.pm = gf100_pm_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gf100_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gf100_clk_new },
+	.devinit  = { 0x00000001, gf100_devinit_new },
+	.fb       = { 0x00000001, gf100_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, g94_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gf100_ltc_new },
+	.mc       = { 0x00000001, gf100_mc_new },
+	.mmu      = { 0x00000001, gf100_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gf106_pci_new },
+	.pmu      = { 0x00000001, gf100_pmu_new },
+	.privring = { 0x00000001, gf100_privring_new },
+	.therm    = { 0x00000001, gt215_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, gf100_volt_new },
+	.ce       = { 0x00000001, gf100_ce_new },
+	.disp     = { 0x00000001, gt215_disp_new },
+	.dma      = { 0x00000001, gf100_dma_new },
+	.fifo     = { 0x00000001, gf100_fifo_new },
+	.gr       = { 0x00000001, gf104_gr_new },
+	.mspdec   = { 0x00000001, gf100_mspdec_new },
+	.msppp    = { 0x00000001, gf100_msppp_new },
+	.msvld    = { 0x00000001, gf100_msvld_new },
+	.pm       = { 0x00000001, gf100_pm_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nvc4_chipset = {
 	.name = "GF104",
-	.bar = gf100_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gf100_clk_new,
-	.devinit = gf100_devinit_new,
-	.fb = gf100_fb_new,
-	.fuse = gf100_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = g94_i2c_new,
-	.ibus = gf100_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gf100_ltc_new,
-	.mc = gf100_mc_new,
-	.mmu = gf100_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gf100_pci_new,
-	.pmu = gf100_pmu_new,
-	.therm = gt215_therm_new,
-	.timer = nv41_timer_new,
-	.volt = gf100_volt_new,
-	.ce[0] = gf100_ce_new,
-	.ce[1] = gf100_ce_new,
-	.disp = gt215_disp_new,
-	.dma = gf100_dma_new,
-	.fifo = gf100_fifo_new,
-	.gr = gf104_gr_new,
-	.mspdec = gf100_mspdec_new,
-	.msppp = gf100_msppp_new,
-	.msvld = gf100_msvld_new,
-	.pm = gf100_pm_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gf100_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gf100_clk_new },
+	.devinit  = { 0x00000001, gf100_devinit_new },
+	.fb       = { 0x00000001, gf100_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, g94_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gf100_ltc_new },
+	.mc       = { 0x00000001, gf100_mc_new },
+	.mmu      = { 0x00000001, gf100_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gf100_pci_new },
+	.pmu      = { 0x00000001, gf100_pmu_new },
+	.privring = { 0x00000001, gf100_privring_new },
+	.therm    = { 0x00000001, gt215_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, gf100_volt_new },
+	.ce       = { 0x00000003, gf100_ce_new },
+	.disp     = { 0x00000001, gt215_disp_new },
+	.dma      = { 0x00000001, gf100_dma_new },
+	.fifo     = { 0x00000001, gf100_fifo_new },
+	.gr       = { 0x00000001, gf104_gr_new },
+	.mspdec   = { 0x00000001, gf100_mspdec_new },
+	.msppp    = { 0x00000001, gf100_msppp_new },
+	.msvld    = { 0x00000001, gf100_msvld_new },
+	.pm       = { 0x00000001, gf100_pm_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nvc8_chipset = {
 	.name = "GF110",
-	.bar = gf100_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gf100_clk_new,
-	.devinit = gf100_devinit_new,
-	.fb = gf100_fb_new,
-	.fuse = gf100_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = g94_i2c_new,
-	.ibus = gf100_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gf100_ltc_new,
-	.mc = gf100_mc_new,
-	.mmu = gf100_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gf100_pci_new,
-	.pmu = gf100_pmu_new,
-	.therm = gt215_therm_new,
-	.timer = nv41_timer_new,
-	.volt = gf100_volt_new,
-	.ce[0] = gf100_ce_new,
-	.ce[1] = gf100_ce_new,
-	.disp = gt215_disp_new,
-	.dma = gf100_dma_new,
-	.fifo = gf100_fifo_new,
-	.gr = gf110_gr_new,
-	.mspdec = gf100_mspdec_new,
-	.msppp = gf100_msppp_new,
-	.msvld = gf100_msvld_new,
-	.pm = gf100_pm_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gf100_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gf100_clk_new },
+	.devinit  = { 0x00000001, gf100_devinit_new },
+	.fb       = { 0x00000001, gf100_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, g94_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gf100_ltc_new },
+	.mc       = { 0x00000001, gf100_mc_new },
+	.mmu      = { 0x00000001, gf100_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gf100_pci_new },
+	.pmu      = { 0x00000001, gf100_pmu_new },
+	.privring = { 0x00000001, gf100_privring_new },
+	.therm    = { 0x00000001, gt215_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, gf100_volt_new },
+	.ce       = { 0x00000003, gf100_ce_new },
+	.disp     = { 0x00000001, gt215_disp_new },
+	.dma      = { 0x00000001, gf100_dma_new },
+	.fifo     = { 0x00000001, gf100_fifo_new },
+	.gr       = { 0x00000001, gf110_gr_new },
+	.mspdec   = { 0x00000001, gf100_mspdec_new },
+	.msppp    = { 0x00000001, gf100_msppp_new },
+	.msvld    = { 0x00000001, gf100_msvld_new },
+	.pm       = { 0x00000001, gf100_pm_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nvce_chipset = {
 	.name = "GF114",
-	.bar = gf100_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gf100_clk_new,
-	.devinit = gf100_devinit_new,
-	.fb = gf100_fb_new,
-	.fuse = gf100_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = g94_i2c_new,
-	.ibus = gf100_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gf100_ltc_new,
-	.mc = gf100_mc_new,
-	.mmu = gf100_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gf100_pci_new,
-	.pmu = gf100_pmu_new,
-	.therm = gt215_therm_new,
-	.timer = nv41_timer_new,
-	.volt = gf100_volt_new,
-	.ce[0] = gf100_ce_new,
-	.ce[1] = gf100_ce_new,
-	.disp = gt215_disp_new,
-	.dma = gf100_dma_new,
-	.fifo = gf100_fifo_new,
-	.gr = gf104_gr_new,
-	.mspdec = gf100_mspdec_new,
-	.msppp = gf100_msppp_new,
-	.msvld = gf100_msvld_new,
-	.pm = gf100_pm_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gf100_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gf100_clk_new },
+	.devinit  = { 0x00000001, gf100_devinit_new },
+	.fb       = { 0x00000001, gf100_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, g94_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gf100_ltc_new },
+	.mc       = { 0x00000001, gf100_mc_new },
+	.mmu      = { 0x00000001, gf100_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gf100_pci_new },
+	.pmu      = { 0x00000001, gf100_pmu_new },
+	.privring = { 0x00000001, gf100_privring_new },
+	.therm    = { 0x00000001, gt215_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, gf100_volt_new },
+	.ce       = { 0x00000003, gf100_ce_new },
+	.disp     = { 0x00000001, gt215_disp_new },
+	.dma      = { 0x00000001, gf100_dma_new },
+	.fifo     = { 0x00000001, gf100_fifo_new },
+	.gr       = { 0x00000001, gf104_gr_new },
+	.mspdec   = { 0x00000001, gf100_mspdec_new },
+	.msppp    = { 0x00000001, gf100_msppp_new },
+	.msvld    = { 0x00000001, gf100_msvld_new },
+	.pm       = { 0x00000001, gf100_pm_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nvcf_chipset = {
 	.name = "GF116",
-	.bar = gf100_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gf100_clk_new,
-	.devinit = gf100_devinit_new,
-	.fb = gf100_fb_new,
-	.fuse = gf100_fuse_new,
-	.gpio = g94_gpio_new,
-	.i2c = g94_i2c_new,
-	.ibus = gf100_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gf100_ltc_new,
-	.mc = gf100_mc_new,
-	.mmu = gf100_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gf106_pci_new,
-	.pmu = gf100_pmu_new,
-	.therm = gt215_therm_new,
-	.timer = nv41_timer_new,
-	.volt = gf100_volt_new,
-	.ce[0] = gf100_ce_new,
-	.disp = gt215_disp_new,
-	.dma = gf100_dma_new,
-	.fifo = gf100_fifo_new,
-	.gr = gf104_gr_new,
-	.mspdec = gf100_mspdec_new,
-	.msppp = gf100_msppp_new,
-	.msvld = gf100_msvld_new,
-	.pm = gf100_pm_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gf100_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gf100_clk_new },
+	.devinit  = { 0x00000001, gf100_devinit_new },
+	.fb       = { 0x00000001, gf100_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.gpio     = { 0x00000001, g94_gpio_new },
+	.i2c      = { 0x00000001, g94_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gf100_ltc_new },
+	.mc       = { 0x00000001, gf100_mc_new },
+	.mmu      = { 0x00000001, gf100_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gf106_pci_new },
+	.pmu      = { 0x00000001, gf100_pmu_new },
+	.privring = { 0x00000001, gf100_privring_new },
+	.therm    = { 0x00000001, gt215_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, gf100_volt_new },
+	.ce       = { 0x00000001, gf100_ce_new },
+	.disp     = { 0x00000001, gt215_disp_new },
+	.dma      = { 0x00000001, gf100_dma_new },
+	.fifo     = { 0x00000001, gf100_fifo_new },
+	.gr       = { 0x00000001, gf104_gr_new },
+	.mspdec   = { 0x00000001, gf100_mspdec_new },
+	.msppp    = { 0x00000001, gf100_msppp_new },
+	.msvld    = { 0x00000001, gf100_msvld_new },
+	.pm       = { 0x00000001, gf100_pm_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nvd7_chipset = {
 	.name = "GF117",
-	.bar = gf100_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gf100_clk_new,
-	.devinit = gf100_devinit_new,
-	.fb = gf100_fb_new,
-	.fuse = gf100_fuse_new,
-	.gpio = gf119_gpio_new,
-	.i2c = gf117_i2c_new,
-	.ibus = gf117_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gf100_ltc_new,
-	.mc = gf100_mc_new,
-	.mmu = gf100_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gf106_pci_new,
-	.therm = gf119_therm_new,
-	.timer = nv41_timer_new,
-	.volt = gf117_volt_new,
-	.ce[0] = gf100_ce_new,
-	.disp = gf119_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gf100_fifo_new,
-	.gr = gf117_gr_new,
-	.mspdec = gf100_mspdec_new,
-	.msppp = gf100_msppp_new,
-	.msvld = gf100_msvld_new,
-	.pm = gf117_pm_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gf100_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gf100_clk_new },
+	.devinit  = { 0x00000001, gf100_devinit_new },
+	.fb       = { 0x00000001, gf100_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.gpio     = { 0x00000001, gf119_gpio_new },
+	.i2c      = { 0x00000001, gf117_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gf100_ltc_new },
+	.mc       = { 0x00000001, gf100_mc_new },
+	.mmu      = { 0x00000001, gf100_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gf106_pci_new },
+	.privring = { 0x00000001, gf117_privring_new },
+	.therm    = { 0x00000001, gf119_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, gf117_volt_new },
+	.ce       = { 0x00000001, gf100_ce_new },
+	.disp     = { 0x00000001, gf119_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gf100_fifo_new },
+	.gr       = { 0x00000001, gf117_gr_new },
+	.mspdec   = { 0x00000001, gf100_mspdec_new },
+	.msppp    = { 0x00000001, gf100_msppp_new },
+	.msvld    = { 0x00000001, gf100_msvld_new },
+	.pm       = { 0x00000001, gf117_pm_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nvd9_chipset = {
 	.name = "GF119",
-	.bar = gf100_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gf100_clk_new,
-	.devinit = gf100_devinit_new,
-	.fb = gf100_fb_new,
-	.fuse = gf100_fuse_new,
-	.gpio = gf119_gpio_new,
-	.i2c = gf119_i2c_new,
-	.ibus = gf117_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gf100_ltc_new,
-	.mc = gf100_mc_new,
-	.mmu = gf100_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gf106_pci_new,
-	.pmu = gf119_pmu_new,
-	.therm = gf119_therm_new,
-	.timer = nv41_timer_new,
-	.volt = gf100_volt_new,
-	.ce[0] = gf100_ce_new,
-	.disp = gf119_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gf100_fifo_new,
-	.gr = gf119_gr_new,
-	.mspdec = gf100_mspdec_new,
-	.msppp = gf100_msppp_new,
-	.msvld = gf100_msvld_new,
-	.pm = gf117_pm_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gf100_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gf100_clk_new },
+	.devinit  = { 0x00000001, gf100_devinit_new },
+	.fb       = { 0x00000001, gf100_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.gpio     = { 0x00000001, gf119_gpio_new },
+	.i2c      = { 0x00000001, gf119_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gf100_ltc_new },
+	.mc       = { 0x00000001, gf100_mc_new },
+	.mmu      = { 0x00000001, gf100_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gf106_pci_new },
+	.pmu      = { 0x00000001, gf119_pmu_new },
+	.privring = { 0x00000001, gf117_privring_new },
+	.therm    = { 0x00000001, gf119_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.volt     = { 0x00000001, gf100_volt_new },
+	.ce       = { 0x00000001, gf100_ce_new },
+	.disp     = { 0x00000001, gf119_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gf100_fifo_new },
+	.gr       = { 0x00000001, gf119_gr_new },
+	.mspdec   = { 0x00000001, gf100_mspdec_new },
+	.msppp    = { 0x00000001, gf100_msppp_new },
+	.msvld    = { 0x00000001, gf100_msvld_new },
+	.pm       = { 0x00000001, gf117_pm_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nve4_chipset = {
 	.name = "GK104",
-	.bar = gf100_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gk104_clk_new,
-	.devinit = gf100_devinit_new,
-	.fb = gk104_fb_new,
-	.fuse = gf100_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gk104_i2c_new,
-	.ibus = gk104_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gk104_ltc_new,
-	.mc = gk104_mc_new,
-	.mmu = gk104_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gk104_pci_new,
-	.pmu = gk104_pmu_new,
-	.therm = gk104_therm_new,
-	.timer = nv41_timer_new,
-	.top = gk104_top_new,
-	.volt = gk104_volt_new,
-	.ce[0] = gk104_ce_new,
-	.ce[1] = gk104_ce_new,
-	.ce[2] = gk104_ce_new,
-	.disp = gk104_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gk104_fifo_new,
-	.gr = gk104_gr_new,
-	.mspdec = gk104_mspdec_new,
-	.msppp = gf100_msppp_new,
-	.msvld = gk104_msvld_new,
-	.pm = gk104_pm_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gf100_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gk104_clk_new },
+	.devinit  = { 0x00000001, gf100_devinit_new },
+	.fb       = { 0x00000001, gk104_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gk104_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gk104_ltc_new },
+	.mc       = { 0x00000001, gk104_mc_new },
+	.mmu      = { 0x00000001, gk104_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gk104_pci_new },
+	.pmu      = { 0x00000001, gk104_pmu_new },
+	.privring = { 0x00000001, gk104_privring_new },
+	.therm    = { 0x00000001, gk104_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.volt     = { 0x00000001, gk104_volt_new },
+	.ce       = { 0x00000007, gk104_ce_new },
+	.disp     = { 0x00000001, gk104_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gk104_fifo_new },
+	.gr       = { 0x00000001, gk104_gr_new },
+	.mspdec   = { 0x00000001, gk104_mspdec_new },
+	.msppp    = { 0x00000001, gf100_msppp_new },
+	.msvld    = { 0x00000001, gk104_msvld_new },
+	.pm       = { 0x00000001, gk104_pm_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nve6_chipset = {
 	.name = "GK106",
-	.bar = gf100_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gk104_clk_new,
-	.devinit = gf100_devinit_new,
-	.fb = gk104_fb_new,
-	.fuse = gf100_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gk104_i2c_new,
-	.ibus = gk104_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gk104_ltc_new,
-	.mc = gk104_mc_new,
-	.mmu = gk104_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gk104_pci_new,
-	.pmu = gk104_pmu_new,
-	.therm = gk104_therm_new,
-	.timer = nv41_timer_new,
-	.top = gk104_top_new,
-	.volt = gk104_volt_new,
-	.ce[0] = gk104_ce_new,
-	.ce[1] = gk104_ce_new,
-	.ce[2] = gk104_ce_new,
-	.disp = gk104_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gk104_fifo_new,
-	.gr = gk104_gr_new,
-	.mspdec = gk104_mspdec_new,
-	.msppp = gf100_msppp_new,
-	.msvld = gk104_msvld_new,
-	.pm = gk104_pm_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gf100_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gk104_clk_new },
+	.devinit  = { 0x00000001, gf100_devinit_new },
+	.fb       = { 0x00000001, gk104_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gk104_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gk104_ltc_new },
+	.mc       = { 0x00000001, gk104_mc_new },
+	.mmu      = { 0x00000001, gk104_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gk104_pci_new },
+	.pmu      = { 0x00000001, gk104_pmu_new },
+	.privring = { 0x00000001, gk104_privring_new },
+	.therm    = { 0x00000001, gk104_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.volt     = { 0x00000001, gk104_volt_new },
+	.ce       = { 0x00000007, gk104_ce_new },
+	.disp     = { 0x00000001, gk104_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gk104_fifo_new },
+	.gr       = { 0x00000001, gk104_gr_new },
+	.mspdec   = { 0x00000001, gk104_mspdec_new },
+	.msppp    = { 0x00000001, gf100_msppp_new },
+	.msvld    = { 0x00000001, gk104_msvld_new },
+	.pm       = { 0x00000001, gk104_pm_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nve7_chipset = {
 	.name = "GK107",
-	.bar = gf100_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gk104_clk_new,
-	.devinit = gf100_devinit_new,
-	.fb = gk104_fb_new,
-	.fuse = gf100_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gk104_i2c_new,
-	.ibus = gk104_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gk104_ltc_new,
-	.mc = gk104_mc_new,
-	.mmu = gk104_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gk104_pci_new,
-	.pmu = gk104_pmu_new,
-	.therm = gk104_therm_new,
-	.timer = nv41_timer_new,
-	.top = gk104_top_new,
-	.volt = gk104_volt_new,
-	.ce[0] = gk104_ce_new,
-	.ce[1] = gk104_ce_new,
-	.ce[2] = gk104_ce_new,
-	.disp = gk104_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gk104_fifo_new,
-	.gr = gk104_gr_new,
-	.mspdec = gk104_mspdec_new,
-	.msppp = gf100_msppp_new,
-	.msvld = gk104_msvld_new,
-	.pm = gk104_pm_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gf100_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gk104_clk_new },
+	.devinit  = { 0x00000001, gf100_devinit_new },
+	.fb       = { 0x00000001, gk104_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gk104_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gk104_ltc_new },
+	.mc       = { 0x00000001, gk104_mc_new },
+	.mmu      = { 0x00000001, gk104_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gk104_pci_new },
+	.pmu      = { 0x00000001, gk104_pmu_new },
+	.privring = { 0x00000001, gk104_privring_new },
+	.therm    = { 0x00000001, gk104_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.volt     = { 0x00000001, gk104_volt_new },
+	.ce       = { 0x00000007, gk104_ce_new },
+	.disp     = { 0x00000001, gk104_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gk104_fifo_new },
+	.gr       = { 0x00000001, gk104_gr_new },
+	.mspdec   = { 0x00000001, gk104_mspdec_new },
+	.msppp    = { 0x00000001, gf100_msppp_new },
+	.msvld    = { 0x00000001, gk104_msvld_new },
+	.pm       = { 0x00000001, gk104_pm_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nvea_chipset = {
 	.name = "GK20A",
-	.bar = gk20a_bar_new,
-	.bus = gf100_bus_new,
-	.clk = gk20a_clk_new,
-	.fb = gk20a_fb_new,
-	.fuse = gf100_fuse_new,
-	.ibus = gk20a_ibus_new,
-	.imem = gk20a_instmem_new,
-	.ltc = gk104_ltc_new,
-	.mc = gk20a_mc_new,
-	.mmu = gk20a_mmu_new,
-	.pmu = gk20a_pmu_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.volt = gk20a_volt_new,
-	.ce[2] = gk104_ce_new,
-	.dma = gf119_dma_new,
-	.fifo = gk20a_fifo_new,
-	.gr = gk20a_gr_new,
-	.pm = gk104_pm_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gk20a_bar_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gk20a_clk_new },
+	.fb       = { 0x00000001, gk20a_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.imem     = { 0x00000001, gk20a_instmem_new },
+	.ltc      = { 0x00000001, gk104_ltc_new },
+	.mc       = { 0x00000001, gk20a_mc_new },
+	.mmu      = { 0x00000001, gk20a_mmu_new },
+	.pmu      = { 0x00000001, gk20a_pmu_new },
+	.privring = { 0x00000001, gk20a_privring_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.volt     = { 0x00000001, gk20a_volt_new },
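+	/* 0x00000004: bit 2 only, so just ce2 on GK20A */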
+	.ce       = { 0x00000004, gk104_ce_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gk20a_fifo_new },
+	.gr       = { 0x00000001, gk20a_gr_new },
+	.pm       = { 0x00000001, gk104_pm_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nvf0_chipset = {
 	.name = "GK110",
-	.bar = gf100_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gk104_clk_new,
-	.devinit = gf100_devinit_new,
-	.fb = gk110_fb_new,
-	.fuse = gf100_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gk110_i2c_new,
-	.ibus = gk104_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gk104_ltc_new,
-	.mc = gk104_mc_new,
-	.mmu = gk104_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gk104_pci_new,
-	.pmu = gk110_pmu_new,
-	.therm = gk104_therm_new,
-	.timer = nv41_timer_new,
-	.top = gk104_top_new,
-	.volt = gk104_volt_new,
-	.ce[0] = gk104_ce_new,
-	.ce[1] = gk104_ce_new,
-	.ce[2] = gk104_ce_new,
-	.disp = gk110_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gk110_fifo_new,
-	.gr = gk110_gr_new,
-	.mspdec = gk104_mspdec_new,
-	.msppp = gf100_msppp_new,
-	.msvld = gk104_msvld_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gf100_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gk104_clk_new },
+	.devinit  = { 0x00000001, gf100_devinit_new },
+	.fb       = { 0x00000001, gk110_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gk110_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gk104_ltc_new },
+	.mc       = { 0x00000001, gk104_mc_new },
+	.mmu      = { 0x00000001, gk104_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gk104_pci_new },
+	.pmu      = { 0x00000001, gk110_pmu_new },
+	.privring = { 0x00000001, gk104_privring_new },
+	.therm    = { 0x00000001, gk104_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.volt     = { 0x00000001, gk104_volt_new },
+	.ce       = { 0x00000007, gk104_ce_new },
+	.disp     = { 0x00000001, gk110_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gk110_fifo_new },
+	.gr       = { 0x00000001, gk110_gr_new },
+	.mspdec   = { 0x00000001, gk104_mspdec_new },
+	.msppp    = { 0x00000001, gf100_msppp_new },
+	.msvld    = { 0x00000001, gk104_msvld_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nvf1_chipset = {
 	.name = "GK110B",
-	.bar = gf100_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gk104_clk_new,
-	.devinit = gf100_devinit_new,
-	.fb = gk110_fb_new,
-	.fuse = gf100_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gk110_i2c_new,
-	.ibus = gk104_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gk104_ltc_new,
-	.mc = gk104_mc_new,
-	.mmu = gk104_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gk104_pci_new,
-	.pmu = gk110_pmu_new,
-	.therm = gk104_therm_new,
-	.timer = nv41_timer_new,
-	.top = gk104_top_new,
-	.volt = gk104_volt_new,
-	.ce[0] = gk104_ce_new,
-	.ce[1] = gk104_ce_new,
-	.ce[2] = gk104_ce_new,
-	.disp = gk110_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gk110_fifo_new,
-	.gr = gk110b_gr_new,
-	.mspdec = gk104_mspdec_new,
-	.msppp = gf100_msppp_new,
-	.msvld = gk104_msvld_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gf100_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gk104_clk_new },
+	.devinit  = { 0x00000001, gf100_devinit_new },
+	.fb       = { 0x00000001, gk110_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gk110_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gk104_ltc_new },
+	.mc       = { 0x00000001, gk104_mc_new },
+	.mmu      = { 0x00000001, gk104_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gk104_pci_new },
+	.pmu      = { 0x00000001, gk110_pmu_new },
+	.privring = { 0x00000001, gk104_privring_new },
+	.therm    = { 0x00000001, gk104_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.volt     = { 0x00000001, gk104_volt_new },
+	.ce       = { 0x00000007, gk104_ce_new },
+	.disp     = { 0x00000001, gk110_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gk110_fifo_new },
+	.gr       = { 0x00000001, gk110b_gr_new },
+	.mspdec   = { 0x00000001, gk104_mspdec_new },
+	.msppp    = { 0x00000001, gf100_msppp_new },
+	.msvld    = { 0x00000001, gk104_msvld_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv106_chipset = {
 	.name = "GK208B",
-	.bar = gf100_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gk104_clk_new,
-	.devinit = gf100_devinit_new,
-	.fb = gk110_fb_new,
-	.fuse = gf100_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gk110_i2c_new,
-	.ibus = gk104_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gk104_ltc_new,
-	.mc = gk20a_mc_new,
-	.mmu = gk104_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gk104_pci_new,
-	.pmu = gk208_pmu_new,
-	.therm = gk104_therm_new,
-	.timer = nv41_timer_new,
-	.top = gk104_top_new,
-	.volt = gk104_volt_new,
-	.ce[0] = gk104_ce_new,
-	.ce[1] = gk104_ce_new,
-	.ce[2] = gk104_ce_new,
-	.disp = gk110_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gk208_fifo_new,
-	.gr = gk208_gr_new,
-	.mspdec = gk104_mspdec_new,
-	.msppp = gf100_msppp_new,
-	.msvld = gk104_msvld_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gf100_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gk104_clk_new },
+	.devinit  = { 0x00000001, gf100_devinit_new },
+	.fb       = { 0x00000001, gk110_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gk110_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gk104_ltc_new },
+	.mc       = { 0x00000001, gk20a_mc_new },
+	.mmu      = { 0x00000001, gk104_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gk104_pci_new },
+	.pmu      = { 0x00000001, gk208_pmu_new },
+	.privring = { 0x00000001, gk104_privring_new },
+	.therm    = { 0x00000001, gk104_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.volt     = { 0x00000001, gk104_volt_new },
+	.ce       = { 0x00000007, gk104_ce_new },
+	.disp     = { 0x00000001, gk110_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gk208_fifo_new },
+	.gr       = { 0x00000001, gk208_gr_new },
+	.mspdec   = { 0x00000001, gk104_mspdec_new },
+	.msppp    = { 0x00000001, gf100_msppp_new },
+	.msvld    = { 0x00000001, gk104_msvld_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv108_chipset = {
 	.name = "GK208",
-	.bar = gf100_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gk104_clk_new,
-	.devinit = gf100_devinit_new,
-	.fb = gk110_fb_new,
-	.fuse = gf100_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gk110_i2c_new,
-	.ibus = gk104_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gk104_ltc_new,
-	.mc = gk20a_mc_new,
-	.mmu = gk104_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gk104_pci_new,
-	.pmu = gk208_pmu_new,
-	.therm = gk104_therm_new,
-	.timer = nv41_timer_new,
-	.top = gk104_top_new,
-	.volt = gk104_volt_new,
-	.ce[0] = gk104_ce_new,
-	.ce[1] = gk104_ce_new,
-	.ce[2] = gk104_ce_new,
-	.disp = gk110_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gk208_fifo_new,
-	.gr = gk208_gr_new,
-	.mspdec = gk104_mspdec_new,
-	.msppp = gf100_msppp_new,
-	.msvld = gk104_msvld_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gf100_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gk104_clk_new },
+	.devinit  = { 0x00000001, gf100_devinit_new },
+	.fb       = { 0x00000001, gk110_fb_new },
+	.fuse     = { 0x00000001, gf100_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gk110_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gk104_ltc_new },
+	.mc       = { 0x00000001, gk20a_mc_new },
+	.mmu      = { 0x00000001, gk104_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gk104_pci_new },
+	.pmu      = { 0x00000001, gk208_pmu_new },
+	.privring = { 0x00000001, gk104_privring_new },
+	.therm    = { 0x00000001, gk104_therm_new },
+	.timer    = { 0x00000001, nv41_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.volt     = { 0x00000001, gk104_volt_new },
+	.ce       = { 0x00000007, gk104_ce_new },
+	.disp     = { 0x00000001, gk110_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gk208_fifo_new },
+	.gr       = { 0x00000001, gk208_gr_new },
+	.mspdec   = { 0x00000001, gk104_mspdec_new },
+	.msppp    = { 0x00000001, gf100_msppp_new },
+	.msvld    = { 0x00000001, gk104_msvld_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv117_chipset = {
 	.name = "GM107",
-	.bar = gm107_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gk104_clk_new,
-	.devinit = gm107_devinit_new,
-	.fb = gm107_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gk110_i2c_new,
-	.ibus = gk104_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gm107_ltc_new,
-	.mc = gk20a_mc_new,
-	.mmu = gk104_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gk104_pci_new,
-	.pmu = gm107_pmu_new,
-	.therm = gm107_therm_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.volt = gk104_volt_new,
-	.ce[0] = gm107_ce_new,
-	.ce[2] = gm107_ce_new,
-	.disp = gm107_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gm107_fifo_new,
-	.gr = gm107_gr_new,
-	.nvdec[0] = gm107_nvdec_new,
-	.nvenc[0] = gm107_nvenc_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gm107_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gk104_clk_new },
+	.devinit  = { 0x00000001, gm107_devinit_new },
+	.fb       = { 0x00000001, gm107_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gk110_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gm107_ltc_new },
+	.mc       = { 0x00000001, gk20a_mc_new },
+	.mmu      = { 0x00000001, gk104_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gk104_pci_new },
+	.pmu      = { 0x00000001, gm107_pmu_new },
+	.privring = { 0x00000001, gk104_privring_new },
+	.therm    = { 0x00000001, gm107_therm_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.volt     = { 0x00000001, gk104_volt_new },
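+	/* 0x00000005: bits 0 and 2, i.e. ce0 and ce2 but no ce1 */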
+	.ce       = { 0x00000005, gm107_ce_new },
+	.disp     = { 0x00000001, gm107_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gm107_fifo_new },
+	.gr       = { 0x00000001, gm107_gr_new },
+	.nvdec    = { 0x00000001, gm107_nvdec_new },
+	.nvenc    = { 0x00000001, gm107_nvenc_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv118_chipset = {
 	.name = "GM108",
-	.bar = gm107_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.clk = gk104_clk_new,
-	.devinit = gm107_devinit_new,
-	.fb = gm107_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gk110_i2c_new,
-	.ibus = gk104_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gm107_ltc_new,
-	.mc = gk20a_mc_new,
-	.mmu = gk104_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gk104_pci_new,
-	.pmu = gm107_pmu_new,
-	.therm = gm107_therm_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.volt = gk104_volt_new,
-	.ce[0] = gm107_ce_new,
-	.ce[2] = gm107_ce_new,
-	.disp = gm107_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gm107_fifo_new,
-	.gr = gm107_gr_new,
-	.sw = gf100_sw_new,
+	.bar      = { 0x00000001, gm107_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gk104_clk_new },
+	.devinit  = { 0x00000001, gm107_devinit_new },
+	.fb       = { 0x00000001, gm107_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gk110_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gm107_ltc_new },
+	.mc       = { 0x00000001, gk20a_mc_new },
+	.mmu      = { 0x00000001, gk104_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gk104_pci_new },
+	.pmu      = { 0x00000001, gm107_pmu_new },
+	.privring = { 0x00000001, gk104_privring_new },
+	.therm    = { 0x00000001, gm107_therm_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.volt     = { 0x00000001, gk104_volt_new },
+	.ce       = { 0x00000005, gm107_ce_new },
+	.disp     = { 0x00000001, gm107_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gm107_fifo_new },
+	.gr       = { 0x00000001, gm107_gr_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv120_chipset = {
 	.name = "GM200",
-	.acr = gm200_acr_new,
-	.bar = gm107_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.devinit = gm200_devinit_new,
-	.fb = gm200_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gm200_ltc_new,
-	.mc = gk20a_mc_new,
-	.mmu = gm200_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gk104_pci_new,
-	.pmu = gm200_pmu_new,
-	.therm = gm200_therm_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.volt = gk104_volt_new,
-	.ce[0] = gm200_ce_new,
-	.ce[1] = gm200_ce_new,
-	.ce[2] = gm200_ce_new,
-	.disp = gm200_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gm200_fifo_new,
-	.gr = gm200_gr_new,
-	.nvdec[0] = gm107_nvdec_new,
-	.nvenc[0] = gm107_nvenc_new,
-	.nvenc[1] = gm107_nvenc_new,
-	.sw = gf100_sw_new,
+	.acr      = { 0x00000001, gm200_acr_new },
+	.bar      = { 0x00000001, gm107_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.devinit  = { 0x00000001, gm200_devinit_new },
+	.fb       = { 0x00000001, gm200_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gm200_ltc_new },
+	.mc       = { 0x00000001, gk20a_mc_new },
+	.mmu      = { 0x00000001, gm200_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gk104_pci_new },
+	.pmu      = { 0x00000001, gm200_pmu_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.therm    = { 0x00000001, gm200_therm_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.volt     = { 0x00000001, gk104_volt_new },
+	.ce       = { 0x00000007, gm200_ce_new },
+	.disp     = { 0x00000001, gm200_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gm200_fifo_new },
+	.gr       = { 0x00000001, gm200_gr_new },
+	.nvdec    = { 0x00000001, gm107_nvdec_new },
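+	/* 0x00000003: two encoder instances, nvenc0 and nvenc1 */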
+	.nvenc    = { 0x00000003, gm107_nvenc_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv124_chipset = {
 	.name = "GM204",
-	.acr = gm200_acr_new,
-	.bar = gm107_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.devinit = gm200_devinit_new,
-	.fb = gm200_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gm200_ltc_new,
-	.mc = gk20a_mc_new,
-	.mmu = gm200_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gk104_pci_new,
-	.pmu = gm200_pmu_new,
-	.therm = gm200_therm_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.volt = gk104_volt_new,
-	.ce[0] = gm200_ce_new,
-	.ce[1] = gm200_ce_new,
-	.ce[2] = gm200_ce_new,
-	.disp = gm200_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gm200_fifo_new,
-	.gr = gm200_gr_new,
-	.nvdec[0] = gm107_nvdec_new,
-	.nvenc[0] = gm107_nvenc_new,
-	.nvenc[1] = gm107_nvenc_new,
-	.sw = gf100_sw_new,
+	.acr      = { 0x00000001, gm200_acr_new },
+	.bar      = { 0x00000001, gm107_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.devinit  = { 0x00000001, gm200_devinit_new },
+	.fb       = { 0x00000001, gm200_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gm200_ltc_new },
+	.mc       = { 0x00000001, gk20a_mc_new },
+	.mmu      = { 0x00000001, gm200_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gk104_pci_new },
+	.pmu      = { 0x00000001, gm200_pmu_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.therm    = { 0x00000001, gm200_therm_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.volt     = { 0x00000001, gk104_volt_new },
+	.ce       = { 0x00000007, gm200_ce_new },
+	.disp     = { 0x00000001, gm200_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gm200_fifo_new },
+	.gr       = { 0x00000001, gm200_gr_new },
+	.nvdec    = { 0x00000001, gm107_nvdec_new },
+	.nvenc    = { 0x00000003, gm107_nvenc_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv126_chipset = {
 	.name = "GM206",
-	.acr = gm200_acr_new,
-	.bar = gm107_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.devinit = gm200_devinit_new,
-	.fb = gm200_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.iccsense = gf100_iccsense_new,
-	.imem = nv50_instmem_new,
-	.ltc = gm200_ltc_new,
-	.mc = gk20a_mc_new,
-	.mmu = gm200_mmu_new,
-	.mxm = nv50_mxm_new,
-	.pci = gk104_pci_new,
-	.pmu = gm200_pmu_new,
-	.therm = gm200_therm_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.volt = gk104_volt_new,
-	.ce[0] = gm200_ce_new,
-	.ce[1] = gm200_ce_new,
-	.ce[2] = gm200_ce_new,
-	.disp = gm200_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gm200_fifo_new,
-	.gr = gm200_gr_new,
-	.nvdec[0] = gm107_nvdec_new,
-	.nvenc[0] = gm107_nvenc_new,
-	.sw = gf100_sw_new,
+	.acr      = { 0x00000001, gm200_acr_new },
+	.bar      = { 0x00000001, gm107_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.devinit  = { 0x00000001, gm200_devinit_new },
+	.fb       = { 0x00000001, gm200_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.iccsense = { 0x00000001, gf100_iccsense_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gm200_ltc_new },
+	.mc       = { 0x00000001, gk20a_mc_new },
+	.mmu      = { 0x00000001, gm200_mmu_new },
+	.mxm      = { 0x00000001, nv50_mxm_new },
+	.pci      = { 0x00000001, gk104_pci_new },
+	.pmu      = { 0x00000001, gm200_pmu_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.therm    = { 0x00000001, gm200_therm_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.volt     = { 0x00000001, gk104_volt_new },
+	.ce       = { 0x00000007, gm200_ce_new },
+	.disp     = { 0x00000001, gm200_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gm200_fifo_new },
+	.gr       = { 0x00000001, gm200_gr_new },
+	.nvdec    = { 0x00000001, gm107_nvdec_new },
+	.nvenc    = { 0x00000001, gm107_nvenc_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv12b_chipset = {
 	.name = "GM20B",
-	.acr = gm20b_acr_new,
-	.bar = gm20b_bar_new,
-	.bus = gf100_bus_new,
-	.clk = gm20b_clk_new,
-	.fb = gm20b_fb_new,
-	.fuse = gm107_fuse_new,
-	.ibus = gk20a_ibus_new,
-	.imem = gk20a_instmem_new,
-	.ltc = gm200_ltc_new,
-	.mc = gk20a_mc_new,
-	.mmu = gm20b_mmu_new,
-	.pmu = gm20b_pmu_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.ce[2] = gm200_ce_new,
-	.volt = gm20b_volt_new,
-	.dma = gf119_dma_new,
-	.fifo = gm20b_fifo_new,
-	.gr = gm20b_gr_new,
-	.sw = gf100_sw_new,
+	.acr      = { 0x00000001, gm20b_acr_new },
+	.bar      = { 0x00000001, gm20b_bar_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.clk      = { 0x00000001, gm20b_clk_new },
+	.fb       = { 0x00000001, gm20b_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.imem     = { 0x00000001, gk20a_instmem_new },
+	.ltc      = { 0x00000001, gm200_ltc_new },
+	.mc       = { 0x00000001, gk20a_mc_new },
+	.mmu      = { 0x00000001, gm20b_mmu_new },
+	.pmu      = { 0x00000001, gm20b_pmu_new },
+	.privring = { 0x00000001, gk20a_privring_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.volt     = { 0x00000001, gm20b_volt_new },
+	.ce       = { 0x00000004, gm200_ce_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gm20b_fifo_new },
+	.gr       = { 0x00000001, gm20b_gr_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv130_chipset = {
 	.name = "GP100",
-	.acr = gm200_acr_new,
-	.bar = gm107_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.devinit = gm200_devinit_new,
-	.fault = gp100_fault_new,
-	.fb = gp100_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.imem = nv50_instmem_new,
-	.ltc = gp100_ltc_new,
-	.mc = gp100_mc_new,
-	.mmu = gp100_mmu_new,
-	.therm = gp100_therm_new,
-	.pci = gp100_pci_new,
-	.pmu = gm200_pmu_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.ce[0] = gp100_ce_new,
-	.ce[1] = gp100_ce_new,
-	.ce[2] = gp100_ce_new,
-	.ce[3] = gp100_ce_new,
-	.ce[4] = gp100_ce_new,
-	.ce[5] = gp100_ce_new,
-	.dma = gf119_dma_new,
-	.disp = gp100_disp_new,
-	.fifo = gp100_fifo_new,
-	.gr = gp100_gr_new,
-	.nvdec[0] = gm107_nvdec_new,
-	.nvenc[0] = gm107_nvenc_new,
-	.nvenc[1] = gm107_nvenc_new,
-	.nvenc[2] = gm107_nvenc_new,
-	.sw = gf100_sw_new,
+	.acr      = { 0x00000001, gm200_acr_new },
+	.bar      = { 0x00000001, gm107_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.devinit  = { 0x00000001, gm200_devinit_new },
+	.fault    = { 0x00000001, gp100_fault_new },
+	.fb       = { 0x00000001, gp100_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gp100_ltc_new },
+	.mc       = { 0x00000001, gp100_mc_new },
+	.mmu      = { 0x00000001, gp100_mmu_new },
+	.therm    = { 0x00000001, gp100_therm_new },
+	.pci      = { 0x00000001, gp100_pci_new },
+	.pmu      = { 0x00000001, gm200_pmu_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.ce       = { 0x0000003f, gp100_ce_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.disp     = { 0x00000001, gp100_disp_new },
+	.fifo     = { 0x00000001, gp100_fifo_new },
+	.gr       = { 0x00000001, gp100_gr_new },
+	.nvdec    = { 0x00000001, gm107_nvdec_new },
+	.nvenc    = { 0x00000007, gm107_nvenc_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv132_chipset = {
 	.name = "GP102",
-	.acr = gp102_acr_new,
-	.bar = gm107_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.devinit = gm200_devinit_new,
-	.fault = gp100_fault_new,
-	.fb = gp102_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.imem = nv50_instmem_new,
-	.ltc = gp102_ltc_new,
-	.mc = gp100_mc_new,
-	.mmu = gp100_mmu_new,
-	.therm = gp100_therm_new,
-	.pci = gp100_pci_new,
-	.pmu = gp102_pmu_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.ce[0] = gp102_ce_new,
-	.ce[1] = gp102_ce_new,
-	.ce[2] = gp102_ce_new,
-	.ce[3] = gp102_ce_new,
-	.disp = gp102_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gp100_fifo_new,
-	.gr = gp102_gr_new,
-	.nvdec[0] = gm107_nvdec_new,
-	.nvenc[0] = gm107_nvenc_new,
-	.nvenc[1] = gm107_nvenc_new,
-	.sec2 = gp102_sec2_new,
-	.sw = gf100_sw_new,
+	.acr      = { 0x00000001, gp102_acr_new },
+	.bar      = { 0x00000001, gm107_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.devinit  = { 0x00000001, gm200_devinit_new },
+	.fault    = { 0x00000001, gp100_fault_new },
+	.fb       = { 0x00000001, gp102_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gp102_ltc_new },
+	.mc       = { 0x00000001, gp100_mc_new },
+	.mmu      = { 0x00000001, gp100_mmu_new },
+	.therm    = { 0x00000001, gp100_therm_new },
+	.pci      = { 0x00000001, gp100_pci_new },
+	.pmu      = { 0x00000001, gp102_pmu_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.ce       = { 0x0000000f, gp102_ce_new },
+	.disp     = { 0x00000001, gp102_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gp100_fifo_new },
+	.gr       = { 0x00000001, gp102_gr_new },
+	.nvdec    = { 0x00000001, gm107_nvdec_new },
+	.nvenc    = { 0x00000003, gm107_nvenc_new },
+	.sec2     = { 0x00000001, gp102_sec2_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv134_chipset = {
 	.name = "GP104",
-	.acr = gp102_acr_new,
-	.bar = gm107_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.devinit = gm200_devinit_new,
-	.fault = gp100_fault_new,
-	.fb = gp102_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.imem = nv50_instmem_new,
-	.ltc = gp102_ltc_new,
-	.mc = gp100_mc_new,
-	.mmu = gp100_mmu_new,
-	.therm = gp100_therm_new,
-	.pci = gp100_pci_new,
-	.pmu = gp102_pmu_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.ce[0] = gp102_ce_new,
-	.ce[1] = gp102_ce_new,
-	.ce[2] = gp102_ce_new,
-	.ce[3] = gp102_ce_new,
-	.disp = gp102_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gp100_fifo_new,
-	.gr = gp104_gr_new,
-	.nvdec[0] = gm107_nvdec_new,
-	.nvenc[0] = gm107_nvenc_new,
-	.nvenc[1] = gm107_nvenc_new,
-	.sec2 = gp102_sec2_new,
-	.sw = gf100_sw_new,
+	.acr      = { 0x00000001, gp102_acr_new },
+	.bar      = { 0x00000001, gm107_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.devinit  = { 0x00000001, gm200_devinit_new },
+	.fault    = { 0x00000001, gp100_fault_new },
+	.fb       = { 0x00000001, gp102_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gp102_ltc_new },
+	.mc       = { 0x00000001, gp100_mc_new },
+	.mmu      = { 0x00000001, gp100_mmu_new },
+	.therm    = { 0x00000001, gp100_therm_new },
+	.pci      = { 0x00000001, gp100_pci_new },
+	.pmu      = { 0x00000001, gp102_pmu_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.ce       = { 0x0000000f, gp102_ce_new },
+	.disp     = { 0x00000001, gp102_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gp100_fifo_new },
+	.gr       = { 0x00000001, gp104_gr_new },
+	.nvdec    = { 0x00000001, gm107_nvdec_new },
+	.nvenc    = { 0x00000003, gm107_nvenc_new },
+	.sec2     = { 0x00000001, gp102_sec2_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv136_chipset = {
 	.name = "GP106",
-	.acr = gp102_acr_new,
-	.bar = gm107_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.devinit = gm200_devinit_new,
-	.fault = gp100_fault_new,
-	.fb = gp102_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.imem = nv50_instmem_new,
-	.ltc = gp102_ltc_new,
-	.mc = gp100_mc_new,
-	.mmu = gp100_mmu_new,
-	.therm = gp100_therm_new,
-	.pci = gp100_pci_new,
-	.pmu = gp102_pmu_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.ce[0] = gp102_ce_new,
-	.ce[1] = gp102_ce_new,
-	.ce[2] = gp102_ce_new,
-	.ce[3] = gp102_ce_new,
-	.disp = gp102_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gp100_fifo_new,
-	.gr = gp104_gr_new,
-	.nvdec[0] = gm107_nvdec_new,
-	.nvenc[0] = gm107_nvenc_new,
-	.sec2 = gp102_sec2_new,
-	.sw = gf100_sw_new,
+	.acr      = { 0x00000001, gp102_acr_new },
+	.bar      = { 0x00000001, gm107_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.devinit  = { 0x00000001, gm200_devinit_new },
+	.fault    = { 0x00000001, gp100_fault_new },
+	.fb       = { 0x00000001, gp102_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gp102_ltc_new },
+	.mc       = { 0x00000001, gp100_mc_new },
+	.mmu      = { 0x00000001, gp100_mmu_new },
+	.therm    = { 0x00000001, gp100_therm_new },
+	.pci      = { 0x00000001, gp100_pci_new },
+	.pmu      = { 0x00000001, gp102_pmu_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.ce       = { 0x0000000f, gp102_ce_new },
+	.disp     = { 0x00000001, gp102_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gp100_fifo_new },
+	.gr       = { 0x00000001, gp104_gr_new },
+	.nvdec    = { 0x00000001, gm107_nvdec_new },
+	.nvenc    = { 0x00000001, gm107_nvenc_new },
+	.sec2     = { 0x00000001, gp102_sec2_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv137_chipset = {
 	.name = "GP107",
-	.acr = gp102_acr_new,
-	.bar = gm107_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.devinit = gm200_devinit_new,
-	.fault = gp100_fault_new,
-	.fb = gp102_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.imem = nv50_instmem_new,
-	.ltc = gp102_ltc_new,
-	.mc = gp100_mc_new,
-	.mmu = gp100_mmu_new,
-	.therm = gp100_therm_new,
-	.pci = gp100_pci_new,
-	.pmu = gp102_pmu_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.ce[0] = gp102_ce_new,
-	.ce[1] = gp102_ce_new,
-	.ce[2] = gp102_ce_new,
-	.ce[3] = gp102_ce_new,
-	.disp = gp102_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gp100_fifo_new,
-	.gr = gp107_gr_new,
-	.nvdec[0] = gm107_nvdec_new,
-	.nvenc[0] = gm107_nvenc_new,
-	.nvenc[1] = gm107_nvenc_new,
-	.sec2 = gp102_sec2_new,
-	.sw = gf100_sw_new,
+	.acr      = { 0x00000001, gp102_acr_new },
+	.bar      = { 0x00000001, gm107_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.devinit  = { 0x00000001, gm200_devinit_new },
+	.fault    = { 0x00000001, gp100_fault_new },
+	.fb       = { 0x00000001, gp102_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gp102_ltc_new },
+	.mc       = { 0x00000001, gp100_mc_new },
+	.mmu      = { 0x00000001, gp100_mmu_new },
+	.therm    = { 0x00000001, gp100_therm_new },
+	.pci      = { 0x00000001, gp100_pci_new },
+	.pmu      = { 0x00000001, gp102_pmu_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.ce       = { 0x0000000f, gp102_ce_new },
+	.disp     = { 0x00000001, gp102_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gp100_fifo_new },
+	.gr       = { 0x00000001, gp107_gr_new },
+	.nvdec    = { 0x00000001, gm107_nvdec_new },
+	.nvenc    = { 0x00000003, gm107_nvenc_new },
+	.sec2     = { 0x00000001, gp102_sec2_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv138_chipset = {
 	.name = "GP108",
-	.acr = gp108_acr_new,
-	.bar = gm107_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.devinit = gm200_devinit_new,
-	.fault = gp100_fault_new,
-	.fb = gp102_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.imem = nv50_instmem_new,
-	.ltc = gp102_ltc_new,
-	.mc = gp100_mc_new,
-	.mmu = gp100_mmu_new,
-	.therm = gp100_therm_new,
-	.pci = gp100_pci_new,
-	.pmu = gp102_pmu_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.ce[0] = gp102_ce_new,
-	.ce[1] = gp102_ce_new,
-	.ce[2] = gp102_ce_new,
-	.ce[3] = gp102_ce_new,
-	.disp = gp102_disp_new,
-	.dma = gf119_dma_new,
-	.fifo = gp100_fifo_new,
-	.gr = gp108_gr_new,
-	.nvdec[0] = gm107_nvdec_new,
-	.sec2 = gp108_sec2_new,
-	.sw = gf100_sw_new,
+	.acr      = { 0x00000001, gp108_acr_new },
+	.bar      = { 0x00000001, gm107_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.devinit  = { 0x00000001, gm200_devinit_new },
+	.fault    = { 0x00000001, gp100_fault_new },
+	.fb       = { 0x00000001, gp102_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gp102_ltc_new },
+	.mc       = { 0x00000001, gp100_mc_new },
+	.mmu      = { 0x00000001, gp100_mmu_new },
+	.therm    = { 0x00000001, gp100_therm_new },
+	.pci      = { 0x00000001, gp100_pci_new },
+	.pmu      = { 0x00000001, gp102_pmu_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.ce       = { 0x0000000f, gp102_ce_new },
+	.disp     = { 0x00000001, gp102_disp_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gp100_fifo_new },
+	.gr       = { 0x00000001, gp108_gr_new },
+	.nvdec    = { 0x00000001, gm107_nvdec_new },
+	.sec2     = { 0x00000001, gp108_sec2_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv13b_chipset = {
 	.name = "GP10B",
-	.acr = gp10b_acr_new,
-	.bar = gm20b_bar_new,
-	.bus = gf100_bus_new,
-	.fault = gp10b_fault_new,
-	.fb = gp10b_fb_new,
-	.fuse = gm107_fuse_new,
-	.ibus = gp10b_ibus_new,
-	.imem = gk20a_instmem_new,
-	.ltc = gp10b_ltc_new,
-	.mc = gp10b_mc_new,
-	.mmu = gp10b_mmu_new,
-	.pmu = gp10b_pmu_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.ce[0] = gp100_ce_new,
-	.dma = gf119_dma_new,
-	.fifo = gp10b_fifo_new,
-	.gr = gp10b_gr_new,
-	.sw = gf100_sw_new,
+	.acr      = { 0x00000001, gp10b_acr_new },
+	.bar      = { 0x00000001, gm20b_bar_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.fault    = { 0x00000001, gp10b_fault_new },
+	.fb       = { 0x00000001, gp10b_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.imem     = { 0x00000001, gk20a_instmem_new },
+	.ltc      = { 0x00000001, gp10b_ltc_new },
+	.mc       = { 0x00000001, gp10b_mc_new },
+	.mmu      = { 0x00000001, gp10b_mmu_new },
+	.pmu      = { 0x00000001, gp10b_pmu_new },
+	.privring = { 0x00000001, gp10b_privring_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.ce       = { 0x00000001, gp100_ce_new },
+	.dma      = { 0x00000001, gf119_dma_new },
+	.fifo     = { 0x00000001, gp10b_fifo_new },
+	.gr       = { 0x00000001, gp10b_gr_new },
+	.sw       = { 0x00000001, gf100_sw_new },
 };
 
 static const struct nvkm_device_chip
 nv140_chipset = {
 	.name = "GV100",
-	.acr = gp108_acr_new,
-	.bar = gm107_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.devinit = gv100_devinit_new,
-	.fault = gv100_fault_new,
-	.fb = gv100_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.gsp = gv100_gsp_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.imem = nv50_instmem_new,
-	.ltc = gp102_ltc_new,
-	.mc = gp100_mc_new,
-	.mmu = gv100_mmu_new,
-	.pci = gp100_pci_new,
-	.pmu = gp102_pmu_new,
-	.therm = gp100_therm_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.disp = gv100_disp_new,
-	.ce[0] = gv100_ce_new,
-	.ce[1] = gv100_ce_new,
-	.ce[2] = gv100_ce_new,
-	.ce[3] = gv100_ce_new,
-	.ce[4] = gv100_ce_new,
-	.ce[5] = gv100_ce_new,
-	.ce[6] = gv100_ce_new,
-	.ce[7] = gv100_ce_new,
-	.ce[8] = gv100_ce_new,
-	.dma = gv100_dma_new,
-	.fifo = gv100_fifo_new,
-	.gr = gv100_gr_new,
-	.nvdec[0] = gm107_nvdec_new,
-	.nvenc[0] = gm107_nvenc_new,
-	.nvenc[1] = gm107_nvenc_new,
-	.nvenc[2] = gm107_nvenc_new,
-	.sec2 = gp108_sec2_new,
+	.acr      = { 0x00000001, gp108_acr_new },
+	.bar      = { 0x00000001, gm107_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.devinit  = { 0x00000001, gv100_devinit_new },
+	.fault    = { 0x00000001, gv100_fault_new },
+	.fb       = { 0x00000001, gv100_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.gsp      = { 0x00000001, gv100_gsp_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gp102_ltc_new },
+	.mc       = { 0x00000001, gp100_mc_new },
+	.mmu      = { 0x00000001, gv100_mmu_new },
+	.pci      = { 0x00000001, gp100_pci_new },
+	.pmu      = { 0x00000001, gp102_pmu_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.therm    = { 0x00000001, gp100_therm_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
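+	/* 0x000001ff: bits 0-8, all nine GV100 copy engines */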
+	.ce       = { 0x000001ff, gv100_ce_new },
+	.disp     = { 0x00000001, gv100_disp_new },
+	.dma      = { 0x00000001, gv100_dma_new },
+	.fifo     = { 0x00000001, gv100_fifo_new },
+	.gr       = { 0x00000001, gv100_gr_new },
+	.nvdec    = { 0x00000001, gm107_nvdec_new },
+	.nvenc    = { 0x00000007, gm107_nvenc_new },
+	.sec2     = { 0x00000001, gp108_sec2_new },
 };
 
 static const struct nvkm_device_chip
 nv162_chipset = {
 	.name = "TU102",
-	.acr = tu102_acr_new,
-	.bar = tu102_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.devinit = tu102_devinit_new,
-	.fault = tu102_fault_new,
-	.fb = gv100_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.gsp = gv100_gsp_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.imem = nv50_instmem_new,
-	.ltc = gp102_ltc_new,
-	.mc = tu102_mc_new,
-	.mmu = tu102_mmu_new,
-	.pci = gp100_pci_new,
-	.pmu = gp102_pmu_new,
-	.therm = gp100_therm_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.ce[0] = tu102_ce_new,
-	.ce[1] = tu102_ce_new,
-	.ce[2] = tu102_ce_new,
-	.ce[3] = tu102_ce_new,
-	.ce[4] = tu102_ce_new,
-	.disp = tu102_disp_new,
-	.dma = gv100_dma_new,
-	.fifo = tu102_fifo_new,
-	.gr = tu102_gr_new,
-	.nvdec[0] = gm107_nvdec_new,
-	.nvenc[0] = gm107_nvenc_new,
-	.sec2 = tu102_sec2_new,
+	.acr      = { 0x00000001, tu102_acr_new },
+	.bar      = { 0x00000001, tu102_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.devinit  = { 0x00000001, tu102_devinit_new },
+	.fault    = { 0x00000001, tu102_fault_new },
+	.fb       = { 0x00000001, gv100_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.gsp      = { 0x00000001, gv100_gsp_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gp102_ltc_new },
+	.mc       = { 0x00000001, tu102_mc_new },
+	.mmu      = { 0x00000001, tu102_mmu_new },
+	.pci      = { 0x00000001, gp100_pci_new },
+	.pmu      = { 0x00000001, gp102_pmu_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.therm    = { 0x00000001, gp100_therm_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.ce       = { 0x0000001f, tu102_ce_new },
+	.disp     = { 0x00000001, tu102_disp_new },
+	.dma      = { 0x00000001, gv100_dma_new },
+	.fifo     = { 0x00000001, tu102_fifo_new },
+	.gr       = { 0x00000001, tu102_gr_new },
+	.nvdec    = { 0x00000001, gm107_nvdec_new },
+	.nvenc    = { 0x00000001, gm107_nvenc_new },
+	.sec2     = { 0x00000001, tu102_sec2_new },
 };
 
 static const struct nvkm_device_chip
 nv164_chipset = {
 	.name = "TU104",
-	.acr = tu102_acr_new,
-	.bar = tu102_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.devinit = tu102_devinit_new,
-	.fault = tu102_fault_new,
-	.fb = gv100_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.gsp = gv100_gsp_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.imem = nv50_instmem_new,
-	.ltc = gp102_ltc_new,
-	.mc = tu102_mc_new,
-	.mmu = tu102_mmu_new,
-	.pci = gp100_pci_new,
-	.pmu = gp102_pmu_new,
-	.therm = gp100_therm_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.ce[0] = tu102_ce_new,
-	.ce[1] = tu102_ce_new,
-	.ce[2] = tu102_ce_new,
-	.ce[3] = tu102_ce_new,
-	.ce[4] = tu102_ce_new,
-	.disp = tu102_disp_new,
-	.dma = gv100_dma_new,
-	.fifo = tu102_fifo_new,
-	.gr = tu102_gr_new,
-	.nvdec[0] = gm107_nvdec_new,
-	.nvdec[1] = gm107_nvdec_new,
-	.nvenc[0] = gm107_nvenc_new,
-	.sec2 = tu102_sec2_new,
+	.acr      = { 0x00000001, tu102_acr_new },
+	.bar      = { 0x00000001, tu102_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.devinit  = { 0x00000001, tu102_devinit_new },
+	.fault    = { 0x00000001, tu102_fault_new },
+	.fb       = { 0x00000001, gv100_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.gsp      = { 0x00000001, gv100_gsp_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gp102_ltc_new },
+	.mc       = { 0x00000001, tu102_mc_new },
+	.mmu      = { 0x00000001, tu102_mmu_new },
+	.pci      = { 0x00000001, gp100_pci_new },
+	.pmu      = { 0x00000001, gp102_pmu_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.therm    = { 0x00000001, gp100_therm_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.ce       = { 0x0000001f, tu102_ce_new },
+	.disp     = { 0x00000001, tu102_disp_new },
+	.dma      = { 0x00000001, gv100_dma_new },
+	.fifo     = { 0x00000001, tu102_fifo_new },
+	.gr       = { 0x00000001, tu102_gr_new },
+	.nvdec    = { 0x00000003, gm107_nvdec_new },
+	.nvenc    = { 0x00000001, gm107_nvenc_new },
+	.sec2     = { 0x00000001, tu102_sec2_new },
 };
 
 static const struct nvkm_device_chip
 nv166_chipset = {
 	.name = "TU106",
-	.acr = tu102_acr_new,
-	.bar = tu102_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.devinit = tu102_devinit_new,
-	.fault = tu102_fault_new,
-	.fb = gv100_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.gsp = gv100_gsp_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.imem = nv50_instmem_new,
-	.ltc = gp102_ltc_new,
-	.mc = tu102_mc_new,
-	.mmu = tu102_mmu_new,
-	.pci = gp100_pci_new,
-	.pmu = gp102_pmu_new,
-	.therm = gp100_therm_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.ce[0] = tu102_ce_new,
-	.ce[1] = tu102_ce_new,
-	.ce[2] = tu102_ce_new,
-	.ce[3] = tu102_ce_new,
-	.ce[4] = tu102_ce_new,
-	.disp = tu102_disp_new,
-	.dma = gv100_dma_new,
-	.fifo = tu102_fifo_new,
-	.gr = tu102_gr_new,
-	.nvdec[0] = gm107_nvdec_new,
-	.nvdec[1] = gm107_nvdec_new,
-	.nvdec[2] = gm107_nvdec_new,
-	.nvenc[0] = gm107_nvenc_new,
-	.sec2 = tu102_sec2_new,
+	.acr      = { 0x00000001, tu102_acr_new },
+	.bar      = { 0x00000001, tu102_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.devinit  = { 0x00000001, tu102_devinit_new },
+	.fault    = { 0x00000001, tu102_fault_new },
+	.fb       = { 0x00000001, gv100_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.gsp      = { 0x00000001, gv100_gsp_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gp102_ltc_new },
+	.mc       = { 0x00000001, tu102_mc_new },
+	.mmu      = { 0x00000001, tu102_mmu_new },
+	.pci      = { 0x00000001, gp100_pci_new },
+	.pmu      = { 0x00000001, gp102_pmu_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.therm    = { 0x00000001, gp100_therm_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.ce       = { 0x0000001f, tu102_ce_new },
+	.disp     = { 0x00000001, tu102_disp_new },
+	.dma      = { 0x00000001, gv100_dma_new },
+	.fifo     = { 0x00000001, tu102_fifo_new },
+	.gr       = { 0x00000001, tu102_gr_new },
+	.nvdec    = { 0x00000007, gm107_nvdec_new },
+	.nvenc    = { 0x00000001, gm107_nvenc_new },
+	.sec2     = { 0x00000001, tu102_sec2_new },
 };
 
 static const struct nvkm_device_chip
 nv167_chipset = {
 	.name = "TU117",
-	.acr = tu102_acr_new,
-	.bar = tu102_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.devinit = tu102_devinit_new,
-	.fault = tu102_fault_new,
-	.fb = gv100_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.gsp = gv100_gsp_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.imem = nv50_instmem_new,
-	.ltc = gp102_ltc_new,
-	.mc = tu102_mc_new,
-	.mmu = tu102_mmu_new,
-	.pci = gp100_pci_new,
-	.pmu = gp102_pmu_new,
-	.therm = gp100_therm_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.ce[0] = tu102_ce_new,
-	.ce[1] = tu102_ce_new,
-	.ce[2] = tu102_ce_new,
-	.ce[3] = tu102_ce_new,
-	.ce[4] = tu102_ce_new,
-	.disp = tu102_disp_new,
-	.dma = gv100_dma_new,
-	.fifo = tu102_fifo_new,
-	.gr = tu102_gr_new,
-	.nvdec[0] = gm107_nvdec_new,
-	.nvenc[0] = gm107_nvenc_new,
-	.sec2 = tu102_sec2_new,
+	.acr      = { 0x00000001, tu102_acr_new },
+	.bar      = { 0x00000001, tu102_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.devinit  = { 0x00000001, tu102_devinit_new },
+	.fault    = { 0x00000001, tu102_fault_new },
+	.fb       = { 0x00000001, gv100_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.gsp      = { 0x00000001, gv100_gsp_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gp102_ltc_new },
+	.mc       = { 0x00000001, tu102_mc_new },
+	.mmu      = { 0x00000001, tu102_mmu_new },
+	.pci      = { 0x00000001, gp100_pci_new },
+	.pmu      = { 0x00000001, gp102_pmu_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.therm    = { 0x00000001, gp100_therm_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.ce       = { 0x0000001f, tu102_ce_new },
+	.disp     = { 0x00000001, tu102_disp_new },
+	.dma      = { 0x00000001, gv100_dma_new },
+	.fifo     = { 0x00000001, tu102_fifo_new },
+	.gr       = { 0x00000001, tu102_gr_new },
+	.nvdec    = { 0x00000001, gm107_nvdec_new },
+	.nvenc    = { 0x00000001, gm107_nvenc_new },
+	.sec2     = { 0x00000001, tu102_sec2_new },
 };
 
 static const struct nvkm_device_chip
 nv168_chipset = {
 	.name = "TU116",
-	.acr = tu102_acr_new,
-	.bar = tu102_bar_new,
-	.bios = nvkm_bios_new,
-	.bus = gf100_bus_new,
-	.devinit = tu102_devinit_new,
-	.fault = tu102_fault_new,
-	.fb = gv100_fb_new,
-	.fuse = gm107_fuse_new,
-	.gpio = gk104_gpio_new,
-	.gsp = gv100_gsp_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.imem = nv50_instmem_new,
-	.ltc = gp102_ltc_new,
-	.mc = tu102_mc_new,
-	.mmu = tu102_mmu_new,
-	.pci = gp100_pci_new,
-	.pmu = gp102_pmu_new,
-	.therm = gp100_therm_new,
-	.timer = gk20a_timer_new,
-	.top = gk104_top_new,
-	.ce[0] = tu102_ce_new,
-	.ce[1] = tu102_ce_new,
-	.ce[2] = tu102_ce_new,
-	.ce[3] = tu102_ce_new,
-	.ce[4] = tu102_ce_new,
-	.disp = tu102_disp_new,
-	.dma = gv100_dma_new,
-	.fifo = tu102_fifo_new,
-	.gr = tu102_gr_new,
-	.nvdec[0] = gm107_nvdec_new,
-	.nvenc[0] = gm107_nvenc_new,
-	.sec2 = tu102_sec2_new,
+	.acr      = { 0x00000001, tu102_acr_new },
+	.bar      = { 0x00000001, tu102_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.bus      = { 0x00000001, gf100_bus_new },
+	.devinit  = { 0x00000001, tu102_devinit_new },
+	.fault    = { 0x00000001, tu102_fault_new },
+	.fb       = { 0x00000001, gv100_fb_new },
+	.fuse     = { 0x00000001, gm107_fuse_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.gsp      = { 0x00000001, gv100_gsp_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.ltc      = { 0x00000001, gp102_ltc_new },
+	.mc       = { 0x00000001, tu102_mc_new },
+	.mmu      = { 0x00000001, tu102_mmu_new },
+	.pci      = { 0x00000001, gp100_pci_new },
+	.pmu      = { 0x00000001, gp102_pmu_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.therm    = { 0x00000001, gp100_therm_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, gk104_top_new },
+	.ce       = { 0x0000001f, tu102_ce_new },
+	.disp     = { 0x00000001, tu102_disp_new },
+	.dma      = { 0x00000001, gv100_dma_new },
+	.fifo     = { 0x00000001, tu102_fifo_new },
+	.gr       = { 0x00000001, tu102_gr_new },
+	.nvdec    = { 0x00000001, gm107_nvdec_new },
+	.nvenc    = { 0x00000001, gm107_nvenc_new },
+	.sec2     = { 0x00000001, tu102_sec2_new },
 };
 
 static const struct nvkm_device_chip
 nv170_chipset = {
 	.name = "GA100",
-	.bar = tu102_bar_new,
-	.bios = nvkm_bios_new,
-	.devinit = ga100_devinit_new,
-	.fb = ga100_fb_new,
-	.gpio = gk104_gpio_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.imem = nv50_instmem_new,
-	.mc = ga100_mc_new,
-	.mmu = tu102_mmu_new,
-	.pci = gp100_pci_new,
-	.timer = gk20a_timer_new,
+	.bar      = { 0x00000001, tu102_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.devinit  = { 0x00000001, ga100_devinit_new },
+	.fb       = { 0x00000001, ga100_fb_new },
+	.gpio     = { 0x00000001, gk104_gpio_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, ga100_mc_new },
+	.mmu      = { 0x00000001, tu102_mmu_new },
+	.pci      = { 0x00000001, gp100_pci_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, ga100_top_new },
 };
 
 static const struct nvkm_device_chip
 nv172_chipset = {
 	.name = "GA102",
-	.bar = tu102_bar_new,
-	.bios = nvkm_bios_new,
-	.devinit = ga100_devinit_new,
-	.fb = ga102_fb_new,
-	.gpio = ga102_gpio_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.imem = nv50_instmem_new,
-	.mc = ga100_mc_new,
-	.mmu = tu102_mmu_new,
-	.pci = gp100_pci_new,
-	.timer = gk20a_timer_new,
-	.disp = ga102_disp_new,
-	.dma = gv100_dma_new,
+	.bar      = { 0x00000001, tu102_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.devinit  = { 0x00000001, ga100_devinit_new },
+	.fb       = { 0x00000001, ga102_fb_new },
+	.gpio     = { 0x00000001, ga102_gpio_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, ga100_mc_new },
+	.mmu      = { 0x00000001, tu102_mmu_new },
+	.pci      = { 0x00000001, gp100_pci_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, ga100_top_new },
+	.disp     = { 0x00000001, ga102_disp_new },
+	.dma      = { 0x00000001, gv100_dma_new },
 };
 
 static const struct nvkm_device_chip
 nv174_chipset = {
 	.name = "GA104",
-	.bar = tu102_bar_new,
-	.bios = nvkm_bios_new,
-	.devinit = ga100_devinit_new,
-	.fb = ga102_fb_new,
-	.gpio = ga102_gpio_new,
-	.i2c = gm200_i2c_new,
-	.ibus = gm200_ibus_new,
-	.imem = nv50_instmem_new,
-	.mc = ga100_mc_new,
-	.mmu = tu102_mmu_new,
-	.pci = gp100_pci_new,
-	.timer = gk20a_timer_new,
-	.disp = ga102_disp_new,
-	.dma = gv100_dma_new,
+	.bar      = { 0x00000001, tu102_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.devinit  = { 0x00000001, ga100_devinit_new },
+	.fb       = { 0x00000001, ga102_fb_new },
+	.gpio     = { 0x00000001, ga102_gpio_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, ga100_mc_new },
+	.mmu      = { 0x00000001, tu102_mmu_new },
+	.pci      = { 0x00000001, gp100_pci_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, ga100_top_new },
+	.disp     = { 0x00000001, ga102_disp_new },
+	.dma      = { 0x00000001, gv100_dma_new },
 };
 
 static int
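
The chipset tables above switch every subdev/engine member from one function pointer per instance to a { inst, ctor } pair, where bit N of inst enables instance N: GV100's `.ce = { 0x000001ff, gv100_ce_new }` stands in for the nine `ce[0]`..`ce[8]` initialisers it replaces, and `.nvenc = { 0x00000007, ... }` for three encoders. A minimal standalone sketch of decoding such a mask (names here are illustrative, not the kernel's):

```c
/* Standalone sketch: bit N of .inst enables instance N of the engine. */
#include <stdint.h>
#include <stdio.h>

struct chip_entry {
	uint32_t inst;        /* bitmask of instances present on this chip */
	const char *ctor;     /* stand-in for the constructor pointer */
};

int main(void)
{
	/* Mirrors GV100's ".ce = { 0x000001ff, gv100_ce_new }". */
	struct chip_entry ce = { 0x000001ff, "gv100_ce_new" };
	unsigned j;

	for (j = 0; j < 32; j++) {
		if (ce.inst & (1u << j))
			printf("ce[%u] <- %s\n", j, ce.ctor);
	}
	return 0;
}
```
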
@@ -2726,97 +2643,24 @@ nvkm_device_event_func = {
 };
 
 struct nvkm_subdev *
-nvkm_device_subdev(struct nvkm_device *device, int index)
+nvkm_device_subdev(struct nvkm_device *device, int type, int inst)
 {
-	struct nvkm_engine *engine;
-
-	if (device->disable_mask & (1ULL << index))
-		return NULL;
-
-	switch (index) {
-#define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break
-	_(ACR     , device->acr     , &device->acr->subdev);
-	_(BAR     , device->bar     , &device->bar->subdev);
-	_(VBIOS   , device->bios    , &device->bios->subdev);
-	_(BUS     , device->bus     , &device->bus->subdev);
-	_(CLK     , device->clk     , &device->clk->subdev);
-	_(DEVINIT , device->devinit , &device->devinit->subdev);
-	_(FAULT   , device->fault   , &device->fault->subdev);
-	_(FB      , device->fb      , &device->fb->subdev);
-	_(FUSE    , device->fuse    , &device->fuse->subdev);
-	_(GPIO    , device->gpio    , &device->gpio->subdev);
-	_(GSP     , device->gsp     , &device->gsp->subdev);
-	_(I2C     , device->i2c     , &device->i2c->subdev);
-	_(IBUS    , device->ibus    ,  device->ibus);
-	_(ICCSENSE, device->iccsense, &device->iccsense->subdev);
-	_(INSTMEM , device->imem    , &device->imem->subdev);
-	_(LTC     , device->ltc     , &device->ltc->subdev);
-	_(MC      , device->mc      , &device->mc->subdev);
-	_(MMU     , device->mmu     , &device->mmu->subdev);
-	_(MXM     , device->mxm     ,  device->mxm);
-	_(PCI     , device->pci     , &device->pci->subdev);
-	_(PMU     , device->pmu     , &device->pmu->subdev);
-	_(THERM   , device->therm   , &device->therm->subdev);
-	_(TIMER   , device->timer   , &device->timer->subdev);
-	_(TOP     , device->top     , &device->top->subdev);
-	_(VOLT    , device->volt    , &device->volt->subdev);
-#undef _
-	default:
-		engine = nvkm_device_engine(device, index);
-		if (engine)
-			return &engine->subdev;
-		break;
+	struct nvkm_subdev *subdev;
+
+	list_for_each_entry(subdev, &device->subdev, head) {
+		if (subdev->type == type && subdev->inst == inst)
+			return subdev;
 	}
+
 	return NULL;
 }
 
 struct nvkm_engine *
-nvkm_device_engine(struct nvkm_device *device, int index)
+nvkm_device_engine(struct nvkm_device *device, int type, int inst)
 {
-	if (device->disable_mask & (1ULL << index))
-		return NULL;
-
-	switch (index) {
-#define _(n,p,m) case NVKM_ENGINE_##n: if (p) return (m); break
-	_(BSP    , device->bsp     ,  device->bsp);
-	_(CE0    , device->ce[0]   ,  device->ce[0]);
-	_(CE1    , device->ce[1]   ,  device->ce[1]);
-	_(CE2    , device->ce[2]   ,  device->ce[2]);
-	_(CE3    , device->ce[3]   ,  device->ce[3]);
-	_(CE4    , device->ce[4]   ,  device->ce[4]);
-	_(CE5    , device->ce[5]   ,  device->ce[5]);
-	_(CE6    , device->ce[6]   ,  device->ce[6]);
-	_(CE7    , device->ce[7]   ,  device->ce[7]);
-	_(CE8    , device->ce[8]   ,  device->ce[8]);
-	_(CIPHER , device->cipher  ,  device->cipher);
-	_(DISP   , device->disp    , &device->disp->engine);
-	_(DMAOBJ , device->dma     , &device->dma->engine);
-	_(FIFO   , device->fifo    , &device->fifo->engine);
-	_(GR     , device->gr      , &device->gr->engine);
-	_(IFB    , device->ifb     ,  device->ifb);
-	_(ME     , device->me      ,  device->me);
-	_(MPEG   , device->mpeg    ,  device->mpeg);
-	_(MSENC  , device->msenc   ,  device->msenc);
-	_(MSPDEC , device->mspdec  ,  device->mspdec);
-	_(MSPPP  , device->msppp   ,  device->msppp);
-	_(MSVLD  , device->msvld   ,  device->msvld);
-	_(NVENC0 , device->nvenc[0], &device->nvenc[0]->engine);
-	_(NVENC1 , device->nvenc[1], &device->nvenc[1]->engine);
-	_(NVENC2 , device->nvenc[2], &device->nvenc[2]->engine);
-	_(NVDEC0 , device->nvdec[0], &device->nvdec[0]->engine);
-	_(NVDEC1 , device->nvdec[1], &device->nvdec[1]->engine);
-	_(NVDEC2 , device->nvdec[2], &device->nvdec[2]->engine);
-	_(PM     , device->pm      , &device->pm->engine);
-	_(SEC    , device->sec     ,  device->sec);
-	_(SEC2   , device->sec2    , &device->sec2->engine);
-	_(SW     , device->sw      , &device->sw->engine);
-	_(VIC    , device->vic     ,  device->vic);
-	_(VP     , device->vp      ,  device->vp);
-#undef _
-	default:
-		WARN_ON(1);
-		break;
-	}
+	struct nvkm_subdev *subdev = nvkm_device_subdev(device, type, inst);
+	if (subdev && subdev->func == &nvkm_engine)
+		return container_of(subdev, struct nvkm_engine, subdev);
 	return NULL;
 }
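
With the per-index switch gone, lookup becomes a linear walk of one device-wide list keyed by (type, inst); a subdev that was never constructed simply is not on the list, which is also what retires the old `disable_mask`. A minimal sketch of the same idea with hypothetical types:

```c
/* Sketch of (type, inst) lookup over a singly linked list; the kernel
 * walks its doubly linked list_head, but the search is the same. */
#include <stddef.h>

struct subdev {
	int type;             /* enum-like subdev/engine type */
	int inst;             /* instance number, 0 for single-instance */
	struct subdev *next;
};

static struct subdev *
subdev_lookup(struct subdev *head, int type, int inst)
{
	struct subdev *s;

	for (s = head; s; s = s->next) {
		if (s->type == type && s->inst == inst)
			return s;
	}
	return NULL;          /* absent subdevs are simply not listed */
}
```
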
 
@@ -2825,7 +2669,7 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend)
 {
 	const char *action = suspend ? "suspend" : "fini";
 	struct nvkm_subdev *subdev;
-	int ret, i;
+	int ret;
 	s64 time;
 
 	nvdev_trace(device, "%s running...\n", action);
@@ -2833,12 +2677,10 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend)
 
 	nvkm_acpi_fini(device);
 
-	for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) {
-		if ((subdev = nvkm_device_subdev(device, i))) {
-			ret = nvkm_subdev_fini(subdev, suspend);
-			if (ret && suspend)
-				goto fail;
-		}
+	list_for_each_entry_reverse(subdev, &device->subdev, head) {
+		ret = nvkm_subdev_fini(subdev, suspend);
+		if (ret && suspend)
+			goto fail;
 	}
 
 	nvkm_therm_clkgate_fini(device->therm, suspend);
@@ -2851,13 +2693,11 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend)
 	return 0;
 
 fail:
-	do {
-		if ((subdev = nvkm_device_subdev(device, i))) {
-			int rret = nvkm_subdev_init(subdev);
-			if (rret)
-				nvkm_fatal(subdev, "failed restart, %d\n", ret);
-		}
-	} while (++i < NVKM_SUBDEV_NR);
+	list_for_each_entry_from(subdev, &device->subdev, head) {
+		int rret = nvkm_subdev_init(subdev);
+		if (rret)
+			nvkm_fatal(subdev, "failed restart, %d\n", ret);
+	}
 
 	nvdev_trace(device, "%s failed with %d\n", action, ret);
 	return ret;
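
The failure path keeps the original semantics: subdevs are suspended in reverse list order, and if one refuses, everything already suspended (including the refusing entry) is re-initialised by walking forward from the failure point, which is exactly what `list_for_each_entry_from()` provides. The same shape in plain C over an array, with hypothetical suspend()/init() callbacks:

```c
/* Sketch of suspend-in-reverse with forward re-init on failure. */
struct dev_entry { int id; };

static int
suspend_all(struct dev_entry *e, int n,
	    int (*suspend)(struct dev_entry *),
	    int (*init)(struct dev_entry *))
{
	int i, ret = 0;

	for (i = n - 1; i >= 0; i--) {     /* reverse, like the list walk */
		ret = suspend(&e[i]);
		if (ret)
			break;
	}
	if (!ret)
		return 0;

	for (; i < n; i++)                 /* forward from the failure,  */
		init(&e[i]);               /* including the failed entry */
	return ret;
}
```
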
@@ -2867,7 +2707,7 @@ static int
 nvkm_device_preinit(struct nvkm_device *device)
 {
 	struct nvkm_subdev *subdev;
-	int ret, i;
+	int ret;
 	s64 time;
 
 	nvdev_trace(device, "preinit running...\n");
@@ -2879,15 +2719,13 @@ nvkm_device_preinit(struct nvkm_device *device)
 			goto fail;
 	}
 
-	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
-		if ((subdev = nvkm_device_subdev(device, i))) {
-			ret = nvkm_subdev_preinit(subdev);
-			if (ret)
-				goto fail;
-		}
+	list_for_each_entry(subdev, &device->subdev, head) {
+		ret = nvkm_subdev_preinit(subdev);
+		if (ret)
+			goto fail;
 	}
 
-	ret = nvkm_devinit_post(device->devinit, &device->disable_mask);
+	ret = nvkm_devinit_post(device->devinit);
 	if (ret)
 		goto fail;
 
@@ -2904,7 +2742,7 @@ int
 nvkm_device_init(struct nvkm_device *device)
 {
 	struct nvkm_subdev *subdev;
-	int ret, i;
+	int ret;
 	s64 time;
 
 	ret = nvkm_device_preinit(device);
@@ -2922,12 +2760,10 @@ nvkm_device_init(struct nvkm_device *device)
 			goto fail;
 	}
 
-	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
-		if ((subdev = nvkm_device_subdev(device, i))) {
-			ret = nvkm_subdev_init(subdev);
-			if (ret)
-				goto fail_subdev;
-		}
+	list_for_each_entry(subdev, &device->subdev, head) {
+		ret = nvkm_subdev_init(subdev);
+		if (ret)
+			goto fail_subdev;
 	}
 
 	nvkm_acpi_init(device);
@@ -2938,11 +2774,8 @@ nvkm_device_init(struct nvkm_device *device)
 	return 0;
 
 fail_subdev:
-	do {
-		if ((subdev = nvkm_device_subdev(device, i)))
-			nvkm_subdev_fini(subdev, false);
-	} while (--i >= 0);
-
+	list_for_each_entry_from(subdev, &device->subdev, head)
+		nvkm_subdev_fini(subdev, false);
 fail:
 	nvkm_device_fini(device, false);
 
@@ -2954,15 +2787,12 @@ void
 nvkm_device_del(struct nvkm_device **pdevice)
 {
 	struct nvkm_device *device = *pdevice;
-	int i;
+	struct nvkm_subdev *subdev, *subtmp;
 	if (device) {
 		mutex_lock(&nv_devices_mutex);
-		device->disable_mask = 0;
-		for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) {
-			struct nvkm_subdev *subdev =
-				nvkm_device_subdev(device, i);
+
+		list_for_each_entry_safe_reverse(subdev, subtmp, &device->subdev, head)
 			nvkm_subdev_del(&subdev);
-		}
 
 		nvkm_event_fini(&device->event);
 
@@ -3021,7 +2851,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 	struct nvkm_subdev *subdev;
 	u64 mmio_base, mmio_size;
 	u32 boot0, boot1, strap;
-	int ret = -EEXIST, i;
+	int ret = -EEXIST, j;
 	unsigned chipset;
 
 	mutex_lock(&nv_devices_mutex);
@@ -3038,6 +2868,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 	device->name = name;
 	list_add_tail(&device->head, &nv_devices);
 	device->debug = nvkm_dbgopt(device->dbgopt, "device");
+	INIT_LIST_HEAD(&device->subdev);
 
 	ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
 	if (ret)
@@ -3271,88 +3102,46 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 
 	mutex_init(&device->mutex);
 
-	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
-#define _(s,m) case s:                                                         \
-	if (device->chip->m && (subdev_mask & (1ULL << (s)))) {                \
-		ret = device->chip->m(device, (s), &device->m);                \
-		if (ret) {                                                     \
-			subdev = nvkm_device_subdev(device, (s));              \
-			nvkm_subdev_del(&subdev);                              \
-			device->m = NULL;                                      \
-			if (ret != -ENODEV) {                                  \
-				nvdev_error(device, "%s ctor failed, %d\n",    \
-					    nvkm_subdev_name[s], ret);         \
-				goto done;                                     \
-			}                                                      \
-		}                                                              \
-	}                                                                      \
-	break
-		switch (i) {
-		_(NVKM_SUBDEV_ACR     ,      acr);
-		_(NVKM_SUBDEV_BAR     ,      bar);
-		_(NVKM_SUBDEV_VBIOS   ,     bios);
-		_(NVKM_SUBDEV_BUS     ,      bus);
-		_(NVKM_SUBDEV_CLK     ,      clk);
-		_(NVKM_SUBDEV_DEVINIT ,  devinit);
-		_(NVKM_SUBDEV_FAULT   ,    fault);
-		_(NVKM_SUBDEV_FB      ,       fb);
-		_(NVKM_SUBDEV_FUSE    ,     fuse);
-		_(NVKM_SUBDEV_GPIO    ,     gpio);
-		_(NVKM_SUBDEV_GSP     ,      gsp);
-		_(NVKM_SUBDEV_I2C     ,      i2c);
-		_(NVKM_SUBDEV_IBUS    ,     ibus);
-		_(NVKM_SUBDEV_ICCSENSE, iccsense);
-		_(NVKM_SUBDEV_INSTMEM ,     imem);
-		_(NVKM_SUBDEV_LTC     ,      ltc);
-		_(NVKM_SUBDEV_MC      ,       mc);
-		_(NVKM_SUBDEV_MMU     ,      mmu);
-		_(NVKM_SUBDEV_MXM     ,      mxm);
-		_(NVKM_SUBDEV_PCI     ,      pci);
-		_(NVKM_SUBDEV_PMU     ,      pmu);
-		_(NVKM_SUBDEV_THERM   ,    therm);
-		_(NVKM_SUBDEV_TIMER   ,    timer);
-		_(NVKM_SUBDEV_TOP     ,      top);
-		_(NVKM_SUBDEV_VOLT    ,     volt);
-		_(NVKM_ENGINE_BSP     ,      bsp);
-		_(NVKM_ENGINE_CE0     ,    ce[0]);
-		_(NVKM_ENGINE_CE1     ,    ce[1]);
-		_(NVKM_ENGINE_CE2     ,    ce[2]);
-		_(NVKM_ENGINE_CE3     ,    ce[3]);
-		_(NVKM_ENGINE_CE4     ,    ce[4]);
-		_(NVKM_ENGINE_CE5     ,    ce[5]);
-		_(NVKM_ENGINE_CE6     ,    ce[6]);
-		_(NVKM_ENGINE_CE7     ,    ce[7]);
-		_(NVKM_ENGINE_CE8     ,    ce[8]);
-		_(NVKM_ENGINE_CIPHER  ,   cipher);
-		_(NVKM_ENGINE_DISP    ,     disp);
-		_(NVKM_ENGINE_DMAOBJ  ,      dma);
-		_(NVKM_ENGINE_FIFO    ,     fifo);
-		_(NVKM_ENGINE_GR      ,       gr);
-		_(NVKM_ENGINE_IFB     ,      ifb);
-		_(NVKM_ENGINE_ME      ,       me);
-		_(NVKM_ENGINE_MPEG    ,     mpeg);
-		_(NVKM_ENGINE_MSENC   ,    msenc);
-		_(NVKM_ENGINE_MSPDEC  ,   mspdec);
-		_(NVKM_ENGINE_MSPPP   ,    msppp);
-		_(NVKM_ENGINE_MSVLD   ,    msvld);
-		_(NVKM_ENGINE_NVENC0  , nvenc[0]);
-		_(NVKM_ENGINE_NVENC1  , nvenc[1]);
-		_(NVKM_ENGINE_NVENC2  , nvenc[2]);
-		_(NVKM_ENGINE_NVDEC0  , nvdec[0]);
-		_(NVKM_ENGINE_NVDEC1  , nvdec[1]);
-		_(NVKM_ENGINE_NVDEC2  , nvdec[2]);
-		_(NVKM_ENGINE_PM      ,       pm);
-		_(NVKM_ENGINE_SEC     ,      sec);
-		_(NVKM_ENGINE_SEC2    ,     sec2);
-		_(NVKM_ENGINE_SW      ,       sw);
-		_(NVKM_ENGINE_VIC     ,      vic);
-		_(NVKM_ENGINE_VP      ,       vp);
-		default:
-			WARN_ON(1);
-			continue;
-		}
-#undef _
+#define NVKM_LAYOUT_ONCE(type,data,ptr)                                                      \
+	if (device->chip->ptr.inst && (subdev_mask & (BIT_ULL(type)))) {                     \
+		WARN_ON(device->chip->ptr.inst != 0x00000001);                               \
+		ret = device->chip->ptr.ctor(device, (type), -1, &device->ptr);              \
+		subdev = nvkm_device_subdev(device, (type), 0);                              \
+		if (ret) {                                                                   \
+			nvkm_subdev_del(&subdev);                                            \
+			device->ptr = NULL;                                                  \
+			if (ret != -ENODEV) {                                                \
+				nvdev_error(device, "%s ctor failed: %d\n",                  \
+					    nvkm_subdev_type[(type)], ret);                  \
+				goto done;                                                   \
+			}                                                                    \
+		} else {                                                                     \
+			subdev->pself = (void **)&device->ptr;                               \
+		}                                                                            \
+	}
+#define NVKM_LAYOUT_INST(type,data,ptr,cnt)                                                  \
+	WARN_ON(device->chip->ptr.inst & ~((1 << ARRAY_SIZE(device->ptr)) - 1));             \
+	for (j = 0; device->chip->ptr.inst && j < ARRAY_SIZE(device->ptr); j++) {            \
+		if ((device->chip->ptr.inst & BIT(j)) && (subdev_mask & BIT_ULL(type))) {    \
+			int inst = (device->chip->ptr.inst == 1) ? -1 : (j);                 \
+			ret = device->chip->ptr.ctor(device, (type), inst, &device->ptr[j]); \
+			subdev = nvkm_device_subdev(device, (type), (j));                    \
+			if (ret) {                                                           \
+				nvkm_subdev_del(&subdev);                                    \
+				device->ptr[j] = NULL;                                       \
+				if (ret != -ENODEV) {                                        \
+					nvdev_error(device, "%s%d ctor failed: %d\n",        \
+						    nvkm_subdev_type[(type)], (j), ret);     \
+					goto done;                                           \
+				}                                                            \
+			} else {                                                             \
+				subdev->pself = (void **)&device->ptr[j];                    \
+			}                                                                    \
+		}                                                                            \
 	}
+#include <core/layout.h>
+#undef NVKM_LAYOUT_INST
+#undef NVKM_LAYOUT_ONCE
 
 	ret = 0;
 done:
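
The two NVKM_LAYOUT_* macros above are consumed by `#include <core/layout.h>`, an X-macro header: a flat list of NVKM_LAYOUT_ONCE()/NVKM_LAYOUT_INST() invocations in instantiation order, with each consumer defining the macros to mean whatever it needs before including the list. A self-contained sketch of the technique (the layout contents and argument shapes here are invented for illustration):

```c
/* X-macro sketch: one list, several expansions. */
#include <stdio.h>

#define LAYOUT                      \
	LAYOUT_ONCE(TYPE_BAR, bar)  \
	LAYOUT_ONCE(TYPE_MMU, mmu)  \
	LAYOUT_INST(TYPE_CE,  ce, 9)

/* Expansion 1: derive an enum of subdev types from the list. */
#define LAYOUT_ONCE(t, p)    t,
#define LAYOUT_INST(t, p, c) t,
enum subdev_type { LAYOUT TYPE_NR };
#undef LAYOUT_ONCE
#undef LAYOUT_INST

/* Expansion 2: walk the same list again, this time emitting code. */
int main(void)
{
#define LAYOUT_ONCE(t, p)    printf("%s: 1 instance\n", #p);
#define LAYOUT_INST(t, p, c) printf("%s: up to %d instances\n", #p, c);
	LAYOUT
#undef LAYOUT_ONCE
#undef LAYOUT_INST
	return 0;
}
```
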
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 54eab5e042307c3b6d8df3af5df668e7d138f866..93949b3c721432be017c3d20bde7a355b6b48c94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -15,7 +15,6 @@
 #include <subdev/gpio.h>
 #include <subdev/gsp.h>
 #include <subdev/i2c.h>
-#include <subdev/ibus.h>
 #include <subdev/iccsense.h>
 #include <subdev/instmem.h>
 #include <subdev/ltc.h>
@@ -24,6 +23,7 @@
 #include <subdev/mxm.h>
 #include <subdev/pci.h>
 #include <subdev/pmu.h>
+#include <subdev/privring.h>
 #include <subdev/therm.h>
 #include <subdev/timer.h>
 #include <subdev/top.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 1478947987860d664a89247afaecd8e7d46c9dc7..fea9d8f2b10cbcfe83a390d197dd83e1a1fbb1ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -43,15 +43,15 @@ static int
 nvkm_udevice_info_subdev(struct nvkm_device *device, u64 mthd, u64 *data)
 {
 	struct nvkm_subdev *subdev;
-	enum nvkm_devidx subidx;
+	enum nvkm_subdev_type type;
 
 	switch (mthd & NV_DEVICE_INFO_UNIT) {
-	case NV_DEVICE_FIFO(0): subidx = NVKM_ENGINE_FIFO; break;
+	case NV_DEVICE_HOST(0): type = NVKM_ENGINE_FIFO; break;
 	default:
 		return -EINVAL;
 	}
 
-	subdev = nvkm_device_subdev(device, subidx);
+	subdev = nvkm_device_subdev(device, type, 0);
 	if (subdev)
 		return nvkm_subdev_info(subdev, mthd, data);
 	return -ENODEV;
@@ -66,37 +66,7 @@ nvkm_udevice_info_v1(struct nvkm_device *device,
 			args->mthd = NV_DEVICE_INFO_INVALID;
 		return;
 	}
-
-	switch (args->mthd) {
-#define ENGINE__(A,B,C) NV_DEVICE_INFO_ENGINE_##A: { int _i;                   \
-	for (_i = (B), args->data = 0ULL; _i <= (C); _i++) {                   \
-		if (nvkm_device_engine(device, _i))                            \
-			args->data |= BIT_ULL(_i);                             \
-	}                                                                      \
-}
-#define ENGINE_A(A) ENGINE__(A, NVKM_ENGINE_##A   , NVKM_ENGINE_##A)
-#define ENGINE_B(A) ENGINE__(A, NVKM_ENGINE_##A##0, NVKM_ENGINE_##A##_LAST)
-	case ENGINE_A(SW    ); break;
-	case ENGINE_A(GR    ); break;
-	case ENGINE_A(MPEG  ); break;
-	case ENGINE_A(ME    ); break;
-	case ENGINE_A(CIPHER); break;
-	case ENGINE_A(BSP   ); break;
-	case ENGINE_A(VP    ); break;
-	case ENGINE_B(CE    ); break;
-	case ENGINE_A(SEC   ); break;
-	case ENGINE_A(MSVLD ); break;
-	case ENGINE_A(MSPDEC); break;
-	case ENGINE_A(MSPPP ); break;
-	case ENGINE_A(MSENC ); break;
-	case ENGINE_A(VIC   ); break;
-	case ENGINE_A(SEC2  ); break;
-	case ENGINE_B(NVDEC ); break;
-	case ENGINE_B(NVENC ); break;
-	default:
-		args->mthd = NV_DEVICE_INFO_INVALID;
-		break;
-	}
+	args->mthd = NV_DEVICE_INFO_INVALID;
 }
 
 static int
@@ -357,7 +327,7 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
 	int i;
 
 	for (; i = __ffs64(mask), mask && !sclass; mask &= ~(1ULL << i)) {
-		if (!(engine = nvkm_device_engine(device, i)) ||
+		if (!(engine = nvkm_device_engine(device, i, 0)) ||
 		    !(engine->func->base.sclass))
 			continue;
 		oclass->engine = engine;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index cbd33e87b799a70b6085707de74bafaefe047e24..5daa777552767caf53ab04af65d6568efe0d1d22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -149,10 +149,10 @@ static void
 nvkm_disp_class_del(struct nvkm_oproxy *oproxy)
 {
 	struct nvkm_disp *disp = nvkm_disp(oproxy->base.engine);
-	mutex_lock(&disp->engine.subdev.mutex);
-	if (disp->client == oproxy)
-		disp->client = NULL;
-	mutex_unlock(&disp->engine.subdev.mutex);
+	spin_lock(&disp->client.lock);
+	if (disp->client.object == oproxy)
+		disp->client.object = NULL;
+	spin_unlock(&disp->client.lock);
 }
 
 static const struct nvkm_oproxy_func
@@ -175,13 +175,13 @@ nvkm_disp_class_new(struct nvkm_device *device,
 		return ret;
 	*pobject = &oproxy->base;
 
-	mutex_lock(&disp->engine.subdev.mutex);
-	if (disp->client) {
-		mutex_unlock(&disp->engine.subdev.mutex);
+	spin_lock(&disp->client.lock);
+	if (disp->client.object) {
+		spin_unlock(&disp->client.lock);
 		return -EBUSY;
 	}
-	disp->client = oproxy;
-	mutex_unlock(&disp->engine.subdev.mutex);
+	disp->client.object = oproxy;
+	spin_unlock(&disp->client.lock);
 
 	return sclass->ctor(disp, oclass, data, size, &oproxy->object);
 }
@@ -473,21 +473,22 @@ nvkm_disp = {
 
 int
 nvkm_disp_ctor(const struct nvkm_disp_func *func, struct nvkm_device *device,
-	       int index, struct nvkm_disp *disp)
+	       enum nvkm_subdev_type type, int inst, struct nvkm_disp *disp)
 {
 	disp->func = func;
 	INIT_LIST_HEAD(&disp->head);
 	INIT_LIST_HEAD(&disp->ior);
 	INIT_LIST_HEAD(&disp->outp);
 	INIT_LIST_HEAD(&disp->conn);
-	return nvkm_engine_ctor(&nvkm_disp, device, index, true, &disp->engine);
+	spin_lock_init(&disp->client.lock);
+	return nvkm_engine_ctor(&nvkm_disp, device, type, inst, true, &disp->engine);
 }
 
 int
 nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
-	       int index, struct nvkm_disp **pdisp)
+	       enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
 {
 	if (!(*pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
 		return -ENOMEM;
-	return nvkm_disp_ctor(func, device, index, *pdisp);
+	return nvkm_disp_ctor(func, device, type, inst, *pdisp);
 }
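
The disp changes convert the single-client guard from the subdev mutex to a dedicated spinlock plus owner pointer (`disp->client.lock` / `disp->client.object`): the first class instance claims the slot, later claimants get -EBUSY, and teardown clears the slot only if it still owns it. A userspace sketch of the same claim/release discipline, with a pthread mutex standing in for the spinlock:

```c
/* Sketch of a single-owner client slot; -EBUSY if already claimed. */
#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct client_slot {
	pthread_mutex_t lock;
	void *object;          /* current owner, or NULL */
};

static int
slot_claim(struct client_slot *s, void *who)
{
	int ret = 0;

	pthread_mutex_lock(&s->lock);
	if (s->object)
		ret = -EBUSY;
	else
		s->object = who;
	pthread_mutex_unlock(&s->lock);
	return ret;
}

static void
slot_release(struct client_slot *s, void *who)
{
	pthread_mutex_lock(&s->lock);
	if (s->object == who)  /* only the owner clears the slot */
		s->object = NULL;
	pthread_mutex_unlock(&s->lock);
}
```
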
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 50e3539f33d22daa631e90a0bbcc8236f9899ff5..a7a7eb0415159e548bba8f3d043b33770980f734 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -278,7 +278,7 @@ nv50_disp_chan_child_get(struct nvkm_object *object, int index,
 	const struct nvkm_device_oclass *oclass = NULL;
 
 	if (chan->func->bind)
-		sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ);
+		sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ, 0);
 	else
 		sclass->engine = NULL;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index 731f188fc1ee7d69fe959dd28d8d67c0855d8890..156bbe8b2de3e2107f8aa5df88682afb7069364b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -41,7 +41,8 @@ g84_disp = {
 };
 
 int
-g84_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+g84_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&g84_disp, device, index, pdisp);
+	return nv50_disp_new_(&g84_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index def54fe1951ec36641ec6a140b86bcff5e3f4c7c..3425b5d3bc72f65ef19e5a69f503669cf4737c46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -41,7 +41,8 @@ g94_disp = {
 };
 
 int
-g94_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+g94_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&g94_disp, device, index, pdisp);
+	return nv50_disp_new_(&g94_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
index aa2e5645fe365929b44849d8407d5443ad0103f7..68aa52588d92fe202a15328ce74a8c33dc9fdb8c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
@@ -40,7 +40,8 @@ ga102_disp = {
 };
 
 int
-ga102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+ga102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&ga102_disp, device, index, pdisp);
+	return nv50_disp_new_(&ga102_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index e675d9b9d5d70f92b342bbea98a233449ffb8b85..a6bafe7fea1f2bd34f30c8b2e5d4902b739b027c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -266,7 +266,8 @@ gf119_disp = {
 };
 
 int
-gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gf119_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&gf119_disp, device, index, pdisp);
+	return nv50_disp_new_(&gf119_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index 4c3439b1a62da6d7472ed7869b2cd3c2576f3ac5..3b79cf233ac5a7044cff1535ddfd59b9ef3e96d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -41,7 +41,8 @@ gk104_disp = {
 };
 
 int
-gk104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gk104_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&gk104_disp, device, index, pdisp);
+	return nv50_disp_new_(&gk104_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index bc6f4750c942eb57e8c34f1f5c0b90787e94921e..988eb12237a6edf976c865fc9ad6ba31ded6cec5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -41,7 +41,8 @@ gk110_disp = {
 };
 
 int
-gk110_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gk110_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&gk110_disp, device, index, pdisp);
+	return nv50_disp_new_(&gk110_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index 031cf6b03a76680f045b222440622df752f9731c..5d8108feeacdb9cdc0102582eff2be33b7116b33 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -41,7 +41,8 @@ gm107_disp = {
 };
 
 int
-gm107_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gm107_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&gm107_disp, device, index, pdisp);
+	return nv50_disp_new_(&gm107_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
index ec9c33a5162d798e87c816fbb8e03b7e626b5588..f7bb660874768e32526121d7385eee7cda8fc3ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
@@ -41,7 +41,8 @@ gm200_disp = {
 };
 
 int
-gm200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gm200_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&gm200_disp, device, index, pdisp);
+	return nv50_disp_new_(&gm200_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
index 8471de3f3b616cf54a4742c500efafa164ab4c15..af0ca812a39497c147a2957c5eb44ca6b7d6a12e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
@@ -40,7 +40,8 @@ gp100_disp = {
 };
 
 int
-gp100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gp100_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&gp100_disp, device, index, pdisp);
+	return nv50_disp_new_(&gp100_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
index a3779c5046ead129dd42aa87b801f66f774d7d36..065fea1bdfd12b996268d99aab34b3ca6d6241d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
@@ -67,7 +67,8 @@ gp102_disp = {
 };
 
 int
-gp102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gp102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&gp102_disp, device, index, pdisp);
+	return nv50_disp_new_(&gp102_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
index f80183701f448dc36b7a921e2d811a2ae41e9feb..22bc269df64acba2aae6525bafced3e13ccd0094 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
@@ -41,7 +41,8 @@ gt200_disp = {
 };
 
 int
-gt200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gt200_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&gt200_disp, device, index, pdisp);
+	return nv50_disp_new_(&gt200_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index 7581efc1357e90d355625374097e43dd5da67df8..63a912b174d7fff0636866e74599e6436912d45f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -41,7 +41,8 @@ gt215_disp = {
 };
 
 int
-gt215_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gt215_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&gt215_disp, device, index, pdisp);
+	return nv50_disp_new_(&gt215_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index c1032527f79111aaa237cfd51e33b01f574eebf4..53879d5271cf730b0352b72990d6b95929798a3d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -441,7 +441,8 @@ gv100_disp = {
 };
 
 int
-gv100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gv100_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&gv100_disp, device, index, pdisp);
+	return nv50_disp_new_(&gv100_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
index cfdce23ab83a6a4bacb3f60ae3ba4f54556c1fc9..762a59f24bbb4f68c07c9ce4a1c4e9ec706a5e61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
@@ -39,7 +39,8 @@ mcp77_disp = {
 };
 
 int
-mcp77_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+mcp77_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&mcp77_disp, device, index, pdisp);
+	return nv50_disp_new_(&mcp77_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
index 85d9329cfa0e1507d258b8473f16a2178a1046b6..e5c58aae15de64960421fdae2572c2a345e3184f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
@@ -39,7 +39,8 @@ mcp89_disp = {
 };
 
 int
-mcp89_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+mcp89_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&mcp89_disp, device, index, pdisp);
+	return nv50_disp_new_(&mcp89_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
index b780ba1a3bc7fa31f2338bf184955dfc18e80be2..a12097db2c2a76f96d1750134284a2177a8dc415 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
@@ -64,11 +64,12 @@ nv04_disp = {
 };
 
 int
-nv04_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+nv04_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_disp **pdisp)
 {
 	int ret, i;
 
-	ret = nvkm_disp_new_(&nv04_disp, device, index, pdisp);
+	ret = nvkm_disp_new_(&nv04_disp, device, type, inst, pdisp);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index e21556bf2cb1a1442d4c8d85908a14f03f06c02a..3f20e49070ced83aa61a577d76e2db5338feb892 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -154,7 +154,7 @@ nv50_disp_ = {
 
 int
 nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
-	       int index, struct nvkm_disp **pdisp)
+	       enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
 {
 	struct nv50_disp *disp;
 	int ret;
@@ -164,7 +164,7 @@ nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
 	disp->func = func;
 	*pdisp = &disp->base;
 
-	ret = nvkm_disp_ctor(&nv50_disp_, device, index, &disp->base);
+	ret = nvkm_disp_ctor(&nv50_disp_, device, type, inst, &disp->base);
 	if (ret)
 		return ret;
 
@@ -769,7 +769,8 @@ nv50_disp = {
 };
 
 int
-nv50_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+nv50_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&nv50_disp, device, index, pdisp);
+	return nv50_disp_new_(&nv50_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index db31b37752a270b163b9b1ea8366546bef2470fa..025cacd7c3b012567f27a52db045f4bf27ff2115 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -47,8 +47,8 @@ void nv50_disp_super_2_1(struct nv50_disp *, struct nvkm_head *);
 void nv50_disp_super_2_2(struct nv50_disp *, struct nvkm_head *);
 void nv50_disp_super_3_0(struct nv50_disp *, struct nvkm_head *);
 
-int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
-		   int index, struct nvkm_disp **);
+int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		   struct nvkm_disp **);
 
 struct nv50_disp_func {
 	int (*init)(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
index f815a534288097599d850415f0081df8a6cd39a6..ec57d8b6bce9311dde08d326f482c15f0e282278 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
@@ -4,10 +4,10 @@
 #include <engine/disp.h>
 #include "outp.h"
 
-int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *,
-		   int index, struct nvkm_disp *);
-int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *,
-		   int index, struct nvkm_disp **);
+int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		   struct nvkm_disp *);
+int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		   struct nvkm_disp **);
 void nvkm_disp_vblank(struct nvkm_disp *, int head);
 
 struct nvkm_disp_func {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
index 4c85d1d4fbd4265cabd179923519412766473114..f5f8dc8e8f35d315b8e7114ee1b28dc9573feb5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
@@ -146,7 +146,8 @@ tu102_disp = {
 };
 
 int
-tu102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+tu102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&tu102_disp, device, index, pdisp);
+	return nv50_disp_new_(&tu102_disp, device, type, inst, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
index 11b7b8fd5ddaf356b5a9bee8dbde507c70cf92ac..425cde35f12842267bc5c8178b49eb6e85be4b70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
@@ -104,7 +104,7 @@ nvkm_dma = {
 
 int
 nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_dma **pdma)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_dma **pdma)
 {
 	struct nvkm_dma *dma;
 
@@ -112,5 +112,5 @@ nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device,
 		return -ENOMEM;
 	dma->func = func;
 
-	return nvkm_engine_ctor(&nvkm_dma, device, index, true, &dma->engine);
+	return nvkm_engine_ctor(&nvkm_dma, device, type, inst, true, &dma->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
index efec5d3221799133ab6e24af9987fe4b2513cd55..99a1e07fa204d7b9bfca201e4ab3b06fba08ac6c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
@@ -30,7 +30,8 @@ gf100_dma = {
 };
 
 int
-gf100_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+gf100_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_dma **pdma)
 {
-	return nvkm_dma_new_(&gf100_dma, device, index, pdma);
+	return nvkm_dma_new_(&gf100_dma, device, type, inst, pdma);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
index 34c766039aedbee2ee80d0556e5b7d5a64684ece..fd1d1fc22dc692245ea04ddd515e9a1355006468 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
@@ -30,7 +30,8 @@ gf119_dma = {
 };
 
 int
-gf119_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+gf119_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_dma **pdma)
 {
-	return nvkm_dma_new_(&gf119_dma, device, index, pdma);
+	return nvkm_dma_new_(&gf119_dma, device, type, inst, pdma);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c
index c65a4c2ea93d47d9611e931adcd66e592bed5c80..a5af0df306637b97ea58afb26019b1a63f65f12c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c
@@ -28,7 +28,8 @@ gv100_dma = {
 };
 
 int
-gv100_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+gv100_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_dma **pdma)
 {
-	return nvkm_dma_new_(&gv100_dma, device, index, pdma);
+	return nvkm_dma_new_(&gv100_dma, device, type, inst, pdma);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
index 30747a0ce488c8f9a582b08d6f5a8fdc317c186b..ea5a889f60c2c13cb717e60e070db673e8ad0dce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
@@ -30,7 +30,8 @@ nv04_dma = {
 };
 
 int
-nv04_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+nv04_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_dma **pdma)
 {
-	return nvkm_dma_new_(&nv04_dma, device, index, pdma);
+	return nvkm_dma_new_(&nv04_dma, device, type, inst, pdma);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
index 77aca7b71c83cbadf995253495d6f6a2c3088381..6e8f79660014148a90f36ecf6307996952a40ead 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
@@ -30,7 +30,8 @@ nv50_dma = {
 };
 
 int
-nv50_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+nv50_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_dma **pdma)
 {
-	return nvkm_dma_new_(&nv50_dma, device, index, pdma);
+	return nvkm_dma_new_(&nv50_dma, device, type, inst, pdma);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
index 0c9d9640a59d707abe1f4a70ae912ce1acbef4f7..d403bedb485aaca61cf36dffc502cdea5e0970b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
@@ -9,8 +9,8 @@ struct nvkm_dmaobj_func {
 		    struct nvkm_gpuobj **);
 };
 
-int nvkm_dma_new_(const struct nvkm_dma_func *, struct nvkm_device *,
-		  int index, struct nvkm_dma **);
+int nvkm_dma_new_(const struct nvkm_dma_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		  struct nvkm_dma **);
 
 struct nvkm_dma_func {
 	int (*class_new)(struct nvkm_dma *, const struct nvkm_oclass *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 8675613e142b64a54ac6a2acf8dd0afb934dc7e5..43b7dec45179609eab3d99b887e15dd8f40d1b88 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -108,7 +108,7 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
 		}
 	}
 
-	if (nvkm_mc_enabled(device, engine->subdev.index)) {
+	if (nvkm_mc_enabled(device, engine->subdev.type, engine->subdev.inst)) {
 		nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
 		nvkm_wr32(device, base + 0x014, 0xffffffff);
 	}
@@ -335,9 +335,9 @@ nvkm_falcon = {
 };
 
 int
-nvkm_falcon_new_(const struct nvkm_falcon_func *func,
-		 struct nvkm_device *device, int index, bool enable,
-		 u32 addr, struct nvkm_engine **pengine)
+nvkm_falcon_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
+		 enum nvkm_subdev_type type, int inst, bool enable, u32 addr,
+		 struct nvkm_engine **pengine)
 {
 	struct nvkm_falcon *falcon;
 
@@ -351,6 +351,5 @@ nvkm_falcon_new_(const struct nvkm_falcon_func *func,
 	falcon->data.size = func->data.size;
 	*pengine = &falcon->engine;
 
-	return nvkm_engine_ctor(&nvkm_falcon, device, index,
-				enable, &falcon->engine);
+	return nvkm_engine_ctor(&nvkm_falcon, device, type, inst, enable, &falcon->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index c773caf21f6be068a464d6bf64f0a3449cc49382..2ed4ff05d2075e51f5d8b797f6d4926881e976a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -292,7 +292,7 @@ nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
 {
 	struct nvkm_fifo *fifo = nvkm_fifo(engine);
 	switch (mthd) {
-	case NV_DEVICE_FIFO_CHANNELS: *data = fifo->nr; return 0;
+	case NV_DEVICE_HOST_CHANNELS: *data = fifo->nr; return 0;
 	default:
 		if (fifo->func->info)
 			return fifo->func->info(fifo, mthd, data);
@@ -313,7 +313,7 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine)
 static void
 nvkm_fifo_preinit(struct nvkm_engine *engine)
 {
-	nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO);
+	nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO, 0);
 }
 
 static int
@@ -334,6 +334,7 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
 	nvkm_event_fini(&fifo->kevent);
 	nvkm_event_fini(&fifo->cevent);
 	nvkm_event_fini(&fifo->uevent);
+	mutex_destroy(&fifo->mutex);
 	return data;
 }
 
@@ -351,13 +352,14 @@ nvkm_fifo = {
 
 int
 nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
-	       int index, int nr, struct nvkm_fifo *fifo)
+	       enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo *fifo)
 {
 	int ret;
 
 	fifo->func = func;
 	INIT_LIST_HEAD(&fifo->chan);
 	spin_lock_init(&fifo->lock);
+	mutex_init(&fifo->mutex);
 
 	if (WARN_ON(fifo->nr > NVKM_FIFO_CHID_NR))
 		fifo->nr = NVKM_FIFO_CHID_NR;
@@ -365,7 +367,7 @@ nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
 		fifo->nr = nr;
 	bitmap_clear(fifo->mask, 0, fifo->nr);
 
-	ret = nvkm_engine_ctor(&nvkm_fifo, device, index, true, &fifo->engine);
+	ret = nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
 	if (ret)
 		return ret;
 
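[note] The mutex_init()/mutex_destroy() pair added to nvkm_fifo_ctor()/nvkm_fifo_dtor() gives the FIFO a lock of its own instead of borrowing engine->subdev.mutex; later hunks in this patch move the runlist and RAMHT paths onto it. A compile-only userspace sketch of the paired lifecycle, with pthreads standing in for kernel mutexes and hypothetical struct names:

#include <pthread.h>

struct fifo {
	pthread_mutex_t mutex;	/* serialises runlist and RAMHT updates */
};

static int
fifo_ctor(struct fifo *fifo)
{
	return pthread_mutex_init(&fifo->mutex, NULL);	/* cf. mutex_init() */
}

static void
fifo_dtor(struct fifo *fifo)
{
	pthread_mutex_destroy(&fifo->mutex);	/* cf. mutex_destroy() */
}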
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index d83485385934d86dc475662ccfef1c87de01fe3d..8d957643940ae93a5e710b7beb0e0e7488bd5e28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -35,6 +35,15 @@ struct nvkm_fifo_chan_object {
 	int hash;
 };
 
+static struct nvkm_fifo_engn *
+nvkm_fifo_chan_engn(struct nvkm_fifo_chan *chan, struct nvkm_engine *engine)
+{
+	int engi = chan->fifo->func->engine_id(chan->fifo, engine);
+	if (engi >= 0)
+		return &chan->engn[engi];
+	return NULL;
+}
+
 static int
 nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
 {
@@ -42,8 +51,8 @@ nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
 		container_of(base, typeof(*object), oproxy);
 	struct nvkm_engine *engine  = object->oproxy.object->engine;
 	struct nvkm_fifo_chan *chan = object->chan;
-	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
-	const char *name = nvkm_subdev_name[engine->subdev.index];
+	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
+	const char *name = engine->subdev.name;
 	int ret = 0;
 
 	if (--engn->usecount)
@@ -75,8 +84,8 @@ nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
 		container_of(base, typeof(*object), oproxy);
 	struct nvkm_engine *engine  = object->oproxy.object->engine;
 	struct nvkm_fifo_chan *chan = object->chan;
-	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
-	const char *name = nvkm_subdev_name[engine->subdev.index];
+	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
+	const char *name = engine->subdev.name;
 	int ret;
 
 	if (engn->usecount++)
@@ -108,7 +117,7 @@ nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
 		container_of(base, typeof(*object), oproxy);
 	struct nvkm_engine *engine  = object->oproxy.base.engine;
 	struct nvkm_fifo_chan *chan = object->chan;
-	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
 
 	if (chan->func->object_dtor)
 		chan->func->object_dtor(chan, object->hash);
@@ -118,7 +127,7 @@ nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
 			chan->func->engine_dtor(chan, engine);
 		nvkm_object_del(&engn->object);
 		if (chan->vmm)
-			atomic_dec(&chan->vmm->engref[engine->subdev.index]);
+			atomic_dec(&chan->vmm->engref[engine->subdev.type]);
 	}
 }
 
@@ -135,7 +144,7 @@ nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
 {
 	struct nvkm_engine *engine = oclass->engine;
 	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
-	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
 	struct nvkm_fifo_chan_object *object;
 	int ret = 0;
 
@@ -152,7 +161,7 @@ nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
 		};
 
 		if (chan->vmm)
-			atomic_inc(&chan->vmm->engref[engine->subdev.index]);
+			atomic_inc(&chan->vmm->engref[engine->subdev.type]);
 
 		if (engine->func->fifo.cclass) {
 			ret = engine->func->fifo.cclass(chan, &cclass,
@@ -203,13 +212,12 @@ nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
 {
 	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
 	struct nvkm_fifo *fifo = chan->fifo;
-	struct nvkm_device *device = fifo->engine.subdev.device;
 	struct nvkm_engine *engine;
-	u64 mask = chan->engines;
-	int ret, i, c;
+	u32 engm = chan->engm;
+	int engi, ret, c;
 
-	for (; c = 0, i = __ffs64(mask), mask; mask &= ~(1ULL << i)) {
-		if (!(engine = nvkm_device_engine(device, i)))
+	for (; c = 0, engi = __ffs(engm), engm; engm &= ~(1ULL << engi)) {
+		if (!(engine = fifo->func->id_engine(fifo, engi)))
 			continue;
 		oclass->engine = engine;
 		oclass->base.oclass = 0;
@@ -352,7 +360,7 @@ nvkm_fifo_chan_func = {
 int
 nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
 		    struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
-		    u64 hvmm, u64 push, u64 engines, int bar, u32 base,
+		    u64 hvmm, u64 push, u32 engm, int bar, u32 base,
 		    u32 user, const struct nvkm_oclass *oclass,
 		    struct nvkm_fifo_chan *chan)
 {
@@ -365,7 +373,7 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
 	nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
 	chan->func = func;
 	chan->fifo = fifo;
-	chan->engines = engines;
+	chan->engm = engm;
 	INIT_LIST_HEAD(&chan->head);
 
 	/* instance memory */
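[note] nvkm_fifo_chan_engn() above is the central idiom of the conversion: per-channel engine state is no longer indexed by the global subdev index but by a small FIFO-local ID obtained from the implementation's engine_id() hook. A runnable sketch of that indirection, with all names hypothetical:

#include <stdio.h>
#include <stddef.h>

struct engine { int type; };
struct engn { int usecount; };
struct fifo_func { int (*engine_id)(const struct engine *); };

struct chan {
	const struct fifo_func *func;
	struct engn engn[16];	/* FIFO-local slots, not per-subdev */
};

static struct engn *
chan_engn(struct chan *chan, const struct engine *engine)
{
	int engi = chan->func->engine_id(engine);	/* ask the FIFO impl */

	if (engi >= 0)
		return &chan->engn[engi];
	return NULL;	/* engine not reachable from this FIFO */
}

static int
demo_engine_id(const struct engine *engine)
{
	return engine->type == 1 ? 0 : -1;	/* expose only type 1, at slot 0 */
}

int
main(void)
{
	static const struct fifo_func func = { .engine_id = demo_engine_id };
	struct chan chan = { .func = &func };
	const struct engine gr = { .type = 1 };

	printf("slot %s\n", chan_engn(&chan, &gr) ? "found" : "missing");
	return 0;
}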
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index 177e10562600a02f340dffe9e44ed640f78a4460..e53504354841b3282ecba8e1853b3b306c35d418 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -22,7 +22,7 @@ struct nvkm_fifo_chan_func {
 
 int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *,
 			u32 size, u32 align, bool zero, u64 vm, u64 push,
-			u64 engines, int bar, u32 base, u32 user,
+			u32 engm, int bar, u32 base, u32 user,
 			const struct nvkm_oclass *, struct nvkm_fifo_chan *);
 
 struct nvkm_fifo_chan_oclass {
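[note] With the mask narrowed from a u64 of subdev indices to a u32 of FIFO-local engine IDs, the walk in nvkm_fifo_chan_child_get() switches from __ffs64() to __ffs(). A standalone sketch of the lowest-set-bit walk; __builtin_ctz() stands in for the kernel's __ffs(), and since both are undefined for a zero argument the loop tests the mask first:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t engm = (1u << 0) | (1u << 3) | (1u << 5);

	while (engm) {	/* test before the ffs: undefined for 0 */
		unsigned int engi = (unsigned int)__builtin_ctz(engm);

		engm &= ~(1u << engi);
		printf("engine id %u\n", engi);	/* prints 0, 3, 5 */
	}
	return 0;
}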
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
index a5c998fe4485a938eb0f00351c14f98054c39db9..353b77d9b3dcffe35f12172e36e24b75cd454950 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -44,30 +44,10 @@ g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
 	return -EINVAL;
 }
 
-static int
-g84_fifo_chan_engine(struct nvkm_engine *engine)
-{
-	switch (engine->subdev.index) {
-	case NVKM_ENGINE_GR    : return 0;
-	case NVKM_ENGINE_MPEG  :
-	case NVKM_ENGINE_MSPPP : return 1;
-	case NVKM_ENGINE_CE0   : return 2;
-	case NVKM_ENGINE_VP    :
-	case NVKM_ENGINE_MSPDEC: return 3;
-	case NVKM_ENGINE_CIPHER:
-	case NVKM_ENGINE_SEC   : return 4;
-	case NVKM_ENGINE_BSP   :
-	case NVKM_ENGINE_MSVLD : return 5;
-	default:
-		WARN_ON(1);
-		return 0;
-	}
-}
-
 static int
 g84_fifo_chan_engine_addr(struct nvkm_engine *engine)
 {
-	switch (engine->subdev.index) {
+	switch (engine->subdev.type) {
 	case NVKM_ENGINE_DMAOBJ:
 	case NVKM_ENGINE_SW    : return -1;
 	case NVKM_ENGINE_GR    : return 0x0020;
@@ -79,7 +59,7 @@ g84_fifo_chan_engine_addr(struct nvkm_engine *engine)
 	case NVKM_ENGINE_MSVLD : return 0x0080;
 	case NVKM_ENGINE_CIPHER:
 	case NVKM_ENGINE_SEC   : return 0x00a0;
-	case NVKM_ENGINE_CE0   : return 0x00c0;
+	case NVKM_ENGINE_CE    : return 0x00c0;
 	default:
 		WARN_ON(1);
 		return -1;
@@ -102,7 +82,7 @@ g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
 	if (offset < 0)
 		return 0;
 
-	engn = g84_fifo_chan_engine(engine);
+	engn = fifo->base.func->engine_id(&fifo->base, engine);
 	save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
 	nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
 	done = nvkm_msec(device, 2000,
@@ -134,7 +114,7 @@ g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
 			  struct nvkm_engine *engine)
 {
 	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
-	struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
+	struct nvkm_gpuobj *engn = *nv50_fifo_chan_engine(chan, engine);
 	u64 limit, start;
 	int offset;
 
@@ -162,12 +142,11 @@ g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
 			  struct nvkm_object *object)
 {
 	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
-	int engn = engine->subdev.index;
 
 	if (g84_fifo_chan_engine_addr(engine) < 0)
 		return 0;
 
-	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+	return nvkm_object_bind(object, NULL, 0, nv50_fifo_chan_engine(chan, engine));
 }
 
 static int
@@ -178,14 +157,14 @@ g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
 	u32 handle = object->handle;
 	u32 context;
 
-	switch (object->engine->subdev.index) {
+	switch (object->engine->subdev.type) {
 	case NVKM_ENGINE_DMAOBJ:
 	case NVKM_ENGINE_SW    : context = 0x00000000; break;
 	case NVKM_ENGINE_GR    : context = 0x00100000; break;
 	case NVKM_ENGINE_MPEG  :
 	case NVKM_ENGINE_MSPPP : context = 0x00200000; break;
 	case NVKM_ENGINE_ME    :
-	case NVKM_ENGINE_CE0   : context = 0x00300000; break;
+	case NVKM_ENGINE_CE    : context = 0x00300000; break;
 	case NVKM_ENGINE_VP    :
 	case NVKM_ENGINE_MSPDEC: context = 0x00400000; break;
 	case NVKM_ENGINE_CIPHER:
@@ -241,20 +220,20 @@ g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
 
 	ret = nvkm_fifo_chan_ctor(&g84_fifo_chan_func, &fifo->base,
 				  0x10000, 0x1000, false, vmm, push,
-				  (1ULL << NVKM_ENGINE_BSP) |
-				  (1ULL << NVKM_ENGINE_CE0) |
-				  (1ULL << NVKM_ENGINE_CIPHER) |
-				  (1ULL << NVKM_ENGINE_DMAOBJ) |
-				  (1ULL << NVKM_ENGINE_GR) |
-				  (1ULL << NVKM_ENGINE_ME) |
-				  (1ULL << NVKM_ENGINE_MPEG) |
-				  (1ULL << NVKM_ENGINE_MSPDEC) |
-				  (1ULL << NVKM_ENGINE_MSPPP) |
-				  (1ULL << NVKM_ENGINE_MSVLD) |
-				  (1ULL << NVKM_ENGINE_SEC) |
-				  (1ULL << NVKM_ENGINE_SW) |
-				  (1ULL << NVKM_ENGINE_VIC) |
-				  (1ULL << NVKM_ENGINE_VP),
+				  BIT(G84_FIFO_ENGN_SW) |
+				  BIT(G84_FIFO_ENGN_GR) |
+				  BIT(G84_FIFO_ENGN_MPEG) |
+				  BIT(G84_FIFO_ENGN_MSPPP) |
+				  BIT(G84_FIFO_ENGN_ME) |
+				  BIT(G84_FIFO_ENGN_CE0) |
+				  BIT(G84_FIFO_ENGN_VP) |
+				  BIT(G84_FIFO_ENGN_MSPDEC) |
+				  BIT(G84_FIFO_ENGN_CIPHER) |
+				  BIT(G84_FIFO_ENGN_SEC) |
+				  BIT(G84_FIFO_ENGN_VIC) |
+				  BIT(G84_FIFO_ENGN_BSP) |
+				  BIT(G84_FIFO_ENGN_MSVLD) |
+				  BIT(G84_FIFO_ENGN_DMA),
 				  0, 0xc00000, 0x2000, oclass, &chan->base);
 	chan->fifo = fifo;
 	if (ret)
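[note] Deleting g84_fifo_chan_engine() in favour of fifo->base.func->engine_id() removes one of several per-file copies of the same subdev-to-slot switch. The shape of that deduplication, as a compile-only sketch with illustrative names:

struct engine { int type; };
struct fifo;
struct fifo_func {
	int (*engine_id)(struct fifo *, struct engine *);
};
struct fifo { const struct fifo_func *func; };

static inline int
context_slot(struct fifo *fifo, struct engine *engine)
{
	/* ctor, init and fini paths now share this one mapping */
	return fifo->func->engine_id(fifo, engine);
}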
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
index 7c125a15f9630a5e1615db4635d03624aab6a503..f7ac1061fa84936520f72ad111f4b9f1092b3a56 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
@@ -12,10 +12,17 @@ struct gf100_fifo_chan {
 	struct list_head head;
 	bool killed;
 
-	struct {
+#define GF100_FIFO_ENGN_GR     0
+#define GF100_FIFO_ENGN_MSPDEC 1
+#define GF100_FIFO_ENGN_MSPPP  2
+#define GF100_FIFO_ENGN_MSVLD  3
+#define GF100_FIFO_ENGN_CE0    4
+#define GF100_FIFO_ENGN_CE1    5
+#define GF100_FIFO_ENGN_SW     15
+	struct gf100_fifo_engn {
 		struct nvkm_gpuobj *inst;
 		struct nvkm_vma *vma;
-	} engn[NVKM_SUBDEV_NR];
+	} engn[NVKM_FIFO_ENGN_NR];
 };
 
 extern const struct nvkm_fifo_chan_oclass gf100_fifo_gpfifo_oclass;
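[note] Because the IDs are dense and FIFO-local, engn[] can shrink from NVKM_SUBDEV_NR entries to NVKM_FIFO_ENGN_NR, with SW conventionally parked at slot 15. A sketch of the compile-time guard one might add under that assumption (names hypothetical; the in-tree header relies on the defines alone):

#include <assert.h>

#define FIFO_ENGN_NR 16
#define FIFO_ENGN_SW 15

static_assert(FIFO_ENGN_SW < FIFO_ENGN_NR, "SW must fit the engn[] array");

struct chan {
	void *engn[FIFO_ENGN_NR];	/* was NVKM_SUBDEV_NR-sized */
};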
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index 22698661aa8584ff284c7e5597c4cc5173ae07bc..cfbe096e604f5777cf1310582cd417d74365e259 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -16,10 +16,11 @@ struct gk104_fifo_chan {
 
 	struct nvkm_memory *mthd;
 
-	struct {
+#define GK104_FIFO_ENGN_SW 15
+	struct gk104_fifo_engn {
 		struct nvkm_gpuobj *inst;
 		struct nvkm_vma *vma;
-	} engn[NVKM_SUBDEV_NR];
+	} engn[NVKM_FIFO_ENGN_NR];
 };
 
 extern const struct nvkm_fifo_chan_func gk104_fifo_gpfifo_func;
@@ -29,6 +30,7 @@ int gk104_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
 void *gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *);
 void gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *);
 void gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *);
+struct gk104_fifo_engn *gk104_fifo_gpfifo_engine(struct gk104_fifo_chan *, struct nvkm_engine *);
 int gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *, struct nvkm_engine *,
 				  struct nvkm_object *);
 void gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
index 60ca79465aff037bca6c963a3fc1f688c13c1a81..727bc8976b401ff3da295ac4f862300cb7217c17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
@@ -9,7 +9,11 @@ struct nv04_fifo_chan {
 	struct nvkm_fifo_chan base;
 	struct nv04_fifo *fifo;
 	u32 ramfc;
-	struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
+#define NV04_FIFO_ENGN_SW   0
+#define NV04_FIFO_ENGN_GR   1
+#define NV04_FIFO_ENGN_MPEG 2
+#define NV04_FIFO_ENGN_DMA  3
+	struct nvkm_gpuobj *engn[NVKM_FIFO_ENGN_NR];
 };
 
 extern const struct nvkm_fifo_chan_func nv04_fifo_dma_func;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
index 85f7dbf53c997bc1bd62ea7008e8b91b350e7323..c44d7c81dd52a531d5f4fd323ab1056d39f1c43f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
@@ -31,7 +31,7 @@
 static int
 nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
 {
-	switch (engine->subdev.index) {
+	switch (engine->subdev.type) {
 	case NVKM_ENGINE_DMAOBJ:
 	case NVKM_ENGINE_SW    : return -1;
 	case NVKM_ENGINE_GR    : return 0x0000;
@@ -42,6 +42,15 @@ nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
 	}
 }
 
+struct nvkm_gpuobj **
+nv50_fifo_chan_engine(struct nv50_fifo_chan *chan, struct nvkm_engine *engine)
+{
+	int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
+	if (engi >= 0)
+		return &chan->engn[engi];
+	return NULL;
+}
+
 static int
 nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
 			   struct nvkm_engine *engine, bool suspend)
@@ -103,7 +112,7 @@ nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
 			   struct nvkm_engine *engine)
 {
 	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
-	struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
+	struct nvkm_gpuobj *engn = *nv50_fifo_chan_engine(chan, engine);
 	u64 limit, start;
 	int offset;
 
@@ -130,7 +139,7 @@ nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base,
 			   struct nvkm_engine *engine)
 {
 	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
-	nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
+	nvkm_gpuobj_del(nv50_fifo_chan_engine(chan, engine));
 }
 
 static int
@@ -139,12 +148,11 @@ nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
 			   struct nvkm_object *object)
 {
 	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
-	int engn = engine->subdev.index;
 
 	if (nv50_fifo_chan_engine_addr(engine) < 0)
 		return 0;
 
-	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+	return nvkm_object_bind(object, NULL, 0, nv50_fifo_chan_engine(chan, engine));
 }
 
 void
@@ -162,7 +170,7 @@ nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
 	u32 handle = object->handle;
 	u32 context;
 
-	switch (object->engine->subdev.index) {
+	switch (object->engine->subdev.type) {
 	case NVKM_ENGINE_DMAOBJ:
 	case NVKM_ENGINE_SW    : context = 0x00000000; break;
 	case NVKM_ENGINE_GR    : context = 0x00100000; break;
@@ -240,10 +248,10 @@ nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
 
 	ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
 				  0x10000, 0x1000, false, vmm, push,
-				  (1ULL << NVKM_ENGINE_DMAOBJ) |
-				  (1ULL << NVKM_ENGINE_SW) |
-				  (1ULL << NVKM_ENGINE_GR) |
-				  (1ULL << NVKM_ENGINE_MPEG),
+				  BIT(NV50_FIFO_ENGN_SW) |
+				  BIT(NV50_FIFO_ENGN_GR) |
+				  BIT(NV50_FIFO_ENGN_MPEG) |
+				  BIT(NV50_FIFO_ENGN_DMA),
 				  0, 0xc00000, 0x2000, oclass, &chan->base);
 	chan->fifo = fifo;
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
index 5735ff72a9d1258247737614f2c1d29360fa10f7..af8bdf27555234e91a02a0914d71b2a7c2e5f0fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -15,13 +15,33 @@ struct nv50_fifo_chan {
 	struct nvkm_gpuobj *pgd;
 	struct nvkm_ramht *ramht;
 
-	struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
+#define NV50_FIFO_ENGN_SW   0
+#define NV50_FIFO_ENGN_GR   1
+#define NV50_FIFO_ENGN_MPEG 2
+#define NV50_FIFO_ENGN_DMA  3
+
+#define G84_FIFO_ENGN_SW     0
+#define G84_FIFO_ENGN_GR     1
+#define G84_FIFO_ENGN_MPEG   2
+#define G84_FIFO_ENGN_MSPPP  2
+#define G84_FIFO_ENGN_ME     3
+#define G84_FIFO_ENGN_CE0    3
+#define G84_FIFO_ENGN_VP     4
+#define G84_FIFO_ENGN_MSPDEC 4
+#define G84_FIFO_ENGN_CIPHER 5
+#define G84_FIFO_ENGN_SEC    5
+#define G84_FIFO_ENGN_VIC    5
+#define G84_FIFO_ENGN_BSP    6
+#define G84_FIFO_ENGN_MSVLD  6
+#define G84_FIFO_ENGN_DMA    7
+	struct nvkm_gpuobj *engn[NVKM_FIFO_ENGN_NR];
 };
 
 int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
 			const struct nvkm_oclass *, struct nv50_fifo_chan *);
 void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *);
 void nv50_fifo_chan_fini(struct nvkm_fifo_chan *);
+struct nvkm_gpuobj **nv50_fifo_chan_engine(struct nv50_fifo_chan *, struct nvkm_engine *);
 void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *);
 void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
 
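[note] The G84 defines above deliberately alias: MPEG/MSPPP share slot 2, ME/CE0 slot 3, and so on, because on this family those engines occupy the same PFIFO context slot, so masks built from either name set the same bit. A tiny runnable sketch of that property (the IDs mirror the defines, the code is illustrative):

#include <assert.h>

#define BIT(n) (1u << (n))
#define ENGN_MPEG  2
#define ENGN_MSPPP 2	/* same PFIFO context slot as MPEG */

int
main(void)
{
	unsigned int engm = BIT(ENGN_MPEG) | BIT(ENGN_MSPPP);

	assert(engm == BIT(2));	/* one slot, one bit */
	return 0;
}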
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
index c213122cf08858cf3920cc7c05a2ec5e4dfd7692..dbcdc5fab990bf060ea62ef41d7dbe590340f8d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -38,9 +38,9 @@ nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
 	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
 	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
 
-	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+	mutex_lock(&chan->fifo->base.mutex);
 	nvkm_ramht_remove(imem->ramht, cookie);
-	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+	mutex_unlock(&chan->fifo->base.mutex);
 }
 
 static int
@@ -53,7 +53,7 @@ nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
 	u32 handle  = object->handle;
 	int hash;
 
-	switch (object->engine->subdev.index) {
+	switch (object->engine->subdev.type) {
 	case NVKM_ENGINE_DMAOBJ:
 	case NVKM_ENGINE_SW    : context |= 0x00000000; break;
 	case NVKM_ENGINE_GR    : context |= 0x00010000; break;
@@ -63,10 +63,10 @@ nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
 		return -EINVAL;
 	}
 
-	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+	mutex_lock(&chan->fifo->base.mutex);
 	hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
 				 handle, context);
-	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+	mutex_unlock(&chan->fifo->base.mutex);
 	return hash;
 }
 
@@ -191,9 +191,9 @@ nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 
 	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
 				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
-				  (1ULL << NVKM_ENGINE_DMAOBJ) |
-				  (1ULL << NVKM_ENGINE_GR) |
-				  (1ULL << NVKM_ENGINE_SW),
+				  BIT(NV04_FIFO_ENGN_SW) |
+				  BIT(NV04_FIFO_ENGN_GR) |
+				  BIT(NV04_FIFO_ENGN_DMA),
 				  0, 0x800000, 0x10000, oclass, &chan->base);
 	chan->fifo = fifo;
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
index f5f355ff005da617a9ed7f83dfae06f3bb4c15cf..07d80d54a07cf8d61edd87f2a0ea83563f5782bb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
@@ -62,9 +62,9 @@ nv10_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 
 	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
 				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
-				  (1ULL << NVKM_ENGINE_DMAOBJ) |
-				  (1ULL << NVKM_ENGINE_GR) |
-				  (1ULL << NVKM_ENGINE_SW),
+				  BIT(NV04_FIFO_ENGN_SW) |
+				  BIT(NV04_FIFO_ENGN_GR) |
+				  BIT(NV04_FIFO_ENGN_DMA),
 				  0, 0x800000, 0x10000, oclass, &chan->base);
 	chan->fifo = fifo;
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
index 7edc6a564b5da60504e933e77f0ddf8781c9c640..edd70a11421858da961e90b0b34683864eb7b468 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
@@ -62,10 +62,10 @@ nv17_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 
 	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
 				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
-				  (1ULL << NVKM_ENGINE_DMAOBJ) |
-				  (1ULL << NVKM_ENGINE_GR) |
-				  (1ULL << NVKM_ENGINE_MPEG) | /* NV31- */
-				  (1ULL << NVKM_ENGINE_SW),
+				  BIT(NV04_FIFO_ENGN_SW) |
+				  BIT(NV04_FIFO_ENGN_GR) |
+				  BIT(NV04_FIFO_ENGN_MPEG) | /* NV31- */
+				  BIT(NV04_FIFO_ENGN_DMA),
 				  0, 0x800000, 0x10000, oclass, &chan->base);
 	chan->fifo = fifo;
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
index 5f722c6e8a2f4652eec5dde550d0a4792cd86c8b..0411fb9084577f4aa0556296640196c1c9facd32 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
@@ -35,7 +35,7 @@
 static bool
 nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
 {
-	switch (engine->subdev.index) {
+	switch (engine->subdev.type) {
 	case NVKM_ENGINE_DMAOBJ:
 	case NVKM_ENGINE_SW:
 		return false;
@@ -55,6 +55,15 @@ nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
 	}
 }
 
+static struct nvkm_gpuobj **
+nv40_fifo_dma_engn(struct nv04_fifo_chan *chan, struct nvkm_engine *engine)
+{
+	int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
+	if (engi >= 0)
+		return &chan->engn[engi];
+	return NULL;
+}
+
 static int
 nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base,
 			  struct nvkm_engine *engine, bool suspend)
@@ -99,7 +108,7 @@ nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base,
 
 	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
 		return 0;
-	inst = chan->engn[engine->subdev.index]->addr >> 4;
+	inst = (*nv40_fifo_dma_engn(chan, engine))->addr >> 4;
 
 	spin_lock_irqsave(&fifo->base.lock, flags);
 	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
@@ -121,7 +130,7 @@ nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base,
 			  struct nvkm_engine *engine)
 {
 	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
-	nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
+	nvkm_gpuobj_del(nv40_fifo_dma_engn(chan, engine));
 }
 
 static int
@@ -130,13 +139,12 @@ nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base,
 			  struct nvkm_object *object)
 {
 	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
-	const int engn = engine->subdev.index;
 	u32 reg, ctx;
 
 	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
 		return 0;
 
-	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+	return nvkm_object_bind(object, NULL, 0, nv40_fifo_dma_engn(chan, engine));
 }
 
 static int
@@ -149,7 +157,7 @@ nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
 	u32 handle  = object->handle;
 	int hash;
 
-	switch (object->engine->subdev.index) {
+	switch (object->engine->subdev.type) {
 	case NVKM_ENGINE_DMAOBJ:
 	case NVKM_ENGINE_SW    : context |= 0x00000000; break;
 	case NVKM_ENGINE_GR    : context |= 0x00100000; break;
@@ -159,10 +167,10 @@ nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
 		return -EINVAL;
 	}
 
-	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+	mutex_lock(&chan->fifo->base.mutex);
 	hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
 				 handle, context);
-	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+	mutex_unlock(&chan->fifo->base.mutex);
 	return hash;
 }
 
@@ -209,10 +217,10 @@ nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 
 	ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base,
 				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
-				  (1ULL << NVKM_ENGINE_DMAOBJ) |
-				  (1ULL << NVKM_ENGINE_GR) |
-				  (1ULL << NVKM_ENGINE_MPEG) |
-				  (1ULL << NVKM_ENGINE_SW),
+				  BIT(NV04_FIFO_ENGN_SW) |
+				  BIT(NV04_FIFO_ENGN_GR) |
+				  BIT(NV04_FIFO_ENGN_MPEG) |
+				  BIT(NV04_FIFO_ENGN_DMA),
 				  0, 0xc00000, 0x1000, oclass, &chan->base);
 	chan->fifo = fifo;
 	if (ret)
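[note] All of the RAMHT insert/remove call sites above now take fifo->base.mutex, scoping the serialisation to FIFO state rather than the whole engine subdev. A compile-only pthread sketch of the guarded insert; ramht_insert() is a hypothetical stand-in:

#include <pthread.h>

struct fifo { pthread_mutex_t mutex; };

int ramht_insert(int chid, unsigned int handle, unsigned int context);

static int
object_ctor(struct fifo *fifo, int chid, unsigned int handle,
	    unsigned int context)
{
	int hash;

	pthread_mutex_lock(&fifo->mutex);	/* was subdev.mutex */
	hash = ramht_insert(chid, handle, context);
	pthread_mutex_unlock(&fifo->mutex);
	return hash;
}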
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
index ff7b529764fe03935a50f7d66b30e25d4497999b..c0a7d0f21dacdecbbc57275e2115d0634df84ba6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
@@ -38,12 +38,82 @@ g84_fifo_uevent_init(struct nvkm_fifo *fifo)
 	nvkm_mask(device, 0x002140, 0x40000000, 0x40000000);
 }
 
+static struct nvkm_engine *
+g84_fifo_id_engine(struct nvkm_fifo *fifo, int engi)
+{
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	struct nvkm_engine *engine;
+	enum nvkm_subdev_type type;
+
+	switch (engi) {
+	case G84_FIFO_ENGN_SW    : type = NVKM_ENGINE_SW; break;
+	case G84_FIFO_ENGN_GR    : type = NVKM_ENGINE_GR; break;
+	case G84_FIFO_ENGN_MPEG  :
+		if ((engine = nvkm_device_engine(device, NVKM_ENGINE_MSPPP, 0)))
+			return engine;
+		type = NVKM_ENGINE_MPEG;
+		break;
+	case G84_FIFO_ENGN_ME    :
+		if ((engine = nvkm_device_engine(device, NVKM_ENGINE_CE, 0)))
+			return engine;
+		type = NVKM_ENGINE_ME;
+		break;
+	case G84_FIFO_ENGN_VP    :
+		if ((engine = nvkm_device_engine(device, NVKM_ENGINE_MSPDEC, 0)))
+			return engine;
+		type = NVKM_ENGINE_VP;
+		break;
+	case G84_FIFO_ENGN_CIPHER:
+		if ((engine = nvkm_device_engine(device, NVKM_ENGINE_VIC, 0)))
+			return engine;
+		if ((engine = nvkm_device_engine(device, NVKM_ENGINE_SEC, 0)))
+			return engine;
+		type = NVKM_ENGINE_CIPHER;
+		break;
+	case G84_FIFO_ENGN_BSP   :
+		if ((engine = nvkm_device_engine(device, NVKM_ENGINE_MSVLD, 0)))
+			return engine;
+		type = NVKM_ENGINE_BSP;
+		break;
+	case G84_FIFO_ENGN_DMA   : type = NVKM_ENGINE_DMAOBJ; break;
+	default:
+		WARN_ON(1);
+		return NULL;
+	}
+
+	return nvkm_device_engine(fifo->engine.subdev.device, type, 0);
+}
+
+static int
+g84_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
+{
+	switch (engine->subdev.type) {
+	case NVKM_ENGINE_SW    : return G84_FIFO_ENGN_SW;
+	case NVKM_ENGINE_GR    : return G84_FIFO_ENGN_GR;
+	case NVKM_ENGINE_MPEG  :
+	case NVKM_ENGINE_MSPPP : return G84_FIFO_ENGN_MPEG;
+	case NVKM_ENGINE_CE    : return G84_FIFO_ENGN_CE0;
+	case NVKM_ENGINE_VP    :
+	case NVKM_ENGINE_MSPDEC: return G84_FIFO_ENGN_VP;
+	case NVKM_ENGINE_CIPHER:
+	case NVKM_ENGINE_SEC   : return G84_FIFO_ENGN_CIPHER;
+	case NVKM_ENGINE_BSP   :
+	case NVKM_ENGINE_MSVLD : return G84_FIFO_ENGN_BSP;
+	case NVKM_ENGINE_DMAOBJ: return G84_FIFO_ENGN_DMA;
+	default:
+		WARN_ON(1);
+		return -1;
+	}
+}
+
 static const struct nvkm_fifo_func
 g84_fifo = {
 	.dtor = nv50_fifo_dtor,
 	.oneinit = nv50_fifo_oneinit,
 	.init = nv50_fifo_init,
 	.intr = nv04_fifo_intr,
+	.engine_id = g84_fifo_engine_id,
+	.id_engine = g84_fifo_id_engine,
 	.pause = nv04_fifo_pause,
 	.start = nv04_fifo_start,
 	.uevent_init = g84_fifo_uevent_init,
@@ -56,7 +126,8 @@ g84_fifo = {
 };
 
 int
-g84_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+g84_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_fifo **pfifo)
 {
-	return nv50_fifo_new_(&g84_fifo, device, index, pfifo);
+	return nv50_fifo_new_(&g84_fifo, device, type, inst, pfifo);
 }
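[note] g84_fifo_id_engine() resolves a slot back to whichever engine variant the chip actually has, preferring the newer type and falling back (slot 2: MSPPP, else MPEG). A runnable sketch of that preference chain over a mocked device table:

#include <stdio.h>
#include <stddef.h>

enum type { T_MPEG, T_MSPPP, T_NR };

/* Mock device: has MPEG, lacks the newer MSPPP (pre-G98 layout). */
static const char *present[T_NR] = { [T_MPEG] = "mpeg" };

static const char *
id_engine_slot2(void)
{
	if (present[T_MSPPP])
		return present[T_MSPPP];	/* prefer the newer engine */
	return present[T_MPEG];			/* fall back */
}

int
main(void)
{
	printf("slot 2 -> %s\n", id_engine_slot2());	/* "mpeg" */
	return 0;
}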
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 5a39e51d42d738fe5800fc2a97feb8255c901b8a..8b4f36b3e34b5d8960835376ed848fba4b035c45 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -57,7 +57,7 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
 	int nr = 0;
 	int target;
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&fifo->base.mutex);
 	cur = fifo->runlist.mem[fifo->runlist.active];
 	fifo->runlist.active = !fifo->runlist.active;
 
@@ -73,7 +73,7 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
 	case NVKM_MEM_TARGET_VRAM: target = 0; break;
 	case NVKM_MEM_TARGET_NCOH: target = 3; break;
 	default:
-		mutex_unlock(&subdev->mutex);
+		mutex_unlock(&fifo->base.mutex);
 		WARN_ON(1);
 		return;
 	}
@@ -86,59 +86,61 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
 			       !(nvkm_rd32(device, 0x00227c) & 0x00100000),
 			       msecs_to_jiffies(2000)) == 0)
 		nvkm_error(subdev, "runlist update timeout\n");
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&fifo->base.mutex);
 }
 
 void
 gf100_fifo_runlist_remove(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
 {
-	mutex_lock(&fifo->base.engine.subdev.mutex);
+	mutex_lock(&fifo->base.mutex);
 	list_del_init(&chan->head);
-	mutex_unlock(&fifo->base.engine.subdev.mutex);
+	mutex_unlock(&fifo->base.mutex);
 }
 
 void
 gf100_fifo_runlist_insert(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
 {
-	mutex_lock(&fifo->base.engine.subdev.mutex);
+	mutex_lock(&fifo->base.mutex);
 	list_add_tail(&chan->head, &fifo->chan);
-	mutex_unlock(&fifo->base.engine.subdev.mutex);
+	mutex_unlock(&fifo->base.mutex);
 }
 
-static inline int
-gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
+static struct nvkm_engine *
+gf100_fifo_id_engine(struct nvkm_fifo *fifo, int engi)
 {
-	switch (engn) {
-	case NVKM_ENGINE_GR    : engn = 0; break;
-	case NVKM_ENGINE_MSVLD : engn = 1; break;
-	case NVKM_ENGINE_MSPPP : engn = 2; break;
-	case NVKM_ENGINE_MSPDEC: engn = 3; break;
-	case NVKM_ENGINE_CE0   : engn = 4; break;
-	case NVKM_ENGINE_CE1   : engn = 5; break;
+	enum nvkm_subdev_type type;
+	int inst;
+
+	switch (engi) {
+	case GF100_FIFO_ENGN_GR    : type = NVKM_ENGINE_GR    ; inst = 0; break;
+	case GF100_FIFO_ENGN_MSPDEC: type = NVKM_ENGINE_MSPDEC; inst = 0; break;
+	case GF100_FIFO_ENGN_MSPPP : type = NVKM_ENGINE_MSPPP ; inst = 0; break;
+	case GF100_FIFO_ENGN_MSVLD : type = NVKM_ENGINE_MSVLD ; inst = 0; break;
+	case GF100_FIFO_ENGN_CE0   : type = NVKM_ENGINE_CE    ; inst = 0; break;
+	case GF100_FIFO_ENGN_CE1   : type = NVKM_ENGINE_CE    ; inst = 1; break;
+	case GF100_FIFO_ENGN_SW    : type = NVKM_ENGINE_SW    ; inst = 0; break;
 	default:
-		return -1;
+		WARN_ON(1);
+		return NULL;
 	}
 
-	return engn;
+	return nvkm_device_engine(fifo->engine.subdev.device, type, inst);
 }
 
-static inline struct nvkm_engine *
-gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
+static int
+gf100_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
 {
-	struct nvkm_device *device = fifo->base.engine.subdev.device;
-
-	switch (engn) {
-	case 0: engn = NVKM_ENGINE_GR; break;
-	case 1: engn = NVKM_ENGINE_MSVLD; break;
-	case 2: engn = NVKM_ENGINE_MSPPP; break;
-	case 3: engn = NVKM_ENGINE_MSPDEC; break;
-	case 4: engn = NVKM_ENGINE_CE0; break;
-	case 5: engn = NVKM_ENGINE_CE1; break;
+	switch (engine->subdev.type) {
+	case NVKM_ENGINE_GR    : return GF100_FIFO_ENGN_GR;
+	case NVKM_ENGINE_MSPDEC: return GF100_FIFO_ENGN_MSPDEC;
+	case NVKM_ENGINE_MSPPP : return GF100_FIFO_ENGN_MSPPP;
+	case NVKM_ENGINE_MSVLD : return GF100_FIFO_ENGN_MSVLD;
+	case NVKM_ENGINE_CE    : return GF100_FIFO_ENGN_CE0 + engine->subdev.inst;
+	case NVKM_ENGINE_SW    : return GF100_FIFO_ENGN_SW;
 	default:
-		return NULL;
+		WARN_ON(1);
+		return -1;
 	}
-
-	return nvkm_device_engine(device, engn);
 }
 
 static void
@@ -148,20 +150,17 @@ gf100_fifo_recover_work(struct work_struct *w)
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct nvkm_engine *engine;
 	unsigned long flags;
-	u32 engn, engm = 0;
-	u64 mask, todo;
+	u32 engm, engn, todo;
 
 	spin_lock_irqsave(&fifo->base.lock, flags);
-	mask = fifo->recover.mask;
+	engm = fifo->recover.mask;
 	fifo->recover.mask = 0ULL;
 	spin_unlock_irqrestore(&fifo->base.lock, flags);
 
-	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn))
-		engm |= 1 << gf100_fifo_engidx(fifo, engn);
 	nvkm_mask(device, 0x002630, engm, engm);
 
-	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn)) {
-		if ((engine = nvkm_device_engine(device, engn))) {
+	for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT_ULL(engn)) {
+		if ((engine = gf100_fifo_id_engine(&fifo->base, engn))) {
 			nvkm_subdev_fini(&engine->subdev, false);
 			WARN_ON(nvkm_subdev_init(&engine->subdev));
 		}
@@ -179,17 +178,18 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	u32 chid = chan->base.chid;
+	int engi = gf100_fifo_engine_id(&fifo->base, engine);
 
 	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
-		   nvkm_subdev_name[engine->subdev.index], chid);
+		   engine->subdev.name, chid);
 	assert_spin_locked(&fifo->base.lock);
 
 	nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
 	list_del_init(&chan->head);
 	chan->killed = true;
 
-	if (engine != &fifo->base.engine)
-		fifo->recover.mask |= 1ULL << engine->subdev.index;
+	if (engi >= 0 && engi != GF100_FIFO_ENGN_SW)
+		fifo->recover.mask |= BIT(engi);
 	schedule_work(&fifo->recover.work);
 	nvkm_fifo_kevent(&fifo->base, chid);
 }
@@ -205,8 +205,8 @@ gf100_fifo_fault_engine[] = {
 	{ 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
 	{ 0x13, "PCOUNTER" },
 	{ 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
-	{ 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 },
-	{ 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 },
+	{ 0x15, "PCE0", NULL, NVKM_ENGINE_CE, 0 },
+	{ 0x16, "PCE1", NULL, NVKM_ENGINE_CE, 1 },
 	{ 0x17, "PMU" },
 	{}
 };
@@ -286,7 +286,7 @@ gf100_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
 			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
 			break;
 		default:
-			engine = nvkm_device_engine(device, eu->data2);
+			engine = nvkm_device_engine(device, eu->data2, eu->inst);
 			break;
 		}
 	}
@@ -335,7 +335,7 @@ gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
 		if (busy && unk0 && unk1) {
 			list_for_each_entry(chan, &fifo->chan, head) {
 				if (chan->base.chid == chid) {
-					engine = gf100_fifo_engine(fifo, engn);
+					engine = gf100_fifo_id_engine(&fifo->base, engn);
 					if (!engine)
 						break;
 					gf100_fifo_recover(fifo, engine, chan);
@@ -673,6 +673,8 @@ gf100_fifo = {
 	.fini = gf100_fifo_fini,
 	.intr = gf100_fifo_intr,
 	.fault = gf100_fifo_fault,
+	.engine_id = gf100_fifo_engine_id,
+	.id_engine = gf100_fifo_id_engine,
 	.uevent_init = gf100_fifo_uevent_init,
 	.uevent_fini = gf100_fifo_uevent_fini,
 	.chan = {
@@ -682,7 +684,8 @@ gf100_fifo = {
 };
 
 int
-gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gf100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_fifo **pfifo)
 {
 	struct gf100_fifo *fifo;
 
@@ -692,5 +695,5 @@ gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 	INIT_WORK(&fifo->recover.work, gf100_fifo_recover_work);
 	*pfifo = &fifo->base;
 
-	return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
+	return nvkm_fifo_ctor(&gf100_fifo, device, type, inst, 128, &fifo->base);
 }
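[note] gf100_fifo_engine_id() maps the two copy engines onto consecutive slots with GF100_FIFO_ENGN_CE0 + engine->subdev.inst, which is what lets the old CE0/CE1 enum pair collapse into one type. A minimal runnable sketch of that arithmetic:

#include <assert.h>

enum { ENGN_CE0 = 4, ENGN_CE1 = 5 };

static int
ce_engine_id(int inst)
{
	return ENGN_CE0 + inst;	/* CE instances take consecutive slots */
}

int
main(void)
{
	assert(ce_engine_id(0) == ENGN_CE0);
	assert(ce_engine_id(1) == ENGN_CE1);
	return 0;
}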
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 5d4b695cab8e5840e3e81d4890b72ab2457ddefc..69da601f1754a5cfe4b852a120bc3f7be2c617e9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -36,19 +36,7 @@
 #include <nvif/class.h>
 #include <nvif/cl0080.h>
 
-struct gk104_fifo_engine_status {
-	bool busy;
-	bool faulted;
-	bool chsw;
-	bool save;
-	bool load;
-	struct {
-		bool tsg;
-		u32 id;
-	} prev, next, *chan;
-};
-
-static void
+void
 gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
 			 struct gk104_fifo_engine_status *status)
 {
@@ -95,7 +83,7 @@ gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
 		   status->chan == &status->next ? "*" : " ");
 }
 
-static int
+int
 gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 		     void *argv, u32 argc, struct nvkm_object **pobject)
 {
@@ -112,7 +100,7 @@ gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 	return -EINVAL;
 }
 
-static int
+int
 gk104_fifo_class_get(struct nvkm_fifo *base, int index,
 		     struct nvkm_oclass *oclass)
 {
@@ -134,14 +122,14 @@ gk104_fifo_class_get(struct nvkm_fifo *base, int index,
 	return c;
 }
 
-static void
+void
 gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
 {
 	struct nvkm_device *device = fifo->engine.subdev.device;
 	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
 }
 
-static void
+void
 gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
 {
 	struct nvkm_device *device = fifo->engine.subdev.device;
@@ -180,12 +168,11 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
 {
 	const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
 	struct gk104_fifo_chan *chan;
-	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_memory *mem;
 	struct nvkm_fifo_cgrp *cgrp;
 	int nr = 0;
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&fifo->base.mutex);
 	mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
 	fifo->runlist[runl].next = !fifo->runlist[runl].next;
 
@@ -203,27 +190,27 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
 	nvkm_done(mem);
 
 	func->commit(fifo, runl, mem, nr);
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&fifo->base.mutex);
 }
 
 void
 gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
 {
 	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
-	mutex_lock(&fifo->base.engine.subdev.mutex);
+	mutex_lock(&fifo->base.mutex);
 	if (!list_empty(&chan->head)) {
 		list_del_init(&chan->head);
 		if (cgrp && !--cgrp->chan_nr)
 			list_del_init(&cgrp->head);
 	}
-	mutex_unlock(&fifo->base.engine.subdev.mutex);
+	mutex_unlock(&fifo->base.mutex);
 }
 
 void
 gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
 {
 	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
-	mutex_lock(&fifo->base.engine.subdev.mutex);
+	mutex_lock(&fifo->base.mutex);
 	if (cgrp) {
 		if (!cgrp->chan_nr++)
 			list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
@@ -231,7 +218,7 @@ gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
 	} else {
 		list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
 	}
-	mutex_unlock(&fifo->base.engine.subdev.mutex);
+	mutex_unlock(&fifo->base.mutex);
 }
 
 void
@@ -271,6 +258,30 @@ gk104_fifo_pbdma = {
 	.init = gk104_fifo_pbdma_init,
 };
 
+struct nvkm_engine *
+gk104_fifo_id_engine(struct nvkm_fifo *base, int engi)
+{
+	return gk104_fifo(base)->engine[engi].engine;
+}
+
+int
+gk104_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
+{
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	int engn;
+
+	if (engine->subdev.type == NVKM_ENGINE_SW)
+		return GK104_FIFO_ENGN_SW;
+
+	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
+		if (fifo->engine[engn].engine == engine)
+			return engn;
+	}
+
+	WARN_ON(1);
+	return -1;
+}
+
 static void
 gk104_fifo_recover_work(struct work_struct *w)
 {
@@ -422,11 +433,12 @@ gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
 	 * called from the fault handler already.
 	 */
 	if (!status.faulted && engine) {
-		mmui = nvkm_top_fault_id(device, engine->subdev.index);
+		mmui = nvkm_top_fault_id(device, engine->subdev.type, engine->subdev.inst);
 		if (mmui < 0) {
 			const struct nvkm_enum *en = fifo->func->fault.engine;
 			for (; en && en->name; en++) {
-				if (en->data2 == engine->subdev.index) {
+				if (en->data2 == engine->subdev.type &&
+				    en->inst  == engine->subdev.inst) {
 					mmui = en->value;
 					break;
 				}
@@ -471,8 +483,8 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
 	struct nvkm_engine *engine = NULL;
 	struct nvkm_fifo_chan *chan;
 	unsigned long flags;
-	char ct[8] = "HUB/", en[16] = "";
-	int engn;
+	const char *en = "";
+	char ct[8] = "HUB/";
 
 	er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
 	ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
@@ -496,23 +508,20 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
 			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
 			break;
 		default:
-			engine = nvkm_device_engine(device, ee->data2);
+			engine = nvkm_device_engine(device, ee->data2, 0);
 			break;
 		}
 	}
 
 	if (ee == NULL) {
-		enum nvkm_devidx engidx = nvkm_top_fault(device, info->engine);
-		if (engidx < NVKM_SUBDEV_NR) {
-			const char *src = nvkm_subdev_name[engidx];
-			char *dst = en;
-			do {
-				*dst++ = toupper(*src++);
-			} while(*src);
-			engine = nvkm_device_engine(device, engidx);
+		struct nvkm_subdev *subdev = nvkm_top_fault(device, info->engine);
+		if (subdev) {
+			if (subdev->func == &nvkm_engine)
+				engine = container_of(subdev, typeof(*engine), subdev);
+			en = engine->subdev.name;
 		}
 	} else {
-		snprintf(en, sizeof(en), "%s", ee->name);
+		en = ee->name;
 	}
 
 	spin_lock_irqsave(&fifo->base.lock, flags);
@@ -535,11 +544,10 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
 	 * correct engine(s), but just in case we can't find the channel
 	 * information...
 	 */
-	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
-		if (fifo->engine[engn].engine == engine) {
+	if (engine) {
+		int engn = fifo->base.func->engine_id(&fifo->base, engine);
+		if (engn >= 0 && engn != GK104_FIFO_ENGN_SW)
 			gk104_fifo_recover_engn(fifo, engn);
-			break;
-		}
 	}
 
 	spin_unlock_irqrestore(&fifo->base.lock, flags);
@@ -556,7 +564,7 @@ gk104_fifo_bind_reason[] = {
 	{}
 };
 
-static void
+void
 gk104_fifo_intr_bind(struct gk104_fifo *fifo)
 {
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -627,7 +635,7 @@ gk104_fifo_intr_sched(struct gk104_fifo *fifo)
 	}
 }
 
-static void
+void
 gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
 {
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -637,7 +645,7 @@ gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
 	nvkm_wr32(device, 0x00256c, stat);
 }
 
-static void
+void
 gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
 {
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -680,7 +688,7 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
 	{}
 };
 
-static void
+void
 gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
 {
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -729,7 +737,7 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
 	{}
 };
 
-static void
+void
 gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
 {
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -750,7 +758,7 @@ gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
 	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
 }
 
-static void
+void
 gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
 {
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
@@ -763,7 +771,7 @@ gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
 	}
 }
 
-static void
+void
 gk104_fifo_intr_engine(struct gk104_fifo *fifo)
 {
 	nvkm_fifo_uevent(&fifo->base);
@@ -861,7 +869,7 @@ gk104_fifo_intr(struct nvkm_fifo *base)
 	}
 }
 
-static void
+void
 gk104_fifo_fini(struct nvkm_fifo *base)
 {
 	struct gk104_fifo *fifo = gk104_fifo(base);
@@ -871,24 +879,46 @@ gk104_fifo_fini(struct nvkm_fifo *base)
 	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
 }
 
-static int
+int
 gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
 {
 	struct gk104_fifo *fifo = gk104_fifo(base);
 	switch (mthd) {
-	case NV_DEVICE_FIFO_RUNLISTS:
+	case NV_DEVICE_HOST_RUNLISTS:
 		*data = (1ULL << fifo->runlist_nr) - 1;
 		return 0;
-	case NV_DEVICE_FIFO_RUNLIST_ENGINES(0)...
-	     NV_DEVICE_FIFO_RUNLIST_ENGINES(63): {
-		int runl = mthd - NV_DEVICE_FIFO_RUNLIST_ENGINES(0), engn;
-		if (runl < fifo->runlist_nr) {
-			unsigned long engm = fifo->runlist[runl].engm;
+	case NV_DEVICE_HOST_RUNLIST_ENGINES: {
+		if (*data < fifo->runlist_nr) {
+			unsigned long engm = fifo->runlist[*data].engm;
 			struct nvkm_engine *engine;
+			int engn;
 			*data = 0;
 			for_each_set_bit(engn, &engm, fifo->engine_nr) {
-				if ((engine = fifo->engine[engn].engine))
-					*data |= BIT_ULL(engine->subdev.index);
+				if ((engine = fifo->engine[engn].engine)) {
+#define CASE(n) case NVKM_ENGINE_##n: *data |= NV_DEVICE_HOST_RUNLIST_ENGINES_##n; break
+					switch (engine->subdev.type) {
+					CASE(SW    );
+					CASE(GR    );
+					CASE(MPEG  );
+					CASE(ME    );
+					CASE(CIPHER);
+					CASE(BSP   );
+					CASE(VP    );
+					CASE(CE    );
+					CASE(SEC   );
+					CASE(MSVLD );
+					CASE(MSPDEC);
+					CASE(MSPPP );
+					CASE(MSENC );
+					CASE(VIC   );
+					CASE(SEC2  );
+					CASE(NVDEC );
+					CASE(NVENC );
+					default:
+						WARN_ON(1);
+						break;
+					}
+				}
 			}
 			return 0;
 		}
@@ -899,15 +929,15 @@ gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
 	}
 }
 
-static int
+int
 gk104_fifo_oneinit(struct nvkm_fifo *base)
 {
 	struct gk104_fifo *fifo = gk104_fifo(base);
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
-	int engn, runl, pbid, ret, i, j;
-	enum nvkm_devidx engidx;
+	struct nvkm_top_device *tdev;
+	int pbid, ret, i, j;
 	u32 *map;
 
 	fifo->pbdma_nr = fifo->func->pbdma->nr(fifo);
@@ -921,25 +951,41 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
 		map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));
 
 	/* Determine runlist configuration from topology device info. */
-	i = 0;
-	while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
+	list_for_each_entry(tdev, &device->top->device, head) {
+		const int engn = tdev->engine;
+		char _en[16], *en;
+
+		if (engn < 0)
+			continue;
+
 		/* Determine which PBDMA handles requests for this engine. */
 		for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
-			if (map[j] & (1 << runl)) {
+			if (map[j] & BIT(tdev->runlist)) {
 				pbid = j;
 				break;
 			}
 		}
 
+		fifo->engine[engn].engine = nvkm_device_engine(device, tdev->type, tdev->inst);
+		if (!fifo->engine[engn].engine) {
+			snprintf(_en, sizeof(_en), "%s, %d",
+				 nvkm_subdev_type[tdev->type], tdev->inst);
+			en = _en;
+		} else {
+			en = fifo->engine[engn].engine->subdev.name;
+		}
+
 		nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
-			   engn, runl, pbid, nvkm_subdev_name[engidx]);
+			   tdev->engine, tdev->runlist, pbid, en);
 
-		fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
-		fifo->engine[engn].runl = runl;
+		fifo->engine[engn].runl = tdev->runlist;
 		fifo->engine[engn].pbid = pbid;
 		fifo->engine_nr = max(fifo->engine_nr, engn + 1);
-		fifo->runlist[runl].engm |= 1 << engn;
-		fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
+		fifo->runlist[tdev->runlist].engm |= BIT(engn);
+		fifo->runlist[tdev->runlist].engm_sw |= BIT(engn);
+		if (tdev->type == NVKM_ENGINE_GR)
+			fifo->runlist[tdev->runlist].engm_sw |= BIT(GK104_FIFO_ENGN_SW);
+		fifo->runlist_nr = max(fifo->runlist_nr, tdev->runlist + 1);
 	}
 
 	kfree(map);
@@ -974,7 +1020,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
 	return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
 }
 
-static void
+void
 gk104_fifo_init(struct nvkm_fifo *base)
 {
 	struct gk104_fifo *fifo = gk104_fifo(base);
@@ -1006,7 +1052,7 @@ gk104_fifo_init(struct nvkm_fifo *base)
 	nvkm_wr32(device, 0x002140, 0x7fffffff);
 }
 
-static void *
+void *
 gk104_fifo_dtor(struct nvkm_fifo *base)
 {
 	struct gk104_fifo *fifo = gk104_fifo(base);
@@ -1033,6 +1079,8 @@ gk104_fifo_ = {
 	.fini = gk104_fifo_fini,
 	.intr = gk104_fifo_intr,
 	.fault = gk104_fifo_fault,
+	.engine_id = gk104_fifo_engine_id,
+	.id_engine = gk104_fifo_id_engine,
 	.uevent_init = gk104_fifo_uevent_init,
 	.uevent_fini = gk104_fifo_uevent_fini,
 	.recover_chan = gk104_fifo_recover_chan,
@@ -1042,7 +1090,7 @@ gk104_fifo_ = {
 
 int
 gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
-		int index, int nr, struct nvkm_fifo **pfifo)
+		enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo **pfifo)
 {
 	struct gk104_fifo *fifo;
 
@@ -1052,7 +1100,7 @@ gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
 	INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
 	*pfifo = &fifo->base;
 
-	return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base);
+	return nvkm_fifo_ctor(&gk104_fifo_, device, type, inst, nr, &fifo->base);
 }
 
 const struct nvkm_enum
@@ -1084,12 +1132,12 @@ gk104_fifo_fault_engine[] = {
 	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
 	{ 0x13, "PERF" },
 	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
-	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
-	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
+	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE, 0 },
+	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE, 1 },
 	{ 0x17, "PMU" },
 	{ 0x18, "PTP" },
 	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
-	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
+	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE, 2 },
 	{}
 };
 
@@ -1191,7 +1239,8 @@ gk104_fifo = {
 };
 
 int
-gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gk104_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_fifo **pfifo)
 {
-	return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
+	return gk104_fifo_new_(&gk104_fifo, device, type, inst, 4096, pfifo);
 }
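[note] gk104_fifo_oneinit() now derives the engine/runlist layout by walking the topology device list, each entry of which carries the engine slot, runlist and (type, inst) directly, instead of re-querying nvkm_top_engine() by iteration index. A runnable sketch of accumulating runlist masks from such a list; the tdev layout below is an illustrative stand-in for struct nvkm_top_device:

#include <stdio.h>

struct tdev { int engine; int runlist; };

int
main(void)
{
	static const struct tdev top[] = {
		{ .engine = 0, .runlist = 0 },	/* e.g. gr  */
		{ .engine = 4, .runlist = 1 },	/* e.g. ce0 */
	};
	unsigned int engm[2] = { 0 };
	int engine_nr = 0;

	for (unsigned int i = 0; i < sizeof(top) / sizeof(top[0]); i++) {
		const struct tdev *tdev = &top[i];

		if (tdev->engine < 0)
			continue;	/* not exposed on any runlist */
		engm[tdev->runlist] |= 1u << tdev->engine;
		if (tdev->engine + 1 > engine_nr)
			engine_nr = tdev->engine + 1;
	}
	printf("runlist0 engm %#x, %d engines\n", engm[0], engine_nr);
	return 0;
}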
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 6407a4a174cf7d6f52b8d20b4393fc01b8fa686a..f2d12ae739449ab4e19b703ba06ec9fa86b2e804 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -35,6 +35,7 @@ struct gk104_fifo {
 		struct list_head cgrp;
 		struct list_head chan;
 		u32 engm;
+		u32 engm_sw;
 	} runlist[16];
 	int runlist_nr;
 
@@ -87,11 +88,43 @@ struct gk104_fifo_func {
 	bool cgrp_force;
 };
 
-int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *,
+struct gk104_fifo_engine_status {
+	bool busy;
+	bool faulted;
+	bool chsw;
+	bool save;
+	bool load;
+	struct {
+		bool tsg;
+		u32 id;
+	} prev, next, *chan;
+};
+
+int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type,
 		    int index, int nr, struct nvkm_fifo **);
 void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
 void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
 void gk104_fifo_runlist_update(struct gk104_fifo *, int runl);
+void gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
+			      struct gk104_fifo_engine_status *status);
+void gk104_fifo_intr_bind(struct gk104_fifo *fifo);
+void gk104_fifo_intr_chsw(struct gk104_fifo *fifo);
+void gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo);
+void gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit);
+void gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit);
+void gk104_fifo_intr_runlist(struct gk104_fifo *fifo);
+void gk104_fifo_intr_engine(struct gk104_fifo *fifo);
+void *gk104_fifo_dtor(struct nvkm_fifo *base);
+int gk104_fifo_oneinit(struct nvkm_fifo *base);
+int gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data);
+void gk104_fifo_init(struct nvkm_fifo *base);
+void gk104_fifo_fini(struct nvkm_fifo *base);
+int gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+			 void *argv, u32 argc, struct nvkm_object **pobject);
+int gk104_fifo_class_get(struct nvkm_fifo *base, int index,
+			 struct nvkm_oclass *oclass);
+void gk104_fifo_uevent_fini(struct nvkm_fifo *fifo);
+void gk104_fifo_uevent_init(struct nvkm_fifo *fifo);
 
 extern const struct gk104_fifo_pbdma_func gk104_fifo_pbdma;
 int gk104_fifo_pbdma_nr(struct gk104_fifo *);
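[note] The gk104_fifo_info() change in the previous file also reshapes the query API: NV_DEVICE_HOST_RUNLIST_ENGINES reads the runlist index from *data and writes back a bitmask of per-type flags, replacing the old per-runlist NV_DEVICE_FIFO_RUNLIST_ENGINES(n) method range. A runnable sketch of that in/out-parameter convention; the flag values here are made up:

#include <stdio.h>
#include <stdint.h>

#define RUNLIST_ENGINES_GR 0x01	/* made-up flag values */
#define RUNLIST_ENGINES_CE 0x02

static int
info_runlist_engines(uint64_t *data)	/* in: runlist index; out: flags */
{
	static const uint64_t flags[] = {
		RUNLIST_ENGINES_GR,
		RUNLIST_ENGINES_GR | RUNLIST_ENGINES_CE,
	};

	if (*data >= sizeof(flags) / sizeof(flags[0]))
		return -22;	/* -EINVAL */
	*data = flags[*data];
	return 0;
}

int
main(void)
{
	uint64_t data = 1;

	if (!info_runlist_engines(&data))
		printf("runlist 1 engines %#llx\n", (unsigned long long)data);
	return 0;
}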
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index f820969e4405df8908298bcef62d2a928f74b1c5..915278c7e0128c8feaa4dbbaa03404693e1e0fe7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -60,7 +60,8 @@ gk110_fifo = {
 };
 
 int
-gk110_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gk110_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_fifo **pfifo)
 {
-	return gk104_fifo_new_(&gk110_fifo, device, index, 4096, pfifo);
+	return gk104_fifo_new_(&gk110_fifo, device, type, inst, 4096, pfifo);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 2f54787b5fd0b4285d021514f41ab0541b5e2225..cb703693de52c0d52681367376e27cf41f715bdc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -57,7 +57,8 @@ gk208_fifo = {
 };
 
 int
-gk208_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gk208_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_fifo **pfifo)
 {
-	return gk104_fifo_new_(&gk208_fifo, device, index, 1024, pfifo);
+	return gk104_fifo_new_(&gk208_fifo, device, type, inst, 1024, pfifo);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index a814c4e0ed3e5ebff69518833d54dc6a92407394..6e35cf44c640f8de9364fe7185d4f7f2fbfca469 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -38,7 +38,8 @@ gk20a_fifo = {
 };
 
 int
-gk20a_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gk20a_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_fifo **pfifo)
 {
-	return gk104_fifo_new_(&gk20a_fifo, device, index, 128, pfifo);
+	return gk104_fifo_new_(&gk20a_fifo, device, type, inst, 128, pfifo);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index c2a2e4572f6cb1a54d6f8ccd673f08dc1e813ce3..7af6e687d47491d83af7954f499ffbeafe1b580f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -106,7 +106,8 @@ gm107_fifo = {
 };
 
 int
-gm107_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gm107_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_fifo **pfifo)
 {
-	return gk104_fifo_new_(&gm107_fifo, device, index, 2048, pfifo);
+	return gk104_fifo_new_(&gm107_fifo, device, type, inst, 2048, pfifo);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index b8cfe3b28c4fd5302a682b41bd6314667d39df93..573658cb6c73f4c4c9cb03965c44f9eaf6cd9e11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -54,7 +54,8 @@ gm200_fifo = {
 };
 
 int
-gm200_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gm200_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_fifo **pfifo)
 {
-	return gk104_fifo_new_(&gm200_fifo, device, index, 4096, pfifo);
+	return gk104_fifo_new_(&gm200_fifo, device, type, inst, 4096, pfifo);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index 70b4feebc1faca571b0db72e3bcc4844439a16cc..556c97e54f143fb0394f9e21d7e277cd8b9a2b2b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -38,7 +38,8 @@ gm20b_fifo = {
 };
 
 int
-gm20b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gm20b_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_fifo **pfifo)
 {
-	return gk104_fifo_new_(&gm20b_fifo, device, index, 512, pfifo);
+	return gk104_fifo_new_(&gm20b_fifo, device, type, inst, 512, pfifo);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index 2c7a0176b3c840a13a3e50742cb460d1f0bd58f8..6b46b6b65b87724ad73158a38420be93a46a0c86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -91,7 +91,8 @@ gp100_fifo = {
 };
 
 int
-gp100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gp100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_fifo **pfifo)
 {
-	return gk104_fifo_new_(&gp100_fifo, device, index, 4096, pfifo);
+	return gk104_fifo_new_(&gp100_fifo, device, type, inst, 4096, pfifo);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
index 8c65ad4feedbeb0bbf1bb5c44369a415672c5267..7a5929cb4d29b8ad737caef43ea93c559766eac7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -39,7 +39,8 @@ gp10b_fifo = {
 };
 
 int
-gp10b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gp10b_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_fifo **pfifo)
 {
-	return gk104_fifo_new_(&gp10b_fifo, device, index, 512, pfifo);
+	return gk104_fifo_new_(&gp10b_fifo, device, type, inst, 512, pfifo);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
index 75f9632789b33aeaa1528e2a548deb436f2ccd46..4e78bbe3b94bb29f17715a6e321f21de44752cc4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -52,11 +52,10 @@ gf100_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
 static u32
 gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
 {
-	switch (engine->subdev.index) {
+	switch (engine->subdev.type) {
 	case NVKM_ENGINE_SW    : return 0;
 	case NVKM_ENGINE_GR    : return 0x0210;
-	case NVKM_ENGINE_CE0   : return 0x0230;
-	case NVKM_ENGINE_CE1   : return 0x0240;
+	case NVKM_ENGINE_CE    : return 0x0230 + (engine->subdev.inst * 0x10);
 	case NVKM_ENGINE_MSPDEC: return 0x0250;
 	case NVKM_ENGINE_MSPPP : return 0x0260;
 	case NVKM_ENGINE_MSVLD : return 0x0270;
@@ -66,6 +65,15 @@ gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
 	}
 }
 
+static struct gf100_fifo_engn *
+gf100_fifo_gpfifo_engine(struct gf100_fifo_chan *chan, struct nvkm_engine *engine)
+{
+	int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
+	if (engi >= 0)
+		return &chan->engn[engi];
+	return NULL;
+}
+
 static int
 gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
 			      struct nvkm_engine *engine, bool suspend)
@@ -77,7 +85,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
 	struct nvkm_gpuobj *inst = chan->base.inst;
 	int ret = 0;
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&chan->fifo->base.mutex);
 	nvkm_wr32(device, 0x002634, chan->base.chid);
 	if (nvkm_msec(device, 2000,
 		if (nvkm_rd32(device, 0x002634) == chan->base.chid)
@@ -87,7 +95,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
 			   chan->base.chid, chan->base.object.client->name);
 		ret = -ETIMEDOUT;
 	}
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&chan->fifo->base.mutex);
 
 	if (ret && suspend)
 		return ret;
@@ -108,13 +116,13 @@ gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
 {
 	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
 	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine);
 	struct nvkm_gpuobj *inst = chan->base.inst;
 
 	if (offset) {
-		u64 addr = chan->engn[engine->subdev.index].vma->addr;
 		nvkm_kmap(inst);
-		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
-		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
+		nvkm_wo32(inst, offset + 0x00, lower_32_bits(engn->vma->addr) | 4);
+		nvkm_wo32(inst, offset + 0x04, upper_32_bits(engn->vma->addr));
 		nvkm_done(inst);
 	}
 
@@ -126,8 +134,9 @@ gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
 			      struct nvkm_engine *engine)
 {
 	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
-	nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
-	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
+	struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine);
+	nvkm_vmm_put(chan->base.vmm, &engn->vma);
+	nvkm_gpuobj_del(&engn->inst);
 }
 
 static int
@@ -136,23 +145,21 @@ gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
 			      struct nvkm_object *object)
 {
 	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
-	int engn = engine->subdev.index;
+	struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine);
 	int ret;
 
 	if (!gf100_fifo_gpfifo_engine_addr(engine))
 		return 0;
 
-	ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+	ret = nvkm_object_bind(object, NULL, 0, &engn->inst);
 	if (ret)
 		return ret;
 
-	ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
-			   &chan->engn[engn].vma);
+	ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma);
 	if (ret)
 		return ret;
 
-	return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
-			       chan->engn[engn].vma, NULL, 0);
+	return nvkm_memory_map(engn->inst, 0, chan->base.vmm, engn->vma, NULL, 0);
 }
 
 static void
@@ -243,13 +250,13 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 
 	ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
 				  0x1000, 0x1000, true, args->v0.vmm, 0,
-				  (1ULL << NVKM_ENGINE_CE0) |
-				  (1ULL << NVKM_ENGINE_CE1) |
-				  (1ULL << NVKM_ENGINE_GR) |
-				  (1ULL << NVKM_ENGINE_MSPDEC) |
-				  (1ULL << NVKM_ENGINE_MSPPP) |
-				  (1ULL << NVKM_ENGINE_MSVLD) |
-				  (1ULL << NVKM_ENGINE_SW),
+				  BIT(GF100_FIFO_ENGN_GR) |
+				  BIT(GF100_FIFO_ENGN_MSPDEC) |
+				  BIT(GF100_FIFO_ENGN_MSPPP) |
+				  BIT(GF100_FIFO_ENGN_MSVLD) |
+				  BIT(GF100_FIFO_ENGN_CE0) |
+				  BIT(GF100_FIFO_ENGN_CE1) |
+				  BIT(GF100_FIFO_ENGN_SW),
 				  1, fifo->user.bar->addr, 0x1000,
 				  oclass, &chan->base);
 	if (ret)
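
Two related changes land in gpfifogf100.c: per-engine channel state stops being indexed by the global subdev index, and the CE0/CE1 enum pair collapses into a single NVKM_ENGINE_CE type whose register offset is computed from the instance number. A hedged standalone sketch of the slot lookup that gf100_fifo_gpfifo_engine() performs (slot layout and names invented for illustration):

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative stand-ins; the real code uses nvkm_engine and gf100_fifo_engn. */
    enum eng_type { TYPE_GR, TYPE_CE };
    enum engn_slot { ENGN_GR, ENGN_CE0, ENGN_CE1, ENGN_NR };

    struct engine { enum eng_type type; int inst; };
    struct chan_engn { unsigned long vma_addr; };
    struct chan { struct chan_engn engn[ENGN_NR]; };

    /* (type, inst) -> dense per-channel slot; both copy engines share one type. */
    static int engine_id(const struct engine *engine)
    {
        switch (engine->type) {
        case TYPE_GR: return ENGN_GR;
        case TYPE_CE: return engine->inst ? ENGN_CE1 : ENGN_CE0;
        default:      return -1;
        }
    }

    /* Shape of gf100_fifo_gpfifo_engine(): NULL when the engine is unknown. */
    static struct chan_engn *chan_engine(struct chan *chan, const struct engine *engine)
    {
        int engi = engine_id(engine);
        return engi >= 0 ? &chan->engn[engi] : NULL;
    }

    int main(void)
    {
        struct chan chan = { 0 };
        struct engine ce1 = { TYPE_CE, 1 };
        struct chan_engn *engn = chan_engine(&chan, &ce1);
        printf("ce1 -> slot %td\n", engn ? engn - chan.engn : (ptrdiff_t)-1);
        return 0;
    }
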
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 728a1edbf98c8cce3f7cb81ea8912b8097d293b5..b6900a52bcce5ae714b8029acd322a42ad2654ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -65,19 +65,18 @@ int
 gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
 {
 	int ret;
-	mutex_lock(&chan->base.fifo->engine.subdev.mutex);
+	mutex_lock(&chan->base.fifo->mutex);
 	ret = gk104_fifo_gpfifo_kick_locked(chan);
-	mutex_unlock(&chan->base.fifo->engine.subdev.mutex);
+	mutex_unlock(&chan->base.fifo->mutex);
 	return ret;
 }
 
 static u32
 gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
 {
-	switch (engine->subdev.index) {
+	switch (engine->subdev.type) {
 	case NVKM_ENGINE_SW    :
-	case NVKM_ENGINE_CE0...NVKM_ENGINE_CE_LAST:
-		return 0;
+	case NVKM_ENGINE_CE    : return 0;
 	case NVKM_ENGINE_GR    : return 0x0210;
 	case NVKM_ENGINE_SEC   : return 0x0220;
 	case NVKM_ENGINE_MSPDEC: return 0x0250;
@@ -85,15 +84,26 @@ gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
 	case NVKM_ENGINE_MSVLD : return 0x0270;
 	case NVKM_ENGINE_VIC   : return 0x0280;
 	case NVKM_ENGINE_MSENC : return 0x0290;
-	case NVKM_ENGINE_NVDEC0: return 0x02100270;
-	case NVKM_ENGINE_NVENC0: return 0x02100290;
-	case NVKM_ENGINE_NVENC1: return 0x0210;
+	case NVKM_ENGINE_NVDEC : return 0x02100270;
+	case NVKM_ENGINE_NVENC :
+		if (engine->subdev.inst)
+			return 0x0210;
+		return 0x02100290;
 	default:
 		WARN_ON(1);
 		return 0;
 	}
 }
 
+struct gk104_fifo_engn *
+gk104_fifo_gpfifo_engine(struct gk104_fifo_chan *chan, struct nvkm_engine *engine)
+{
+	int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
+	if (engi >= 0)
+		return &chan->engn[engi];
+	return NULL;
+}
+
 static int
 gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
 			      struct nvkm_engine *engine, bool suspend)
@@ -126,13 +136,13 @@ gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
 			      struct nvkm_engine *engine)
 {
 	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
 	struct nvkm_gpuobj *inst = chan->base.inst;
 	u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
 
 	if (offset) {
-		u64   addr = chan->engn[engine->subdev.index].vma->addr;
-		u32 datalo = lower_32_bits(addr) | 0x00000004;
-		u32 datahi = upper_32_bits(addr);
+		u32 datalo = lower_32_bits(engn->vma->addr) | 0x00000004;
+		u32 datahi = upper_32_bits(engn->vma->addr);
 		nvkm_kmap(inst);
 		nvkm_wo32(inst, (offset & 0xffff) + 0x00, datalo);
 		nvkm_wo32(inst, (offset & 0xffff) + 0x04, datahi);
@@ -151,8 +161,9 @@ gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
 			      struct nvkm_engine *engine)
 {
 	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
-	nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
-	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
+	struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
+	nvkm_vmm_put(chan->base.vmm, &engn->vma);
+	nvkm_gpuobj_del(&engn->inst);
 }
 
 int
@@ -161,23 +172,21 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
 			      struct nvkm_object *object)
 {
 	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
-	int engn = engine->subdev.index;
+	struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
 	int ret;
 
 	if (!gk104_fifo_gpfifo_engine_addr(engine))
 		return 0;
 
-	ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+	ret = nvkm_object_bind(object, NULL, 0, &engn->inst);
 	if (ret)
 		return ret;
 
-	ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
-			   &chan->engn[engn].vma);
+	ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma);
 	if (ret)
 		return ret;
 
-	return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
-			       chan->engn[engn].vma, NULL, 0);
+	return nvkm_memory_map(engn->inst, 0, chan->base.vmm, engn->vma, NULL, 0);
 }
 
 void
@@ -247,23 +256,12 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
 {
 	struct gk104_fifo_chan *chan;
 	int runlist = ffs(*runlists) -1, ret, i;
-	unsigned long engm;
-	u64 subdevs = 0;
 	u64 usermem;
 
 	if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
 		return -EINVAL;
 	*runlists = BIT_ULL(runlist);
 
-	engm = fifo->runlist[runlist].engm;
-	for_each_set_bit(i, &engm, fifo->engine_nr) {
-		if (fifo->engine[i].engine)
-			subdevs |= BIT_ULL(fifo->engine[i].engine->subdev.index);
-	}
-
-	if (subdevs & BIT_ULL(NVKM_ENGINE_GR))
-		subdevs |= BIT_ULL(NVKM_ENGINE_SW);
-
 	/* Allocate the channel. */
 	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
 		return -ENOMEM;
@@ -273,7 +271,7 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
 	INIT_LIST_HEAD(&chan->head);
 
 	ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
-				  0x1000, 0x1000, true, vmm, 0, subdevs,
+				  0x1000, 0x1000, true, vmm, 0, fifo->runlist[runlist].engm_sw,
 				  1, fifo->user.bar->addr, 0x200,
 				  oclass, &chan->base);
 	if (ret)
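
gpfifogk104.c drops the per-channel recomputation of the engine bitmask in favour of the runlist's precomputed engm_sw, and gk104_fifo_gpfifo_engine_addr() now distinguishes the two NVENC units by instance rather than by separate enum values. A compact model of that instance-dependent dispatch (register offsets copied from the hunk above; everything else is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    enum eng_type { T_GR, T_NVDEC, T_NVENC };
    struct engine { enum eng_type type; int inst; };

    /* Mirrors the new switch: one case per type, the instance decides within it. */
    static uint32_t engine_addr(const struct engine *e)
    {
        switch (e->type) {
        case T_GR:    return 0x0210;
        case T_NVDEC: return 0x02100270;
        case T_NVENC: return e->inst ? 0x0210 : 0x02100290;
        default:      return 0;
        }
    }

    int main(void)
    {
        struct engine nvenc0 = { T_NVENC, 0 }, nvenc1 = { T_NVENC, 1 };
        printf("nvenc0=%08x nvenc1=%08x\n",
               (unsigned)engine_addr(&nvenc0), (unsigned)engine_addr(&nvenc1));
        return 0;
    }
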
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
index a7462cf59d65cb1c6afc5388d5f3f9e211db64d4..ee4967b706a7d1991ef583b743bc7e8774d74a5e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
@@ -44,7 +44,7 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid
 	int ret;
 
 	/* Block runlist to prevent the channel from being rescheduled. */
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&chan->fifo->base.mutex);
 	nvkm_mask(device, 0x002630, BIT(chan->runl), BIT(chan->runl));
 
 	/* Preempt the channel. */
@@ -58,7 +58,7 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid
 
 	/* Resume runlist. */
 	nvkm_mask(device, 0x002630, BIT(chan->runl), 0);
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&chan->fifo->base.mutex);
 	return ret;
 }
 
@@ -70,8 +70,7 @@ gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
 	struct nvkm_gpuobj *inst = chan->base.inst;
 	int ret;
 
-	if (engine->subdev.index >= NVKM_ENGINE_CE0 &&
-	    engine->subdev.index <= NVKM_ENGINE_CE_LAST)
+	if (engine->subdev.type == NVKM_ENGINE_CE)
 		return gk104_fifo_gpfifo_kick(chan);
 
 	ret = gv100_fifo_gpfifo_engine_valid(chan, false, false);
@@ -90,17 +89,15 @@ gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
 			      struct nvkm_engine *engine)
 {
 	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
 	struct nvkm_gpuobj *inst = chan->base.inst;
-	u64 addr;
 
-	if (engine->subdev.index >= NVKM_ENGINE_CE0 &&
-	    engine->subdev.index <= NVKM_ENGINE_CE_LAST)
+	if (engine->subdev.type == NVKM_ENGINE_CE)
 		return 0;
 
-	addr = chan->engn[engine->subdev.index].vma->addr;
 	nvkm_kmap(inst);
-	nvkm_wo32(inst, 0x210, lower_32_bits(addr) | 0x00000004);
-	nvkm_wo32(inst, 0x214, upper_32_bits(addr));
+	nvkm_wo32(inst, 0x210, lower_32_bits(engn->vma->addr) | 0x00000004);
+	nvkm_wo32(inst, 0x214, upper_32_bits(engn->vma->addr));
 	nvkm_done(inst);
 
 	return gv100_fifo_gpfifo_engine_valid(chan, false, true);
@@ -129,8 +126,6 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct gk104_fifo_chan *chan;
 	int runlist = ffs(*runlists) -1, ret, i;
-	unsigned long engm;
-	u64 subdevs = 0;
 	u64 usermem, mthd;
 	u32 size;
 
@@ -138,12 +133,6 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
 		return -EINVAL;
 	*runlists = BIT_ULL(runlist);
 
-	engm = fifo->runlist[runlist].engm;
-	for_each_set_bit(i, &engm, fifo->engine_nr) {
-		if (fifo->engine[i].engine)
-			subdevs |= BIT_ULL(fifo->engine[i].engine->subdev.index);
-	}
-
 	/* Allocate the channel. */
 	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
 		return -ENOMEM;
@@ -153,7 +142,7 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
 	INIT_LIST_HEAD(&chan->head);
 
 	ret = nvkm_fifo_chan_ctor(func, &fifo->base, 0x1000, 0x1000, true, vmm,
-				  0, subdevs, 1, fifo->user.bar->addr, 0x200,
+				  0, fifo->runlist[runlist].engm, 1, fifo->user.bar->addr, 0x200,
 				  oclass, &chan->base);
 	if (ret)
 		return ret;
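
The gv100 path shows the main readability win of the conversion: an open-coded CE0..CE_LAST range test collapses into a single type comparison, since every copy engine now shares one type plus an instance. A minimal before/after model (enum values illustrative, not the real nvkm numbering):

    #include <stdbool.h>
    #include <stdio.h>

    /* Old scheme: one enum value per CE unit, range-checked. */
    enum old_idx { OLD_GR, OLD_CE0, OLD_CE1, OLD_CE2, OLD_CE_LAST = OLD_CE2 };
    static bool old_is_ce(enum old_idx i) { return i >= OLD_CE0 && i <= OLD_CE_LAST; }

    /* New scheme: one type, per-unit instance; the range test disappears. */
    enum new_type { NEW_GR, NEW_CE };
    struct subdev { enum new_type type; int inst; };
    static bool new_is_ce(const struct subdev *s) { return s->type == NEW_CE; }

    int main(void)
    {
        struct subdev ce2 = { NEW_CE, 2 };
        printf("old: %d, new: %d\n", old_is_ce(OLD_CE2), new_is_ce(&ce2));
        return 0;
    }
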
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
index 6ee1bb32a071c0bd9c51e0cf3063049d206e06f2..70e16a91ac12ad6b3ddaf577b20479e16e6eaf64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
@@ -301,7 +301,8 @@ gv100_fifo = {
 };
 
 int
-gv100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gv100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_fifo **pfifo)
 {
-	return gk104_fifo_new_(&gv100_fifo, device, index, 4096, pfifo);
+	return gk104_fifo_new_(&gv100_fifo, device, type, inst, 4096, pfifo);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index c1d1b1aa5bc662afef97f383eff8cd2959d6c482..c6730c124769ca7c7850e91bad5bfd6c40a1c20c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -94,6 +94,38 @@ __releases(fifo->base.lock)
 	spin_unlock_irqrestore(&fifo->base.lock, flags);
 }
 
+struct nvkm_engine *
+nv04_fifo_id_engine(struct nvkm_fifo *fifo, int engi)
+{
+	enum nvkm_subdev_type type;
+
+	switch (engi) {
+	case NV04_FIFO_ENGN_SW  : type = NVKM_ENGINE_SW; break;
+	case NV04_FIFO_ENGN_GR  : type = NVKM_ENGINE_GR; break;
+	case NV04_FIFO_ENGN_MPEG: type = NVKM_ENGINE_MPEG; break;
+	case NV04_FIFO_ENGN_DMA : type = NVKM_ENGINE_DMAOBJ; break;
+	default:
+		WARN_ON(1);
+		return NULL;
+	}
+
+	return nvkm_device_engine(fifo->engine.subdev.device, type, 0);
+}
+
+int
+nv04_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
+{
+	switch (engine->subdev.type) {
+	case NVKM_ENGINE_SW    : return NV04_FIFO_ENGN_SW;
+	case NVKM_ENGINE_GR    : return NV04_FIFO_ENGN_GR;
+	case NVKM_ENGINE_MPEG  : return NV04_FIFO_ENGN_MPEG;
+	case NVKM_ENGINE_DMAOBJ: return NV04_FIFO_ENGN_DMA;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
 static const char *
 nv_dma_state_err(u32 state)
 {
@@ -326,7 +358,7 @@ nv04_fifo_init(struct nvkm_fifo *base)
 
 int
 nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
-	       int index, int nr, const struct nv04_fifo_ramfc *ramfc,
+	       enum nvkm_subdev_type type, int inst, int nr, const struct nv04_fifo_ramfc *ramfc,
 	       struct nvkm_fifo **pfifo)
 {
 	struct nv04_fifo *fifo;
@@ -337,7 +369,7 @@ nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
 	fifo->ramfc = ramfc;
 	*pfifo = &fifo->base;
 
-	ret = nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
+	ret = nvkm_fifo_ctor(func, device, type, inst, nr, &fifo->base);
 	if (ret)
 		return ret;
 
@@ -349,6 +381,8 @@ static const struct nvkm_fifo_func
 nv04_fifo = {
 	.init = nv04_fifo_init,
 	.intr = nv04_fifo_intr,
+	.engine_id = nv04_fifo_engine_id,
+	.id_engine = nv04_fifo_id_engine,
 	.pause = nv04_fifo_pause,
 	.start = nv04_fifo_start,
 	.chan = {
@@ -358,8 +392,8 @@ nv04_fifo = {
 };
 
 int
-nv04_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+nv04_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_fifo **pfifo)
 {
-	return nv04_fifo_new_(&nv04_fifo, device, index, 16,
-			      nv04_fifo_ramfc, pfifo);
+	return nv04_fifo_new_(&nv04_fifo, device, type, inst, 16, nv04_fifo_ramfc, pfifo);
 }
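
nv04 gains the two fifo-level callbacks as an explicit, hand-written bijection between FIFO engine slots and subdev types (the instance is always 0 on these parts). A standalone model that checks the round-trip property the driver now relies on (names are illustrative stand-ins):

    #include <assert.h>
    #include <stdio.h>

    enum type { T_SW, T_GR, T_MPEG, T_DMAOBJ, T_INVALID };
    enum slot { SLOT_SW, SLOT_GR, SLOT_MPEG, SLOT_DMA, SLOT_NR };

    /* slot -> type, like nv04_fifo_id_engine(). */
    static enum type id_engine(enum slot s)
    {
        switch (s) {
        case SLOT_SW:   return T_SW;
        case SLOT_GR:   return T_GR;
        case SLOT_MPEG: return T_MPEG;
        case SLOT_DMA:  return T_DMAOBJ;
        default:        return T_INVALID;
        }
    }

    /* type -> slot, like nv04_fifo_engine_id(). */
    static enum slot engine_id(enum type t)
    {
        switch (t) {
        case T_SW:     return SLOT_SW;
        case T_GR:     return SLOT_GR;
        case T_MPEG:   return SLOT_MPEG;
        case T_DMAOBJ: return SLOT_DMA;
        default:       return SLOT_SW; /* mirrors the WARN_ON + return 0 above */
        }
    }

    int main(void)
    {
        for (enum slot s = SLOT_SW; s < SLOT_NR; s++)
            assert(engine_id(id_engine(s)) == s); /* bijection holds */
        puts("round-trip ok");
        return 0;
    }
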
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
index e5ecceee77ae768d6da413a9b53fa6da48041c14..3f23bcde4a540cc3c9a4384744f675120d08a023 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
@@ -17,8 +17,7 @@ struct nv04_fifo {
 	const struct nv04_fifo_ramfc *ramfc;
 };
 
-int nv04_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
-		   int index, int nr, const struct nv04_fifo_ramfc *,
-		   struct nvkm_fifo **);
+int nv04_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		   int nr, const struct nv04_fifo_ramfc *, struct nvkm_fifo **);
 void nv04_fifo_init(struct nvkm_fifo *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
index f9a87deb2b3dc3fd1f11e926bdb1bec17cf56b2b..f8887f0f2f82ac9223bdda847e99a603a49b6068 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
@@ -43,6 +43,8 @@ static const struct nvkm_fifo_func
 nv10_fifo = {
 	.init = nv04_fifo_init,
 	.intr = nv04_fifo_intr,
+	.engine_id = nv04_fifo_engine_id,
+	.id_engine = nv04_fifo_id_engine,
 	.pause = nv04_fifo_pause,
 	.start = nv04_fifo_start,
 	.chan = {
@@ -52,8 +54,8 @@ nv10_fifo = {
 };
 
 int
-nv10_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+nv10_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_fifo **pfifo)
 {
-	return nv04_fifo_new_(&nv10_fifo, device, index, 32,
-			      nv10_fifo_ramfc, pfifo);
+	return nv04_fifo_new_(&nv10_fifo, device, type, inst, 32, nv10_fifo_ramfc, pfifo);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
index f6d383a21222e07b48ea569801ae227ba734c6b4..3f94c7b5b054b7983f0621efa1206e5275a5aa2c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
@@ -81,6 +81,8 @@ static const struct nvkm_fifo_func
 nv17_fifo = {
 	.init = nv17_fifo_init,
 	.intr = nv04_fifo_intr,
+	.engine_id = nv04_fifo_engine_id,
+	.id_engine = nv04_fifo_id_engine,
 	.pause = nv04_fifo_pause,
 	.start = nv04_fifo_start,
 	.chan = {
@@ -90,8 +92,8 @@ nv17_fifo = {
 };
 
 int
-nv17_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+nv17_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_fifo **pfifo)
 {
-	return nv04_fifo_new_(&nv17_fifo, device, index, 32,
-			      nv17_fifo_ramfc, pfifo);
+	return nv04_fifo_new_(&nv17_fifo, device, type, inst, 32, nv17_fifo_ramfc, pfifo);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index 2d61fd832ddb0d882d223b3073dd4fcc3cdb7009..f9ea46809bc05aac502eef61bb9723827765a9bb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -112,6 +112,8 @@ static const struct nvkm_fifo_func
 nv40_fifo = {
 	.init = nv40_fifo_init,
 	.intr = nv04_fifo_intr,
+	.engine_id = nv04_fifo_engine_id,
+	.id_engine = nv04_fifo_id_engine,
 	.pause = nv04_fifo_pause,
 	.start = nv04_fifo_start,
 	.chan = {
@@ -121,8 +123,8 @@ nv40_fifo = {
 };
 
 int
-nv40_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+nv40_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_fifo **pfifo)
 {
-	return nv04_fifo_new_(&nv40_fifo, device, index, 32,
-			      nv40_fifo_ramfc, pfifo);
+	return nv04_fifo_new_(&nv40_fifo, device, type, inst, 32, nv40_fifo_ramfc, pfifo);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index fa6e094d8068679b209542c4a0707e7eed643970..be94156ea2488551c4e6cfd4b77ddd93a211e53a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -51,9 +51,9 @@ nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo)
 void
 nv50_fifo_runlist_update(struct nv50_fifo *fifo)
 {
-	mutex_lock(&fifo->base.engine.subdev.mutex);
+	mutex_lock(&fifo->base.mutex);
 	nv50_fifo_runlist_update_locked(fifo);
-	mutex_unlock(&fifo->base.engine.subdev.mutex);
+	mutex_unlock(&fifo->base.mutex);
 }
 
 int
@@ -107,7 +107,7 @@ nv50_fifo_dtor(struct nvkm_fifo *base)
 
 int
 nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
-	       int index, struct nvkm_fifo **pfifo)
+	       enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
 {
 	struct nv50_fifo *fifo;
 	int ret;
@@ -116,7 +116,7 @@ nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
 		return -ENOMEM;
 	*pfifo = &fifo->base;
 
-	ret = nvkm_fifo_ctor(func, device, index, 128, &fifo->base);
+	ret = nvkm_fifo_ctor(func, device, type, inst, 128, &fifo->base);
 	if (ret)
 		return ret;
 
@@ -131,6 +131,8 @@ nv50_fifo = {
 	.oneinit = nv50_fifo_oneinit,
 	.init = nv50_fifo_init,
 	.intr = nv04_fifo_intr,
+	.engine_id = nv04_fifo_engine_id,
+	.id_engine = nv04_fifo_id_engine,
 	.pause = nv04_fifo_pause,
 	.start = nv04_fifo_start,
 	.chan = {
@@ -141,7 +143,8 @@ nv50_fifo = {
 };
 
 int
-nv50_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+nv50_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_fifo **pfifo)
 {
-	return nv50_fifo_new_(&nv50_fifo, device, index, pfifo);
+	return nv50_fifo_new_(&nv50_fifo, device, type, inst, pfifo);
 }
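
Several hunks above (nv50, gf100 and gk104 channel code) replace `subdev.mutex`, which is shared by the whole engine wrapper, with a mutex owned by the fifo itself, narrowing the lock to the data it actually protects. A userspace sketch of that ownership move, using pthreads in place of the kernel mutex:

    #include <pthread.h>
    #include <stdio.h>

    struct fifo {
        pthread_mutex_t mutex;   /* now owned by the fifo, not the subdev */
        int runlist_gen;
    };

    /* Mirrors nv50_fifo_runlist_update(): lock, mutate, unlock. */
    static void runlist_update(struct fifo *fifo)
    {
        pthread_mutex_lock(&fifo->mutex);
        fifo->runlist_gen++;
        pthread_mutex_unlock(&fifo->mutex);
    }

    int main(void)
    {
        struct fifo fifo = { PTHREAD_MUTEX_INITIALIZER, 0 };
        runlist_update(&fifo);
        printf("gen=%d\n", fifo.runlist_gen);
        return 0;
    }
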
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
index 87d30b6bd2ea8f929a97b2dd24f3ab83a53109f0..0111e7e5a4e38802d2aba577dcd5c0ea2f462599 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
@@ -10,8 +10,8 @@ struct nv50_fifo {
 	int cur_runlist;
 };
 
-int nv50_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
-		   int index, struct nvkm_fifo **);
+int nv50_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		   struct nvkm_fifo **);
 
 void *nv50_fifo_dtor(struct nvkm_fifo *);
 int nv50_fifo_oneinit(struct nvkm_fifo *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index 0ef8baab513e5ff4e8411da035d72b8994e53454..899272801a8bfd088a2c9347ff5bcbe17a295424 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -4,8 +4,8 @@
 #define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
 #include <engine/fifo.h>
 
-int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *,
-		   int index, int nr, struct nvkm_fifo *);
+int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		   int nr, struct nvkm_fifo *);
 void nvkm_fifo_uevent(struct nvkm_fifo *);
 void nvkm_fifo_cevent(struct nvkm_fifo *);
 void nvkm_fifo_kevent(struct nvkm_fifo *, int chid);
@@ -23,6 +23,8 @@ struct nvkm_fifo_func {
 	void (*fini)(struct nvkm_fifo *);
 	void (*intr)(struct nvkm_fifo *);
 	void (*fault)(struct nvkm_fifo *, struct nvkm_fault_data *);
+	int (*engine_id)(struct nvkm_fifo *, struct nvkm_engine *);
+	struct nvkm_engine *(*id_engine)(struct nvkm_fifo *, int engi);
 	void (*pause)(struct nvkm_fifo *, unsigned long *);
 	void (*start)(struct nvkm_fifo *, unsigned long *);
 	void (*uevent_init)(struct nvkm_fifo *);
@@ -35,8 +37,13 @@ struct nvkm_fifo_func {
 };
 
 void nv04_fifo_intr(struct nvkm_fifo *);
+int nv04_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
+struct nvkm_engine *nv04_fifo_id_engine(struct nvkm_fifo *, int);
 void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
 void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
 
 void gf100_fifo_intr_fault(struct nvkm_fifo *, int);
+
+int gk104_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
+struct nvkm_engine *gk104_fifo_id_engine(struct nvkm_fifo *, int);
 #endif
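
priv.h makes the engine mapping a per-implementation operation: each generation supplies engine_id/id_engine through its function table, and common code dispatches through the pointer rather than assuming a global index space. A minimal model of that dispatch (names and the trivial mapping are illustrative):

    #include <stdio.h>

    struct fifo;
    struct fifo_func {
        int (*engine_id)(struct fifo *, int type, int inst);
    };
    struct fifo { const struct fifo_func *func; };

    static int nv04_like_engine_id(struct fifo *fifo, int type, int inst)
    {
        (void)fifo; (void)inst;
        return type;   /* trivial mapping, purely for the sketch */
    }

    static const struct fifo_func nv04_like = { .engine_id = nv04_like_engine_id };

    /* Common code never hardcodes a generation; it dispatches via func. */
    static int fifo_engine_id(struct fifo *fifo, int type, int inst)
    {
        return fifo->func->engine_id(fifo, type, inst);
    }

    int main(void)
    {
        struct fifo fifo = { &nv04_like };
        printf("engi=%d\n", fifo_engine_id(&fifo, 2, 0));
        return 0;
    }
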
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
index 005f3e1729b95515ab1da6b001817c13824f30be..e417044cc3474ba043a252ccc8689dfec3cb38c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
@@ -24,7 +24,13 @@
 #include "changk104.h"
 #include "user.h"
 
+#include <core/client.h>
 #include <core/gpuobj.h>
+#include <subdev/bar.h>
+#include <subdev/fault.h>
+#include <subdev/top.h>
+#include <subdev/timer.h>
+#include <engine/sw.h>
 
 #include <nvif/class.h>
 
@@ -109,8 +115,363 @@ tu102_fifo = {
 	.cgrp_force = true,
 };
 
+static void
+tu102_fifo_recover_work(struct work_struct *w)
+{
+	struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_engine *engine;
+	unsigned long flags;
+	u32 engm, runm, todo;
+	int engn, runl;
+
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	runm = fifo->recover.runm;
+	engm = fifo->recover.engm;
+	fifo->recover.engm = 0;
+	fifo->recover.runm = 0;
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
+
+	nvkm_mask(device, 0x002630, runm, runm);
+
+	for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
+		if ((engine = fifo->engine[engn].engine)) {
+			nvkm_subdev_fini(&engine->subdev, false);
+			WARN_ON(nvkm_subdev_init(&engine->subdev));
+		}
+	}
+
+	for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
+		gk104_fifo_runlist_update(fifo, runl);
+
+	nvkm_mask(device, 0x002630, runm, 0x00000000);
+}
+
+static void tu102_fifo_recover_engn(struct gk104_fifo *fifo, int engn);
+
+static void
+tu102_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
+{
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 runm = BIT(runl);
+
+	assert_spin_locked(&fifo->base.lock);
+	if (fifo->recover.runm & runm)
+		return;
+	fifo->recover.runm |= runm;
+
+	/* Block runlist to prevent channel assignment(s) from changing. */
+	nvkm_mask(device, 0x002630, runm, runm);
+
+	/* Schedule recovery. */
+	nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
+	schedule_work(&fifo->recover.work);
+}
+
+static struct gk104_fifo_chan *
+tu102_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
+{
+	struct gk104_fifo_chan *chan;
+	struct nvkm_fifo_cgrp *cgrp;
+
+	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
+		if (chan->base.chid == chid) {
+			list_del_init(&chan->head);
+			return chan;
+		}
+	}
+
+	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
+		if (cgrp->id == chid) {
+			chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
+			list_del_init(&chan->head);
+			if (!--cgrp->chan_nr)
+				list_del_init(&cgrp->head);
+			return chan;
+		}
+	}
+
+	return NULL;
+}
+
+static void
+tu102_fifo_recover_chan(struct nvkm_fifo *base, int chid)
+{
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32  stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
+	const u32  runl = (stat & 0x000f0000) >> 16;
+	const bool used = (stat & 0x00000001);
+	unsigned long engn, engm = fifo->runlist[runl].engm;
+	struct gk104_fifo_chan *chan;
+
+	assert_spin_locked(&fifo->base.lock);
+	if (!used)
+		return;
+
+	/* Lookup SW state for channel, and mark it as dead. */
+	chan = tu102_fifo_recover_chid(fifo, runl, chid);
+	if (chan) {
+		chan->killed = true;
+		nvkm_fifo_kevent(&fifo->base, chid);
+	}
+
+	/* Disable channel. */
+	nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
+	nvkm_warn(subdev, "channel %d: killed\n", chid);
+
+	/* Block channel assignments from changing during recovery. */
+	tu102_fifo_recover_runl(fifo, runl);
+
+	/* Schedule recovery for any engines the channel is on. */
+	for_each_set_bit(engn, &engm, fifo->engine_nr) {
+		struct gk104_fifo_engine_status status;
+
+		gk104_fifo_engine_status(fifo, engn, &status);
+		if (!status.chan || status.chan->id != chid)
+			continue;
+		tu102_fifo_recover_engn(fifo, engn);
+	}
+}
+
+static void
+tu102_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
+{
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 runl = fifo->engine[engn].runl;
+	const u32 engm = BIT(engn);
+	struct gk104_fifo_engine_status status;
+
+	assert_spin_locked(&fifo->base.lock);
+	if (fifo->recover.engm & engm)
+		return;
+	fifo->recover.engm |= engm;
+
+	/* Block channel assignments from changing during recovery. */
+	tu102_fifo_recover_runl(fifo, runl);
+
+	/* Determine which channel (if any) is currently on the engine. */
+	gk104_fifo_engine_status(fifo, engn, &status);
+	if (status.chan) {
+	/* The channel is no longer viable, kill it. */
+		tu102_fifo_recover_chan(&fifo->base, status.chan->id);
+	}
+
+	/* Preempt the runlist */
+	nvkm_wr32(device, 0x2638, BIT(runl));
+
+	/* Schedule recovery. */
+	nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
+	schedule_work(&fifo->recover.work);
+}
+
+static void
+tu102_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
+{
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const struct nvkm_enum *er, *ee, *ec, *ea;
+	struct nvkm_engine *engine = NULL;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	const char *en = "";
+	char ct[8] = "HUB/";
+	int engn;
+
+	er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
+	ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
+	if (info->hub) {
+		ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client);
+	} else {
+		ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client);
+		snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
+	}
+	ea = nvkm_enum_find(fifo->func->fault.access, info->access);
+
+	if (ee && ee->data2) {
+		switch (ee->data2) {
+		case NVKM_SUBDEV_BAR:
+			nvkm_bar_bar1_reset(device);
+			break;
+		case NVKM_SUBDEV_INSTMEM:
+			nvkm_bar_bar2_reset(device);
+			break;
+		case NVKM_ENGINE_IFB:
+			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
+			break;
+		default:
+			engine = nvkm_device_engine(device, ee->data2, 0);
+			break;
+		}
+	}
+
+	if (ee == NULL) {
+		struct nvkm_subdev *subdev = nvkm_top_fault(device, info->engine);
+		if (subdev) {
+			if (subdev->func == &nvkm_engine)
+				engine = container_of(subdev, typeof(*engine), subdev);
+			en = engine->subdev.name;
+		}
+	} else {
+		en = ee->name;
+	}
+
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst);
+
+	nvkm_error(subdev,
+		   "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
+		   "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
+		   info->access, ea ? ea->name : "", info->addr,
+		   info->engine, ee ? ee->name : en,
+		   info->client, ct, ec ? ec->name : "",
+		   info->reason, er ? er->name : "", chan ? chan->chid : -1,
+		   info->inst, chan ? chan->object.client->name : "unknown");
+
+	/* Kill the channel that caused the fault. */
+	if (chan)
+		tu102_fifo_recover_chan(&fifo->base, chan->chid);
+
+	/* Channel recovery will probably have already done this for the
+	 * correct engine(s), but just in case we can't find the channel
+	 * information...
+	 */
+	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
+		if (fifo->engine[engn].engine == engine) {
+			tu102_fifo_recover_engn(fifo, engn);
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+static void
+tu102_fifo_intr_ctxsw_timeout(struct gk104_fifo *fifo)
+{
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	unsigned long flags, engm;
+	u32 engn;
+
+	spin_lock_irqsave(&fifo->base.lock, flags);
+
+	engm = nvkm_rd32(device, 0x2a30);
+	nvkm_wr32(device, 0x2a30, engm);
+
+	for_each_set_bit(engn, &engm, 32)
+		tu102_fifo_recover_engn(fifo, engn);
+
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+static void
+tu102_fifo_intr_sched(struct gk104_fifo *fifo)
+{
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x00254c);
+	u32 code = intr & 0x000000ff;
+
+	nvkm_error(subdev, "SCHED_ERROR %02x\n", code);
+}
+
+static void
+tu102_fifo_intr(struct nvkm_fifo *base)
+{
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask = nvkm_rd32(device, 0x002140);
+	u32 stat = nvkm_rd32(device, 0x002100) & mask;
+
+	if (stat & 0x00000001) {
+		gk104_fifo_intr_bind(fifo);
+		nvkm_wr32(device, 0x002100, 0x00000001);
+		stat &= ~0x00000001;
+	}
+
+	if (stat & 0x00000002) {
+		tu102_fifo_intr_ctxsw_timeout(fifo);
+		stat &= ~0x00000002;
+	}
+
+	if (stat & 0x00000100) {
+		tu102_fifo_intr_sched(fifo);
+		nvkm_wr32(device, 0x002100, 0x00000100);
+		stat &= ~0x00000100;
+	}
+
+	if (stat & 0x00010000) {
+		gk104_fifo_intr_chsw(fifo);
+		nvkm_wr32(device, 0x002100, 0x00010000);
+		stat &= ~0x00010000;
+	}
+
+	if (stat & 0x20000000) {
+		u32 mask = nvkm_rd32(device, 0x0025a0);
+
+		while (mask) {
+			u32 unit = __ffs(mask);
+
+			gk104_fifo_intr_pbdma_0(fifo, unit);
+			gk104_fifo_intr_pbdma_1(fifo, unit);
+			nvkm_wr32(device, 0x0025a0, (1 << unit));
+			mask &= ~(1 << unit);
+		}
+		stat &= ~0x20000000;
+	}
+
+	if (stat & 0x40000000) {
+		gk104_fifo_intr_runlist(fifo);
+		stat &= ~0x40000000;
+	}
+
+	if (stat & 0x80000000) {
+		nvkm_wr32(device, 0x002100, 0x80000000);
+		gk104_fifo_intr_engine(fifo);
+		stat &= ~0x80000000;
+	}
+
+	if (stat) {
+		nvkm_error(subdev, "INTR %08x\n", stat);
+		nvkm_mask(device, 0x002140, stat, 0x00000000);
+		nvkm_wr32(device, 0x002100, stat);
+	}
+}
+
+static const struct nvkm_fifo_func
+tu102_fifo_ = {
+	.dtor = gk104_fifo_dtor,
+	.oneinit = gk104_fifo_oneinit,
+	.info = gk104_fifo_info,
+	.init = gk104_fifo_init,
+	.fini = gk104_fifo_fini,
+	.intr = tu102_fifo_intr,
+	.fault = tu102_fifo_fault,
+	.engine_id = gk104_fifo_engine_id,
+	.id_engine = gk104_fifo_id_engine,
+	.uevent_init = gk104_fifo_uevent_init,
+	.uevent_fini = gk104_fifo_uevent_fini,
+	.recover_chan = tu102_fifo_recover_chan,
+	.class_get = gk104_fifo_class_get,
+	.class_new = gk104_fifo_class_new,
+};
+
 int
-tu102_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+tu102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_fifo **pfifo)
 {
-	return gk104_fifo_new_(&tu102_fifo, device, index, 4096, pfifo);
+	struct gk104_fifo *fifo;
+
+	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+		return -ENOMEM;
+	fifo->func = &tu102_fifo;
+	INIT_WORK(&fifo->recover.work, tu102_fifo_recover_work);
+	*pfifo = &fifo->base;
+
+	return nvkm_fifo_ctor(&tu102_fifo_, device, type, inst, 4096, &fifo->base);
 }
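
tu102 now carries its own copy of the gk104-style recovery machinery: the interrupt path only accumulates runlist/engine masks under the fifo lock and schedules a worker, which snapshots and clears the masks before doing the slow reset work. A compact single-threaded model of that hand-off (locking elided; in the driver it is fifo->base.lock plus schedule_work(); __builtin_ctzl is a GCC/Clang builtin):

    #include <stdio.h>

    struct recover { unsigned long runm, engm; };

    /* Interrupt path: record what needs recovery, like tu102_fifo_recover_runl(). */
    static void mark_runl(struct recover *r, int runl)
    {
        if (r->runm & (1UL << runl))
            return;           /* already scheduled, nothing to do */
        r->runm |= 1UL << runl;
    }

    /* Worker: snapshot-and-clear, then act, like tu102_fifo_recover_work(). */
    static void recover_work(struct recover *r)
    {
        unsigned long todo = r->runm;
        r->runm = 0;
        while (todo) {
            int runl = __builtin_ctzl(todo);
            todo &= ~(1UL << runl);
            printf("recovering runlist %d\n", runl);
        }
    }

    int main(void)
    {
        struct recover r = { 0, 0 };
        mark_runl(&r, 3);
        mark_runl(&r, 3);   /* idempotent */
        mark_runl(&r, 0);
        recover_work(&r);
        return 0;
    }
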
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
index d41fb94524e948072b9048d4a432cecfbe34ed65..61759f54406e40a328cea3a05fdaa71a02f68af6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
@@ -175,8 +175,8 @@ nvkm_gr = {
 
 int
 nvkm_gr_ctor(const struct nvkm_gr_func *func, struct nvkm_device *device,
-	     int index, bool enable, struct nvkm_gr *gr)
+	     enum nvkm_subdev_type type, int inst, bool enable, struct nvkm_gr *gr)
 {
 	gr->func = func;
-	return nvkm_engine_ctor(&nvkm_gr, device, index, enable, &gr->engine);
+	return nvkm_engine_ctor(&nvkm_gr, device, type, inst, enable, &gr->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
index da1ba74682b4448955e4e81d0ffd2a48282c3f2b..65c332118fd66c6b1e0b71386a9a366cd7702d80 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
@@ -192,7 +192,7 @@ g84_gr = {
 };
 
 int
-g84_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+g84_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv50_gr_new_(&g84_gr, device, index, pgr);
+	return nv50_gr_new_(&g84_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 749f73fc45a84224e932639fb72518e6a80639ae..397ff4fe9df89fac4b6b01d1e19a2c03409fc442 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2087,8 +2087,8 @@ gf100_gr_flcn = {
 };
 
 int
-gf100_gr_new_(const struct gf100_gr_fwif *fwif,
-	      struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf100_gr_new_(const struct gf100_gr_fwif *fwif, struct nvkm_device *device,
+	      enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
 	struct gf100_gr *gr;
 	int ret;
@@ -2097,7 +2097,7 @@ gf100_gr_new_(const struct gf100_gr_fwif *fwif,
 		return -ENOMEM;
 	*pgr = &gr->base;
 
-	ret = nvkm_gr_ctor(&gf100_gr_, device, index, true, &gr->base);
+	ret = nvkm_gr_ctor(&gf100_gr_, device, type, inst, true, &gr->base);
 	if (ret)
 		return ret;
 
@@ -2483,7 +2483,7 @@ gf100_gr_fwif[] = {
 };
 
 int
-gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf100_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gf100_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gf100_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index dfd5dd74f0d59d35ce4de4dc4d3fa550dd223ea5..c0038f906135567c64694cfa49d743f1cb5c1dcd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -416,6 +416,6 @@ void gm20b_gr_acr_bld_patch(struct nvkm_acr *, u32, s64);
 extern const struct nvkm_acr_lsf_func gp108_gr_gpccs_acr;
 extern const struct nvkm_acr_lsf_func gp108_gr_fecs_acr;
 
-int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, int,
+int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
 		  struct nvkm_gr **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 0536fe8b2b9258b0f32b049cadb6723ae57552d4..3acd99c306f201f80f97e854b166cf455c6758a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -152,7 +152,7 @@ gf104_gr_fwif[] = {
 };
 
 int
-gf104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf104_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gf104_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gf104_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 14284b06112f7a2926a8a0ac46738d9d4ef22978..030640bb3dca073c4592dbb7502b3a100c83f907 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -151,7 +151,7 @@ gf108_gr_fwif[] = {
 };
 
 int
-gf108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf108_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gf108_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gf108_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 280752551a3ad1853be03f1ba7e4cfb5bf0f1eef..616e2def1865f8ceae3819b00936c0852c42f18e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -127,7 +127,7 @@ gf110_gr_fwif[] = {
 };
 
 int
-gf110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf110_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gf110_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gf110_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 235c3fbe4b957b56ab41a2715ee0a4d9e4f60a1c..669e7536970e6142f015ec0a3d436316efc573fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -192,7 +192,7 @@ gf117_gr_fwif[] = {
 };
 
 int
-gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf117_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gf117_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gf117_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 7eac385ece972c04c63d93859b0d1a4e6d1600e0..5b09bda8110c3539f1f8ed32f58337c2f4201309 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -218,7 +218,7 @@ gf119_gr_fwif[] = {
 };
 
 int
-gf119_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf119_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gf119_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gf119_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 89f51d76082bcc5e7b3a8bafca16e3edc7fa4130..b680eaa0f3502e2936080583edca7a3585b8ecd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -497,7 +497,7 @@ gk104_gr_fwif[] = {
 };
 
 int
-gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk104_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gk104_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gk104_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 735f05e54d62af1714e820c0294ed6601dcc961a..103e06a77e658ca9f108c3fc0259043715a05138 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -393,7 +393,7 @@ gk110_gr_fwif[] = {
 };
 
 int
-gk110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk110_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gk110_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gk110_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index adc971be8f3b5c641670897ea80d50149b0a8096..034d0b11a17d451f88b04093d9680037cada8c77 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -144,7 +144,8 @@ gk110b_gr_fwif[] = {
 };
 
 int
-gk110b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk110b_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gk110b_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gk110b_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index aa0eff6795ac76b5d847421fbacd255b44d4df50..116d682f9f962523dfeb3fa1655de26115c6e7f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -202,7 +202,7 @@ gk208_gr_fwif[] = {
 };
 
 int
-gk208_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk208_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gk208_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gk208_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 6d4d7285161018a5e301787fd9c3a69456df086f..be0b2cefd8e8aad984946be55d8759cf0c364287 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -357,7 +357,7 @@ gk20a_gr_fwif[] = {
 };
 
 int
-gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk20a_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gk20a_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gk20a_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 09bb78ba9d0005c2ef5eb7d0a863e19b11094dbe..310987174cb566bdd27d50029c1a594e02834c8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -437,7 +437,7 @@ gm107_gr_fwif[] = {
 };
 
 int
-gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gm107_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gm107_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gm107_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 81513704751895594e3c40a918ef518bac40ef7b..5c38ff0fe7f9dde24985ea459b148b3750a1797b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -288,7 +288,7 @@ gm200_gr_fwif[] = {
 };
 
 int
-gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gm200_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gm200_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gm200_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index 1aab691fa71c9a25193e431b057c712ff08d08d3..ec1c46e47e00618f6e15f64b82481587b5caec48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -181,7 +181,7 @@ gm20b_gr_fwif[] = {
 };
 
 int
-gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gm20b_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gm20b_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gm20b_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index ddba7ce937c7769833d406926cae82c451c53495..0550dd6f46f130a8d545cfd99fd26e86b2dd6709 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -156,7 +156,7 @@ gp100_gr_fwif[] = {
 };
 
 int
-gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp100_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gp100_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gp100_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index c083f3757ff74e5fda9c4a64520687a0da0a2b46..5b001f374be0b7992b8b819535b366abd058e5d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -152,7 +152,7 @@ gp102_gr_fwif[] = {
 };
 
 int
-gp102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gp102_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gp102_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
index f6a31e9a8cc8be828055c43d7039f16118a2acab..2655574ec63b089faa27236127870e7e2d678e52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
@@ -93,7 +93,7 @@ gp104_gr_fwif[] = {
 };
 
 int
-gp104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp104_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gp104_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gp104_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 2c80c6a75b56f22e31b834b592badae16de32c3f..adabc04d4f3a96fdb27681c5f2ae2982503b8cc1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -82,7 +82,7 @@ gp107_gr_fwif[] = {
 };
 
 int
-gp107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp107_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gp107_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gp107_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
index 2be8f416dd6f8c09a7d8af0eb2a47c445847e38b..7310f0466bb70579aaa7af29fa10c2de670c9a79 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
@@ -92,7 +92,7 @@ gp108_gr_fwif[] = {
 };
 
 int
-gp108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp108_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gp108_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gp108_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 6edc4bc7ed44cce28a7b616d77a432419af085b5..e13683b6e7b104022b8db3b6970be24aaf706509 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -94,7 +94,7 @@ gp10b_gr_fwif[] = {
 };
 
 int
-gp10b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp10b_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gp10b_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gp10b_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
index c711a55ce392f3f8f37d70448d4a68c999253283..1dfc65d45b5223d1b015888ea002b461565c1b57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
@@ -43,7 +43,7 @@ gt200_gr = {
 };
 
 int
-gt200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gt200_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv50_gr_new_(&gt200_gr, device, index, pgr);
+	return nv50_gr_new_(&gt200_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
index fa103df32ec755e796691bcbef16c7e8ac36f030..fcb5ead345a38407a0c53690a5acc2a19482af7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
@@ -44,7 +44,7 @@ gt215_gr = {
 };
 
 int
-gt215_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gt215_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv50_gr_new_(&gt215_gr, device, index, pgr);
+	return nv50_gr_new_(&gt215_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
index 2189a8f4e644b8c88b19b434a603683103095ed0..4d043c1173eafa53f871ffee81d1f37885c161e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
@@ -141,7 +141,7 @@ gv100_gr_fwif[] = {
 };
 
 int
-gv100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gv100_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(gv100_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(gv100_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
index eb1a90644752bb55da6531953bb069d638f35dae..cf782b64f62ef8c3b39e15731fbe716e0a8fb943 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
@@ -42,7 +42,7 @@ mcp79_gr = {
 };
 
 int
-mcp79_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+mcp79_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv50_gr_new_(&mcp79_gr, device, index, pgr);
+	return nv50_gr_new_(&mcp79_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
index c91eb56e93279e9723659f9d4668c9934a2ff592..6f90a6395453c3ef8a4080d03b3a9a31a9ad6c89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
@@ -44,7 +44,7 @@ mcp89_gr = {
 };
 
 int
-mcp89_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+mcp89_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv50_gr_new_(&mcp89_gr, device, index, pgr);
+	return nv50_gr_new_(&mcp89_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
index 9c2e985dc079e9b142add0d32646ef554fc0ed85..0bc1a238de435f0485ba304196f1df31bf16c2e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
@@ -1413,7 +1413,7 @@ nv04_gr = {
 };
 
 int
-nv04_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv04_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
 	struct nv04_gr *gr;
 
@@ -1422,5 +1422,5 @@ nv04_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 	spin_lock_init(&gr->lock);
 	*pgr = &gr->base;
 
-	return nvkm_gr_ctor(&nv04_gr, device, index, true, &gr->base);
+	return nvkm_gr_ctor(&nv04_gr, device, type, inst, true, &gr->base);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
index 4ebbfbdd8240455c2a17fa58ac650f668897ded6..942450b33bc60d73dcc931ee0bceb1c86ef02862 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
@@ -1173,7 +1173,7 @@ nv10_gr_init(struct nvkm_gr *base)
 
 int
 nv10_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
-	     int index, struct nvkm_gr **pgr)
+	     enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
 	struct nv10_gr *gr;
 
@@ -1182,7 +1182,7 @@ nv10_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
 	spin_lock_init(&gr->lock);
 	*pgr = &gr->base;
 
-	return nvkm_gr_ctor(func, device, index, true, &gr->base);
+	return nvkm_gr_ctor(func, device, type, inst, true, &gr->base);
 }
 
 static const struct nvkm_gr_func
@@ -1215,7 +1215,7 @@ nv10_gr = {
 };
 
 int
-nv10_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv10_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv10_gr_new_(&nv10_gr, device, index, pgr);
+	return nv10_gr_new_(&nv10_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
index 4327baea02af5722344bbb704e05f8e7617f8262..5cfe927c9123ea64999af16df0a839f37d74315b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
@@ -3,7 +3,7 @@
 #define __NV10_GR_H__
 #include "priv.h"
 
-int nv10_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+int nv10_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
 		 struct nvkm_gr **);
 int nv10_gr_init(struct nvkm_gr *);
 void nv10_gr_intr(struct nvkm_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
index 3e2c6856b4c4e4bb92a67c8d56b17e8df47b3ed6..69ece259df86580e69e46ab445ff08e99dbb483c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
@@ -53,7 +53,7 @@ nv15_gr = {
 };
 
 int
-nv15_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv15_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv10_gr_new_(&nv15_gr, device, index, pgr);
+	return nv10_gr_new_(&nv15_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
index 12437d085a7380da0944efca8872220b25613731..e39dfc7d407719de533b20824105820d7450ebe5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
@@ -53,7 +53,7 @@ nv17_gr = {
 };
 
 int
-nv17_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv17_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv10_gr_new_(&nv17_gr, device, index, pgr);
+	return nv10_gr_new_(&nv17_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index d837630a362589892c28e3653533d55d9b10eed9..6bff10cee71b9e9ca307352c2b41bb44f3e76f66 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -330,7 +330,7 @@ nv20_gr_dtor(struct nvkm_gr *base)
 
 int
 nv20_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
-	     int index, struct nvkm_gr **pgr)
+	     enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
 	struct nv20_gr *gr;
 
@@ -338,7 +338,7 @@ nv20_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
 		return -ENOMEM;
 	*pgr = &gr->base;
 
-	return nvkm_gr_ctor(func, device, index, true, &gr->base);
+	return nvkm_gr_ctor(func, device, type, inst, true, &gr->base);
 }
 
 static const struct nvkm_gr_func
@@ -370,7 +370,7 @@ nv20_gr = {
 };
 
 int
-nv20_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv20_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv20_gr_new_(&nv20_gr, device, index, pgr);
+	return nv20_gr_new_(&nv20_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
index e57407a8a7c3b57724d58b652c06229d2cdd1286..c0d2be53413e0dbdde1c2c88d6fba624c4af3ac2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
@@ -9,8 +9,8 @@ struct nv20_gr {
 	struct nvkm_memory *ctxtab;
 };
 
-int nv20_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *,
-		 int, struct nvkm_gr **);
+int nv20_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		 struct nvkm_gr **);
 void *nv20_gr_dtor(struct nvkm_gr *);
 int nv20_gr_oneinit(struct nvkm_gr *);
 int nv20_gr_init(struct nvkm_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
index 32d29d3faee08dd368940662ab0a32be061936e3..f3a56f17d94aeda0b38b017dda83f9cf3126e617 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
@@ -129,7 +129,7 @@ nv25_gr = {
 };
 
 int
-nv25_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv25_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv20_gr_new_(&nv25_gr, device, index, pgr);
+	return nv20_gr_new_(&nv25_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
index f941062c66f0d349e6b7a66b04d26af2be752620..f268d2642d2953e53fcf5d005b32a2a86060dd2d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
@@ -120,7 +120,7 @@ nv2a_gr = {
 };
 
 int
-nv2a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv2a_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv20_gr_new_(&nv2a_gr, device, index, pgr);
+	return nv20_gr_new_(&nv2a_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index 785ec956df0fb0c39c5703d2343a6a455772f474..e5737cdf2fa143f0d53b4dea0b3e591fefe07609 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -194,7 +194,7 @@ nv30_gr = {
 };
 
 int
-nv30_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv30_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv20_gr_new_(&nv30_gr, device, index, pgr);
+	return nv20_gr_new_(&nv30_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index bd610d75c6771b88976c6fdee977b02286090e44..1ab2da8ebf4eb2d3429fabd5069ebc27b88ecd50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -131,7 +131,7 @@ nv34_gr = {
 };
 
 int
-nv34_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv34_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv20_gr_new_(&nv34_gr, device, index, pgr);
+	return nv20_gr_new_(&nv34_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 89db7f523037411ffd80529533a660251804f4ff..591260f5676b76709dfb2f33abc30bfdbf8b0f89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -131,7 +131,7 @@ nv35_gr = {
 };
 
 int
-nv35_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv35_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv20_gr_new_(&nv35_gr, device, index, pgr);
+	return nv20_gr_new_(&nv35_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index 5f1ad8344ea9d2c4b9f79c6eda0f11db1a5b5571..67f3535ff97e1b7b7cf6d3fffe088f66ba54d12d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -429,7 +429,7 @@ nv40_gr_init(struct nvkm_gr *base)
 
 int
 nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
-	     int index, struct nvkm_gr **pgr)
+	     enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
 	struct nv40_gr *gr;
 
@@ -438,7 +438,7 @@ nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
 	*pgr = &gr->base;
 	INIT_LIST_HEAD(&gr->chan);
 
-	return nvkm_gr_ctor(func, device, index, true, &gr->base);
+	return nvkm_gr_ctor(func, device, type, inst, true, &gr->base);
 }
 
 static const struct nvkm_gr_func
@@ -470,7 +470,7 @@ nv40_gr = {
 };
 
 int
-nv40_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv40_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv40_gr_new_(&nv40_gr, device, index, pgr);
+	return nv40_gr_new_(&nv40_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
index e6128791b2d2df1b650acc8dd850f185546e307d..f3d3d3a5ae5b2a5eaab52c4e8bbc008e76d58fef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
@@ -10,7 +10,7 @@ struct nv40_gr {
 	struct list_head chan;
 };
 
-int nv40_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+int nv40_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
 		 struct nvkm_gr **);
 int nv40_gr_init(struct nvkm_gr *);
 void nv40_gr_intr(struct nvkm_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
index 45ff80254eb404ea641d83e600aa72cda21a6357..22b6a38a703197d47e4b574cb562e4c3eb204f76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
@@ -102,7 +102,7 @@ nv44_gr = {
 };
 
 int
-nv44_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv44_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv40_gr_new_(&nv44_gr, device, index, pgr);
+	return nv40_gr_new_(&nv44_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
index df16ffda17491d29f6706de8208e56fdafaa68a8..563a10097e9530a6c6e15419ce4ac3f0f7661f02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
@@ -761,7 +761,7 @@ nv50_gr_init(struct nvkm_gr *base)
 
 int
 nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
-	     int index, struct nvkm_gr **pgr)
+	     enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
 	struct nv50_gr *gr;
 
@@ -770,7 +770,7 @@ nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
 	spin_lock_init(&gr->lock);
 	*pgr = &gr->base;
 
-	return nvkm_gr_ctor(func, device, index, true, &gr->base);
+	return nvkm_gr_ctor(func, device, type, inst, true, &gr->base);
 }
 
 static const struct nvkm_gr_func
@@ -790,7 +790,7 @@ nv50_gr = {
 };
 
 int
-nv50_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv50_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return nv50_gr_new_(&nv50_gr, device, index, pgr);
+	return nv50_gr_new_(&nv50_gr, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
index 465f4da0ddfc16dd7fe1d4f09775d80139b41eb7..84388c42e5c6882d4e41e0f79f8400258ae6e4d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
@@ -11,7 +11,7 @@ struct nv50_gr {
 	u32 size;
 };
 
-int nv50_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+int nv50_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
 		 struct nvkm_gr **);
 int nv50_gr_init(struct nvkm_gr *);
 void nv50_gr_intr(struct nvkm_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
index 3b30f24032cca49caf705bbf8bd6edae4c4d71cc..9b2c66e8be9092e730fbecbdd907c55712f821e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -7,8 +7,8 @@
 struct nvkm_fb_tile;
 struct nvkm_fifo_chan;
 
-int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *,
-		 int index, bool enable, struct nvkm_gr *);
+int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		 bool enable, struct nvkm_gr *);
 
 bool nv04_gr_idle(struct nvkm_gr *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
index 6039f9948aa2d8f8770a801d42aaea9bd8d8dc0a..1a8a21844e12e0d3288133f534220e7ca4378c63 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -198,7 +198,7 @@ tu102_gr_fwif[] = {
 };
 
 int
-tu102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+tu102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(tu102_gr_fwif, device, index, pgr);
+	return gf100_gr_new_(tu102_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
index c0e11a071843b22470af3fd82119d3b4c96215e7..0fcc0ffa1e4095b2d45307546cf29fea3583712a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
@@ -37,7 +37,8 @@ g84_mpeg = {
 };
 
 int
-g84_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+g84_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_engine **pmpeg)
 {
-	return nvkm_engine_new_(&g84_mpeg, device, index, true, pmpeg);
+	return nvkm_engine_new_(&g84_mpeg, device, type, inst, true, pmpeg);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index 7fea7d45202f1a7a2cee1996dd8b45b9c0dd38df..b1054db4c1b85370cd84bfc5207e65634bf05188 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -274,7 +274,7 @@ nv31_mpeg_ = {
 
 int
 nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device,
-	       int index, struct nvkm_engine **pmpeg)
+	       enum nvkm_subdev_type type, int inst, struct nvkm_engine **pmpeg)
 {
 	struct nv31_mpeg *mpeg;
 
@@ -283,8 +283,7 @@ nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device,
 	mpeg->func = func;
 	*pmpeg = &mpeg->engine;
 
-	return nvkm_engine_ctor(&nv31_mpeg_, device, index,
-				true, &mpeg->engine);
+	return nvkm_engine_ctor(&nv31_mpeg_, device, type, inst, true, &mpeg->engine);
 }
 
 static const struct nv31_mpeg_func
@@ -293,7 +292,8 @@ nv31_mpeg = {
 };
 
 int
-nv31_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+nv31_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_engine **pmpeg)
 {
-	return nv31_mpeg_new_(&nv31_mpeg, device, index, pmpeg);
+	return nv31_mpeg_new_(&nv31_mpeg, device, type, inst, pmpeg);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
index b3e13153885896422470e712780ca09c649c3873..9f30aaaf809efdec566532445d37f50d0fea2e09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
@@ -11,8 +11,8 @@ struct nv31_mpeg {
 	struct nv31_mpeg_chan *chan;
 };
 
-int nv31_mpeg_new_(const struct nv31_mpeg_func *, struct nvkm_device *,
-		   int index, struct nvkm_engine **);
+int nv31_mpeg_new_(const struct nv31_mpeg_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		   struct nvkm_engine **);
 
 struct nv31_mpeg_func {
 	bool (*mthd_dma)(struct nvkm_device *, u32 mthd, u32 data);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
index b5ec7c504dc64c5d0c85aff4c65ccdfced3c83fe..179167484ef1804f49d32fdb2b2e18ec49002886 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
@@ -76,7 +76,8 @@ nv40_mpeg = {
 };
 
 int
-nv40_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+nv40_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_engine **pmpeg)
 {
-	return nv31_mpeg_new_(&nv40_mpeg, device, index, pmpeg);
+	return nv31_mpeg_new_(&nv40_mpeg, device, type, inst, pmpeg);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index c3cf02ed468ea1ccf6212a5e9c42dbee00fa612e..521ce43a2871e7f576c80d9ad9966e6ed955b805 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -203,7 +203,8 @@ nv44_mpeg = {
 };
 
 int
-nv44_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+nv44_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_engine **pmpeg)
 {
 	struct nv44_mpeg *mpeg;
 
@@ -212,5 +213,5 @@ nv44_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
 	INIT_LIST_HEAD(&mpeg->chan);
 	*pmpeg = &mpeg->engine;
 
-	return nvkm_engine_ctor(&nv44_mpeg, device, index, true, &mpeg->engine);
+	return nvkm_engine_ctor(&nv44_mpeg, device, type, inst, true, &mpeg->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
index 6df880a390193a2b22598bb022312ca96b216d0e..e6374f36961cda8489e6bacb77219876bf32ea42 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
@@ -129,7 +129,8 @@ nv50_mpeg = {
 };
 
 int
-nv50_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+nv50_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_engine **pmpeg)
 {
-	return nvkm_engine_new_(&nv50_mpeg, device, index, true, pmpeg);
+	return nvkm_engine_new_(&nv50_mpeg, device, type, inst, true, pmpeg);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
index 80211f76093bdca030c29a0d044b71adc2402ca2..842fcfbd28b83815ad3984f3db75cafe11c6949d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
@@ -24,9 +24,8 @@
 #include "priv.h"
 
 int
-nvkm_mspdec_new_(const struct nvkm_falcon_func *func,
-		 struct nvkm_device *device, int index,
-		 struct nvkm_engine **pengine)
+nvkm_mspdec_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
+		 enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
 {
-	return nvkm_falcon_new_(func, device, index, true, 0x085000, pengine);
+	return nvkm_falcon_new_(func, device, type, inst, true, 0x085000, pengine);
 }
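
nvkm_mspdec_new_() and the msppp/msvld helpers below are thin wrappers over
nvkm_falcon_new_(), differing only in the hard-coded PRI base each one passes;
the (type, inst) pair is forwarded untouched. A compact model of that base
table (the bases are copied from the hunks in this series, the wrapper shape
itself is hypothetical):

#include <stdio.h>

/* Unit bases as passed to nvkm_falcon_new_() in the diffs below. */
struct falcon_unit { const char *name; unsigned int base; };

static const struct falcon_unit units[] = {
	{ "msvld",  0x084000 },
	{ "mspdec", 0x085000 },
	{ "msppp",  0x086000 },
	{ "sec",    0x087000 },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(units) / sizeof(units[0]); i++)
		printf("%-6s @ %#x\n", units[i].name, units[i].base);
	return 0;
}
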
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
index f30cf1dcfb30b999bac4f9f5a864465046de167a..ecb06d68f544fa191dd635b041fe42a9a439a81d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
@@ -43,8 +43,8 @@ g98_mspdec = {
 };
 
 int
-g98_mspdec_new(struct nvkm_device *device, int index,
-	     struct nvkm_engine **pengine)
+g98_mspdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_engine **pengine)
 {
-	return nvkm_mspdec_new_(&g98_mspdec, device, index, pengine);
+	return nvkm_mspdec_new_(&g98_mspdec, device, type, inst, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
index cfe1aa81bd144f503d0ca318e9c5d2279f8ef0f8..0a69bd767d698ef724701c8711cfae495dc66ead 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
@@ -43,8 +43,8 @@ gf100_mspdec = {
 };
 
 int
-gf100_mspdec_new(struct nvkm_device *device, int index,
+gf100_mspdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		 struct nvkm_engine **pengine)
 {
-	return nvkm_mspdec_new_(&gf100_mspdec, device, index, pengine);
+	return nvkm_mspdec_new_(&gf100_mspdec, device, type, inst, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
index 24272b4927bcdf41a46a146a2e1f07bd316668e1..a08991dca4281cc67970ee0c255b8e67fe9b3111 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
@@ -35,8 +35,8 @@ gk104_mspdec = {
 };
 
 int
-gk104_mspdec_new(struct nvkm_device *device, int index,
+gk104_mspdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		 struct nvkm_engine **pengine)
 {
-	return nvkm_mspdec_new_(&gk104_mspdec, device, index, pengine);
+	return nvkm_mspdec_new_(&gk104_mspdec, device, type, inst, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
index cf6e59ad6ee296dfab8de7a3e19ab3e3e3db6893..791fb03a32ad3a6488380e5287ddb346c229f128 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
@@ -35,8 +35,8 @@ gt215_mspdec = {
 };
 
 int
-gt215_mspdec_new(struct nvkm_device *device, int index,
-	     struct nvkm_engine **pengine)
+gt215_mspdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		 struct nvkm_engine **pengine)
 {
-	return nvkm_mspdec_new_(&gt215_mspdec, device, index, pengine);
+	return nvkm_mspdec_new_(&gt215_mspdec, device, type, inst, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
index 86445a2600d0edc93f64aaa08c29189421ff8a69..2bc5537d40a3e777bfa19d1e0d029c732fc19052 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
@@ -3,8 +3,8 @@
 #define __NVKM_MSPDEC_PRIV_H__
 #include <engine/mspdec.h>
 
-int nvkm_mspdec_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
-		     int index, struct nvkm_engine **);
+int nvkm_mspdec_new_(const struct nvkm_falcon_func *, struct nvkm_device *, enum nvkm_subdev_type,
+		     int, struct nvkm_engine **);
 
 void g98_mspdec_init(struct nvkm_falcon *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
index bfae5e60e9259e88ffbf487e37e88f98d17de78a..45a9411ab2e28dd9c9d0e7046dd0334bf668aea0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
@@ -25,7 +25,7 @@
 
 int
 nvkm_msppp_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
-		int index, struct nvkm_engine **pengine)
+		enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
 {
-	return nvkm_falcon_new_(func, device, index, true, 0x086000, pengine);
+	return nvkm_falcon_new_(func, device, type, inst, true, 0x086000, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
index c45dbf79d1f9d1977da44da1306b1dd52846da7b..160120b9bd641ee2c24b8da57da71183e2e538b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
@@ -43,8 +43,8 @@ g98_msppp = {
 };
 
 int
-g98_msppp_new(struct nvkm_device *device, int index,
+g98_msppp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	      struct nvkm_engine **pengine)
 {
-	return nvkm_msppp_new_(&g98_msppp, device, index, pengine);
+	return nvkm_msppp_new_(&g98_msppp, device, type, inst, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
index 803c62ab516e107858f5c396e27c45be4755ce59..debed9ae873197c38a370ed922b612cc7776a7ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
@@ -43,8 +43,8 @@ gf100_msppp = {
 };
 
 int
-gf100_msppp_new(struct nvkm_device *device, int index,
+gf100_msppp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		struct nvkm_engine **pengine)
 {
-	return nvkm_msppp_new_(&gf100_msppp, device, index, pengine);
+	return nvkm_msppp_new_(&gf100_msppp, device, type, inst, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
index 49cbf72cee4bd6ac587317be85dd339551f33c20..a2fd736fef943adeed8650c4139b185c679f07d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
@@ -35,8 +35,8 @@ gt215_msppp = {
 };
 
 int
-gt215_msppp_new(struct nvkm_device *device, int index,
-	      struct nvkm_engine **pengine)
+gt215_msppp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		struct nvkm_engine **pengine)
 {
-	return nvkm_msppp_new_(&gt215_msppp, device, index, pengine);
+	return nvkm_msppp_new_(&gt215_msppp, device, type, inst, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
index f20b10915db262600688e74f5e08fa49b4209ab2..582ab8ce14255072401b052f709996320c2ccbbb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
@@ -3,8 +3,8 @@
 #define __NVKM_MSPPP_PRIV_H__
 #include <engine/msppp.h>
 
-int nvkm_msppp_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
-		    int index, struct nvkm_engine **);
+int nvkm_msppp_new_(const struct nvkm_falcon_func *, struct nvkm_device *, enum nvkm_subdev_type,
+		    int, struct nvkm_engine **);
 
 void g98_msppp_init(struct nvkm_falcon *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
index 745bbb653dc0890540f34bd4d5ee1e3204545057..7be42b980e577dcb7e364da48eb20ad73113b3a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
@@ -25,7 +25,7 @@
 
 int
 nvkm_msvld_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
-		int index, struct nvkm_engine **pengine)
+		enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
 {
-	return nvkm_falcon_new_(func, device, index, true, 0x084000, pengine);
+	return nvkm_falcon_new_(func, device, type, inst, true, 0x084000, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
index 4a2a9f0494af8aa625903ca5a7531808089ea03f..cfa2065319a6617c74483be34cc94d1ffcb3cb45 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
@@ -43,8 +43,8 @@ g98_msvld = {
 };
 
 int
-g98_msvld_new(struct nvkm_device *device, int index,
+g98_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	      struct nvkm_engine **pengine)
 {
-	return nvkm_msvld_new_(&g98_msvld, device, index, pengine);
+	return nvkm_msvld_new_(&g98_msvld, device, type, inst, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
index 1695e532c081f543fd3b0b7166188737e56804f4..8d58ad8e04d33460a15daaa916f92a89d9e434ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
@@ -43,8 +43,8 @@ gf100_msvld = {
 };
 
 int
-gf100_msvld_new(struct nvkm_device *device, int index,
+gf100_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		struct nvkm_engine **pengine)
 {
-	return nvkm_msvld_new_(&gf100_msvld, device, index, pengine);
+	return nvkm_msvld_new_(&gf100_msvld, device, type, inst, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
index b640cd63ebe8593f96bbb75503385e2090673c70..b28be28046f1e042705181d7ff2141c9d8a8f58e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
@@ -35,8 +35,8 @@ gk104_msvld = {
 };
 
 int
-gk104_msvld_new(struct nvkm_device *device, int index,
+gk104_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		struct nvkm_engine **pengine)
 {
-	return nvkm_msvld_new_(&gk104_msvld, device, index, pengine);
+	return nvkm_msvld_new_(&gk104_msvld, device, type, inst, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
index 201e8ef3519ec0941f2c0dde67733e91aff76d79..d7489f972c997341e59a24cdc880908b55c47466 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
@@ -35,8 +35,8 @@ gt215_msvld = {
 };
 
 int
-gt215_msvld_new(struct nvkm_device *device, int index,
-	      struct nvkm_engine **pengine)
+gt215_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		struct nvkm_engine **pengine)
 {
-	return nvkm_msvld_new_(&gt215_msvld, device, index, pengine);
+	return nvkm_msvld_new_(&gt215_msvld, device, type, inst, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
index a0f540ef257b8dbc418cc780f5644f0ca4bfc63e..16c30b62ab09985923a2af1b6273ce2c3c0c656b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
@@ -35,8 +35,8 @@ mcp89_msvld = {
 };
 
 int
-mcp89_msvld_new(struct nvkm_device *device, int index,
-	      struct nvkm_engine **pengine)
+mcp89_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		struct nvkm_engine **pengine)
 {
-	return nvkm_msvld_new_(&mcp89_msvld, device, index, pengine);
+	return nvkm_msvld_new_(&mcp89_msvld, device, type, inst, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
index 5cd1e83badbbb8656846144cefec7351570e1f83..f729d919b0540f7524e22bed810633b9792366d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
@@ -3,8 +3,8 @@
 #define __NVKM_MSVLD_PRIV_H__
 #include <engine/msvld.h>
 
-int nvkm_msvld_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
-		    int index, struct nvkm_engine **);
+int nvkm_msvld_new_(const struct nvkm_falcon_func *, struct nvkm_device *, enum nvkm_subdev_type,
+		    int, struct nvkm_engine **);
 
 void g98_msvld_init(struct nvkm_falcon *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
index 9b23c1b70ebfe07c9820e9976045b435b2cf33d8..b0181cc5953bc827a8477d360c7089925cd670ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
@@ -37,7 +37,7 @@ nvkm_nvdec = {
 
 int
 nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device,
-		int index, struct nvkm_nvdec **pnvdec)
+		enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec)
 {
 	struct nvkm_nvdec *nvdec;
 	int ret;
@@ -45,7 +45,7 @@ nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device,
 	if (!(nvdec = *pnvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL)))
 		return -ENOMEM;
 
-	ret = nvkm_engine_ctor(&nvkm_nvdec, device, index, true,
+	ret = nvkm_engine_ctor(&nvkm_nvdec, device, type, inst, true,
 			       &nvdec->engine);
 	if (ret)
 		return ret;
@@ -57,5 +57,5 @@ nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device,
 	nvdec->func = fwif->func;
 
 	return nvkm_falcon_ctor(nvdec->func->flcn, &nvdec->engine.subdev,
-				nvkm_subdev_name[index], 0, &nvdec->falcon);
+				nvdec->engine.subdev.name, 0, &nvdec->falcon);
 };
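
Beyond the signature change, nvkm_nvdec_new_() (and nvkm_nvenc_new_() /
nvkm_sec2_new_() below) stops indexing the global nvkm_subdev_name[] array and
passes the name already stored in the subdev itself, which is what lets
multiple instances of the same engine print as distinct units. A rough
userspace model of per-instance naming, on the assumption that names are
composed as the type name plus an instance suffix:

#include <stdio.h>

/* Hypothetical model: a non-negative instance number is appended to the
 * type name, so two copies of one engine no longer need separate entries
 * in a global name table. */
static void subdev_name(char *buf, size_t len, const char *type_name, int inst)
{
	if (inst >= 0)
		snprintf(buf, len, "%s%d", type_name, inst);
	else
		snprintf(buf, len, "%s", type_name);
}

int main(void)
{
	char name[16];

	subdev_name(name, sizeof(name), "nvdec", 0);
	puts(name);	/* "nvdec0" */
	subdev_name(name, sizeof(name), "gr", -1);
	puts(name);	/* "gr" */
	return 0;
}
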
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
index 0ab27ab4d8ee6d94e3541bc9cfec475d1a5c792c..8c44ce44a6d7b45c70627ebb1a855c56db8a9189 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
@@ -56,8 +56,8 @@ gm107_nvdec_fwif[] = {
 };
 
 int
-gm107_nvdec_new(struct nvkm_device *device, int index,
+gm107_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		struct nvkm_nvdec **pnvdec)
 {
-	return nvkm_nvdec_new_(gm107_nvdec_fwif, device, index, pnvdec);
+	return nvkm_nvdec_new_(gm107_nvdec_fwif, device, type, inst, pnvdec);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
index e14da8b000d02f815b6356756b982b1a06d4e4fd..0920f6a887e2a1f4b047507cb62dd00b678b7b44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -14,6 +14,6 @@ struct nvkm_nvdec_fwif {
 	const struct nvkm_nvdec_func *func;
 };
 
-int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif,
-		    struct nvkm_device *, int, struct nvkm_nvdec **);
+int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *,
+		    enum nvkm_subdev_type, int, struct nvkm_nvdec **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
index 484100e156680fcf0c05413d370e3077d810ba26..c39e797dc7c9425d7c9ceab5329ec681e0a5c162 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
@@ -39,7 +39,7 @@ nvkm_nvenc = {
 
 int
 nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
-		int index, struct nvkm_nvenc **pnvenc)
+		enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc)
 {
 	struct nvkm_nvenc *nvenc;
 	int ret;
@@ -47,7 +47,7 @@ nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
 	if (!(nvenc = *pnvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL)))
 		return -ENOMEM;
 
-	ret = nvkm_engine_ctor(&nvkm_nvenc, device, index, true,
+	ret = nvkm_engine_ctor(&nvkm_nvenc, device, type, inst, true,
 			       &nvenc->engine);
 	if (ret)
 		return ret;
@@ -59,5 +59,5 @@ nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
 	nvenc->func = fwif->func;
 
 	return nvkm_falcon_ctor(nvenc->func->flcn, &nvenc->engine.subdev,
-				nvkm_subdev_name[index], 0, &nvenc->falcon);
+				nvenc->engine.subdev.name, 0, &nvenc->falcon);
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
index d249c8ffb2d5c3b9374b1b1eb98a2ef32f0ad795..f44d41bf203409462743e48751a6b6d41216cc84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
@@ -56,8 +56,8 @@ gm107_nvenc_fwif[] = {
 };
 
 int
-gm107_nvenc_new(struct nvkm_device *device, int index,
+gm107_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		struct nvkm_nvenc **pnvenc)
 {
-	return nvkm_nvenc_new_(gm107_nvenc_fwif, device, index, pnvenc);
+	return nvkm_nvenc_new_(gm107_nvenc_fwif, device, type, inst, pnvenc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
index 100fa5ebbeefad4e93c6d70f899d4bda92dc3521..4130a2bfbb4f79b060eae00eb552f26a26a058d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
@@ -14,6 +14,6 @@ struct nvkm_nvenc_fwif {
 	const struct nvkm_nvenc_func *func;
 };
 
-int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *,
+int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, enum nvkm_subdev_type,
 		    int, struct nvkm_nvenc **pnvenc);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index b2785bee418efdfc239bf5e30df43f3e8b4b7a5f..8fe0444f761e5aa4fff4175d5ff96a9b418a479e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -628,10 +628,10 @@ nvkm_perfmon_dtor(struct nvkm_object *object)
 {
 	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
 	struct nvkm_pm *pm = perfmon->pm;
-	mutex_lock(&pm->engine.subdev.mutex);
-	if (pm->perfmon == &perfmon->object)
-		pm->perfmon = NULL;
-	mutex_unlock(&pm->engine.subdev.mutex);
+	spin_lock(&pm->client.lock);
+	if (pm->client.object == &perfmon->object)
+		pm->client.object = NULL;
+	spin_unlock(&pm->client.lock);
 	return perfmon;
 }
 
@@ -671,11 +671,11 @@ nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
 	if (ret)
 		return ret;
 
-	mutex_lock(&pm->engine.subdev.mutex);
-	if (pm->perfmon == NULL)
-		pm->perfmon = *pobject;
-	ret = (pm->perfmon == *pobject) ? 0 : -EBUSY;
-	mutex_unlock(&pm->engine.subdev.mutex);
+	spin_lock(&pm->client.lock);
+	if (pm->client.object == NULL)
+		pm->client.object = *pobject;
+	ret = (pm->client.object == *pobject) ? 0 : -EBUSY;
+	spin_unlock(&pm->client.lock);
 	return ret;
 }
 
@@ -858,10 +858,11 @@ nvkm_pm = {
 
 int
 nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
-	     int index, struct nvkm_pm *pm)
+	     enum nvkm_subdev_type type, int inst, struct nvkm_pm *pm)
 {
 	pm->func = func;
 	INIT_LIST_HEAD(&pm->domains);
 	INIT_LIST_HEAD(&pm->sources);
-	return nvkm_engine_ctor(&nvkm_pm, device, index, true, &pm->engine);
+	spin_lock_init(&pm->client.lock);
+	return nvkm_engine_ctor(&nvkm_pm, device, type, inst, true, &pm->engine);
 }
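
The pm/base.c hunks carry the one behavioural change in this stretch: the
exclusive-client guard moves off the subdev mutex onto a dedicated
pm->client spinlock, claimed in nvkm_pm_oclass_new() and cleared again in
nvkm_perfmon_dtor(). The claim/release logic is small enough to model in
userspace; a pthread mutex stands in for the kernel spinlock here:

#include <pthread.h>
#include <stdio.h>

struct client_slot {
	pthread_mutex_t lock;	/* stand-in for the kernel spinlock */
	void *object;		/* NULL while unclaimed */
};

/* Mirrors nvkm_pm_oclass_new(): first caller claims the slot, everyone
 * else gets -EBUSY (modelled as -1 here). */
static int slot_claim(struct client_slot *c, void *obj)
{
	int ret;

	pthread_mutex_lock(&c->lock);
	if (c->object == NULL)
		c->object = obj;
	ret = (c->object == obj) ? 0 : -1;
	pthread_mutex_unlock(&c->lock);
	return ret;
}

/* Mirrors nvkm_perfmon_dtor(): only the owner clears the slot. */
static void slot_release(struct client_slot *c, void *obj)
{
	pthread_mutex_lock(&c->lock);
	if (c->object == obj)
		c->object = NULL;
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct client_slot slot = { PTHREAD_MUTEX_INITIALIZER, NULL };
	int a, b;

	printf("first:  %d\n", slot_claim(&slot, &a));	/* 0 */
	printf("second: %d\n", slot_claim(&slot, &b));	/* -1: busy */
	slot_release(&slot, &a);
	printf("retry:  %d\n", slot_claim(&slot, &b));	/* 0 */
	return 0;
}
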
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
index 6e441ddafd86fdea39feec0967fed4755cc1c998..0086d00eb16230b009fa0706cd4dc199f139c05b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
@@ -159,7 +159,7 @@ g84_pm[] = {
 };
 
 int
-g84_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+g84_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
 {
-	return nv40_pm_new_(g84_pm, device, index, ppm);
+	return nv40_pm_new_(g84_pm, device, type, inst, ppm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
index fe2532ee4145b33effa772d583426dcd646eb39d..8e02701def8e8e3d43b1ef57134e01f78ad8a6f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
@@ -187,7 +187,7 @@ gf100_pm_ = {
 
 int
 gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_pm **ppm)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
 {
 	struct nvkm_pm *pm;
 	u32 mask;
@@ -196,7 +196,7 @@ gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device,
 	if (!(pm = *ppm = kzalloc(sizeof(*pm), GFP_KERNEL)))
 		return -ENOMEM;
 
-	ret = nvkm_pm_ctor(&gf100_pm_, device, index, pm);
+	ret = nvkm_pm_ctor(&gf100_pm_, device, type, inst, pm);
 	if (ret)
 		return ret;
 
@@ -237,7 +237,7 @@ gf100_pm = {
 };
 
 int
-gf100_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gf100_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
 {
-	return gf100_pm_new_(&gf100_pm, device, index, ppm);
+	return gf100_pm_new_(&gf100_pm, device, type, inst, ppm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
index 461bb219b1c0fb64eb7614c3a19d58f00004217d..bc4b014c4e8ebbaae1a6c094067d681b041c75a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
@@ -9,8 +9,8 @@ struct gf100_pm_func {
 	const struct nvkm_specdom *doms_part;
 };
 
-int gf100_pm_new_(const struct gf100_pm_func *, struct nvkm_device *,
-		  int index, struct nvkm_pm **);
+int gf100_pm_new_(const struct gf100_pm_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		  struct nvkm_pm **);
 
 extern const struct nvkm_funcdom gf100_perfctr_func;
 extern const struct nvkm_specdom gf100_pm_gpc[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
index 49b24c98a7f7fd221b4f7c0ea35e6f7ca47683ae..505565866b5911d518e5a546d888879a6b92d94a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
@@ -60,7 +60,7 @@ gf108_pm = {
 };
 
 int
-gf108_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gf108_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
 {
-	return gf100_pm_new_(&gf108_pm, device, index, ppm);
+	return gf100_pm_new_(&gf108_pm, device, type, inst, ppm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
index 9170025fc9887da8aab814d56c89c0208079dc5b..c61e8c010bb3a9a2df879438166a6285171dc3d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
@@ -74,7 +74,7 @@ gf117_pm = {
 };
 
 int
-gf117_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gf117_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
 {
-	return gf100_pm_new_(&gf117_pm, device, index, ppm);
+	return gf100_pm_new_(&gf117_pm, device, type, inst, ppm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
index 07f946d26ac6c5b68d54fd2fcb541aebe049a769..75bf3df1cb186e75a2648895f0edd39fe5ca0089 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
@@ -178,7 +178,7 @@ gk104_pm = {
 };
 
 int
-gk104_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gk104_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
 {
-	return gf100_pm_new_(&gk104_pm, device, index, ppm);
+	return gf100_pm_new_(&gk104_pm, device, type, inst, ppm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
index 5cf5dd536fd0fec9ebd1ccf00ed813188e28e78d..25874c5414865508dc17c9d55969541eef9f8d72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
@@ -151,7 +151,7 @@ gt200_pm[] = {
 };
 
 int
-gt200_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gt200_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
 {
-	return nv40_pm_new_(gt200_pm, device, index, ppm);
+	return nv40_pm_new_(gt200_pm, device, type, inst, ppm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
index c9227ad41b04bf3a0ceebda93fd5345ca3a89eee..54c23e2b6645f367fd0113a8b60e656ac9b76198 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
@@ -132,7 +132,7 @@ gt215_pm[] = {
 };
 
 int
-gt215_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gt215_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
 {
-	return nv40_pm_new_(gt215_pm, device, index, ppm);
+	return nv40_pm_new_(gt215_pm, device, type, inst, ppm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
index 3fda594700e023f71d3cc5b885ef80232e4873d9..eba5b3b79340455246ac812d756cc978a15596c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
@@ -80,7 +80,7 @@ nv40_pm_ = {
 
 int
 nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device,
-	     int index, struct nvkm_pm **ppm)
+	     enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
 {
 	struct nv40_pm *pm;
 	int ret;
@@ -89,7 +89,7 @@ nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device,
 		return -ENOMEM;
 	*ppm = &pm->base;
 
-	ret = nvkm_pm_ctor(&nv40_pm_, device, index, &pm->base);
+	ret = nvkm_pm_ctor(&nv40_pm_, device, type, inst, &pm->base);
 	if (ret)
 		return ret;
 
@@ -117,7 +117,7 @@ nv40_pm[] = {
 };
 
 int
-nv40_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+nv40_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
 {
-	return nv40_pm_new_(nv40_pm, device, index, ppm);
+	return nv40_pm_new_(nv40_pm, device, type, inst, ppm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
index 8ed19320fda10f9acb001c3bd6f01fa02678342a..afb79843723d95a889ab45ddc91e233905a10e70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
@@ -9,7 +9,7 @@ struct nv40_pm {
 	u32 sequence;
 };
 
-int nv40_pm_new_(const struct nvkm_specdom *, struct nvkm_device *,
-		 int index, struct nvkm_pm **);
+int nv40_pm_new_(const struct nvkm_specdom *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		 struct nvkm_pm **);
 extern const struct nvkm_funcdom nv40_perfctr_func;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
index cc5a41d4c6f226a6ed55a440db1b8915d2918080..bbd3404901f95351b28d0552aafe4ad126909a8a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
@@ -169,7 +169,7 @@ nv50_pm[] = {
 };
 
 int
-nv50_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+nv50_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
 {
-	return nv40_pm_new_(nv50_pm, device, index, ppm);
+	return nv40_pm_new_(nv50_pm, device, type, inst, ppm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
index cd6f8f79b235fe81a30c6a3e8b6bf7852fe3395e..6ae25d3e7f45c814759f8881827550dd80bde39a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
@@ -4,8 +4,8 @@
 #define nvkm_pm(p) container_of((p), struct nvkm_pm, engine)
 #include <engine/pm.h>
 
-int nvkm_pm_ctor(const struct nvkm_pm_func *, struct nvkm_device *,
-		 int index, struct nvkm_pm *);
+int nvkm_pm_ctor(const struct nvkm_pm_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		 struct nvkm_pm *);
 
 struct nvkm_pm_func {
 	void (*fini)(struct nvkm_pm *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
index 6d2a7f0afbb55d8bed5add0641a0f1a6c7cb5f37..1b87df03c8235fc729a81acf17140cf63841a332 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
@@ -74,9 +74,8 @@ g98_sec = {
 };
 
 int
-g98_sec_new(struct nvkm_device *device, int index,
+g98_sec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	    struct nvkm_engine **pengine)
 {
-	return nvkm_falcon_new_(&g98_sec, device, index,
-				true, 0x087000, pengine);
+	return nvkm_falcon_new_(&g98_sec, device, type, inst, true, 0x087000, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
index 41318aa0d481db2ab31a7e481dd1ddeede400824..092c6d0b8e01e172260f7a9ac2ce0a4f6e5d6cdf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
@@ -85,7 +85,7 @@ nvkm_sec2 = {
 
 int
 nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device,
-	       int index, u32 addr, struct nvkm_sec2 **psec2)
+	       enum nvkm_subdev_type type, int inst, u32 addr, struct nvkm_sec2 **psec2)
 {
 	struct nvkm_sec2 *sec2;
 	int ret;
@@ -93,7 +93,7 @@ nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device,
 	if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL)))
 		return -ENOMEM;
 
-	ret = nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine);
+	ret = nvkm_engine_ctor(&nvkm_sec2, device, type, inst, true, &sec2->engine);
 	if (ret)
 		return ret;
 
@@ -104,7 +104,7 @@ nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device,
 	sec2->func = fwif->func;
 
 	ret = nvkm_falcon_ctor(sec2->func->flcn, &sec2->engine.subdev,
-			       nvkm_subdev_name[index], addr, &sec2->falcon);
+			       sec2->engine.subdev.name, addr, &sec2->falcon);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
index bccf7acb7f98e772699b54454f33bb3d3a3360dc..44e39f5743d59f8ae1b1724c2ca40944ca1080c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
@@ -343,7 +343,8 @@ gp102_sec2_fwif[] = {
 };
 
 int
-gp102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
+gp102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_sec2 **psec2)
 {
-	return nvkm_sec2_new_(gp102_sec2_fwif, device, index, 0, psec2);
+	return nvkm_sec2_new_(gp102_sec2_fwif, device, type, inst, 0, psec2);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
index e770c9497871bc09086f2b4a7f5ddefe688c2327..3e9f5c842f3c5ff876f58211c3395682ab52a42d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
@@ -36,7 +36,8 @@ gp108_sec2_fwif[] = {
 };
 
 int
-gp108_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
+gp108_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_sec2 **psec2)
 {
-	return nvkm_sec2_new_(gp108_sec2_fwif, device, index, 0, psec2);
+	return nvkm_sec2_new_(gp108_sec2_fwif, device, type, inst, 0, psec2);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
index 8cbc0b7d0a2752e0485b4831905e333b79e7566f..af19229e885dd1edb9a890cd2d74fe3319366bc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
@@ -25,6 +25,6 @@ int gp102_sec2_load(struct nvkm_sec2 *, int, const struct nvkm_sec2_fwif *);
 extern const struct nvkm_sec2_func gp102_sec2;
 extern const struct nvkm_acr_lsf_func gp102_sec2_acr_1;
 
-int nvkm_sec2_new_(const struct nvkm_sec2_fwif *, struct nvkm_device *,
+int nvkm_sec2_new_(const struct nvkm_sec2_fwif *, struct nvkm_device *, enum nvkm_subdev_type,
 		   int, u32 addr, struct nvkm_sec2 **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
index a231c1c6c0a5ef1db04268918425dd87f0676601..f3faeb70557559049a111727e1a01edc4e0abfba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
@@ -72,10 +72,11 @@ tu102_sec2_fwif[] = {
 };
 
 int
-tu102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
+tu102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_sec2 **psec2)
 {
 	/* TOP info wasn't updated on Turing to reflect the PRI
 	 * address change for some reason.  We override it here.
 	 */
-	return nvkm_sec2_new_(tu102_sec2_fwif, device, index, 0x840000, psec2);
+	return nvkm_sec2_new_(tu102_sec2_fwif, device, type, inst, 0x840000, psec2);
 }
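
The trailing u32 addr that the sec2 constructors thread through acts as an
optional PRI-base override: gp102/gp108 pass 0 and keep the address the device
tables report, while tu102 pins 0x840000 because, per the comment above, TOP
was never corrected for Turing. A sketch of that zero-means-default
convention (the table base used here is illustrative only):

#include <stdio.h>

/* Assumption drawn from the callers above: addr == 0 means "use the base
 * the device tables reported", a non-zero addr pins the PRI base. */
static unsigned int sec2_base(unsigned int top_base, unsigned int override)
{
	return override ? override : top_base;
}

int main(void)
{
	printf("gp102: %#x\n", sec2_base(0x087000, 0));		/* table base */
	printf("tu102: %#x\n", sec2_base(0x087000, 0x840000));	/* pinned    */
	return 0;
}
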
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
index 7be3198e11de402e413083729cacbda3f51181f7..14871d0bd74630e4f98ba6f287c81488c35c1513 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
@@ -97,7 +97,7 @@ nvkm_sw = {
 
 int
 nvkm_sw_new_(const struct nvkm_sw_func *func, struct nvkm_device *device,
-	     int index, struct nvkm_sw **psw)
+	     enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
 {
 	struct nvkm_sw *sw;
 
@@ -106,5 +106,5 @@ nvkm_sw_new_(const struct nvkm_sw_func *func, struct nvkm_device *device,
 	INIT_LIST_HEAD(&sw->chan);
 	sw->func = func;
 
-	return nvkm_engine_ctor(&nvkm_sw, device, index, true, &sw->engine);
+	return nvkm_engine_ctor(&nvkm_sw, device, type, inst, true, &sw->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
index ea8f4247b628d4acfe97c5b7494a51822f970c27..55abf839f29d5cd66050f4daf81d86233a3221c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
@@ -149,7 +149,7 @@ gf100_sw = {
 };
 
 int
-gf100_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+gf100_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
 {
-	return nvkm_sw_new_(&gf100_sw, device, index, psw);
+	return nvkm_sw_new_(&gf100_sw, device, type, inst, psw);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
index b6675fe1b0ce97876e567595c312a9f1aff032a4..4aa57573869c95b5152b07a82412594f65931123 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
@@ -133,7 +133,7 @@ nv04_sw = {
 };
 
 int
-nv04_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+nv04_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
 {
-	return nvkm_sw_new_(&nv04_sw, device, index, psw);
+	return nvkm_sw_new_(&nv04_sw, device, type, inst, psw);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
index 09d22fcd194c0cf3b036e0da0524206e1264a693..e79e640ae535feb0606db25b75704dcb49a496eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
@@ -62,7 +62,7 @@ nv10_sw = {
 };
 
 int
-nv10_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+nv10_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
 {
-	return nvkm_sw_new_(&nv10_sw, device, index, psw);
+	return nvkm_sw_new_(&nv10_sw, device, type, inst, psw);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
index 01573d187f2c66aac93139d6ecd7c06b5decc800..1fdd094c8b7e518a059943c9d50def310cc58129 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
@@ -142,7 +142,7 @@ nv50_sw = {
 };
 
 int
-nv50_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+nv50_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
 {
-	return nvkm_sw_new_(&nv50_sw, device, index, psw);
+	return nvkm_sw_new_(&nv50_sw, device, type, inst, psw);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
index 6d18fc6180f2904cf58ea3ea9c2140ebbb1b0f45..d9d83b1b884955e1916b3a479456f037b7540893 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
@@ -5,8 +5,8 @@
 #include <engine/sw.h>
 struct nvkm_sw_chan;
 
-int nvkm_sw_new_(const struct nvkm_sw_func *, struct nvkm_device *,
-		 int index, struct nvkm_sw **);
+int nvkm_sw_new_(const struct nvkm_sw_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		 struct nvkm_sw **);
 
 struct nvkm_sw_chan_sclass {
 	int (*ctor)(struct nvkm_sw_chan *, const struct nvkm_oclass *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
index 7a96178786c4ca6a90bccc3bbd6ba86318801c63..b502266c76fd963f24458ecd6851bce5a234a0c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
@@ -36,8 +36,8 @@ g84_vp = {
 };
 
 int
-g84_vp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
+g84_vp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	   struct nvkm_engine **pengine)
 {
-	return nvkm_xtensa_new_(&g84_vp, device, index,
-				true, 0x00f000, pengine);
+	return nvkm_xtensa_new_(&g84_vp, device, type, inst, true, 0x00f000, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
index 70549381e082833ffcf4489c22d6c778babf5b6e..f7d3ba0afb556495979eced21bb8137bd6dbe737 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
@@ -175,9 +175,9 @@ nvkm_xtensa = {
 };
 
 int
-nvkm_xtensa_new_(const struct nvkm_xtensa_func *func,
-		 struct nvkm_device *device, int index, bool enable,
-		 u32 addr, struct nvkm_engine **pengine)
+nvkm_xtensa_new_(const struct nvkm_xtensa_func *func, struct nvkm_device *device,
+		 enum nvkm_subdev_type type, int inst, bool enable, u32 addr,
+		 struct nvkm_engine **pengine)
 {
 	struct nvkm_xtensa *xtensa;
 
@@ -187,6 +187,5 @@ nvkm_xtensa_new_(const struct nvkm_xtensa_func *func,
 	xtensa->addr = addr;
 	*pengine = &xtensa->engine;
 
-	return nvkm_engine_ctor(&nvkm_xtensa, device, index,
-				enable, &xtensa->engine);
+	return nvkm_engine_ctor(&nvkm_xtensa, device, type, inst, enable, &xtensa->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index c6a3448180d6fdb9102026bbbd17bffad1ca1775..262641a014b066ff346f888f889006f83aa0ecce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -88,13 +88,12 @@ int
 nvkm_falcon_enable(struct nvkm_falcon *falcon)
 {
 	struct nvkm_device *device = falcon->owner->device;
-	enum nvkm_devidx id = falcon->owner->index;
 	int ret;
 
-	nvkm_mc_enable(device, id);
+	nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst);
 	ret = falcon->func->enable(falcon);
 	if (ret) {
-		nvkm_mc_disable(device, id);
+		nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
 		return ret;
 	}
 
@@ -105,15 +104,14 @@ void
 nvkm_falcon_disable(struct nvkm_falcon *falcon)
 {
 	struct nvkm_device *device = falcon->owner->device;
-	enum nvkm_devidx id = falcon->owner->index;
 
 	/* already disabled, return or wait_idle will timeout */
-	if (!nvkm_mc_enabled(device, id))
+	if (!nvkm_mc_enabled(device, falcon->owner->type, falcon->owner->inst))
 		return;
 
 	falcon->func->disable(falcon);
 
-	nvkm_mc_disable(device, id);
+	nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
 }
 
 int
@@ -143,7 +141,7 @@ nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
 	u32 reg;
 
 	if (!falcon->addr) {
-		falcon->addr = nvkm_top_addr(subdev->device, subdev->index);
+		falcon->addr = nvkm_top_addr(subdev->device, subdev->type, subdev->inst);
 		if (WARN_ON(!falcon->addr))
 			return -ENODEV;
 	}
@@ -188,7 +186,7 @@ nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
 	mutex_lock(&falcon->mutex);
 	if (falcon->user) {
 		nvkm_error(user, "%s falcon already acquired by %s!\n",
-			   falcon->name, nvkm_subdev_name[falcon->user->index]);
+			   falcon->name, falcon->user->name);
 		mutex_unlock(&falcon->mutex);
 		return -EBUSY;
 	}
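
The falcon code above shows the consumer side of the change: a subdev now carries its type and inst directly, plus a preformatted name string, so lookup tables keyed on the old flat index (nvkm_subdev_name[...]) disappear. A hedged sketch of how such a name could be derived from the pair; the exact format used by nvkm_subdev_ctor is an assumption here, not taken from this patch:

    /* Hypothetical helper: build a printable subdev name from
     * (type, inst); single-instance subdevs (inst < 0) drop the
     * numeric suffix. */
    static void
    example_subdev_name(char *name, size_t size, const char *base, int inst)
    {
            if (inst >= 0)
                    snprintf(name, size, "%s%d", base, inst); /* e.g. "ce0" */
            else
                    snprintf(name, size, "%s", base);         /* e.g. "bar" */
    }
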
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index fb4fff1222afea261c23ade9b13dd95f44f7ddb4..2cb24fff7e321585ea48923335aac8cfd00c3ab9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -11,7 +11,6 @@ include $(src)/nvkm/subdev/fuse/Kbuild
 include $(src)/nvkm/subdev/gpio/Kbuild
 include $(src)/nvkm/subdev/gsp/Kbuild
 include $(src)/nvkm/subdev/i2c/Kbuild
-include $(src)/nvkm/subdev/ibus/Kbuild
 include $(src)/nvkm/subdev/iccsense/Kbuild
 include $(src)/nvkm/subdev/instmem/Kbuild
 include $(src)/nvkm/subdev/ltc/Kbuild
@@ -20,6 +19,7 @@ include $(src)/nvkm/subdev/mmu/Kbuild
 include $(src)/nvkm/subdev/mxm/Kbuild
 include $(src)/nvkm/subdev/pci/Kbuild
 include $(src)/nvkm/subdev/pmu/Kbuild
+include $(src)/nvkm/subdev/privring/Kbuild
 include $(src)/nvkm/subdev/therm/Kbuild
 include $(src)/nvkm/subdev/timer/Kbuild
 include $(src)/nvkm/subdev/top/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
index c962df9910dd46a71658a23ce7cb6b586392ed12..af6cac696d434c8ac3785427940c477e97408b1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
@@ -410,14 +410,14 @@ nvkm_acr_ctor_wpr(struct nvkm_acr *acr, int ver)
 
 int
 nvkm_acr_new_(const struct nvkm_acr_fwif *fwif, struct nvkm_device *device,
-	      int index, struct nvkm_acr **pacr)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_acr **pacr)
 {
 	struct nvkm_acr *acr;
 	long wprfw;
 
 	if (!(acr = *pacr = kzalloc(sizeof(*acr), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&nvkm_acr, device, index, &acr->subdev);
+	nvkm_subdev_ctor(&nvkm_acr, device, type, inst, &acr->subdev);
 	INIT_LIST_HEAD(&acr->hsfw);
 	INIT_LIST_HEAD(&acr->lsfw);
 	INIT_LIST_HEAD(&acr->hsf);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
index cd41b2e6cc8795feca6152c8188028be96163d22..cdb1ead26d84f0efb0076ef775dfb77b6b757675 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
@@ -262,7 +262,7 @@ gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf,
 	hsf->func->bld(acr, hsf);
 
 	/* Boot the falcon. */
-	nvkm_mc_intr_mask(device, falcon->owner->index, false);
+	nvkm_mc_intr_mask(device, falcon->owner->type, falcon->owner->inst, false);
 
 	nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
 	nvkm_falcon_set_start_addr(falcon, hsf->imem_tag << 8);
@@ -279,7 +279,7 @@ gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf,
 		return -EIO;
 
 	nvkm_falcon_clear_interrupt(falcon, intr_clear);
-	nvkm_mc_intr_mask(device, falcon->owner->index, true);
+	nvkm_mc_intr_mask(device, falcon->owner->type, falcon->owner->inst, true);
 	return ret;
 }
 
@@ -478,7 +478,8 @@ gm200_acr_fwif[] = {
 };
 
 int
-gm200_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+gm200_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_acr **pacr)
 {
-	return nvkm_acr_new_(gm200_acr_fwif, device, index, pacr);
+	return nvkm_acr_new_(gm200_acr_fwif, device, type, inst, pacr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
index b1ecc58152ccab978ceb027f24d8399ace1c2b5c..54e996f2f630b22217675fd2437089f84ea96db7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
@@ -129,7 +129,8 @@ gm20b_acr_fwif[] = {
 };
 
 int
-gm20b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+gm20b_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_acr **pacr)
 {
-	return nvkm_acr_new_(gm20b_acr_fwif, device, index, pacr);
+	return nvkm_acr_new_(gm20b_acr_fwif, device, type, inst, pacr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
index 80eb9d8dbc8037f5de38aaf0f25027298cef1ad1..fb9132a39bb1a5be122cf23fcb074a901ee9a435 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
@@ -276,7 +276,8 @@ gp102_acr_fwif[] = {
 };
 
 int
-gp102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+gp102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_acr **pacr)
 {
-	return nvkm_acr_new_(gp102_acr_fwif, device, index, pacr);
+	return nvkm_acr_new_(gp102_acr_fwif, device, type, inst, pacr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
index 67a7c141004bd30a2b820731a4633da1a8b2e47f..373d638a21777e1a5d20db13fd0a3551e0e6eb94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
@@ -106,7 +106,8 @@ gp108_acr_fwif[] = {
 };
 
 int
-gp108_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+gp108_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_acr **pacr)
 {
-	return nvkm_acr_new_(gp108_acr_fwif, device, index, pacr);
+	return nvkm_acr_new_(gp108_acr_fwif, device, type, inst, pacr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
index 8249f0d2d81d7737281d125e058cc7b94dc56716..f03ba028867b6691899a4d329f9fa29f67d21d42 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
@@ -52,7 +52,8 @@ gp10b_acr_fwif[] = {
 };
 
 int
-gp10b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+gp10b_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_acr **pacr)
 {
-	return nvkm_acr_new_(gp10b_acr_fwif, device, index, pacr);
+	return nvkm_acr_new_(gp10b_acr_fwif, device, type, inst, pacr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
index d71af17a169a40e6837bffb56de71b93dbd27828..c30b841c9d352f5b1b1655979d47029cbc51922e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
@@ -135,8 +135,8 @@ int gp102_acr_load_load(struct nvkm_acr *, struct nvkm_acr_hsfw *);
 extern const struct nvkm_acr_hsf_func gp108_acr_unload_0;
 void gp108_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *);
 
-int nvkm_acr_new_(const struct nvkm_acr_fwif *, struct nvkm_device *, int,
-		  struct nvkm_acr **);
+int nvkm_acr_new_(const struct nvkm_acr_fwif *, struct nvkm_device *, enum nvkm_subdev_type,
+		  int inst, struct nvkm_acr **);
 int nvkm_acr_hsf_boot(struct nvkm_acr *, const char *name);
 
 struct nvkm_acr_lsf {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
index c4981bce9a2b6a735de8e36c1d314fc416c1f5b7..05a87e77525f61e4c0fb3bc271ad0169f4e0edf0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
@@ -224,7 +224,8 @@ tu102_acr_fwif[] = {
 };
 
 int
-tu102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+tu102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_acr **pacr)
 {
-	return nvkm_acr_new_(tu102_acr_fwif, device, index, pacr);
+	return nvkm_acr_new_(tu102_acr_fwif, device, type, inst, pacr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index 209a6a40834a0f21d6a341f008337a2730f848bc..d017a1b5e5dd55f23ce0ef7d752727b07ac86ab1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -134,9 +134,9 @@ nvkm_bar = {
 
 void
 nvkm_bar_ctor(const struct nvkm_bar_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_bar *bar)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_bar *bar)
 {
-	nvkm_subdev_ctor(&nvkm_bar, device, index, &bar->subdev);
+	nvkm_subdev_ctor(&nvkm_bar, device, type, inst, &bar->subdev);
 	bar->func = func;
 	spin_lock_init(&bar->lock);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
index 87f26f54b48184dc6437cd2e047cd5dc18afc4a9..77a41bcf860e9f39f03dd5c43addd24cc949504d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
@@ -56,7 +56,8 @@ g84_bar_func = {
 };
 
 int
-g84_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+g84_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	    struct nvkm_bar **pbar)
 {
-	return nv50_bar_new_(&g84_bar_func, device, index, 0x200, pbar);
+	return nv50_bar_new_(&g84_bar_func, device, type, inst, 0x200, pbar);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index a3dcb09a40ee374747901bfdd07c33a6859ad9b1..51070b7dda85a8a584bac3c0fddf002131b1aa00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -162,12 +162,12 @@ gf100_bar_dtor(struct nvkm_bar *base)
 
 int
 gf100_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
-	       int index, struct nvkm_bar **pbar)
+	       enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar)
 {
 	struct gf100_bar *bar;
 	if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_bar_ctor(func, device, index, &bar->base);
+	nvkm_bar_ctor(func, device, type, inst, &bar->base);
 	bar->bar2_halve = nvkm_boolopt(device->cfgopt, "NvBar2Halve", false);
 	*pbar = &bar->base;
 	return 0;
@@ -189,7 +189,8 @@ gf100_bar_func = {
 };
 
 int
-gf100_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+gf100_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_bar **pbar)
 {
-	return gf100_bar_new_(&gf100_bar_func, device, index, pbar);
+	return gf100_bar_new_(&gf100_bar_func, device, type, inst, pbar);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
index 4ae4c7145712caf17178394feb39a08e9b504a64..328a68b418d931c9a19a890f42625b88f50a04f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
@@ -15,7 +15,7 @@ struct gf100_bar {
 	struct gf100_barN bar[2];
 };
 
-int gf100_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
+int gf100_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *, enum nvkm_subdev_type,
 		   int, struct nvkm_bar **);
 void *gf100_bar_dtor(struct nvkm_bar *);
 int gf100_bar_oneinit(struct nvkm_bar *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index 35878fb538f2c58fb1116c367c2314b3a391e7bd..eead8ab8839349429e7130f97eb59f5a8dbb21d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -32,9 +32,10 @@ gk20a_bar_func = {
 };
 
 int
-gk20a_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+gk20a_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_bar **pbar)
 {
-	int ret = gf100_bar_new_(&gk20a_bar_func, device, index, pbar);
+	int ret = gf100_bar_new_(&gk20a_bar_func, device, type, inst, pbar);
 	if (ret == 0)
 		(*pbar)->iomap_uncached = true;
 	return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c
index 3ddf9222d9351d633ff445545033c6b850a55a69..da95307a79129a81ac01fa77c2639e56ab2c2e3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c
@@ -59,7 +59,8 @@ gm107_bar_func = {
 };
 
 int
-gm107_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+gm107_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_bar **pbar)
 {
-	return gf100_bar_new_(&gm107_bar_func, device, index, pbar);
+	return gf100_bar_new_(&gm107_bar_func, device, type, inst, pbar);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
index 1ed6170891c4285d9a5408c4e0d445b42a5c3c01..4acdb4fb01070525d1fc98627a6fe832e05a4400 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
@@ -32,9 +32,10 @@ gm20b_bar_func = {
 };
 
 int
-gm20b_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+gm20b_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_bar **pbar)
 {
-	int ret = gf100_bar_new_(&gm20b_bar_func, device, index, pbar);
+	int ret = gf100_bar_new_(&gm20b_bar_func, device, type, inst, pbar);
 	if (ret == 0)
 		(*pbar)->iomap_uncached = true;
 	return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index f23a0ccc2becca6d733089941d4850d3b410d567..27d8a1be43e4ff85e8ab5ffe7138211884a4daac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -220,12 +220,12 @@ nv50_bar_dtor(struct nvkm_bar *base)
 
 int
 nv50_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
-	      int index, u32 pgd_addr, struct nvkm_bar **pbar)
+	      enum nvkm_subdev_type type, int inst, u32 pgd_addr, struct nvkm_bar **pbar)
 {
 	struct nv50_bar *bar;
 	if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_bar_ctor(func, device, index, &bar->base);
+	nvkm_bar_ctor(func, device, type, inst, &bar->base);
 	bar->pgd_addr = pgd_addr;
 	*pbar = &bar->base;
 	return 0;
@@ -248,7 +248,8 @@ nv50_bar_func = {
 };
 
 int
-nv50_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+nv50_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_bar **pbar)
 {
-	return nv50_bar_new_(&nv50_bar_func, device, index, 0x1400, pbar);
+	return nv50_bar_new_(&nv50_bar_func, device, type, inst, 0x1400, pbar);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
index e4193deb2e51ca70a5a5f128c5821724d3a01b85..dedee9394079585bbb8cc80191f425a2a3bf1c7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
@@ -16,7 +16,7 @@ struct nv50_bar {
 	struct nvkm_gpuobj *bar2;
 };
 
-int nv50_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
+int nv50_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *, enum nvkm_subdev_type,
 		  int, u32 pgd_addr, struct nvkm_bar **);
 void *nv50_bar_dtor(struct nvkm_bar *);
 int nv50_bar_oneinit(struct nvkm_bar *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
index 869ad184f9231b9b06e79f70b92b767e105ce934..daebfc991c76e6b8e3f1a004a7e364807b2dfac5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -5,7 +5,7 @@
 #include <subdev/bar.h>
 
 void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *,
-		   int, struct nvkm_bar *);
+		   enum nvkm_subdev_type, int, struct nvkm_bar *);
 
 struct nvkm_bar_func {
 	void *(*dtor)(struct nvkm_bar *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
index 798f65ec3a86c457ea6789205667f93d66d60bbb..c25ab407b85d9182ff26ba361ac23dadde8b87a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
@@ -92,7 +92,8 @@ tu102_bar = {
 };
 
 int
-tu102_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+tu102_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_bar **pbar)
 {
-	return gf100_bar_new_(&tu102_bar, device, index, pbar);
+	return gf100_bar_new_(&tu102_bar, device, type, inst, pbar);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index f3c30b2a788e8534fe72c23729779afd5e39d573..d0f52d59fc2f9f6eef9ef79e123ace125c9e1bc4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -140,7 +140,8 @@ nvkm_bios = {
 };
 
 int
-nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
+nvkm_bios_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_bios **pbios)
 {
 	struct nvkm_bios *bios;
 	struct nvbios_image image;
@@ -149,7 +150,7 @@ nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
 
 	if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&nvkm_bios, device, index, &bios->subdev);
+	nvkm_subdev_ctor(&nvkm_bios, device, type, inst, &bios->subdev);
 
 	ret = nvbios_shadow(bios);
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
index 52ad73bce5febd0a1d8b166ca9422fa4e59e7c3f..0e5a46db52eaf795befb7bf8033e035596c11419 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
@@ -53,12 +53,12 @@ nvkm_bus = {
 
 int
 nvkm_bus_new_(const struct nvkm_bus_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_bus **pbus)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_bus **pbus)
 {
 	struct nvkm_bus *bus;
 	if (!(bus = *pbus = kzalloc(sizeof(*bus), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&nvkm_bus, device, index, &bus->subdev);
+	nvkm_subdev_ctor(&nvkm_bus, device, type, inst, &bus->subdev);
 	bus->func = func;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
index 9700b5c01cc69ca5243e339deec7a55922f0efb9..a0d6e2d3f804c9dca877d8f75fe449dab6beb176 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
@@ -58,7 +58,8 @@ g94_bus = {
 };
 
 int
-g94_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+g94_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	    struct nvkm_bus **pbus)
 {
-	return nvkm_bus_new_(&g94_bus, device, index, pbus);
+	return nvkm_bus_new_(&g94_bus, device, type, inst, pbus);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
index e0930d5fdfb188ab7f7ecf9cb5860ffba4a5e541..53a6651ac22581dc29a2702613a2275dd99cf88f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
@@ -40,7 +40,7 @@ gf100_bus_intr(struct nvkm_bus *bus)
 			   (addr & 0x00000002) ? "write" : "read", data,
 			   (addr & 0x00fffffc),
 			   (stat & 0x00000002) ? "!ENGINE " : "",
-			   (stat & 0x00000004) ? "IBUS " : "",
+			   (stat & 0x00000004) ? "PRIVRING " : "",
 			   (stat & 0x00000008) ? "TIMEOUT " : "");
 
 		nvkm_wr32(device, 0x009084, 0x00000000);
@@ -69,7 +69,8 @@ gf100_bus = {
 };
 
 int
-gf100_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+gf100_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_bus **pbus)
 {
-	return nvkm_bus_new_(&gf100_bus, device, index, pbus);
+	return nvkm_bus_new_(&gf100_bus, device, type, inst, pbus);
 }
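
The gf100_bus_intr() hunk only renames the reported cause from IBUS to PRIVRING; the decode itself is untouched. For reference, a standalone sketch of that decode, using the bit assignments visible in the hunk (simplified to one cause per call, whereas the driver concatenates all set bits into a single message):

    /* Bit meanings from the hunk above: 0x2 = !ENGINE, 0x4 = PRIVRING
     * (formerly reported as IBUS), 0x8 = TIMEOUT. */
    static const char *
    example_bus_fault_cause(u32 stat)
    {
            if (stat & 0x00000004)
                    return "PRIVRING";
            if (stat & 0x00000008)
                    return "TIMEOUT";
            if (stat & 0x00000002)
                    return "!ENGINE";
            return "UNKNOWN";
    }
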
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
index 2b44ba5cf4b0bf2e76917d528510aeeee4282caa..cfed17c062ea8ef3841d6890ac3a2608b6e9b6c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
@@ -68,7 +68,8 @@ nv04_bus = {
 };
 
 int
-nv04_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+nv04_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_bus **pbus)
 {
-	return nvkm_bus_new_(&nv04_bus, device, index, pbus);
+	return nvkm_bus_new_(&nv04_bus, device, type, inst, pbus);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
index 5153d89e1f0b5f7a1f90da7460dc0029aabf30dd..ad8da523bb22e3e95862cf873126ae1451860394 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
@@ -82,7 +82,8 @@ nv31_bus = {
 };
 
 int
-nv31_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+nv31_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_bus **pbus)
 {
-	return nvkm_bus_new_(&nv31_bus, device, index, pbus);
+	return nvkm_bus_new_(&nv31_bus, device, type, inst, pbus);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
index 19e10fdc9291885b0af44cf79f643ce3939f2bdd..3a1e45adeedc1c6d36b82eec3477485f29254ace 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
@@ -99,7 +99,8 @@ nv50_bus = {
 };
 
 int
-nv50_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+nv50_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_bus **pbus)
 {
-	return nvkm_bus_new_(&nv50_bus, device, index, pbus);
+	return nvkm_bus_new_(&nv50_bus, device, type, inst, pbus);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
index 76f7ba1c64940820da5cb1dc015788fcfa7a50d6..2e9345b17cf808153ca22c934a2bc4a1b56977f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
@@ -11,7 +11,7 @@ struct nvkm_bus_func {
 	u32 hwsq_size;
 };
 
-int nvkm_bus_new_(const struct nvkm_bus_func *, struct nvkm_device *, int,
+int nvkm_bus_new_(const struct nvkm_bus_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
 		  struct nvkm_bus **);
 
 void nv50_bus_init(struct nvkm_bus *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index dc184e857f857853fdca108b780e1155bac70321..57199be082fd361e45b9d131fa76148843eaedc3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -649,7 +649,7 @@ nvkm_clk = {
 
 int
 nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
-	      int index, bool allow_reclock, struct nvkm_clk *clk)
+	      enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk *clk)
 {
 	struct nvkm_subdev *subdev = &clk->subdev;
 	struct nvkm_bios *bios = device->bios;
@@ -657,7 +657,7 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
 	const char *mode;
 	struct nvbios_vpstate_header h;
 
-	nvkm_subdev_ctor(&nvkm_clk, device, index, subdev);
+	nvkm_subdev_ctor(&nvkm_clk, device, type, inst, subdev);
 
 	if (bios && !nvbios_vpstate_parse(bios, &h)) {
 		struct nvbios_vpstate_entry base, boost;
@@ -716,9 +716,9 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
 
 int
 nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
-	      int index, bool allow_reclock, struct nvkm_clk **pclk)
+	      enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk **pclk)
 {
 	if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
 		return -ENOMEM;
-	return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
+	return nvkm_clk_ctor(func, device, type, inst, allow_reclock, *pclk);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
index f97e3ec196bb03c9f35b80c5ac78aacc4a92eeb9..07157cf53c9e8992a9c02f2468d3bd74af15e1f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
@@ -41,8 +41,8 @@ g84_clk = {
 };
 
 int
-g84_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+g84_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	    struct nvkm_clk **pclk)
 {
-	return nv50_clk_new_(&g84_clk, device, index,
-			     (device->chipset >= 0x94), pclk);
+	return nv50_clk_new_(&g84_clk, device, type, inst, (device->chipset >= 0x94), pclk);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 7f67f9f5a550120e405337be5f5bc50bd7a419dd..6eea11aefb70e893814ab2aaffd25bedcdc541e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -468,7 +468,8 @@ gf100_clk = {
 };
 
 int
-gf100_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+gf100_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_clk **pclk)
 {
 	struct gf100_clk *clk;
 
@@ -476,5 +477,5 @@ gf100_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 		return -ENOMEM;
 	*pclk = &clk->base;
 
-	return nvkm_clk_ctor(&gf100_clk, device, index, false, &clk->base);
+	return nvkm_clk_ctor(&gf100_clk, device, type, inst, false, &clk->base);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index 0b37e3da7feb6b8d0e7eecbfe64b7ee958fbe4b3..0d8e2ddcc5eebf366bccb57a0956796da7b788a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -504,7 +504,8 @@ gk104_clk = {
 };
 
 int
-gk104_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+gk104_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_clk **pclk)
 {
 	struct gk104_clk *clk;
 
@@ -512,5 +513,5 @@ gk104_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 		return -ENOMEM;
 	*pclk = &clk->base;
 
-	return nvkm_clk_ctor(&gk104_clk, device, index, true, &clk->base);
+	return nvkm_clk_ctor(&gk104_clk, device, type, inst, true, &clk->base);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index 218893e3e5f92b95efa613e2f4269b4d19d0e8bb..d573fb0917fc535437a0b81bc3d88c56b036fb22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -610,10 +610,9 @@ gk20a_clk = {
 };
 
 int
-gk20a_clk_ctor(struct nvkm_device *device, int index,
-		const struct nvkm_clk_func *func,
-		const struct gk20a_clk_pllg_params *params,
-		struct gk20a_clk *clk)
+gk20a_clk_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       const struct nvkm_clk_func *func, const struct gk20a_clk_pllg_params *params,
+	       struct gk20a_clk *clk)
 {
 	struct nvkm_device_tegra *tdev = device->func->tegra(device);
 	int ret;
@@ -628,7 +627,7 @@ gk20a_clk_ctor(struct nvkm_device *device, int index,
 	clk->params = params;
 	clk->parent_rate = clk_get_rate(tdev->clk);
 
-	ret = nvkm_clk_ctor(func, device, index, true, &clk->base);
+	ret = nvkm_clk_ctor(func, device, type, inst, true, &clk->base);
 	if (ret)
 		return ret;
 
@@ -639,7 +638,8 @@ gk20a_clk_ctor(struct nvkm_device *device, int index,
 }
 
 int
-gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+gk20a_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_clk **pclk)
 {
 	struct gk20a_clk *clk;
 	int ret;
@@ -649,11 +649,9 @@ gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 		return -ENOMEM;
 	*pclk = &clk->base;
 
-	ret = gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
-			      clk);
+	ret = gk20a_clk_ctor(device, type, inst, &gk20a_clk, &gk20a_pllg_params, clk);
 
 	clk->pl_to_div = pl_to_div;
 	clk->div_to_pl = div_to_pl;
-
 	return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
index 0d1450972162cb158dc727bb964aed67bdd56446..286413ff4a9ec7f2273c9446ac7a15eb1a843aeb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
@@ -146,8 +146,8 @@ gk20a_pllg_n_lo(struct gk20a_clk *clk, struct gk20a_pll *pll)
 			    clk->parent_rate / KHZ);
 }
 
-int gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
-		    const struct gk20a_clk_pllg_params *, struct gk20a_clk *);
+int gk20a_clk_ctor(struct nvkm_device *, enum nvkm_subdev_type, int, const struct nvkm_clk_func *,
+		   const struct gk20a_clk_pllg_params *, struct gk20a_clk *);
 void gk20a_clk_fini(struct nvkm_clk *);
 int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src);
 int gk20a_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
index b284e949f73240c2702e87113d696523b7baea73..a139dafffe0630126b56011ccea6896c3ef09e58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
@@ -908,7 +908,7 @@ gm20b_clk = {
 };
 
 static int
-gm20b_clk_new_speedo0(struct nvkm_device *device, int index,
+gm20b_clk_new_speedo0(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		      struct nvkm_clk **pclk)
 {
 	struct gk20a_clk *clk;
@@ -919,12 +919,9 @@ gm20b_clk_new_speedo0(struct nvkm_device *device, int index,
 		return -ENOMEM;
 	*pclk = &clk->base;
 
-	ret = gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
-			     &gm20b_pllg_params, clk);
-
+	ret = gk20a_clk_ctor(device, type, inst, &gm20b_clk_speedo0, &gm20b_pllg_params, clk);
 	clk->pl_to_div = pl_to_div;
 	clk->div_to_pl = div_to_pl;
-
 	return ret;
 }
 
@@ -1014,7 +1011,8 @@ gm20b_clk_init_safe_fmax(struct gm20b_clk *clk)
 }
 
 int
-gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+gm20b_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_clk **pclk)
 {
 	struct nvkm_device_tegra *tdev = device->func->tegra(device);
 	struct gm20b_clk *clk;
@@ -1024,7 +1022,7 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 
 	/* Speedo 0 GPUs cannot use noise-aware PLL */
 	if (tdev->gpu_speedo_id == 0)
-		return gm20b_clk_new_speedo0(device, index, pclk);
+		return gm20b_clk_new_speedo0(device, type, inst, pclk);
 
 	/* Speedo >= 1, use NAPLL */
 	clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL);
@@ -1036,8 +1034,7 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 	/* duplicate the clock parameters since we will patch them below */
 	clk_params = (void *) (clk + 1);
 	*clk_params = gm20b_pllg_params;
-	ret = gk20a_clk_ctor(device, index, &gm20b_clk, clk_params,
-			     &clk->base);
+	ret = gk20a_clk_ctor(device, type, inst, &gm20b_clk, clk_params, &clk->base);
 	if (ret)
 		return ret;
 
@@ -1050,7 +1047,7 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 	if (clk_params->max_m == 0) {
 		nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n");
 		kfree(clk);
-		return gm20b_clk_new_speedo0(device, index, pclk);
+		return gm20b_clk_new_speedo0(device, type, inst, pclk);
 	}
 
 	clk->base.pl_to_div = pl_to_div;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index f0a26881d9b9bc7f7eba0696ce7a6db96aa35623..b5f3969727a2045811843aba1d2d255cd7b13bf3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -537,7 +537,8 @@ gt215_clk = {
 };
 
 int
-gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+gt215_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_clk **pclk)
 {
 	struct gt215_clk *clk;
 
@@ -545,5 +546,5 @@ gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 		return -ENOMEM;
 	*pclk = &clk->base;
 
-	return nvkm_clk_ctor(&gt215_clk, device, index, true, &clk->base);
+	return nvkm_clk_ctor(&gt215_clk, device, type, inst, true, &clk->base);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
index 4884eb4a9221ad23ae732912aaa32c04e05d018e..81f103f88dc8bce5a8b28ab1a2183d890ef176b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
@@ -409,7 +409,8 @@ mcp77_clk = {
 };
 
 int
-mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+mcp77_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_clk **pclk)
 {
 	struct mcp77_clk *clk;
 
@@ -417,5 +418,5 @@ mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 		return -ENOMEM;
 	*pclk = &clk->base;
 
-	return nvkm_clk_ctor(&mcp77_clk, device, index, true, &clk->base);
+	return nvkm_clk_ctor(&mcp77_clk, device, type, inst, true, &clk->base);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
index b280f85e8827446f964d97996cf7213f90e958dd..ca13598c2caa46f9ae37e841046e1938d0d87015 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
@@ -72,9 +72,10 @@ nv04_clk = {
 };
 
 int
-nv04_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+nv04_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_clk **pclk)
 {
-	int ret = nvkm_clk_new_(&nv04_clk, device, index, false, pclk);
+	int ret = nvkm_clk_new_(&nv04_clk, device, type, inst, false, pclk);
 	if (ret == 0) {
 		(*pclk)->pll_calc = nv04_clk_pll_calc;
 		(*pclk)->pll_prog = nv04_clk_pll_prog;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
index 2ab9b9b84018661ad3c5a0afb0a5b17f4a5ba531..7ddd8cecb80510dce6c98f5bb70e0cb166d60bc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
@@ -218,7 +218,8 @@ nv40_clk = {
 };
 
 int
-nv40_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+nv40_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_clk **pclk)
 {
 	struct nv40_clk *clk;
 
@@ -228,5 +229,5 @@ nv40_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 	clk->base.pll_prog = nv04_clk_pll_prog;
 	*pclk = &clk->base;
 
-	return nvkm_clk_ctor(&nv40_clk, device, index, true, &clk->base);
+	return nvkm_clk_ctor(&nv40_clk, device, type, inst, true, &clk->base);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
index da1770e47490e7006082a1421ab72c04ab28bc62..83067763c0ecfad0430259d54997289d9a11e43f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
@@ -507,14 +507,14 @@ nv50_clk_tidy(struct nvkm_clk *base)
 
 int
 nv50_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
-	      int index, bool allow_reclock, struct nvkm_clk **pclk)
+	      enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk **pclk)
 {
 	struct nv50_clk *clk;
 	int ret;
 
 	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
 		return -ENOMEM;
-	ret = nvkm_clk_ctor(func, device, index, allow_reclock, &clk->base);
+	ret = nvkm_clk_ctor(func, device, type, inst, allow_reclock, &clk->base);
 	*pclk = &clk->base;
 	if (ret)
 		return ret;
@@ -555,7 +555,8 @@ nv50_clk = {
 };
 
 int
-nv50_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+nv50_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_clk **pclk)
 {
-	return nv50_clk_new_(&nv50_clk, device, index, false, pclk);
+	return nv50_clk_new_(&nv50_clk, device, type, inst, false, pclk);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
index 7c7713238ec4b0ec70fdb02c2bac6df7cca0e337..5b4cb7e5cff6e093e7ce69ee36a89383316cf34e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
@@ -20,7 +20,7 @@ struct nv50_clk {
 	struct nv50_clk_hwsq hwsq;
 };
 
-int nv50_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
+int nv50_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
 		  bool, struct nvkm_clk **);
 int nv50_clk_read(struct nvkm_clk *, enum nv_clk_src);
 int nv50_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
index 81dfb37480ae8c0d2b5afe64f79ea9abd4ae1191..810cc572cd30e128198ef6a24ab9394ef8f8dc17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
@@ -16,9 +16,9 @@ struct nvkm_clk_func {
 	struct nvkm_domain domains[];
 };
 
-int nvkm_clk_ctor(const struct nvkm_clk_func *, struct nvkm_device *, int,
+int nvkm_clk_ctor(const struct nvkm_clk_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
 		  bool allow_reclock, struct nvkm_clk *);
-int nvkm_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
+int nvkm_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
 		  bool allow_reclock, struct nvkm_clk **);
 
 int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
index 4756019ddf3fba74bf69795a7fd567e38241d813..dd4981708fe40e90f1823a917d2e69f1bacc87b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
@@ -56,12 +56,12 @@ nvkm_devinit_disable(struct nvkm_devinit *init)
 }
 
 int
-nvkm_devinit_post(struct nvkm_devinit *init, u64 *disable)
+nvkm_devinit_post(struct nvkm_devinit *init)
 {
 	int ret = 0;
 	if (init && init->func->post)
 		ret = init->func->post(init, init->post);
-	*disable = nvkm_devinit_disable(init);
+	nvkm_devinit_disable(init);
 	return ret;
 }
 
@@ -126,11 +126,10 @@ nvkm_devinit = {
 };
 
 void
-nvkm_devinit_ctor(const struct nvkm_devinit_func *func,
-		  struct nvkm_device *device, int index,
-		  struct nvkm_devinit *init)
+nvkm_devinit_ctor(const struct nvkm_devinit_func *func, struct nvkm_device *device,
+		  enum nvkm_subdev_type type, int inst, struct nvkm_devinit *init)
 {
-	nvkm_subdev_ctor(&nvkm_devinit, device, index, &init->subdev);
+	nvkm_subdev_ctor(&nvkm_devinit, device, type, inst, &init->subdev);
 	init->func = func;
 	init->force_post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
 }
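
nvkm_devinit_post() drops its u64 *disable out-parameter: rather than handing a bitmask back for the caller to apply, nvkm_devinit_disable() now flags missing units itself (via nvkm_subdev_disable(), as the chip-specific hunks below show). A hedged before/after sketch of a caller; the surrounding init-path code is assumed, not part of this patch:

    /* Before: caller received a bitmask of absent engines to apply. */
    u64 disable;
    ret = nvkm_devinit_post(device->devinit, &disable);

    /* After: disables are applied as a side effect of posting. */
    ret = nvkm_devinit_post(device->devinit);
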
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
index e895289bf3c176a16ad9fd57d0354b7dd012a0a1..c224702b7bedf5a19d0246e2d053329a9fe61a66 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
@@ -35,18 +35,18 @@ g84_devinit_disable(struct nvkm_devinit *init)
 	u64 disable = 0ULL;
 
 	if (!(r001540 & 0x40000000)) {
-		disable |= (1ULL << NVKM_ENGINE_MPEG);
-		disable |= (1ULL << NVKM_ENGINE_VP);
-		disable |= (1ULL << NVKM_ENGINE_BSP);
-		disable |= (1ULL << NVKM_ENGINE_CIPHER);
+		nvkm_subdev_disable(device, NVKM_ENGINE_MPEG, 0);
+		nvkm_subdev_disable(device, NVKM_ENGINE_VP, 0);
+		nvkm_subdev_disable(device, NVKM_ENGINE_BSP, 0);
+		nvkm_subdev_disable(device, NVKM_ENGINE_CIPHER, 0);
 	}
 
 	if (!(r00154c & 0x00000004))
-		disable |= (1ULL << NVKM_ENGINE_DISP);
+		nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
 	if (!(r00154c & 0x00000020))
-		disable |= (1ULL << NVKM_ENGINE_BSP);
+		nvkm_subdev_disable(device, NVKM_ENGINE_BSP, 0);
 	if (!(r00154c & 0x00000040))
-		disable |= (1ULL << NVKM_ENGINE_CIPHER);
+		nvkm_subdev_disable(device, NVKM_ENGINE_CIPHER, 0);
 
 	return disable;
 }
@@ -61,8 +61,8 @@ g84_devinit = {
 };
 
 int
-g84_devinit_new(struct nvkm_device *device, int index,
+g84_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		struct nvkm_devinit **pinit)
 {
-	return nv50_devinit_new_(&g84_devinit, device, index, pinit);
+	return nv50_devinit_new_(&g84_devinit, device, type, inst, pinit);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
index a9d45844df5a8fafca713afb1acc38da8d9b3b84..05729ca19e9a7e591202051d8c75bbcb93143396 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
@@ -35,17 +35,17 @@ g98_devinit_disable(struct nvkm_devinit *init)
 	u64 disable = 0ULL;
 
 	if (!(r001540 & 0x40000000)) {
-		disable |= (1ULL << NVKM_ENGINE_MSPDEC);
-		disable |= (1ULL << NVKM_ENGINE_MSVLD);
-		disable |= (1ULL << NVKM_ENGINE_MSPPP);
+		nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
+		nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
+		nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0);
 	}
 
 	if (!(r00154c & 0x00000004))
-		disable |= (1ULL << NVKM_ENGINE_DISP);
+		nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
 	if (!(r00154c & 0x00000020))
-		disable |= (1ULL << NVKM_ENGINE_MSVLD);
+		nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
 	if (!(r00154c & 0x00000040))
-		disable |= (1ULL << NVKM_ENGINE_SEC);
+		nvkm_subdev_disable(device, NVKM_ENGINE_SEC, 0);
 
 	return disable;
 }
@@ -60,8 +60,8 @@ g98_devinit = {
 };
 
 int
-g98_devinit_new(struct nvkm_device *device, int index,
+g98_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		struct nvkm_devinit **pinit)
 {
-	return nv50_devinit_new_(&g98_devinit, device, index, pinit);
+	return nv50_devinit_new_(&g98_devinit, device, type, inst, pinit);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
index 636a92128f6c8cab99160cee3f8c392ceb0f11df..6b280b05c4ca07c9bedf81b76c26827bbf2f937f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
@@ -70,7 +70,8 @@ ga100_devinit = {
 };
 
 int
-ga100_devinit_new(struct nvkm_device *device, int index, struct nvkm_devinit **pinit)
+ga100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		  struct nvkm_devinit **pinit)
 {
-	return nv50_devinit_new_(&ga100_devinit, device, index, pinit);
+	return nv50_devinit_new_(&ga100_devinit, device, type, inst, pinit);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
index 8b1b34c3ad2624cfbda40054a7c5410b8a0364f6..051cfd6a5caf5ce82bd55cc8f52bc21bfd541516 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
@@ -71,21 +71,21 @@ gf100_devinit_disable(struct nvkm_devinit *init)
 	u64 disable = 0ULL;
 
 	if (r022500 & 0x00000001)
-		disable |= (1ULL << NVKM_ENGINE_DISP);
+		nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
 
 	if (r022500 & 0x00000002) {
-		disable |= (1ULL << NVKM_ENGINE_MSPDEC);
-		disable |= (1ULL << NVKM_ENGINE_MSPPP);
+		nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
+		nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0);
 	}
 
 	if (r022500 & 0x00000004)
-		disable |= (1ULL << NVKM_ENGINE_MSVLD);
+		nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
 	if (r022500 & 0x00000008)
-		disable |= (1ULL << NVKM_ENGINE_MSENC);
+		nvkm_subdev_disable(device, NVKM_ENGINE_MSENC, 0);
 	if (r022500 & 0x00000100)
-		disable |= (1ULL << NVKM_ENGINE_CE0);
+		nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
 	if (r022500 & 0x00000200)
-		disable |= (1ULL << NVKM_ENGINE_CE1);
+		nvkm_subdev_disable(device, NVKM_ENGINE_CE, 1);
 
 	return disable;
 }
@@ -114,8 +114,8 @@ gf100_devinit = {
 };
 
 int
-gf100_devinit_new(struct nvkm_device *device, int index,
-		struct nvkm_devinit **pinit)
+gf100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		  struct nvkm_devinit **pinit)
 {
-	return nv50_devinit_new_(&gf100_devinit, device, index, pinit);
+	return nv50_devinit_new_(&gf100_devinit, device, type, inst, pinit);
 }
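
The gf100 hunk above is the clearest illustration of why the flat index had to go: the old scheme needed one enum entry per copy-engine instance, while the new one keys a single type by instance number. Side by side, taken directly from that hunk:

    /* Old: one enum entry per copy engine. */
    disable |= (1ULL << NVKM_ENGINE_CE0);
    disable |= (1ULL << NVKM_ENGINE_CE1);

    /* New: one type; the instance selects the copy. */
    nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
    nvkm_subdev_disable(device, NVKM_ENGINE_CE, 1);
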
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 28ca01be3d3825b4fd249f85c87c41782b10d5f5..4323732a3cb22897b3ec035e4cfe7081847c1fb0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -35,11 +35,11 @@ gm107_devinit_disable(struct nvkm_devinit *init)
 	u64 disable = 0ULL;
 
 	if (r021c00 & 0x00000001)
-		disable |= (1ULL << NVKM_ENGINE_CE0);
+		nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
 	if (r021c00 & 0x00000004)
-		disable |= (1ULL << NVKM_ENGINE_CE2);
+		nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2);
 	if (r021c04 & 0x00000001)
-		disable |= (1ULL << NVKM_ENGINE_DISP);
+		nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
 
 	return disable;
 }
@@ -54,8 +54,8 @@ gm107_devinit = {
 };
 
 int
-gm107_devinit_new(struct nvkm_device *device, int index,
-		struct nvkm_devinit **pinit)
+gm107_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		  struct nvkm_devinit **pinit)
 {
-	return nv50_devinit_new_(&gm107_devinit, device, index, pinit);
+	return nv50_devinit_new_(&gm107_devinit, device, type, inst, pinit);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index 59940dacc2ba028939d3822c0e736248a5af9359..a308b9bde4497e403377d8f048f8407ba63ef2c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -179,8 +179,8 @@ gm200_devinit = {
 };
 
 int
-gm200_devinit_new(struct nvkm_device *device, int index,
-		struct nvkm_devinit **pinit)
+gm200_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		  struct nvkm_devinit **pinit)
 {
-	return nv50_devinit_new_(&gm200_devinit, device, index, pinit);
+	return nv50_devinit_new_(&gm200_devinit, device, type, inst, pinit);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
index 9a8522fa9c6523ff16666456cb9ee02433dc86ad..dc026ac1b5950cdf6ff40895974890f3468ffc04 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
@@ -71,16 +71,16 @@ gt215_devinit_disable(struct nvkm_devinit *init)
 	u64 disable = 0ULL;
 
 	if (!(r001540 & 0x40000000)) {
-		disable |= (1ULL << NVKM_ENGINE_MSPDEC);
-		disable |= (1ULL << NVKM_ENGINE_MSPPP);
+		nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
+		nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0);
 	}
 
 	if (!(r00154c & 0x00000004))
-		disable |= (1ULL << NVKM_ENGINE_DISP);
+		nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
 	if (!(r00154c & 0x00000020))
-		disable |= (1ULL << NVKM_ENGINE_MSVLD);
+		nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
 	if (!(r00154c & 0x00000200))
-		disable |= (1ULL << NVKM_ENGINE_CE0);
+		nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
 
 	return disable;
 }
@@ -146,8 +146,8 @@ gt215_devinit = {
 };
 
 int
-gt215_devinit_new(struct nvkm_device *device, int index,
-		struct nvkm_devinit **pinit)
+gt215_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		  struct nvkm_devinit **pinit)
 {
-	return nv50_devinit_new_(&gt215_devinit, device, index, pinit);
+	return nv50_devinit_new_(&gt215_devinit, device, type, inst, pinit);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c
index fbde6828bd38ecc9e6080e877c4eac554d7a800b..b4d1688517d5a5df2ed32b68cfef0e5ed8e5abc3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c
@@ -72,8 +72,8 @@ gv100_devinit = {
 };
 
 int
-gv100_devinit_new(struct nvkm_device *device, int index,
-		struct nvkm_devinit **pinit)
+gv100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		  struct nvkm_devinit **pinit)
 {
-	return nv50_devinit_new_(&gv100_devinit, device, index, pinit);
+	return nv50_devinit_new_(&gv100_devinit, device, type, inst, pinit);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
index ce4f718e98a174fb5b6a982fd3c7b8b90d8079f3..fb90d47e12252c32f966f896d672a9debc5a83bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
@@ -35,18 +35,18 @@ mcp89_devinit_disable(struct nvkm_devinit *init)
 	u64 disable = 0;
 
 	if (!(r001540 & 0x40000000)) {
-		disable |= (1ULL << NVKM_ENGINE_MSPDEC);
-		disable |= (1ULL << NVKM_ENGINE_MSPPP);
+		nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
+		nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0);
 	}
 
 	if (!(r00154c & 0x00000004))
-		disable |= (1ULL << NVKM_ENGINE_DISP);
+		nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
 	if (!(r00154c & 0x00000020))
-		disable |= (1ULL << NVKM_ENGINE_MSVLD);
+		nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
 	if (!(r00154c & 0x00000040))
-		disable |= (1ULL << NVKM_ENGINE_VIC);
+		nvkm_subdev_disable(device, NVKM_ENGINE_VIC, 0);
 	if (!(r00154c & 0x00000200))
-		disable |= (1ULL << NVKM_ENGINE_CE0);
+		nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
 
 	return disable;
 }
@@ -61,8 +61,8 @@ mcp89_devinit = {
 };
 
 int
-mcp89_devinit_new(struct nvkm_device *device, int index,
-		struct nvkm_devinit **pinit)
+mcp89_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		  struct nvkm_devinit **pinit)
 {
-	return nv50_devinit_new_(&mcp89_devinit, device, index, pinit);
+	return nv50_devinit_new_(&mcp89_devinit, device, type, inst, pinit);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
index 317ce9fb82251caeb9da9e53c94e4ad43d2386e6..88bc890f89a2b44e368eecb7f6565e9756490573 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
@@ -434,9 +434,8 @@ nv04_devinit_dtor(struct nvkm_devinit *base)
 }
 
 int
-nv04_devinit_new_(const struct nvkm_devinit_func *func,
-		  struct nvkm_device *device, int index,
-		  struct nvkm_devinit **pinit)
+nv04_devinit_new_(const struct nvkm_devinit_func *func, struct nvkm_device *device,
+		  enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit)
 {
 	struct nv04_devinit *init;
 
@@ -444,7 +443,7 @@ nv04_devinit_new_(const struct nvkm_devinit_func *func,
 		return -ENOMEM;
 	*pinit = &init->base;
 
-	nvkm_devinit_ctor(func, device, index, &init->base);
+	nvkm_devinit_ctor(func, device, type, inst, &init->base);
 	init->owner = -1;
 	return 0;
 }
@@ -459,8 +458,8 @@ nv04_devinit = {
 };
 
 int
-nv04_devinit_new(struct nvkm_device *device, int index,
+nv04_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		 struct nvkm_devinit **pinit)
 {
-	return nv04_devinit_new_(&nv04_devinit, device, index, pinit);
+	return nv04_devinit_new_(&nv04_devinit, device, type, inst, pinit);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
index 15b029ddf6df2e58e78ae964297ef6242f33f59c..06ad8a606bb824066f2992b0eac9a782d9e3365e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
@@ -11,7 +11,7 @@ struct nv04_devinit {
 };
 
 int nv04_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *,
-		      int, struct nvkm_devinit **);
+		      enum nvkm_subdev_type, int, struct nvkm_devinit **);
 void *nv04_devinit_dtor(struct nvkm_devinit *);
 void nv04_devinit_preinit(struct nvkm_devinit *);
 void nv04_devinit_fini(struct nvkm_devinit *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
index 9891eadca1ce85545e5ecab77e8d29c79ce30a4c..1410befd2285a3a2f8336b531352519adc748ece 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
@@ -136,8 +136,8 @@ nv05_devinit = {
 };
 
 int
-nv05_devinit_new(struct nvkm_device *device, int index,
+nv05_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		 struct nvkm_devinit **pinit)
 {
-	return nv04_devinit_new_(&nv05_devinit, device, index, pinit);
+	return nv04_devinit_new_(&nv05_devinit, device, type, inst, pinit);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
index 570822f83acf5f539fb2322493b5a7c926919b42..a6aa8786d610fd855efc77dafd667adec2ad63f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
@@ -106,8 +106,8 @@ nv10_devinit = {
 };
 
 int
-nv10_devinit_new(struct nvkm_device *device, int index,
+nv10_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		 struct nvkm_devinit **pinit)
 {
-	return nv04_devinit_new_(&nv10_devinit, device, index, pinit);
+	return nv04_devinit_new_(&nv10_devinit, device, type, inst, pinit);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
index fefafec7e2a7cb7b695dd7b5b6b6b80c1629b97d..4cc5ef9a5a63f3a0910d7b5440093c5cc657839f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
@@ -35,8 +35,8 @@ nv1a_devinit = {
 };
 
 int
-nv1a_devinit_new(struct nvkm_device *device, int index,
+nv1a_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		 struct nvkm_devinit **pinit)
 {
-	return nv04_devinit_new_(&nv1a_devinit, device, index, pinit);
+	return nv04_devinit_new_(&nv1a_devinit, device, type, inst, pinit);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
index 4ef04e0d8826117679bcd91ebb08569f5301db75..67f46df723e43c4b6ddf09c9f82214666650af4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
@@ -72,8 +72,8 @@ nv20_devinit = {
 };
 
 int
-nv20_devinit_new(struct nvkm_device *device, int index,
+nv20_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		 struct nvkm_devinit **pinit)
 {
-	return nv04_devinit_new_(&nv20_devinit, device, index, pinit);
+	return nv04_devinit_new_(&nv20_devinit, device, type, inst, pinit);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
index d7947c4391dcec107f896d685e558fc074ac8fba..380995d398b1c8c0bbe36be93962a1c6876a45eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
@@ -85,7 +85,7 @@ nv50_devinit_disable(struct nvkm_devinit *init)
 	u64 disable = 0ULL;
 
 	if (!(r001540 & 0x40000000))
-		disable |= (1ULL << NVKM_ENGINE_MPEG);
+		nvkm_subdev_disable(device, NVKM_ENGINE_MPEG, 0);
 
 	return disable;
 }
@@ -101,8 +101,8 @@ nv50_devinit_preinit(struct nvkm_devinit *base)
 	 * missing, assume it's a secondary gpu which requires post
 	 */
 	if (!base->post) {
-		u64 disable = nvkm_devinit_disable(base);
-		if (disable & (1ULL << NVKM_ENGINE_DISP))
+		nvkm_devinit_disable(base);
+		if (!device->disp)
 			base->post = true;
 	}
 
@@ -148,9 +148,8 @@ nv50_devinit_init(struct nvkm_devinit *base)
 }
 
 int
-nv50_devinit_new_(const struct nvkm_devinit_func *func,
-		  struct nvkm_device *device, int index,
-		  struct nvkm_devinit **pinit)
+nv50_devinit_new_(const struct nvkm_devinit_func *func, struct nvkm_device *device,
+		  enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit)
 {
 	struct nv50_devinit *init;
 
@@ -158,7 +157,7 @@ nv50_devinit_new_(const struct nvkm_devinit_func *func,
 		return -ENOMEM;
 	*pinit = &init->base;
 
-	nvkm_devinit_ctor(func, device, index, &init->base);
+	nvkm_devinit_ctor(func, device, type, inst, &init->base);
 	return 0;
 }
 
@@ -172,8 +171,8 @@ nv50_devinit = {
 };
 
 int
-nv50_devinit_new(struct nvkm_device *device, int index,
+nv50_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		 struct nvkm_devinit **pinit)
 {
-	return nv50_devinit_new_(&nv50_devinit, device, index, pinit);
+	return nv50_devinit_new_(&nv50_devinit, device, type, inst, pinit);
 }
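
With engines pruned from the device rather than flagged in a mask,
nv50_devinit_preinit() above no longer tests a DISP bit in the return
value: once nvkm_devinit_disable() has run, a missing display engine is
simply a NULL device->disp.  The ramgk104.c hunks later in this patch make
the same substitution for nvkm_device_engine(device, NVKM_ENGINE_DISP).
Both forms, condensed from the hunk:

	if (!base->post) {
		/* before: u64 disable = nvkm_devinit_disable(base);
		 *         if (disable & (1ULL << NVKM_ENGINE_DISP))
		 */
		nvkm_devinit_disable(base);
		if (!device->disp)	/* after: pruned engine == NULL */
			base->post = true;
	}
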
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index e8d37a6145a2645e7903c9756ea96c32c9b2a9e1..987a7f478b84bb11368026780e4f75f0b35b4b1b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -9,7 +9,7 @@ struct nv50_devinit {
 	u32 r001540;
 };
 
-int nv50_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *,
+int nv50_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *, enum nvkm_subdev_type,
 		      int, struct nvkm_devinit **);
 void nv50_devinit_preinit(struct nvkm_devinit *);
 void nv50_devinit_init(struct nvkm_devinit *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
index 05961e624264754078e6bc48bc669db3f978c54f..dd8b038a8ceee402f85f6b5421dcbe50356fca09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -16,7 +16,8 @@ struct nvkm_devinit_func {
 };
 
 void nvkm_devinit_ctor(const struct nvkm_devinit_func *, struct nvkm_device *,
-		       int index, struct nvkm_devinit *);
+		       enum nvkm_subdev_type, int inst, struct nvkm_devinit *);
+u64 nvkm_devinit_disable(struct nvkm_devinit *);
 
 int nv04_devinit_post(struct nvkm_devinit *, bool);
 int tu102_devinit_post(struct nvkm_devinit *, bool);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
index 9a469bf482f2f5bb13b270c625d9fc37b476df2c..634f64f88fc8be391f3efba4e0ff8744eed9e94c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
@@ -82,8 +82,8 @@ tu102_devinit = {
 };
 
 int
-tu102_devinit_new(struct nvkm_device *device, int index,
-		struct nvkm_devinit **pinit)
+tu102_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		  struct nvkm_devinit **pinit)
 {
-	return nv50_devinit_new_(&tu102_devinit, device, index, pinit);
+	return nv50_devinit_new_(&tu102_devinit, device, type, inst, pinit);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
index f6dca97140d6bdf877c97e5348bcec3f48ccf5c4..fd54fa504efa04a50de2cd1fced419f1e6aad9e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -170,12 +170,12 @@ nvkm_fault = {
 
 int
 nvkm_fault_new_(const struct nvkm_fault_func *func, struct nvkm_device *device,
-		int index, struct nvkm_fault **pfault)
+		enum nvkm_subdev_type type, int inst, struct nvkm_fault **pfault)
 {
 	struct nvkm_fault *fault;
 	if (!(fault = *pfault = kzalloc(sizeof(*fault), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&nvkm_fault, device, index, &fault->subdev);
+	nvkm_subdev_ctor(&nvkm_fault, device, type, inst, &fault->subdev);
 	fault->func = func;
 	fault->user.ctor = nvkm_ufault_new;
 	fault->user.base = func->user.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
index f6b189cc43301f05b6220651f558830e2196a4bc..6af7959e02ea64df99415fc111998522c1aa54e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
@@ -30,7 +30,7 @@ void
 gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
 {
 	struct nvkm_device *device = buffer->fault->subdev.device;
-	nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable);
+	nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, 0, enable);
 }
 
 void
@@ -82,8 +82,8 @@ gp100_fault = {
 };
 
 int
-gp100_fault_new(struct nvkm_device *device, int index,
+gp100_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		struct nvkm_fault **pfault)
 {
-	return nvkm_fault_new_(&gp100_fault, device, index, pfault);
+	return nvkm_fault_new_(&gp100_fault, device, type, inst, pfault);
 }
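
The nvkm_mc_intr_mask() call above picks up the same (type, inst)
addressing: the extra 0 is the instance of the FAULT subdev whose
interrupt is being masked.  Only the call site appears in this patch, so
the prototype below is inferred:

	/* Inferred prototype; only the call site is visible here. */
	void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_subdev_type,
			       int inst, bool enable);

	/* Single-instance subdevs pass instance 0: */
	nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, 0, enable);
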
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
index 9e66d1f7654d4a7ad9843676ff21983e1b1771c9..89e0bc96fb92dd5ec5f871a78c453adca8c600d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
@@ -46,8 +46,8 @@ gp10b_fault = {
 };
 
 int
-gp10b_fault_new(struct nvkm_device *device, int index,
+gp10b_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		struct nvkm_fault **pfault)
 {
-	return nvkm_fault_new_(&gp10b_fault, device, index, pfault);
+	return nvkm_fault_new_(&gp10b_fault, device, type, inst, pfault);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
index 2707be4ffabc37773fd01838eaa3481bfdbde597..cd9d2ade5ac7a5c5174ecfc0ce5995055ada4917 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -228,8 +228,8 @@ gv100_fault = {
 };
 
 int
-gv100_fault_new(struct nvkm_device *device, int index,
+gv100_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		struct nvkm_fault **pfault)
 {
-	return nvkm_fault_new_(&gv100_fault, device, index, pfault);
+	return nvkm_fault_new_(&gv100_fault, device, type, inst, pfault);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
index f6f1dd7eee1ff5dd394c4b3f5efdc1ce3cb039c8..36681c347fb597a631a1909200d63e148f38ea70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -18,8 +18,8 @@ struct nvkm_fault_buffer {
 	u64 addr;
 };
 
-int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *,
-		    int index, struct nvkm_fault **);
+int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *, enum nvkm_subdev_type,
+		    int inst, struct nvkm_fault **);
 
 struct nvkm_fault_func {
 	int (*oneinit)(struct nvkm_fault *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
index 45a6a68b9f48a651b7e575fe907fd464b137ef8e..91eb6729c84d67977b61b15f59af10658fdaf568 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
@@ -22,6 +22,7 @@
 #include "priv.h"
 
 #include <core/memory.h>
+#include <subdev/mc.h>
 #include <subdev/mmu.h>
 #include <engine/fifo.h>
 
@@ -34,6 +35,9 @@ tu102_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
 	 *     which don't appear to actually work anymore, but newer
 	 *     versions of RM don't appear to touch anything at all..
 	 */
+	struct nvkm_device *device = buffer->fault->subdev.device;
+
+	nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, 0, enable);
 }
 
 static void
@@ -41,6 +45,11 @@ tu102_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
 {
 	struct nvkm_device *device = buffer->fault->subdev.device;
 	const u32 foff = buffer->id * 0x20;
+
+	/* Disable the fault interrupts */
+	nvkm_wr32(device, 0xb81408, 0x1);
+	nvkm_wr32(device, 0xb81410, 0x10);
+
 	nvkm_mask(device, 0xb83010 + foff, 0x80000000, 0x00000000);
 }
 
@@ -50,6 +59,10 @@ tu102_fault_buffer_init(struct nvkm_fault_buffer *buffer)
 	struct nvkm_device *device = buffer->fault->subdev.device;
 	const u32 foff = buffer->id * 0x20;
 
+	/* Enable the fault interrupts */
+	nvkm_wr32(device, 0xb81208, 0x1);
+	nvkm_wr32(device, 0xb81210, 0x10);
+
 	nvkm_mask(device, 0xb83010 + foff, 0xc0000000, 0x40000000);
 	nvkm_wr32(device, 0xb83004 + foff, upper_32_bits(buffer->addr));
 	nvkm_wr32(device, 0xb83000 + foff, lower_32_bits(buffer->addr));
@@ -109,14 +122,20 @@ tu102_fault_intr(struct nvkm_fault *fault)
 	}
 
 	if (stat & 0x00000200) {
+		/* Clear the associated interrupt flag */
+		nvkm_wr32(device, 0xb81010, 0x10);
+
 		if (fault->buffer[0]) {
 			nvkm_event_send(&fault->event, 1, 0, NULL, 0);
 			stat &= ~0x00000200;
 		}
 	}
 
-	/*XXX: guess, can't confirm until we get fw... */
+	/* Replayable MMU fault */
 	if (stat & 0x00000100) {
+		/* Clear the associated interrupt flag */
+		nvkm_wr32(device, 0xb81008, 0x1);
+
 		if (fault->buffer[1]) {
 			nvkm_event_send(&fault->event, 1, 1, NULL, 0);
 			stat &= ~0x00000100;
@@ -162,8 +181,8 @@ tu102_fault = {
 };
 
 int
-tu102_fault_new(struct nvkm_device *device, int index,
+tu102_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		struct nvkm_fault **pfault)
 {
-	return nvkm_fault_new_(&tu102_fault, device, index, pfault);
+	return nvkm_fault_new_(&tu102_fault, device, type, inst, pfault);
 }
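
The tu102 fault hunks above pair every new enable with a matching disable
and acknowledge.  Restating the register writes verbatim from the hunks
(the "replayable" label for stat bit 0x100 comes from the new comment; the
0x200 path is presumably the non-replayable buffer, which the patch leaves
unnamed):

	/*
	 * enable  (tu102_fault_buffer_init): 0xb81208 = 0x1, 0xb81210 = 0x10
	 * disable (tu102_fault_buffer_fini): 0xb81408 = 0x1, 0xb81410 = 0x10
	 * ack     (tu102_fault_intr):        0xb81008 = 0x1  (stat 0x100, replayable)
	 *                                    0xb81010 = 0x10 (stat 0x200)
	 */
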
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 5940e0dea2f8b96f3d9f548d33dc75d14a088e6c..6faaea948fc44d5ac8ff487f47f26f08af55e20a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -122,7 +122,7 @@ nvkm_fb_oneinit(struct nvkm_subdev *subdev)
 		nvkm_debug(subdev, "%d comptags\n", tags);
 	}
 
-	return nvkm_mm_init(&fb->tags, 0, 0, tags, 1);
+	return nvkm_mm_init(&fb->tags.mm, 0, 0, tags, 1);
 }
 
 static int
@@ -205,7 +205,9 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev)
 	for (i = 0; i < fb->tile.regions; i++)
 		fb->func->tile.fini(fb, i, &fb->tile.region[i]);
 
-	nvkm_mm_fini(&fb->tags);
+	nvkm_mm_fini(&fb->tags.mm);
+	mutex_destroy(&fb->tags.mutex);
+
 	nvkm_ram_del(&fb->ram);
 
 	nvkm_blob_dtor(&fb->vpr_scrubber);
@@ -225,21 +227,21 @@ nvkm_fb = {
 
 void
 nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
-	     int index, struct nvkm_fb *fb)
+	     enum nvkm_subdev_type type, int inst, struct nvkm_fb *fb)
 {
-	nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
+	nvkm_subdev_ctor(&nvkm_fb, device, type, inst, &fb->subdev);
 	fb->func = func;
 	fb->tile.regions = fb->func->tile.regions;
-	fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage",
-				fb->func->default_bigpage);
+	fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", fb->func->default_bigpage);
+	mutex_init(&fb->tags.mutex);
 }
 
 int
 nvkm_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
-	     int index, struct nvkm_fb **pfb)
+	     enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
 	if (!(*pfb = kzalloc(sizeof(**pfb), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_fb_ctor(func, device, index, *pfb);
+	nvkm_fb_ctor(func, device, type, inst, *pfb);
 	return 0;
 }
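
In fb/base.c above, fb->tags is no longer a bare struct nvkm_mm: the ctor
gains a mutex_init() and the dtor a matching mutex_destroy() next to the
existing nvkm_mm_fini(), and every user (nv20.c through nv40.c below)
switches from &fb->tags to &fb->tags.mm.  The declaration itself lives in
a header outside this patch; the accesses imply a layout like:

	/* Layout implied by the fb->tags.mm / fb->tags.mutex accesses;
	 * the real declaration is outside this patch.
	 */
	struct nvkm_fb {
		/* ... */
		struct {
			struct nvkm_mm mm;	/* comptag allocator, was fb->tags */
			struct mutex mutex;	/* own lock, decoupled from subdev */
		} tags;
	};
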
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
index 06bf95c0c5493bd37866eb38d32b6ba323c69156..770a4ad391229d35baff9ff49528864f4d17eb03 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
@@ -32,7 +32,7 @@ g84_fb = {
 };
 
 int
-g84_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+g84_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nv50_fb_new_(&g84_fb, device, index, pfb);
+	return nv50_fb_new_(&g84_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
index bf82686851cd4a5b47e830448c49058ddf92c615..b47bebfbc26f97bcb27673cf2b67b99484363952 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
@@ -34,7 +34,7 @@ ga100_fb = {
 };
 
 int
-ga100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+ga100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return gp102_fb_new_(&ga100_fb, device, index, pfb);
+	return gp102_fb_new_(&ga100_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
index bcecf84a6e67995a50d527cf73dab04a77e67105..6ea7908f05638cd1b58221ffe0a24314fac5ded9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -34,7 +34,7 @@ ga102_fb = {
 };
 
 int
-ga102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+ga102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return gp102_fb_new_(&ga102_fb, device, index, pfb);
+	return gp102_fb_new_(&ga102_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index e8dc4e913494a5e8791b171fce4bbe54deec08af..9dcc40f9ef797b8f310e5b00b4af3847d3a1d8e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -117,13 +117,13 @@ gf100_fb_dtor(struct nvkm_fb *base)
 
 int
 gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_fb **pfb)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
 	struct gf100_fb *fb;
 
 	if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_fb_ctor(func, device, index, &fb->base);
+	nvkm_fb_ctor(func, device, type, inst, &fb->base);
 	*pfb = &fb->base;
 
 	return 0;
@@ -141,7 +141,7 @@ gf100_fb = {
 };
 
 int
-gf100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gf100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return gf100_fb_new_(&gf100_fb, device, index, pfb);
+	return gf100_fb_new_(&gf100_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 2ed7cdaab37c36fad1e333162ed9b09a2380e494..0cac7b06acc818d2f97a307b9fb971c2526eaf19 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -10,8 +10,8 @@ struct gf100_fb {
 	dma_addr_t r100c10;
 };
 
-int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *,
-		  int index, struct nvkm_fb **);
+int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		  struct nvkm_fb **);
 void *gf100_fb_dtor(struct nvkm_fb *);
 void gf100_fb_init(struct nvkm_fb *);
 void gf100_fb_intr(struct nvkm_fb *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
index 4a9f463745b53450452e2796bdbfcbecdad12669..76678dd60f93f9fd7373615776d73266abbe7344 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
@@ -36,7 +36,7 @@ gf108_fb = {
 };
 
 int
-gf108_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gf108_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return gf100_fb_new_(&gf108_fb, device, index, pfb);
+	return gf100_fb_new_(&gf108_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 48fd98e08baae33993f9440581c61b8cb9e580ce..f73442ccb424b491461054cb30abe87aaf6f8520 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -83,7 +83,7 @@ gk104_fb = {
 };
 
 int
-gk104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gk104_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return gf100_fb_new_(&gk104_fb, device, index, pfb);
+	return gf100_fb_new_(&gk104_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
index 0695e5dd360e4b419c68be377ae8f0f4d85bfaa2..45d6cdffafeedc5f326bc45556357d882531f7d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
@@ -65,7 +65,7 @@ gk110_fb = {
 };
 
 int
-gk110_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gk110_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return gf100_fb_new_(&gk110_fb, device, index, pfb);
+	return gf100_fb_new_(&gk110_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index a7e29b1250941e4684f7a49cde5dca3195d2bd6d..6bc42f89d8c4100a100750b70a0012ab5dd34776 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -34,7 +34,7 @@ gk20a_fb = {
 };
 
 int
-gk20a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gk20a_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return gf100_fb_new_(&gk20a_fb, device, index, pfb);
+	return gf100_fb_new_(&gk20a_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index 69c876d5d1c11ed0af14c399644ae500545cadcd..de52462a92bf0ecd2b85a509091800e7b9761444 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -36,7 +36,7 @@ gm107_fb = {
 };
 
 int
-gm107_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gm107_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return gf100_fb_new_(&gm107_fb, device, index, pfb);
+	return gf100_fb_new_(&gm107_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index d3b8c3367152b4b18b58ecc38770c3cf4279d962..5acf8d15d06fb70a5970f6082e69f1cffbd937b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -67,7 +67,7 @@ gm200_fb = {
 };
 
 int
-gm200_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gm200_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return gf100_fb_new_(&gm200_fb, device, index, pfb);
+	return gf100_fb_new_(&gm200_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
index 12db61e31128ce0f1e1904b072a52a2bc9b809d1..86f61a3f2feae1563be947d66b99921232a181d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
@@ -34,7 +34,7 @@ gm20b_fb = {
 };
 
 int
-gm20b_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gm20b_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return gf100_fb_new_(&gm20b_fb, device, index, pfb);
+	return gf100_fb_new_(&gm20b_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
index 8205ce436b3e847663d5d44ab35c9e13885008a4..09e943edc3628256f514a8ba7050b0775eeafdce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -71,7 +71,7 @@ gp100_fb = {
 };
 
 int
-gp100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gp100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return gf100_fb_new_(&gp100_fb, device, index, pfb);
+	return gf100_fb_new_(&gp100_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index fc8c93aa3da52e6b85fcb99a8d83a1589023b0c6..0e78b3d734a0fbfb0f1f047a1cf93f3181be8002 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -114,9 +114,9 @@ gp102_fb = {
 
 int
 gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_fb **pfb)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	int ret = gf100_fb_new_(func, device, index, pfb);
+	int ret = gf100_fb_new_(func, device, type, inst, pfb);
 	if (ret)
 		return ret;
 
@@ -126,9 +126,9 @@ gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
 }
 
 int
-gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gp102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return gp102_fb_new_(&gp102_fb, device, index, pfb);
+	return gp102_fb_new_(&gp102_fb, device, type, inst, pfb);
 }
 
 MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
index af8e43979dc1154866ec13631ce0fa7a8dfd9a4f..84c9815a6d48aa5975815c379033e9612903e8b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
@@ -31,7 +31,7 @@ gp10b_fb = {
 };
 
 int
-gp10b_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gp10b_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return gf100_fb_new_(&gp10b_fb, device, index, pfb);
+	return gf100_fb_new_(&gp10b_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
index 9266559b45f971c44569508093c99a25de1f106a..c1ec9758617c69e40ecc6dd4828b80209ab52c83 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
@@ -32,7 +32,7 @@ gt215_fb = {
 };
 
 int
-gt215_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gt215_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nv50_fb_new_(&gt215_fb, device, index, pfb);
+	return nv50_fb_new_(&gt215_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index feda86a5fba8592000ac2657a815b72203cbaf91..63daa83ae12d13252e408986bf8a4ec405c01944 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -42,9 +42,9 @@ gv100_fb = {
 };
 
 int
-gv100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gv100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return gp102_fb_new_(&gv100_fb, device, index, pfb);
+	return gp102_fb_new_(&gv100_fb, device, type, inst, pfb);
 }
 
 MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
index 73b3b86a282622a39998b0b90761edeb8c8e49cb..70c7b08ee0a65936f05c4ea896b29755dfa742d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
@@ -31,7 +31,7 @@ mcp77_fb = {
 };
 
 int
-mcp77_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+mcp77_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nv50_fb_new_(&mcp77_fb, device, index, pfb);
+	return nv50_fb_new_(&mcp77_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
index 6d11e32ec7addc2497e3bab52d22355ebcce1ea1..308d955168e8b1c3e6a78ed3cac7233ae716ce30 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
@@ -31,7 +31,7 @@ mcp89_fb = {
 };
 
 int
-mcp89_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+mcp89_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nv50_fb_new_(&mcp89_fb, device, index, pfb);
+	return nv50_fb_new_(&mcp89_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
index c886664533c81dd3fc056a6e5220d6705ec4d53b..8d5a007ecc47658c9db981a1cd57a2cb0aca02eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
@@ -44,7 +44,7 @@ nv04_fb = {
 };
 
 int
-nv04_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv04_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nvkm_fb_new_(&nv04_fb, device, index, pfb);
+	return nvkm_fb_new_(&nv04_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
index c998b7e96aa3f0287dd22f2a47e4eb44adb152cb..7d2c16b27032f73a325d80f31d4df7b771e28494 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
@@ -64,7 +64,7 @@ nv10_fb = {
 };
 
 int
-nv10_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv10_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nvkm_fb_new_(&nv10_fb, device, index, pfb);
+	return nvkm_fb_new_(&nv10_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
index 7b9f04f44af8940cd0043374ec66c1f4791b9067..4bdad2abd56f6252a4e90fee22c766c6fab71211 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
@@ -36,7 +36,7 @@ nv1a_fb = {
 };
 
 int
-nv1a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv1a_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nvkm_fb_new_(&nv1a_fb, device, index, pfb);
+	return nvkm_fb_new_(&nv1a_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
index a021d21ff153a3fe40dcb871682976e00c98816c..d254f27f9b3768cf983742769b439475f14e27ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
@@ -45,7 +45,7 @@ nv20_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
 	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
 		else              tile->zcomp = 0x04000000; /* Z24S8 */
 		tile->zcomp |= tile->tag->offset;
@@ -63,7 +63,7 @@ nv20_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 	tile->limit = 0;
 	tile->pitch = 0;
 	tile->zcomp = 0;
-	nvkm_mm_free(&fb->tags, &tile->tag);
+	nvkm_mm_free(&fb->tags.mm, &tile->tag);
 }
 
 void
@@ -96,7 +96,7 @@ nv20_fb = {
 };
 
 int
-nv20_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv20_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nvkm_fb_new_(&nv20_fb, device, index, pfb);
+	return nvkm_fb_new_(&nv20_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
index 7709f5fe9a458b77b5006126636d91d1a090a691..47da66dea6e6e2feed8ef96e93624197057eae26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
@@ -32,7 +32,7 @@ nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
 	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
 		else              tile->zcomp = 0x00200000; /* Z24S8 */
 		tile->zcomp |= tile->tag->offset;
@@ -54,7 +54,7 @@ nv25_fb = {
 };
 
 int
-nv25_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv25_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nvkm_fb_new_(&nv25_fb, device, index, pfb);
+	return nvkm_fb_new_(&nv25_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
index 8aa7826665079f8ed0fabe43c14f7d3c5d54569b..0f87efb636d5d62609ad1c6f14a37a4562d3a29f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
@@ -51,7 +51,7 @@ nv30_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
 	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
 		else           tile->zcomp |= 0x02000000; /* Z24S8 */
 		tile->zcomp |= ((tile->tag->offset           ) >> 6);
@@ -127,7 +127,7 @@ nv30_fb = {
 };
 
 int
-nv30_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv30_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nvkm_fb_new_(&nv30_fb, device, index, pfb);
+	return nvkm_fb_new_(&nv30_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
index 6e83dcff72e08eed9f6b5fda4a17608313a39bab..0694dcfd107e6dc9bcd4075900f7ad40509c218e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
@@ -32,7 +32,7 @@ nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
 	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
 		else           tile->zcomp |= 0x08000000; /* Z24S8 */
 		tile->zcomp |= ((tile->tag->offset           ) >> 6);
@@ -56,7 +56,7 @@ nv35_fb = {
 };
 
 int
-nv35_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv35_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nvkm_fb_new_(&nv35_fb, device, index, pfb);
+	return nvkm_fb_new_(&nv35_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
index 2a07617bb44cb107866ae25b704d311024cdbb32..1a39770372f15d15055017cfd5eb537a580b5866 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
@@ -32,7 +32,7 @@ nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
 	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
 		else           tile->zcomp |= 0x20000000; /* Z24S8 */
 		tile->zcomp |= ((tile->tag->offset           ) >> 6);
@@ -56,7 +56,7 @@ nv36_fb = {
 };
 
 int
-nv36_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv36_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nvkm_fb_new_(&nv36_fb, device, index, pfb);
+	return nvkm_fb_new_(&nv36_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
index 955160778b5b82ab928bf5c56b095fc7770c7266..77dbb9d6ba480dde95758023feae72058a36eed5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
@@ -33,7 +33,7 @@ nv40_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 	u32 tiles = DIV_ROUND_UP(size, 0x80);
 	u32 tags  = round_up(tiles / fb->ram->parts, 0x100);
 	if ( (flags & 2) &&
-	    !nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	    !nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
 		tile->zcomp  = 0x28000000; /* Z24S8_SPLIT_GRAD */
 		tile->zcomp |= ((tile->tag->offset           ) >> 8);
 		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
@@ -62,7 +62,7 @@ nv40_fb = {
 };
 
 int
-nv40_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv40_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nvkm_fb_new_(&nv40_fb, device, index, pfb);
+	return nvkm_fb_new_(&nv40_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
index b77f08d34cc35a34d66e7873eca701206e6ad1dd..0f9d9e48e7add9a03fe5011651def9c62610bb38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
@@ -56,7 +56,7 @@ nv41_fb = {
 };
 
 int
-nv41_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv41_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nvkm_fb_new_(&nv41_fb, device, index, pfb);
+	return nvkm_fb_new_(&nv41_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
index b59dc486083decb9714550f7cf9bd6c36b62d87d..b1046ee9f0eaf53f4a73aa710caaa56c94306639 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
@@ -65,7 +65,7 @@ nv44_fb = {
 };
 
 int
-nv44_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv44_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nvkm_fb_new_(&nv44_fb, device, index, pfb);
+	return nvkm_fb_new_(&nv44_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
index cab7d20fa03981e3a04754de86e4823db1fd6709..0d78de422dfa823989aaad8bbe0ac30de7764ba5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
@@ -51,7 +51,7 @@ nv46_fb = {
 };
 
 int
-nv46_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv46_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nvkm_fb_new_(&nv46_fb, device, index, pfb);
+	return nvkm_fb_new_(&nv46_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
index a8b0ad4c871db966129c6df0642477b5c6a6db63..5cedde29c8eee93ea36b65affaaa19b038eb8b28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
@@ -39,7 +39,7 @@ nv47_fb = {
 };
 
 int
-nv47_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv47_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nvkm_fb_new_(&nv47_fb, device, index, pfb);
+	return nvkm_fb_new_(&nv47_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
index d0b317bb0252e5b05a6328158ad504b628e4a7dc..95cc099603d8bfb46f666c3c2545410591c36404 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
@@ -39,7 +39,7 @@ nv49_fb = {
 };
 
 int
-nv49_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv49_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nvkm_fb_new_(&nv49_fb, device, index, pfb);
+	return nvkm_fb_new_(&nv49_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
index 6a6f0c0860713c594f2ad65b1e3f9a94fbb9f596..c9f3148f4e75cfd9aee63b6c9c3d185cb588a5ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
@@ -37,7 +37,7 @@ nv4e_fb = {
 };
 
 int
-nv4e_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv4e_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nvkm_fb_new_(&nv4e_fb, device, index, pfb);
+	return nvkm_fb_new_(&nv4e_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index b2f5bf8144eaaa660e085190c950bb9afbda5b2d..95fd8f83401025bdb219b894dcddceb9697a6c70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -262,16 +262,15 @@ nv50_fb_ = {
 
 int
 nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
-	     int index, struct nvkm_fb **pfb)
+	     enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
 	struct nv50_fb *fb;
 
 	if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_fb_ctor(&nv50_fb_, device, index, &fb->base);
+	nvkm_fb_ctor(&nv50_fb_, device, type, inst, &fb->base);
 	fb->func = func;
 	*pfb = &fb->base;
-
 	return 0;
 }
 
@@ -283,7 +282,7 @@ nv50_fb = {
 };
 
 int
-nv50_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv50_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-	return nv50_fb_new_(&nv50_fb, device, index, pfb);
+	return nv50_fb_new_(&nv50_fb, device, type, inst, pfb);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
index 5e2b0c9539ede919ca40d2d93a6e0dc8d5abb7bb..a5e673859a90165533fc935d054428c5afb05f72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
@@ -17,6 +17,6 @@ struct nv50_fb_func {
 	u32 trap;
 };
 
-int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, int index,
+int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
 		 struct nvkm_fb **pfb);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 66932ac10d15c68b65e80d982a90e54873e3d393..3f1be9780c65fdd35c4f4088f254cf72c21d145b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -38,9 +38,9 @@ struct nvkm_fb_func {
 };
 
 void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
-		  int index, struct nvkm_fb *);
+		  enum nvkm_subdev_type type, int inst, struct nvkm_fb *);
 int nvkm_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *device,
-		 int index, struct nvkm_fb **);
+		 enum nvkm_subdev_type type, int inst, struct nvkm_fb **);
 int nvkm_fb_bios_memtype(struct nvkm_bios *);
 
 void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
@@ -78,7 +78,7 @@ int gm200_fb_init_page(struct nvkm_fb *);
 void gp100_fb_init_remapper(struct nvkm_fb *);
 void gp100_fb_init_unkn(struct nvkm_fb *);
 
-int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int,
+int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
 		  struct nvkm_fb **);
 bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
 int gp102_fb_vpr_scrub(struct nvkm_fb *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
index b11867f682cb933fe60996092e29357e4aab8154..03b1bdb27770a62e6dbc2414ccd13ae7e0b0b060 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
@@ -81,12 +81,12 @@ nvkm_vram_dtor(struct nvkm_memory *memory)
 	struct nvkm_vram *vram = nvkm_vram(memory);
 	struct nvkm_mm_node *next = vram->mn;
 	struct nvkm_mm_node *node;
-	mutex_lock(&vram->ram->fb->subdev.mutex);
+	mutex_lock(&vram->ram->mutex);
 	while ((node = next)) {
 		next = node->next;
 		nvkm_mm_free(&vram->ram->vram, &node);
 	}
-	mutex_unlock(&vram->ram->fb->subdev.mutex);
+	mutex_unlock(&vram->ram->mutex);
 	return vram;
 }
 
@@ -126,7 +126,7 @@ nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
 	vram->page = page;
 	*pmemory = &vram->memory;
 
-	mutex_lock(&ram->fb->subdev.mutex);
+	mutex_lock(&ram->mutex);
 	node = &vram->mn;
 	do {
 		if (back)
@@ -134,7 +134,7 @@ nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
 		else
 			ret = nvkm_mm_head(mm, heap, type, max, min, align, &r);
 		if (ret) {
-			mutex_unlock(&ram->fb->subdev.mutex);
+			mutex_unlock(&ram->mutex);
 			nvkm_memory_unref(pmemory);
 			return ret;
 		}
@@ -143,7 +143,7 @@ nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
 		node = &r->next;
 		max -= r->length;
 	} while (max);
-	mutex_unlock(&ram->fb->subdev.mutex);
+	mutex_unlock(&ram->mutex);
 	return 0;
 }
 
@@ -163,6 +163,7 @@ nvkm_ram_del(struct nvkm_ram **pram)
 		if (ram->func->dtor)
 			*pram = ram->func->dtor(ram);
 		nvkm_mm_fini(&ram->vram);
+		mutex_destroy(&ram->mutex);
 		kfree(*pram);
 		*pram = NULL;
 	}
@@ -196,6 +197,7 @@ nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
 	ram->fb = fb;
 	ram->type = type;
 	ram->size = size;
+	mutex_init(&ram->mutex);
 
 	if (!nvkm_mm_initialised(&ram->vram)) {
 		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL, 0,
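
fb/ram.c above makes the same move for VRAM: allocation stops borrowing
the fb subdev's mutex and takes a lock owned by struct nvkm_ram itself,
initialised in nvkm_ram_ctor() and torn down in nvkm_ram_del().  The
critical sections in nvkm_ram_get() and nvkm_vram_dtor() are unchanged;
only the lock object moves.  The allocation side, condensed (error
handling and the multi-node loop elided):

	mutex_lock(&ram->mutex);	/* was: &ram->fb->subdev.mutex */
	ret = nvkm_mm_head(mm, heap, type, max, min, align, &r);
	mutex_unlock(&ram->mutex);
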
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index d350d92852d285ac65293f73224b4a2a02c3e09b..2b678b60b4d3e15e91528bfb789fd2d4f303f741 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -260,7 +260,7 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
 	ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
 	ram_block(fuc);
 
-	if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+	if (ram->base.fb->subdev.device->disp)
 		ram_wr32(fuc, 0x62c000, 0x0f0f0000);
 
 	/* MR1: turn termination on early, for some reason.. */
@@ -661,7 +661,7 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
 
 	ram_unblock(fuc);
 
-	if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+	if (ram->base.fb->subdev.device->disp)
 		ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
 
 	if (next->bios.rammap_11_08_01)
@@ -711,7 +711,7 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
 	ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
 	ram_block(fuc);
 
-	if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+	if (ram->base.fb->subdev.device->disp)
 		ram_wr32(fuc, 0x62c000, 0x0f0f0000);
 
 	if (vc == 1 && ram_have(fuc, gpio2E)) {
@@ -943,7 +943,7 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
 
 	ram_unblock(fuc);
 
-	if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+	if (ram->base.fb->subdev.device->disp)
 		ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
 
 	if (next->bios.rammap_11_08_01)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
index 1c3c18ea8cedfcc8fe1699290e35eaea45252483..375dfce09f8402393086884dd4b1c43e4f87f31d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
@@ -42,12 +42,12 @@ nvkm_fuse = {
 
 int
 nvkm_fuse_new_(const struct nvkm_fuse_func *func, struct nvkm_device *device,
-	       int index, struct nvkm_fuse **pfuse)
+	       enum nvkm_subdev_type type, int inst, struct nvkm_fuse **pfuse)
 {
 	struct nvkm_fuse *fuse;
 	if (!(fuse = *pfuse = kzalloc(sizeof(*fuse), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&nvkm_fuse, device, index, &fuse->subdev);
+	nvkm_subdev_ctor(&nvkm_fuse, device, type, inst, &fuse->subdev);
 	fuse->func = func;
 	spin_lock_init(&fuse->lock);
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
index 13671fedc80593040f488940a84a185e000e92d6..01f770654b1d93e2b5ca1071b2d1a16eeede0f01 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
@@ -47,7 +47,8 @@ gf100_fuse = {
 };
 
 int
-gf100_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
+gf100_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_fuse **pfuse)
 {
-	return nvkm_fuse_new_(&gf100_fuse, device, index, pfuse);
+	return nvkm_fuse_new_(&gf100_fuse, device, type, inst, pfuse);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
index 9aff4ea045060a7d42634ba5990f36a59296b3c4..7dc99492f536e7c90f5d16fc3c1bcd3c77ecd6b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
@@ -36,7 +36,8 @@ gm107_fuse = {
 };
 
 int
-gm107_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
+gm107_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_fuse **pfuse)
 {
-	return nvkm_fuse_new_(&gm107_fuse, device, index, pfuse);
+	return nvkm_fuse_new_(&gm107_fuse, device, type, inst, pfuse);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
index 514c193db25dba1847ffef8a4283801e06d6f0ae..2505e8e1c1d355dc1e22a5e02c66f3eeb7e80f58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
@@ -45,7 +45,8 @@ nv50_fuse = {
 };
 
 int
-nv50_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
+nv50_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_fuse **pfuse)
 {
-	return nvkm_fuse_new_(&nv50_fuse, device, index, pfuse);
+	return nvkm_fuse_new_(&nv50_fuse, device, type, inst, pfuse);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
index 2edc612408dd30e8a2898c0dd7fa0d0a8a693ce1..e83d0c30dff65bf53ce946ee19bd8a35643866ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
@@ -8,6 +8,6 @@ struct nvkm_fuse_func {
 	u32 (*read)(struct nvkm_fuse *, u32 addr);
 };
 
-int nvkm_fuse_new_(const struct nvkm_fuse_func *, struct nvkm_device *,
-		   int index, struct nvkm_fuse **);
+int nvkm_fuse_new_(const struct nvkm_fuse_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		   struct nvkm_fuse **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
index 914276410ef853a8b080e31eb1f09ef4bd4060e1..048bcc70c3f48f97abde6e901843479ff364ac72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
@@ -241,14 +241,14 @@ nvkm_gpio = {
 
 int
 nvkm_gpio_new_(const struct nvkm_gpio_func *func, struct nvkm_device *device,
-	       int index, struct nvkm_gpio **pgpio)
+	       enum nvkm_subdev_type type, int inst, struct nvkm_gpio **pgpio)
 {
 	struct nvkm_gpio *gpio;
 
 	if (!(gpio = *pgpio = kzalloc(sizeof(*gpio), GFP_KERNEL)))
 		return -ENOMEM;
 
-	nvkm_subdev_ctor(&nvkm_gpio, device, index, &gpio->subdev);
+	nvkm_subdev_ctor(&nvkm_gpio, device, type, inst, &gpio->subdev);
 	gpio->func = func;
 
 	return nvkm_event_init(&nvkm_gpio_intr_func, 2, func->lines,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
index 6dcda55fb8655fc77e4077904fb02fdbb7ab7c78..114728ccdf8eef551756d3a9d1f6af4d3ed1e369 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
@@ -68,7 +68,8 @@ g94_gpio = {
 };
 
 int
-g94_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+g94_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_gpio **pgpio)
 {
-	return nvkm_gpio_new_(&g94_gpio, device, index, pgpio);
+	return nvkm_gpio_new_(&g94_gpio, device, type, inst, pgpio);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
index 62c791baf4008dde51f3db198d96835117578cce..4a96f926b66df2758c40b1ac24317b60422389d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
@@ -112,7 +112,8 @@ ga102_gpio = {
 };
 
 int
-ga102_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+ga102_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_gpio **pgpio)
 {
-	return nvkm_gpio_new_(&ga102_gpio, device, index, pgpio);
+	return nvkm_gpio_new_(&ga102_gpio, device, type, inst, pgpio);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
index bb7400dfaef859ef75bae804aca6c8b3c9981ebc..ecb19e4f5c48f01ca5f5cbf7c55e9b85f160b9b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
@@ -80,7 +80,8 @@ gf119_gpio = {
 };
 
 int
-gf119_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+gf119_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_gpio **pgpio)
 {
-	return nvkm_gpio_new_(&gf119_gpio, device, index, pgpio);
+	return nvkm_gpio_new_(&gf119_gpio, device, type, inst, pgpio);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
index 2ead515b853095e5e69a847d47d63e12d98c9e0d..c0e4cdb45520be5b87d3887f2e13b7d47f8fe4cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
@@ -68,7 +68,8 @@ gk104_gpio = {
 };
 
 int
-gk104_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+gk104_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_gpio **pgpio)
 {
-	return nvkm_gpio_new_(&gk104_gpio, device, index, pgpio);
+	return nvkm_gpio_new_(&gk104_gpio, device, type, inst, pgpio);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
index ae3499b48330d5b1ac70a77f4355403f45bc8390..48ad29b5638f5e44f9d31722a850ca4ea813c44a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
@@ -112,7 +112,8 @@ nv10_gpio = {
 };
 
 int
-nv10_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+nv10_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_gpio **pgpio)
 {
-	return nvkm_gpio_new_(&nv10_gpio, device, index, pgpio);
+	return nvkm_gpio_new_(&nv10_gpio, device, type, inst, pgpio);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
index 73923fd5f7f2e7f0c2b499ea9c9b7323e716c1ca..b86c49762f11b4b89e9ccc2744ca853104442dcd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
@@ -126,7 +126,8 @@ nv50_gpio = {
 };
 
 int
-nv50_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+nv50_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_gpio **pgpio)
 {
-	return nvkm_gpio_new_(&nv50_gpio, device, index, pgpio);
+	return nvkm_gpio_new_(&nv50_gpio, device, type, inst, pgpio);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
index 59e39affe2a080b1abb71ade2835cd43dcb7fe16..6590d81164e7a752a737430bd0f1071a2b8a9c12 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
@@ -28,8 +28,8 @@ struct nvkm_gpio_func {
 	void (*reset)(struct nvkm_gpio *, u8);
 };
 
-int nvkm_gpio_new_(const struct nvkm_gpio_func *, struct nvkm_device *,
-		   int index, struct nvkm_gpio **);
+int nvkm_gpio_new_(const struct nvkm_gpio_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		   struct nvkm_gpio **);
 
 void nv50_gpio_reset(struct nvkm_gpio *, u8);
 int  nv50_gpio_drive(struct nvkm_gpio *, int, int, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
index 5a32df0f9992582ff5f9a71fa304605fd510fcef..22574886b81948ce7c4953e657a05ce67f58d42a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -40,20 +40,18 @@ nvkm_gsp = {
 
 int
 nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
-	      int index, struct nvkm_gsp **pgsp)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_gsp **pgsp)
 {
 	struct nvkm_gsp *gsp;
 
 	if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL)))
 		return -ENOMEM;
 
-	nvkm_subdev_ctor(&nvkm_gsp, device, index, &gsp->subdev);
+	nvkm_subdev_ctor(&nvkm_gsp, device, type, inst, &gsp->subdev);
 
 	fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp);
 	if (IS_ERR(fwif))
 		return PTR_ERR(fwif);
 
-	return nvkm_falcon_ctor(fwif->flcn, &gsp->subdev,
-				nvkm_subdev_name[gsp->subdev.index], 0,
-				&gsp->falcon);
+	return nvkm_falcon_ctor(fwif->flcn, &gsp->subdev, gsp->subdev.name, 0, &gsp->falcon);
 }
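
The gsp constructor also shows the naming side of the migration: the falcon label no longer comes from the global `nvkm_subdev_name[]` table keyed by `subdev.index`, but from the subdev itself. Side by side (the "after" line is from this hunk; the per-instance name is presumably composed from type and inst by `nvkm_subdev_ctor()`):

	/* before: global table lookup by index */
	nvkm_falcon_ctor(fwif->flcn, &gsp->subdev,
			 nvkm_subdev_name[gsp->subdev.index], 0, &gsp->falcon);

	/* after: name stored on the subdev at construction time */
	nvkm_falcon_ctor(fwif->flcn, &gsp->subdev, gsp->subdev.name, 0,
			 &gsp->falcon);
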
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
index 2114f9b00a28f0718448c0370fa8b9fe18b825ba..2ac7fc934c09172927251d9d073182d4270e0f90 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
@@ -49,7 +49,8 @@ gv100_gsp[] = {
 };
 
 int
-gv100_gsp_new(struct nvkm_device *device, int index, struct nvkm_gsp **pgsp)
+gv100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_gsp **pgsp)
 {
-	return nvkm_gsp_new_(gv100_gsp, device, index, pgsp);
+	return nvkm_gsp_new_(gv100_gsp, device, type, inst, pgsp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index 92820fb997c1c887a9fcf8849b9b3d32128244a4..19381ddd38d4d5069e2aa6025df9ea0ca4dd96e3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -10,6 +10,6 @@ struct nvkm_gsp_fwif {
 	const struct nvkm_falcon_func *flcn;
 };
 
-int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, int,
+int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
 		  struct nvkm_gsp **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 719345074711144d13b85b7e75ccac2c8eeb0c5b..cb5cb533d91c81d8ee5e5d288bc37bcf811be8c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -277,7 +277,7 @@ nvkm_i2c_drv[] = {
 
 int
 nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_i2c **pi2c)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_i2c **pi2c)
 {
 	struct nvkm_bios *bios = device->bios;
 	struct nvkm_i2c *i2c;
@@ -289,7 +289,7 @@ nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
 	if (!(i2c = *pi2c = kzalloc(sizeof(*i2c), GFP_KERNEL)))
 		return -ENOMEM;
 
-	nvkm_subdev_ctor(&nvkm_i2c, device, index, &i2c->subdev);
+	nvkm_subdev_ctor(&nvkm_i2c, device, type, inst, &i2c->subdev);
 	i2c->func = func;
 	INIT_LIST_HEAD(&i2c->pad);
 	INIT_LIST_HEAD(&i2c->bus);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
index bb2a31d8816111e48b5066c794c126b73608682a..e5bad085c06f01cf53fdfd5624605baeb9d8d69c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
@@ -66,7 +66,8 @@ g94_i2c = {
 };
 
 int
-g94_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+g94_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	    struct nvkm_i2c **pi2c)
 {
-	return nvkm_i2c_new_(&g94_i2c, device, index, pi2c);
+	return nvkm_i2c_new_(&g94_i2c, device, type, inst, pi2c);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
index ae4aad3fcd2e35d2fdc7201dc70cda2f321cb6ef..cda30ee6767d23f0833d938688c023e9dd0ab860 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
@@ -30,7 +30,8 @@ gf117_i2c = {
 };
 
 int
-gf117_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+gf117_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_i2c **pi2c)
 {
-	return nvkm_i2c_new_(&gf117_i2c, device, index, pi2c);
+	return nvkm_i2c_new_(&gf117_i2c, device, type, inst, pi2c);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
index 6f2b02af42c839a73ea07ca900a375e841bde5ae..e9c6a6cca09db39aae4b4308bf0d02b4e7c307a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
@@ -34,7 +34,8 @@ gf119_i2c = {
 };
 
 int
-gf119_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+gf119_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_i2c **pi2c)
 {
-	return nvkm_i2c_new_(&gf119_i2c, device, index, pi2c);
+	return nvkm_i2c_new_(&gf119_i2c, device, type, inst, pi2c);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
index f9f6bf4b66c97ab755589bfa52067106e3468d62..d35aa6fe301564d61f57dfea73ffde6b512c9206 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
@@ -66,7 +66,8 @@ gk104_i2c = {
 };
 
 int
-gk104_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+gk104_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_i2c **pi2c)
 {
-	return nvkm_i2c_new_(&gk104_i2c, device, index, pi2c);
+	return nvkm_i2c_new_(&gk104_i2c, device, type, inst, pi2c);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
index 8e3bfa1af52a2d3c09211d3e7382aa24f2deeda4..9fec6af56e07a1d4a4b55ad9d17036a435956a70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
@@ -39,7 +39,8 @@ gk110_i2c = {
 };
 
 int
-gk110_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+gk110_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_i2c **pi2c)
 {
-	return nvkm_i2c_new_(&gk110_i2c, device, index, pi2c);
+	return nvkm_i2c_new_(&gk110_i2c, device, type, inst, pi2c);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
index 7b2375bff8a9cc8cc9e43ed0c945db02a8fe75fe..46917eb600f9dd3b929f53cf3feeb0b8c3684d47 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
@@ -41,7 +41,8 @@ gm200_i2c = {
 };
 
 int
-gm200_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+gm200_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_i2c **pi2c)
 {
-	return nvkm_i2c_new_(&gm200_i2c, device, index, pi2c);
+	return nvkm_i2c_new_(&gm200_i2c, device, type, inst, pi2c);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
index 18776f49355c341e241530c2e68b27bfaa6bde19..ecfcf147c789b04c34a26062c282666fd2e36137 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
@@ -30,7 +30,8 @@ nv04_i2c = {
 };
 
 int
-nv04_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+nv04_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_i2c **pi2c)
 {
-	return nvkm_i2c_new_(&nv04_i2c, device, index, pi2c);
+	return nvkm_i2c_new_(&nv04_i2c, device, type, inst, pi2c);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
index 6b762f7cee9eeec4557175493e520e7050601ca1..ad1d3fd2bcbc38806fa920081df71608821dc2e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
@@ -30,7 +30,8 @@ nv4e_i2c = {
 };
 
 int
-nv4e_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+nv4e_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_i2c **pi2c)
 {
-	return nvkm_i2c_new_(&nv4e_i2c, device, index, pi2c);
+	return nvkm_i2c_new_(&nv4e_i2c, device, type, inst, pi2c);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
index 75640ab97d6acbefee6c0355f6edf40dfb1677f0..2f94bed2c0561574b0f9f213ccad49fd61048045 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
@@ -30,7 +30,8 @@ nv50_i2c = {
 };
 
 int
-nv50_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+nv50_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_i2c **pi2c)
 {
-	return nvkm_i2c_new_(&nv50_i2c, device, index, pi2c);
+	return nvkm_i2c_new_(&nv50_i2c, device, type, inst, pi2c);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
index e35f6036fcfcb85edd2d652cec50e858d72a4dfb..f9d79f72f7e7c77180ad3defbff202faaadcbb1b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
@@ -4,8 +4,8 @@
 #define nvkm_i2c(p) container_of((p), struct nvkm_i2c, subdev)
 #include <subdev/i2c.h>
 
-int nvkm_i2c_new_(const struct nvkm_i2c_func *, struct nvkm_device *,
-		  int index, struct nvkm_i2c **);
+int nvkm_i2c_new_(const struct nvkm_i2c_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		  struct nvkm_i2c **);
 
 struct nvkm_i2c_func {
 	int (*pad_x_new)(struct nvkm_i2c *, int id, struct nvkm_i2c_pad **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
deleted file mode 100644
index 127efb51f67df351532d8ef0328ae248674dbd6c..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/subdev/ibus/gf100.o
-nvkm-y += nvkm/subdev/ibus/gf117.o
-nvkm-y += nvkm/subdev/ibus/gk104.o
-nvkm-y += nvkm/subdev/ibus/gk20a.o
-nvkm-y += nvkm/subdev/ibus/gm200.o
-nvkm-y += nvkm/subdev/ibus/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
deleted file mode 100644
index 302d69e384d8a4fbd4762269bf2baf2cb6e6971e..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_IBUS_PRIV_H__
-#define __NVKM_IBUS_PRIV_H__
-
-#include <subdev/ibus.h>
-
-void gf100_ibus_intr(struct nvkm_subdev *);
-void gk104_ibus_intr(struct nvkm_subdev *);
-#endif
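
The ibus directory is deleted outright here. Judging from the `NVKM_SUBDEV_IBUS` to `NVKM_SUBDEV_PRIVRING` switches in the mc interrupt maps later in this patch, the subdev appears to have been renamed to "privring"; the replacement files are not part of this excerpt, but the Kbuild entries would presumably move along these lines:

	# assumed new location, not shown in this excerpt
	nvkm-y += nvkm/subdev/privring/gf100.o
	nvkm-y += nvkm/subdev/privring/gk104.o
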
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
index fecfa6afcf54e8548079e7ce494dfa8871cb6d29..8f0ccd3664eb6c7c6ce5aff42b3043c64a109965 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -312,20 +312,20 @@ iccsense_func = {
 };
 
 void
-nvkm_iccsense_ctor(struct nvkm_device *device, int index,
+nvkm_iccsense_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		   struct nvkm_iccsense *iccsense)
 {
-	nvkm_subdev_ctor(&iccsense_func, device, index, &iccsense->subdev);
+	nvkm_subdev_ctor(&iccsense_func, device, type, inst, &iccsense->subdev);
 }
 
 int
-nvkm_iccsense_new_(struct nvkm_device *device, int index,
+nvkm_iccsense_new_(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		   struct nvkm_iccsense **iccsense)
 {
 	if (!(*iccsense = kzalloc(sizeof(**iccsense), GFP_KERNEL)))
 		return -ENOMEM;
 	INIT_LIST_HEAD(&(*iccsense)->sensors);
 	INIT_LIST_HEAD(&(*iccsense)->rails);
-	nvkm_iccsense_ctor(device, index, *iccsense);
+	nvkm_iccsense_ctor(device, type, inst, *iccsense);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c
index cccff1c8a409cbe9c7138cc4d3ca56f5692c654b..3eabf49443951de159b86f58d7cf6be645d078a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c
@@ -24,8 +24,8 @@
 #include "priv.h"
 
 int
-gf100_iccsense_new(struct nvkm_device *device, int index,
+gf100_iccsense_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		   struct nvkm_iccsense **piccsense)
 {
-	return nvkm_iccsense_new_(device, index, piccsense);
+	return nvkm_iccsense_new_(device, type, inst, piccsense);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
index cc09c6c504af7880134ec223e5b2c0fd3c6e1480..c33441124241cc4bdfc3017e5ecc436102726b59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
@@ -22,6 +22,6 @@ struct nvkm_iccsense_rail {
 	u8 mohm;
 };
 
-void nvkm_iccsense_ctor(struct nvkm_device *, int, struct nvkm_iccsense *);
-int nvkm_iccsense_new_(struct nvkm_device *, int, struct nvkm_iccsense **);
+void nvkm_iccsense_ctor(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_iccsense *);
+int nvkm_iccsense_new_(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_iccsense **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 364ea4492acc8f0336d3e179a84ec65b1f43184b..cd8163a52bb65c6bf4ba04d45b0a4b01089bc94f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -218,9 +218,11 @@ static void *
 nvkm_instmem_dtor(struct nvkm_subdev *subdev)
 {
 	struct nvkm_instmem *imem = nvkm_instmem(subdev);
+	void *data = imem;
 	if (imem->func->dtor)
-		return imem->func->dtor(imem);
-	return imem;
+		data = imem->func->dtor(imem);
+	mutex_destroy(&imem->mutex);
+	return data;
 }
 
 static const struct nvkm_subdev_func
@@ -232,13 +234,13 @@ nvkm_instmem = {
 };
 
 void
-nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
-		  struct nvkm_device *device, int index,
-		  struct nvkm_instmem *imem)
+nvkm_instmem_ctor(const struct nvkm_instmem_func *func, struct nvkm_device *device,
+		  enum nvkm_subdev_type type, int inst, struct nvkm_instmem *imem)
 {
-	nvkm_subdev_ctor(&nvkm_instmem, device, index, &imem->subdev);
+	nvkm_subdev_ctor(&nvkm_instmem, device, type, inst, &imem->subdev);
 	imem->func = func;
 	spin_lock_init(&imem->lock);
 	INIT_LIST_HEAD(&imem->list);
 	INIT_LIST_HEAD(&imem->boot);
+	mutex_init(&imem->mutex);
 }
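
instmem also gains its own lock here: the ctor now does `mutex_init(&imem->mutex)` and the dtor pairs it with `mutex_destroy()`, letting the nv04/nv40/nv50 paths below stop borrowing the coarse `subdev.mutex` (ltc and mmu get the same treatment further down). A minimal usage sketch, assuming `struct nvkm_instmem` grew a `struct mutex mutex` member in its public header:

	/* heap manipulation now serializes on the instmem-local mutex */
	mutex_lock(&imem->base.mutex);
	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
			   align ? align : 1, &iobj->node);
	mutex_unlock(&imem->base.mutex);
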
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 13d4d7ac0697b474db64450c42e097b1cfaec91d..648ecf5a8fbc2a298db0758438f926628024ed36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -568,7 +568,7 @@ gk20a_instmem = {
 };
 
 int
-gk20a_instmem_new(struct nvkm_device *device, int index,
+gk20a_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		  struct nvkm_instmem **pimem)
 {
 	struct nvkm_device_tegra *tdev = device->func->tegra(device);
@@ -576,7 +576,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
 
 	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
+	nvkm_instmem_ctor(&gk20a_instmem, device, type, inst, &imem->base);
 	mutex_init(&imem->lock);
 	*pimem = &imem->base;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 6bf0dad469195a65efaa07b7e588922229e2d0d4..25603b01d6f8421a1bb01d73559a1dfb87b0e2d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -99,9 +99,9 @@ static void *
 nv04_instobj_dtor(struct nvkm_memory *memory)
 {
 	struct nv04_instobj *iobj = nv04_instobj(memory);
-	mutex_lock(&iobj->imem->base.subdev.mutex);
+	mutex_lock(&iobj->imem->base.mutex);
 	nvkm_mm_free(&iobj->imem->heap, &iobj->node);
-	mutex_unlock(&iobj->imem->base.subdev.mutex);
+	mutex_unlock(&iobj->imem->base.mutex);
 	nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
 	return iobj;
 }
@@ -132,10 +132,9 @@ nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 	iobj->base.memory.ptrs = &nv04_instobj_ptrs;
 	iobj->imem = imem;
 
-	mutex_lock(&imem->base.subdev.mutex);
-	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
-			   align ? align : 1, &iobj->node);
-	mutex_unlock(&imem->base.subdev.mutex);
+	mutex_lock(&imem->base.mutex);
+	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node);
+	mutex_unlock(&imem->base.mutex);
 	return ret;
 }
 
@@ -218,14 +217,14 @@ nv04_instmem = {
 };
 
 int
-nv04_instmem_new(struct nvkm_device *device, int index,
+nv04_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		 struct nvkm_instmem **pimem)
 {
 	struct nv04_instmem *imem;
 
 	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_instmem_ctor(&nv04_instmem, device, index, &imem->base);
+	nvkm_instmem_ctor(&nv04_instmem, device, type, inst, &imem->base);
 	*pimem = &imem->base;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index 086c118488ef5fa937b0d7eccf5e023b07349e33..6b462f9609226e1c63a72f5cc5bf7bca73d81385 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -99,9 +99,9 @@ static void *
 nv40_instobj_dtor(struct nvkm_memory *memory)
 {
 	struct nv40_instobj *iobj = nv40_instobj(memory);
-	mutex_lock(&iobj->imem->base.subdev.mutex);
+	mutex_lock(&iobj->imem->base.mutex);
 	nvkm_mm_free(&iobj->imem->heap, &iobj->node);
-	mutex_unlock(&iobj->imem->base.subdev.mutex);
+	mutex_unlock(&iobj->imem->base.mutex);
 	nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
 	return iobj;
 }
@@ -132,10 +132,9 @@ nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 	iobj->base.memory.ptrs = &nv40_instobj_ptrs;
 	iobj->imem = imem;
 
-	mutex_lock(&imem->base.subdev.mutex);
-	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
-			   align ? align : 1, &iobj->node);
-	mutex_unlock(&imem->base.subdev.mutex);
+	mutex_lock(&imem->base.mutex);
+	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node);
+	mutex_unlock(&imem->base.mutex);
 	return ret;
 }
 
@@ -236,7 +235,7 @@ nv40_instmem = {
 };
 
 int
-nv40_instmem_new(struct nvkm_device *device, int index,
+nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		 struct nvkm_instmem **pimem)
 {
 	struct nv40_instmem *imem;
@@ -244,7 +243,7 @@ nv40_instmem_new(struct nvkm_device *device, int index,
 
 	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_instmem_ctor(&nv40_instmem, device, index, &imem->base);
+	nvkm_instmem_ctor(&nv40_instmem, device, type, inst, &imem->base);
 	*pimem = &imem->base;
 
 	/* map bar */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 02c4eb28cef44db11ef989f34cab124b74d4f84b..96aca0edfa3c04f4cd2ff76d0853d87785e6470e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -133,12 +133,12 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 	 * into it.  The lock has to be dropped while doing this due
 	 * to the possibility of recursion for page table allocation.
 	 */
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&imem->base.mutex);
 	while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
 		/* Evict unused mappings, and keep retrying until we either
 		 * succeed, or there are no more objects left on the LRU.
 		 */
-		mutex_lock(&subdev->mutex);
+		mutex_lock(&imem->base.mutex);
 		eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru);
 		if (eobj) {
 			nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
@@ -151,7 +151,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 			emap = eobj->map;
 			eobj->map = NULL;
 		}
-		mutex_unlock(&subdev->mutex);
+		mutex_unlock(&imem->base.mutex);
 		if (!eobj)
 			break;
 		iounmap(emap);
@@ -160,12 +160,12 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 
 	if (ret == 0)
 		ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&imem->base.mutex);
 	if (ret || iobj->bar) {
 		/* We either failed, or another thread beat us. */
-		mutex_unlock(&subdev->mutex);
+		mutex_unlock(&imem->base.mutex);
 		nvkm_vmm_put(vmm, &bar);
-		mutex_lock(&subdev->mutex);
+		mutex_lock(&imem->base.mutex);
 		return;
 	}
 
@@ -197,7 +197,7 @@ nv50_instobj_release(struct nvkm_memory *memory)
 	wmb();
 	nvkm_bar_flush(subdev->device->bar);
 
-	if (refcount_dec_and_mutex_lock(&iobj->maps, &subdev->mutex)) {
+	if (refcount_dec_and_mutex_lock(&iobj->maps, &imem->base.mutex)) {
 		/* Add the now-unused mapping to the LRU instead of directly
 		 * unmapping it here, in case we need to map it again later.
 		 */
@@ -208,7 +208,7 @@ nv50_instobj_release(struct nvkm_memory *memory)
 
 		/* Switch back to NULL accessors when last map is gone. */
 		iobj->base.memory.ptrs = NULL;
-		mutex_unlock(&subdev->mutex);
+		mutex_unlock(&imem->base.mutex);
 	}
 }
 
@@ -227,9 +227,9 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
 	/* Take the lock, and re-check that another thread hasn't
 	 * already mapped the object in the meantime.
 	 */
-	mutex_lock(&imem->subdev.mutex);
+	mutex_lock(&imem->mutex);
 	if (refcount_inc_not_zero(&iobj->maps)) {
-		mutex_unlock(&imem->subdev.mutex);
+		mutex_unlock(&imem->mutex);
 		return iobj->map;
 	}
 
@@ -252,7 +252,7 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
 		refcount_set(&iobj->maps, 1);
 	}
 
-	mutex_unlock(&imem->subdev.mutex);
+	mutex_unlock(&imem->mutex);
 	return map;
 }
 
@@ -265,7 +265,7 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
 	/* Exclude bootstrapped objects (ie. the page tables for the
 	 * instmem BAR itself) from eviction.
 	 */
-	mutex_lock(&imem->subdev.mutex);
+	mutex_lock(&imem->mutex);
 	if (likely(iobj->lru.next)) {
 		list_del_init(&iobj->lru);
 		iobj->lru.next = NULL;
@@ -273,7 +273,7 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
 
 	nv50_instobj_kmap(iobj, vmm);
 	nvkm_instmem_boot(imem);
-	mutex_unlock(&imem->subdev.mutex);
+	mutex_unlock(&imem->mutex);
 }
 
 static u64
@@ -315,12 +315,12 @@ nv50_instobj_dtor(struct nvkm_memory *memory)
 	struct nvkm_vma *bar;
 	void *map = map;
 
-	mutex_lock(&imem->subdev.mutex);
+	mutex_lock(&imem->mutex);
 	if (likely(iobj->lru.next))
 		list_del(&iobj->lru);
 	map = iobj->map;
 	bar = iobj->bar;
-	mutex_unlock(&imem->subdev.mutex);
+	mutex_unlock(&imem->mutex);
 
 	if (map) {
 		struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);
@@ -386,14 +386,14 @@ nv50_instmem = {
 };
 
 int
-nv50_instmem_new(struct nvkm_device *device, int index,
+nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		 struct nvkm_instmem **pimem)
 {
 	struct nv50_instmem *imem;
 
 	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
+	nvkm_instmem_ctor(&nv50_instmem, device, type, inst, &imem->base);
 	INIT_LIST_HEAD(&imem->lru);
 	*pimem = &imem->base;
 	return 0;
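
The nv50 kmap path is the subtle case for the new lock: it is dropped around `nvkm_vmm_get()` because BAR2 allocation can recurse into instmem for page tables, and eviction victims are pulled off the LRU under the lock but unmapped outside it. Schematically (`detach_mapping()` is a hypothetical helper; the shape matches the loop above):

	mutex_unlock(&imem->base.mutex);	/* allocation may recurse */
	while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
		mutex_lock(&imem->base.mutex);
		eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru);
		if (eobj)
			detach_mapping(eobj, &emap, &ebar); /* hypothetical */
		mutex_unlock(&imem->base.mutex);
		if (!eobj)
			break;			/* nothing left to evict */
		iounmap(emap);			/* unmap outside the lock */
		nvkm_vmm_put(vmm, &ebar);
	}
	mutex_lock(&imem->base.mutex);
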
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index f5da8fcbdde35a9be8fff8fec01b312ba429e97d..56c15e30a5dd70aceca31fb42f8ee643c88ebe2d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -16,7 +16,7 @@ struct nvkm_instmem_func {
 };
 
 void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
-		       int index, struct nvkm_instmem *);
+		       enum nvkm_subdev_type, int, struct nvkm_instmem *);
 void nvkm_instmem_boot(struct nvkm_instmem *);
 
 #include <core/memory.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 23242179e60051ec48a4a3ba167df8016a15b234..fa683c1907955f2c561cfb6bd90787e62f464ae6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -33,10 +33,10 @@ nvkm_ltc_tags_clear(struct nvkm_device *device, u32 first, u32 count)
 
 	BUG_ON((first > limit) || (limit >= ltc->num_tags));
 
-	mutex_lock(&ltc->subdev.mutex);
+	mutex_lock(&ltc->mutex);
 	ltc->func->cbc_clear(ltc, first, limit);
 	ltc->func->cbc_wait(ltc);
-	mutex_unlock(&ltc->subdev.mutex);
+	mutex_unlock(&ltc->mutex);
 }
 
 int
@@ -113,6 +113,7 @@ nvkm_ltc_dtor(struct nvkm_subdev *subdev)
 {
 	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
 	nvkm_memory_unref(&ltc->tag_ram);
+	mutex_destroy(&ltc->mutex);
 	return ltc;
 }
 
@@ -126,15 +127,16 @@ nvkm_ltc = {
 
 int
 nvkm_ltc_new_(const struct nvkm_ltc_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_ltc **pltc)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_ltc **pltc)
 {
 	struct nvkm_ltc *ltc;
 
 	if (!(ltc = *pltc = kzalloc(sizeof(*ltc), GFP_KERNEL)))
 		return -ENOMEM;
 
-	nvkm_subdev_ctor(&nvkm_ltc, device, index, &ltc->subdev);
+	nvkm_subdev_ctor(&nvkm_ltc, device, type, inst, &ltc->subdev);
 	ltc->func = func;
+	mutex_init(&ltc->mutex);
 	ltc->zbc_min = 1; /* reserve 0 for disabled */
 	ltc->zbc_max = min(func->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index a21ef45b857284b26da51998ab759cd8ff172785..fd8aeafc812d4e0a39426fe8b14e07368bc20811 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -200,8 +200,8 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
 	}
 
 mm_init:
-	nvkm_mm_fini(&fb->tags);
-	return nvkm_mm_init(&fb->tags, 0, 0, ltc->num_tags, 1);
+	nvkm_mm_fini(&fb->tags.mm);
+	return nvkm_mm_init(&fb->tags.mm, 0, 0, ltc->num_tags, 1);
 }
 
 int
@@ -249,7 +249,8 @@ gf100_ltc = {
 };
 
 int
-gf100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gf100_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_ltc **pltc)
 {
-	return nvkm_ltc_new_(&gf100_ltc, device, index, pltc);
+	return nvkm_ltc_new_(&gf100_ltc, device, type, inst, pltc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
index b4f6e0034d5889677f00c0756e1d09c04c772d57..94aa09244d67379cf176d6fe660b14ebd03c058f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
@@ -50,7 +50,8 @@ gk104_ltc = {
 };
 
 int
-gk104_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gk104_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_ltc **pltc)
 {
-	return nvkm_ltc_new_(&gk104_ltc, device, index, pltc);
+	return nvkm_ltc_new_(&gk104_ltc, device, type, inst, pltc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index ec0a3844b2d14995e342e0fdaa9fa638bdc17de8..54d1d65d5a858d4f3072a266657bb801f6d3c681 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -145,7 +145,8 @@ gm107_ltc = {
 };
 
 int
-gm107_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gm107_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_ltc **pltc)
 {
-	return nvkm_ltc_new_(&gm107_ltc, device, index, pltc);
+	return nvkm_ltc_new_(&gm107_ltc, device, type, inst, pltc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
index e18e0dc19ec8b1f988c2ea30ec4e5d274af0b59f..8cfdbbdd8e8d8862e565100c31ab675e15da0681 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
@@ -57,7 +57,8 @@ gm200_ltc = {
 };
 
 int
-gm200_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gm200_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_ltc **pltc)
 {
-	return nvkm_ltc_new_(&gm200_ltc, device, index, pltc);
+	return nvkm_ltc_new_(&gm200_ltc, device, type, inst, pltc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
index e923ed76d37ae088efcc5d7888eaf5b30ea9b1a0..a4a6cd9b435adad7c893b30c003b2f696b51d8f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
@@ -69,7 +69,8 @@ gp100_ltc = {
 };
 
 int
-gp100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gp100_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_ltc **pltc)
 {
-	return nvkm_ltc_new_(&gp100_ltc, device, index, pltc);
+	return nvkm_ltc_new_(&gp100_ltc, device, type, inst, pltc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
index 601747ada6555e2be7252c2348e1ff905d0afb6e..ff05d617e7f453b1ef3895261bef81b5d4824054 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
@@ -45,7 +45,8 @@ gp102_ltc = {
 };
 
 int
-gp102_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gp102_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_ltc **pltc)
 {
-	return nvkm_ltc_new_(&gp102_ltc, device, index, pltc);
+	return nvkm_ltc_new_(&gp102_ltc, device, type, inst, pltc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
index c0063c7caa50a678939ee608cc205c3aa7e9bf35..dfebd796cb4ba1febd700c659296c75c19a12ec4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
@@ -59,7 +59,8 @@ gp10b_ltc = {
 };
 
 int
-gp10b_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gp10b_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_ltc **pltc)
 {
-	return nvkm_ltc_new_(&gp10b_ltc, device, index, pltc);
+	return nvkm_ltc_new_(&gp10b_ltc, device, type, inst, pltc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index eca5a711b1b83bb870561baa22f88c4e475c9212..2bebe139005d95a045e5279b5ecbaea257d3eb64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -5,8 +5,8 @@
 #include <subdev/ltc.h>
 #include <core/enum.h>
 
-int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *,
-		  int index, struct nvkm_ltc **);
+int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		  struct nvkm_ltc **);
 
 struct nvkm_ltc_func {
 	int  (*oneinit)(struct nvkm_ltc *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 0e57ab2a709f4376eb0a96c324ea25f2ba2ae1c4..21c4af3f81d5fc7a4fe3a2c4512b3d57e5bc8cc2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -35,14 +35,14 @@ nvkm_mc_unk260(struct nvkm_device *device, u32 data)
 }
 
 void
-nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx, bool en)
+nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, bool en)
 {
 	struct nvkm_mc *mc = device->mc;
 	const struct nvkm_mc_map *map;
 	if (likely(mc) && mc->func->intr_mask) {
-		u32 mask = nvkm_top_intr_mask(device, devidx);
+		u32 mask = nvkm_top_intr_mask(device, type, inst);
 		for (map = mc->func->intr; !mask && map->stat; map++) {
-			if (map->unit == devidx)
+			if (map->type == type && map->inst == inst)
 				mask = map->stat;
 		}
 		mc->func->intr_mask(mc, mask, en ? mask : 0);
@@ -78,27 +78,34 @@ void
 nvkm_mc_intr(struct nvkm_device *device, bool *handled)
 {
 	struct nvkm_mc *mc = device->mc;
+	struct nvkm_top *top = device->top;
+	struct nvkm_top_device *tdev;
 	struct nvkm_subdev *subdev;
 	const struct nvkm_mc_map *map;
 	u32 stat, intr;
-	u64 subdevs;
 
 	if (unlikely(!mc))
 		return;
 
-	intr = nvkm_mc_intr_stat(mc);
-	stat = nvkm_top_intr(device, intr, &subdevs);
-	while (subdevs) {
-		enum nvkm_devidx subidx = __ffs64(subdevs);
-		subdev = nvkm_device_subdev(device, subidx);
-		if (subdev)
-			nvkm_subdev_intr(subdev);
-		subdevs &= ~BIT_ULL(subidx);
+	stat = intr = nvkm_mc_intr_stat(mc);
+
+	if (top) {
+		list_for_each_entry(tdev, &top->device, head) {
+			if (tdev->intr >= 0 && (stat & BIT(tdev->intr))) {
+				subdev = nvkm_device_subdev(device, tdev->type, tdev->inst);
+				if (subdev) {
+					nvkm_subdev_intr(subdev);
+					stat &= ~BIT(tdev->intr);
+					if (!stat)
+						break;
+				}
+			}
+		}
 	}
 
 	for (map = mc->func->intr; map->stat; map++) {
 		if (intr & map->stat) {
-			subdev = nvkm_device_subdev(device, map->unit);
+			subdev = nvkm_device_subdev(device, map->type, map->inst);
 			if (subdev)
 				nvkm_subdev_intr(subdev);
 			stat &= ~map->stat;
@@ -108,23 +115,19 @@ nvkm_mc_intr(struct nvkm_device *device, bool *handled)
 	if (stat)
 		nvkm_error(&mc->subdev, "intr %08x\n", stat);
 	*handled = intr != 0;
-
-	if (mc->func->intr_hack)
-		mc->func->intr_hack(mc, handled);
 }
 
 static u32
-nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto,
-		   enum nvkm_devidx devidx)
+nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto, enum nvkm_subdev_type type, int inst)
 {
 	struct nvkm_mc *mc = device->mc;
 	const struct nvkm_mc_map *map;
 	u64 pmc_enable = 0;
 	if (likely(mc)) {
-		if (!(pmc_enable = nvkm_top_reset(device, devidx))) {
+		if (!(pmc_enable = nvkm_top_reset(device, type, inst))) {
 			for (map = mc->func->reset; map && map->stat; map++) {
 				if (!isauto || !map->noauto) {
-					if (map->unit == devidx) {
+					if (map->type == type && map->inst == inst) {
 						pmc_enable = map->stat;
 						break;
 					}
@@ -136,9 +139,9 @@ nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto,
 }
 
 void
-nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_mc_reset(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
 {
-	u64 pmc_enable = nvkm_mc_reset_mask(device, true, devidx);
+	u64 pmc_enable = nvkm_mc_reset_mask(device, true, type, inst);
 	if (pmc_enable) {
 		nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
 		nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
@@ -147,17 +150,17 @@ nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx)
 }
 
 void
-nvkm_mc_disable(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_mc_disable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
 {
-	u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+	u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst);
 	if (pmc_enable)
 		nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
 }
 
 void
-nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_mc_enable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
 {
-	u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+	u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst);
 	if (pmc_enable) {
 		nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
 		nvkm_rd32(device, 0x000200);
@@ -165,9 +168,9 @@ nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx)
 }
 
 bool
-nvkm_mc_enabled(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_mc_enabled(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
 {
-	u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+	u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst);
 
 	return (pmc_enable != 0) &&
 	       ((nvkm_rd32(device, 0x000200) & pmc_enable) == pmc_enable);
@@ -206,19 +209,19 @@ nvkm_mc = {
 
 void
 nvkm_mc_ctor(const struct nvkm_mc_func *func, struct nvkm_device *device,
-	     int index, struct nvkm_mc *mc)
+	     enum nvkm_subdev_type type, int inst, struct nvkm_mc *mc)
 {
-	nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
+	nvkm_subdev_ctor(&nvkm_mc, device, type, inst, &mc->subdev);
 	mc->func = func;
 }
 
 int
 nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
-	     int index, struct nvkm_mc **pmc)
+	     enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
 	struct nvkm_mc *mc;
 	if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_mc_ctor(func, device, index, *pmc);
+	nvkm_mc_ctor(func, device, type, inst, *pmc);
 	return 0;
 }
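
With `nvkm_mc_map` carrying a `(type, inst)` pair, `nvkm_mc_intr()` now dispatches by walking the `nvkm_top` device list first and only falls back to the static per-chip table, and one table row can exist per engine instance. An illustrative row set in the style of the gf100 table above (values taken from this patch; reset tables additionally take the trailing `noauto` bool, as in `{ 0x00002000, NVKM_SUBDEV_PMU, 0, true }`):

	static const struct nvkm_mc_map example_intr[] = {
		{ 0x00000040, NVKM_ENGINE_CE, 1 },	/* bit 6: copy engine 1 */
		{ 0x00000020, NVKM_ENGINE_CE, 0 },	/* bit 5: copy engine 0 */
		{}
	};
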
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
index 430a61c3df4450bf6cd1a5e69034705a2ccacec6..4cfc1c98400687e09859b342aa6e5ed87114e837 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
@@ -62,7 +62,7 @@ g84_mc = {
 };
 
 int
-g84_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+g84_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
-	return nvkm_mc_new_(&g84_mc, device, index, pmc);
+	return nvkm_mc_new_(&g84_mc, device, type, inst, pmc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
index 93ad4982ce5f656544ffdf3ca16f886a32a8f8c2..b7e58d75d894b0a28fe4f6c380376377807bac59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
@@ -62,7 +62,7 @@ g98_mc = {
 };
 
 int
-g98_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+g98_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
-	return nvkm_mc_new_(&g98_mc, device, index, pmc);
+	return nvkm_mc_new_(&g98_mc, device, type, inst, pmc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
index 967eb3af11eb00efd775b15d973f551a9e427876..4105175dfccd66bd3eae25914f754b3d0fbcf1bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
@@ -68,7 +68,7 @@ ga100_mc = {
 };
 
 int
-ga100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+ga100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
-	return nvkm_mc_new_(&ga100_mc, device, index, pmc);
+	return nvkm_mc_new_(&ga100_mc, device, type, inst, pmc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
index f93766418056aa67d629222c7423425dacf23c4f..3a589c6f7fad62c6f29511befc3558ab6618bd1d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
@@ -27,11 +27,11 @@ static const struct nvkm_mc_map
 gf100_mc_reset[] = {
 	{ 0x00020000, NVKM_ENGINE_MSPDEC },
 	{ 0x00008000, NVKM_ENGINE_MSVLD },
-	{ 0x00002000, NVKM_SUBDEV_PMU, true },
+	{ 0x00002000, NVKM_SUBDEV_PMU, 0, true },
 	{ 0x00001000, NVKM_ENGINE_GR },
 	{ 0x00000100, NVKM_ENGINE_FIFO },
-	{ 0x00000080, NVKM_ENGINE_CE1 },
-	{ 0x00000040, NVKM_ENGINE_CE0 },
+	{ 0x00000080, NVKM_ENGINE_CE, 1 },
+	{ 0x00000040, NVKM_ENGINE_CE, 0 },
 	{ 0x00000002, NVKM_ENGINE_MSPPP },
 	{}
 };
@@ -43,10 +43,10 @@ gf100_mc_intr[] = {
 	{ 0x00008000, NVKM_ENGINE_MSVLD },
 	{ 0x00001000, NVKM_ENGINE_GR },
 	{ 0x00000100, NVKM_ENGINE_FIFO },
-	{ 0x00000040, NVKM_ENGINE_CE1 },
-	{ 0x00000020, NVKM_ENGINE_CE0 },
+	{ 0x00000040, NVKM_ENGINE_CE, 1 },
+	{ 0x00000020, NVKM_ENGINE_CE, 0 },
 	{ 0x00000001, NVKM_ENGINE_MSPPP },
-	{ 0x40000000, NVKM_SUBDEV_IBUS },
+	{ 0x40000000, NVKM_SUBDEV_PRIVRING },
 	{ 0x10000000, NVKM_SUBDEV_BUS },
 	{ 0x08000000, NVKM_SUBDEV_FB },
 	{ 0x02000000, NVKM_SUBDEV_LTC },
@@ -112,7 +112,7 @@ gf100_mc = {
 };
 
 int
-gf100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gf100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
-	return nvkm_mc_new_(&gf100_mc, device, index, pmc);
+	return nvkm_mc_new_(&gf100_mc, device, type, inst, pmc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
index 7b8c6ecad1a5d544d7012d1577cc7f8b03f8b06c..d9b9067fa93f5c33fa0b9215423b53b260f4b636 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
@@ -26,7 +26,7 @@
 const struct nvkm_mc_map
 gk104_mc_reset[] = {
 	{ 0x00000100, NVKM_ENGINE_FIFO },
-	{ 0x00002000, NVKM_SUBDEV_PMU, true },
+	{ 0x00002000, NVKM_SUBDEV_PMU, 0, true },
 	{}
 };
 
@@ -34,7 +34,7 @@ const struct nvkm_mc_map
 gk104_mc_intr[] = {
 	{ 0x04000000, NVKM_ENGINE_DISP },
 	{ 0x00000100, NVKM_ENGINE_FIFO },
-	{ 0x40000000, NVKM_SUBDEV_IBUS },
+	{ 0x40000000, NVKM_SUBDEV_PRIVRING },
 	{ 0x10000000, NVKM_SUBDEV_BUS },
 	{ 0x08000000, NVKM_SUBDEV_FB },
 	{ 0x02000000, NVKM_SUBDEV_LTC },
@@ -60,7 +60,7 @@ gk104_mc = {
 };
 
 int
-gk104_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gk104_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
-	return nvkm_mc_new_(&gk104_mc, device, index, pmc);
+	return nvkm_mc_new_(&gk104_mc, device, type, inst, pmc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
index ca1bf3279dbe9a046c958cfe744e7f60fd07cfa8..03590292749ac0434ca727a8d8509bdf80885d73 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
@@ -35,7 +35,7 @@ gk20a_mc = {
 };
 
 int
-gk20a_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gk20a_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
-	return nvkm_mc_new_(&gk20a_mc, device, index, pmc);
+	return nvkm_mc_new_(&gk20a_mc, device, type, inst, pmc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
index 43db245eec9af667902a0d0e81404a52f653064d..5fd1a0595c334c32c2d22df37e7b3dca81989b6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
@@ -80,7 +80,7 @@ gp100_mc_intr[] = {
 	{ 0x04000000, NVKM_ENGINE_DISP },
 	{ 0x00000100, NVKM_ENGINE_FIFO },
 	{ 0x00000200, NVKM_SUBDEV_FAULT },
-	{ 0x40000000, NVKM_SUBDEV_IBUS },
+	{ 0x40000000, NVKM_SUBDEV_PRIVRING },
 	{ 0x10000000, NVKM_SUBDEV_BUS },
 	{ 0x08000000, NVKM_SUBDEV_FB },
 	{ 0x02000000, NVKM_SUBDEV_LTC },
@@ -106,13 +106,13 @@ gp100_mc = {
 
 int
 gp100_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_mc **pmc)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
 	struct gp100_mc *mc;
 
 	if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_mc_ctor(func, device, index, &mc->base);
+	nvkm_mc_ctor(func, device, type, inst, &mc->base);
 	*pmc = &mc->base;
 
 	spin_lock_init(&mc->lock);
@@ -122,7 +122,7 @@ gp100_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
 }
 
 int
-gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gp100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
-	return gp100_mc_new_(&gp100_mc, device, index, pmc);
+	return gp100_mc_new_(&gp100_mc, device, type, inst, pmc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
index 45c62f5ef7821976af61f311f67f7fd563ec0a1f..dd581d030cededb8482e72cf4e1ffc78a0523b45 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
@@ -43,7 +43,7 @@ gp10b_mc = {
 };
 
 int
-gp10b_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gp10b_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
-	return gp100_mc_new_(&gp10b_mc, device, index, pmc);
+	return gp100_mc_new_(&gp10b_mc, device, type, inst, pmc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
index 99d50a3d956fc312b3d91ade85f3f36f2b944d22..1b4d43531dbac629095a249c114574f891df1668 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
@@ -27,7 +27,7 @@ static const struct nvkm_mc_map
 gt215_mc_reset[] = {
 	{ 0x04008000, NVKM_ENGINE_MSVLD },
 	{ 0x01020000, NVKM_ENGINE_MSPDEC },
-	{ 0x00802000, NVKM_ENGINE_CE0 },
+	{ 0x00802000, NVKM_ENGINE_CE, 0 },
 	{ 0x00400002, NVKM_ENGINE_MSPPP },
 	{ 0x00201000, NVKM_ENGINE_GR },
 	{ 0x00000100, NVKM_ENGINE_FIFO },
@@ -37,7 +37,7 @@ gt215_mc_reset[] = {
 static const struct nvkm_mc_map
 gt215_mc_intr[] = {
 	{ 0x04000000, NVKM_ENGINE_DISP },
-	{ 0x00400000, NVKM_ENGINE_CE0 },
+	{ 0x00400000, NVKM_ENGINE_CE, 0 },
 	{ 0x00020000, NVKM_ENGINE_MSPDEC },
 	{ 0x00008000, NVKM_ENGINE_MSVLD },
 	{ 0x00001000, NVKM_ENGINE_GR },
@@ -71,7 +71,7 @@ gt215_mc = {
 };
 
 int
-gt215_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gt215_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
-	return nvkm_mc_new_(&gt215_mc, device, index, pmc);
+	return nvkm_mc_new_(&gt215_mc, device, type, inst, pmc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
index 6509defd14609c1a9675424da7f45f3c6cf5ee8b..bc0d09bafa99cce94d3c5b2cdf168f770bc94e3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
@@ -80,7 +80,7 @@ nv04_mc = {
 };
 
 int
-nv04_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+nv04_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
-	return nvkm_mc_new_(&nv04_mc, device, index, pmc);
+	return nvkm_mc_new_(&nv04_mc, device, type, inst, pmc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
index 9213107901e6de32f51973f2d6d086e2eb6f2edc..ab59ca1ee06821735d6d0721571af3c50662b681 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
@@ -44,7 +44,7 @@ nv11_mc = {
 };
 
 int
-nv11_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+nv11_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
-	return nvkm_mc_new_(&nv11_mc, device, index, pmc);
+	return nvkm_mc_new_(&nv11_mc, device, type, inst, pmc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
index 64bf5bbf8146cb80e6ace6f69bdca9da0e9c0d2a..03d756e26e5782a34ffb7a10ef29f13182577186 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
@@ -53,7 +53,7 @@ nv17_mc = {
 };
 
 int
-nv17_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+nv17_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
-	return nvkm_mc_new_(&nv17_mc, device, index, pmc);
+	return nvkm_mc_new_(&nv17_mc, device, type, inst, pmc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
index 65fa44a64b985f12437fa3fa7473eaa704b98927..95f65766e8b00a724288fd32f87182f5def998e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
@@ -48,7 +48,7 @@ nv44_mc = {
 };
 
 int
-nv44_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+nv44_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
-	return nvkm_mc_new_(&nv44_mc, device, index, pmc);
+	return nvkm_mc_new_(&nv44_mc, device, type, inst, pmc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
index fe93b4fd71002ccbfcab3505c0a6b13f4bf039c1..fce3613cdfa527fd6462ec725ee8b87f081555db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
@@ -55,7 +55,7 @@ nv50_mc = {
 };
 
 int
-nv50_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+nv50_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
-	return nvkm_mc_new_(&nv50_mc, device, index, pmc);
+	return nvkm_mc_new_(&nv50_mc, device, type, inst, pmc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index 4aab753a604034edccb9296fae2603b18b162a56..c8bcabb98f999a5d6895e234bf2dca3f768af326 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -4,14 +4,15 @@
 #define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
 #include <subdev/mc.h>
 
-void nvkm_mc_ctor(const struct nvkm_mc_func *, struct nvkm_device *,
-		  int index, struct nvkm_mc *);
-int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *,
-		 int index, struct nvkm_mc **);
+void nvkm_mc_ctor(const struct nvkm_mc_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		  struct nvkm_mc *);
+int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		 struct nvkm_mc **);
 
 struct nvkm_mc_map {
 	u32 stat;
-	u32 unit;
+	enum nvkm_subdev_type type;
+	int inst;
 	bool noauto;
 };
 
@@ -26,7 +27,6 @@ struct nvkm_mc_func {
 	void (*intr_mask)(struct nvkm_mc *, u32 mask, u32 stat);
 	/* retrieve pending interrupt mask (NV_PMC_INTR) */
 	u32 (*intr_stat)(struct nvkm_mc *);
-	void (*intr_hack)(struct nvkm_mc *, bool *handled);
 	const struct nvkm_mc_map *reset;
 	void (*unk260)(struct nvkm_mc *, u32);
 };
@@ -53,7 +53,7 @@ void gf100_mc_unk260(struct nvkm_mc *, u32);
 void gp100_mc_intr_unarm(struct nvkm_mc *);
 void gp100_mc_intr_rearm(struct nvkm_mc *);
 void gp100_mc_intr_mask(struct nvkm_mc *, u32, u32);
-int gp100_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, int,
+int gp100_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
 		  struct nvkm_mc **);
 
 extern const struct nvkm_mc_map gk104_mc_intr[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
index d098c44a4fcb56293dbb867d9e01765cd47aa177..58db83ebadc5f7d7eb36e7296a7ce5ff3d24ec99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
@@ -19,37 +19,118 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  */
+#define tu102_mc(p) container_of((p), struct tu102_mc, base)
 #include "priv.h"
 
+struct tu102_mc {
+	struct nvkm_mc base;
+	spinlock_t lock;
+	bool intr;
+	u32 mask;
+};
+
 static void
-tu102_mc_intr_hack(struct nvkm_mc *mc, bool *handled)
+tu102_mc_intr_update(struct tu102_mc *mc)
 {
-	struct nvkm_device *device = mc->subdev.device;
-	u32 stat = nvkm_rd32(device, 0xb81010);
-	if (stat & 0x00000050) {
-		struct nvkm_subdev *subdev =
-			nvkm_device_subdev(device, NVKM_SUBDEV_FAULT);
-		nvkm_wr32(device, 0xb81010, stat & 0x00000050);
-		if (subdev)
-			nvkm_subdev_intr(subdev);
-		*handled = true;
+	struct nvkm_device *device = mc->base.subdev.device;
+	u32 mask = mc->intr ? mc->mask : 0, i;
+
+	for (i = 0; i < 2; i++) {
+		nvkm_wr32(device, 0x000180 + (i * 0x04), ~mask);
+		nvkm_wr32(device, 0x000160 + (i * 0x04),  mask);
 	}
+
+	if (mask & 0x00000200)
+		nvkm_wr32(device, 0xb81608, 0x6);
+	else
+		nvkm_wr32(device, 0xb81610, 0x6);
 }
 
+void
+tu102_mc_intr_unarm(struct nvkm_mc *base)
+{
+	struct tu102_mc *mc = tu102_mc(base);
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->lock, flags);
+	mc->intr = false;
+	tu102_mc_intr_update(mc);
+	spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+void
+tu102_mc_intr_rearm(struct nvkm_mc *base)
+{
+	struct tu102_mc *mc = tu102_mc(base);
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->lock, flags);
+	mc->intr = true;
+	tu102_mc_intr_update(mc);
+	spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+void
+tu102_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
+{
+	struct tu102_mc *mc = tu102_mc(base);
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->lock, flags);
+	mc->mask = (mc->mask & ~mask) | intr;
+	tu102_mc_intr_update(mc);
+	spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static u32
+tu102_mc_intr_stat(struct nvkm_mc *mc)
+{
+	struct nvkm_device *device = mc->subdev.device;
+	u32 intr0 = nvkm_rd32(device, 0x000100);
+	u32 intr1 = nvkm_rd32(device, 0x000104);
+	u32 intr_top = nvkm_rd32(device, 0xb81600);
+
+	/* Turing and above route the MMU fault interrupts via a different
+	 * interrupt tree with different control registers. For the moment remap
+	 * them back to the old PMC vector.
+	 */
+	if (intr_top & 0x00000006)
+		intr0 |= 0x00000200;
+
+	return intr0 | intr1;
+}
+
 static const struct nvkm_mc_func
 tu102_mc = {
 	.init = nv50_mc_init,
 	.intr = gp100_mc_intr,
-	.intr_unarm = gp100_mc_intr_unarm,
-	.intr_rearm = gp100_mc_intr_rearm,
-	.intr_mask = gp100_mc_intr_mask,
-	.intr_stat = gf100_mc_intr_stat,
-	.intr_hack = tu102_mc_intr_hack,
+	.intr_unarm = tu102_mc_intr_unarm,
+	.intr_rearm = tu102_mc_intr_rearm,
+	.intr_mask = tu102_mc_intr_mask,
+	.intr_stat = tu102_mc_intr_stat,
 	.reset = gk104_mc_reset,
 };
 
+static int
+tu102_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
+	      enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
+{
+	struct tu102_mc *mc;
+
+	if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_mc_ctor(func, device, type, inst, &mc->base);
+	*pmc = &mc->base;
+
+	spin_lock_init(&mc->lock);
+	mc->intr = false;
+	mc->mask = 0x7fffffff;
+	return 0;
+}
+
 int
-tu102_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+tu102_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
-	return gp100_mc_new_(&tu102_mc, device, index, pmc);
+	return tu102_mc_new_(&tu102_mc, device, type, inst, pmc);
 }
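
The Turing rewrite retires `intr_hack` in favour of a masked-arm scheme: `rearm`/`unarm` flip the `intr` flag, `intr_mask` edits the `mask` word, and both funnel through `tu102_mc_intr_update()`, which programs the disable/enable register pairs so the hardware always sees `intr ? mask : 0`. A stripped-down sketch of the idiom (register offsets from this hunk; standalone function for illustration only):

	static void
	intr_update_sketch(struct nvkm_device *device, bool armed, u32 mask)
	{
		u32 en = armed ? mask : 0;
		int i;

		for (i = 0; i < 2; i++) {
			nvkm_wr32(device, 0x000180 + (i * 0x04), ~en); /* clear */
			nvkm_wr32(device, 0x000160 + (i * 0x04),  en); /* set */
		}
	}
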
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 6d5212ae2fd57b741715e7d7c9e4c83fe5bdd13e..ad3b44a9e0e71583fd154b653e7f9e2f03e77f80 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -402,6 +402,7 @@ nvkm_mmu_dtor(struct nvkm_subdev *subdev)
 	nvkm_vmm_unref(&mmu->vmm);
 
 	nvkm_mmu_ptc_fini(mmu);
+	mutex_destroy(&mmu->mutex);
 	return mmu;
 }
 
@@ -414,22 +415,23 @@ nvkm_mmu = {
 
 void
 nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_mmu *mmu)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_mmu *mmu)
 {
-	nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
+	nvkm_subdev_ctor(&nvkm_mmu, device, type, inst, &mmu->subdev);
 	mmu->func = func;
 	mmu->dma_bits = func->dma_bits;
 	nvkm_mmu_ptc_init(mmu);
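+	/* Serialises access to the shared TLB flush/invalidate registers,
+	 * replacing the subdev mutex the vmm flush paths used to take.
+	 */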
+	mutex_init(&mmu->mutex);
 	mmu->user.ctor = nvkm_ummu_new;
 	mmu->user.base = func->mmu.user;
 }
 
 int
 nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_mmu **pmmu)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_mmu **pmmu)
 {
 	if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_mmu_ctor(func, device, index, *pmmu);
+	nvkm_mmu_ctor(func, device, type, inst, *pmmu);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
index 8accda5a772b7fffab4083edf1dbf8c7863a16ce..ce47a3b97be90c358abb725b8a48ec811f85d6a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
@@ -35,7 +35,8 @@ g84_mmu = {
 };
 
 int
-g84_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+g84_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	    struct nvkm_mmu **pmmu)
 {
-	return nvkm_mmu_new_(&g84_mmu, device, index, pmmu);
+	return nvkm_mmu_new_(&g84_mmu, device, type, inst, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 2cd5ec81c0d0ad6b4976f640394e38262201b889..7a28b1d49f7c861b9a7b39323bcf9f47393db5b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -84,7 +84,8 @@ gf100_mmu = {
 };
 
 int
-gf100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gf100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_mmu **pmmu)
 {
-	return nvkm_mmu_new_(&gf100_mmu, device, index, pmmu);
+	return nvkm_mmu_new_(&gf100_mmu, device, type, inst, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
index 3d7d1eb1cff90f32e102303a10afee8865db0723..34c9b2b821f643199764f953237d7c153cfa6d3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
@@ -35,7 +35,8 @@ gk104_mmu = {
 };
 
 int
-gk104_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gk104_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_mmu **pmmu)
 {
-	return nvkm_mmu_new_(&gk104_mmu, device, index, pmmu);
+	return nvkm_mmu_new_(&gk104_mmu, device, type, inst, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
index ac74965a60d4c0eac43738780f6c0b71cc41d8bc..a7db29c429eea46e06ff288767e5dbe73523385d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
@@ -35,7 +35,8 @@ gk20a_mmu = {
 };
 
 int
-gk20a_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gk20a_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_mmu **pmmu)
 {
-	return nvkm_mmu_new_(&gk20a_mmu, device, index, pmmu);
+	return nvkm_mmu_new_(&gk20a_mmu, device, type, inst, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
index 83990c83f9f81728490b8eed1d54f5dfbfbbf01c..e1696f637a689ad0da92a41c5907ee5a6a136124 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
@@ -90,9 +90,10 @@ gm200_mmu_fixed = {
 };
 
 int
-gm200_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gm200_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_mmu **pmmu)
 {
 	if (device->fb->page)
-		return nvkm_mmu_new_(&gm200_mmu_fixed, device, index, pmmu);
-	return nvkm_mmu_new_(&gm200_mmu, device, index, pmmu);
+		return nvkm_mmu_new_(&gm200_mmu_fixed, device, type, inst, pmmu);
+	return nvkm_mmu_new_(&gm200_mmu, device, type, inst, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
index 7353a94b40914acc7485c1ccd02f2ec0dc22c42c..e6e1a8ad701ef4371982a328f36b49ed69a10797 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
@@ -47,9 +47,10 @@ gm20b_mmu_fixed = {
 };
 
 int
-gm20b_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gm20b_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_mmu **pmmu)
 {
 	if (device->fb->page)
-		return nvkm_mmu_new_(&gm20b_mmu_fixed, device, index, pmmu);
-	return nvkm_mmu_new_(&gm20b_mmu, device, index, pmmu);
+		return nvkm_mmu_new_(&gm20b_mmu_fixed, device, type, inst, pmmu);
+	return nvkm_mmu_new_(&gm20b_mmu, device, type, inst, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
index 65cb9d28e60e2bac860cc6e8d155343a267a6c98..daa5ab0f871189ec789a45305316ef989bd54428 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
@@ -37,9 +37,10 @@ gp100_mmu = {
 };
 
 int
-gp100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gp100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_mmu **pmmu)
 {
 	if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true))
-		return gm200_mmu_new(device, index, pmmu);
-	return nvkm_mmu_new_(&gp100_mmu, device, index, pmmu);
+		return gm200_mmu_new(device, type, inst, pmmu);
+	return nvkm_mmu_new_(&gp100_mmu, device, type, inst, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
index 0a50be9a785ae1042d405614ee4b121922120acd..edd0bf9a5cd820666330a28bc5aa4dfe031e90ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
@@ -37,9 +37,10 @@ gp10b_mmu = {
 };
 
 int
-gp10b_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gp10b_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_mmu **pmmu)
 {
 	if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true))
-		return gm20b_mmu_new(device, index, pmmu);
-	return nvkm_mmu_new_(&gp10b_mmu, device, index, pmmu);
+		return gm20b_mmu_new(device, type, inst, pmmu);
+	return nvkm_mmu_new_(&gp10b_mmu, device, type, inst, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
index e0997eedd6d9c97aec00c72a79d1bdd8246f952f..fb8bdc88d5667e622bb3deae6ba3888b6ffe273e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
@@ -37,7 +37,8 @@ gv100_mmu = {
 };
 
 int
-gv100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gv100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_mmu **pmmu)
 {
-	return nvkm_mmu_new_(&gv100_mmu, device, index, pmmu);
+	return nvkm_mmu_new_(&gv100_mmu, device, type, inst, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
index 0527b50730d959d29eab938d19540fb86016bced..514876d6411bc9dfe356f526437ed09720dd7c6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
@@ -35,7 +35,8 @@ mcp77_mmu = {
 };
 
 int
-mcp77_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+mcp77_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_mmu **pmmu)
 {
-	return nvkm_mmu_new_(&mcp77_mmu, device, index, pmmu);
+	return nvkm_mmu_new_(&mcp77_mmu, device, type, inst, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
index d201c887c2cd6725dc58eac19c1f85683a5979e5..0674aa8f68c8970af5be7ae43ac1e7675a3cdee3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
@@ -35,7 +35,8 @@ nv04_mmu = {
 };
 
 int
-nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+nv04_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_mmu **pmmu)
 {
-	return nvkm_mmu_new_(&nv04_mmu, device, index, pmmu);
+	return nvkm_mmu_new_(&nv04_mmu, device, type, inst, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
index adca81895c09e1489e4685dec1ac813683bc67f0..909f92b728479dfac6dde3289f2113d41a18e9a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -47,11 +47,12 @@ nv41_mmu = {
 };
 
 int
-nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+nv41_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_mmu **pmmu)
 {
 	if (device->type == NVKM_DEVICE_AGP ||
 	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
-		return nv04_mmu_new(device, index, pmmu);
+		return nv04_mmu_new(device, type, inst, pmmu);
 
-	return nvkm_mmu_new_(&nv41_mmu, device, index, pmmu);
+	return nvkm_mmu_new_(&nv41_mmu, device, type, inst, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
index 598c53a27bde4549d8a803407f7ed0759bc1865c..dd2a8d461da3ecdfc16c648e006400c202a3db7f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -62,11 +62,12 @@ nv44_mmu = {
 };
 
 int
-nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+nv44_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_mmu **pmmu)
 {
 	if (device->type == NVKM_DEVICE_AGP ||
 	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
-		return nv04_mmu_new(device, index, pmmu);
+		return nv04_mmu_new(device, type, inst, pmmu);
 
-	return nvkm_mmu_new_(&nv44_mmu, device, index, pmmu);
+	return nvkm_mmu_new_(&nv44_mmu, device, type, inst, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index c0083ddda65a08a534ec3116e0d759158dac9ab4..78d46e35d0a9166ec72a4e6416aa8240f8399760 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -71,7 +71,8 @@ nv50_mmu = {
 };
 
 int
-nv50_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+nv50_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_mmu **pmmu)
 {
-	return nvkm_mmu_new_(&nv50_mmu, device, index, pmmu);
+	return nvkm_mmu_new_(&nv50_mmu, device, type, inst, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index 479b023442710a41d23c7fc44f0723b7f6e6a20b..5265bf4d8366c0133a0318b318f26071ac3262fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -4,10 +4,10 @@
 #define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
 #include <subdev/mmu.h>
 
-void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *,
-		   int index, struct nvkm_mmu *);
-int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
-		  int index, struct nvkm_mmu **);
+void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		   struct nvkm_mmu *);
+int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		  struct nvkm_mmu **);
 
 struct nvkm_mmu_func {
 	void (*init)(struct nvkm_mmu *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
index 94081f35f9675dd13c4314288275c105c6a58930..8d060ce47f8657aeef67c7533f0f24f2d4e2930e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -51,7 +51,8 @@ tu102_mmu = {
 };
 
 int
-tu102_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+tu102_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_mmu **pmmu)
 {
-	return nvkm_mmu_new_(&tu102_mmu, device, index, pmmu);
+	return nvkm_mmu_new_(&tu102_mmu, device, type, inst, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
index 6a2d9eb8e1ea8fbb6885c5064a1e2e988afd8782..5438384d9a67445625fce05b77565704424495d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
@@ -187,12 +187,11 @@ gf100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
 void
 gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
 {
-	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
-	struct nvkm_device *device = subdev->device;
+	struct nvkm_device *device = vmm->mmu->subdev.device;
 	struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
 	u64 addr = 0;
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&vmm->mmu->mutex);
 	/* Looks like maybe a "free flush slots" counter, the
 	 * faster you write to 0x100cbc the more it decreases.
 	 */
@@ -222,7 +221,7 @@ gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
 		if (nvkm_rd32(device, 0x100c80) & 0x00008000)
 			break;
 	);
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&vmm->mmu->mutex);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
index 1d3369683a21f3616e4bf4aec507fe6c806045f0..31984671daf84aadd7b5aa4ffcc68b768e2f3607 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
@@ -80,17 +80,16 @@ nv41_vmm_desc_12[] = {
 static void
 nv41_vmm_flush(struct nvkm_vmm *vmm, int level)
 {
-	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
-	struct nvkm_device *device = subdev->device;
+	struct nvkm_device *device = vmm->mmu->subdev.device;
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&vmm->mmu->mutex);
 	nvkm_wr32(device, 0x100810, 0x00000022);
 	nvkm_msec(device, 2000,
 		if (nvkm_rd32(device, 0x100810) & 0x00000020)
 			break;
 	);
 	nvkm_wr32(device, 0x100810, 0x00000000);
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&vmm->mmu->mutex);
 }
 
 static const struct nvkm_vmm_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
index 2d89e27e8e9e5eb60e7164957dc1e36c27b4fc3d..b7548dcd72c779d68885f56357f8ceb148c90ee1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -184,7 +184,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
 	struct nvkm_device *device = subdev->device;
 	int i, id;
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&vmm->mmu->mutex);
 	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
 		if (!atomic_read(&vmm->engref[i]))
 			continue;
@@ -207,7 +207,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
 		case NVKM_ENGINE_MSVLD : id = 0x09; break;
 		case NVKM_ENGINE_CIPHER:
 		case NVKM_ENGINE_SEC   : id = 0x0a; break;
-		case NVKM_ENGINE_CE0   : id = 0x0d; break;
+		case NVKM_ENGINE_CE    : id = 0x0d; break;
 		default:
 			continue;
 		}
@@ -217,10 +217,9 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
 			if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
 				break;
 		) < 0)
-			nvkm_error(subdev, "%s mmu invalidate timeout\n",
-				   nvkm_subdev_name[i]);
+			nvkm_error(subdev, "%s mmu invalidate timeout\n", nvkm_subdev_type[i]);
 	}
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&vmm->mmu->mutex);
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index b1294d0076c081b83f18f697754d18d6558db017..6cb5eefa45e9aa321d4c71ae73ba34d93457c951 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -26,15 +26,14 @@
 static void
 tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
 {
-	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
-	struct nvkm_device *device = subdev->device;
+	struct nvkm_device *device = vmm->mmu->subdev.device;
 	u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
 
 	type |= 0x00000001; /* PAGE_ALL */
 	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
 		type |= 0x00000004; /* HUB_ONLY */
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&vmm->mmu->mutex);
 
 	nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
 	nvkm_wr32(device, 0xb830a4, 0x00000000);
@@ -46,7 +45,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
 			break;
 	);
 
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&vmm->mmu->mutex);
 }
 
 static const struct nvkm_vmm_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index f44682d62f750dcb5311505f67dc8691472e6463..c1acfe642da3949ce9333226ae79612f01876e73 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -230,7 +230,8 @@ nvkm_mxm = {
 };
 
 int
-nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm)
+nvkm_mxm_new_(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_mxm **pmxm)
 {
 	struct nvkm_bios *bios = device->bios;
 	struct nvkm_mxm *mxm;
@@ -240,7 +241,7 @@ nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm)
 	if (!(mxm = *pmxm = kzalloc(sizeof(*mxm), GFP_KERNEL)))
 		return -ENOMEM;
 
-	nvkm_subdev_ctor(&nvkm_mxm, device, index, &mxm->subdev);
+	nvkm_subdev_ctor(&nvkm_mxm, device, type, inst, &mxm->subdev);
 
 	data = mxm_table(bios, &ver, &len);
 	if (!data || !(ver = nvbios_rd08(bios, data))) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index 70e2c414bb7b6e8ae3d4d9d02f8e65c03cfc94e8..f3167904dcb0a5e409f72b1c5133f5cd91cb912e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -201,12 +201,13 @@ mxm_dcb_sanitise(struct nvkm_mxm *mxm)
 }
 
 int
-nv50_mxm_new(struct nvkm_device *device, int index, struct nvkm_subdev **pmxm)
+nv50_mxm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_subdev **pmxm)
 {
 	struct nvkm_mxm *mxm;
 	int ret;
 
-	ret = nvkm_mxm_new_(device, index, &mxm);
+	ret = nvkm_mxm_new_(device, type, inst, &mxm);
 	if (mxm)
 		*pmxm = &mxm->subdev;
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
index fc8f69e6fc64f724e8a411f80ec81fc3ccdf44a2..fcacb6c6a7f721b46c83201d4dfc59e1ea34162d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
@@ -12,5 +12,5 @@ struct nvkm_mxm {
 	u8 *mxms;
 };
 
-int nvkm_mxm_new_(struct nvkm_device *, int index, struct nvkm_mxm **);
+int nvkm_mxm_new_(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_mxm **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index ee2431a7804ec81f4e74cfceaa0969d16e7a336e..a7d42ea8ba2866de040a2f2e44fe5390a29e0c67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -183,13 +183,13 @@ nvkm_pci_func = {
 
 int
 nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_pci **ppci)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_pci **ppci)
 {
 	struct nvkm_pci *pci;
 
 	if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&nvkm_pci_func, device, index, &pci->subdev);
+	nvkm_subdev_ctor(&nvkm_pci_func, device, type, inst, &pci->subdev);
 	pci->func = func;
 	pci->pdev = device->func->pci(device)->pdev;
 	pci->irq = -1;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
index 62438d892f422bb3ebc123470b54e9514d4d0cc3..5b29aacedef3bf450d7822927f51482fd4162175 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
@@ -150,7 +150,8 @@ g84_pci_func = {
 };
 
 int
-g84_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+g84_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	    struct nvkm_pci **ppci)
 {
-	return nvkm_pci_new_(&g84_pci_func, device, index, ppci);
+	return nvkm_pci_new_(&g84_pci_func, device, type, inst, ppci);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
index 48874359d5f63bde6b1a8ff791c22c61925fa014..a9e0674009c6f704ccb0f6af080bdc05c03cc1c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
@@ -51,7 +51,8 @@ g92_pci_func = {
 };
 
 int
-g92_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+g92_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	    struct nvkm_pci **ppci)
 {
-	return nvkm_pci_new_(&g92_pci_func, device, index, ppci);
+	return nvkm_pci_new_(&g92_pci_func, device, type, inst, ppci);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
index 09adb37a5664c6451db045a4f20ff6ec7fbbc36b..7bacd0693283f37ce8d2fe223a3feaa8200262a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
@@ -43,7 +43,8 @@ g94_pci_func = {
 };
 
 int
-g94_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+g94_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	    struct nvkm_pci **ppci)
 {
-	return nvkm_pci_new_(&g94_pci_func, device, index, ppci);
+	return nvkm_pci_new_(&g94_pci_func, device, type, inst, ppci);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
index 00a5e7d3ee9d491383d42735aa4f8ff2f3396ed6..099906092fe15f66d1c6ff51ef142bd8c7e16b97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -96,7 +96,8 @@ gf100_pci_func = {
 };
 
 int
-gf100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+gf100_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pci **ppci)
 {
-	return nvkm_pci_new_(&gf100_pci_func, device, index, ppci);
+	return nvkm_pci_new_(&gf100_pci_func, device, type, inst, ppci);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
index 11bf419afe3fb1c95d343d26a483c1c15597c4b5..bcde609ba8666864f8d388876dc976c5f76c00cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
@@ -43,7 +43,8 @@ gf106_pci_func = {
 };
 
 int
-gf106_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+gf106_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pci **ppci)
 {
-	return nvkm_pci_new_(&gf106_pci_func, device, index, ppci);
+	return nvkm_pci_new_(&gf106_pci_func, device, type, inst, ppci);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
index e68030507d88bd266d4eee8ee2a9660f10eae278..6be87ecffc89c864dfc80b32007dee96b9f0854c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
@@ -222,7 +222,8 @@ gk104_pci_func = {
 };
 
 int
-gk104_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+gk104_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pci **ppci)
 {
-	return nvkm_pci_new_(&gk104_pci_func, device, index, ppci);
+	return nvkm_pci_new_(&gk104_pci_func, device, type, inst, ppci);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
index 82c5234a06ff5884c6b75728c2907fc7774e4f93..a5fafda0014d9846e7627c35f9b65cd3a1d48341 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
@@ -38,7 +38,8 @@ gp100_pci_func = {
 };
 
 int
-gp100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+gp100_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pci **ppci)
 {
-	return nvkm_pci_new_(&gp100_pci_func, device, index, ppci);
+	return nvkm_pci_new_(&gp100_pci_func, device, type, inst, ppci);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
index 5b1ed42cb90b90aa65f1af7550139f82457a5c94..9ab64194b185ab4fda04759ae126cd350390dd41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
@@ -52,7 +52,8 @@ nv04_pci_func = {
 };
 
 int
-nv04_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+nv04_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_pci **ppci)
 {
-	return nvkm_pci_new_(&nv04_pci_func, device, index, ppci);
+	return nvkm_pci_new_(&nv04_pci_func, device, type, inst, ppci);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
index 6eb417765802d07847b30e3c120178faea657c15..6a3c31cf0200f84fc1c67b7108e389fe87ae1021 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
@@ -59,7 +59,8 @@ nv40_pci_func = {
 };
 
 int
-nv40_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+nv40_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_pci **ppci)
 {
-	return nvkm_pci_new_(&nv40_pci_func, device, index, ppci);
+	return nvkm_pci_new_(&nv40_pci_func, device, type, inst, ppci);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
index fc617e4c0ab6f52bee743c2d95d3ce31949fb75e..9cad17f178ec49c9638608213103b9d775446c7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
@@ -45,7 +45,8 @@ nv46_pci_func = {
 };
 
 int
-nv46_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+nv46_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_pci **ppci)
 {
-	return nvkm_pci_new_(&nv46_pci_func, device, index, ppci);
+	return nvkm_pci_new_(&nv46_pci_func, device, type, inst, ppci);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
index 1f1b26b5fa729c8c886fadb8b334e3a75ef1e0c6..741e34bf307cfccefd02d1c4a54bda248dd71f2b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
@@ -31,7 +31,8 @@ nv4c_pci_func = {
 };
 
 int
-nv4c_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+nv4c_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	     struct nvkm_pci **ppci)
 {
-	return nvkm_pci_new_(&nv4c_pci_func, device, index, ppci);
+	return nvkm_pci_new_(&nv4c_pci_func, device, type, inst, ppci);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
index 7009aad86b6e5b73f7bb28a8db99bbb3439488d8..9b75835329625f80b99844c6a8e06c25527a1adb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -4,8 +4,8 @@
 #define nvkm_pci(p) container_of((p), struct nvkm_pci, subdev)
 #include <subdev/pci.h>
 
-int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *,
-		  int index, struct nvkm_pci **);
+int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		  struct nvkm_pci **);
 
 struct nvkm_pci_func {
 	void (*init)(struct nvkm_pci *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index a0fe607c9c07fc5f59f88c45ff4c4538dc5badc2..24382875fb4f37d6bfbfcb129223e05f9e9c1037 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -148,6 +148,7 @@ nvkm_pmu_dtor(struct nvkm_subdev *subdev)
 	nvkm_falcon_cmdq_del(&pmu->hpq);
 	nvkm_falcon_qmgr_del(&pmu->qmgr);
 	nvkm_falcon_dtor(&pmu->falcon);
+	mutex_destroy(&pmu->send.mutex);
 	return nvkm_pmu(subdev);
 }
 
@@ -162,11 +163,13 @@ nvkm_pmu = {
 
 int
 nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
-	      int index, struct nvkm_pmu *pmu)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_pmu *pmu)
 {
 	int ret;
 
-	nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
+	nvkm_subdev_ctor(&nvkm_pmu, device, type, inst, &pmu->subdev);
+
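+	/* Serialises message submission to the PMU; replaces the subdev
+	 * mutex previously taken by gt215_pmu_send().
+	 */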
+	mutex_init(&pmu->send.mutex);
 
 	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
 	init_waitqueue_head(&pmu->recv.wait);
@@ -177,9 +180,8 @@ nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
 
 	pmu->func = fwif->func;
 
-	ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev,
-			       nvkm_subdev_name[pmu->subdev.index], 0x10a000,
-			       &pmu->falcon);
+	ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev, pmu->subdev.name,
+			       0x10a000, &pmu->falcon);
 	if (ret)
 		return ret;
 
@@ -195,10 +197,10 @@ nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
 
 int
 nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
-	      int index, struct nvkm_pmu **ppmu)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_pmu **ppmu)
 {
 	struct nvkm_pmu *pmu;
 	if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
 		return -ENOMEM;
-	return nvkm_pmu_ctor(fwif, device, index, *ppmu);
+	return nvkm_pmu_ctor(fwif, device, type, inst, *ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index 3ecb3d9cbcf234674a42c22d313f10c779cbf1df..f725a3ec547909ba0fca001874baeff70da64958 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -30,14 +30,14 @@ void
 gf100_pmu_reset(struct nvkm_pmu *pmu)
 {
 	struct nvkm_device *device = pmu->subdev.device;
-	nvkm_mc_disable(device, NVKM_SUBDEV_PMU);
-	nvkm_mc_enable(device, NVKM_SUBDEV_PMU);
+	nvkm_mc_disable(device, NVKM_SUBDEV_PMU, 0);
+	nvkm_mc_enable(device, NVKM_SUBDEV_PMU, 0);
 }
 
 bool
 gf100_pmu_enabled(struct nvkm_pmu *pmu)
 {
-	return nvkm_mc_enabled(pmu->subdev.device, NVKM_SUBDEV_PMU);
+	return nvkm_mc_enabled(pmu->subdev.device, NVKM_SUBDEV_PMU, 0);
 }
 
 static const struct nvkm_pmu_func
@@ -69,7 +69,8 @@ gf100_pmu_fwif[] = {
 };
 
 int
-gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gf100_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(gf100_pmu_fwif, device, index, ppmu);
+	return nvkm_pmu_new_(gf100_pmu_fwif, device, type, inst, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
index 8dd0271aaaee6af471fad5417811d5082189cd39..0f4b6697a4e401541b3b22744909b01fdc5cbcac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -47,7 +47,8 @@ gf119_pmu_fwif[] = {
 };
 
 int
-gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gf119_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(gf119_pmu_fwif, device, index, ppmu);
+	return nvkm_pmu_new_(gf119_pmu_fwif, device, type, inst, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 8b70cc17a6341cd3eeb3820738532bb2283097ac..9e7631d7aa41bc78c7ab23ab25d3097f004572c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -127,7 +127,8 @@ gk104_pmu_fwif[] = {
 };
 
 int
-gk104_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gk104_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(gk104_pmu_fwif, device, index, ppmu);
+	return nvkm_pmu_new_(gk104_pmu_fwif, device, type, inst, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index 0081f2141b1083abf7fa8b67150150781edb05a9..dbaefee53e1f31e25067396c69848c543d720313 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -106,7 +106,8 @@ gk110_pmu_fwif[] = {
 };
 
 int
-gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gk110_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(gk110_pmu_fwif, device, index, ppmu);
+	return nvkm_pmu_new_(gk110_pmu_fwif, device, type, inst, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index b227c701a5e7c6d9c81d1dd356c0f73bf09e430e..a08fb049e6d6b0ba035f2c87a7298bdd21dca6bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -48,7 +48,8 @@ gk208_pmu_fwif[] = {
 };
 
 int
-gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gk208_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(gk208_pmu_fwif, device, index, ppmu);
+	return nvkm_pmu_new_(gk208_pmu_fwif, device, type, inst, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 26c1adf8f44c6f742c6baeededf2f52636907a37..a67a42e73f084505a8389201e3c2fcea5ec2fdb9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -210,7 +210,8 @@ gk20a_pmu_fwif[] = {
 };
 
 int
-gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gk20a_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pmu **ppmu)
 {
 	struct gk20a_pmu *pmu;
 	int ret;
@@ -219,7 +220,7 @@ gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
 		return -ENOMEM;
 	*ppmu = &pmu->base;
 
-	ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, index, &pmu->base);
+	ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, type, inst, &pmu->base);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
index 5afb55e58b517e406451a8203066e664629928c3..622ee637f97b50117c9a098b615a368712205df0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -49,7 +49,8 @@ gm107_pmu_fwif[] = {
 };
 
 int
-gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gm107_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(gm107_pmu_fwif, device, index, ppmu);
+	return nvkm_pmu_new_(gm107_pmu_fwif, device, type, inst, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
index 383376addb41c4a55dd1261ea65c748c846233fa..5968c7696596c328ebe355b54a2acd139c813bfe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
@@ -45,7 +45,8 @@ gm200_pmu_fwif[] = {
 };
 
 int
-gm200_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gm200_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(gm200_pmu_fwif, device, index, ppmu);
+	return nvkm_pmu_new_(gm200_pmu_fwif, device, type, inst, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index 8f6ed5373ea16e046a7748717639a304ac1327f9..148706977eec740824943bbcb6007f24c7ebc375 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -240,7 +240,8 @@ gm20b_pmu_fwif[] = {
 };
 
 int
-gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gm20b_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu);
+	return nvkm_pmu_new_(gm20b_pmu_fwif, device, type, inst, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
index 3d8ce14dba7bf1fb2b7a1d4c54dd9ced9f2f6ed0..00da1b873ce81c858a4ed556a59aa05a19e80ad6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -51,7 +51,8 @@ gp102_pmu_fwif[] = {
 };
 
 int
-gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gp102_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(gp102_pmu_fwif, device, index, ppmu);
+	return nvkm_pmu_new_(gp102_pmu_fwif, device, type, inst, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
index 9c237c426599b543e38f39e3d2fd3888bb70e510..461f722656e242b45e34a7d05688eb8713ba39d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
@@ -99,7 +99,8 @@ gp10b_pmu_fwif[] = {
 };
 
 int
-gp10b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gp10b_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(gp10b_pmu_fwif, device, index, ppmu);
+	return nvkm_pmu_new_(gp10b_pmu_fwif, device, type, inst, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index 88b909913ff96e83799a317aa4808e3f19a017cd..b0407b86bc10d9ab29b1f728d4be92ac654a06b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -34,7 +34,7 @@ gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
 	struct nvkm_device *device = subdev->device;
 	u32 addr;
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&pmu->send.mutex);
 	/* wait for a free slot in the fifo */
 	addr  = nvkm_rd32(device, 0x10a4a0);
 	if (nvkm_msec(device, 2000,
@@ -42,7 +42,7 @@ gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
 		if (tmp != (addr ^ 8))
 			break;
 	) < 0) {
-		mutex_unlock(&subdev->mutex);
+		mutex_unlock(&pmu->send.mutex);
 		return -EBUSY;
 	}
 
@@ -79,7 +79,7 @@ gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
 		reply[1] = pmu->recv.data[1];
 	}
 
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&pmu->send.mutex);
 	return 0;
 }
 
@@ -282,7 +282,8 @@ gt215_pmu_fwif[] = {
 };
 
 int
-gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gt215_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(gt215_pmu_fwif, device, index, ppmu);
+	return nvkm_pmu_new_(gt215_pmu_fwif, device, type, inst, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 276b6d778e532fc54b0049833ddb88d498c6f21e..e7860d17735398ba5b3d6efef7135db627fb3914 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -62,8 +62,8 @@ int gf100_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
 int gm200_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
 int gm20b_pmu_load(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
 
-int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *,
-		  int index, struct nvkm_pmu *);
-int nvkm_pmu_new_(const struct nvkm_pmu_fwif *, struct nvkm_device *,
-		  int index, struct nvkm_pmu **);
+int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		  struct nvkm_pmu *);
+int nvkm_pmu_new_(const struct nvkm_pmu_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		  struct nvkm_pmu **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/privring/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/Kbuild
new file mode 100644
index 0000000000000000000000000000000000000000..d47d1bdd0f2b9e13d204c291b4814d710893e5e3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/Kbuild
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/privring/gf100.o
+nvkm-y += nvkm/subdev/privring/gf117.o
+nvkm-y += nvkm/subdev/privring/gk104.o
+nvkm-y += nvkm/subdev/privring/gk20a.o
+nvkm-y += nvkm/subdev/privring/gm200.o
+nvkm-y += nvkm/subdev/privring/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf100.c
similarity index 71%
rename from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
rename to drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf100.c
index 1115376bc85f5fd5d28e5cb37185721d73cf5e7d..ef7caca7037233f39e62c728e934b1459516a349 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf100.c
@@ -25,39 +25,39 @@
 #include <subdev/timer.h>
 
 static void
-gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
+gf100_privring_intr_hub(struct nvkm_subdev *privring, int i)
 {
-	struct nvkm_device *device = ibus->device;
+	struct nvkm_device *device = privring->device;
 	u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0400));
 	u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
 	u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
-	nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_debug(privring, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
 }
 
 static void
-gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
+gf100_privring_intr_rop(struct nvkm_subdev *privring, int i)
 {
-	struct nvkm_device *device = ibus->device;
+	struct nvkm_device *device = privring->device;
 	u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0400));
 	u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
 	u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
-	nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_debug(privring, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
 }
 
 static void
-gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
+gf100_privring_intr_gpc(struct nvkm_subdev *privring, int i)
 {
-	struct nvkm_device *device = ibus->device;
+	struct nvkm_device *device = privring->device;
 	u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0400));
 	u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
 	u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
-	nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_debug(privring, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
 }
 
 void
-gf100_ibus_intr(struct nvkm_subdev *ibus)
+gf100_privring_intr(struct nvkm_subdev *privring)
 {
-	struct nvkm_device *device = ibus->device;
+	struct nvkm_device *device = privring->device;
 	u32 intr0 = nvkm_rd32(device, 0x121c58);
 	u32 intr1 = nvkm_rd32(device, 0x121c5c);
 	u32 hubnr = nvkm_rd32(device, 0x121c70);
@@ -68,7 +68,7 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
 	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
 		u32 stat = 0x00000100 << i;
 		if (intr0 & stat) {
-			gf100_ibus_intr_hub(ibus, i);
+			gf100_privring_intr_hub(privring, i);
 			intr0 &= ~stat;
 		}
 	}
@@ -76,7 +76,7 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
 	for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
 		u32 stat = 0x00010000 << i;
 		if (intr0 & stat) {
-			gf100_ibus_intr_rop(ibus, i);
+			gf100_privring_intr_rop(privring, i);
 			intr0 &= ~stat;
 		}
 	}
@@ -84,7 +84,7 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
 	for (i = 0; intr1 && i < gpcnr; i++) {
 		u32 stat = 0x00000001 << i;
 		if (intr1 & stat) {
-			gf100_ibus_intr_gpc(ibus, i);
+			gf100_privring_intr_gpc(privring, i);
 			intr1 &= ~stat;
 		}
 	}
@@ -97,9 +97,9 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
 }
 
 static int
-gf100_ibus_init(struct nvkm_subdev *ibus)
+gf100_privring_init(struct nvkm_subdev *privring)
 {
-	struct nvkm_device *device = ibus->device;
+	struct nvkm_device *device = privring->device;
 	nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
 	nvkm_wr32(device, 0x12232c, 0x00100064);
 	nvkm_wr32(device, 0x122330, 0x00100064);
@@ -109,14 +109,14 @@ gf100_ibus_init(struct nvkm_subdev *ibus)
 }
 
 static const struct nvkm_subdev_func
-gf100_ibus = {
-	.init = gf100_ibus_init,
-	.intr = gf100_ibus_intr,
+gf100_privring = {
+	.init = gf100_privring_init,
+	.intr = gf100_privring_intr,
 };
 
 int
-gf100_ibus_new(struct nvkm_device *device, int index,
-	       struct nvkm_subdev **pibus)
+gf100_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		   struct nvkm_subdev **pprivring)
 {
-	return nvkm_subdev_new_(&gf100_ibus, device, index, pibus);
+	return nvkm_subdev_new_(&gf100_privring, device, type, inst, pprivring);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf117.c
similarity index 79%
rename from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
rename to drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf117.c
index 1124dadac145b7cb957542aa7da31724213b75f0..c78721fcd729f3eb6755f928f09d0b262c9711c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf117.c
@@ -24,9 +24,9 @@
 #include "priv.h"
 
 static int
-gf117_ibus_init(struct nvkm_subdev *ibus)
+gf117_privring_init(struct nvkm_subdev *privring)
 {
-	struct nvkm_device *device = ibus->device;
+	struct nvkm_device *device = privring->device;
 	nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
 	nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100);
 	nvkm_mask(device, 0x1223b0, 0x0003ffff, 0x00000fff);
@@ -34,14 +34,14 @@ gf117_ibus_init(struct nvkm_subdev *ibus)
 }
 
 static const struct nvkm_subdev_func
-gf117_ibus = {
-	.init = gf117_ibus_init,
-	.intr = gf100_ibus_intr,
+gf117_privring = {
+	.init = gf117_privring_init,
+	.intr = gf100_privring_intr,
 };
 
 int
-gf117_ibus_new(struct nvkm_device *device, int index,
-	       struct nvkm_subdev **pibus)
+gf117_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		   struct nvkm_subdev **pprivring)
 {
-	return nvkm_subdev_new_(&gf117_ibus, device, index, pibus);
+	return nvkm_subdev_new_(&gf117_privring, device, type, inst, pprivring);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk104.c
similarity index 71%
rename from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
rename to drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk104.c
index 22e487b493ad1346851a58c02599ceb97011a798..568a4c0997bd52143a769c96c190d0843b2dbf01 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk104.c
@@ -25,39 +25,39 @@
 #include <subdev/timer.h>
 
 static void
-gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
+gk104_privring_intr_hub(struct nvkm_subdev *privring, int i)
 {
-	struct nvkm_device *device = ibus->device;
+	struct nvkm_device *device = privring->device;
 	u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0800));
 	u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
 	u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
-	nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_debug(privring, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
 }
 
 static void
-gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
+gk104_privring_intr_rop(struct nvkm_subdev *privring, int i)
 {
-	struct nvkm_device *device = ibus->device;
+	struct nvkm_device *device = privring->device;
 	u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0800));
 	u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
 	u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
-	nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_debug(privring, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
 }
 
 static void
-gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
+gk104_privring_intr_gpc(struct nvkm_subdev *privring, int i)
 {
-	struct nvkm_device *device = ibus->device;
+	struct nvkm_device *device = privring->device;
 	u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0800));
 	u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
 	u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
-	nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_debug(privring, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
 }
 
 void
-gk104_ibus_intr(struct nvkm_subdev *ibus)
+gk104_privring_intr(struct nvkm_subdev *privring)
 {
-	struct nvkm_device *device = ibus->device;
+	struct nvkm_device *device = privring->device;
 	u32 intr0 = nvkm_rd32(device, 0x120058);
 	u32 intr1 = nvkm_rd32(device, 0x12005c);
 	u32 hubnr = nvkm_rd32(device, 0x120070);
@@ -68,7 +68,7 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
 	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
 		u32 stat = 0x00000100 << i;
 		if (intr0 & stat) {
-			gk104_ibus_intr_hub(ibus, i);
+			gk104_privring_intr_hub(privring, i);
 			intr0 &= ~stat;
 		}
 	}
@@ -76,7 +76,7 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
 	for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
 		u32 stat = 0x00010000 << i;
 		if (intr0 & stat) {
-			gk104_ibus_intr_rop(ibus, i);
+			gk104_privring_intr_rop(privring, i);
 			intr0 &= ~stat;
 		}
 	}
@@ -84,7 +84,7 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
 	for (i = 0; intr1 && i < gpcnr; i++) {
 		u32 stat = 0x00000001 << i;
 		if (intr1 & stat) {
-			gk104_ibus_intr_gpc(ibus, i);
+			gk104_privring_intr_gpc(privring, i);
 			intr1 &= ~stat;
 		}
 	}
@@ -97,9 +97,9 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
 }
 
 static int
-gk104_ibus_init(struct nvkm_subdev *ibus)
+gk104_privring_init(struct nvkm_subdev *privring)
 {
-	struct nvkm_device *device = ibus->device;
+	struct nvkm_device *device = privring->device;
 	nvkm_mask(device, 0x122318, 0x0003ffff, 0x00001000);
 	nvkm_mask(device, 0x12231c, 0x0003ffff, 0x00000200);
 	nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
@@ -111,15 +111,15 @@ gk104_ibus_init(struct nvkm_subdev *ibus)
 }
 
 static const struct nvkm_subdev_func
-gk104_ibus = {
-	.preinit = gk104_ibus_init,
-	.init = gk104_ibus_init,
-	.intr = gk104_ibus_intr,
+gk104_privring = {
+	.preinit = gk104_privring_init,
+	.init = gk104_privring_init,
+	.intr = gk104_privring_intr,
 };
 
 int
-gk104_ibus_new(struct nvkm_device *device, int index,
-	       struct nvkm_subdev **pibus)
+gk104_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		   struct nvkm_subdev **pprivring)
 {
-	return nvkm_subdev_new_(&gk104_ibus, device, index, pibus);
+	return nvkm_subdev_new_(&gk104_privring, device, type, inst, pprivring);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk20a.c
similarity index 73%
rename from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
rename to drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk20a.c
index 187d544378b04fa603ceba76360a2c0ffcb3b5f1..55e4a60d87700435385971d44ab7066417f80cb3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk20a.c
@@ -19,13 +19,13 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include <subdev/ibus.h>
+#include <subdev/privring.h>
 #include <subdev/timer.h>
 
 static void
-gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
+gk20a_privring_init_privring_ring(struct nvkm_subdev *privring)
 {
-	struct nvkm_device *device = ibus->device;
+	struct nvkm_device *device = privring->device;
 	nvkm_mask(device, 0x137250, 0x3f, 0);
 
 	nvkm_mask(device, 0x000200, 0x20, 0);
@@ -46,14 +46,14 @@ gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
 }
 
 static void
-gk20a_ibus_intr(struct nvkm_subdev *ibus)
+gk20a_privring_intr(struct nvkm_subdev *privring)
 {
-	struct nvkm_device *device = ibus->device;
+	struct nvkm_device *device = privring->device;
 	u32 status0 = nvkm_rd32(device, 0x120058);
 
 	if (status0 & 0x7) {
-		nvkm_debug(ibus, "resetting ibus ring\n");
-		gk20a_ibus_init_ibus_ring(ibus);
+		nvkm_debug(privring, "resetting privring ring\n");
+		gk20a_privring_init_privring_ring(privring);
 	}
 
 	/* Acknowledge interrupt */
@@ -65,21 +65,21 @@ gk20a_ibus_intr(struct nvkm_subdev *ibus)
 }
 
 static int
-gk20a_ibus_init(struct nvkm_subdev *ibus)
+gk20a_privring_init(struct nvkm_subdev *privring)
 {
-	gk20a_ibus_init_ibus_ring(ibus);
+	gk20a_privring_init_privring_ring(privring);
 	return 0;
 }
 
 static const struct nvkm_subdev_func
-gk20a_ibus = {
-	.init = gk20a_ibus_init,
-	.intr = gk20a_ibus_intr,
+gk20a_privring = {
+	.init = gk20a_privring_init,
+	.intr = gk20a_privring_intr,
 };
 
 int
-gk20a_ibus_new(struct nvkm_device *device, int index,
-	       struct nvkm_subdev **pibus)
+gk20a_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		   struct nvkm_subdev **pprivring)
 {
-	return nvkm_subdev_new_(&gk20a_ibus, device, index, pibus);
+	return nvkm_subdev_new_(&gk20a_privring, device, type, inst, pprivring);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c
similarity index 83%
rename from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
rename to drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c
index 0f1f0ad6377e2fb875e5de6aa5e518f956376e34..b4eaf6db36d728fda55143f85d5ab61b9212beb9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c
@@ -24,13 +24,13 @@
 #include "priv.h"
 
 static const struct nvkm_subdev_func
-gm200_ibus = {
-	.intr = gk104_ibus_intr,
+gm200_privring = {
+	.intr = gk104_privring_intr,
 };
 
 int
-gm200_ibus_new(struct nvkm_device *device, int index,
-	       struct nvkm_subdev **pibus)
+gm200_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		   struct nvkm_subdev **pprivring)
 {
-	return nvkm_subdev_new_(&gm200_ibus, device, index, pibus);
+	return nvkm_subdev_new_(&gm200_privring, device, type, inst, pprivring);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gp10b.c
similarity index 78%
rename from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
rename to drivers/gpu/drm/nouveau/nvkm/subdev/privring/gp10b.c
index 0347b367cefe47aaec103dc426a81db7866327ed..4534111cf9074f915ac4c27330367ec2d7f76c4b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gp10b.c
@@ -19,14 +19,14 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include <subdev/ibus.h>
+#include <subdev/privring.h>
 
 #include "priv.h"
 
 static int
-gp10b_ibus_init(struct nvkm_subdev *ibus)
+gp10b_privring_init(struct nvkm_subdev *privring)
 {
-	struct nvkm_device *device = ibus->device;
+	struct nvkm_device *device = privring->device;
 
 	nvkm_wr32(device, 0x1200a8, 0x0);
 
@@ -42,14 +42,14 @@ gp10b_ibus_init(struct nvkm_subdev *ibus)
 }
 
 static const struct nvkm_subdev_func
-gp10b_ibus = {
-	.init = gp10b_ibus_init,
-	.intr = gk104_ibus_intr,
+gp10b_privring = {
+	.init = gp10b_privring_init,
+	.intr = gk104_privring_intr,
 };
 
 int
-gp10b_ibus_new(struct nvkm_device *device, int index,
-	       struct nvkm_subdev **pibus)
+gp10b_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		   struct nvkm_subdev **pprivring)
 {
-	return nvkm_subdev_new_(&gp10b_ibus, device, index, pibus);
+	return nvkm_subdev_new_(&gp10b_privring, device, type, inst, pprivring);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/privring/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/priv.h
new file mode 100644
index 0000000000000000000000000000000000000000..b378c14bc8dc7e55b065b4cfb14fc51583170ae5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/priv.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_PRIVRING_PRIV_H__
+#define __NVKM_PRIVRING_PRIV_H__
+#include <subdev/privring.h>
+
+void gf100_privring_intr(struct nvkm_subdev *);
+void gk104_privring_intr(struct nvkm_subdev *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 4a4d1e224126464fd1881a062cbddcc1b34ad8b4..fc5ee118e91067ff5568e566ada85c77d166c24e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -421,10 +421,10 @@ nvkm_therm = {
 };
 
 void
-nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
-		int index, const struct nvkm_therm_func *func)
+nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device, enum nvkm_subdev_type type,
+		int inst, const struct nvkm_therm_func *func)
 {
-	nvkm_subdev_ctor(&nvkm_therm, device, index, &therm->subdev);
+	nvkm_subdev_ctor(&nvkm_therm, device, type, inst, &therm->subdev);
 	therm->func = func;
 
 	nvkm_alarm_init(&therm->alarm, nvkm_therm_alarm);
@@ -443,13 +443,13 @@ nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
 
 int
 nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
-		int index, struct nvkm_therm **ptherm)
+		enum nvkm_subdev_type type, int inst, struct nvkm_therm **ptherm)
 {
 	struct nvkm_therm *therm;
 
 	if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
 		return -ENOMEM;
 
-	nvkm_therm_ctor(therm, device, index, func);
+	nvkm_therm_ctor(therm, device, type, inst, func);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
index 96f8da40ac82670ddceab1547dfb813e413859ae..4af86f2d3e7e743db643cedd1c418c5dd7297050 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
@@ -223,12 +223,13 @@ g84_therm = {
 };
 
 int
-g84_therm_new(struct nvkm_device *device, int index, struct nvkm_therm **ptherm)
+g84_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_therm **ptherm)
 {
 	struct nvkm_therm *therm;
 	int ret;
 
-	ret = nvkm_therm_new_(&g84_therm, device, index, &therm);
+	ret = nvkm_therm_new_(&g84_therm, device, type, inst, &therm);
 	*ptherm = therm;
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
index 0981b02790e2bdeb03496fac7e6d292bd121f386..2b031d4eaeb6864b1becb518334d5c83ae82866f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
@@ -146,8 +146,8 @@ gf119_therm = {
 };
 
 int
-gf119_therm_new(struct nvkm_device *device, int index,
-	       struct nvkm_therm **ptherm)
+gf119_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		struct nvkm_therm **ptherm)
 {
-	return nvkm_therm_new_(&gf119_therm, device, index, ptherm);
+	return nvkm_therm_new_(&gf119_therm, device, type, inst, ptherm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c
index 4e03971d2e3df2cd4f1c7f78788896b72530398d..45e295c271fb5baed46646861d80a4428efd17bb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c
@@ -35,8 +35,8 @@ gk104_clkgate_enable(struct nvkm_therm *base)
 	int i;
 
 	/* Program ENG_MANT, ENG_FILTER */
-	for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
-		if (!nvkm_device_subdev(dev, order[i].engine))
+	for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {
+		if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))
 			continue;
 
 		nvkm_mask(dev, 0x20200 + order[i].offset, 0xff00, 0x4500);
@@ -47,8 +47,8 @@ gk104_clkgate_enable(struct nvkm_therm *base)
 	nvkm_wr32(dev, 0x02028c, therm->idle_filter->hubmmu);
 
 	/* Enable clockgating (ENG_CLK = RUN->AUTO) */
-	for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
-		if (!nvkm_device_subdev(dev, order[i].engine))
+	for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {
+		if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))
 			continue;
 
 		nvkm_mask(dev, 0x20200 + order[i].offset, 0x00ff, 0x0045);
@@ -64,8 +64,8 @@ gk104_clkgate_fini(struct nvkm_therm *base, bool suspend)
 	int i;
 
 	/* ENG_CLK = AUTO->RUN, ENG_PWR = RUN->AUTO */
-	for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
-		if (!nvkm_device_subdev(dev, order[i].engine))
+	for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {
+		if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))
 			continue;
 
 		nvkm_mask(dev, 0x20200 + order[i].offset, 0xff, 0x54);
@@ -73,15 +73,15 @@ gk104_clkgate_fini(struct nvkm_therm *base, bool suspend)
 }
 
 const struct gk104_clkgate_engine_info gk104_clkgate_engine_info[] = {
-	{ NVKM_ENGINE_GR,     0x00 },
-	{ NVKM_ENGINE_MSPDEC, 0x04 },
-	{ NVKM_ENGINE_MSPPP,  0x08 },
-	{ NVKM_ENGINE_MSVLD,  0x0c },
-	{ NVKM_ENGINE_CE0,    0x10 },
-	{ NVKM_ENGINE_CE1,    0x14 },
-	{ NVKM_ENGINE_MSENC,  0x18 },
-	{ NVKM_ENGINE_CE2,    0x1c },
-	{ NVKM_SUBDEV_NR, 0 },
+	{ NVKM_ENGINE_GR,     0, 0x00 },
+	{ NVKM_ENGINE_MSPDEC, 0, 0x04 },
+	{ NVKM_ENGINE_MSPPP,  0, 0x08 },
+	{ NVKM_ENGINE_MSVLD,  0, 0x0c },
+	{ NVKM_ENGINE_CE,     0, 0x10 },
+	{ NVKM_ENGINE_CE,     1, 0x14 },
+	{ NVKM_ENGINE_MSENC,  0, 0x18 },
+	{ NVKM_ENGINE_CE,     2, 0x1c },
+	{ NVKM_SUBDEV_NR },
 };
 
 const struct gf100_idle_filter gk104_idle_filter = {
@@ -106,9 +106,8 @@ gk104_therm_func = {
 };
 
 static int
-gk104_therm_new_(const struct nvkm_therm_func *func,
-		 struct nvkm_device *device,
-		 int index,
+gk104_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
+		 enum nvkm_subdev_type type, int inst,
 		 const struct gk104_clkgate_engine_info *clkgate_order,
 		 const struct gf100_idle_filter *idle_filter,
 		 struct nvkm_therm **ptherm)
@@ -118,19 +117,17 @@ gk104_therm_new_(const struct nvkm_therm_func *func,
 	if (!therm)
 		return -ENOMEM;
 
-	nvkm_therm_ctor(&therm->base, device, index, func);
+	nvkm_therm_ctor(&therm->base, device, type, inst, func);
 	*ptherm = &therm->base;
 	therm->clkgate_order = clkgate_order;
 	therm->idle_filter = idle_filter;
-
 	return 0;
 }
 
 int
-gk104_therm_new(struct nvkm_device *device,
-		int index, struct nvkm_therm **ptherm)
+gk104_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_therm **ptherm)
 {
-	return gk104_therm_new_(&gk104_therm_func, device, index,
+	return gk104_therm_new_(&gk104_therm_func, device, type, inst,
 				gk104_clkgate_engine_info, &gk104_idle_filter,
 				ptherm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h
index 293e7743b19bb2357064e22fe642ec741b4426cd..9a8641421038f3048404dbd0fb22fcb929037963 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h
@@ -31,7 +31,8 @@
 #include "gf100.h"
 
 struct gk104_clkgate_engine_info {
-	enum nvkm_devidx engine;
+	enum nvkm_subdev_type type;
+	int inst;
 	u8 offset;
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
index 86848ece4d897de7b318e3805f591ed46ba950de..c845fd392f58f0cdf05234cf236206bfcb3299d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
@@ -68,8 +68,8 @@ gm107_therm = {
 };
 
 int
-gm107_therm_new(struct nvkm_device *device, int index,
+gm107_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		struct nvkm_therm **ptherm)
 {
-	return nvkm_therm_new_(&gm107_therm, device, index, ptherm);
+	return nvkm_therm_new_(&gm107_therm, device, type, inst, ptherm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c
index 73dc78093d5d5b55fec453075623b65a86e57f05..e0cdd12463ecfa7183da0e90e4015355d0c5d6e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c
@@ -32,8 +32,8 @@ gm200_therm = {
 };
 
 int
-gm200_therm_new(struct nvkm_device *device, int index,
+gm200_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		struct nvkm_therm **ptherm)
 {
-	return nvkm_therm_new_(&gm200_therm, device, index, ptherm);
+	return nvkm_therm_new_(&gm200_therm, device, type, inst, ptherm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
index 9f0dea3f61dc42f4453c23031f14de98fa9941f3..44f021392b955d9f708bd1c77475b6680c22dd31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
@@ -49,8 +49,8 @@ gp100_therm = {
 };
 
 int
-gp100_therm_new(struct nvkm_device *device, int index,
+gp100_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		struct nvkm_therm **ptherm)
 {
-	return nvkm_therm_new_(&gp100_therm, device, index, ptherm);
+	return nvkm_therm_new_(&gp100_therm, device, type, inst, ptherm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
index c08097f2aff5028a0bdfc2ec2e4e2512505f1ee8..9e451bd9395c33f19cda5a0ce97d5df7c8b84d37 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
@@ -68,8 +68,8 @@ gt215_therm = {
 };
 
 int
-gt215_therm_new(struct nvkm_device *device, int index,
-	       struct nvkm_therm **ptherm)
+gt215_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		struct nvkm_therm **ptherm)
 {
-	return nvkm_therm_new_(&gt215_therm, device, index, ptherm);
+	return nvkm_therm_new_(&gt215_therm, device, type, inst, ptherm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
index 2c92ffb5f9d0623dd850d42c202110173ac2647e..c13fee9734dff77df9857116bd7d52bc4c95e636 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
@@ -197,8 +197,8 @@ nv40_therm = {
 };
 
 int
-nv40_therm_new(struct nvkm_device *device, int index,
+nv40_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	       struct nvkm_therm **ptherm)
 {
-	return nvkm_therm_new_(&nv40_therm, device, index, ptherm);
+	return nvkm_therm_new_(&nv40_therm, device, type, inst, ptherm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
index 9b57b433d4cf622e0e14433db80c51fa4b51d2e6..9cf16a75a3cdc27929005bb32a9d6af4417d0868 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
@@ -169,8 +169,8 @@ nv50_therm = {
 };
 
 int
-nv50_therm_new(struct nvkm_device *device, int index,
+nv50_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	       struct nvkm_therm **ptherm)
 {
-	return nvkm_therm_new_(&nv50_therm, device, index, ptherm);
+	return nvkm_therm_new_(&nv50_therm, device, type, inst, ptherm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
index 21659daf18649c827aff2369dab63fb0230cd87e..54e960589411e213d18d2e08237afa2468346986 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
@@ -30,10 +30,10 @@
 #include <subdev/bios/gpio.h>
 #include <subdev/bios/perf.h>
 
-int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *,
-		    int index, struct nvkm_therm **);
-void nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
-		     int index, const struct nvkm_therm_func *func);
+int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *, enum nvkm_subdev_type,
+		    int, struct nvkm_therm **);
+void nvkm_therm_ctor(struct nvkm_therm *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		     const struct nvkm_therm_func *);
 
 struct nvkm_fan {
 	struct nvkm_therm *parent;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index dd922033628c236cfec38f3e91d49aa9ba886e78..8b0da0c062685d7f22e1558937e33255a5469998 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -183,14 +183,14 @@ nvkm_timer = {
 
 int
 nvkm_timer_new_(const struct nvkm_timer_func *func, struct nvkm_device *device,
-		int index, struct nvkm_timer **ptmr)
+		enum nvkm_subdev_type type, int inst, struct nvkm_timer **ptmr)
 {
 	struct nvkm_timer *tmr;
 
 	if (!(tmr = *ptmr = kzalloc(sizeof(*tmr), GFP_KERNEL)))
 		return -ENOMEM;
 
-	nvkm_subdev_ctor(&nvkm_timer, device, index, &tmr->subdev);
+	nvkm_subdev_ctor(&nvkm_timer, device, type, inst, &tmr->subdev);
 	tmr->func = func;
 	INIT_LIST_HEAD(&tmr->alarms);
 	spin_lock_init(&tmr->lock);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
index 9ed5f64912d0dd09168fe44fa1472b2123ee5460..73c3776b6b83c1676c11c640dd0699d525e6ed7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
@@ -33,7 +33,8 @@ gk20a_timer = {
 };
 
 int
-gk20a_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+gk20a_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+		struct nvkm_timer **ptmr)
 {
-	return nvkm_timer_new_(&gk20a_timer, device, index, ptmr);
+	return nvkm_timer_new_(&gk20a_timer, device, type, inst, ptmr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
index 7f48249f41decea964d62790601b6be5d6d78f4c..0058e856b378dfbdbc102b9ec5afc99bd247558f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
@@ -145,7 +145,8 @@ nv04_timer = {
 };
 
 int
-nv04_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+nv04_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_timer **ptmr)
 {
-	return nvkm_timer_new_(&nv04_timer, device, index, ptmr);
+	return nvkm_timer_new_(&nv04_timer, device, type, inst, ptmr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
index bb99a152f26e6ef24ae1ea643f97dba785867cf3..7e1f8c22f2a83988dc38feb4b7e94a63235ca809 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
@@ -82,7 +82,8 @@ nv40_timer = {
 };
 
 int
-nv40_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+nv40_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_timer **ptmr)
 {
-	return nvkm_timer_new_(&nv40_timer, device, index, ptmr);
+	return nvkm_timer_new_(&nv40_timer, device, type, inst, ptmr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
index 3cf9ec1b1b57cc0e23311b3aaa590d7462495571..c2b263721f10c7b36f89eaabfc6435ccb7e4c708 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
@@ -79,7 +79,8 @@ nv41_timer = {
 };
 
 int
-nv41_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+nv41_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_timer **ptmr)
 {
-	return nvkm_timer_new_(&nv41_timer, device, index, ptmr);
+	return nvkm_timer_new_(&nv41_timer, device, type, inst, ptmr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
index 89e97294b182045e00c4d53a1df0bcb5b7dcf84e..e6debe7e2fa95e848102f1e81bed01062ed2e010 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
@@ -4,8 +4,8 @@
 #define nvkm_timer(p) container_of((p), struct nvkm_timer, subdev)
 #include <subdev/timer.h>
 
-int nvkm_timer_new_(const struct nvkm_timer_func *, struct nvkm_device *,
-		    int index, struct nvkm_timer **);
+int nvkm_timer_new_(const struct nvkm_timer_func *, struct nvkm_device *, enum nvkm_subdev_type,
+		    int, struct nvkm_timer **);
 
 struct nvkm_timer_func {
 	void (*init)(struct nvkm_timer *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild
index 438d9d78ab52859094a44bed6f23fea7ee86bd13..d5db845195dca72733d726e37abe291d32360cf0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: MIT
 nvkm-y += nvkm/subdev/top/base.o
 nvkm-y += nvkm/subdev/top/gk104.o
+nvkm-y += nvkm/subdev/top/ga100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
index cce6e4e90ebf804ffb699371e0cf3a4b8fb55a95..28d0789f50fecca9059087eaba7dafcb1f1de6dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
@@ -28,7 +28,8 @@ nvkm_top_device_new(struct nvkm_top *top)
 {
 	struct nvkm_top_device *info = kmalloc(sizeof(*info), GFP_KERNEL);
 	if (info) {
-		info->index = NVKM_SUBDEV_NR;
+		info->type = NVKM_SUBDEV_NR;
+		info->inst = -1;
 		info->addr = 0;
 		info->fault = -1;
 		info->engine = -1;
@@ -41,14 +42,14 @@ nvkm_top_device_new(struct nvkm_top *top)
 }
 
 u32
-nvkm_top_addr(struct nvkm_device *device, enum nvkm_devidx index)
+nvkm_top_addr(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
 {
 	struct nvkm_top *top = device->top;
 	struct nvkm_top_device *info;
 
 	if (top) {
 		list_for_each_entry(info, &top->device, head) {
-			if (info->index == index)
+			if (info->type == type && info->inst == inst)
 				return info->addr;
 		}
 	}
@@ -57,14 +58,14 @@ nvkm_top_addr(struct nvkm_device *device, enum nvkm_devidx index)
 }
 
 u32
-nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index)
+nvkm_top_reset(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
 {
 	struct nvkm_top *top = device->top;
 	struct nvkm_top_device *info;
 
 	if (top) {
 		list_for_each_entry(info, &top->device, head) {
-			if (info->index == index && info->reset >= 0)
+			if (info->type == type && info->inst == inst && info->reset >= 0)
 				return BIT(info->reset);
 		}
 	}
@@ -73,14 +74,14 @@ nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index)
 }
 
 u32
-nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
 {
 	struct nvkm_top *top = device->top;
 	struct nvkm_top_device *info;
 
 	if (top) {
 		list_for_each_entry(info, &top->device, head) {
-			if (info->index == devidx && info->intr >= 0)
+			if (info->type == type && info->inst == inst && info->intr >= 0)
 				return BIT(info->intr);
 		}
 	}
@@ -88,44 +89,21 @@ nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx)
 	return 0;
 }
 
-u32
-nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs)
-{
-	struct nvkm_top *top = device->top;
-	struct nvkm_top_device *info;
-	u64 subdevs = 0;
-	u32 handled = 0;
-
-	if (top) {
-		list_for_each_entry(info, &top->device, head) {
-			if (info->index != NVKM_SUBDEV_NR && info->intr >= 0) {
-				if (intr & BIT(info->intr)) {
-					subdevs |= BIT_ULL(info->index);
-					handled |= BIT(info->intr);
-				}
-			}
-		}
-	}
-
-	*psubdevs = subdevs;
-	return intr & ~handled;
-}
-
 int
-nvkm_top_fault_id(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_top_fault_id(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
 {
 	struct nvkm_top *top = device->top;
 	struct nvkm_top_device *info;
 
 	list_for_each_entry(info, &top->device, head) {
-		if (info->index == devidx && info->fault >= 0)
+		if (info->type == type && info->inst == inst && info->fault >= 0)
 			return info->fault;
 	}
 
 	return -ENOENT;
 }
 
-enum nvkm_devidx
+struct nvkm_subdev *
 nvkm_top_fault(struct nvkm_device *device, int fault)
 {
 	struct nvkm_top *top = device->top;
@@ -133,28 +111,10 @@ nvkm_top_fault(struct nvkm_device *device, int fault)
 
 	list_for_each_entry(info, &top->device, head) {
 		if (info->fault == fault)
-			return info->index;
-	}
-
-	return NVKM_SUBDEV_NR;
-}
-
-enum nvkm_devidx
-nvkm_top_engine(struct nvkm_device *device, int index, int *runl, int *engn)
-{
-	struct nvkm_top *top = device->top;
-	struct nvkm_top_device *info;
-	int n = 0;
-
-	list_for_each_entry(info, &top->device, head) {
-		if (info->engine >= 0 && info->runlist >= 0 && n++ == index) {
-			*runl = info->runlist;
-			*engn = info->engine;
-			return info->index;
-		}
+			return nvkm_device_subdev(device, info->type, info->inst);
 	}
 
-	return -ENODEV;
+	return NULL;
 }
 
 static int
@@ -186,12 +146,12 @@ nvkm_top = {
 
 int
 nvkm_top_new_(const struct nvkm_top_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_top **ptop)
+	      enum nvkm_subdev_type type, int inst, struct nvkm_top **ptop)
 {
 	struct nvkm_top *top;
 	if (!(top = *ptop = kzalloc(sizeof(*top), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&nvkm_top, device, index, &top->subdev);
+	nvkm_subdev_ctor(&nvkm_top, device, type, inst, &top->subdev);
 	top->func = func;
 	INIT_LIST_HEAD(&top->device);
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
new file mode 100644
index 0000000000000000000000000000000000000000..31933f3e5a076b36e14fadf75cbef8f39a7815fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static int
+ga100_top_oneinit(struct nvkm_top *top)
+{
+	struct nvkm_subdev *subdev = &top->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_top_device *info = NULL;
+	u32 data, type, inst;
+	int i, n, size = nvkm_rd32(device, 0x0224fc) >> 20;
+
+	for (i = 0, n = 0; i < size; i++) {
+		if (!info) {
+			if (!(info = nvkm_top_device_new(top)))
+				return -ENOMEM;
+			type = ~0;
+			inst = 0;
+		}
+
+		data = nvkm_rd32(device, 0x022800 + (i * 0x04));
+		nvkm_trace(subdev, "%02x: %08x\n", i, data);
+		if (!data && n == 0)
+			continue;
+
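+		/* Word 0 holds type/inst/fault, word 1 addr/reset, word 2 runlist/engine. */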
+		switch (n++) {
+		case 0:
+			type	      = (data & 0x3f000000) >> 24;
+			inst	      = (data & 0x000f0000) >> 16;
+			info->fault   = (data & 0x0000007f);
+			break;
+		case 1:
+			info->addr    = (data & 0x00fff000);
+			info->reset   = (data & 0x0000001f);
+			break;
+		case 2:
+			info->runlist = (data & 0x0000fc00) >> 10;
+			info->engine  = (data & 0x00000003);
+			break;
+		default:
+			break;
+		}
+
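+		/* Bit 31 set means more words follow for this same device entry. */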
+		if (data & 0x80000000)
+			continue;
+		n = 0;
+
+		/* Translate engine type to NVKM engine identifier. */
+#define I_(T,I) do { info->type = (T); info->inst = (I); } while(0)
+#define O_(T,I) do { WARN_ON(inst); I_(T, I); } while (0)
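+		/* O_() is for single-instance units; WARN_ON flags a non-zero inst. */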
+		switch (type) {
+		case 0x00000000: O_(NVKM_ENGINE_GR    ,    0); break;
+		case 0x0000000d: O_(NVKM_ENGINE_SEC2  ,    0); break;
+		case 0x0000000e: I_(NVKM_ENGINE_NVENC , inst); break;
+		case 0x00000010: I_(NVKM_ENGINE_NVDEC , inst); break;
+		case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break;
+		case 0x00000013: I_(NVKM_ENGINE_CE    , inst); break;
+		case 0x00000014: O_(NVKM_SUBDEV_GSP   ,    0); break;
+		case 0x00000015: O_(NVKM_ENGINE_NVJPG ,    0); break;
+		case 0x00000016: O_(NVKM_ENGINE_OFA   ,    0); break;
+		case 0x00000017: O_(NVKM_SUBDEV_FLA   ,    0); break;
+		default:
+			break;
+		}
+
+		nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
+				   "runlist %2d engine %2d reset %2d\n", type, inst,
+			   info->type == NVKM_SUBDEV_NR ? "????????" : nvkm_subdev_type[info->type],
+			   info->addr, info->fault, info->runlist, info->engine, info->reset);
+		info = NULL;
+	}
+
+	return 0;
+}
+
+static const struct nvkm_top_func
+ga100_top = {
+	.oneinit = ga100_top_oneinit,
+};
+
+int
+ga100_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_top **ptop)
+{
+	return nvkm_top_new_(&ga100_top, device, type, inst, ptop);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index 1156634533f9751277d3c656a827fce393a88965..4dcad97bd505bd12fb17b44026b5fe40906a4bd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -70,26 +70,26 @@ gk104_top_oneinit(struct nvkm_top *top)
 			continue;
 
 		/* Translate engine type to NVKM engine identifier. */
-#define A_(A) if (inst == 0) info->index = NVKM_ENGINE_##A
-#define B_(A) if (inst + NVKM_ENGINE_##A##0 < NVKM_ENGINE_##A##_LAST + 1)      \
-		info->index = NVKM_ENGINE_##A##0 + inst
-#define C_(A) if (inst == 0) info->index = NVKM_SUBDEV_##A
+#define I_(T,I) do { info->type = (T); info->inst = (I); } while(0)
+#define O_(T,I) do { WARN_ON(inst); I_(T, I); } while (0)
 		switch (type) {
-		case 0x00000000: A_(GR    ); break;
-		case 0x00000001: A_(CE0   ); break;
-		case 0x00000002: A_(CE1   ); break;
-		case 0x00000003: A_(CE2   ); break;
-		case 0x00000008: A_(MSPDEC); break;
-		case 0x00000009: A_(MSPPP ); break;
-		case 0x0000000a: A_(MSVLD ); break;
-		case 0x0000000b: A_(MSENC ); break;
-		case 0x0000000c: A_(VIC   ); break;
-		case 0x0000000d: A_(SEC2  ); break;
-		case 0x0000000e: B_(NVENC ); break;
-		case 0x0000000f: A_(NVENC1); break;
-		case 0x00000010: B_(NVDEC ); break;
-		case 0x00000013: B_(CE    ); break;
-		case 0x00000014: C_(GSP   ); break;
+		case 0x00000000: O_(NVKM_ENGINE_GR    ,    0); break;
+		case 0x00000001: O_(NVKM_ENGINE_CE    ,    0); break;
+		case 0x00000002: O_(NVKM_ENGINE_CE    ,    1); break;
+		case 0x00000003: O_(NVKM_ENGINE_CE    ,    2); break;
+		case 0x00000008: O_(NVKM_ENGINE_MSPDEC,    0); break;
+		case 0x00000009: O_(NVKM_ENGINE_MSPPP ,    0); break;
+		case 0x0000000a: O_(NVKM_ENGINE_MSVLD ,    0); break;
+		case 0x0000000b: O_(NVKM_ENGINE_MSENC ,    0); break;
+		case 0x0000000c: O_(NVKM_ENGINE_VIC   ,    0); break;
+		case 0x0000000d: O_(NVKM_ENGINE_SEC2  ,    0); break;
+		case 0x0000000e: I_(NVKM_ENGINE_NVENC , inst); break;
+		case 0x0000000f: O_(NVKM_ENGINE_NVENC ,    1); break;
+		case 0x00000010: I_(NVKM_ENGINE_NVDEC , inst); break;
+		case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break;
+		case 0x00000013: I_(NVKM_ENGINE_CE    , inst); break;
+		case 0x00000014: O_(NVKM_SUBDEV_GSP   ,    0); break;
+		case 0x00000015: O_(NVKM_ENGINE_NVJPG ,    0); break;
 		default:
 			break;
 		}
@@ -97,8 +97,7 @@ gk104_top_oneinit(struct nvkm_top *top)
 		nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
 				   "engine %2d runlist %2d intr %2d "
 				   "reset %2d\n", type, inst,
-			   info->index == NVKM_SUBDEV_NR ? NULL :
-					  nvkm_subdev_name[info->index],
+			   info->type == NVKM_SUBDEV_NR ? "????????" : nvkm_subdev_type[info->type],
 			   info->addr, info->fault, info->engine, info->runlist,
 			   info->intr, info->reset);
 		info = NULL;
@@ -113,7 +112,8 @@ gk104_top = {
 };
 
 int
-gk104_top_new(struct nvkm_device *device, int index, struct nvkm_top **ptop)
+gk104_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_top **ptop)
 {
-	return nvkm_top_new_(&gk104_top, device, index, ptop);
+	return nvkm_top_new_(&gk104_top, device, type, inst, ptop);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
index a16baa2941cf45ed0d7d2adc75af368e9118dbf4..8e103a83670591caeef823abeb33b970afe27511 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
@@ -8,19 +8,8 @@ struct nvkm_top_func {
 	int (*oneinit)(struct nvkm_top *);
 };
 
-int nvkm_top_new_(const struct nvkm_top_func *, struct nvkm_device *,
-		  int, struct nvkm_top **);
-
-struct nvkm_top_device {
-	enum nvkm_devidx index;
-	u32 addr;
-	int fault;
-	int engine;
-	int runlist;
-	int reset;
-	int intr;
-	struct list_head head;
-};
+int nvkm_top_new_(const struct nvkm_top_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		  struct nvkm_top **);
 
 struct nvkm_top_device *nvkm_top_device_new(struct nvkm_top *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index e344901cfdc7fcdbe02f9e0f2d32be0a1ef18a90..a17a6dd8d3de9066df9d8f3e074f3b5f43be8e29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -281,12 +281,12 @@ nvkm_volt = {
 
 void
 nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
-	       int index, struct nvkm_volt *volt)
+	       enum nvkm_subdev_type type, int inst, struct nvkm_volt *volt)
 {
 	struct nvkm_bios *bios = device->bios;
 	int i;
 
-	nvkm_subdev_ctor(&nvkm_volt, device, index, &volt->subdev);
+	nvkm_subdev_ctor(&nvkm_volt, device, type, inst, &volt->subdev);
 	volt->func = func;
 
 	/* Assuming the non-bios device should build the voltage table later */
@@ -319,10 +319,10 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
 
 int
 nvkm_volt_new_(const struct nvkm_volt_func *func, struct nvkm_device *device,
-	       int index, struct nvkm_volt **pvolt)
+	       enum nvkm_subdev_type type, int inst, struct nvkm_volt **pvolt)
 {
 	if (!(*pvolt = kzalloc(sizeof(**pvolt), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_volt_ctor(func, device, index, *pvolt);
+	nvkm_volt_ctor(func, device, type, inst, *pvolt);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
index d9ed6925ca6422c94f802ee109815a4ff5dabf52..b47a1c0817be16de1107b4baa280fb1dc9b9194e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
@@ -56,12 +56,13 @@ gf100_volt = {
 };
 
 int
-gf100_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+gf100_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_volt **pvolt)
 {
 	struct nvkm_volt *volt;
 	int ret;
 
-	ret = nvkm_volt_new_(&gf100_volt, device, index, &volt);
+	ret = nvkm_volt_new_(&gf100_volt, device, type, inst, &volt);
 	*pvolt = volt;
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
index 547a58f0aeac326b3b7685d5a04fa1956e77b1c6..03c8a2c2916c25437df7b821432f17e1e9a71f5d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
@@ -46,12 +46,13 @@ gf117_volt = {
 };
 
 int
-gf117_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+gf117_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_volt **pvolt)
 {
 	struct nvkm_volt *volt;
 	int ret;
 
-	ret = nvkm_volt_new_(&gf117_volt, device, index, &volt);
+	ret = nvkm_volt_new_(&gf117_volt, device, type, inst, &volt);
 	*pvolt = volt;
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
index 1c744e029454030e04c3003a7db803ae83b59e28..d1ce4309cfb84057872f2d8af9cf52490bf0c33f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
@@ -95,7 +95,8 @@ gk104_volt_pwm = {
 };
 
 int
-gk104_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+gk104_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_volt **pvolt)
 {
 	const struct nvkm_volt_func *volt_func = &gk104_volt_gpio;
 	struct dcb_gpio_func gpio;
@@ -114,7 +115,7 @@ gk104_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
 
 	if (!(volt = kzalloc(sizeof(*volt), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_volt_ctor(volt_func, device, index, &volt->base);
+	nvkm_volt_ctor(volt_func, device, type, inst, &volt->base);
 	*pvolt = &volt->base;
 	volt->bios = bios;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
index ce5d83cdc7cf7900cf86060750b9398ef24063d9..8c2faa9645111932c6920befc021d71363d7869c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
@@ -144,14 +144,14 @@ gk20a_volt = {
 };
 
 int
-gk20a_volt_ctor(struct nvkm_device *device, int index,
+gk20a_volt_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		const struct cvb_coef *coefs, int nb_coefs,
 		int vmin, struct gk20a_volt *volt)
 {
 	struct nvkm_device_tegra *tdev = device->func->tegra(device);
 	int i, uv;
 
-	nvkm_volt_ctor(&gk20a_volt, device, index, &volt->base);
+	nvkm_volt_ctor(&gk20a_volt, device, type, inst, &volt->base);
 
 	uv = regulator_get_voltage(tdev->vdd);
 	nvkm_debug(&volt->base.subdev, "the default voltage is %duV\n", uv);
@@ -172,7 +172,7 @@ gk20a_volt_ctor(struct nvkm_device *device, int index,
 }
 
 int
-gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+gk20a_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_volt **pvolt)
 {
 	struct gk20a_volt *volt;
 
@@ -181,6 +181,6 @@ gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
 		return -ENOMEM;
 	*pvolt = &volt->base;
 
-	return gk20a_volt_ctor(device, index, gk20a_cvb_coef,
+	return gk20a_volt_ctor(device, type, inst, gk20a_cvb_coef,
 			       ARRAY_SIZE(gk20a_cvb_coef), 0, volt);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
index 6a6c97f9684e0c1e560d03209fbed77755ac4dac..01f8a5fcf496162dfb2b92f07d44181461a0d586 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
@@ -37,7 +37,7 @@ struct gk20a_volt {
 	struct regulator *vdd;
 };
 
-int gk20a_volt_ctor(struct nvkm_device *device, int index,
+int gk20a_volt_ctor(struct nvkm_device *device, enum nvkm_subdev_type, int,
 		    const struct cvb_coef *coefs, int nb_coefs,
 		    int vmin, struct gk20a_volt *volt);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
index 2925b9cae6813fe8c67d5c7ced06d06183c203a0..c2e9694d333f4d5a4167d80a26e59b0885a6d046 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
@@ -64,7 +64,8 @@ static const u32 speedo_to_vmin[] = {
 };
 
 int
-gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+gm20b_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_volt **pvolt)
 {
 	struct nvkm_device_tegra *tdev = device->func->tegra(device);
 	struct gk20a_volt *volt;
@@ -84,9 +85,9 @@ gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
 	vmin = speedo_to_vmin[tdev->gpu_speedo_id];
 
 	if (tdev->gpu_speedo_id >= 1)
-		return gk20a_volt_ctor(device, index, gm20b_na_cvb_coef,
-				     ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt);
+		return gk20a_volt_ctor(device, type, inst, gm20b_na_cvb_coef,
+				       ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt);
 	else
-		return gk20a_volt_ctor(device, index, gm20b_cvb_coef,
-					ARRAY_SIZE(gm20b_cvb_coef), vmin, volt);
+		return gk20a_volt_ctor(device, type, inst, gm20b_cvb_coef,
+				       ARRAY_SIZE(gm20b_cvb_coef), vmin, volt);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
index 23409387abb58701f6773b8a7cc1c64326f1aa01..d6a587d6082d96b7f48a4c4b271155450a8d8c76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
@@ -30,12 +30,13 @@ nv40_volt = {
 };
 
 int
-nv40_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+nv40_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_volt **pvolt)
 {
 	struct nvkm_volt *volt;
 	int ret;
 
-	ret = nvkm_volt_new_(&nv40_volt, device, index, &volt);
+	ret = nvkm_volt_new_(&nv40_volt, device, type, inst, &volt);
 	*pvolt = volt;
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
index 75f13a34671fb34db527dc0406ee98f839d66708..24e2d16d19134e249795aee33354a1d92b94da7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -4,10 +4,10 @@
 #define nvkm_volt(p) container_of((p), struct nvkm_volt, subdev)
 #include <subdev/volt.h>
 
-void nvkm_volt_ctor(const struct nvkm_volt_func *, struct nvkm_device *,
-		    int index, struct nvkm_volt *);
-int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *,
-		   int index, struct nvkm_volt **);
+void nvkm_volt_ctor(const struct nvkm_volt_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		    struct nvkm_volt *);
+int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+		   struct nvkm_volt **);
 
 struct nvkm_volt_func {
 	int (*oneinit)(struct nvkm_volt *);
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 5417e7a47072b28c8048bab669e5da5ff3388bc5..e7281da5bc6a2d52d7ed2d190bf1601097b848d9 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -5,13 +5,129 @@ config DRM_OMAP
 	depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
 	select OMAP2_DSS
 	select DRM_KMS_HELPER
+	select VIDEOMODE_HELPERS
+	select HDMI
 	default n
 	help
 	  DRM display driver for OMAP2/3/4 based boards.
 
 if DRM_OMAP
 
-source "drivers/gpu/drm/omapdrm/dss/Kconfig"
-source "drivers/gpu/drm/omapdrm/displays/Kconfig"
+config OMAP2_DSS_DEBUG
+	bool "Debug support"
+	default n
+	help
+	  This enables printing of debug messages. Alternatively, debug messages
+	  can also be enabled by setting CONFIG_DYNAMIC_DEBUG and then setting
+	  appropriate flags in <debugfs>/dynamic_debug/control.
+
+config OMAP2_DSS_DEBUGFS
+	bool "Debugfs filesystem support"
+	depends on DEBUG_FS
+	default n
+	help
+	  This enables debugfs for OMAPDSS at <debugfs>/omapdss. This enables
+	  querying about clock configuration and register configuration of dss,
+	  dispc, dsi, hdmi and rfbi.
+
+config OMAP2_DSS_COLLECT_IRQ_STATS
+	bool "Collect DSS IRQ statistics"
+	depends on OMAP2_DSS_DEBUGFS
+	default n
+	help
+	  Collect DSS IRQ statistics, printable via debugfs.
+
+	  The statistics can be found from
+	  <debugfs>/omapdss/dispc_irq for DISPC interrupts, and
+	  <debugfs>/omapdss/dsi_irq for DSI interrupts.
+
+config OMAP2_DSS_DPI
+	bool "DPI support"
+	default y
+	help
+	  DPI Interface. This is the Parallel Display Interface.
+
+config OMAP2_DSS_VENC
+	bool "VENC support"
+	default y
+	help
+	  OMAP Video Encoder support for S-Video and composite TV-out.
+
+config OMAP2_DSS_HDMI_COMMON
+	bool
+
+config OMAP4_DSS_HDMI
+	bool "HDMI support for OMAP4"
+	default y
+	select OMAP2_DSS_HDMI_COMMON
+	help
+	  HDMI support for OMAP4 based SoCs.
+
+config OMAP4_DSS_HDMI_CEC
+	bool "Enable HDMI CEC support for OMAP4"
+	depends on OMAP4_DSS_HDMI
+	select CEC_CORE
+	default y
+	help
+	  When selected the HDMI transmitter will support the CEC feature.
+
+config OMAP5_DSS_HDMI
+	bool "HDMI support for OMAP5"
+	default n
+	select OMAP2_DSS_HDMI_COMMON
+	help
+	  HDMI Interface for OMAP5 and similar cores. This adds the High
+	  Definition Multimedia Interface. See http://www.hdmi.org/ for HDMI
+	  specification.
+
+config OMAP2_DSS_SDI
+	bool "SDI support"
+	default n
+	help
+	  SDI (Serial Display Interface) support.
+
+	  SDI is a high speed one-way display serial bus between the host
+	  processor and a display.
+
+config OMAP2_DSS_DSI
+	bool "DSI support"
+	default n
+	select DRM_MIPI_DSI
+	help
+	  MIPI DSI (Display Serial Interface) support.
+
+	  DSI is a high speed half-duplex serial interface between the host
+	  processor and a peripheral, such as a display or a framebuffer chip.
+
+	  See http://www.mipi.org/ for DSI specifications.
+
+config OMAP2_DSS_MIN_FCK_PER_PCK
+	int "Minimum FCK/PCK ratio (for scaling)"
+	range 0 32
+	default 0
+	help
+	  This can be used to adjust the minimum FCK/PCK ratio.
+
+	  With this you can make sure that DISPC FCK is at least
+	  n x PCK. Video plane scaling requires higher FCK than
+	  normally.
+
+	  If this is set to 0, there's no extra constraint on the
+	  DISPC FCK. However, the FCK will at minimum be
+	  2xPCK (if active matrix) or 3xPCK (if passive matrix).
+
+	  Max FCK is 173MHz, so this doesn't work if your PCK
+	  is very high.
+
+config OMAP2_DSS_SLEEP_AFTER_VENC_RESET
+	bool "Sleep 20ms after VENC reset"
+	default y
+	help
+	  There is a 20ms sleep after VENC reset which seemed to fix the
+	  reset. The reason for the bug is unclear, and it's also unclear
+	  on what platforms this happens.
+
+	  This option enables the sleep, and is enabled by default. You can
+	  disable the sleep if it doesn't cause problems on your platform.
 
 endif
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index f115253115c59abc542a206ed168cfaf5b9995de..21e8277ff88f41a283b5480864388d9ca3db4935 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -4,16 +4,12 @@
 # Direct Rendering Infrastructure (DRI)
 #
 
-obj-y += dss/
-obj-y += displays/
-
 omapdrm-y := omap_drv.o \
 	omap_irq.o \
 	omap_debugfs.o \
 	omap_crtc.o \
 	omap_plane.o \
 	omap_encoder.o \
-	omap_connector.o \
 	omap_fb.o \
 	omap_gem.o \
 	omap_gem_dmabuf.o \
@@ -22,4 +18,18 @@ omapdrm-y := omap_drv.o \
 
 omapdrm-$(CONFIG_DRM_FBDEV_EMULATION) += omap_fbdev.o
 
-obj-$(CONFIG_DRM_OMAP)	+= omapdrm.o
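+# The dss/ sources are now linked directly into the omapdrm module: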
+omapdrm-y += dss/base.o dss/output.o dss/dss.o dss/dispc.o \
+		dss/dispc_coefs.o dss/pll.o dss/video-pll.o
+omapdrm-$(CONFIG_OMAP2_DSS_DPI) += dss/dpi.o
+omapdrm-$(CONFIG_OMAP2_DSS_VENC) += dss/venc.o
+omapdrm-$(CONFIG_OMAP2_DSS_SDI) += dss/sdi.o
+omapdrm-$(CONFIG_OMAP2_DSS_DSI) += dss/dsi.o
+omapdrm-$(CONFIG_OMAP2_DSS_HDMI_COMMON) += dss/hdmi_common.o dss/hdmi_wp.o \
+		dss/hdmi_pll.o dss/hdmi_phy.o
+omapdrm-$(CONFIG_OMAP4_DSS_HDMI) += dss/hdmi4.o dss/hdmi4_core.o
+omapdrm-$(CONFIG_OMAP4_DSS_HDMI_CEC) += dss/hdmi4_cec.o
+omapdrm-$(CONFIG_OMAP5_DSS_HDMI) += dss/hdmi5.o dss/hdmi5_core.o
+ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_DRM_OMAP) += omapdrm.o
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
deleted file mode 100644
index f2be594c7eff8fa58bbdbf49566dc074b1fb5cc7..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menu "OMAPDRM External Display Device Drivers"
-
-config DRM_OMAP_PANEL_DSI_CM
-	tristate "Generic DSI Command Mode Panel"
-	depends on BACKLIGHT_CLASS_DEVICE
-	help
-	  Driver for generic DSI command mode panels.
-
-endmenu
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
deleted file mode 100644
index 488ddf15361380dee9b549752a645387e01a3dc1..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
deleted file mode 100644
index e39ce0c0c9a9c314f06b62246f20d0832b647320..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ /dev/null
@@ -1,1385 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Generic DSI Command Mode panel driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-/* #define DEBUG */
-
-#include <linux/backlight.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sched/signal.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/of_device.h>
-#include <linux/regulator/consumer.h>
-
-#include <drm/drm_connector.h>
-
-#include <video/mipi_display.h>
-#include <video/of_display_timing.h>
-
-#include "../dss/omapdss.h"
-
-/* DSI Virtual channel. Hardcoded for now. */
-#define TCH 0
-
-#define DCS_READ_NUM_ERRORS	0x05
-#define DCS_BRIGHTNESS		0x51
-#define DCS_CTRL_DISPLAY	0x53
-#define DCS_GET_ID1		0xda
-#define DCS_GET_ID2		0xdb
-#define DCS_GET_ID3		0xdc
-
-struct panel_drv_data {
-	struct omap_dss_device dssdev;
-	struct omap_dss_device *src;
-
-	struct videomode vm;
-
-	struct platform_device *pdev;
-
-	struct mutex lock;
-
-	struct backlight_device *bldev;
-	struct backlight_device *extbldev;
-
-	unsigned long	hw_guard_end;	/* next value of jiffies when we can
-					 * issue the next sleep in/out command
-					 */
-	unsigned long	hw_guard_wait;	/* max guard time in jiffies */
-
-	/* panel HW configuration from DT or platform data */
-	struct gpio_desc *reset_gpio;
-	struct gpio_desc *ext_te_gpio;
-
-	struct regulator *vpnl;
-	struct regulator *vddi;
-
-	bool use_dsi_backlight;
-
-	int width_mm;
-	int height_mm;
-
-	struct omap_dsi_pin_config pin_config;
-
-	/* runtime variables */
-	bool enabled;
-
-	bool te_enabled;
-
-	atomic_t do_update;
-	int channel;
-
-	struct delayed_work te_timeout_work;
-
-	bool intro_printed;
-
-	struct workqueue_struct *workqueue;
-
-	bool ulps_enabled;
-	unsigned int ulps_timeout;
-	struct delayed_work ulps_work;
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static irqreturn_t dsicm_te_isr(int irq, void *data);
-static void dsicm_te_timeout_work_callback(struct work_struct *work);
-static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable);
-
-static int dsicm_panel_reset(struct panel_drv_data *ddata);
-
-static void dsicm_ulps_work(struct work_struct *work);
-
-static void dsicm_bl_power(struct panel_drv_data *ddata, bool enable)
-{
-	struct backlight_device *backlight;
-
-	if (ddata->bldev)
-		backlight = ddata->bldev;
-	else if (ddata->extbldev)
-		backlight = ddata->extbldev;
-	else
-		return;
-
-	if (enable) {
-		backlight->props.fb_blank = FB_BLANK_UNBLANK;
-		backlight->props.state = ~(BL_CORE_FBBLANK | BL_CORE_SUSPENDED);
-		backlight->props.power = FB_BLANK_UNBLANK;
-	} else {
-		backlight->props.fb_blank = FB_BLANK_NORMAL;
-		backlight->props.power = FB_BLANK_POWERDOWN;
-		backlight->props.state |= BL_CORE_FBBLANK | BL_CORE_SUSPENDED;
-	}
-
-	backlight_update_status(backlight);
-}
-
-static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
-{
-	ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
-	ddata->hw_guard_end = jiffies + ddata->hw_guard_wait;
-}
-
-static void hw_guard_wait(struct panel_drv_data *ddata)
-{
-	unsigned long wait = ddata->hw_guard_end - jiffies;
-
-	if ((long)wait > 0 && wait <= ddata->hw_guard_wait) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(wait);
-	}
-}
-
-static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
-{
-	struct omap_dss_device *src = ddata->src;
-	int r;
-	u8 buf[1];
-
-	r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd, buf, 1);
-
-	if (r < 0)
-		return r;
-
-	*data = buf[0];
-
-	return 0;
-}
-
-static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd)
-{
-	struct omap_dss_device *src = ddata->src;
-
-	return src->ops->dsi.dcs_write(src, ddata->channel, &dcs_cmd, 1);
-}
-
-static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
-{
-	struct omap_dss_device *src = ddata->src;
-	u8 buf[2] = { dcs_cmd, param };
-
-	return src->ops->dsi.dcs_write(src, ddata->channel, buf, 2);
-}
-
-static int dsicm_sleep_in(struct panel_drv_data *ddata)
-
-{
-	struct omap_dss_device *src = ddata->src;
-	u8 cmd;
-	int r;
-
-	hw_guard_wait(ddata);
-
-	cmd = MIPI_DCS_ENTER_SLEEP_MODE;
-	r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, &cmd, 1);
-	if (r)
-		return r;
-
-	hw_guard_start(ddata, 120);
-
-	usleep_range(5000, 10000);
-
-	return 0;
-}
-
-static int dsicm_sleep_out(struct panel_drv_data *ddata)
-{
-	int r;
-
-	hw_guard_wait(ddata);
-
-	r = dsicm_dcs_write_0(ddata, MIPI_DCS_EXIT_SLEEP_MODE);
-	if (r)
-		return r;
-
-	hw_guard_start(ddata, 120);
-
-	usleep_range(5000, 10000);
-
-	return 0;
-}
-
-static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3)
-{
-	int r;
-
-	r = dsicm_dcs_read_1(ddata, DCS_GET_ID1, id1);
-	if (r)
-		return r;
-	r = dsicm_dcs_read_1(ddata, DCS_GET_ID2, id2);
-	if (r)
-		return r;
-	r = dsicm_dcs_read_1(ddata, DCS_GET_ID3, id3);
-	if (r)
-		return r;
-
-	return 0;
-}
-
-static int dsicm_set_update_window(struct panel_drv_data *ddata,
-		u16 x, u16 y, u16 w, u16 h)
-{
-	struct omap_dss_device *src = ddata->src;
-	int r;
-	u16 x1 = x;
-	u16 x2 = x + w - 1;
-	u16 y1 = y;
-	u16 y2 = y + h - 1;
-
-	u8 buf[5];
-	buf[0] = MIPI_DCS_SET_COLUMN_ADDRESS;
-	buf[1] = (x1 >> 8) & 0xff;
-	buf[2] = (x1 >> 0) & 0xff;
-	buf[3] = (x2 >> 8) & 0xff;
-	buf[4] = (x2 >> 0) & 0xff;
-
-	r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf));
-	if (r)
-		return r;
-
-	buf[0] = MIPI_DCS_SET_PAGE_ADDRESS;
-	buf[1] = (y1 >> 8) & 0xff;
-	buf[2] = (y1 >> 0) & 0xff;
-	buf[3] = (y2 >> 8) & 0xff;
-	buf[4] = (y2 >> 0) & 0xff;
-
-	r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf));
-	if (r)
-		return r;
-
-	src->ops->dsi.bta_sync(src, ddata->channel);
-
-	return r;
-}
-
-static void dsicm_queue_ulps_work(struct panel_drv_data *ddata)
-{
-	if (ddata->ulps_timeout > 0)
-		queue_delayed_work(ddata->workqueue, &ddata->ulps_work,
-				msecs_to_jiffies(ddata->ulps_timeout));
-}
-
-static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata)
-{
-	cancel_delayed_work(&ddata->ulps_work);
-}
-
-static int dsicm_enter_ulps(struct panel_drv_data *ddata)
-{
-	struct omap_dss_device *src = ddata->src;
-	int r;
-
-	if (ddata->ulps_enabled)
-		return 0;
-
-	dsicm_cancel_ulps_work(ddata);
-
-	r = _dsicm_enable_te(ddata, false);
-	if (r)
-		goto err;
-
-	if (ddata->ext_te_gpio)
-		disable_irq(gpiod_to_irq(ddata->ext_te_gpio));
-
-	src->ops->dsi.disable(src, false, true);
-
-	ddata->ulps_enabled = true;
-
-	return 0;
-
-err:
-	dev_err(&ddata->pdev->dev, "enter ULPS failed");
-	dsicm_panel_reset(ddata);
-
-	ddata->ulps_enabled = false;
-
-	dsicm_queue_ulps_work(ddata);
-
-	return r;
-}
-
-static int dsicm_exit_ulps(struct panel_drv_data *ddata)
-{
-	struct omap_dss_device *src = ddata->src;
-	int r;
-
-	if (!ddata->ulps_enabled)
-		return 0;
-
-	src->ops->enable(src);
-	src->ops->dsi.enable_hs(src, ddata->channel, true);
-
-	r = _dsicm_enable_te(ddata, true);
-	if (r) {
-		dev_err(&ddata->pdev->dev, "failed to re-enable TE");
-		goto err2;
-	}
-
-	if (ddata->ext_te_gpio)
-		enable_irq(gpiod_to_irq(ddata->ext_te_gpio));
-
-	dsicm_queue_ulps_work(ddata);
-
-	ddata->ulps_enabled = false;
-
-	return 0;
-
-err2:
-	dev_err(&ddata->pdev->dev, "failed to exit ULPS");
-
-	r = dsicm_panel_reset(ddata);
-	if (!r) {
-		if (ddata->ext_te_gpio)
-			enable_irq(gpiod_to_irq(ddata->ext_te_gpio));
-		ddata->ulps_enabled = false;
-	}
-
-	dsicm_queue_ulps_work(ddata);
-
-	return r;
-}
-
-static int dsicm_wake_up(struct panel_drv_data *ddata)
-{
-	if (ddata->ulps_enabled)
-		return dsicm_exit_ulps(ddata);
-
-	dsicm_cancel_ulps_work(ddata);
-	dsicm_queue_ulps_work(ddata);
-	return 0;
-}
-
-static int dsicm_bl_update_status(struct backlight_device *dev)
-{
-	struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
-	struct omap_dss_device *src = ddata->src;
-	int r = 0;
-	int level;
-
-	if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
-			dev->props.power == FB_BLANK_UNBLANK)
-		level = dev->props.brightness;
-	else
-		level = 0;
-
-	dev_dbg(&ddata->pdev->dev, "update brightness to %d\n", level);
-
-	mutex_lock(&ddata->lock);
-
-	if (ddata->enabled) {
-		src->ops->dsi.bus_lock(src);
-
-		r = dsicm_wake_up(ddata);
-		if (!r)
-			r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, level);
-
-		src->ops->dsi.bus_unlock(src);
-	}
-
-	mutex_unlock(&ddata->lock);
-
-	return r;
-}
-
-static int dsicm_bl_get_intensity(struct backlight_device *dev)
-{
-	if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
-			dev->props.power == FB_BLANK_UNBLANK)
-		return dev->props.brightness;
-
-	return 0;
-}
-
-static const struct backlight_ops dsicm_bl_ops = {
-	.get_brightness = dsicm_bl_get_intensity,
-	.update_status  = dsicm_bl_update_status,
-};
-
-static ssize_t dsicm_num_errors_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct panel_drv_data *ddata = dev_get_drvdata(dev);
-	struct omap_dss_device *src = ddata->src;
-	u8 errors = 0;
-	int r;
-
-	mutex_lock(&ddata->lock);
-
-	if (ddata->enabled) {
-		src->ops->dsi.bus_lock(src);
-
-		r = dsicm_wake_up(ddata);
-		if (!r)
-			r = dsicm_dcs_read_1(ddata, DCS_READ_NUM_ERRORS,
-					&errors);
-
-		src->ops->dsi.bus_unlock(src);
-	} else {
-		r = -ENODEV;
-	}
-
-	mutex_unlock(&ddata->lock);
-
-	if (r)
-		return r;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", errors);
-}
-
-static ssize_t dsicm_hw_revision_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct panel_drv_data *ddata = dev_get_drvdata(dev);
-	struct omap_dss_device *src = ddata->src;
-	u8 id1, id2, id3;
-	int r;
-
-	mutex_lock(&ddata->lock);
-
-	if (ddata->enabled) {
-		src->ops->dsi.bus_lock(src);
-
-		r = dsicm_wake_up(ddata);
-		if (!r)
-			r = dsicm_get_id(ddata, &id1, &id2, &id3);
-
-		src->ops->dsi.bus_unlock(src);
-	} else {
-		r = -ENODEV;
-	}
-
-	mutex_unlock(&ddata->lock);
-
-	if (r)
-		return r;
-
-	return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
-}
-
-static ssize_t dsicm_store_ulps(struct device *dev,
-		struct device_attribute *attr,
-		const char *buf, size_t count)
-{
-	struct panel_drv_data *ddata = dev_get_drvdata(dev);
-	struct omap_dss_device *src = ddata->src;
-	unsigned long t;
-	int r;
-
-	r = kstrtoul(buf, 0, &t);
-	if (r)
-		return r;
-
-	mutex_lock(&ddata->lock);
-
-	if (ddata->enabled) {
-		src->ops->dsi.bus_lock(src);
-
-		if (t)
-			r = dsicm_enter_ulps(ddata);
-		else
-			r = dsicm_wake_up(ddata);
-
-		src->ops->dsi.bus_unlock(src);
-	}
-
-	mutex_unlock(&ddata->lock);
-
-	if (r)
-		return r;
-
-	return count;
-}
-
-static ssize_t dsicm_show_ulps(struct device *dev,
-		struct device_attribute *attr,
-		char *buf)
-{
-	struct panel_drv_data *ddata = dev_get_drvdata(dev);
-	unsigned int t;
-
-	mutex_lock(&ddata->lock);
-	t = ddata->ulps_enabled;
-	mutex_unlock(&ddata->lock);
-
-	return snprintf(buf, PAGE_SIZE, "%u\n", t);
-}
-
-static ssize_t dsicm_store_ulps_timeout(struct device *dev,
-		struct device_attribute *attr,
-		const char *buf, size_t count)
-{
-	struct panel_drv_data *ddata = dev_get_drvdata(dev);
-	struct omap_dss_device *src = ddata->src;
-	unsigned long t;
-	int r;
-
-	r = kstrtoul(buf, 0, &t);
-	if (r)
-		return r;
-
-	mutex_lock(&ddata->lock);
-	ddata->ulps_timeout = t;
-
-	if (ddata->enabled) {
-		/* dsicm_wake_up will restart the timer */
-		src->ops->dsi.bus_lock(src);
-		r = dsicm_wake_up(ddata);
-		src->ops->dsi.bus_unlock(src);
-	}
-
-	mutex_unlock(&ddata->lock);
-
-	if (r)
-		return r;
-
-	return count;
-}
-
-static ssize_t dsicm_show_ulps_timeout(struct device *dev,
-		struct device_attribute *attr,
-		char *buf)
-{
-	struct panel_drv_data *ddata = dev_get_drvdata(dev);
-	unsigned int t;
-
-	mutex_lock(&ddata->lock);
-	t = ddata->ulps_timeout;
-	mutex_unlock(&ddata->lock);
-
-	return snprintf(buf, PAGE_SIZE, "%u\n", t);
-}
-
-static DEVICE_ATTR(num_dsi_errors, S_IRUGO, dsicm_num_errors_show, NULL);
-static DEVICE_ATTR(hw_revision, S_IRUGO, dsicm_hw_revision_show, NULL);
-static DEVICE_ATTR(ulps, S_IRUGO | S_IWUSR,
-		dsicm_show_ulps, dsicm_store_ulps);
-static DEVICE_ATTR(ulps_timeout, S_IRUGO | S_IWUSR,
-		dsicm_show_ulps_timeout, dsicm_store_ulps_timeout);
-
-static struct attribute *dsicm_attrs[] = {
-	&dev_attr_num_dsi_errors.attr,
-	&dev_attr_hw_revision.attr,
-	&dev_attr_ulps.attr,
-	&dev_attr_ulps_timeout.attr,
-	NULL,
-};
-
-static const struct attribute_group dsicm_attr_group = {
-	.attrs = dsicm_attrs,
-};
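On current kernels these attributes would normally be declared with the DEVICE_ATTR_RO()/DEVICE_ATTR_RW() helpers, which derive the callback names from the attribute name. A sketch, assuming the callbacks were renamed to follow the <name>_show/<name>_store convention:

	/* Assumes e.g. hw_revision_show() and ulps_show()/ulps_store(). */
	static DEVICE_ATTR_RO(num_dsi_errors);
	static DEVICE_ATTR_RO(hw_revision);
	static DEVICE_ATTR_RW(ulps);
	static DEVICE_ATTR_RW(ulps_timeout);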
-
-static void dsicm_hw_reset(struct panel_drv_data *ddata)
-{
-	gpiod_set_value(ddata->reset_gpio, 1);
-	udelay(10);
-	/* assert reset */
-	gpiod_set_value(ddata->reset_gpio, 0);
-	/* hold reset low for a moment */
-	udelay(10);
-	gpiod_set_value(ddata->reset_gpio, 1);
-	/* wait after releasing reset */
-	usleep_range(5000, 10000);
-}
-
-static int dsicm_power_on(struct panel_drv_data *ddata)
-{
-	struct omap_dss_device *src = ddata->src;
-	u8 id1, id2, id3;
-	int r;
-	struct omap_dss_dsi_config dsi_config = {
-		.mode = OMAP_DSS_DSI_CMD_MODE,
-		.pixel_format = OMAP_DSS_DSI_FMT_RGB888,
-		.vm = &ddata->vm,
-		.hs_clk_min = 150000000,
-		.hs_clk_max = 300000000,
-		.lp_clk_min = 7000000,
-		.lp_clk_max = 10000000,
-	};
-
-	if (ddata->vpnl) {
-		r = regulator_enable(ddata->vpnl);
-		if (r) {
-			dev_err(&ddata->pdev->dev,
-				"failed to enable VPNL: %d\n", r);
-			return r;
-		}
-	}
-
-	if (ddata->vddi) {
-		r = regulator_enable(ddata->vddi);
-		if (r) {
-			dev_err(&ddata->pdev->dev,
-				"failed to enable VDDI: %d\n", r);
-			goto err_vpnl;
-		}
-	}
-
-	if (ddata->pin_config.num_pins > 0) {
-		r = src->ops->dsi.configure_pins(src, &ddata->pin_config);
-		if (r) {
-			dev_err(&ddata->pdev->dev,
-				"failed to configure DSI pins\n");
-			goto err_vddi;
-		}
-	}
-
-	r = src->ops->dsi.set_config(src, &dsi_config);
-	if (r) {
-		dev_err(&ddata->pdev->dev, "failed to configure DSI\n");
-		goto err_vddi;
-	}
-
-	src->ops->enable(src);
-
-	dsicm_hw_reset(ddata);
-
-	src->ops->dsi.enable_hs(src, ddata->channel, false);
-
-	r = dsicm_sleep_out(ddata);
-	if (r)
-		goto err;
-
-	r = dsicm_get_id(ddata, &id1, &id2, &id3);
-	if (r)
-		goto err;
-
-	r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, 0xff);
-	if (r)
-		goto err;
-
-	r = dsicm_dcs_write_1(ddata, DCS_CTRL_DISPLAY,
-			(1<<2) | (1<<5));	/* BL | BCTRL */
-	if (r)
-		goto err;
-
-	r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_PIXEL_FORMAT,
-		MIPI_DCS_PIXEL_FMT_24BIT);
-	if (r)
-		goto err;
-
-	r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_ON);
-	if (r)
-		goto err;
-
-	r = _dsicm_enable_te(ddata, ddata->te_enabled);
-	if (r)
-		goto err;
-
-	r = src->ops->dsi.enable_video_output(src, ddata->channel);
-	if (r)
-		goto err;
-
-	ddata->enabled = true;
-
-	if (!ddata->intro_printed) {
-		dev_info(&ddata->pdev->dev, "panel revision %02x.%02x.%02x\n",
-			id1, id2, id3);
-		ddata->intro_printed = true;
-	}
-
-	src->ops->dsi.enable_hs(src, ddata->channel, true);
-
-	return 0;
-err:
-	dev_err(&ddata->pdev->dev, "error while enabling panel, issuing HW reset\n");
-
-	dsicm_hw_reset(ddata);
-
-	src->ops->dsi.disable(src, true, false);
-err_vddi:
-	if (ddata->vddi)
-		regulator_disable(ddata->vddi);
-err_vpnl:
-	if (ddata->vpnl)
-		regulator_disable(ddata->vpnl);
-
-	return r;
-}
-
-static void dsicm_power_off(struct panel_drv_data *ddata)
-{
-	struct omap_dss_device *src = ddata->src;
-	int r;
-
-	src->ops->dsi.disable_video_output(src, ddata->channel);
-
-	r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_OFF);
-	if (!r)
-		r = dsicm_sleep_in(ddata);
-
-	if (r) {
-		dev_err(&ddata->pdev->dev,
-				"error disabling panel, issuing HW reset\n");
-		dsicm_hw_reset(ddata);
-	}
-
-	src->ops->dsi.disable(src, true, false);
-
-	if (ddata->vddi)
-		regulator_disable(ddata->vddi);
-	if (ddata->vpnl)
-		regulator_disable(ddata->vpnl);
-
-	ddata->enabled = false;
-}
-
-static int dsicm_panel_reset(struct panel_drv_data *ddata)
-{
-	dev_err(&ddata->pdev->dev, "performing LCD reset\n");
-
-	dsicm_power_off(ddata);
-	dsicm_hw_reset(ddata);
-	return dsicm_power_on(ddata);
-}
-
-static int dsicm_connect(struct omap_dss_device *src,
-			 struct omap_dss_device *dst)
-{
-	struct panel_drv_data *ddata = to_panel_data(dst);
-	struct device *dev = &ddata->pdev->dev;
-	int r;
-
-	r = src->ops->dsi.request_vc(src, &ddata->channel);
-	if (r) {
-		dev_err(dev, "failed to get virtual channel\n");
-		return r;
-	}
-
-	r = src->ops->dsi.set_vc_id(src, ddata->channel, TCH);
-	if (r) {
-		dev_err(dev, "failed to set VC_ID\n");
-		src->ops->dsi.release_vc(src, ddata->channel);
-		return r;
-	}
-
-	ddata->src = src;
-	return 0;
-}
-
-static void dsicm_disconnect(struct omap_dss_device *src,
-			     struct omap_dss_device *dst)
-{
-	struct panel_drv_data *ddata = to_panel_data(dst);
-
-	src->ops->dsi.release_vc(src, ddata->channel);
-	ddata->src = NULL;
-}
-
-static void dsicm_enable(struct omap_dss_device *dssdev)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-	struct omap_dss_device *src = ddata->src;
-	int r;
-
-	mutex_lock(&ddata->lock);
-
-	src->ops->dsi.bus_lock(src);
-
-	r = dsicm_power_on(ddata);
-
-	src->ops->dsi.bus_unlock(src);
-
-	if (r)
-		goto err;
-
-	mutex_unlock(&ddata->lock);
-
-	dsicm_bl_power(ddata, true);
-
-	return;
-err:
-	dev_dbg(&ddata->pdev->dev, "enable failed (%d)\n", r);
-	mutex_unlock(&ddata->lock);
-}
-
-static void dsicm_disable(struct omap_dss_device *dssdev)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-	struct omap_dss_device *src = ddata->src;
-	int r;
-
-	dsicm_bl_power(ddata, false);
-
-	mutex_lock(&ddata->lock);
-
-	dsicm_cancel_ulps_work(ddata);
-
-	src->ops->dsi.bus_lock(src);
-
-	r = dsicm_wake_up(ddata);
-	if (!r)
-		dsicm_power_off(ddata);
-
-	src->ops->dsi.bus_unlock(src);
-
-	mutex_unlock(&ddata->lock);
-}
-
-static void dsicm_framedone_cb(int err, void *data)
-{
-	struct panel_drv_data *ddata = data;
-	struct omap_dss_device *src = ddata->src;
-
-	dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err);
-	src->ops->dsi.bus_unlock(src);
-}
-
-static irqreturn_t dsicm_te_isr(int irq, void *data)
-{
-	struct panel_drv_data *ddata = data;
-	struct omap_dss_device *src = ddata->src;
-	int old;
-	int r;
-
-	old = atomic_cmpxchg(&ddata->do_update, 1, 0);
-
-	if (old) {
-		cancel_delayed_work(&ddata->te_timeout_work);
-
-		r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb,
-				ddata);
-		if (r)
-			goto err;
-	}
-
-	return IRQ_HANDLED;
-err:
-	dev_err(&ddata->pdev->dev, "start update failed\n");
-	src->ops->dsi.bus_unlock(src);
-	return IRQ_HANDLED;
-}
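The atomic_cmpxchg() above implements a claim-once handshake: dsicm_update() sets do_update to 1, and exactly one TE interrupt can swap it back to 0 and start the transfer; any concurrent interrupt sees 0 and does nothing. The idiom in isolation (sketch):

	/* Whoever swaps 1 -> 0 wins the pending work; everyone else no-ops. */
	static bool claim_pending(atomic_t *pending)
	{
		return atomic_cmpxchg(pending, 1, 0) == 1;
	}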
-
-static void dsicm_te_timeout_work_callback(struct work_struct *work)
-{
-	struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
-					te_timeout_work.work);
-	struct omap_dss_device *src = ddata->src;
-
-	dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n");
-
-	atomic_set(&ddata->do_update, 0);
-	src->ops->dsi.bus_unlock(src);
-}
-
-static int dsicm_update(struct omap_dss_device *dssdev,
-				    u16 x, u16 y, u16 w, u16 h)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-	struct omap_dss_device *src = ddata->src;
-	int r;
-
-	dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
-
-	mutex_lock(&ddata->lock);
-	src->ops->dsi.bus_lock(src);
-
-	r = dsicm_wake_up(ddata);
-	if (r)
-		goto err;
-
-	if (!ddata->enabled) {
-		r = 0;
-		goto err;
-	}
-
-	/* XXX no need to send this every frame, but DSI breaks if not done */
-	r = dsicm_set_update_window(ddata, 0, 0, ddata->vm.hactive,
-				    ddata->vm.vactive);
-	if (r)
-		goto err;
-
-	if (ddata->te_enabled && ddata->ext_te_gpio) {
-		schedule_delayed_work(&ddata->te_timeout_work,
-				msecs_to_jiffies(250));
-		atomic_set(&ddata->do_update, 1);
-	} else {
-		r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb,
-				ddata);
-		if (r)
-			goto err;
-	}
-
-	/* note: no bus_unlock here; the lock is released in the src framedone callback */
-	mutex_unlock(&ddata->lock);
-	return 0;
-err:
-	src->ops->dsi.bus_unlock(src);
-	mutex_unlock(&ddata->lock);
-	return r;
-}
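The asymmetric locking above is deliberate: dsicm_update() returns with the DSI bus lock still held, and the lock is only released by dsicm_framedone_cb() on completion or by the TE timeout worker. A sketch of how a caller would drive an update and wait for it, assuming local r/dssdev/ddata variables:

	r = dsicm_update(dssdev, 0, 0, ddata->vm.hactive, ddata->vm.vactive);
	if (!r)
		/* the bus_lock()/bus_unlock() pair blocks until framedone */
		dsicm_sync(dssdev);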
-
-static int dsicm_sync(struct omap_dss_device *dssdev)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-	struct omap_dss_device *src = ddata->src;
-
-	dev_dbg(&ddata->pdev->dev, "sync\n");
-
-	mutex_lock(&ddata->lock);
-	src->ops->dsi.bus_lock(src);
-	src->ops->dsi.bus_unlock(src);
-	mutex_unlock(&ddata->lock);
-
-	dev_dbg(&ddata->pdev->dev, "sync done\n");
-
-	return 0;
-}
-
-static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
-{
-	struct omap_dss_device *src = ddata->src;
-	int r;
-
-	if (enable)
-		r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_TEAR_ON, 0);
-	else
-		r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_TEAR_OFF);
-
-	if (!ddata->ext_te_gpio)
-		src->ops->dsi.enable_te(src, enable);
-
-	/* possible panel bug */
-	msleep(100);
-
-	return r;
-}
-
-static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-	struct omap_dss_device *src = ddata->src;
-	int r;
-
-	mutex_lock(&ddata->lock);
-
-	if (ddata->te_enabled == enable)
-		goto end;
-
-	src->ops->dsi.bus_lock(src);
-
-	if (ddata->enabled) {
-		r = dsicm_wake_up(ddata);
-		if (r)
-			goto err;
-
-		r = _dsicm_enable_te(ddata, enable);
-		if (r)
-			goto err;
-	}
-
-	ddata->te_enabled = enable;
-
-	src->ops->dsi.bus_unlock(src);
-end:
-	mutex_unlock(&ddata->lock);
-
-	return 0;
-err:
-	src->ops->dsi.bus_unlock(src);
-	mutex_unlock(&ddata->lock);
-
-	return r;
-}
-
-static int dsicm_get_te(struct omap_dss_device *dssdev)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-	int r;
-
-	mutex_lock(&ddata->lock);
-	r = ddata->te_enabled;
-	mutex_unlock(&ddata->lock);
-
-	return r;
-}
-
-static int dsicm_memory_read(struct omap_dss_device *dssdev,
-		void *buf, size_t size,
-		u16 x, u16 y, u16 w, u16 h)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-	struct omap_dss_device *src = ddata->src;
-	int r;
-	int first = 1;
-	int plen;
-	unsigned int buf_used = 0;
-
-	if (size < w * h * 3)
-		return -ENOMEM;
-
-	mutex_lock(&ddata->lock);
-
-	if (!ddata->enabled) {
-		r = -ENODEV;
-		goto err1;
-	}
-
-	size = min((u32)w * h * 3,
-		   ddata->vm.hactive * ddata->vm.vactive * 3);
-
-	src->ops->dsi.bus_lock(src);
-
-	r = dsicm_wake_up(ddata);
-	if (r)
-		goto err2;
-
-	/* A plen of 1 or 2 goes into a short packet. Until the checksum
-	 * error is fixed, use short packets: plen 32 works, but bigger
-	 * packets seem to cause an error. */
-	if (size % 2)
-		plen = 1;
-	else
-		plen = 2;
-
-	dsicm_set_update_window(ddata, x, y, w, h);
-
-	r = src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, plen);
-	if (r)
-		goto err2;
-
-	while (buf_used < size) {
-		u8 dcs_cmd = first ? 0x2e : 0x3e;
-		first = 0;
-
-		r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd,
-				buf + buf_used, size - buf_used);
-
-		if (r < 0) {
-			dev_err(dssdev->dev, "read error\n");
-			goto err3;
-		}
-
-		buf_used += r;
-
-		if (r < plen) {
-			dev_err(&ddata->pdev->dev, "short read\n");
-			break;
-		}
-
-		if (signal_pending(current)) {
-			dev_err(&ddata->pdev->dev,
-				"signal pending, aborting memory read\n");
-			r = -ERESTARTSYS;
-			goto err3;
-		}
-	}
-
-	r = buf_used;
-
-err3:
-	src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, 1);
-err2:
-	src->ops->dsi.bus_unlock(src);
-err1:
-	mutex_unlock(&ddata->lock);
-	return r;
-}
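The bare 0x2e/0x3e opcodes above are the standard DCS memory-read pair, and <video/mipi_display.h> (already included by this driver) has names for them, so the selection could equally read (sketch):

	u8 dcs_cmd = first ? MIPI_DCS_READ_MEMORY_START	    /* 0x2e */
			   : MIPI_DCS_READ_MEMORY_CONTINUE; /* 0x3e */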
-
-static void dsicm_ulps_work(struct work_struct *work)
-{
-	struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
-			ulps_work.work);
-	struct omap_dss_device *dssdev = &ddata->dssdev;
-	struct omap_dss_device *src = ddata->src;
-
-	mutex_lock(&ddata->lock);
-
-	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !ddata->enabled) {
-		mutex_unlock(&ddata->lock);
-		return;
-	}
-
-	src->ops->dsi.bus_lock(src);
-
-	dsicm_enter_ulps(ddata);
-
-	src->ops->dsi.bus_unlock(src);
-	mutex_unlock(&ddata->lock);
-}
-
-static int dsicm_get_modes(struct omap_dss_device *dssdev,
-			   struct drm_connector *connector)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-
-	connector->display_info.width_mm = ddata->width_mm;
-	connector->display_info.height_mm = ddata->height_mm;
-
-	return omapdss_display_get_modes(connector, &ddata->vm);
-}
-
-static int dsicm_check_timings(struct omap_dss_device *dssdev,
-			       struct drm_display_mode *mode)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-	int ret = 0;
-
-	if (mode->hdisplay != ddata->vm.hactive)
-		ret = -EINVAL;
-
-	if (mode->vdisplay != ddata->vm.vactive)
-		ret = -EINVAL;
-
-	if (ret) {
-		dev_warn(dssdev->dev, "wrong resolution: %d x %d",
-			 mode->hdisplay, mode->vdisplay);
-		dev_warn(dssdev->dev, "panel resolution: %d x %d",
-			 ddata->vm.hactive, ddata->vm.vactive);
-	}
-
-	return ret;
-}
-
-static const struct omap_dss_device_ops dsicm_ops = {
-	.connect	= dsicm_connect,
-	.disconnect	= dsicm_disconnect,
-
-	.enable		= dsicm_enable,
-	.disable	= dsicm_disable,
-
-	.get_modes	= dsicm_get_modes,
-	.check_timings	= dsicm_check_timings,
-};
-
-static const struct omap_dss_driver dsicm_dss_driver = {
-	.update		= dsicm_update,
-	.sync		= dsicm_sync,
-
-	.enable_te	= dsicm_enable_te,
-	.get_te		= dsicm_get_te,
-
-	.memory_read	= dsicm_memory_read,
-};
-
-static int dsicm_probe_of(struct platform_device *pdev)
-{
-	struct device_node *node = pdev->dev.of_node;
-	struct backlight_device *backlight;
-	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-	struct display_timing timing;
-	int err;
-
-	ddata->reset_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
-	if (IS_ERR(ddata->reset_gpio)) {
-		err = PTR_ERR(ddata->reset_gpio);
-		dev_err(&pdev->dev, "reset gpio request failed: %d", err);
-		return err;
-	}
-
-	ddata->ext_te_gpio = devm_gpiod_get_optional(&pdev->dev, "te",
-						     GPIOD_IN);
-	if (IS_ERR(ddata->ext_te_gpio)) {
-		err = PTR_ERR(ddata->ext_te_gpio);
-		dev_err(&pdev->dev, "TE gpio request failed: %d", err);
-		return err;
-	}
-
-	err = of_get_display_timing(node, "panel-timing", &timing);
-	if (!err) {
-		videomode_from_timing(&timing, &ddata->vm);
-		if (!ddata->vm.pixelclock)
-			ddata->vm.pixelclock =
-				ddata->vm.hactive * ddata->vm.vactive * 60;
-	} else {
-		dev_warn(&pdev->dev,
-			 "failed to get video timing, using defaults\n");
-	}
-
-	ddata->width_mm = 0;
-	of_property_read_u32(node, "width-mm", &ddata->width_mm);
-
-	ddata->height_mm = 0;
-	of_property_read_u32(node, "height-mm", &ddata->height_mm);
-
-	ddata->vpnl = devm_regulator_get_optional(&pdev->dev, "vpnl");
-	if (IS_ERR(ddata->vpnl)) {
-		err = PTR_ERR(ddata->vpnl);
-		if (err == -EPROBE_DEFER)
-			return err;
-		ddata->vpnl = NULL;
-	}
-
-	ddata->vddi = devm_regulator_get_optional(&pdev->dev, "vddi");
-	if (IS_ERR(ddata->vddi)) {
-		err = PTR_ERR(ddata->vddi);
-		if (err == -EPROBE_DEFER)
-			return err;
-		ddata->vddi = NULL;
-	}
-
-	backlight = devm_of_find_backlight(&pdev->dev);
-	if (IS_ERR(backlight))
-		return PTR_ERR(backlight);
-
-	/* If no backlight device is found, assume native backlight support */
-	if (backlight)
-		ddata->extbldev = backlight;
-	else
-		ddata->use_dsi_backlight = true;
-
-	/* TODO: ulps */
-
-	return 0;
-}
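Note that the optional-regulator handling above treats every error except -EPROBE_DEFER as "regulator absent". On kernels that provide dev_err_probe(), a slightly stricter version of the same pattern might look like this (hypothetical rewrite, not the driver's code):

	ddata->vpnl = devm_regulator_get_optional(&pdev->dev, "vpnl");
	if (IS_ERR(ddata->vpnl)) {
		err = PTR_ERR(ddata->vpnl);
		/* only a genuinely missing regulator is optional */
		if (err != -ENODEV)
			return dev_err_probe(&pdev->dev, err,
					     "can't get VPNL regulator\n");
		ddata->vpnl = NULL;
	}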
-
-static int dsicm_probe(struct platform_device *pdev)
-{
-	struct panel_drv_data *ddata;
-	struct backlight_device *bldev = NULL;
-	struct device *dev = &pdev->dev;
-	struct omap_dss_device *dssdev;
-	int r;
-
-	dev_dbg(dev, "probe\n");
-
-	ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
-	if (!ddata)
-		return -ENOMEM;
-
-	platform_set_drvdata(pdev, ddata);
-	ddata->pdev = pdev;
-
-	ddata->vm.hactive = 864;
-	ddata->vm.vactive = 480;
-	ddata->vm.pixelclock = 864 * 480 * 60;
-
-	r = dsicm_probe_of(pdev);
-	if (r)
-		return r;
-
-	dssdev = &ddata->dssdev;
-	dssdev->dev = dev;
-	dssdev->ops = &dsicm_ops;
-	dssdev->driver = &dsicm_dss_driver;
-	dssdev->type = OMAP_DISPLAY_TYPE_DSI;
-	dssdev->display = true;
-	dssdev->owner = THIS_MODULE;
-	dssdev->of_port = 0;
-	dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
-
-	dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
-		OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
-
-	omapdss_display_init(dssdev);
-	omapdss_device_register(dssdev);
-
-	mutex_init(&ddata->lock);
-
-	atomic_set(&ddata->do_update, 0);
-
-	if (ddata->ext_te_gpio) {
-		r = devm_request_irq(dev, gpiod_to_irq(ddata->ext_te_gpio),
-				dsicm_te_isr,
-				IRQF_TRIGGER_RISING,
-				"taal vsync", ddata);
-
-		if (r) {
-			dev_err(dev, "IRQ request failed\n");
-			goto err_reg;
-		}
-
-		INIT_DEFERRABLE_WORK(&ddata->te_timeout_work,
-					dsicm_te_timeout_work_callback);
-
-		dev_dbg(dev, "Using GPIO TE\n");
-	}
-
-	ddata->workqueue = create_singlethread_workqueue("dsicm_wq");
-	if (!ddata->workqueue) {
-		r = -ENOMEM;
-		goto err_reg;
-	}
-	INIT_DELAYED_WORK(&ddata->ulps_work, dsicm_ulps_work);
-
-	dsicm_hw_reset(ddata);
-
-	if (ddata->use_dsi_backlight) {
-		struct backlight_properties props = { 0 };
-		props.max_brightness = 255;
-		props.type = BACKLIGHT_RAW;
-
-		bldev = devm_backlight_device_register(dev, dev_name(dev),
-			dev, ddata, &dsicm_bl_ops, &props);
-		if (IS_ERR(bldev)) {
-			r = PTR_ERR(bldev);
-			goto err_bl;
-		}
-
-		ddata->bldev = bldev;
-	}
-
-	r = sysfs_create_group(&dev->kobj, &dsicm_attr_group);
-	if (r) {
-		dev_err(dev, "failed to create sysfs files\n");
-		goto err_bl;
-	}
-
-	return 0;
-
-err_bl:
-	destroy_workqueue(ddata->workqueue);
-err_reg:
-	if (ddata->extbldev)
-		put_device(&ddata->extbldev->dev);
-
-	return r;
-}
-
-static int __exit dsicm_remove(struct platform_device *pdev)
-{
-	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-	struct omap_dss_device *dssdev = &ddata->dssdev;
-
-	dev_dbg(&pdev->dev, "remove\n");
-
-	omapdss_device_unregister(dssdev);
-
-	if (omapdss_device_is_enabled(dssdev))
-		dsicm_disable(dssdev);
-	omapdss_device_disconnect(ddata->src, dssdev);
-
-	sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group);
-
-	if (ddata->extbldev)
-		put_device(&ddata->extbldev->dev);
-
-	dsicm_cancel_ulps_work(ddata);
-	destroy_workqueue(ddata->workqueue);
-
-	/* reset, to be sure that the panel is in a valid state */
-	dsicm_hw_reset(ddata);
-
-	return 0;
-}
-
-static const struct of_device_id dsicm_of_match[] = {
-	{ .compatible = "omapdss,panel-dsi-cm", },
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, dsicm_of_match);
-
-static struct platform_driver dsicm_driver = {
-	.probe = dsicm_probe,
-	.remove = __exit_p(dsicm_remove),
-	.driver = {
-		.name = "panel-dsi-cm",
-		.of_match_table = dsicm_of_match,
-		.suppress_bind_attrs = true,
-	},
-};
-
-module_platform_driver(dsicm_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("Generic DSI Command Mode Panel Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/dss/Kconfig b/drivers/gpu/drm/omapdrm/dss/Kconfig
deleted file mode 100644
index e11b258a22941e51fcc265b4c4d5a882dd7bd9bf..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/omapdrm/dss/Kconfig
+++ /dev/null
@@ -1,135 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config OMAP2_DSS_INIT
-	bool
-
-config OMAP_DSS_BASE
-	tristate
-
-menuconfig OMAP2_DSS
-	tristate "OMAP2+ Display Subsystem support"
-	select OMAP_DSS_BASE
-	select VIDEOMODE_HELPERS
-	select OMAP2_DSS_INIT
-	select HDMI
-	help
-	  OMAP2+ Display Subsystem support.
-
-if OMAP2_DSS
-
-config OMAP2_DSS_DEBUG
-	bool "Debug support"
-	default n
-	help
-	  This enables printing of debug messages. Alternatively, debug
-	  messages can be enabled by setting CONFIG_DYNAMIC_DEBUG and then
-	  setting the appropriate flags in <debugfs>/dynamic_debug/control.
-
-config OMAP2_DSS_DEBUGFS
-	bool "Debugfs filesystem support"
-	depends on DEBUG_FS
-	default n
-	help
-	  This enables debugfs for OMAPDSS at <debugfs>/omapdss, which allows
-	  querying the clock and register configuration of dss, dispc, dsi,
-	  hdmi and rfbi.
-
-config OMAP2_DSS_COLLECT_IRQ_STATS
-	bool "Collect DSS IRQ statistics"
-	depends on OMAP2_DSS_DEBUGFS
-	default n
-	help
-	  Collect DSS IRQ statistics, printable via debugfs.
-
-	  The statistics can be found in
-	  <debugfs>/omapdss/dispc_irq for DISPC interrupts, and
-	  <debugfs>/omapdss/dsi_irq for DSI interrupts.
-
-config OMAP2_DSS_DPI
-	bool "DPI support"
-	default y
-	help
-	  DPI Interface. This is the Parallel Display Interface.
-
-config OMAP2_DSS_VENC
-	bool "VENC support"
-	default y
-	help
-	  OMAP Video Encoder support for S-Video and composite TV-out.
-
-config OMAP2_DSS_HDMI_COMMON
-	bool
-
-config OMAP4_DSS_HDMI
-	bool "HDMI support for OMAP4"
-	default y
-	select OMAP2_DSS_HDMI_COMMON
-	help
-	  HDMI support for OMAP4 based SoCs.
-
-config OMAP4_DSS_HDMI_CEC
-	bool "Enable HDMI CEC support for OMAP4"
-	depends on OMAP4_DSS_HDMI
-	select CEC_CORE
-	default y
-	help
-	  When selected, the HDMI transmitter will support the CEC feature.
-
-config OMAP5_DSS_HDMI
-	bool "HDMI support for OMAP5"
-	default n
-	select OMAP2_DSS_HDMI_COMMON
-	help
-	  HDMI Interface for OMAP5 and similar cores. This adds the High
-	  Definition Multimedia Interface. See https://www.hdmi.org/ for the
-	  HDMI specification.
-
-config OMAP2_DSS_SDI
-	bool "SDI support"
-	default n
-	help
-	  SDI (Serial Display Interface) support.
-
-	  SDI is a high speed one-way display serial bus between the host
-	  processor and a display.
-
-config OMAP2_DSS_DSI
-	bool "DSI support"
-	default n
-	help
-	  MIPI DSI (Display Serial Interface) support.
-
-	  DSI is a high speed half-duplex serial interface between the host
-	  processor and a peripheral, such as a display or a framebuffer chip.
-
-	  See https://www.mipi.org/ for DSI specifications.
-
-config OMAP2_DSS_MIN_FCK_PER_PCK
-	int "Minimum FCK/PCK ratio (for scaling)"
-	range 0 32
-	default 0
-	help
-	  This can be used to adjust the minimum FCK/PCK ratio.
-
-	  With this you can make sure that the DISPC FCK is at least
-	  n x PCK. Video plane scaling requires a higher FCK than
-	  normal.
-
-	  If this is set to 0, there's no extra constraint on the
-	  DISPC FCK. However, the FCK will at minimum be
-	  2 x PCK (if active matrix) or 3 x PCK (if passive matrix).
-
-	  Max FCK is 173MHz, so this doesn't work if your PCK is very
-	  high: a ratio of 3, for example, limits the PCK to about
-	  57MHz.
-
-config OMAP2_DSS_SLEEP_AFTER_VENC_RESET
-	bool "Sleep 20ms after VENC reset"
-	default y
-	help
-	  There is a 20ms sleep after VENC reset that seems to be needed
-	  for the reset to take effect. The reason for the bug is unclear,
-	  as is the set of platforms it affects.
-
-	  This option enables the sleep, and is enabled by default. You can
-	  disable the sleep if doing so doesn't cause problems on your
-	  platform.
-
-endif
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
deleted file mode 100644
index f967e6948f2e2a6c03456c93b3ccc9bfbcb60400..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OMAP2_DSS_INIT) += omapdss-boot-init.o
-
-obj-$(CONFIG_OMAP_DSS_BASE) += omapdss-base.o
-omapdss-base-y := base.o display.o output.o
-
-obj-$(CONFIG_OMAP2_DSS) += omapdss.o
-# Core DSS files
-omapdss-y := dss.o dispc.o dispc_coefs.o \
-	pll.o video-pll.o
-omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
-omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
-omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
-omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
-omapdss-$(CONFIG_OMAP2_DSS_HDMI_COMMON) += hdmi_common.o hdmi_wp.o hdmi_pll.o \
-	hdmi_phy.o
-omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi4.o hdmi4_core.o
-omapdss-$(CONFIG_OMAP4_DSS_HDMI_CEC) += hdmi4_cec.o
-omapdss-$(CONFIG_OMAP5_DSS_HDMI) += hdmi5.o hdmi5_core.o
-ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index cf50430e6363cfd438b39eddc2ed35a115338fc6..050ca7eafac586621e4bad81ce4e65c31eafb4ea 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -16,32 +16,10 @@
 #include "dss.h"
 #include "omapdss.h"
 
-static struct dss_device *dss_device;
-
-struct dss_device *omapdss_get_dss(void)
-{
-	return dss_device;
-}
-EXPORT_SYMBOL(omapdss_get_dss);
-
-void omapdss_set_dss(struct dss_device *dss)
-{
-	dss_device = dss;
-}
-EXPORT_SYMBOL(omapdss_set_dss);
-
 struct dispc_device *dispc_get_dispc(struct dss_device *dss)
 {
 	return dss->dispc;
 }
-EXPORT_SYMBOL(dispc_get_dispc);
-
-const struct dispc_ops *dispc_get_ops(struct dss_device *dss)
-{
-	return dss->dispc_ops;
-}
-EXPORT_SYMBOL(dispc_get_ops);
-
 
 /* -----------------------------------------------------------------------------
  * OMAP DSS Devices Handling
@@ -56,7 +34,6 @@ void omapdss_device_register(struct omap_dss_device *dssdev)
 	list_add_tail(&dssdev->list, &omapdss_devices_list);
 	mutex_unlock(&omapdss_devices_lock);
 }
-EXPORT_SYMBOL_GPL(omapdss_device_register);
 
 void omapdss_device_unregister(struct omap_dss_device *dssdev)
 {
@@ -64,7 +41,6 @@ void omapdss_device_unregister(struct omap_dss_device *dssdev)
 	list_del(&dssdev->list);
 	mutex_unlock(&omapdss_devices_lock);
 }
-EXPORT_SYMBOL_GPL(omapdss_device_unregister);
 
 static bool omapdss_device_is_registered(struct device_node *node)
 {
@@ -86,24 +62,16 @@ static bool omapdss_device_is_registered(struct device_node *node)
 
 struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev)
 {
-	if (!try_module_get(dssdev->owner))
-		return NULL;
-
-	if (get_device(dssdev->dev) == NULL) {
-		module_put(dssdev->owner);
+	if (get_device(dssdev->dev) == NULL)
 		return NULL;
-	}
 
 	return dssdev;
 }
-EXPORT_SYMBOL(omapdss_device_get);
 
 void omapdss_device_put(struct omap_dss_device *dssdev)
 {
 	put_device(dssdev->dev);
-	module_put(dssdev->owner);
 }
-EXPORT_SYMBOL(omapdss_device_put);
 
 struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node)
 {
@@ -149,7 +117,7 @@ struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from)
 			goto done;
 		}
 
-		if (dssdev->id && (dssdev->next || dssdev->bridge))
+		if (dssdev->id && dssdev->bridge)
 			goto done;
 	}
 
@@ -164,7 +132,6 @@ struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from)
 	mutex_unlock(&omapdss_devices_lock);
 	return dssdev;
 }
-EXPORT_SYMBOL(omapdss_device_next_output);
 
 static bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
 {
@@ -175,8 +142,6 @@ int omapdss_device_connect(struct dss_device *dss,
 			   struct omap_dss_device *src,
 			   struct omap_dss_device *dst)
 {
-	int ret;
-
 	dev_dbg(&dss->pdev->dev, "connect(%s, %s)\n",
 		src ? dev_name(src->dev) : "NULL",
 		dst ? dev_name(dst->dev) : "NULL");
@@ -195,17 +160,8 @@ int omapdss_device_connect(struct dss_device *dss,
 
 	dst->dss = dss;
 
-	if (dst->ops && dst->ops->connect) {
-		ret = dst->ops->connect(src, dst);
-		if (ret < 0) {
-			dst->dss = NULL;
-			return ret;
-		}
-	}
-
 	return 0;
 }
-EXPORT_SYMBOL_GPL(omapdss_device_connect);
 
 void omapdss_device_disconnect(struct omap_dss_device *src,
 			       struct omap_dss_device *dst)
@@ -222,43 +178,12 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
 	}
 
 	if (!dst->id && !omapdss_device_is_connected(dst)) {
-		WARN_ON(!dst->display);
+		WARN_ON(1);
 		return;
 	}
 
-	WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED);
-
-	if (dst->ops && dst->ops->disconnect)
-		dst->ops->disconnect(src, dst);
 	dst->dss = NULL;
 }
-EXPORT_SYMBOL_GPL(omapdss_device_disconnect);
-
-void omapdss_device_enable(struct omap_dss_device *dssdev)
-{
-	if (!dssdev)
-		return;
-
-	if (dssdev->ops && dssdev->ops->enable)
-		dssdev->ops->enable(dssdev);
-
-	omapdss_device_enable(dssdev->next);
-
-	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-}
-EXPORT_SYMBOL_GPL(omapdss_device_enable);
-
-void omapdss_device_disable(struct omap_dss_device *dssdev)
-{
-	if (!dssdev)
-		return;
-
-	omapdss_device_disable(dssdev->next);
-
-	if (dssdev->ops && dssdev->ops->disable)
-		dssdev->ops->disable(dssdev);
-}
-EXPORT_SYMBOL_GPL(omapdss_device_disable);
 
 /* -----------------------------------------------------------------------------
  * Components Handling
@@ -344,7 +269,6 @@ void omapdss_gather_components(struct device *dev)
 	for_each_available_child_of_node(dev->of_node, child)
 		omapdss_walk_device(dev, child, true);
 }
-EXPORT_SYMBOL(omapdss_gather_components);
 
 static bool omapdss_component_is_loaded(struct omapdss_comp_node *comp)
 {
@@ -369,8 +293,3 @@ bool omapdss_stack_is_ready(void)
 
 	return true;
 }
-EXPORT_SYMBOL(omapdss_stack_is_ready);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("OMAP Display Subsystem Base");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 599183879caf628bb01912f2150377c36f8905ad..f4cbef8ccace8faf6503a8119136e142b422935b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -351,8 +351,6 @@ static unsigned long dispc_plane_pclk_rate(struct dispc_device *dispc,
 static unsigned long dispc_plane_lclk_rate(struct dispc_device *dispc,
 					   enum omap_plane_id plane);
 
-static void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask);
-
 static inline void dispc_write_reg(struct dispc_device *dispc, u16 idx, u32 val)
 {
 	__raw_writel(val, dispc->base + idx);
@@ -379,12 +377,12 @@ static void mgr_fld_write(struct dispc_device *dispc, enum omap_channel channel,
 	REG_FLD_MOD(dispc, rfld->reg, val, rfld->high, rfld->low);
 }
 
-static int dispc_get_num_ovls(struct dispc_device *dispc)
+int dispc_get_num_ovls(struct dispc_device *dispc)
 {
 	return dispc->feat->num_ovls;
 }
 
-static int dispc_get_num_mgrs(struct dispc_device *dispc)
+int dispc_get_num_mgrs(struct dispc_device *dispc)
 {
 	return dispc->feat->num_mgrs;
 }
@@ -670,13 +668,13 @@ void dispc_runtime_put(struct dispc_device *dispc)
 	WARN_ON(r < 0 && r != -ENOSYS);
 }
 
-static u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc,
+u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc,
 				   enum omap_channel channel)
 {
 	return mgr_desc[channel].vsync_irq;
 }
 
-static u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
+u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
 				       enum omap_channel channel)
 {
 	if (channel == OMAP_DSS_CHANNEL_DIGIT && dispc->feat->no_framedone_tv)
@@ -685,18 +683,18 @@ static u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
 	return mgr_desc[channel].framedone_irq;
 }
 
-static u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
+u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
 				       enum omap_channel channel)
 {
 	return mgr_desc[channel].sync_lost_irq;
 }
 
-static u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc)
+u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc)
 {
 	return DISPC_IRQ_FRAMEDONEWB;
 }
 
-static void dispc_mgr_enable(struct dispc_device *dispc,
+void dispc_mgr_enable(struct dispc_device *dispc,
 			     enum omap_channel channel, bool enable)
 {
 	mgr_fld_write(dispc, channel, DISPC_MGR_FLD_ENABLE, enable);
@@ -710,13 +708,13 @@ static bool dispc_mgr_is_enabled(struct dispc_device *dispc,
 	return !!mgr_fld_read(dispc, channel, DISPC_MGR_FLD_ENABLE);
 }
 
-static bool dispc_mgr_go_busy(struct dispc_device *dispc,
+bool dispc_mgr_go_busy(struct dispc_device *dispc,
 			      enum omap_channel channel)
 {
 	return mgr_fld_read(dispc, channel, DISPC_MGR_FLD_GO) == 1;
 }
 
-static void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
+void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
 {
 	WARN_ON(!dispc_mgr_is_enabled(dispc, channel));
 	WARN_ON(dispc_mgr_go_busy(dispc, channel));
@@ -726,12 +724,12 @@ static void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
 	mgr_fld_write(dispc, channel, DISPC_MGR_FLD_GO, 1);
 }
 
-static bool dispc_wb_go_busy(struct dispc_device *dispc)
+bool dispc_wb_go_busy(struct dispc_device *dispc)
 {
 	return REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1;
 }
 
-static void dispc_wb_go(struct dispc_device *dispc)
+void dispc_wb_go(struct dispc_device *dispc)
 {
 	enum omap_plane_id plane = OMAP_DSS_WB;
 	bool enable, go;
@@ -877,50 +875,62 @@ static void dispc_ovl_write_color_conv_coef(struct dispc_device *dispc,
 #undef CVAL
 }
 
-static void dispc_wb_write_color_conv_coef(struct dispc_device *dispc,
-					   const struct csc_coef_rgb2yuv *ct)
-{
-	const enum omap_plane_id plane = OMAP_DSS_WB;
-
-#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0))
+/* YUV -> RGB, ITU-R BT.601, full range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_full = {
+	256,   0,  358,		/* ry, rcb, rcr |1.000  0.000  1.402|*/
+	256, -88, -182,		/* gy, gcb, gcr |1.000 -0.344 -0.714|*/
+	256, 452,    0,		/* by, bcb, bcr |1.000  1.772  0.000|*/
+	true,			/* full range */
+};
 
-	dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->yg,  ct->yr));
-	dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->crr, ct->yb));
-	dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->crb, ct->crg));
-	dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->cbg, ct->cbr));
-	dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->cbb));
+/* YUV -> RGB, ITU-R BT.601, limited range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_lim = {
+	298,    0,  409,	/* ry, rcb, rcr |1.164  0.000  1.596|*/
+	298, -100, -208,	/* gy, gcb, gcr |1.164 -0.392 -0.813|*/
+	298,  516,    0,	/* by, bcb, bcr |1.164  2.017  0.000|*/
+	false,			/* limited range */
+};
 
-	REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11);
+/* YUV -> RGB, ITU-R BT.709, full range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt709_full = {
+	256,    0,  402,        /* ry, rcb, rcr |1.000  0.000  1.570|*/
+	256,  -48, -120,        /* gy, gcb, gcr |1.000 -0.187 -0.467|*/
+	256,  475,    0,        /* by, bcb, bcr |1.000  1.856  0.000|*/
+	true,                   /* full range */
+};
 
-#undef CVAL
-}
+/* YUV -> RGB, ITU-R BT.709, limited range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt709_lim = {
+	298,    0,  459,	/* ry, rcb, rcr |1.164  0.000  1.793|*/
+	298,  -55, -136,	/* gy, gcb, gcr |1.164 -0.213 -0.533|*/
+	298,  541,    0,	/* by, bcb, bcr |1.164  2.112  0.000|*/
+	false,			/* limited range */
+};
 
-static void dispc_setup_color_conv_coef(struct dispc_device *dispc)
+static void dispc_ovl_set_csc(struct dispc_device *dispc,
+			      enum omap_plane_id plane,
+			      enum drm_color_encoding color_encoding,
+			      enum drm_color_range color_range)
 {
-	int i;
-	int num_ovl = dispc_get_num_ovls(dispc);
-
-	/* YUV -> RGB, ITU-R BT.601, limited range */
-	const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_lim = {
-		298,    0,  409,	/* ry, rcb, rcr */
-		298, -100, -208,	/* gy, gcb, gcr */
-		298,  516,    0,	/* by, bcb, bcr */
-		false,			/* limited range */
-	};
+	const struct csc_coef_yuv2rgb *csc;
 
-	/* RGB -> YUV, ITU-R BT.601, limited range */
-	const struct csc_coef_rgb2yuv coefs_rgb2yuv_bt601_lim = {
-		 66, 129,  25,		/* yr,   yg,  yb */
-		-38, -74, 112,		/* cbr, cbg, cbb */
-		112, -94, -18,		/* crr, crg, crb */
-		false,			/* limited range */
-	};
-
-	for (i = 1; i < num_ovl; i++)
-		dispc_ovl_write_color_conv_coef(dispc, i, &coefs_yuv2rgb_bt601_lim);
+	switch (color_encoding) {
+	default:
+	case DRM_COLOR_YCBCR_BT601:
+		if (color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+			csc = &coefs_yuv2rgb_bt601_full;
+		else
+			csc = &coefs_yuv2rgb_bt601_lim;
+		break;
+	case DRM_COLOR_YCBCR_BT709:
+		if (color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+			csc = &coefs_yuv2rgb_bt709_full;
+		else
+			csc = &coefs_yuv2rgb_bt709_lim;
+		break;
+	}
 
-	if (dispc->feat->has_writeback)
-		dispc_wb_write_color_conv_coef(dispc, &coefs_rgb2yuv_bt601_lim);
+	dispc_ovl_write_color_conv_coef(dispc, plane, csc);
 }
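The tables above are the usual YCbCr-to-RGB matrices scaled by 256 (8.8 fixed point). As a worked example of the math the hardware applies with the BT.601 limited-range set, here is the red channel in C (sketch; function name hypothetical, clamping to [0, 255] omitted):

	/* R = 1.164 * (Y - 16) + 1.596 * (Cr - 128), i.e. 298/256 and 409/256 */
	static inline int bt601_lim_red(int y, int cr)
	{
		return (298 * (y - 16) + 409 * (cr - 128)) >> 8;
	}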
 
 static void dispc_ovl_set_ba0(struct dispc_device *dispc,
@@ -1285,7 +1295,7 @@ static bool dispc_ovl_color_mode_supported(struct dispc_device *dispc,
 	return false;
 }
 
-static const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
+const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
 					    enum omap_plane_id plane)
 {
 	return dispc->feat->supported_color_modes[plane];
@@ -2601,7 +2611,9 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
 				  u8 pre_mult_alpha, u8 global_alpha,
 				  enum omap_dss_rotation_type rotation_type,
 				  bool replication, const struct videomode *vm,
-				  bool mem_to_mem)
+				  bool mem_to_mem,
+				  enum drm_color_encoding color_encoding,
+				  enum drm_color_range color_range)
 {
 	bool five_taps = true;
 	bool fieldmode = false;
@@ -2750,6 +2762,9 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
 				      fieldmode, fourcc, rotation);
 		dispc_ovl_set_output_size(dispc, plane, out_width, out_height);
 		dispc_ovl_set_vid_color_conv(dispc, plane, cconv);
+
+		if (plane != OMAP_DSS_WB)
+			dispc_ovl_set_csc(dispc, plane, color_encoding, color_range);
 	}
 
 	dispc_ovl_set_rotation_attrs(dispc, plane, rotation, rotation_type,
@@ -2764,7 +2779,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
 	return 0;
 }
 
-static int dispc_ovl_setup(struct dispc_device *dispc,
+int dispc_ovl_setup(struct dispc_device *dispc,
 			   enum omap_plane_id plane,
 			   const struct omap_overlay_info *oi,
 			   const struct videomode *vm, bool mem_to_mem,
@@ -2786,12 +2801,13 @@ static int dispc_ovl_setup(struct dispc_device *dispc,
 		oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
 		oi->out_width, oi->out_height, oi->fourcc, oi->rotation,
 		oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
-		oi->rotation_type, replication, vm, mem_to_mem);
+		oi->rotation_type, replication, vm, mem_to_mem,
+		oi->color_encoding, oi->color_range);
 
 	return r;
 }
 
-static int dispc_wb_setup(struct dispc_device *dispc,
+int dispc_wb_setup(struct dispc_device *dispc,
 		   const struct omap_dss_writeback_info *wi,
 		   bool mem_to_mem, const struct videomode *vm,
 		   enum dss_writeback_channel channel_in)
@@ -2819,7 +2835,8 @@ static int dispc_wb_setup(struct dispc_device *dispc,
 		wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
 		wi->height, wi->fourcc, wi->rotation, zorder,
 		wi->pre_mult_alpha, global_alpha, wi->rotation_type,
-		replication, vm, mem_to_mem);
+		replication, vm, mem_to_mem, DRM_COLOR_YCBCR_BT601,
+		DRM_COLOR_YCBCR_LIMITED_RANGE);
 	if (r)
 		return r;
 
@@ -2874,12 +2891,12 @@ static int dispc_wb_setup(struct dispc_device *dispc,
 	return 0;
 }
 
-static bool dispc_has_writeback(struct dispc_device *dispc)
+bool dispc_has_writeback(struct dispc_device *dispc)
 {
 	return dispc->feat->has_writeback;
 }
 
-static int dispc_ovl_enable(struct dispc_device *dispc,
+int dispc_ovl_enable(struct dispc_device *dispc,
 			    enum omap_plane_id plane, bool enable)
 {
 	DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
@@ -2970,7 +2987,7 @@ static void dispc_mgr_enable_alpha_fixed_zorder(struct dispc_device *dispc,
 		REG_FLD_MOD(dispc, DISPC_CONFIG, enable, 19, 19);
 }
 
-static void dispc_mgr_setup(struct dispc_device *dispc,
+void dispc_mgr_setup(struct dispc_device *dispc,
 			    enum omap_channel channel,
 			    const struct omap_overlay_manager_info *info)
 {
@@ -3049,7 +3066,7 @@ static void dispc_mgr_enable_stallmode(struct dispc_device *dispc,
 	mgr_fld_write(dispc, channel, DISPC_MGR_FLD_STALLMODE, enable);
 }
 
-static void dispc_mgr_set_lcd_config(struct dispc_device *dispc,
+void dispc_mgr_set_lcd_config(struct dispc_device *dispc,
 				     enum omap_channel channel,
 				     const struct dss_lcd_mgr_config *config)
 {
@@ -3098,7 +3115,7 @@ static bool _dispc_mgr_pclk_ok(struct dispc_device *dispc,
 		return pclk <= dispc->feat->max_tv_pclk;
 }
 
-static int dispc_mgr_check_timings(struct dispc_device *dispc,
+int dispc_mgr_check_timings(struct dispc_device *dispc,
 				   enum omap_channel channel,
 				   const struct videomode *vm)
 {
@@ -3191,7 +3208,7 @@ static int vm_flag_to_int(enum display_flags flags, enum display_flags high,
 }
 
 /* change name to mode? */
-static void dispc_mgr_set_timings(struct dispc_device *dispc,
+void dispc_mgr_set_timings(struct dispc_device *dispc,
 				  enum omap_channel channel,
 				  const struct videomode *vm)
 {
@@ -3735,17 +3752,17 @@ int dispc_mgr_get_clock_div(struct dispc_device *dispc,
 	return 0;
 }
 
-static u32 dispc_read_irqstatus(struct dispc_device *dispc)
+u32 dispc_read_irqstatus(struct dispc_device *dispc)
 {
 	return dispc_read_reg(dispc, DISPC_IRQSTATUS);
 }
 
-static void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask)
+void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask)
 {
 	dispc_write_reg(dispc, DISPC_IRQSTATUS, mask);
 }
 
-static void dispc_write_irqenable(struct dispc_device *dispc, u32 mask)
+void dispc_write_irqenable(struct dispc_device *dispc, u32 mask)
 {
 	u32 old_mask = dispc_read_reg(dispc, DISPC_IRQENABLE);
 
@@ -3769,7 +3786,7 @@ void dispc_disable_sidle(struct dispc_device *dispc)
 	REG_FLD_MOD(dispc, DISPC_SYSCONFIG, 1, 4, 3);	/* SIDLEMODE: no idle */
 }
 
-static u32 dispc_mgr_gamma_size(struct dispc_device *dispc,
+u32 dispc_mgr_gamma_size(struct dispc_device *dispc,
 				enum omap_channel channel)
 {
 	const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
@@ -3824,7 +3841,7 @@ static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = {
 	{ .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
 };
 
-static void dispc_mgr_set_gamma(struct dispc_device *dispc,
+void dispc_mgr_set_gamma(struct dispc_device *dispc,
 				enum omap_channel channel,
 				const struct drm_color_lut *lut,
 				unsigned int length)
@@ -3930,8 +3947,6 @@ static void _omap_dispc_initial_config(struct dispc_device *dispc)
 	    dispc->feat->has_gamma_table)
 		REG_FLD_MOD(dispc, DISPC_CONFIG, 1, 9, 9);
 
-	dispc_setup_color_conv_coef(dispc);
-
 	dispc_set_loadmode(dispc, OMAP_DSS_LOAD_FRAME_ONLY);
 
 	dispc_init_fifos(dispc);
@@ -4482,7 +4497,7 @@ static irqreturn_t dispc_irq_handler(int irq, void *arg)
 	return dispc->user_handler(irq, dispc->user_data);
 }
 
-static int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
+int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
 			     void *dev_id)
 {
 	int r;
@@ -4506,7 +4521,7 @@ static int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
 	return r;
 }
 
-static void dispc_free_irq(struct dispc_device *dispc, void *dev_id)
+void dispc_free_irq(struct dispc_device *dispc, void *dev_id)
 {
 	devm_free_irq(&dispc->pdev->dev, dispc->irq, dispc);
 
@@ -4514,7 +4529,7 @@ static void dispc_free_irq(struct dispc_device *dispc, void *dev_id)
 	dispc->user_data = NULL;
 }
 
-static u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc)
+u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc)
 {
 	u32 limit = 0;
 
@@ -4684,47 +4699,6 @@ static void dispc_errata_i734_wa(struct dispc_device *dispc)
 	REG_FLD_MOD(dispc, DISPC_CONFIG, gatestate, 8, 4);
 }
 
-static const struct dispc_ops dispc_ops = {
-	.read_irqstatus = dispc_read_irqstatus,
-	.clear_irqstatus = dispc_clear_irqstatus,
-	.write_irqenable = dispc_write_irqenable,
-
-	.request_irq = dispc_request_irq,
-	.free_irq = dispc_free_irq,
-
-	.runtime_get = dispc_runtime_get,
-	.runtime_put = dispc_runtime_put,
-
-	.get_num_ovls = dispc_get_num_ovls,
-	.get_num_mgrs = dispc_get_num_mgrs,
-
-	.get_memory_bandwidth_limit = dispc_get_memory_bandwidth_limit,
-
-	.mgr_enable = dispc_mgr_enable,
-	.mgr_is_enabled = dispc_mgr_is_enabled,
-	.mgr_get_vsync_irq = dispc_mgr_get_vsync_irq,
-	.mgr_get_framedone_irq = dispc_mgr_get_framedone_irq,
-	.mgr_get_sync_lost_irq = dispc_mgr_get_sync_lost_irq,
-	.mgr_go_busy = dispc_mgr_go_busy,
-	.mgr_go = dispc_mgr_go,
-	.mgr_set_lcd_config = dispc_mgr_set_lcd_config,
-	.mgr_check_timings = dispc_mgr_check_timings,
-	.mgr_set_timings = dispc_mgr_set_timings,
-	.mgr_setup = dispc_mgr_setup,
-	.mgr_gamma_size = dispc_mgr_gamma_size,
-	.mgr_set_gamma = dispc_mgr_set_gamma,
-
-	.ovl_enable = dispc_ovl_enable,
-	.ovl_setup = dispc_ovl_setup,
-	.ovl_get_color_modes = dispc_ovl_get_color_modes,
-
-	.wb_get_framedone_irq = dispc_wb_get_framedone_irq,
-	.wb_setup = dispc_wb_setup,
-	.has_writeback = dispc_has_writeback,
-	.wb_go_busy = dispc_wb_go_busy,
-	.wb_go = dispc_wb_go,
-};
-
 /* DISPC HW IP initialisation */
 static const struct of_device_id dispc_of_match[] = {
 	{ .compatible = "ti,omap2-dispc", .data = &omap24xx_dispc_feats },
@@ -4826,7 +4800,6 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
 	dispc_runtime_put(dispc);
 
 	dss->dispc = dispc;
-	dss->dispc_ops = &dispc_ops;
 
 	dispc->debugfs = dss_debugfs_create_file(dss, "dispc", dispc_dump_regs,
 						 dispc);
@@ -4848,7 +4821,6 @@ static void dispc_unbind(struct device *dev, struct device *master, void *data)
 	dss_debugfs_remove_file(dispc->debugfs);
 
 	dss->dispc = NULL;
-	dss->dispc_ops = NULL;
 
 	pm_runtime_disable(dev);
 
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
deleted file mode 100644
index 3b82158b1bfdb325b67ee43278750c54bcf13166..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ /dev/null
@@ -1,60 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- */
-
-#define DSS_SUBSYS_NAME "DISPLAY"
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-
-#include <drm/drm_connector.h>
-#include <drm/drm_modes.h>
-
-#include "omapdss.h"
-
-static int disp_num_counter;
-
-void omapdss_display_init(struct omap_dss_device *dssdev)
-{
-	int id;
-
-	/*
-	 * Note: this presumes that either all displays have a DT alias, or
-	 * none do.
-	 */
-	id = of_alias_get_id(dssdev->dev->of_node, "display");
-	if (id < 0)
-		id = disp_num_counter++;
-
-	/* Use 'label' property for name, if it exists */
-	of_property_read_string(dssdev->dev->of_node, "label", &dssdev->name);
-
-	if (dssdev->name == NULL)
-		dssdev->name = devm_kasprintf(dssdev->dev, GFP_KERNEL,
-					      "display%u", id);
-}
-EXPORT_SYMBOL_GPL(omapdss_display_init);
-
-int omapdss_display_get_modes(struct drm_connector *connector,
-			      const struct videomode *vm)
-{
-	struct drm_display_mode *mode;
-
-	mode = drm_mode_create(connector->dev);
-	if (!mode)
-		return 0;
-
-	drm_display_mode_from_videomode(vm, mode);
-
-	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-	drm_mode_set_name(mode);
-	drm_mode_probed_add(connector, mode);
-
-	return 1;
-}
-EXPORT_SYMBOL_GPL(omapdss_display_get_modes);
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 1d2992daef4097e6e323389866c42809c81ba95e..030f997eccd00d258f40d5ea535a6ab2da29c629 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -641,7 +641,6 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
 	out->type = OMAP_DISPLAY_TYPE_DPI;
 	out->dispc_channel = dpi_get_channel(dpi);
 	out->of_port = port_num;
-	out->owner = THIS_MODULE;
 
 	r = omapdss_device_init_output(out, &dpi->bridge);
 	if (r < 0) {
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 735a4e9027d0c38e138fbc5c1b6941c8e8b26826..8e11612f5fe171e5bde7f22b3ee7d2f3d52da053 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -14,7 +14,9 @@
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/delay.h>
+#include <linux/gpio/consumer.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
 #include <linux/semaphore.h>
@@ -33,6 +35,9 @@
 #include <linux/component.h>
 #include <linux/sys_soc.h>
 
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
 #include <video/mipi_display.h>
 
 #include "omapdss.h"
@@ -40,73 +45,7 @@
 
 #define DSI_CATCH_MISSING_TE
 
-struct dsi_reg { u16 module; u16 idx; };
-
-#define DSI_REG(mod, idx)		((const struct dsi_reg) { mod, idx })
-
-/* DSI Protocol Engine */
-
-#define DSI_PROTO			0
-#define DSI_PROTO_SZ			0x200
-
-#define DSI_REVISION			DSI_REG(DSI_PROTO, 0x0000)
-#define DSI_SYSCONFIG			DSI_REG(DSI_PROTO, 0x0010)
-#define DSI_SYSSTATUS			DSI_REG(DSI_PROTO, 0x0014)
-#define DSI_IRQSTATUS			DSI_REG(DSI_PROTO, 0x0018)
-#define DSI_IRQENABLE			DSI_REG(DSI_PROTO, 0x001C)
-#define DSI_CTRL			DSI_REG(DSI_PROTO, 0x0040)
-#define DSI_GNQ				DSI_REG(DSI_PROTO, 0x0044)
-#define DSI_COMPLEXIO_CFG1		DSI_REG(DSI_PROTO, 0x0048)
-#define DSI_COMPLEXIO_IRQ_STATUS	DSI_REG(DSI_PROTO, 0x004C)
-#define DSI_COMPLEXIO_IRQ_ENABLE	DSI_REG(DSI_PROTO, 0x0050)
-#define DSI_CLK_CTRL			DSI_REG(DSI_PROTO, 0x0054)
-#define DSI_TIMING1			DSI_REG(DSI_PROTO, 0x0058)
-#define DSI_TIMING2			DSI_REG(DSI_PROTO, 0x005C)
-#define DSI_VM_TIMING1			DSI_REG(DSI_PROTO, 0x0060)
-#define DSI_VM_TIMING2			DSI_REG(DSI_PROTO, 0x0064)
-#define DSI_VM_TIMING3			DSI_REG(DSI_PROTO, 0x0068)
-#define DSI_CLK_TIMING			DSI_REG(DSI_PROTO, 0x006C)
-#define DSI_TX_FIFO_VC_SIZE		DSI_REG(DSI_PROTO, 0x0070)
-#define DSI_RX_FIFO_VC_SIZE		DSI_REG(DSI_PROTO, 0x0074)
-#define DSI_COMPLEXIO_CFG2		DSI_REG(DSI_PROTO, 0x0078)
-#define DSI_RX_FIFO_VC_FULLNESS		DSI_REG(DSI_PROTO, 0x007C)
-#define DSI_VM_TIMING4			DSI_REG(DSI_PROTO, 0x0080)
-#define DSI_TX_FIFO_VC_EMPTINESS	DSI_REG(DSI_PROTO, 0x0084)
-#define DSI_VM_TIMING5			DSI_REG(DSI_PROTO, 0x0088)
-#define DSI_VM_TIMING6			DSI_REG(DSI_PROTO, 0x008C)
-#define DSI_VM_TIMING7			DSI_REG(DSI_PROTO, 0x0090)
-#define DSI_STOPCLK_TIMING		DSI_REG(DSI_PROTO, 0x0094)
-#define DSI_VC_CTRL(n)			DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20))
-#define DSI_VC_TE(n)			DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20))
-#define DSI_VC_LONG_PACKET_HEADER(n)	DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20))
-#define DSI_VC_LONG_PACKET_PAYLOAD(n)	DSI_REG(DSI_PROTO, 0x010C + (n * 0x20))
-#define DSI_VC_SHORT_PACKET_HEADER(n)	DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20))
-#define DSI_VC_IRQSTATUS(n)		DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20))
-#define DSI_VC_IRQENABLE(n)		DSI_REG(DSI_PROTO, 0x011C + (n * 0x20))
-
-/* DSIPHY_SCP */
-
-#define DSI_PHY				1
-#define DSI_PHY_OFFSET			0x200
-#define DSI_PHY_SZ			0x40
-
-#define DSI_DSIPHY_CFG0			DSI_REG(DSI_PHY, 0x0000)
-#define DSI_DSIPHY_CFG1			DSI_REG(DSI_PHY, 0x0004)
-#define DSI_DSIPHY_CFG2			DSI_REG(DSI_PHY, 0x0008)
-#define DSI_DSIPHY_CFG5			DSI_REG(DSI_PHY, 0x0014)
-#define DSI_DSIPHY_CFG10		DSI_REG(DSI_PHY, 0x0028)
-
-/* DSI_PLL_CTRL_SCP */
-
-#define DSI_PLL				2
-#define DSI_PLL_OFFSET			0x300
-#define DSI_PLL_SZ			0x20
-
-#define DSI_PLL_CONTROL			DSI_REG(DSI_PLL, 0x0000)
-#define DSI_PLL_STATUS			DSI_REG(DSI_PLL, 0x0004)
-#define DSI_PLL_GO			DSI_REG(DSI_PLL, 0x0008)
-#define DSI_PLL_CONFIGURATION1		DSI_REG(DSI_PLL, 0x000C)
-#define DSI_PLL_CONFIGURATION2		DSI_REG(DSI_PLL, 0x0010)
+#include "dsi.h"
 
 #define REG_GET(dsi, idx, start, end) \
 	FLD_GET(dsi_read_reg(dsi, idx), start, end)
@@ -114,324 +53,36 @@ struct dsi_reg { u16 module; u16 idx; };
 #define REG_FLD_MOD(dsi, idx, val, start, end) \
 	dsi_write_reg(dsi, idx, FLD_MOD(dsi_read_reg(dsi, idx), val, start, end))
 
-/* Global interrupts */
-#define DSI_IRQ_VC0		(1 << 0)
-#define DSI_IRQ_VC1		(1 << 1)
-#define DSI_IRQ_VC2		(1 << 2)
-#define DSI_IRQ_VC3		(1 << 3)
-#define DSI_IRQ_WAKEUP		(1 << 4)
-#define DSI_IRQ_RESYNC		(1 << 5)
-#define DSI_IRQ_PLL_LOCK	(1 << 7)
-#define DSI_IRQ_PLL_UNLOCK	(1 << 8)
-#define DSI_IRQ_PLL_RECALL	(1 << 9)
-#define DSI_IRQ_COMPLEXIO_ERR	(1 << 10)
-#define DSI_IRQ_HS_TX_TIMEOUT	(1 << 14)
-#define DSI_IRQ_LP_RX_TIMEOUT	(1 << 15)
-#define DSI_IRQ_TE_TRIGGER	(1 << 16)
-#define DSI_IRQ_ACK_TRIGGER	(1 << 17)
-#define DSI_IRQ_SYNC_LOST	(1 << 18)
-#define DSI_IRQ_LDO_POWER_GOOD	(1 << 19)
-#define DSI_IRQ_TA_TIMEOUT	(1 << 20)
-#define DSI_IRQ_ERROR_MASK \
-	(DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
-	DSI_IRQ_TA_TIMEOUT)
-#define DSI_IRQ_CHANNEL_MASK	0xf
-
-/* Virtual channel interrupts */
-#define DSI_VC_IRQ_CS		(1 << 0)
-#define DSI_VC_IRQ_ECC_CORR	(1 << 1)
-#define DSI_VC_IRQ_PACKET_SENT	(1 << 2)
-#define DSI_VC_IRQ_FIFO_TX_OVF	(1 << 3)
-#define DSI_VC_IRQ_FIFO_RX_OVF	(1 << 4)
-#define DSI_VC_IRQ_BTA		(1 << 5)
-#define DSI_VC_IRQ_ECC_NO_CORR	(1 << 6)
-#define DSI_VC_IRQ_FIFO_TX_UDF	(1 << 7)
-#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
-#define DSI_VC_IRQ_ERROR_MASK \
-	(DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
-	DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
-	DSI_VC_IRQ_FIFO_TX_UDF)
-
-/* ComplexIO interrupts */
-#define DSI_CIO_IRQ_ERRSYNCESC1		(1 << 0)
-#define DSI_CIO_IRQ_ERRSYNCESC2		(1 << 1)
-#define DSI_CIO_IRQ_ERRSYNCESC3		(1 << 2)
-#define DSI_CIO_IRQ_ERRSYNCESC4		(1 << 3)
-#define DSI_CIO_IRQ_ERRSYNCESC5		(1 << 4)
-#define DSI_CIO_IRQ_ERRESC1		(1 << 5)
-#define DSI_CIO_IRQ_ERRESC2		(1 << 6)
-#define DSI_CIO_IRQ_ERRESC3		(1 << 7)
-#define DSI_CIO_IRQ_ERRESC4		(1 << 8)
-#define DSI_CIO_IRQ_ERRESC5		(1 << 9)
-#define DSI_CIO_IRQ_ERRCONTROL1		(1 << 10)
-#define DSI_CIO_IRQ_ERRCONTROL2		(1 << 11)
-#define DSI_CIO_IRQ_ERRCONTROL3		(1 << 12)
-#define DSI_CIO_IRQ_ERRCONTROL4		(1 << 13)
-#define DSI_CIO_IRQ_ERRCONTROL5		(1 << 14)
-#define DSI_CIO_IRQ_STATEULPS1		(1 << 15)
-#define DSI_CIO_IRQ_STATEULPS2		(1 << 16)
-#define DSI_CIO_IRQ_STATEULPS3		(1 << 17)
-#define DSI_CIO_IRQ_STATEULPS4		(1 << 18)
-#define DSI_CIO_IRQ_STATEULPS5		(1 << 19)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1	(1 << 20)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1	(1 << 21)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2	(1 << 22)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2	(1 << 23)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3	(1 << 24)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3	(1 << 25)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4	(1 << 26)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4	(1 << 27)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5	(1 << 28)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5	(1 << 29)
-#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0	(1 << 30)
-#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1	(1 << 31)
-#define DSI_CIO_IRQ_ERROR_MASK \
-	(DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
-	 DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
-	 DSI_CIO_IRQ_ERRSYNCESC5 | \
-	 DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
-	 DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
-	 DSI_CIO_IRQ_ERRESC5 | \
-	 DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
-	 DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
-	 DSI_CIO_IRQ_ERRCONTROL5 | \
-	 DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
-	 DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
-	 DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
-	 DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
-	 DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
-
-typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
-struct dsi_data;
-
-static int dsi_display_init_dispc(struct dsi_data *dsi);
-static void dsi_display_uninit_dispc(struct dsi_data *dsi);
-
-static int dsi_vc_send_null(struct dsi_data *dsi, int channel);
-
-/* DSI PLL HSDIV indices */
-#define HSDIV_DISPC	0
-#define HSDIV_DSI	1
-
-#define DSI_MAX_NR_ISRS                2
-#define DSI_MAX_NR_LANES	5
-
-enum dsi_model {
-	DSI_MODEL_OMAP3,
-	DSI_MODEL_OMAP4,
-	DSI_MODEL_OMAP5,
-};
-
-enum dsi_lane_function {
-	DSI_LANE_UNUSED	= 0,
-	DSI_LANE_CLK,
-	DSI_LANE_DATA1,
-	DSI_LANE_DATA2,
-	DSI_LANE_DATA3,
-	DSI_LANE_DATA4,
-};
-
-struct dsi_lane_config {
-	enum dsi_lane_function function;
-	u8 polarity;
-};
-
-struct dsi_isr_data {
-	omap_dsi_isr_t	isr;
-	void		*arg;
-	u32		mask;
-};
-
-enum fifo_size {
-	DSI_FIFO_SIZE_0		= 0,
-	DSI_FIFO_SIZE_32	= 1,
-	DSI_FIFO_SIZE_64	= 2,
-	DSI_FIFO_SIZE_96	= 3,
-	DSI_FIFO_SIZE_128	= 4,
-};
-
-enum dsi_vc_source {
-	DSI_VC_SOURCE_L4 = 0,
-	DSI_VC_SOURCE_VP,
-};
-
-struct dsi_irq_stats {
-	unsigned long last_reset;
-	unsigned int irq_count;
-	unsigned int dsi_irqs[32];
-	unsigned int vc_irqs[4][32];
-	unsigned int cio_irqs[32];
-};
-
-struct dsi_isr_tables {
-	struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
-	struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
-	struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
-};
-
-struct dsi_clk_calc_ctx {
-	struct dsi_data *dsi;
-	struct dss_pll *pll;
-
-	/* inputs */
-
-	const struct omap_dss_dsi_config *config;
-
-	unsigned long req_pck_min, req_pck_nom, req_pck_max;
-
-	/* outputs */
-
-	struct dss_pll_clock_info dsi_cinfo;
-	struct dispc_clock_info dispc_cinfo;
-
-	struct videomode vm;
-	struct omap_dss_dsi_videomode_timings dsi_vm;
-};
-
-struct dsi_lp_clock_info {
-	unsigned long lp_clk;
-	u16 lp_clk_div;
-};
-
-struct dsi_module_id_data {
-	u32 address;
-	int id;
-};
-
-enum dsi_quirks {
-	DSI_QUIRK_PLL_PWR_BUG = (1 << 0),	/* DSI-PLL power command 0x3 is not working */
-	DSI_QUIRK_DCS_CMD_CONFIG_VC = (1 << 1),
-	DSI_QUIRK_VC_OCP_WIDTH = (1 << 2),
-	DSI_QUIRK_REVERSE_TXCLKESC = (1 << 3),
-	DSI_QUIRK_GNQ = (1 << 4),
-	DSI_QUIRK_PHY_DCC = (1 << 5),
-};
-
-struct dsi_of_data {
-	enum dsi_model model;
-	const struct dss_pll_hw *pll_hw;
-	const struct dsi_module_id_data *modules;
-	unsigned int max_fck_freq;
-	unsigned int max_pll_lpdiv;
-	enum dsi_quirks quirks;
-};
-
-struct dsi_data {
-	struct device *dev;
-	void __iomem *proto_base;
-	void __iomem *phy_base;
-	void __iomem *pll_base;
-
-	const struct dsi_of_data *data;
-	int module_id;
-
-	int irq;
-
-	bool is_enabled;
-
-	struct clk *dss_clk;
-	struct regmap *syscon;
-	struct dss_device *dss;
-
-	struct dispc_clock_info user_dispc_cinfo;
-	struct dss_pll_clock_info user_dsi_cinfo;
-
-	struct dsi_lp_clock_info user_lp_cinfo;
-	struct dsi_lp_clock_info current_lp_cinfo;
-
-	struct dss_pll pll;
-
-	bool vdds_dsi_enabled;
-	struct regulator *vdds_dsi_reg;
-
-	struct {
-		enum dsi_vc_source source;
-		struct omap_dss_device *dssdev;
-		enum fifo_size tx_fifo_size;
-		enum fifo_size rx_fifo_size;
-		int vc_id;
-	} vc[4];
-
-	struct mutex lock;
-	struct semaphore bus_lock;
-
-	spinlock_t irq_lock;
-	struct dsi_isr_tables isr_tables;
-	/* space for a copy used by the interrupt handler */
-	struct dsi_isr_tables isr_tables_copy;
-
-	int update_channel;
-#ifdef DSI_PERF_MEASURE
-	unsigned int update_bytes;
-#endif
-
-	bool te_enabled;
-	bool ulps_enabled;
-
-	void (*framedone_callback)(int, void *);
-	void *framedone_data;
-
-	struct delayed_work framedone_timeout_work;
-
-#ifdef DSI_CATCH_MISSING_TE
-	struct timer_list te_timer;
-#endif
-
-	unsigned long cache_req_pck;
-	unsigned long cache_clk_freq;
-	struct dss_pll_clock_info cache_cinfo;
-
-	u32		errors;
-	spinlock_t	errors_lock;
-#ifdef DSI_PERF_MEASURE
-	ktime_t perf_setup_time;
-	ktime_t perf_start_time;
-#endif
-	int debug_read;
-	int debug_write;
-	struct {
-		struct dss_debugfs_entry *irqs;
-		struct dss_debugfs_entry *regs;
-		struct dss_debugfs_entry *clks;
-	} debugfs;
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-	spinlock_t irq_stats_lock;
-	struct dsi_irq_stats irq_stats;
-#endif
-
-	unsigned int num_lanes_supported;
-	unsigned int line_buffer_size;
+static int dsi_init_dispc(struct dsi_data *dsi);
+static void dsi_uninit_dispc(struct dsi_data *dsi);
 
-	struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
-	unsigned int num_lanes_used;
-
-	unsigned int scp_clk_refcount;
-
-	struct dss_lcd_mgr_config mgr_config;
-	struct videomode vm;
-	enum omap_dss_dsi_pixel_format pix_fmt;
-	enum omap_dss_dsi_mode mode;
-	struct omap_dss_dsi_videomode_timings vm_timings;
+static int dsi_vc_send_null(struct dsi_data *dsi, int vc, int channel);
 
-	struct omap_dss_device output;
-};
-
-struct dsi_packet_sent_handler_data {
-	struct dsi_data *dsi;
-	struct completion *completion;
-};
+static ssize_t _omap_dsi_host_transfer(struct dsi_data *dsi, int vc,
+				       const struct mipi_dsi_msg *msg);
 
 #ifdef DSI_PERF_MEASURE
 static bool dsi_perf;
 module_param(dsi_perf, bool, 0644);
 #endif
 
+/* Note: for some reason video mode seems to work only if VC_VIDEO is 0 */
+#define VC_VIDEO	0
+#define VC_CMD		1
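+
+/*
+ * Division of labour between the two fixed VCs: VC_VIDEO carries the pixel
+ * stream (video mode output and VP-sourced command mode updates), while
+ * VC_CMD is used for CPU-initiated command transfers.
+ */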
+
+#define drm_bridge_to_dsi(bridge) \
+	container_of(bridge, struct dsi_data, bridge)
+
 static inline struct dsi_data *to_dsi_data(struct omap_dss_device *dssdev)
 {
 	return dev_get_drvdata(dssdev->dev);
 }
 
+static inline struct dsi_data *host_to_omap(struct mipi_dsi_host *host)
+{
+	return container_of(host, struct dsi_data, host);
+}
+
 static inline void dsi_write_reg(struct dsi_data *dsi,
 				 const struct dsi_reg idx, u32 val)
 {
@@ -461,17 +112,13 @@ static inline u32 dsi_read_reg(struct dsi_data *dsi, const struct dsi_reg idx)
 	return __raw_readl(base + idx.idx);
 }
 
-static void dsi_bus_lock(struct omap_dss_device *dssdev)
+static void dsi_bus_lock(struct dsi_data *dsi)
 {
-	struct dsi_data *dsi = to_dsi_data(dssdev);
-
 	down(&dsi->bus_lock);
 }
 
-static void dsi_bus_unlock(struct omap_dss_device *dssdev)
+static void dsi_bus_unlock(struct dsi_data *dsi)
 {
-	struct dsi_data *dsi = to_dsi_data(dssdev);
-
 	up(&dsi->bus_lock);
 }
 
@@ -514,22 +161,6 @@ static inline bool wait_for_bit_change(struct dsi_data *dsi,
 	return false;
 }
 
-static u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
-{
-	switch (fmt) {
-	case OMAP_DSS_DSI_FMT_RGB888:
-	case OMAP_DSS_DSI_FMT_RGB666:
-		return 24;
-	case OMAP_DSS_DSI_FMT_RGB666_PACKED:
-		return 18;
-	case OMAP_DSS_DSI_FMT_RGB565:
-		return 16;
-	default:
-		BUG();
-		return 0;
-	}
-}
-
 #ifdef DSI_PERF_MEASURE
 static void dsi_perf_mark_setup(struct dsi_data *dsi)
 {
@@ -623,7 +254,7 @@ static void print_irq_status(u32 status)
 #undef PIS
 }
 
-static void print_irq_status_vc(int channel, u32 status)
+static void print_irq_status_vc(int vc, u32 status)
 {
 	if (status == 0)
 		return;
@@ -634,7 +265,7 @@ static void print_irq_status_vc(int channel, u32 status)
 #define PIS(x) (status & DSI_VC_IRQ_##x) ? (#x " ") : ""
 
 	pr_debug("DSI VC(%d) IRQ 0x%x: %s%s%s%s%s%s%s%s%s\n",
-		channel,
+		vc,
 		status,
 		PIS(CS),
 		PIS(ECC_CORR),
@@ -1015,7 +646,7 @@ static int dsi_unregister_isr(struct dsi_data *dsi, omap_dsi_isr_t isr,
 	return r;
 }
 
-static int dsi_register_isr_vc(struct dsi_data *dsi, int channel,
+static int dsi_register_isr_vc(struct dsi_data *dsi, int vc,
 			       omap_dsi_isr_t isr, void *arg, u32 mask)
 {
 	unsigned long flags;
@@ -1024,18 +655,18 @@ static int dsi_register_isr_vc(struct dsi_data *dsi, int channel,
 	spin_lock_irqsave(&dsi->irq_lock, flags);
 
 	r = _dsi_register_isr(isr, arg, mask,
-			dsi->isr_tables.isr_table_vc[channel],
-			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
+			dsi->isr_tables.isr_table_vc[vc],
+			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]));
 
 	if (r == 0)
-		_omap_dsi_set_irqs_vc(dsi, channel);
+		_omap_dsi_set_irqs_vc(dsi, vc);
 
 	spin_unlock_irqrestore(&dsi->irq_lock, flags);
 
 	return r;
 }
 
-static int dsi_unregister_isr_vc(struct dsi_data *dsi, int channel,
+static int dsi_unregister_isr_vc(struct dsi_data *dsi, int vc,
 				 omap_dsi_isr_t isr, void *arg, u32 mask)
 {
 	unsigned long flags;
@@ -1044,49 +675,11 @@ static int dsi_unregister_isr_vc(struct dsi_data *dsi, int channel,
 	spin_lock_irqsave(&dsi->irq_lock, flags);
 
 	r = _dsi_unregister_isr(isr, arg, mask,
-			dsi->isr_tables.isr_table_vc[channel],
-			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
-
-	if (r == 0)
-		_omap_dsi_set_irqs_vc(dsi, channel);
-
-	spin_unlock_irqrestore(&dsi->irq_lock, flags);
-
-	return r;
-}
-
-static int dsi_register_isr_cio(struct dsi_data *dsi, omap_dsi_isr_t isr,
-				void *arg, u32 mask)
-{
-	unsigned long flags;
-	int r;
-
-	spin_lock_irqsave(&dsi->irq_lock, flags);
-
-	r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
-			ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
-
-	if (r == 0)
-		_omap_dsi_set_irqs_cio(dsi);
-
-	spin_unlock_irqrestore(&dsi->irq_lock, flags);
-
-	return r;
-}
-
-static int dsi_unregister_isr_cio(struct dsi_data *dsi, omap_dsi_isr_t isr,
-				  void *arg, u32 mask)
-{
-	unsigned long flags;
-	int r;
-
-	spin_lock_irqsave(&dsi->irq_lock, flags);
-
-	r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
-			ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
+			dsi->isr_tables.isr_table_vc[vc],
+			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]));
 
 	if (r == 0)
-		_omap_dsi_set_irqs_cio(dsi);
+		_omap_dsi_set_irqs_vc(dsi, vc);
 
 	spin_unlock_irqrestore(&dsi->irq_lock, flags);
 
@@ -1819,56 +1412,6 @@ static void dsi_cio_timings(struct dsi_data *dsi)
 	dsi_write_reg(dsi, DSI_DSIPHY_CFG2, r);
 }
 
-/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */
-static void dsi_cio_enable_lane_override(struct dsi_data *dsi,
-					 unsigned int mask_p,
-					 unsigned int mask_n)
-{
-	int i;
-	u32 l;
-	u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26;
-
-	l = 0;
-
-	for (i = 0; i < dsi->num_lanes_supported; ++i) {
-		unsigned int p = dsi->lanes[i].polarity;
-
-		if (mask_p & (1 << i))
-			l |= 1 << (i * 2 + (p ? 0 : 1));
-
-		if (mask_n & (1 << i))
-			l |= 1 << (i * 2 + (p ? 1 : 0));
-	}
-
-	/*
-	 * Bits in REGLPTXSCPDAT4TO0DXDY:
-	 * 17: DY0 18: DX0
-	 * 19: DY1 20: DX1
-	 * 21: DY2 22: DX2
-	 * 23: DY3 24: DX3
-	 * 25: DY4 26: DX4
-	 */
-
-	/* Set the lane override configuration */
-
-	/* REGLPTXSCPDAT4TO0DXDY */
-	REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
-
-	/* Enable lane override */
-
-	/* ENLPTXSCPDAT */
-	REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 1, 27, 27);
-}
-
-static void dsi_cio_disable_lane_override(struct dsi_data *dsi)
-{
-	/* Disable lane override */
-	REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
-	/* Reset the lane override configuration */
-	/* REGLPTXSCPDAT4TO0DXDY */
-	REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 0, 22, 17);
-}
-
 static int dsi_cio_wait_tx_clk_esc_reset(struct dsi_data *dsi)
 {
 	int t, i;
@@ -2043,32 +1586,6 @@ static int dsi_cio_init(struct dsi_data *dsi)
 	l = FLD_MOD(l, 0x1fff, 12, 0);	/* STOP_STATE_COUNTER_IO */
 	dsi_write_reg(dsi, DSI_TIMING1, l);
 
-	if (dsi->ulps_enabled) {
-		unsigned int mask_p;
-		int i;
-
-		DSSDBG("manual ulps exit\n");
-
-		/* ULPS is exited by Mark-1 state for 1ms, followed by
-		 * stop state. DSS HW cannot do this via the normal
-		 * ULPS exit sequence, as after reset the DSS HW thinks
-		 * that we are not in ULPS mode, and refuses to send the
-		 * sequence. So we need to send the ULPS exit sequence
-		 * manually by setting positive lines high and negative lines
-		 * low for 1ms.
-		 */
-
-		mask_p = 0;
-
-		for (i = 0; i < dsi->num_lanes_supported; ++i) {
-			if (dsi->lanes[i].function == DSI_LANE_UNUSED)
-				continue;
-			mask_p |= 1 << i;
-		}
-
-		dsi_cio_enable_lane_override(dsi, mask_p, 0);
-	}
-
 	r = dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ON);
 	if (r)
 		goto err_cio_pwr;
@@ -2087,29 +1604,15 @@ static int dsi_cio_init(struct dsi_data *dsi)
 	if (r)
 		goto err_tx_clk_esc_rst;
 
-	if (dsi->ulps_enabled) {
-		/* Keep Mark-1 state for 1ms (as per DSI spec) */
-		ktime_t wait = ns_to_ktime(1000 * 1000);
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
-
-		/* Disable the override. The lanes should be set to Mark-11
-		 * state by the HW */
-		dsi_cio_disable_lane_override(dsi);
-	}
-
 	/* FORCE_TX_STOP_MODE_IO */
 	REG_FLD_MOD(dsi, DSI_TIMING1, 0, 15, 15);
 
 	dsi_cio_timings(dsi);
 
-	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
-		/* DDR_CLK_ALWAYS_ON */
-		REG_FLD_MOD(dsi, DSI_CLK_CTRL,
-			dsi->vm_timings.ddr_clk_always_on, 13, 13);
-	}
-
-	dsi->ulps_enabled = false;
+	/* DDR_CLK_ALWAYS_ON */
+	REG_FLD_MOD(dsi, DSI_CLK_CTRL,
+		    !(dsi->dsidev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS),
+		    13, 13);
 
 	DSSDBG("CIO init done\n");
 
@@ -2120,8 +1623,6 @@ static int dsi_cio_init(struct dsi_data *dsi)
 err_cio_pwr_dom:
 	dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_OFF);
 err_cio_pwr:
-	if (dsi->ulps_enabled)
-		dsi_cio_disable_lane_override(dsi);
 err_scp_clk_dom:
 	dsi_disable_scp_clk(dsi);
 	dsi_disable_pads(dsi);
@@ -2218,9 +1719,9 @@ static int dsi_force_tx_stop_mode_io(struct dsi_data *dsi)
 	return 0;
 }
 
-static bool dsi_vc_is_enabled(struct dsi_data *dsi, int channel)
+static bool dsi_vc_is_enabled(struct dsi_data *dsi, int vc)
 {
-	return REG_GET(dsi, DSI_VC_CTRL(channel), 0, 0);
+	return REG_GET(dsi, DSI_VC_CTRL(vc), 0, 0);
 }
 
 static void dsi_packet_sent_handler_vp(void *data, u32 mask)
@@ -2228,14 +1729,14 @@ static void dsi_packet_sent_handler_vp(void *data, u32 mask)
 	struct dsi_packet_sent_handler_data *vp_data =
 		(struct dsi_packet_sent_handler_data *) data;
 	struct dsi_data *dsi = vp_data->dsi;
-	const int channel = dsi->update_channel;
+	const int vc = dsi->update_vc;
 	u8 bit = dsi->te_enabled ? 30 : 31;
 
-	if (REG_GET(dsi, DSI_VC_TE(channel), bit, bit) == 0)
+	if (REG_GET(dsi, DSI_VC_TE(vc), bit, bit) == 0)
 		complete(vp_data->completion);
 }
 
-static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel)
+static int dsi_sync_vc_vp(struct dsi_data *dsi, int vc)
 {
 	DECLARE_COMPLETION_ONSTACK(completion);
 	struct dsi_packet_sent_handler_data vp_data = {
@@ -2247,13 +1748,13 @@ static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel)
 
 	bit = dsi->te_enabled ? 30 : 31;
 
-	r = dsi_register_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
+	r = dsi_register_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
 		&vp_data, DSI_VC_IRQ_PACKET_SENT);
 	if (r)
 		goto err0;
 
 	/* Wait for completion only if TE_EN/TE_START is still set */
-	if (REG_GET(dsi, DSI_VC_TE(channel), bit, bit)) {
+	if (REG_GET(dsi, DSI_VC_TE(vc), bit, bit)) {
 		if (wait_for_completion_timeout(&completion,
 				msecs_to_jiffies(10)) == 0) {
 			DSSERR("Failed to complete previous frame transfer\n");
@@ -2262,12 +1763,12 @@ static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel)
 		}
 	}
 
-	dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
+	dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
 		&vp_data, DSI_VC_IRQ_PACKET_SENT);
 
 	return 0;
 err1:
-	dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
+	dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
 		&vp_data, DSI_VC_IRQ_PACKET_SENT);
 err0:
 	return r;
@@ -2278,13 +1779,13 @@ static void dsi_packet_sent_handler_l4(void *data, u32 mask)
 	struct dsi_packet_sent_handler_data *l4_data =
 		(struct dsi_packet_sent_handler_data *) data;
 	struct dsi_data *dsi = l4_data->dsi;
-	const int channel = dsi->update_channel;
+	const int vc = dsi->update_vc;
 
-	if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5) == 0)
+	if (REG_GET(dsi, DSI_VC_CTRL(vc), 5, 5) == 0)
 		complete(l4_data->completion);
 }
 
-static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel)
+static int dsi_sync_vc_l4(struct dsi_data *dsi, int vc)
 {
 	DECLARE_COMPLETION_ONSTACK(completion);
 	struct dsi_packet_sent_handler_data l4_data = {
@@ -2293,13 +1794,13 @@ static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel)
 	};
 	int r = 0;
 
-	r = dsi_register_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
+	r = dsi_register_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
 		&l4_data, DSI_VC_IRQ_PACKET_SENT);
 	if (r)
 		goto err0;
 
 	/* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
-	if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5)) {
+	if (REG_GET(dsi, DSI_VC_CTRL(vc), 5, 5)) {
 		if (wait_for_completion_timeout(&completion,
 				msecs_to_jiffies(10)) == 0) {
 			DSSERR("Failed to complete previous l4 transfer\n");
@@ -2308,47 +1809,47 @@ static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel)
 		}
 	}
 
-	dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
+	dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
 		&l4_data, DSI_VC_IRQ_PACKET_SENT);
 
 	return 0;
 err1:
-	dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
+	dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
 		&l4_data, DSI_VC_IRQ_PACKET_SENT);
 err0:
 	return r;
 }
 
-static int dsi_sync_vc(struct dsi_data *dsi, int channel)
+static int dsi_sync_vc(struct dsi_data *dsi, int vc)
 {
 	WARN_ON(!dsi_bus_is_locked(dsi));
 
 	WARN_ON(in_interrupt());
 
-	if (!dsi_vc_is_enabled(dsi, channel))
+	if (!dsi_vc_is_enabled(dsi, vc))
 		return 0;
 
-	switch (dsi->vc[channel].source) {
+	switch (dsi->vc[vc].source) {
 	case DSI_VC_SOURCE_VP:
-		return dsi_sync_vc_vp(dsi, channel);
+		return dsi_sync_vc_vp(dsi, vc);
 	case DSI_VC_SOURCE_L4:
-		return dsi_sync_vc_l4(dsi, channel);
+		return dsi_sync_vc_l4(dsi, vc);
 	default:
 		BUG();
 		return -EINVAL;
 	}
 }
 
-static int dsi_vc_enable(struct dsi_data *dsi, int channel, bool enable)
+static int dsi_vc_enable(struct dsi_data *dsi, int vc, bool enable)
 {
-	DSSDBG("dsi_vc_enable channel %d, enable %d\n",
-			channel, enable);
+	DSSDBG("dsi_vc_enable vc %d, enable %d\n",
+			vc, enable);
 
 	enable = enable ? 1 : 0;
 
-	REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 0, 0);
+	REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), enable, 0, 0);
 
-	if (!wait_for_bit_change(dsi, DSI_VC_CTRL(channel), 0, enable)) {
+	if (!wait_for_bit_change(dsi, DSI_VC_CTRL(vc), 0, enable)) {
 		DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
 		return -EIO;
 	}
@@ -2356,17 +1857,17 @@ static int dsi_vc_enable(struct dsi_data *dsi, int channel, bool enable)
 	return 0;
 }
 
-static void dsi_vc_initial_config(struct dsi_data *dsi, int channel)
+static void dsi_vc_initial_config(struct dsi_data *dsi, int vc)
 {
 	u32 r;
 
-	DSSDBG("Initial config of virtual channel %d", channel);
+	DSSDBG("Initial config of VC %d", vc);
 
-	r = dsi_read_reg(dsi, DSI_VC_CTRL(channel));
+	r = dsi_read_reg(dsi, DSI_VC_CTRL(vc));
 
 	if (FLD_GET(r, 15, 15)) /* VC_BUSY */
 		DSSERR("VC(%d) busy when trying to configure it!\n",
-				channel);
+				vc);
 
 	r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
 	r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN  */
@@ -2381,74 +1882,39 @@ static void dsi_vc_initial_config(struct dsi_data *dsi, int channel)
 	r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
 	r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
 
-	dsi_write_reg(dsi, DSI_VC_CTRL(channel), r);
-
-	dsi->vc[channel].source = DSI_VC_SOURCE_L4;
-}
-
-static int dsi_vc_config_source(struct dsi_data *dsi, int channel,
-				enum dsi_vc_source source)
-{
-	if (dsi->vc[channel].source == source)
-		return 0;
-
-	DSSDBG("Source config of virtual channel %d", channel);
-
-	dsi_sync_vc(dsi, channel);
-
-	dsi_vc_enable(dsi, channel, 0);
-
-	/* VC_BUSY */
-	if (!wait_for_bit_change(dsi, DSI_VC_CTRL(channel), 15, 0)) {
-		DSSERR("vc(%d) busy when trying to config for VP\n", channel);
-		return -EIO;
-	}
-
-	/* SOURCE, 0 = L4, 1 = video port */
-	REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), source, 1, 1);
-
-	/* DCS_CMD_ENABLE */
-	if (dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) {
-		bool enable = source == DSI_VC_SOURCE_VP;
-		REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 30, 30);
-	}
-
-	dsi_vc_enable(dsi, channel, 1);
-
-	dsi->vc[channel].source = source;
+	dsi_write_reg(dsi, DSI_VC_CTRL(vc), r);
 
-	return 0;
+	dsi->vc[vc].source = DSI_VC_SOURCE_L4;
 }
 
-static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
+static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int vc,
 		bool enable)
 {
 	struct dsi_data *dsi = to_dsi_data(dssdev);
 
-	DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
+	DSSDBG("dsi_vc_enable_hs(%d, %d)\n", vc, enable);
+
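+	/* nothing to do if the VC is already in the requested mode */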
+	if (REG_GET(dsi, DSI_VC_CTRL(vc), 9, 9) == enable)
+		return;
 
 	WARN_ON(!dsi_bus_is_locked(dsi));
 
-	dsi_vc_enable(dsi, channel, 0);
+	dsi_vc_enable(dsi, vc, 0);
 	dsi_if_enable(dsi, 0);
 
-	REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 9, 9);
+	REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), enable, 9, 9);
 
-	dsi_vc_enable(dsi, channel, 1);
+	dsi_vc_enable(dsi, vc, 1);
 	dsi_if_enable(dsi, 1);
 
 	dsi_force_tx_stop_mode_io(dsi);
-
-	/* start the DDR clock by sending a NULL packet */
-	if (dsi->vm_timings.ddr_clk_always_on && enable)
-		dsi_vc_send_null(dsi, channel);
 }
 
-static void dsi_vc_flush_long_data(struct dsi_data *dsi, int channel)
+static void dsi_vc_flush_long_data(struct dsi_data *dsi, int vc)
 {
-	while (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
+	while (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
 		u32 val;
-		val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
+		val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
 		DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
 				(val >> 0) & 0xff,
 				(val >> 8) & 0xff,
@@ -2494,13 +1960,13 @@ static void dsi_show_rx_ack_with_err(u16 err)
 		DSSERR("\t\tDSI Protocol Violation\n");
 }
 
-static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel)
+static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int vc)
 {
 	/* RX_FIFO_NOT_EMPTY */
-	while (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
+	while (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
 		u32 val;
 		u8 dt;
-		val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
+		val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
 		DSSERR("\trawval %#08x\n", val);
 		dt = FLD_GET(val, 5, 0);
 		if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
@@ -2515,7 +1981,7 @@ static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel)
 		} else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
 			DSSERR("\tDCS long response, len %d\n",
 					FLD_GET(val, 23, 8));
-			dsi_vc_flush_long_data(dsi, channel);
+			dsi_vc_flush_long_data(dsi, vc);
 		} else {
 			DSSERR("\tunknown datatype 0x%02x\n", dt);
 		}
@@ -2523,35 +1989,35 @@ static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel)
 	return 0;
 }
 
-static int dsi_vc_send_bta(struct dsi_data *dsi, int channel)
+static int dsi_vc_send_bta(struct dsi_data *dsi, int vc)
 {
 	if (dsi->debug_write || dsi->debug_read)
-		DSSDBG("dsi_vc_send_bta %d\n", channel);
+		DSSDBG("dsi_vc_send_bta %d\n", vc);
 
 	WARN_ON(!dsi_bus_is_locked(dsi));
 
 	/* RX_FIFO_NOT_EMPTY */
-	if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
+	if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
 		DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
-		dsi_vc_flush_receive_data(dsi, channel);
+		dsi_vc_flush_receive_data(dsi, vc);
 	}
 
-	REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
+	REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 1, 6, 6); /* BTA_EN */
 
 	/* flush posted write */
-	dsi_read_reg(dsi, DSI_VC_CTRL(channel));
+	dsi_read_reg(dsi, DSI_VC_CTRL(vc));
 
 	return 0;
 }
 
-static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
+static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int vc)
 {
 	struct dsi_data *dsi = to_dsi_data(dssdev);
 	DECLARE_COMPLETION_ONSTACK(completion);
 	int r = 0;
 	u32 err;
 
-	r = dsi_register_isr_vc(dsi, channel, dsi_completion_handler,
+	r = dsi_register_isr_vc(dsi, vc, dsi_completion_handler,
 			&completion, DSI_VC_IRQ_BTA);
 	if (r)
 		goto err0;
@@ -2561,7 +2027,7 @@ static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
 	if (r)
 		goto err1;
 
-	r = dsi_vc_send_bta(dsi, channel);
+	r = dsi_vc_send_bta(dsi, vc);
 	if (r)
 		goto err2;
 
@@ -2582,29 +2048,30 @@ static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
 	dsi_unregister_isr(dsi, dsi_completion_handler, &completion,
 			DSI_IRQ_ERROR_MASK);
 err1:
-	dsi_unregister_isr_vc(dsi, channel, dsi_completion_handler,
+	dsi_unregister_isr_vc(dsi, vc, dsi_completion_handler,
 			&completion, DSI_VC_IRQ_BTA);
 err0:
 	return r;
 }
 
-static inline void dsi_vc_write_long_header(struct dsi_data *dsi, int channel,
-					    u8 data_type, u16 len, u8 ecc)
+static inline void dsi_vc_write_long_header(struct dsi_data *dsi, int vc,
+					    int channel, u8 data_type, u16 len,
+					    u8 ecc)
 {
 	u32 val;
 	u8 data_id;
 
 	WARN_ON(!dsi_bus_is_locked(dsi));
 
-	data_id = data_type | dsi->vc[channel].vc_id << 6;
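+	/* DSI data ID byte: virtual channel ID in bits 7:6, data type in 5:0 */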
+	data_id = data_type | channel << 6;
 
 	val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
 		FLD_VAL(ecc, 31, 24);
 
-	dsi_write_reg(dsi, DSI_VC_LONG_PACKET_HEADER(channel), val);
+	dsi_write_reg(dsi, DSI_VC_LONG_PACKET_HEADER(vc), val);
 }
 
-static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int channel,
+static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int vc,
 					     u8 b1, u8 b2, u8 b3, u8 b4)
 {
 	u32 val;
@@ -2614,33 +2081,31 @@ static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int channel,
 /*	DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
 			b1, b2, b3, b4, val); */
 
-	dsi_write_reg(dsi, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
+	dsi_write_reg(dsi, DSI_VC_LONG_PACKET_PAYLOAD(vc), val);
 }
 
-static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type,
-			    u8 *data, u16 len, u8 ecc)
+static int dsi_vc_send_long(struct dsi_data *dsi, int vc,
+			    const struct mipi_dsi_msg *msg)
 {
 	/*u32 val; */
 	int i;
-	u8 *p;
+	const u8 *p;
 	int r = 0;
 	u8 b1, b2, b3, b4;
 
 	if (dsi->debug_write)
-		DSSDBG("dsi_vc_send_long, %d bytes\n", len);
+		DSSDBG("dsi_vc_send_long, %d bytes\n", msg->tx_len);
 
 	/* len + header */
-	if (dsi->vc[channel].tx_fifo_size * 32 * 4 < len + 4) {
+	if (dsi->vc[vc].tx_fifo_size * 32 * 4 < msg->tx_len + 4) {
 		DSSERR("unable to send long packet: packet too long.\n");
 		return -EINVAL;
 	}
 
-	dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_L4);
-
-	dsi_vc_write_long_header(dsi, channel, data_type, len, ecc);
+	dsi_vc_write_long_header(dsi, vc, msg->channel, msg->type, msg->tx_len, 0);
 
-	p = data;
-	for (i = 0; i < len >> 2; i++) {
+	p = msg->tx_buf;
+	for (i = 0; i < msg->tx_len >> 2; i++) {
 		if (dsi->debug_write)
 			DSSDBG("\tsending full packet %d\n", i);
 
@@ -2649,10 +2114,10 @@ static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type,
 		b3 = *p++;
 		b4 = *p++;
 
-		dsi_vc_write_long_payload(dsi, channel, b1, b2, b3, b4);
+		dsi_vc_write_long_payload(dsi, vc, b1, b2, b3, b4);
 	}
 
-	i = len % 4;
+	i = msg->tx_len % 4;
 	if (i) {
 		b1 = 0; b2 = 0; b3 = 0;
 
@@ -2674,194 +2139,89 @@ static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type,
 			break;
 		}
 
-		dsi_vc_write_long_payload(dsi, channel, b1, b2, b3, 0);
+		dsi_vc_write_long_payload(dsi, vc, b1, b2, b3, 0);
 	}
 
 	return r;
 }
 
-static int dsi_vc_send_short(struct dsi_data *dsi, int channel, u8 data_type,
-			     u16 data, u8 ecc)
+static int dsi_vc_send_short(struct dsi_data *dsi, int vc,
+			     const struct mipi_dsi_msg *msg)
 {
+	struct mipi_dsi_packet pkt;
 	u32 r;
-	u8 data_id;
+	int ret;
+
+	ret = mipi_dsi_create_packet(&pkt, msg);
+	if (ret < 0)
+		return ret;
 
 	WARN_ON(!dsi_bus_is_locked(dsi));
 
 	if (dsi->debug_write)
-		DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
-				channel,
-				data_type, data & 0xff, (data >> 8) & 0xff);
+		DSSDBG("dsi_vc_send_short(vc%d, dt %#x, b1 %#x, b2 %#x)\n",
+		       vc, msg->type, pkt.header[1], pkt.header[2]);
 
-	dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_L4);
-
-	if (FLD_GET(dsi_read_reg(dsi, DSI_VC_CTRL(channel)), 16, 16)) {
+	if (FLD_GET(dsi_read_reg(dsi, DSI_VC_CTRL(vc)), 16, 16)) {
 		DSSERR("ERROR FIFO FULL, aborting transfer\n");
 		return -EINVAL;
 	}
 
-	data_id = data_type | dsi->vc[channel].vc_id << 6;
-
-	r = (data_id << 0) | (data << 8) | (ecc << 24);
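+	/* pack the 4-byte packet header, header[0] (data ID) in the LSB */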
+	r = pkt.header[3] << 24 | pkt.header[2] << 16 | pkt.header[1] << 8 |
+	    pkt.header[0];
 
-	dsi_write_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel), r);
+	dsi_write_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc), r);
 
 	return 0;
 }
 
-static int dsi_vc_send_null(struct dsi_data *dsi, int channel)
-{
-	return dsi_vc_send_long(dsi, channel, MIPI_DSI_NULL_PACKET, NULL, 0, 0);
-}
-
-static int dsi_vc_write_nosync_common(struct dsi_data *dsi, int channel,
-				      u8 *data, int len,
-				      enum dss_dsi_content_type type)
-{
-	int r;
-
-	if (len == 0) {
-		BUG_ON(type == DSS_DSI_CONTENT_DCS);
-		r = dsi_vc_send_short(dsi, channel,
-				MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0);
-	} else if (len == 1) {
-		r = dsi_vc_send_short(dsi, channel,
-				type == DSS_DSI_CONTENT_GENERIC ?
-				MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
-				MIPI_DSI_DCS_SHORT_WRITE, data[0], 0);
-	} else if (len == 2) {
-		r = dsi_vc_send_short(dsi, channel,
-				type == DSS_DSI_CONTENT_GENERIC ?
-				MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
-				MIPI_DSI_DCS_SHORT_WRITE_PARAM,
-				data[0] | (data[1] << 8), 0);
-	} else {
-		r = dsi_vc_send_long(dsi, channel,
-				type == DSS_DSI_CONTENT_GENERIC ?
-				MIPI_DSI_GENERIC_LONG_WRITE :
-				MIPI_DSI_DCS_LONG_WRITE, data, len, 0);
-	}
-
-	return r;
-}
-
-static int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
-		u8 *data, int len)
-{
-	struct dsi_data *dsi = to_dsi_data(dssdev);
-
-	return dsi_vc_write_nosync_common(dsi, channel, data, len,
-			DSS_DSI_CONTENT_DCS);
-}
-
-static int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
-		u8 *data, int len)
+static int dsi_vc_send_null(struct dsi_data *dsi, int vc, int channel)
 {
-	struct dsi_data *dsi = to_dsi_data(dssdev);
+	const struct mipi_dsi_msg msg = {
+		.channel = channel,
+		.type = MIPI_DSI_NULL_PACKET,
+	};
 
-	return dsi_vc_write_nosync_common(dsi, channel, data, len,
-			DSS_DSI_CONTENT_GENERIC);
+	return dsi_vc_send_long(dsi, vc, &msg);
 }
 
-static int dsi_vc_write_common(struct omap_dss_device *dssdev,
-			       int channel, u8 *data, int len,
-			       enum dss_dsi_content_type type)
+static int dsi_vc_write_common(struct omap_dss_device *dssdev, int vc,
+			       const struct mipi_dsi_msg *msg)
 {
 	struct dsi_data *dsi = to_dsi_data(dssdev);
 	int r;
 
-	r = dsi_vc_write_nosync_common(dsi, channel, data, len, type);
-	if (r)
-		goto err;
-
-	r = dsi_vc_send_bta_sync(dssdev, channel);
-	if (r)
-		goto err;
-
-	/* RX_FIFO_NOT_EMPTY */
-	if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
-		DSSERR("rx fifo not empty after write, dumping data:\n");
-		dsi_vc_flush_receive_data(dsi, channel);
-		r = -EIO;
-		goto err;
-	}
-
-	return 0;
-err:
-	DSSERR("dsi_vc_write_common(ch %d, cmd 0x%02x, len %d) failed\n",
-			channel, data[0], len);
-	return r;
-}
-
-static int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
-		int len)
-{
-	return dsi_vc_write_common(dssdev, channel, data, len,
-			DSS_DSI_CONTENT_DCS);
-}
-
-static int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data,
-		int len)
-{
-	return dsi_vc_write_common(dssdev, channel, data, len,
-			DSS_DSI_CONTENT_GENERIC);
-}
+	if (mipi_dsi_packet_format_is_short(msg->type))
+		r = dsi_vc_send_short(dsi, vc, msg);
+	else
+		r = dsi_vc_send_long(dsi, vc, msg);
 
-static int dsi_vc_dcs_send_read_request(struct dsi_data *dsi, int channel,
-					u8 dcs_cmd)
-{
-	int r;
+	if (r < 0)
+		return r;
 
-	if (dsi->debug_read)
-		DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n",
-			channel, dcs_cmd);
+	/*
+	 * TODO: we do not always have to do the BTA sync, for example
+	 * we can improve performance by setting the update window
+	 * information without sending BTA sync between the commands.
+	 * In that case we can return early.
+	 */
 
-	r = dsi_vc_send_short(dsi, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
+	r = dsi_vc_send_bta_sync(dssdev, vc);
 	if (r) {
-		DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)"
-			" failed\n", channel, dcs_cmd);
+		DSSERR("bta sync failed\n");
 		return r;
 	}
 
-	return 0;
-}
-
-static int dsi_vc_generic_send_read_request(struct dsi_data *dsi, int channel,
-					    u8 *reqdata, int reqlen)
-{
-	u16 data;
-	u8 data_type;
-	int r;
-
-	if (dsi->debug_read)
-		DSSDBG("dsi_vc_generic_send_read_request(ch %d, reqlen %d)\n",
-			channel, reqlen);
-
-	if (reqlen == 0) {
-		data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
-		data = 0;
-	} else if (reqlen == 1) {
-		data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
-		data = reqdata[0];
-	} else if (reqlen == 2) {
-		data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
-		data = reqdata[0] | (reqdata[1] << 8);
-	} else {
-		BUG();
-		return -EINVAL;
-	}
-
-	r = dsi_vc_send_short(dsi, channel, data_type, data, 0);
-	if (r) {
-		DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)"
-			" failed\n", channel, reqlen);
-		return r;
+	/* RX_FIFO_NOT_EMPTY */
+	if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
+		DSSERR("rx fifo not empty after write, dumping data:\n");
+		dsi_vc_flush_receive_data(dsi, vc);
+		return -EIO;
 	}
 
 	return 0;
 }
 
-static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
+static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int vc, u8 *buf,
 			       int buflen, enum dss_dsi_content_type type)
 {
 	u32 val;
@@ -2869,13 +2229,13 @@ static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
 	int r;
 
 	/* RX_FIFO_NOT_EMPTY */
-	if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20) == 0) {
+	if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20) == 0) {
 		DSSERR("RX fifo empty when trying to read.\n");
 		r = -EIO;
 		goto err;
 	}
 
-	val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
+	val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
 	if (dsi->debug_read)
 		DSSDBG("\theader: %08x\n", val);
 	dt = FLD_GET(val, 5, 0);
@@ -2939,7 +2299,7 @@ static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
 		for (w = 0; w < len + 2;) {
 			int b;
 			val = dsi_read_reg(dsi,
-				DSI_VC_SHORT_PACKET_HEADER(channel));
+				DSI_VC_SHORT_PACKET_HEADER(vc));
 			if (dsi->debug_read)
 				DSSDBG("\t\t%02x %02x %02x %02x\n",
 						(val >> 0) & 0xff,
@@ -2963,168 +2323,73 @@ static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
 	}
 
 err:
-	DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
+	DSSERR("dsi_vc_read_rx_fifo(vc %d type %s) failed\n", vc,
 		type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
 
 	return r;
 }
 
-static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
-		u8 *buf, int buflen)
+static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int vc,
+			   const struct mipi_dsi_msg *msg)
 {
 	struct dsi_data *dsi = to_dsi_data(dssdev);
+	u8 cmd = ((u8 *)msg->tx_buf)[0];
 	int r;
 
-	r = dsi_vc_dcs_send_read_request(dsi, channel, dcs_cmd);
+	if (dsi->debug_read)
+		DSSDBG("%s(vc %d, cmd %x)\n", __func__, vc, cmd);
+
+	r = dsi_vc_send_short(dsi, vc, msg);
 	if (r)
 		goto err;
 
-	r = dsi_vc_send_bta_sync(dssdev, channel);
+	r = dsi_vc_send_bta_sync(dssdev, vc);
 	if (r)
 		goto err;
 
-	r = dsi_vc_read_rx_fifo(dsi, channel, buf, buflen,
+	r = dsi_vc_read_rx_fifo(dsi, vc, msg->rx_buf, msg->rx_len,
 		DSS_DSI_CONTENT_DCS);
 	if (r < 0)
 		goto err;
 
-	if (r != buflen) {
+	if (r != msg->rx_len) {
 		r = -EIO;
 		goto err;
 	}
 
 	return 0;
 err:
-	DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n", channel, dcs_cmd);
+	DSSERR("%s(vc %d, cmd 0x%02x) failed\n", __func__,  vc, cmd);
 	return r;
 }
 
-static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
-		u8 *reqdata, int reqlen, u8 *buf, int buflen)
+static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int vc,
+			       const struct mipi_dsi_msg *msg)
 {
 	struct dsi_data *dsi = to_dsi_data(dssdev);
 	int r;
 
-	r = dsi_vc_generic_send_read_request(dsi, channel, reqdata, reqlen);
+	r = dsi_vc_send_short(dsi, vc, msg);
 	if (r)
-		return r;
+		goto err;
 
-	r = dsi_vc_send_bta_sync(dssdev, channel);
+	r = dsi_vc_send_bta_sync(dssdev, vc);
 	if (r)
-		return r;
+		goto err;
 
-	r = dsi_vc_read_rx_fifo(dsi, channel, buf, buflen,
+	r = dsi_vc_read_rx_fifo(dsi, vc, msg->rx_buf, msg->rx_len,
 		DSS_DSI_CONTENT_GENERIC);
 	if (r < 0)
-		return r;
-
-	if (r != buflen) {
-		r = -EIO;
-		return r;
-	}
-
-	return 0;
-}
-
-static int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
-		u16 len)
-{
-	struct dsi_data *dsi = to_dsi_data(dssdev);
-
-	return dsi_vc_send_short(dsi, channel,
-			MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0);
-}
-
-static int dsi_enter_ulps(struct dsi_data *dsi)
-{
-	DECLARE_COMPLETION_ONSTACK(completion);
-	int r, i;
-	unsigned int mask;
-
-	DSSDBG("Entering ULPS");
-
-	WARN_ON(!dsi_bus_is_locked(dsi));
-
-	WARN_ON(dsi->ulps_enabled);
-
-	if (dsi->ulps_enabled)
-		return 0;
-
-	/* DDR_CLK_ALWAYS_ON */
-	if (REG_GET(dsi, DSI_CLK_CTRL, 13, 13)) {
-		dsi_if_enable(dsi, 0);
-		REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 13, 13);
-		dsi_if_enable(dsi, 1);
-	}
-
-	dsi_sync_vc(dsi, 0);
-	dsi_sync_vc(dsi, 1);
-	dsi_sync_vc(dsi, 2);
-	dsi_sync_vc(dsi, 3);
-
-	dsi_force_tx_stop_mode_io(dsi);
-
-	dsi_vc_enable(dsi, 0, false);
-	dsi_vc_enable(dsi, 1, false);
-	dsi_vc_enable(dsi, 2, false);
-	dsi_vc_enable(dsi, 3, false);
-
-	if (REG_GET(dsi, DSI_COMPLEXIO_CFG2, 16, 16)) {	/* HS_BUSY */
-		DSSERR("HS busy when enabling ULPS\n");
-		return -EIO;
-	}
-
-	if (REG_GET(dsi, DSI_COMPLEXIO_CFG2, 17, 17)) {	/* LP_BUSY */
-		DSSERR("LP busy when enabling ULPS\n");
-		return -EIO;
-	}
-
-	r = dsi_register_isr_cio(dsi, dsi_completion_handler, &completion,
-			DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
-	if (r)
-		return r;
-
-	mask = 0;
-
-	for (i = 0; i < dsi->num_lanes_supported; ++i) {
-		if (dsi->lanes[i].function == DSI_LANE_UNUSED)
-			continue;
-		mask |= 1 << i;
-	}
-	/* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
-	/* LANEx_ULPS_SIG2 */
-	REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG2, mask, 9, 5);
-
-	/* flush posted write and wait for SCP interface to finish the write */
-	dsi_read_reg(dsi, DSI_COMPLEXIO_CFG2);
+		goto err;
 
-	if (wait_for_completion_timeout(&completion,
-				msecs_to_jiffies(1000)) == 0) {
-		DSSERR("ULPS enable timeout\n");
+	if (r != msg->rx_len) {
 		r = -EIO;
 		goto err;
 	}
 
-	dsi_unregister_isr_cio(dsi, dsi_completion_handler, &completion,
-			DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
-
-	/* Reset LANEx_ULPS_SIG2 */
-	REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG2, 0, 9, 5);
-
-	/* flush posted write and wait for SCP interface to finish the write */
-	dsi_read_reg(dsi, DSI_COMPLEXIO_CFG2);
-
-	dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ULPS);
-
-	dsi_if_enable(dsi, false);
-
-	dsi->ulps_enabled = true;
-
 	return 0;
-
 err:
-	dsi_unregister_isr_cio(dsi, dsi_completion_handler, &completion,
-			DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
+	DSSERR("%s(vc %d, reqlen %d) failed\n", __func__,  vc, msg->tx_len);
 	return r;
 }
 
@@ -3241,7 +2506,7 @@ static void dsi_config_vp_num_line_buffers(struct dsi_data *dsi)
 	int num_line_buffers;
 
 	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
-		int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+		int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
 		const struct videomode *vm = &dsi->vm;
 		/*
 		 * Don't use line buffers if width is greater than the video
@@ -3372,7 +2637,7 @@ static void dsi_config_cmd_mode_interleaving(struct dsi_data *dsi)
 	int tclk_trail, ths_exit, exiths_clk;
 	bool ddr_alwon;
 	const struct videomode *vm = &dsi->vm;
-	int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+	int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
 	int ndl = dsi->num_lanes_used - 1;
 	int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
 	int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
@@ -3500,7 +2765,7 @@ static int dsi_proto_config(struct dsi_data *dsi)
 	dsi_set_lp_rx_timeout(dsi, 0x1fff, true, true);
 	dsi_set_hs_tx_timeout(dsi, 0x1fff, true, true);
 
-	switch (dsi_get_pixel_size(dsi->pix_fmt)) {
+	switch (mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt)) {
 	case 16:
 		buswidth = 0;
 		break;
@@ -3621,7 +2886,7 @@ static void dsi_proto_timings(struct dsi_data *dsi)
 		int window_sync = dsi->vm_timings.window_sync;
 		bool hsync_end;
 		const struct videomode *vm = &dsi->vm;
-		int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+		int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
 		int tl, t_he, width_bytes;
 
 		hsync_end = dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE;
@@ -3659,12 +2924,9 @@ static void dsi_proto_timings(struct dsi_data *dsi)
 	}
 }
 
-static int dsi_configure_pins(struct omap_dss_device *dssdev,
-		const struct omap_dsi_pin_config *pin_cfg)
+static int dsi_configure_pins(struct dsi_data *dsi,
+		int num_pins, const u32 *pins)
 {
-	struct dsi_data *dsi = to_dsi_data(dssdev);
-	int num_pins;
-	const int *pins;
 	struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
 	int num_lanes;
 	int i;
@@ -3677,9 +2939,6 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev,
 		DSI_LANE_DATA4,
 	};
 
-	num_pins = pin_cfg->num_pins;
-	pins = pin_cfg->pins;
-
 	if (num_pins < 4 || num_pins > dsi->num_lanes_supported * 2
 			|| num_pins % 2 != 0)
 		return -EINVAL;
@@ -3691,15 +2950,15 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev,
 
 	for (i = 0; i < num_pins; i += 2) {
 		u8 lane, pol;
-		int dx, dy;
+		u32 dx, dy;
 
 		dx = pins[i];
 		dy = pins[i + 1];
 
-		if (dx < 0 || dx >= dsi->num_lanes_supported * 2)
+		if (dx >= dsi->num_lanes_supported * 2)
 			return -EINVAL;
 
-		if (dy < 0 || dy >= dsi->num_lanes_supported * 2)
+		if (dy >= dsi->num_lanes_supported * 2)
 			return -EINVAL;
 
 		if (dx & 1) {
@@ -3725,86 +2984,102 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev,
 	return 0;
 }
 
-static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
+static int dsi_enable_video_mode(struct dsi_data *dsi, int vc)
 {
-	struct dsi_data *dsi = to_dsi_data(dssdev);
-	int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+	int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
 	u8 data_type;
 	u16 word_count;
-	int r;
 
-	r = dsi_display_init_dispc(dsi);
-	if (r)
-		return r;
+	switch (dsi->pix_fmt) {
+	case MIPI_DSI_FMT_RGB888:
+		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+		break;
+	case MIPI_DSI_FMT_RGB666:
+		data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+		break;
+	case MIPI_DSI_FMT_RGB666_PACKED:
+		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+		break;
+	case MIPI_DSI_FMT_RGB565:
+		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+		break;
+	default:
+		return -EINVAL;
+	}
 
-	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
-		switch (dsi->pix_fmt) {
-		case OMAP_DSS_DSI_FMT_RGB888:
-			data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
-			break;
-		case OMAP_DSS_DSI_FMT_RGB666:
-			data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
-			break;
-		case OMAP_DSS_DSI_FMT_RGB666_PACKED:
-			data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
-			break;
-		case OMAP_DSS_DSI_FMT_RGB565:
-			data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
-			break;
-		default:
-			r = -EINVAL;
-			goto err_pix_fmt;
-		}
+	dsi_if_enable(dsi, false);
+	dsi_vc_enable(dsi, vc, false);
 
-		dsi_if_enable(dsi, false);
-		dsi_vc_enable(dsi, channel, false);
+	/* MODE, 1 = video mode */
+	REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 1, 4, 4);
+
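+	/* word count in the pixel stream header: payload bytes per scanline */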
+	word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
+
+	dsi_vc_write_long_header(dsi, vc, dsi->dsidev->channel, data_type,
+			word_count, 0);
+
+	dsi_vc_enable(dsi, vc, true);
+	dsi_if_enable(dsi, true);
+
+	return 0;
+}
+
+static void dsi_disable_video_mode(struct dsi_data *dsi, int vc)
+{
+	dsi_if_enable(dsi, false);
+	dsi_vc_enable(dsi, vc, false);
 
-		/* MODE, 1 = video mode */
-		REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 1, 4, 4);
+	/* MODE, 0 = command mode */
+	REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 0, 4, 4);
 
-		word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
+	dsi_vc_enable(dsi, vc, true);
+	dsi_if_enable(dsi, true);
+}
 
-		dsi_vc_write_long_header(dsi, channel, data_type,
-				word_count, 0);
+static void dsi_enable_video_output(struct omap_dss_device *dssdev, int vc)
+{
+	struct dsi_data *dsi = to_dsi_data(dssdev);
+	int r;
 
-		dsi_vc_enable(dsi, channel, true);
-		dsi_if_enable(dsi, true);
+	r = dsi_init_dispc(dsi);
+	if (r) {
+		dev_err(dsi->dev, "failed to init dispc!\n");
+		return;
+	}
+
+	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
+		r = dsi_enable_video_mode(dsi, vc);
+		if (r)
+			goto err_video_mode;
 	}
 
 	r = dss_mgr_enable(&dsi->output);
 	if (r)
 		goto err_mgr_enable;
 
-	return 0;
+	return;
 
 err_mgr_enable:
 	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
 		dsi_if_enable(dsi, false);
-		dsi_vc_enable(dsi, channel, false);
+		dsi_vc_enable(dsi, vc, false);
 	}
-err_pix_fmt:
-	dsi_display_uninit_dispc(dsi);
-	return r;
+err_video_mode:
+	dsi_uninit_dispc(dsi);
+	dev_err(dsi->dev, "failed to enable DSI encoder!\n");
+	return;
 }
 
-static void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
+static void dsi_disable_video_output(struct omap_dss_device *dssdev, int vc)
 {
 	struct dsi_data *dsi = to_dsi_data(dssdev);
 
-	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
-		dsi_if_enable(dsi, false);
-		dsi_vc_enable(dsi, channel, false);
-
-		/* MODE, 0 = command mode */
-		REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 0, 4, 4);
-
-		dsi_vc_enable(dsi, channel, true);
-		dsi_if_enable(dsi, true);
-	}
+	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE)
+		dsi_disable_video_mode(dsi, vc);
 
 	dss_mgr_disable(&dsi->output);
 
-	dsi_display_uninit_dispc(dsi);
+	dsi_uninit_dispc(dsi);
 }
 
 static void dsi_update_screen_dispc(struct dsi_data *dsi)
@@ -3817,16 +3092,14 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi)
 	unsigned int packet_len;
 	u32 l;
 	int r;
-	const unsigned channel = dsi->update_channel;
+	const unsigned int vc = dsi->update_vc;
 	const unsigned int line_buf_size = dsi->line_buffer_size;
 	u16 w = dsi->vm.hactive;
 	u16 h = dsi->vm.vactive;
 
 	DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
 
-	dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_VP);
-
-	bytespp	= dsi_get_pixel_size(dsi->pix_fmt) / 8;
+	bytespp	= mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt) / 8;
 	bytespl = w * bytespp;
 	bytespf = bytespl * h;
 
@@ -3845,16 +3118,16 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi)
 		total_len += (bytespf % packet_payload) + 1;
 
 	l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
-	dsi_write_reg(dsi, DSI_VC_TE(channel), l);
+	dsi_write_reg(dsi, DSI_VC_TE(vc), l);
 
-	dsi_vc_write_long_header(dsi, channel, MIPI_DSI_DCS_LONG_WRITE,
+	dsi_vc_write_long_header(dsi, vc, dsi->dsidev->channel, MIPI_DSI_DCS_LONG_WRITE,
 		packet_len, 0);
 
 	if (dsi->te_enabled)
 		l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
 	else
 		l = FLD_MOD(l, 1, 31, 31); /* TE_START */
-	dsi_write_reg(dsi, DSI_VC_TE(channel), l);
+	dsi_write_reg(dsi, DSI_VC_TE(vc), l);
 
 	/* We put SIDLEMODE to no-idle for the duration of the transfer,
 	 * because DSS interrupts are not capable of waking up the CPU and the
@@ -3877,7 +3150,7 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi)
 		 * for TE is longer than the timer allows */
 		REG_FLD_MOD(dsi, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
 
-		dsi_vc_send_bta(dsi, channel);
+		dsi_vc_send_bta(dsi, vc);
 
 #ifdef DSI_CATCH_MISSING_TE
 		mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
@@ -3902,7 +3175,7 @@ static void dsi_handle_framedone(struct dsi_data *dsi, int error)
 		REG_FLD_MOD(dsi, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
 	}
 
-	dsi->framedone_callback(error, dsi->framedone_data);
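+	/* the update that held the bus is finished, release it */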
+	dsi_bus_unlock(dsi);
 
 	if (!error)
 		dsi_perf_show(dsi, "DISPC");
@@ -3935,30 +3208,91 @@ static void dsi_framedone_irq_callback(void *data)
 
 	cancel_delayed_work(&dsi->framedone_timeout_work);
 
+	DSSDBG("Framedone received!\n");
+
 	dsi_handle_framedone(dsi, 0);
 }
 
-static int dsi_update(struct omap_dss_device *dssdev, int channel,
-		void (*callback)(int, void *), void *data)
+static int _dsi_update(struct dsi_data *dsi)
 {
-	struct dsi_data *dsi = to_dsi_data(dssdev);
-
 	dsi_perf_mark_setup(dsi);
 
-	dsi->update_channel = channel;
-
-	dsi->framedone_callback = callback;
-	dsi->framedone_data = data;
-
 #ifdef DSI_PERF_MEASURE
 	dsi->update_bytes = dsi->vm.hactive * dsi->vm.vactive *
-		dsi_get_pixel_size(dsi->pix_fmt) / 8;
+		mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt) / 8;
 #endif
 	dsi_update_screen_dispc(dsi);
 
 	return 0;
 }
 
+static int _dsi_send_nop(struct dsi_data *dsi, int vc, int channel)
+{
+	const u8 payload[] = { MIPI_DCS_NOP };
+	const struct mipi_dsi_msg msg = {
+		.channel = channel,
+		.type = MIPI_DSI_DCS_SHORT_WRITE,
+		.tx_len = 1,
+		.tx_buf = payload,
+	};
+
+	WARN_ON(!dsi_bus_is_locked(dsi));
+
+	return _omap_dsi_host_transfer(dsi, vc, &msg);
+}
+
+static int dsi_update_channel(struct omap_dss_device *dssdev, int vc)
+{
+	struct dsi_data *dsi = to_dsi_data(dssdev);
+	int r;
+
+	dsi_bus_lock(dsi);
+
+	if (!dsi->video_enabled) {
+		r = -EIO;
+		goto err;
+	}
+
+	if (dsi->vm.hactive == 0 || dsi->vm.vactive == 0) {
+		r = -EINVAL;
+		goto err;
+	}
+
+	DSSDBG("dsi_update_channel: %d", vc);
+
+	/*
+	 * Send NOP between the frames. If we don't send something here, the
+	 * updates stop working. This is probably related to DSI spec stating
+	 * that the DSI host should transition to LP at least once per frame.
+	 */
+	r = _dsi_send_nop(dsi, VC_CMD, dsi->dsidev->channel);
+	if (r < 0) {
+		DSSWARN("failed to send nop between frames: %d\n", r);
+		goto err;
+	}
+
+	dsi->update_vc = vc;
+
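+	/*
+	 * With an external TE GPIO the update is deferred until the panel's
+	 * TE pulse; otherwise start it immediately.
+	 */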
+	if (dsi->te_enabled && dsi->te_gpio) {
+		schedule_delayed_work(&dsi->te_timeout_work,
+				      msecs_to_jiffies(250));
+		atomic_set(&dsi->do_ext_te_update, 1);
+	} else {
+		_dsi_update(dsi);
+	}
+
+	return 0;
+
+err:
+	dsi_bus_unlock(dsi);
+	return r;
+}
+
+static int dsi_update_all(struct omap_dss_device *dssdev)
+{
+	return dsi_update_channel(dssdev, VC_VIDEO);
+}
+
 /* Display funcs */
 
 static int dsi_configure_dispc_clocks(struct dsi_data *dsi)
@@ -3983,12 +3317,12 @@ static int dsi_configure_dispc_clocks(struct dsi_data *dsi)
 	return 0;
 }
 
-static int dsi_display_init_dispc(struct dsi_data *dsi)
+static int dsi_init_dispc(struct dsi_data *dsi)
 {
-	enum omap_channel channel = dsi->output.dispc_channel;
+	enum omap_channel dispc_channel = dsi->output.dispc_channel;
 	int r;
 
-	dss_select_lcd_clk_source(dsi->dss, channel, dsi->module_id == 0 ?
+	dss_select_lcd_clk_source(dsi->dss, dispc_channel, dsi->module_id == 0 ?
 			DSS_CLK_SRC_PLL1_1 :
 			DSS_CLK_SRC_PLL2_1);
 
@@ -4013,7 +3347,7 @@ static int dsi_display_init_dispc(struct dsi_data *dsi)
 
 	dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
 	dsi->mgr_config.video_port_width =
-			dsi_get_pixel_size(dsi->pix_fmt);
+			mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
 	dsi->mgr_config.lcden_sig_polarity = 0;
 
 	dss_mgr_set_lcd_config(&dsi->output, &dsi->mgr_config);
@@ -4024,19 +3358,19 @@ static int dsi_display_init_dispc(struct dsi_data *dsi)
 		dss_mgr_unregister_framedone_handler(&dsi->output,
 				dsi_framedone_irq_callback, dsi);
 err:
-	dss_select_lcd_clk_source(dsi->dss, channel, DSS_CLK_SRC_FCK);
+	dss_select_lcd_clk_source(dsi->dss, dispc_channel, DSS_CLK_SRC_FCK);
 	return r;
 }
 
-static void dsi_display_uninit_dispc(struct dsi_data *dsi)
+static void dsi_uninit_dispc(struct dsi_data *dsi)
 {
-	enum omap_channel channel = dsi->output.dispc_channel;
+	enum omap_channel dispc_channel = dsi->output.dispc_channel;
 
 	if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
 		dss_mgr_unregister_framedone_handler(&dsi->output,
 				dsi_framedone_irq_callback, dsi);
 
-	dss_select_lcd_clk_source(dsi->dss, channel, DSS_CLK_SRC_FCK);
+	dss_select_lcd_clk_source(dsi->dss, dispc_channel, DSS_CLK_SRC_FCK);
 }
 
 static int dsi_configure_dsi_clocks(struct dsi_data *dsi)
@@ -4055,7 +3389,37 @@ static int dsi_configure_dsi_clocks(struct dsi_data *dsi)
 	return 0;
 }
 
-static int dsi_display_init_dsi(struct dsi_data *dsi)
+static void dsi_setup_dsi_vcs(struct dsi_data *dsi)
+{
+	/* Setup VC_CMD for LP and cpu transfers */
+	REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_CMD), 0, 9, 9); /* LP */
+
+	REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_CMD), 0, 1, 1); /* SOURCE_L4 */
+	dsi->vc[VC_CMD].source = DSI_VC_SOURCE_L4;
+
+	/* Setup VC_VIDEO for HS and dispc transfers */
+	REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 9, 9); /* HS */
+
+	REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 1, 1); /* SOURCE_VP */
+	dsi->vc[VC_VIDEO].source = DSI_VC_SOURCE_VP;
+
+	if ((dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) &&
+	    !(dsi->dsidev->mode_flags & MIPI_DSI_MODE_VIDEO))
+		REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 30, 30); /* DCS_CMD_ENABLE */
+
+	dsi_vc_enable(dsi, VC_CMD, 1);
+	dsi_vc_enable(dsi, VC_VIDEO, 1);
+
+	dsi_if_enable(dsi, 1);
+
+	dsi_force_tx_stop_mode_io(dsi);
+
+	/* start the DDR clock by sending a NULL packet */
+	if (!(dsi->dsidev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+		dsi_vc_send_null(dsi, VC_CMD, dsi->dsidev->channel);
+}
+
+static int dsi_init_dsi(struct dsi_data *dsi)
 {
 	int r;
 
@@ -4097,13 +3461,7 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
 	if (r)
 		goto err3;
 
-	/* enable interface */
-	dsi_vc_enable(dsi, 0, 1);
-	dsi_vc_enable(dsi, 1, 1);
-	dsi_vc_enable(dsi, 2, 1);
-	dsi_vc_enable(dsi, 3, 1);
-	dsi_if_enable(dsi, 1);
-	dsi_force_tx_stop_mode_io(dsi);
+	dsi_setup_dsi_vcs(dsi);
 
 	return 0;
 err3:
@@ -4119,12 +3477,8 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
 	return r;
 }
 
-static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
-				   bool enter_ulps)
+static void dsi_uninit_dsi(struct dsi_data *dsi)
 {
-	if (enter_ulps && !dsi->ulps_enabled)
-		dsi_enter_ulps(dsi);
-
 	/* disable interface */
 	dsi_if_enable(dsi, 0);
 	dsi_vc_enable(dsi, 0, 0);
@@ -4136,21 +3490,19 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
 	dsi_cio_uninit(dsi);
 	dss_pll_disable(&dsi->pll);
 
-	if (disconnect_lanes) {
-		regulator_disable(dsi->vdds_dsi_reg);
-		dsi->vdds_dsi_enabled = false;
-	}
+	regulator_disable(dsi->vdds_dsi_reg);
+	dsi->vdds_dsi_enabled = false;
 }
 
-static void dsi_display_enable(struct omap_dss_device *dssdev)
+static void dsi_enable(struct dsi_data *dsi)
 {
-	struct dsi_data *dsi = to_dsi_data(dssdev);
 	int r;
 
-	DSSDBG("dsi_display_enable\n");
-
 	WARN_ON(!dsi_bus_is_locked(dsi));
 
+	if (WARN_ON(dsi->iface_enabled))
+		return;
+
 	mutex_lock(&dsi->lock);
 
 	r = dsi_runtime_get(dsi);
@@ -4159,10 +3511,12 @@ static void dsi_display_enable(struct omap_dss_device *dssdev)
 
 	_dsi_initialize_irq(dsi);
 
-	r = dsi_display_init_dsi(dsi);
+	r = dsi_init_dsi(dsi);
 	if (r)
 		goto err_init_dsi;
 
+	dsi->iface_enabled = true;
+
 	mutex_unlock(&dsi->lock);
 
 	return;
@@ -4171,18 +3525,16 @@ static void dsi_display_enable(struct omap_dss_device *dssdev)
 	dsi_runtime_put(dsi);
 err_get_dsi:
 	mutex_unlock(&dsi->lock);
-	DSSDBG("dsi_display_enable FAILED\n");
+	DSSDBG("dsi_enable FAILED\n");
 }
 
-static void dsi_display_disable(struct omap_dss_device *dssdev,
-		bool disconnect_lanes, bool enter_ulps)
+static void dsi_disable(struct dsi_data *dsi)
 {
-	struct dsi_data *dsi = to_dsi_data(dssdev);
-
-	DSSDBG("dsi_display_disable\n");
-
 	WARN_ON(!dsi_bus_is_locked(dsi));
 
+	if (WARN_ON(!dsi->iface_enabled))
+		return;
+
 	mutex_lock(&dsi->lock);
 
 	dsi_sync_vc(dsi, 0);
@@ -4190,18 +3542,26 @@ static void dsi_display_disable(struct omap_dss_device *dssdev,
 	dsi_sync_vc(dsi, 2);
 	dsi_sync_vc(dsi, 3);
 
-	dsi_display_uninit_dsi(dsi, disconnect_lanes, enter_ulps);
+	dsi_uninit_dsi(dsi);
 
 	dsi_runtime_put(dsi);
 
+	dsi->iface_enabled = false;
+
 	mutex_unlock(&dsi->lock);
 }
 
-static int dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
+static int dsi_enable_te(struct dsi_data *dsi, bool enable)
 {
-	struct dsi_data *dsi = to_dsi_data(dssdev);
-
 	dsi->te_enabled = enable;
+
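+	/* keep the external TE interrupt enable in step with the TE state */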
+	if (dsi->te_gpio) {
+		if (enable)
+			enable_irq(dsi->te_irq);
+		else
+			disable_irq(dsi->te_irq);
+	}
+
 	return 0;
 }
 
@@ -4351,7 +3711,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
 	unsigned long pck, txbyteclk;
 
 	clkin = clk_get_rate(dsi->pll.clkin);
-	bitspp = dsi_get_pixel_size(cfg->pixel_format);
+	bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
 	ndl = dsi->num_lanes_used - 1;
 
 	/*
@@ -4384,7 +3744,7 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
 {
 	struct dsi_data *dsi = ctx->dsi;
 	const struct omap_dss_dsi_config *cfg = ctx->config;
-	int bitspp = dsi_get_pixel_size(cfg->pixel_format);
+	int bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
 	int ndl = dsi->num_lanes_used - 1;
 	unsigned long hsclk = ctx->dsi_cinfo.clkdco / 4;
 	unsigned long byteclk = hsclk / 4;
@@ -4531,7 +3891,6 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
 	dsi_vm->hfp_blanking_mode = 1;
 	dsi_vm->hbp_blanking_mode = 1;
 
-	dsi_vm->ddr_clk_always_on = cfg->ddr_clk_always_on;
 	dsi_vm->window_sync = 4;
 
 	/* setup DISPC videomode */
@@ -4651,7 +4010,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
 	unsigned long pll_min;
 	unsigned long pll_max;
 	int ndl = dsi->num_lanes_used - 1;
-	int bitspp = dsi_get_pixel_size(cfg->pixel_format);
+	int bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
 	unsigned long byteclk_min;
 
 	clkin = clk_get_rate(dsi->pll.clkin);
@@ -4684,39 +4043,62 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
 			dsi_vm_calc_pll_cb, ctx);
 }
 
-static int dsi_set_config(struct omap_dss_device *dssdev,
-		const struct omap_dss_dsi_config *config)
+static bool dsi_is_video_mode(struct omap_dss_device *dssdev)
 {
 	struct dsi_data *dsi = to_dsi_data(dssdev);
-	struct dsi_clk_calc_ctx ctx;
+
+	return dsi->mode == OMAP_DSS_DSI_VIDEO_MODE;
+}
+
+static int __dsi_calc_config(struct dsi_data *dsi,
+		const struct drm_display_mode *mode,
+		struct dsi_clk_calc_ctx *ctx)
+{
+	struct omap_dss_dsi_config cfg = dsi->config;
+	struct videomode vm;
 	bool ok;
 	int r;
 
-	mutex_lock(&dsi->lock);
+	drm_display_mode_to_videomode(mode, &vm);
 
-	dsi->pix_fmt = config->pixel_format;
-	dsi->mode = config->mode;
+	cfg.vm = &vm;
+	cfg.mode = dsi->mode;
+	cfg.pixel_format = dsi->pix_fmt;
 
-	if (config->mode == OMAP_DSS_DSI_VIDEO_MODE)
-		ok = dsi_vm_calc(dsi, config, &ctx);
+	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE)
+		ok = dsi_vm_calc(dsi, &cfg, ctx);
 	else
-		ok = dsi_cm_calc(dsi, config, &ctx);
+		ok = dsi_cm_calc(dsi, &cfg, ctx);
 
-	if (!ok) {
-		DSSERR("failed to find suitable DSI clock settings\n");
-		r = -EINVAL;
-		goto err;
-	}
+	if (!ok)
+		return -EINVAL;
+
+	dsi_pll_calc_dsi_fck(dsi, &ctx->dsi_cinfo);
+
+	r = dsi_lp_clock_calc(ctx->dsi_cinfo.clkout[HSDIV_DSI],
+		cfg.lp_clk_min, cfg.lp_clk_max, &ctx->lp_cinfo);
+	if (r)
+		return r;
 
-	dsi_pll_calc_dsi_fck(dsi, &ctx.dsi_cinfo);
+	return 0;
+}
 
-	r = dsi_lp_clock_calc(ctx.dsi_cinfo.clkout[HSDIV_DSI],
-		config->lp_clk_min, config->lp_clk_max, &dsi->user_lp_cinfo);
+static int dsi_set_config(struct omap_dss_device *dssdev,
+		const struct drm_display_mode *mode)
+{
+	struct dsi_data *dsi = to_dsi_data(dssdev);
+	struct dsi_clk_calc_ctx ctx;
+	int r;
+
+	mutex_lock(&dsi->lock);
+
+	r = __dsi_calc_config(dsi, mode, &ctx);
 	if (r) {
-		DSSERR("failed to find suitable DSI LP clock settings\n");
+		DSSERR("failed to find suitable DSI clock settings\n");
 		goto err;
 	}
 
+	dsi->user_lp_cinfo = ctx.lp_cinfo;
 	dsi->user_dsi_cinfo = ctx.dsi_cinfo;
 	dsi->user_dispc_cinfo = ctx.dispc_cinfo;
 
@@ -4757,12 +4139,12 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
 }
 
 /*
- * Return a hardcoded channel for the DSI output. This should work for
+ * Return a hardcoded dispc channel for the DSI output. This should work for
  * current use cases, but this can later be expanded to either resolve
  * the channel in some more dynamic manner, or get the channel as a user
  * parameter.
  */
-static enum omap_channel dsi_get_channel(struct dsi_data *dsi)
+static enum omap_channel dsi_get_dispc_channel(struct dsi_data *dsi)
 {
 	switch (dsi->data->model) {
 	case DSI_MODEL_OMAP3:
@@ -4796,59 +4178,75 @@ static enum omap_channel dsi_get_channel(struct dsi_data *dsi)
 	}
 }
 
-static int dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
+static ssize_t _omap_dsi_host_transfer(struct dsi_data *dsi, int vc,
+				       const struct mipi_dsi_msg *msg)
 {
-	struct dsi_data *dsi = to_dsi_data(dssdev);
-	int i;
+	struct omap_dss_device *dssdev = &dsi->output;
+	int r;
 
-	for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
-		if (!dsi->vc[i].dssdev) {
-			dsi->vc[i].dssdev = dssdev;
-			*channel = i;
-			return 0;
-		}
+	dsi_vc_enable_hs(dssdev, vc, !(msg->flags & MIPI_DSI_MSG_USE_LPM));
+
+	switch (msg->type) {
+	case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+	case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+	case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+	case MIPI_DSI_GENERIC_LONG_WRITE:
+	case MIPI_DSI_DCS_SHORT_WRITE:
+	case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+	case MIPI_DSI_DCS_LONG_WRITE:
+	case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
+	case MIPI_DSI_NULL_PACKET:
+		r = dsi_vc_write_common(dssdev, vc, msg);
+		break;
+	case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+	case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+	case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+		r = dsi_vc_generic_read(dssdev, vc, msg);
+		break;
+	case MIPI_DSI_DCS_READ:
+		r = dsi_vc_dcs_read(dssdev, vc, msg);
+		break;
+	default:
+		r = -EINVAL;
+		break;
 	}
 
-	DSSERR("cannot get VC for display %s", dssdev->name);
-	return -ENOSPC;
-}
-
-static int dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
-{
-	struct dsi_data *dsi = to_dsi_data(dssdev);
-
-	if (vc_id < 0 || vc_id > 3) {
-		DSSERR("VC ID out of range\n");
-		return -EINVAL;
-	}
+	if (r < 0)
+		return r;
 
-	if (channel < 0 || channel > 3) {
-		DSSERR("Virtual Channel out of range\n");
-		return -EINVAL;
-	}
+	if (msg->type == MIPI_DSI_DCS_SHORT_WRITE ||
+	    msg->type == MIPI_DSI_DCS_SHORT_WRITE_PARAM) {
+		u8 cmd = ((u8 *)msg->tx_buf)[0];
 
-	if (dsi->vc[channel].dssdev != dssdev) {
-		DSSERR("Virtual Channel not allocated to display %s\n",
-			dssdev->name);
-		return -EINVAL;
+		if (cmd == MIPI_DCS_SET_TEAR_OFF)
+			dsi_enable_te(dsi, false);
+		else if (cmd == MIPI_DCS_SET_TEAR_ON)
+			dsi_enable_te(dsi, true);
 	}
 
-	dsi->vc[channel].vc_id = vc_id;
-
 	return 0;
 }
 
-static void dsi_release_vc(struct omap_dss_device *dssdev, int channel)
+static ssize_t omap_dsi_host_transfer(struct mipi_dsi_host *host,
+				      const struct mipi_dsi_msg *msg)
 {
-	struct dsi_data *dsi = to_dsi_data(dssdev);
+	struct dsi_data *dsi = host_to_omap(host);
+	int r;
+	int vc = VC_CMD;
+
+	dsi_bus_lock(dsi);
 
-	if ((channel >= 0 && channel <= 3) &&
-		dsi->vc[channel].dssdev == dssdev) {
-		dsi->vc[channel].dssdev = NULL;
-		dsi->vc[channel].vc_id = 0;
+	if (!dsi->iface_enabled) {
+		dsi_enable(dsi);
+		schedule_delayed_work(&dsi->dsi_disable_work, msecs_to_jiffies(2000));
 	}
-}
 
+	r = _omap_dsi_host_transfer(dsi, vc, msg);
+
+	dsi_bus_unlock(dsi);
+
+	return r;
+}
 
 static int dsi_get_clocks(struct dsi_data *dsi)
 {
@@ -4865,57 +4263,167 @@ static int dsi_get_clocks(struct dsi_data *dsi)
 	return 0;
 }
 
-static int dsi_connect(struct omap_dss_device *src,
-		       struct omap_dss_device *dst)
+static const struct omapdss_dsi_ops dsi_ops = {
+	.update = dsi_update_all,
+	.is_video_mode = dsi_is_video_mode,
+};
+
+static irqreturn_t omap_dsi_te_irq_handler(int irq, void *dev_id)
 {
-	return omapdss_device_connect(dst->dss, dst, dst->next);
+	struct dsi_data *dsi = (struct dsi_data *)dev_id;
+	int old;
+
+	old = atomic_cmpxchg(&dsi->do_ext_te_update, 1, 0);
+	if (old) {
+		cancel_delayed_work(&dsi->te_timeout_work);
+		_dsi_update(dsi);
+	}
+
+	return IRQ_HANDLED;
 }
 
-static void dsi_disconnect(struct omap_dss_device *src,
-			   struct omap_dss_device *dst)
+static void omap_dsi_te_timeout_work_callback(struct work_struct *work)
 {
-	omapdss_device_disconnect(dst, dst->next);
+	struct dsi_data *dsi =
+		container_of(work, struct dsi_data, te_timeout_work.work);
+	int old;
+
+	old = atomic_cmpxchg(&dsi->do_ext_te_update, 1, 0);
+	if (old) {
+		dev_err(dsi->dev, "TE not received for 250ms!\n");
+		_dsi_update(dsi);
+	}
 }
 
-static const struct omap_dss_device_ops dsi_ops = {
-	.connect = dsi_connect,
-	.disconnect = dsi_disconnect,
-	.enable = dsi_display_enable,
+static int omap_dsi_register_te_irq(struct dsi_data *dsi,
+				    struct mipi_dsi_device *client)
+{
+	int err;
+	int te_irq;
 
-	.dsi = {
-		.bus_lock = dsi_bus_lock,
-		.bus_unlock = dsi_bus_unlock,
+	dsi->te_gpio = gpiod_get(&client->dev, "te-gpios", GPIOD_IN);
+	if (IS_ERR(dsi->te_gpio)) {
+		err = PTR_ERR(dsi->te_gpio);
 
-		.disable = dsi_display_disable,
+		if (err == -ENOENT) {
+			dsi->te_gpio = NULL;
+			return 0;
+		}
 
-		.enable_hs = dsi_vc_enable_hs,
+		dev_err(dsi->dev, "Could not get TE gpio: %d\n", err);
+		return err;
+	}
 
-		.configure_pins = dsi_configure_pins,
-		.set_config = dsi_set_config,
+	te_irq = gpiod_to_irq(dsi->te_gpio);
+	if (te_irq < 0) {
+		gpiod_put(dsi->te_gpio);
+		dsi->te_gpio = NULL;
+		return -EINVAL;
+	}
 
-		.enable_video_output = dsi_enable_video_output,
-		.disable_video_output = dsi_disable_video_output,
+	dsi->te_irq = te_irq;
 
-		.update = dsi_update,
+	irq_set_status_flags(te_irq, IRQ_NOAUTOEN);
 
-		.enable_te = dsi_enable_te,
+	err = request_threaded_irq(te_irq, NULL, omap_dsi_te_irq_handler,
+				   IRQF_TRIGGER_RISING, "TE", dsi);
+	if (err) {
+		dev_err(dsi->dev, "request irq failed with %d\n", err);
+		gpiod_put(dsi->te_gpio);
+		dsi->te_gpio = NULL;
+		return err;
+	}
 
-		.request_vc = dsi_request_vc,
-		.set_vc_id = dsi_set_vc_id,
-		.release_vc = dsi_release_vc,
+	INIT_DEFERRABLE_WORK(&dsi->te_timeout_work,
+			     omap_dsi_te_timeout_work_callback);
 
-		.dcs_write = dsi_vc_dcs_write,
-		.dcs_write_nosync = dsi_vc_dcs_write_nosync,
-		.dcs_read = dsi_vc_dcs_read,
+	dev_dbg(dsi->dev, "Using GPIO TE\n");
 
-		.gen_write = dsi_vc_generic_write,
-		.gen_write_nosync = dsi_vc_generic_write_nosync,
-		.gen_read = dsi_vc_generic_read,
+	return 0;
+}
 
-		.bta_sync = dsi_vc_send_bta_sync,
+static void omap_dsi_unregister_te_irq(struct dsi_data *dsi)
+{
+	if (dsi->te_gpio) {
+		free_irq(dsi->te_irq, dsi);
+		cancel_delayed_work(&dsi->te_timeout_work);
+		gpiod_put(dsi->te_gpio);
+		dsi->te_gpio = NULL;
+	}
+}
 
-		.set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size,
-	},
+static int omap_dsi_host_attach(struct mipi_dsi_host *host,
+				struct mipi_dsi_device *client)
+{
+	struct dsi_data *dsi = host_to_omap(host);
+	int r;
+
+	if (dsi->dsidev) {
+		DSSERR("dsi client already attached\n");
+		return -EBUSY;
+	}
+
+	if (mipi_dsi_pixel_format_to_bpp(client->format) < 0) {
+		DSSERR("invalid pixel format\n");
+		return -EINVAL;
+	}
+
+	atomic_set(&dsi->do_ext_te_update, 0);
+
+	if (client->mode_flags & MIPI_DSI_MODE_VIDEO) {
+		dsi->mode = OMAP_DSS_DSI_VIDEO_MODE;
+	} else {
+		r = omap_dsi_register_te_irq(dsi, client);
+		if (r)
+			return r;
+
+		dsi->mode = OMAP_DSS_DSI_CMD_MODE;
+	}
+
+	dsi->dsidev = client;
+	dsi->pix_fmt = client->format;
+
+	dsi->config.hs_clk_min = 150000000; // TODO: get from client?
+	dsi->config.hs_clk_max = client->hs_rate;
+	dsi->config.lp_clk_min = 7000000; // TODO: get from client?
+	dsi->config.lp_clk_max = client->lp_rate;
+
+	if (client->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+		dsi->config.trans_mode = OMAP_DSS_DSI_BURST_MODE;
+	else if (client->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+		dsi->config.trans_mode = OMAP_DSS_DSI_PULSE_MODE;
+	else
+		dsi->config.trans_mode = OMAP_DSS_DSI_EVENT_MODE;
+
+	return 0;
+}
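
For reference, the attach hook consumes what a peripheral fills in before
calling mipi_dsi_attach(); a hypothetical client would describe itself
roughly as below (lane count and clock rates are illustrative, not taken
from this patch):

static int example_panel_probe(struct mipi_dsi_device *dsi)
{
	/* hs_rate/lp_rate feed config.hs_clk_max/lp_clk_max above */
	dsi->lanes = 3;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
	dsi->hs_rate = 300000000;
	dsi->lp_rate = 10000000;

	return mipi_dsi_attach(dsi);
}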
+
+static int omap_dsi_host_detach(struct mipi_dsi_host *host,
+				struct mipi_dsi_device *client)
+{
+	struct dsi_data *dsi = host_to_omap(host);
+
+	if (WARN_ON(dsi->dsidev != client))
+		return -EINVAL;
+
+	cancel_delayed_work_sync(&dsi->dsi_disable_work);
+
+	dsi_bus_lock(dsi);
+
+	if (dsi->iface_enabled)
+		dsi_disable(dsi);
+
+	dsi_bus_unlock(dsi);
+
+	omap_dsi_unregister_te_irq(dsi);
+	dsi->dsidev = NULL;
+	return 0;
+}
+
+static const struct mipi_dsi_host_ops omap_dsi_host_ops = {
+	.attach = omap_dsi_host_attach,
+	.detach = omap_dsi_host_detach,
+	.transfer = omap_dsi_host_transfer,
 };
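
With the omapdss-specific op table gone, peripherals reach this host only
through the stock drm_mipi_dsi helpers, all of which land in the .transfer
hook above. A minimal sketch of the client side (hypothetical panel init,
made-up payload bytes):

#include <drm/drm_mipi_dsi.h>

static int example_panel_init(struct mipi_dsi_device *dsi)
{
	static const u8 seq[] = { 0xb0, 0x04 };	/* made-up payload */
	ssize_t r;

	r = mipi_dsi_generic_write(dsi, seq, sizeof(seq));
	if (r < 0)
		return r;

	/* a DCS set_tear_on short write also flips the host's TE state */
	r = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (r < 0)
		return r;

	return mipi_dsi_dcs_exit_sleep_mode(dsi);
}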
 
 /* -----------------------------------------------------------------------------
@@ -5096,6 +4604,106 @@ static const struct component_ops dsi_component_ops = {
 	.unbind	= dsi_unbind,
 };
 
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int dsi_bridge_attach(struct drm_bridge *bridge,
+			     enum drm_bridge_attach_flags flags)
+{
+	struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+
+	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+		return -EINVAL;
+
+	return drm_bridge_attach(bridge->encoder, dsi->output.next_bridge,
+				 bridge, flags);
+}
+
+static enum drm_mode_status
+dsi_bridge_mode_valid(struct drm_bridge *bridge,
+		      const struct drm_display_info *info,
+		      const struct drm_display_mode *mode)
+{
+	struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+	struct dsi_clk_calc_ctx ctx;
+	int r;
+
+	mutex_lock(&dsi->lock);
+	r = __dsi_calc_config(dsi, mode, &ctx);
+	mutex_unlock(&dsi->lock);
+
+	return r ? MODE_CLOCK_RANGE : MODE_OK;
+}
+
+static void dsi_bridge_mode_set(struct drm_bridge *bridge,
+				const struct drm_display_mode *mode,
+				const struct drm_display_mode *adjusted_mode)
+{
+	struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+
+	dsi_set_config(&dsi->output, adjusted_mode);
+}
+
+static void dsi_bridge_enable(struct drm_bridge *bridge)
+{
+	struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+	struct omap_dss_device *dssdev = &dsi->output;
+
+	cancel_delayed_work_sync(&dsi->dsi_disable_work);
+
+	dsi_bus_lock(dsi);
+
+	if (!dsi->iface_enabled)
+		dsi_enable(dsi);
+
+	dsi_enable_video_output(dssdev, VC_VIDEO);
+
+	dsi->video_enabled = true;
+
+	dsi_bus_unlock(dsi);
+}
+
+static void dsi_bridge_disable(struct drm_bridge *bridge)
+{
+	struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+	struct omap_dss_device *dssdev = &dsi->output;
+
+	cancel_delayed_work_sync(&dsi->dsi_disable_work);
+
+	dsi_bus_lock(dsi);
+
+	dsi->video_enabled = false;
+
+	dsi_disable_video_output(dssdev, VC_VIDEO);
+
+	dsi_disable(dsi);
+
+	dsi_bus_unlock(dsi);
+}
+
+static const struct drm_bridge_funcs dsi_bridge_funcs = {
+	.attach = dsi_bridge_attach,
+	.mode_valid = dsi_bridge_mode_valid,
+	.mode_set = dsi_bridge_mode_set,
+	.enable = dsi_bridge_enable,
+	.disable = dsi_bridge_disable,
+};
+
+static void dsi_bridge_init(struct dsi_data *dsi)
+{
+	dsi->bridge.funcs = &dsi_bridge_funcs;
+	dsi->bridge.of_node = dsi->host.dev->of_node;
+	dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
+
+	drm_bridge_add(&dsi->bridge);
+}
+
+static void dsi_bridge_cleanup(struct dsi_data *dsi)
+{
+	drm_bridge_remove(&dsi->bridge);
+}
+
 /* -----------------------------------------------------------------------------
  * Probe & Remove, Suspend & Resume
  */
@@ -5105,23 +4713,26 @@ static int dsi_init_output(struct dsi_data *dsi)
 	struct omap_dss_device *out = &dsi->output;
 	int r;
 
+	dsi_bridge_init(dsi);
+
 	out->dev = dsi->dev;
 	out->id = dsi->module_id == 0 ?
 			OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
 
 	out->type = OMAP_DISPLAY_TYPE_DSI;
 	out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
-	out->dispc_channel = dsi_get_channel(dsi);
-	out->ops = &dsi_ops;
-	out->owner = THIS_MODULE;
+	out->dispc_channel = dsi_get_dispc_channel(dsi);
+	out->dsi_ops = &dsi_ops;
 	out->of_port = 0;
 	out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE
 		       | DRM_BUS_FLAG_DE_HIGH
 		       | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
 
-	r = omapdss_device_init_output(out, NULL);
-	if (r < 0)
+	r = omapdss_device_init_output(out, &dsi->bridge);
+	if (r < 0) {
+		dsi_bridge_cleanup(dsi);
 		return r;
+	}
 
 	omapdss_device_register(out);
 
@@ -5134,6 +4745,7 @@ static void dsi_uninit_output(struct dsi_data *dsi)
 
 	omapdss_device_unregister(out);
 	omapdss_device_cleanup_output(out);
+	dsi_bridge_cleanup(dsi);
 }
 
 static int dsi_probe_of(struct dsi_data *dsi)
@@ -5142,9 +4754,8 @@ static int dsi_probe_of(struct dsi_data *dsi)
 	struct property *prop;
 	u32 lane_arr[10];
 	int len, num_pins;
-	int r, i;
+	int r;
 	struct device_node *ep;
-	struct omap_dsi_pin_config pin_cfg;
 
 	ep = of_graph_get_endpoint_by_regs(node, 0, 0);
 	if (!ep)
@@ -5172,11 +4783,7 @@ static int dsi_probe_of(struct dsi_data *dsi)
 		goto err;
 	}
 
-	pin_cfg.num_pins = num_pins;
-	for (i = 0; i < num_pins; ++i)
-		pin_cfg.pins[i] = (int)lane_arr[i];
-
-	r = dsi_configure_pins(&dsi->output, &pin_cfg);
+	r = dsi_configure_pins(dsi, num_pins, lane_arr);
 	if (r) {
 		dev_err(dsi->dev, "failed to configure pins");
 		goto err;
@@ -5256,6 +4863,18 @@ static const struct soc_device_attribute dsi_soc_devices[] = {
 	{ /* sentinel */ }
 };
 
+static void omap_dsi_disable_work_callback(struct work_struct *work)
+{
+	struct dsi_data *dsi = container_of(work, struct dsi_data, dsi_disable_work.work);
+
+	dsi_bus_lock(dsi);
+
+	if (dsi->iface_enabled && !dsi->video_enabled)
+		dsi_disable(dsi);
+
+	dsi_bus_unlock(dsi);
+}
+
 static int dsi_probe(struct platform_device *pdev)
 {
 	const struct soc_device_attribute *soc;
@@ -5289,6 +4908,8 @@ static int dsi_probe(struct platform_device *pdev)
 	INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work,
 			     dsi_framedone_timeout_work_callback);
 
+	INIT_DEFERRABLE_WORK(&dsi->dsi_disable_work, omap_dsi_disable_work_callback);
+
 #ifdef DSI_CATCH_MISSING_TE
 	timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
 #endif
@@ -5364,11 +4985,8 @@ static int dsi_probe(struct platform_device *pdev)
 	}
 
 	/* DSI VCs initialization */
-	for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
+	for (i = 0; i < ARRAY_SIZE(dsi->vc); i++)
 		dsi->vc[i].source = DSI_VC_SOURCE_L4;
-		dsi->vc[i].dssdev = NULL;
-		dsi->vc[i].vc_id = 0;
-	}
 
 	r = dsi_get_clocks(dsi);
 	if (r)
@@ -5387,21 +5005,24 @@ static int dsi_probe(struct platform_device *pdev)
 		dsi->num_lanes_supported = 3;
 	}
 
-	r = of_platform_populate(dev->of_node, NULL, NULL, dev);
+	dsi->host.ops = &omap_dsi_host_ops;
+	dsi->host.dev = &pdev->dev;
+
+	r = dsi_probe_of(dsi);
 	if (r) {
-		DSSERR("Failed to populate DSI child devices: %d\n", r);
+		DSSERR("Invalid DSI DT data\n");
+		goto err_pm_disable;
+	}
+
+	r = mipi_dsi_host_register(&dsi->host);
+	if (r < 0) {
+		dev_err(&pdev->dev, "failed to register DSI host: %d\n", r);
 		goto err_pm_disable;
 	}
 
 	r = dsi_init_output(dsi);
 	if (r)
-		goto err_of_depopulate;
-
-	r = dsi_probe_of(dsi);
-	if (r) {
-		DSSERR("Invalid DSI DT data\n");
-		goto err_uninit_output;
-	}
+		goto err_dsi_host_unregister;
 
 	r = component_add(&pdev->dev, &dsi_component_ops);
 	if (r)
@@ -5411,8 +5032,8 @@ static int dsi_probe(struct platform_device *pdev)
 
 err_uninit_output:
 	dsi_uninit_output(dsi);
-err_of_depopulate:
-	of_platform_depopulate(dev);
+err_dsi_host_unregister:
+	mipi_dsi_host_unregister(&dsi->host);
 err_pm_disable:
 	pm_runtime_disable(dev);
 	return r;
@@ -5426,7 +5047,7 @@ static int dsi_remove(struct platform_device *pdev)
 
 	dsi_uninit_output(dsi);
 
-	of_platform_depopulate(&pdev->dev);
+	mipi_dsi_host_unregister(&dsi->host);
 
 	pm_runtime_disable(&pdev->dev);
 
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.h b/drivers/gpu/drm/omapdrm/dss/dsi.h
new file mode 100644
index 0000000000000000000000000000000000000000..601707c0ecc4e886a8e5cecb20c6ac74d4180927
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.h
@@ -0,0 +1,456 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __OMAP_DRM_DSS_DSI_H
+#define __OMAP_DRM_DSS_DSI_H
+
+#include <drm/drm_mipi_dsi.h>
+
+struct dsi_reg {
+	u16 module;
+	u16 idx;
+};
+
+#define DSI_REG(mod, idx)		((const struct dsi_reg) { mod, idx })
+
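
The (module, offset) encoding lets a single accessor serve all three
register blocks; the dsi_read_reg()/dsi_write_reg() accessors in dsi.c
dispatch on the module index along these lines (sketch):

static inline u32 dsi_read_reg(struct dsi_data *dsi, const struct dsi_reg idx)
{
	void __iomem *base;

	switch (idx.module) {
	case DSI_PROTO: base = dsi->proto_base; break;
	case DSI_PHY:   base = dsi->phy_base; break;
	default:        base = dsi->pll_base; break;	/* DSI_PLL */
	}

	return readl(base + idx.idx);
}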
+/* DSI Protocol Engine */
+
+#define DSI_PROTO			0
+#define DSI_PROTO_SZ			0x200
+
+#define DSI_REVISION			DSI_REG(DSI_PROTO, 0x0000)
+#define DSI_SYSCONFIG			DSI_REG(DSI_PROTO, 0x0010)
+#define DSI_SYSSTATUS			DSI_REG(DSI_PROTO, 0x0014)
+#define DSI_IRQSTATUS			DSI_REG(DSI_PROTO, 0x0018)
+#define DSI_IRQENABLE			DSI_REG(DSI_PROTO, 0x001C)
+#define DSI_CTRL			DSI_REG(DSI_PROTO, 0x0040)
+#define DSI_GNQ				DSI_REG(DSI_PROTO, 0x0044)
+#define DSI_COMPLEXIO_CFG1		DSI_REG(DSI_PROTO, 0x0048)
+#define DSI_COMPLEXIO_IRQ_STATUS	DSI_REG(DSI_PROTO, 0x004C)
+#define DSI_COMPLEXIO_IRQ_ENABLE	DSI_REG(DSI_PROTO, 0x0050)
+#define DSI_CLK_CTRL			DSI_REG(DSI_PROTO, 0x0054)
+#define DSI_TIMING1			DSI_REG(DSI_PROTO, 0x0058)
+#define DSI_TIMING2			DSI_REG(DSI_PROTO, 0x005C)
+#define DSI_VM_TIMING1			DSI_REG(DSI_PROTO, 0x0060)
+#define DSI_VM_TIMING2			DSI_REG(DSI_PROTO, 0x0064)
+#define DSI_VM_TIMING3			DSI_REG(DSI_PROTO, 0x0068)
+#define DSI_CLK_TIMING			DSI_REG(DSI_PROTO, 0x006C)
+#define DSI_TX_FIFO_VC_SIZE		DSI_REG(DSI_PROTO, 0x0070)
+#define DSI_RX_FIFO_VC_SIZE		DSI_REG(DSI_PROTO, 0x0074)
+#define DSI_COMPLEXIO_CFG2		DSI_REG(DSI_PROTO, 0x0078)
+#define DSI_RX_FIFO_VC_FULLNESS		DSI_REG(DSI_PROTO, 0x007C)
+#define DSI_VM_TIMING4			DSI_REG(DSI_PROTO, 0x0080)
+#define DSI_TX_FIFO_VC_EMPTINESS	DSI_REG(DSI_PROTO, 0x0084)
+#define DSI_VM_TIMING5			DSI_REG(DSI_PROTO, 0x0088)
+#define DSI_VM_TIMING6			DSI_REG(DSI_PROTO, 0x008C)
+#define DSI_VM_TIMING7			DSI_REG(DSI_PROTO, 0x0090)
+#define DSI_STOPCLK_TIMING		DSI_REG(DSI_PROTO, 0x0094)
+#define DSI_VC_CTRL(n)			DSI_REG(DSI_PROTO, 0x0100 + ((n) * 0x20))
+#define DSI_VC_TE(n)			DSI_REG(DSI_PROTO, 0x0104 + ((n) * 0x20))
+#define DSI_VC_LONG_PACKET_HEADER(n)	DSI_REG(DSI_PROTO, 0x0108 + ((n) * 0x20))
+#define DSI_VC_LONG_PACKET_PAYLOAD(n)	DSI_REG(DSI_PROTO, 0x010C + ((n) * 0x20))
+#define DSI_VC_SHORT_PACKET_HEADER(n)	DSI_REG(DSI_PROTO, 0x0110 + ((n) * 0x20))
+#define DSI_VC_IRQSTATUS(n)		DSI_REG(DSI_PROTO, 0x0118 + ((n) * 0x20))
+#define DSI_VC_IRQENABLE(n)		DSI_REG(DSI_PROTO, 0x011C + ((n) * 0x20))
+
+/* DSIPHY_SCP */
+
+#define DSI_PHY				1
+#define DSI_PHY_OFFSET			0x200
+#define DSI_PHY_SZ			0x40
+
+#define DSI_DSIPHY_CFG0			DSI_REG(DSI_PHY, 0x0000)
+#define DSI_DSIPHY_CFG1			DSI_REG(DSI_PHY, 0x0004)
+#define DSI_DSIPHY_CFG2			DSI_REG(DSI_PHY, 0x0008)
+#define DSI_DSIPHY_CFG5			DSI_REG(DSI_PHY, 0x0014)
+#define DSI_DSIPHY_CFG10		DSI_REG(DSI_PHY, 0x0028)
+
+/* DSI_PLL_CTRL_SCP */
+
+#define DSI_PLL				2
+#define DSI_PLL_OFFSET			0x300
+#define DSI_PLL_SZ			0x20
+
+#define DSI_PLL_CONTROL			DSI_REG(DSI_PLL, 0x0000)
+#define DSI_PLL_STATUS			DSI_REG(DSI_PLL, 0x0004)
+#define DSI_PLL_GO			DSI_REG(DSI_PLL, 0x0008)
+#define DSI_PLL_CONFIGURATION1		DSI_REG(DSI_PLL, 0x000C)
+#define DSI_PLL_CONFIGURATION2		DSI_REG(DSI_PLL, 0x0010)
+
+/* Global interrupts */
+#define DSI_IRQ_VC0		(1 << 0)
+#define DSI_IRQ_VC1		(1 << 1)
+#define DSI_IRQ_VC2		(1 << 2)
+#define DSI_IRQ_VC3		(1 << 3)
+#define DSI_IRQ_WAKEUP		(1 << 4)
+#define DSI_IRQ_RESYNC		(1 << 5)
+#define DSI_IRQ_PLL_LOCK	(1 << 7)
+#define DSI_IRQ_PLL_UNLOCK	(1 << 8)
+#define DSI_IRQ_PLL_RECALL	(1 << 9)
+#define DSI_IRQ_COMPLEXIO_ERR	(1 << 10)
+#define DSI_IRQ_HS_TX_TIMEOUT	(1 << 14)
+#define DSI_IRQ_LP_RX_TIMEOUT	(1 << 15)
+#define DSI_IRQ_TE_TRIGGER	(1 << 16)
+#define DSI_IRQ_ACK_TRIGGER	(1 << 17)
+#define DSI_IRQ_SYNC_LOST	(1 << 18)
+#define DSI_IRQ_LDO_POWER_GOOD	(1 << 19)
+#define DSI_IRQ_TA_TIMEOUT	(1 << 20)
+#define DSI_IRQ_ERROR_MASK \
+	(DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
+	DSI_IRQ_TA_TIMEOUT)
+#define DSI_IRQ_CHANNEL_MASK	0xf
+
+/* Virtual channel interrupts */
+#define DSI_VC_IRQ_CS		(1 << 0)
+#define DSI_VC_IRQ_ECC_CORR	(1 << 1)
+#define DSI_VC_IRQ_PACKET_SENT	(1 << 2)
+#define DSI_VC_IRQ_FIFO_TX_OVF	(1 << 3)
+#define DSI_VC_IRQ_FIFO_RX_OVF	(1 << 4)
+#define DSI_VC_IRQ_BTA		(1 << 5)
+#define DSI_VC_IRQ_ECC_NO_CORR	(1 << 6)
+#define DSI_VC_IRQ_FIFO_TX_UDF	(1 << 7)
+#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
+#define DSI_VC_IRQ_ERROR_MASK \
+	(DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
+	DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
+	DSI_VC_IRQ_FIFO_TX_UDF)
+
+/* ComplexIO interrupts */
+#define DSI_CIO_IRQ_ERRSYNCESC1		(1 << 0)
+#define DSI_CIO_IRQ_ERRSYNCESC2		(1 << 1)
+#define DSI_CIO_IRQ_ERRSYNCESC3		(1 << 2)
+#define DSI_CIO_IRQ_ERRSYNCESC4		(1 << 3)
+#define DSI_CIO_IRQ_ERRSYNCESC5		(1 << 4)
+#define DSI_CIO_IRQ_ERRESC1		(1 << 5)
+#define DSI_CIO_IRQ_ERRESC2		(1 << 6)
+#define DSI_CIO_IRQ_ERRESC3		(1 << 7)
+#define DSI_CIO_IRQ_ERRESC4		(1 << 8)
+#define DSI_CIO_IRQ_ERRESC5		(1 << 9)
+#define DSI_CIO_IRQ_ERRCONTROL1		(1 << 10)
+#define DSI_CIO_IRQ_ERRCONTROL2		(1 << 11)
+#define DSI_CIO_IRQ_ERRCONTROL3		(1 << 12)
+#define DSI_CIO_IRQ_ERRCONTROL4		(1 << 13)
+#define DSI_CIO_IRQ_ERRCONTROL5		(1 << 14)
+#define DSI_CIO_IRQ_STATEULPS1		(1 << 15)
+#define DSI_CIO_IRQ_STATEULPS2		(1 << 16)
+#define DSI_CIO_IRQ_STATEULPS3		(1 << 17)
+#define DSI_CIO_IRQ_STATEULPS4		(1 << 18)
+#define DSI_CIO_IRQ_STATEULPS5		(1 << 19)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1	(1 << 20)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1	(1 << 21)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2	(1 << 22)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2	(1 << 23)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3	(1 << 24)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3	(1 << 25)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4	(1 << 26)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4	(1 << 27)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5	(1 << 28)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5	(1 << 29)
+#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0	(1 << 30)
+#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1	(1 << 31)
+#define DSI_CIO_IRQ_ERROR_MASK \
+	(DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
+	 DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
+	 DSI_CIO_IRQ_ERRSYNCESC5 | \
+	 DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
+	 DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
+	 DSI_CIO_IRQ_ERRESC5 | \
+	 DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
+	 DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
+	 DSI_CIO_IRQ_ERRCONTROL5 | \
+	 DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
+	 DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
+	 DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
+	 DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
+	 DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
+
+enum omap_dss_dsi_mode {
+	OMAP_DSS_DSI_CMD_MODE = 0,
+	OMAP_DSS_DSI_VIDEO_MODE,
+};
+
+enum omap_dss_dsi_trans_mode {
+	/* Sync Pulses: both sync start and end packets sent */
+	OMAP_DSS_DSI_PULSE_MODE,
+	/* Sync Events: only sync start packets sent */
+	OMAP_DSS_DSI_EVENT_MODE,
+	/* Burst: only sync start packets sent, pixels are time compressed */
+	OMAP_DSS_DSI_BURST_MODE,
+};
+
+struct omap_dss_dsi_videomode_timings {
+	unsigned long hsclk;
+
+	unsigned int ndl;
+	unsigned int bitspp;
+
+	/* pixels */
+	u16 hact;
+	/* lines */
+	u16 vact;
+
+	/* DSI video mode blanking data */
+	/* Unit: byte clock cycles */
+	u16 hss;
+	u16 hsa;
+	u16 hse;
+	u16 hfp;
+	u16 hbp;
+	/* Unit: line clocks */
+	u16 vsa;
+	u16 vfp;
+	u16 vbp;
+
+	/* DSI blanking modes */
+	int blanking_mode;
+	int hsa_blanking_mode;
+	int hbp_blanking_mode;
+	int hfp_blanking_mode;
+
+	enum omap_dss_dsi_trans_mode trans_mode;
+
+	int window_sync;
+};
+
+struct omap_dss_dsi_config {
+	enum omap_dss_dsi_mode mode;
+	enum mipi_dsi_pixel_format pixel_format;
+	const struct videomode *vm;
+
+	unsigned long hs_clk_min, hs_clk_max;
+	unsigned long lp_clk_min, lp_clk_max;
+
+	enum omap_dss_dsi_trans_mode trans_mode;
+};
+
+/* DSI PLL HSDIV indices */
+#define HSDIV_DISPC	0
+#define HSDIV_DSI	1
+
+#define DSI_MAX_NR_ISRS		2
+#define DSI_MAX_NR_LANES	5
+
+enum dsi_model {
+	DSI_MODEL_OMAP3,
+	DSI_MODEL_OMAP4,
+	DSI_MODEL_OMAP5,
+};
+
+enum dsi_lane_function {
+	DSI_LANE_UNUSED	= 0,
+	DSI_LANE_CLK,
+	DSI_LANE_DATA1,
+	DSI_LANE_DATA2,
+	DSI_LANE_DATA3,
+	DSI_LANE_DATA4,
+};
+
+struct dsi_lane_config {
+	enum dsi_lane_function function;
+	u8 polarity;
+};
+
+typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
+
+struct dsi_isr_data {
+	omap_dsi_isr_t	isr;
+	void		*arg;
+	u32		mask;
+};
+
+enum fifo_size {
+	DSI_FIFO_SIZE_0		= 0,
+	DSI_FIFO_SIZE_32	= 1,
+	DSI_FIFO_SIZE_64	= 2,
+	DSI_FIFO_SIZE_96	= 3,
+	DSI_FIFO_SIZE_128	= 4,
+};
+
+enum dsi_vc_source {
+	DSI_VC_SOURCE_L4 = 0,
+	DSI_VC_SOURCE_VP,
+};
+
+struct dsi_irq_stats {
+	unsigned long last_reset;
+	unsigned int irq_count;
+	unsigned int dsi_irqs[32];
+	unsigned int vc_irqs[4][32];
+	unsigned int cio_irqs[32];
+};
+
+struct dsi_isr_tables {
+	struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
+	struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
+	struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
+};
+
+struct dsi_lp_clock_info {
+	unsigned long lp_clk;
+	u16 lp_clk_div;
+};
+
+struct dsi_clk_calc_ctx {
+	struct dsi_data *dsi;
+	struct dss_pll *pll;
+
+	/* inputs */
+
+	const struct omap_dss_dsi_config *config;
+
+	unsigned long req_pck_min, req_pck_nom, req_pck_max;
+
+	/* outputs */
+
+	struct dss_pll_clock_info dsi_cinfo;
+	struct dispc_clock_info dispc_cinfo;
+	struct dsi_lp_clock_info lp_cinfo;
+
+	struct videomode vm;
+	struct omap_dss_dsi_videomode_timings dsi_vm;
+};
+
+struct dsi_module_id_data {
+	u32 address;
+	int id;
+};
+
+enum dsi_quirks {
+	DSI_QUIRK_PLL_PWR_BUG = (1 << 0),	/* DSI-PLL power command 0x3 is not working */
+	DSI_QUIRK_DCS_CMD_CONFIG_VC = (1 << 1),
+	DSI_QUIRK_VC_OCP_WIDTH = (1 << 2),
+	DSI_QUIRK_REVERSE_TXCLKESC = (1 << 3),
+	DSI_QUIRK_GNQ = (1 << 4),
+	DSI_QUIRK_PHY_DCC = (1 << 5),
+};
+
+struct dsi_of_data {
+	enum dsi_model model;
+	const struct dss_pll_hw *pll_hw;
+	const struct dsi_module_id_data *modules;
+	unsigned int max_fck_freq;
+	unsigned int max_pll_lpdiv;
+	enum dsi_quirks quirks;
+};
+
+struct dsi_data {
+	struct device *dev;
+	void __iomem *proto_base;
+	void __iomem *phy_base;
+	void __iomem *pll_base;
+
+	const struct dsi_of_data *data;
+	int module_id;
+
+	int irq;
+
+	bool is_enabled;
+
+	struct clk *dss_clk;
+	struct regmap *syscon;
+	struct dss_device *dss;
+
+	struct mipi_dsi_host host;
+
+	struct dispc_clock_info user_dispc_cinfo;
+	struct dss_pll_clock_info user_dsi_cinfo;
+
+	struct dsi_lp_clock_info user_lp_cinfo;
+	struct dsi_lp_clock_info current_lp_cinfo;
+
+	struct dss_pll pll;
+
+	bool vdds_dsi_enabled;
+	struct regulator *vdds_dsi_reg;
+
+	struct mipi_dsi_device *dsidev;
+
+	struct {
+		enum dsi_vc_source source;
+		enum fifo_size tx_fifo_size;
+		enum fifo_size rx_fifo_size;
+	} vc[4];
+
+	struct mutex lock;
+	struct semaphore bus_lock;
+
+	spinlock_t irq_lock;
+	struct dsi_isr_tables isr_tables;
+	/* space for a copy used by the interrupt handler */
+	struct dsi_isr_tables isr_tables_copy;
+
+	int update_vc;
+#ifdef DSI_PERF_MEASURE
+	unsigned int update_bytes;
+#endif
+
+	/* external TE GPIO */
+	struct gpio_desc *te_gpio;
+	int te_irq;
+	struct delayed_work te_timeout_work;
+	atomic_t do_ext_te_update;
+
+	bool te_enabled;
+	bool iface_enabled;
+	bool video_enabled;
+
+	struct delayed_work framedone_timeout_work;
+
+#ifdef DSI_CATCH_MISSING_TE
+	struct timer_list te_timer;
+#endif
+
+	unsigned long cache_req_pck;
+	unsigned long cache_clk_freq;
+	struct dss_pll_clock_info cache_cinfo;
+
+	u32		errors;
+	spinlock_t	errors_lock;
+#ifdef DSI_PERF_MEASURE
+	ktime_t perf_setup_time;
+	ktime_t perf_start_time;
+#endif
+	int debug_read;
+	int debug_write;
+	struct {
+		struct dss_debugfs_entry *irqs;
+		struct dss_debugfs_entry *regs;
+		struct dss_debugfs_entry *clks;
+	} debugfs;
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+	spinlock_t irq_stats_lock;
+	struct dsi_irq_stats irq_stats;
+#endif
+
+	unsigned int num_lanes_supported;
+	unsigned int line_buffer_size;
+
+	struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
+	unsigned int num_lanes_used;
+
+	unsigned int scp_clk_refcount;
+
+	struct omap_dss_dsi_config config;
+
+	struct dss_lcd_mgr_config mgr_config;
+	struct videomode vm;
+	enum mipi_dsi_pixel_format pix_fmt;
+	enum omap_dss_dsi_mode mode;
+	struct omap_dss_dsi_videomode_timings vm_timings;
+
+	struct omap_dss_device output;
+	struct drm_bridge bridge;
+
+	struct delayed_work dsi_disable_work;
+};
+
+struct dsi_packet_sent_handler_data {
+	struct dsi_data *dsi;
+	struct completion *completion;
+};
+
+#endif /* __OMAP_DRM_DSS_DSI_H */
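
The host_to_omap() helper used by the host ops in dsi.c is presumably the
usual container_of() idiom over the embedded host member above:

static inline struct dsi_data *host_to_omap(struct mipi_dsi_host *host)
{
	return container_of(host, struct dsi_data, host);
}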
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index d7b2f5bcac1698393f92ab18208e063732b9fc3c..d6a5862b4dbf5a3c85d28f0c7d172ba8b96a0b07 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1308,6 +1308,7 @@ static int dss_bind(struct device *dev)
 {
 	struct dss_device *dss = dev_get_drvdata(dev);
 	struct platform_device *drm_pdev;
+	struct dss_pdata pdata;
 	int r;
 
 	r = component_bind_all(dev, NULL);
@@ -1316,9 +1317,9 @@ static int dss_bind(struct device *dev)
 
 	pm_set_vt_switch(0);
 
-	omapdss_set_dss(dss);
-
-	drm_pdev = platform_device_register_simple("omapdrm", 0, NULL, 0);
+	pdata.dss = dss;
+	drm_pdev = platform_device_register_data(NULL, "omapdrm", 0,
+						 &pdata, sizeof(pdata));
 	if (IS_ERR(drm_pdev)) {
 		component_unbind_all(dev, NULL);
 		return PTR_ERR(drm_pdev);
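
On the receiving end, the omapdrm probe can pull the dss pointer out of its
platform data instead of the removed omapdss_get_dss() global; a sketch
(omapdrm_init() is a hypothetical stand-in for the driver's setup path):

static int pdev_probe(struct platform_device *pdev)
{
	struct dss_pdata *pdata = dev_get_platdata(&pdev->dev);

	if (!pdata)
		return -EINVAL;

	/* pdata->dss replaces the old omapdss_get_dss() global lookup */
	return omapdrm_init(pdata->dss, pdev);	/* hypothetical helper */
}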
@@ -1335,8 +1336,6 @@ static void dss_unbind(struct device *dev)
 
 	platform_device_unregister(dss->drm_pdev);
 
-	omapdss_set_dss(NULL);
-
 	component_unbind_all(dev, NULL);
 }
 
@@ -1569,15 +1568,7 @@ static int dss_remove(struct platform_device *pdev)
 
 static void dss_shutdown(struct platform_device *pdev)
 {
-	struct omap_dss_device *dssdev = NULL;
-
 	DSSDBG("shutdown\n");
-
-	for_each_dss_output(dssdev) {
-		if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE &&
-		    dssdev->ops && dssdev->ops->disable)
-			dssdev->ops->disable(dssdev);
-	}
 }
 
 static int dss_runtime_suspend(struct device *dev)
@@ -1650,21 +1641,14 @@ static struct platform_driver * const omap_dss_drivers[] = {
 #endif
 };
 
-static int __init omap_dss_init(void)
+int __init omap_dss_init(void)
 {
 	return platform_register_drivers(omap_dss_drivers,
 					 ARRAY_SIZE(omap_dss_drivers));
 }
 
-static void __exit omap_dss_exit(void)
+void omap_dss_exit(void)
 {
 	platform_unregister_drivers(omap_dss_drivers,
 				    ARRAY_SIZE(omap_dss_drivers));
 }
-
-module_init(omap_dss_init);
-module_exit(omap_dss_exit);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("OMAP2/3/4/5 Display Subsystem");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 2b404bcb41dd8b67f305a4bd4b3b81e1adca19f6..a547527bb2f3bf7ac59a691c0b9c1d95085dc7fc 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -257,8 +257,6 @@ struct dss_device {
 	struct dss_pll	*video2_pll;
 
 	struct dispc_device *dispc;
-	const struct dispc_ops *dispc_ops;
-	const struct dss_mgr_ops *mgr_ops;
 	struct omap_drm_private *mgr_ops_priv;
 };
 
@@ -393,6 +391,76 @@ void dispc_dump_clocks(struct dispc_device *dispc, struct seq_file *s);
 int dispc_runtime_get(struct dispc_device *dispc);
 void dispc_runtime_put(struct dispc_device *dispc);
 
+int dispc_get_num_ovls(struct dispc_device *dispc);
+int dispc_get_num_mgrs(struct dispc_device *dispc);
+
+const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
+					    enum omap_plane_id plane);
+
+u32 dispc_read_irqstatus(struct dispc_device *dispc);
+void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask);
+void dispc_write_irqenable(struct dispc_device *dispc, u32 mask);
+
+int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
+			     void *dev_id);
+void dispc_free_irq(struct dispc_device *dispc, void *dev_id);
+
+u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc,
+				   enum omap_channel channel);
+u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
+				       enum omap_channel channel);
+u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
+				       enum omap_channel channel);
+u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc);
+
+u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc);
+
+void dispc_mgr_enable(struct dispc_device *dispc,
+			     enum omap_channel channel, bool enable);
+
+bool dispc_mgr_go_busy(struct dispc_device *dispc,
+			      enum omap_channel channel);
+
+void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel);
+
+void dispc_mgr_set_lcd_config(struct dispc_device *dispc,
+				     enum omap_channel channel,
+				     const struct dss_lcd_mgr_config *config);
+void dispc_mgr_set_timings(struct dispc_device *dispc,
+				  enum omap_channel channel,
+				  const struct videomode *vm);
+void dispc_mgr_setup(struct dispc_device *dispc,
+			    enum omap_channel channel,
+			    const struct omap_overlay_manager_info *info);
+
+int dispc_mgr_check_timings(struct dispc_device *dispc,
+				   enum omap_channel channel,
+				   const struct videomode *vm);
+
+u32 dispc_mgr_gamma_size(struct dispc_device *dispc,
+				enum omap_channel channel);
+void dispc_mgr_set_gamma(struct dispc_device *dispc,
+				enum omap_channel channel,
+				const struct drm_color_lut *lut,
+				unsigned int length);
+
+int dispc_ovl_setup(struct dispc_device *dispc,
+			   enum omap_plane_id plane,
+			   const struct omap_overlay_info *oi,
+			   const struct videomode *vm, bool mem_to_mem,
+			   enum omap_channel channel);
+
+int dispc_ovl_enable(struct dispc_device *dispc,
+			    enum omap_plane_id plane, bool enable);
+
+bool dispc_has_writeback(struct dispc_device *dispc);
+int dispc_wb_setup(struct dispc_device *dispc,
+		   const struct omap_dss_writeback_info *wi,
+		   bool mem_to_mem, const struct videomode *vm,
+		   enum dss_writeback_channel channel_in);
+bool dispc_wb_go_busy(struct dispc_device *dispc);
+void dispc_wb_go(struct dispc_device *dispc);
+
 void dispc_enable_sidle(struct dispc_device *dispc);
 void dispc_disable_sidle(struct dispc_device *dispc);
 
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 8de41e74e8f8e3ce92729eed01d50c8deb366e5f..35b750cebaebfa840d4d26f00ca8550dcc7d8214 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -707,7 +707,6 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi)
 	out->type = OMAP_DISPLAY_TYPE_HDMI;
 	out->name = "hdmi.0";
 	out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
-	out->owner = THIS_MODULE;
 	out->of_port = 0;
 
 	r = omapdss_device_init_output(out, &hdmi->bridge);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 54e5cb5aa52de9263ef7420fc523f8906aec3b49..65085d886da5cd204a6bd500ddab68d055169b25 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -681,7 +681,6 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi)
 	out->type = OMAP_DISPLAY_TYPE_HDMI;
 	out->name = "hdmi.0";
 	out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
-	out->owner = THIS_MODULE;
 	out->of_port = 0;
 
 	r = omapdss_device_init_output(out, &hdmi->bridge);
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
deleted file mode 100644
index f21b5df3121321dc6ab0e99676745314b85211cc..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ /dev/null
@@ -1,229 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-/*
- * As omapdss panel drivers are omapdss specific, but we want to define the
- * DT-data in generic manner, we convert the compatible strings of the panel and
- * encoder nodes from "panel-foo" to "omapdss,panel-foo". This way we can have
- * both correct DT data and omapdss specific drivers.
- *
- * When we get generic panel drivers to the kernel, this file will be removed.
- */
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-
-static struct list_head dss_conv_list __initdata;
-
-static const char prefix[] __initconst = "omapdss,";
-
-struct dss_conv_node {
-	struct list_head list;
-	struct device_node *node;
-	bool root;
-};
-
-static int __init omapdss_count_strings(const struct property *prop)
-{
-	const char *p = prop->value;
-	int l = 0, total = 0;
-	int i;
-
-	for (i = 0; total < prop->length; total += l, p += l, i++)
-		l = strlen(p) + 1;
-
-	return i;
-}
-
-static void __init omapdss_update_prop(struct device_node *node, char *compat,
-	int len)
-{
-	struct property *prop;
-
-	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
-	if (!prop)
-		return;
-
-	prop->name = "compatible";
-	prop->value = compat;
-	prop->length = len;
-
-	of_update_property(node, prop);
-}
-
-static void __init omapdss_prefix_strcpy(char *dst, int dst_len,
-	const char *src, int src_len)
-{
-	size_t total = 0;
-
-	while (total < src_len) {
-		size_t l = strlen(src) + 1;
-
-		strcpy(dst, prefix);
-		dst += strlen(prefix);
-
-		strcpy(dst, src);
-		dst += l;
-
-		src += l;
-		total += l;
-	}
-}
-
-/* prepend compatible property strings with "omapdss," */
-static void __init omapdss_omapify_node(struct device_node *node)
-{
-	struct property *prop;
-	char *new_compat;
-	int num_strs;
-	int new_len;
-
-	prop = of_find_property(node, "compatible", NULL);
-
-	if (!prop || !prop->value)
-		return;
-
-	if (strnlen(prop->value, prop->length) >= prop->length)
-		return;
-
-	/* is it already prefixed? */
-	if (strncmp(prefix, prop->value, strlen(prefix)) == 0)
-		return;
-
-	num_strs = omapdss_count_strings(prop);
-
-	new_len = prop->length + strlen(prefix) * num_strs;
-	new_compat = kmalloc(new_len, GFP_KERNEL);
-
-	omapdss_prefix_strcpy(new_compat, new_len, prop->value, prop->length);
-
-	omapdss_update_prop(node, new_compat, new_len);
-}
-
-static void __init omapdss_add_to_list(struct device_node *node, bool root)
-{
-	struct dss_conv_node *n = kmalloc(sizeof(*n), GFP_KERNEL);
-	if (n) {
-		n->node = node;
-		n->root = root;
-		list_add(&n->list, &dss_conv_list);
-	}
-}
-
-static bool __init omapdss_list_contains(const struct device_node *node)
-{
-	struct dss_conv_node *n;
-
-	list_for_each_entry(n, &dss_conv_list, list) {
-		if (n->node == node)
-			return true;
-	}
-
-	return false;
-}
-
-static void __init omapdss_walk_device(struct device_node *node, bool root)
-{
-	struct device_node *n;
-
-	omapdss_add_to_list(node, root);
-
-	/*
-	 * of_graph_get_remote_port_parent() prints an error if there is no
-	 * port/ports node. To avoid that, check first that there's the node.
-	 */
-	n = of_get_child_by_name(node, "ports");
-	if (!n)
-		n = of_get_child_by_name(node, "port");
-	if (!n)
-		return;
-
-	of_node_put(n);
-
-	n = NULL;
-	while ((n = of_graph_get_next_endpoint(node, n)) != NULL) {
-		struct device_node *pn;
-
-		pn = of_graph_get_remote_port_parent(n);
-
-		if (!pn)
-			continue;
-
-		if (!of_device_is_available(pn) || omapdss_list_contains(pn)) {
-			of_node_put(pn);
-			continue;
-		}
-
-		omapdss_walk_device(pn, false);
-	}
-}
-
-static const struct of_device_id omapdss_of_match[] __initconst = {
-	{ .compatible = "ti,omap2-dss", },
-	{ .compatible = "ti,omap3-dss", },
-	{ .compatible = "ti,omap4-dss", },
-	{ .compatible = "ti,omap5-dss", },
-	{ .compatible = "ti,dra7-dss", },
-	{},
-};
-
-static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
-	{ .compatible = "panel-dsi-cm" },
-	{},
-};
-
-static void __init omapdss_find_children(struct device_node *np)
-{
-	struct device_node *child;
-
-	for_each_available_child_of_node(np, child) {
-		if (!of_find_property(child, "compatible", NULL))
-			continue;
-
-		omapdss_walk_device(child, true);
-
-		if (of_device_is_compatible(child, "ti,sysc"))
-			omapdss_find_children(child);
-	}
-}
-
-static int __init omapdss_boot_init(void)
-{
-	struct device_node *dss;
-
-	INIT_LIST_HEAD(&dss_conv_list);
-
-	dss = of_find_matching_node(NULL, omapdss_of_match);
-
-	if (dss == NULL || !of_device_is_available(dss))
-		goto put_node;
-
-	omapdss_walk_device(dss, true);
-	omapdss_find_children(dss);
-
-	while (!list_empty(&dss_conv_list)) {
-		struct dss_conv_node *n;
-
-		n = list_first_entry(&dss_conv_list, struct dss_conv_node,
-			list);
-
-		if (of_match_node(omapdss_of_fixups_whitelist, n->node))
-			omapdss_omapify_node(n->node);
-
-		list_del(&n->list);
-		of_node_put(n->node);
-		kfree(n);
-	}
-
-put_node:
-	of_node_put(dss);
-	return 0;
-}
-
-subsys_initcall(omapdss_boot_init);
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index a48a9a254e335f7dadaa92ffddb701e4ff4cb964..a40abeafd2e96f21153af6c5cc30939edb0f3f48 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -7,13 +7,14 @@
 #ifndef __OMAP_DRM_DSS_H
 #define __OMAP_DRM_DSS_H
 
-#include <linux/list.h>
+#include <drm/drm_color_mgmt.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mode.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
-#include <video/videomode.h>
+#include <linux/list.h>
 #include <linux/platform_data/omapdss.h>
-#include <uapi/drm/drm_mode.h>
-#include <drm/drm_crtc.h>
+#include <video/videomode.h>
 
 #define DISPC_IRQ_FRAMEDONE		(1 << 0)
 #define DISPC_IRQ_VSYNC			(1 << 1)
@@ -116,28 +117,6 @@ enum omap_dss_venc_type {
 	OMAP_DSS_VENC_TYPE_SVIDEO,
 };
 
-enum omap_dss_dsi_pixel_format {
-	OMAP_DSS_DSI_FMT_RGB888,
-	OMAP_DSS_DSI_FMT_RGB666,
-	OMAP_DSS_DSI_FMT_RGB666_PACKED,
-	OMAP_DSS_DSI_FMT_RGB565,
-};
-
-enum omap_dss_dsi_mode {
-	OMAP_DSS_DSI_CMD_MODE = 0,
-	OMAP_DSS_DSI_VIDEO_MODE,
-};
-
-enum omap_display_caps {
-	OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE	= 1 << 0,
-	OMAP_DSS_DISPLAY_CAP_TEAR_ELIM		= 1 << 1,
-};
-
-enum omap_dss_display_state {
-	OMAP_DSS_DISPLAY_DISABLED = 0,
-	OMAP_DSS_DISPLAY_ACTIVE,
-};
-
 enum omap_dss_rotation_type {
 	OMAP_DSS_ROT_NONE	= 0,
 	OMAP_DSS_ROT_TILER	= 1 << 0,
@@ -162,64 +141,6 @@ enum omap_dss_output_id {
 	OMAP_DSS_OUTPUT_HDMI	= 1 << 6,
 };
 
-/* DSI */
-
-enum omap_dss_dsi_trans_mode {
-	/* Sync Pulses: both sync start and end packets sent */
-	OMAP_DSS_DSI_PULSE_MODE,
-	/* Sync Events: only sync start packets sent */
-	OMAP_DSS_DSI_EVENT_MODE,
-	/* Burst: only sync start packets sent, pixels are time compressed */
-	OMAP_DSS_DSI_BURST_MODE,
-};
-
-struct omap_dss_dsi_videomode_timings {
-	unsigned long hsclk;
-
-	unsigned int ndl;
-	unsigned int bitspp;
-
-	/* pixels */
-	u16 hact;
-	/* lines */
-	u16 vact;
-
-	/* DSI video mode blanking data */
-	/* Unit: byte clock cycles */
-	u16 hss;
-	u16 hsa;
-	u16 hse;
-	u16 hfp;
-	u16 hbp;
-	/* Unit: line clocks */
-	u16 vsa;
-	u16 vfp;
-	u16 vbp;
-
-	/* DSI blanking modes */
-	int blanking_mode;
-	int hsa_blanking_mode;
-	int hbp_blanking_mode;
-	int hfp_blanking_mode;
-
-	enum omap_dss_dsi_trans_mode trans_mode;
-
-	bool ddr_clk_always_on;
-	int window_sync;
-};
-
-struct omap_dss_dsi_config {
-	enum omap_dss_dsi_mode mode;
-	enum omap_dss_dsi_pixel_format pixel_format;
-	const struct videomode *vm;
-
-	unsigned long hs_clk_min, hs_clk_max;
-	unsigned long lp_clk_min, lp_clk_max;
-
-	bool ddr_clk_always_on;
-	enum omap_dss_dsi_trans_mode trans_mode;
-};
-
 struct omap_dss_cpr_coefs {
 	s16 rr, rg, rb;
 	s16 gr, gg, gb;
@@ -243,6 +164,9 @@ struct omap_overlay_info {
 	u8 global_alpha;
 	u8 pre_mult_alpha;
 	u8 zorder;
+
+	enum drm_color_encoding color_encoding;
+	enum drm_color_range color_range;
 };
 
 struct omap_overlay_manager_info {
@@ -258,21 +182,6 @@ struct omap_overlay_manager_info {
 	struct omap_dss_cpr_coefs cpr_coefs;
 };
 
-/* 22 pins means 1 clk lane and 10 data lanes */
-#define OMAP_DSS_MAX_DSI_PINS 22
-
-struct omap_dsi_pin_config {
-	int num_pins;
-	/*
-	 * pin numbers in the following order:
-	 * clk+, clk-
-	 * data1+, data1-
-	 * data2+, data2-
-	 * ...
-	 */
-	int pins[OMAP_DSS_MAX_DSI_PINS];
-};
-
 struct omap_dss_writeback_info {
 	u32 paddr;
 	u32 p_uv_addr;
@@ -286,89 +195,14 @@ struct omap_dss_writeback_info {
 };
 
 struct omapdss_dsi_ops {
-	void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
-			bool enter_ulps);
-
-	/* bus configuration */
-	int (*set_config)(struct omap_dss_device *dssdev,
-			const struct omap_dss_dsi_config *cfg);
-	int (*configure_pins)(struct omap_dss_device *dssdev,
-			const struct omap_dsi_pin_config *pin_cfg);
-
-	void (*enable_hs)(struct omap_dss_device *dssdev, int channel,
-			bool enable);
-	int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
-
-	int (*update)(struct omap_dss_device *dssdev, int channel,
-			void (*callback)(int, void *), void *data);
-
-	void (*bus_lock)(struct omap_dss_device *dssdev);
-	void (*bus_unlock)(struct omap_dss_device *dssdev);
-
-	int (*enable_video_output)(struct omap_dss_device *dssdev, int channel);
-	void (*disable_video_output)(struct omap_dss_device *dssdev,
-			int channel);
-
-	int (*request_vc)(struct omap_dss_device *dssdev, int *channel);
-	int (*set_vc_id)(struct omap_dss_device *dssdev, int channel,
-			int vc_id);
-	void (*release_vc)(struct omap_dss_device *dssdev, int channel);
-
-	/* data transfer */
-	int (*dcs_write)(struct omap_dss_device *dssdev, int channel,
-			u8 *data, int len);
-	int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel,
-			u8 *data, int len);
-	int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
-			u8 *data, int len);
-
-	int (*gen_write)(struct omap_dss_device *dssdev, int channel,
-			u8 *data, int len);
-	int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel,
-			u8 *data, int len);
-	int (*gen_read)(struct omap_dss_device *dssdev, int channel,
-			u8 *reqdata, int reqlen,
-			u8 *data, int len);
-
-	int (*bta_sync)(struct omap_dss_device *dssdev, int channel);
-
-	int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev,
-			int channel, u16 plen);
-};
-
-struct omap_dss_device_ops {
-	int (*connect)(struct omap_dss_device *dssdev,
-			struct omap_dss_device *dst);
-	void (*disconnect)(struct omap_dss_device *dssdev,
-			struct omap_dss_device *dst);
-
-	void (*enable)(struct omap_dss_device *dssdev);
-	void (*disable)(struct omap_dss_device *dssdev);
-
-	int (*check_timings)(struct omap_dss_device *dssdev,
-			     struct drm_display_mode *mode);
-
-	int (*get_modes)(struct omap_dss_device *dssdev,
-			 struct drm_connector *connector);
-
-	const struct omapdss_dsi_ops dsi;
-};
-
-/**
- * enum omap_dss_device_ops_flag - Indicates which device ops are supported
- * @OMAP_DSS_DEVICE_OP_MODES: The device supports reading modes
- */
-enum omap_dss_device_ops_flag {
-	OMAP_DSS_DEVICE_OP_MODES = BIT(3),
+	int (*update)(struct omap_dss_device *dssdev);
+	bool (*is_video_mode)(struct omap_dss_device *dssdev);
 };
 
 struct omap_dss_device {
 	struct device *dev;
 
-	struct module *owner;
-
 	struct dss_device *dss;
-	struct omap_dss_device *next;
 	struct drm_bridge *bridge;
 	struct drm_bridge *next_bridge;
 	struct drm_panel *panel;
@@ -382,23 +216,11 @@ struct omap_dss_device {
 	 */
 	enum omap_display_type type;
 
-	/*
-	 * True if the device is a display (panel or connector) at the end of
-	 * the pipeline, false otherwise.
-	 */
-	bool display;
-
 	const char *name;
 
-	const struct omap_dss_driver *driver;
-	const struct omap_dss_device_ops *ops;
-	unsigned long ops_flags;
+	const struct omapdss_dsi_ops *dsi_ops;
 	u32 bus_flags;
 
-	enum omap_display_caps caps;
-
-	enum omap_dss_display_state state;
-
 	/* OMAP DSS output specific fields */
 
 	/* DISPC channel for this output */
@@ -411,30 +233,10 @@ struct omap_dss_device {
 	unsigned int of_port;
 };
 
-struct omap_dss_driver {
-	int (*update)(struct omap_dss_device *dssdev,
-			       u16 x, u16 y, u16 w, u16 h);
-	int (*sync)(struct omap_dss_device *dssdev);
-
-	int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
-	int (*get_te)(struct omap_dss_device *dssdev);
-
-	int (*memory_read)(struct omap_dss_device *dssdev,
-			void *buf, size_t size,
-			u16 x, u16 y, u16 w, u16 h);
+struct dss_pdata {
+	struct dss_device *dss;
 };
 
-struct dss_device *omapdss_get_dss(void);
-void omapdss_set_dss(struct dss_device *dss);
-static inline bool omapdss_is_initialized(void)
-{
-	return !!omapdss_get_dss();
-}
-
-void omapdss_display_init(struct omap_dss_device *dssdev);
-int omapdss_display_get_modes(struct drm_connector *connector,
-			      const struct videomode *vm);
-
 void omapdss_device_register(struct omap_dss_device *dssdev);
 void omapdss_device_unregister(struct omap_dss_device *dssdev);
 struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev);
@@ -445,8 +247,6 @@ int omapdss_device_connect(struct dss_device *dss,
 			   struct omap_dss_device *dst);
 void omapdss_device_disconnect(struct omap_dss_device *src,
 			       struct omap_dss_device *dst);
-void omapdss_device_enable(struct omap_dss_device *dssdev);
-void omapdss_device_disable(struct omap_dss_device *dssdev);
 
 int omap_dss_get_num_overlay_managers(void);
 
@@ -466,11 +266,6 @@ int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
 int omapdss_compat_init(void);
 void omapdss_compat_uninit(void);
 
-static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
-{
-	return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
-}
-
 enum dss_writeback_channel {
 	DSS_WB_LCD1_MGR =	0,
 	DSS_WB_LCD2_MGR =	1,
@@ -482,31 +277,23 @@ enum dss_writeback_channel {
 	DSS_WB_LCD3_MGR =	7,
 };
 
-struct dss_mgr_ops {
-	void (*start_update)(struct omap_drm_private *priv,
-			     enum omap_channel channel);
-	int (*enable)(struct omap_drm_private *priv,
-		      enum omap_channel channel);
-	void (*disable)(struct omap_drm_private *priv,
-			enum omap_channel channel);
-	void (*set_timings)(struct omap_drm_private *priv,
-			    enum omap_channel channel,
-			    const struct videomode *vm);
-	void (*set_lcd_config)(struct omap_drm_private *priv,
-			       enum omap_channel channel,
-			       const struct dss_lcd_mgr_config *config);
-	int (*register_framedone_handler)(struct omap_drm_private *priv,
-			enum omap_channel channel,
-			void (*handler)(void *), void *data);
-	void (*unregister_framedone_handler)(struct omap_drm_private *priv,
-			enum omap_channel channel,
-			void (*handler)(void *), void *data);
-};
-
-int dss_install_mgr_ops(struct dss_device *dss,
-			const struct dss_mgr_ops *mgr_ops,
-			struct omap_drm_private *priv);
-void dss_uninstall_mgr_ops(struct dss_device *dss);
+void omap_crtc_dss_start_update(struct omap_drm_private *priv,
+				       enum omap_channel channel);
+void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable);
+int omap_crtc_dss_enable(struct omap_drm_private *priv, enum omap_channel channel);
+void omap_crtc_dss_disable(struct omap_drm_private *priv, enum omap_channel channel);
+void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
+		enum omap_channel channel,
+		const struct videomode *vm);
+void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
+		enum omap_channel channel,
+		const struct dss_lcd_mgr_config *config);
+int omap_crtc_dss_register_framedone(
+		struct omap_drm_private *priv, enum omap_channel channel,
+		void (*handler)(void *), void *data);
+void omap_crtc_dss_unregister_framedone(
+		struct omap_drm_private *priv, enum omap_channel channel,
+		void (*handler)(void *), void *data);
 
 void dss_mgr_set_timings(struct omap_dss_device *dssdev,
 		const struct videomode *vm);
@@ -520,80 +307,12 @@ int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev,
 void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev,
 		void (*handler)(void *), void *data);
 
-/* dispc ops */
-
-struct dispc_ops {
-	u32 (*read_irqstatus)(struct dispc_device *dispc);
-	void (*clear_irqstatus)(struct dispc_device *dispc, u32 mask);
-	void (*write_irqenable)(struct dispc_device *dispc, u32 mask);
-
-	int (*request_irq)(struct dispc_device *dispc, irq_handler_t handler,
-			   void *dev_id);
-	void (*free_irq)(struct dispc_device *dispc, void *dev_id);
-
-	int (*runtime_get)(struct dispc_device *dispc);
-	void (*runtime_put)(struct dispc_device *dispc);
-
-	int (*get_num_ovls)(struct dispc_device *dispc);
-	int (*get_num_mgrs)(struct dispc_device *dispc);
-
-	u32 (*get_memory_bandwidth_limit)(struct dispc_device *dispc);
-
-	void (*mgr_enable)(struct dispc_device *dispc,
-			   enum omap_channel channel, bool enable);
-	bool (*mgr_is_enabled)(struct dispc_device *dispc,
-			       enum omap_channel channel);
-	u32 (*mgr_get_vsync_irq)(struct dispc_device *dispc,
-				 enum omap_channel channel);
-	u32 (*mgr_get_framedone_irq)(struct dispc_device *dispc,
-				     enum omap_channel channel);
-	u32 (*mgr_get_sync_lost_irq)(struct dispc_device *dispc,
-				     enum omap_channel channel);
-	bool (*mgr_go_busy)(struct dispc_device *dispc,
-			    enum omap_channel channel);
-	void (*mgr_go)(struct dispc_device *dispc, enum omap_channel channel);
-	void (*mgr_set_lcd_config)(struct dispc_device *dispc,
-				   enum omap_channel channel,
-				   const struct dss_lcd_mgr_config *config);
-	int (*mgr_check_timings)(struct dispc_device *dispc,
-				 enum omap_channel channel,
-				 const struct videomode *vm);
-	void (*mgr_set_timings)(struct dispc_device *dispc,
-				enum omap_channel channel,
-				const struct videomode *vm);
-	void (*mgr_setup)(struct dispc_device *dispc, enum omap_channel channel,
-			  const struct omap_overlay_manager_info *info);
-	u32 (*mgr_gamma_size)(struct dispc_device *dispc,
-			      enum omap_channel channel);
-	void (*mgr_set_gamma)(struct dispc_device *dispc,
-			      enum omap_channel channel,
-			      const struct drm_color_lut *lut,
-			      unsigned int length);
-
-	int (*ovl_enable)(struct dispc_device *dispc, enum omap_plane_id plane,
-			  bool enable);
-	int (*ovl_setup)(struct dispc_device *dispc, enum omap_plane_id plane,
-			 const struct omap_overlay_info *oi,
-			 const struct videomode *vm, bool mem_to_mem,
-			 enum omap_channel channel);
-
-	const u32 *(*ovl_get_color_modes)(struct dispc_device *dispc,
-					  enum omap_plane_id plane);
-
-	u32 (*wb_get_framedone_irq)(struct dispc_device *dispc);
-	int (*wb_setup)(struct dispc_device *dispc,
-		const struct omap_dss_writeback_info *wi,
-		bool mem_to_mem, const struct videomode *vm,
-		enum dss_writeback_channel channel_in);
-	bool (*has_writeback)(struct dispc_device *dispc);
-	bool (*wb_go_busy)(struct dispc_device *dispc);
-	void (*wb_go)(struct dispc_device *dispc);
-};
-
 struct dispc_device *dispc_get_dispc(struct dss_device *dss);
-const struct dispc_ops *dispc_get_ops(struct dss_device *dss);
 
 bool omapdss_stack_is_ready(void);
 void omapdss_gather_components(struct device *dev);
 
+int omap_dss_init(void);
+void omap_dss_exit(void);
+
 #endif /* __OMAP_DRM_DSS_H */
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 5affdf078134b97bd7d0bdf94cc8930841fae246..7378e855c278c3809bc431ff48a1c5a41b7dedfc 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -30,7 +30,6 @@ int omapdss_device_init_output(struct omap_dss_device *out,
 		return 0;
 	}
 
-	out->next = omapdss_find_device_by_node(remote_node);
 	out->bridge = of_drm_find_bridge(remote_node);
 	out->panel = of_drm_find_panel(remote_node);
 	if (IS_ERR(out->panel))
@@ -38,12 +37,6 @@ int omapdss_device_init_output(struct omap_dss_device *out,
 
 	of_node_put(remote_node);
 
-	if (out->next && out->type != out->next->type) {
-		dev_err(out->dev, "output type and display type don't match\n");
-		ret = -EINVAL;
-		goto error;
-	}
-
 	if (out->panel) {
 		struct drm_bridge *bridge;
 
@@ -69,7 +62,7 @@ int omapdss_device_init_output(struct omap_dss_device *out,
 		out->bridge = local_bridge;
 	}
 
-	if (!out->next && !out->bridge) {
+	if (!out->bridge) {
 		ret = -EPROBE_DEFER;
 		goto error;
 	}
@@ -78,98 +71,64 @@ int omapdss_device_init_output(struct omap_dss_device *out,
 
 error:
 	omapdss_device_cleanup_output(out);
-	out->next = NULL;
 	return ret;
 }
-EXPORT_SYMBOL(omapdss_device_init_output);
 
 void omapdss_device_cleanup_output(struct omap_dss_device *out)
 {
 	if (out->bridge && out->panel)
 		drm_panel_bridge_remove(out->next_bridge ?
 					out->next_bridge : out->bridge);
-
-	if (out->next)
-		omapdss_device_put(out->next);
-}
-EXPORT_SYMBOL(omapdss_device_cleanup_output);
-
-int dss_install_mgr_ops(struct dss_device *dss,
-			const struct dss_mgr_ops *mgr_ops,
-			struct omap_drm_private *priv)
-{
-	if (dss->mgr_ops)
-		return -EBUSY;
-
-	dss->mgr_ops = mgr_ops;
-	dss->mgr_ops_priv = priv;
-
-	return 0;
-}
-EXPORT_SYMBOL(dss_install_mgr_ops);
-
-void dss_uninstall_mgr_ops(struct dss_device *dss)
-{
-	dss->mgr_ops = NULL;
-	dss->mgr_ops_priv = NULL;
 }
-EXPORT_SYMBOL(dss_uninstall_mgr_ops);
 
 void dss_mgr_set_timings(struct omap_dss_device *dssdev,
 			 const struct videomode *vm)
 {
-	dssdev->dss->mgr_ops->set_timings(dssdev->dss->mgr_ops_priv,
+	omap_crtc_dss_set_timings(dssdev->dss->mgr_ops_priv,
 					  dssdev->dispc_channel, vm);
 }
-EXPORT_SYMBOL(dss_mgr_set_timings);
 
 void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev,
 		const struct dss_lcd_mgr_config *config)
 {
-	dssdev->dss->mgr_ops->set_lcd_config(dssdev->dss->mgr_ops_priv,
+	omap_crtc_dss_set_lcd_config(dssdev->dss->mgr_ops_priv,
 					     dssdev->dispc_channel, config);
 }
-EXPORT_SYMBOL(dss_mgr_set_lcd_config);
 
 int dss_mgr_enable(struct omap_dss_device *dssdev)
 {
-	return dssdev->dss->mgr_ops->enable(dssdev->dss->mgr_ops_priv,
+	return omap_crtc_dss_enable(dssdev->dss->mgr_ops_priv,
 					    dssdev->dispc_channel);
 }
-EXPORT_SYMBOL(dss_mgr_enable);
 
 void dss_mgr_disable(struct omap_dss_device *dssdev)
 {
-	dssdev->dss->mgr_ops->disable(dssdev->dss->mgr_ops_priv,
+	omap_crtc_dss_disable(dssdev->dss->mgr_ops_priv,
 				      dssdev->dispc_channel);
 }
-EXPORT_SYMBOL(dss_mgr_disable);
 
 void dss_mgr_start_update(struct omap_dss_device *dssdev)
 {
-	dssdev->dss->mgr_ops->start_update(dssdev->dss->mgr_ops_priv,
+	omap_crtc_dss_start_update(dssdev->dss->mgr_ops_priv,
 					   dssdev->dispc_channel);
 }
-EXPORT_SYMBOL(dss_mgr_start_update);
 
 int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev,
 		void (*handler)(void *), void *data)
 {
 	struct dss_device *dss = dssdev->dss;
 
-	return dss->mgr_ops->register_framedone_handler(dss->mgr_ops_priv,
+	return omap_crtc_dss_register_framedone(dss->mgr_ops_priv,
 							dssdev->dispc_channel,
 							handler, data);
 }
-EXPORT_SYMBOL(dss_mgr_register_framedone_handler);
 
 void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev,
 		void (*handler)(void *), void *data)
 {
 	struct dss_device *dss = dssdev->dss;
 
-	dss->mgr_ops->unregister_framedone_handler(dss->mgr_ops_priv,
+	omap_crtc_dss_unregister_framedone(dss->mgr_ops_priv,
 						   dssdev->dispc_channel,
 						   handler, data);
 }
-EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler);
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index 241a338ace29bad7fc0c98be93383d2bed7fd368..4c8246a3ded9635d0accb385bbe95b521f4efcd2 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -222,6 +222,9 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
 	n_stop = min((unsigned)(clkin / fint_hw_min), hw->n_max);
 	n_inc = 1;
 
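+	/* Bail out early if no N divider can bring Fint into range. */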
+	if (n_start > n_stop)
+		return false;
+
 	if (hw->errata_i886) {
 		swap(n_start, n_stop);
 		n_inc = -1;
@@ -239,6 +242,9 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
 				hw->m_max);
 		m_inc = 1;
 
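+		/* Skip this N if no M keeps the DCO clock within its limits. */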
+		if (m_start > m_stop)
+			continue;
+
 		if (hw->errata_i886) {
 			swap(m_start, m_stop);
 			m_inc = -1;
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 282e4c837cd930e5b29bd85ec74020f19aff7ed7..91eaae3b948121cb52eb037bd3cb1d17357df979 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -312,7 +312,6 @@ static int sdi_init_output(struct sdi_device *sdi)
 	out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
 	/* We have SDI only on OMAP3, where it's on port 1 */
 	out->of_port = 1;
-	out->owner = THIS_MODULE;
 	out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE	/* 15.5.9.1.2 */
 		       | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
 
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 94cf50d837b0719daae010a7230ce3865e43c950..e522c17955d04f677db8c4c3fc25f759bab69871 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -733,9 +733,7 @@ static int venc_init_output(struct venc_device *venc)
 	out->type = OMAP_DISPLAY_TYPE_VENC;
 	out->name = "venc.0";
 	out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
-	out->owner = THIS_MODULE;
 	out->of_port = 0;
-	out->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
 
 	r = omapdss_device_init_output(out, &venc->bridge);
 	if (r < 0) {
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
deleted file mode 100644
index 47719b92e22bfa304ea9e3295fc1a42b25223f4a..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ /dev/null
@@ -1,157 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
- * Author: Rob Clark <rob@ti.com>
- */
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_probe_helper.h>
-
-#include "omap_drv.h"
-
-/*
- * connector funcs
- */
-
-#define to_omap_connector(x) container_of(x, struct omap_connector, base)
-
-struct omap_connector {
-	struct drm_connector base;
-	struct omap_dss_device *output;
-};
-
-static enum drm_connector_status omap_connector_detect(
-		struct drm_connector *connector, bool force)
-{
-	return connector_status_connected;
-}
-
-static void omap_connector_destroy(struct drm_connector *connector)
-{
-	struct omap_connector *omap_connector = to_omap_connector(connector);
-
-	DBG("%s", connector->name);
-
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-
-	omapdss_device_put(omap_connector->output);
-
-	kfree(omap_connector);
-}
-
-static int omap_connector_get_modes(struct drm_connector *connector)
-{
-	struct omap_connector *omap_connector = to_omap_connector(connector);
-	struct omap_dss_device *dssdev = NULL;
-	struct omap_dss_device *d;
-
-	DBG("%s", connector->name);
-
-	/*
-	 * If the display pipeline reports modes (e.g. with a fixed resolution
-	 * panel or an analog TV output), query it.
-	 */
-	for (d = omap_connector->output; d; d = d->next) {
-		if (d->ops_flags & OMAP_DSS_DEVICE_OP_MODES)
-			dssdev = d;
-	}
-
-	if (dssdev)
-		return dssdev->ops->get_modes(dssdev, connector);
-
-	/* We can't retrieve modes. The KMS core will add the default modes. */
-	return 0;
-}
-
-enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
-					const struct drm_display_mode *mode,
-					struct drm_display_mode *adjusted_mode)
-{
-	int ret;
-
-	drm_mode_copy(adjusted_mode, mode);
-
-	for (; dssdev; dssdev = dssdev->next) {
-		if (!dssdev->ops || !dssdev->ops->check_timings)
-			continue;
-
-		ret = dssdev->ops->check_timings(dssdev, adjusted_mode);
-		if (ret)
-			return MODE_BAD;
-	}
-
-	return MODE_OK;
-}
-
-static enum drm_mode_status omap_connector_mode_valid(struct drm_connector *connector,
-				 struct drm_display_mode *mode)
-{
-	struct omap_connector *omap_connector = to_omap_connector(connector);
-	struct drm_display_mode new_mode = {};
-	enum drm_mode_status status;
-
-	status = omap_connector_mode_fixup(omap_connector->output, mode,
-					   &new_mode);
-	if (status != MODE_OK)
-		goto done;
-
-	/* Check if vrefresh is still valid. */
-	if (drm_mode_vrefresh(mode) != drm_mode_vrefresh(&new_mode))
-		status = MODE_NOCLOCK;
-
-done:
-	DBG("connector: mode %s: " DRM_MODE_FMT,
-			(status == MODE_OK) ? "valid" : "invalid",
-			DRM_MODE_ARG(mode));
-
-	return status;
-}
-
-static const struct drm_connector_funcs omap_connector_funcs = {
-	.reset = drm_atomic_helper_connector_reset,
-	.detect = omap_connector_detect,
-	.fill_modes = drm_helper_probe_single_connector_modes,
-	.destroy = omap_connector_destroy,
-	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
-	.get_modes = omap_connector_get_modes,
-	.mode_valid = omap_connector_mode_valid,
-};
-
-/* initialize connector */
-struct drm_connector *omap_connector_init(struct drm_device *dev,
-					  struct omap_dss_device *output,
-					  struct drm_encoder *encoder)
-{
-	struct drm_connector *connector = NULL;
-	struct omap_connector *omap_connector;
-
-	DBG("%s", output->name);
-
-	omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL);
-	if (!omap_connector)
-		goto fail;
-
-	omap_connector->output = omapdss_device_get(output);
-
-	connector = &omap_connector->base;
-	connector->interlace_allowed = 1;
-	connector->doublescan_allowed = 0;
-
-	drm_connector_init(dev, connector, &omap_connector_funcs,
-			   DRM_MODE_CONNECTOR_DSI);
-	drm_connector_helper_add(connector, &omap_connector_helper_funcs);
-
-	return connector;
-
-fail:
-	if (connector)
-		omap_connector_destroy(connector);
-
-	return NULL;
-}
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h
deleted file mode 100644
index 0ecd4f1655b71a5485c5350216079ab2b8f0e752..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/omapdrm/omap_connector.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * omap_connector.h -- OMAP DRM Connector
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Rob Clark <rob@ti.com>
- */
-
-#ifndef __OMAPDRM_CONNECTOR_H__
-#define __OMAPDRM_CONNECTOR_H__
-
-#include <linux/types.h>
-
-enum drm_mode_status;
-
-struct drm_connector;
-struct drm_device;
-struct drm_encoder;
-struct omap_dss_device;
-
-struct drm_connector *omap_connector_init(struct drm_device *dev,
-					  struct omap_dss_device *output,
-					  struct drm_encoder *encoder);
-enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
-					const struct drm_display_mode *mode,
-					struct drm_display_mode *adjusted_mode);
-
-#endif /* __OMAPDRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 7d66269ad998a13df787c64b3109bdcbd9664e28..06a719c104f4e492a7b58f7b71bf7e09e0183479 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -100,14 +100,14 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc)
  * the upstream part of the video pipe.
  */
 
-static void omap_crtc_dss_start_update(struct omap_drm_private *priv,
+void omap_crtc_dss_start_update(struct omap_drm_private *priv,
 				       enum omap_channel channel)
 {
-	priv->dispc_ops->mgr_enable(priv->dispc, channel, true);
+	dispc_mgr_enable(priv->dispc, channel, true);
 }
 
 /* Called only from the encoder enable/disable and suspend/resume handlers. */
-static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
+void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
 {
 	struct omap_crtc_state *omap_state = to_omap_crtc_state(crtc->state);
 	struct drm_device *dev = crtc->dev;
@@ -128,7 +128,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
 	}
 
 	if (omap_crtc->pipe->output->type == OMAP_DISPLAY_TYPE_HDMI) {
-		priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
+		dispc_mgr_enable(priv->dispc, channel, enable);
 		omap_crtc->enabled = enable;
 		return;
 	}
@@ -141,9 +141,9 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
 		omap_crtc->ignore_digit_sync_lost = true;
 	}
 
-	framedone_irq = priv->dispc_ops->mgr_get_framedone_irq(priv->dispc,
+	framedone_irq = dispc_mgr_get_framedone_irq(priv->dispc,
 							       channel);
-	vsync_irq = priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel);
+	vsync_irq = dispc_mgr_get_vsync_irq(priv->dispc, channel);
 
 	if (enable) {
 		wait = omap_irq_wait_init(dev, vsync_irq, 1);
@@ -163,7 +163,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
 			wait = omap_irq_wait_init(dev, vsync_irq, 2);
 	}
 
-	priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
+	dispc_mgr_enable(priv->dispc, channel, enable);
 	omap_crtc->enabled = enable;
 
 	ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
@@ -180,21 +180,19 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
 }
 
 
-static int omap_crtc_dss_enable(struct omap_drm_private *priv,
-				enum omap_channel channel)
+int omap_crtc_dss_enable(struct omap_drm_private *priv, enum omap_channel channel)
 {
 	struct drm_crtc *crtc = priv->channels[channel]->crtc;
 	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
 
-	priv->dispc_ops->mgr_set_timings(priv->dispc, omap_crtc->channel,
+	dispc_mgr_set_timings(priv->dispc, omap_crtc->channel,
 					 &omap_crtc->vm);
 	omap_crtc_set_enabled(&omap_crtc->base, true);
 
 	return 0;
 }
 
-static void omap_crtc_dss_disable(struct omap_drm_private *priv,
-				  enum omap_channel channel)
+void omap_crtc_dss_disable(struct omap_drm_private *priv, enum omap_channel channel)
 {
 	struct drm_crtc *crtc = priv->channels[channel]->crtc;
 	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -202,7 +200,7 @@ static void omap_crtc_dss_disable(struct omap_drm_private *priv,
 	omap_crtc_set_enabled(&omap_crtc->base, false);
 }
 
-static void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
+void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
 		enum omap_channel channel,
 		const struct videomode *vm)
 {
@@ -213,7 +211,7 @@ static void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
 	omap_crtc->vm = *vm;
 }
 
-static void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
+void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
 		enum omap_channel channel,
 		const struct dss_lcd_mgr_config *config)
 {
@@ -221,11 +219,11 @@ static void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
 	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
 
 	DBG("%s", omap_crtc->name);
-	priv->dispc_ops->mgr_set_lcd_config(priv->dispc, omap_crtc->channel,
+	dispc_mgr_set_lcd_config(priv->dispc, omap_crtc->channel,
 					    config);
 }
 
-static int omap_crtc_dss_register_framedone(
+int omap_crtc_dss_register_framedone(
 		struct omap_drm_private *priv, enum omap_channel channel,
 		void (*handler)(void *), void *data)
 {
@@ -244,7 +242,7 @@ static int omap_crtc_dss_register_framedone(
 	return 0;
 }
 
-static void omap_crtc_dss_unregister_framedone(
+void omap_crtc_dss_unregister_framedone(
 		struct omap_drm_private *priv, enum omap_channel channel,
 		void (*handler)(void *), void *data)
 {
@@ -261,16 +259,6 @@ static void omap_crtc_dss_unregister_framedone(
 	omap_crtc->framedone_handler_data = NULL;
 }
 
-static const struct dss_mgr_ops mgr_ops = {
-	.start_update = omap_crtc_dss_start_update,
-	.enable = omap_crtc_dss_enable,
-	.disable = omap_crtc_dss_disable,
-	.set_timings = omap_crtc_dss_set_timings,
-	.set_lcd_config = omap_crtc_dss_set_lcd_config,
-	.register_framedone_handler = omap_crtc_dss_register_framedone,
-	.unregister_framedone_handler = omap_crtc_dss_unregister_framedone,
-};
-
 /* -----------------------------------------------------------------------------
  * Setup, Flush and Page Flip
  */
@@ -300,7 +288,7 @@ void omap_crtc_vblank_irq(struct drm_crtc *crtc)
 	 * If the dispc is busy we're racing the flush operation. Try again on
 	 * the next vblank interrupt.
 	 */
-	if (priv->dispc_ops->mgr_go_busy(priv->dispc, omap_crtc->channel)) {
+	if (dispc_mgr_go_busy(priv->dispc, omap_crtc->channel)) {
 		spin_unlock(&crtc->dev->event_lock);
 		return;
 	}
@@ -362,27 +350,14 @@ static void omap_crtc_manual_display_update(struct work_struct *data)
 {
 	struct omap_crtc *omap_crtc =
 			container_of(data, struct omap_crtc, update_work.work);
-	struct drm_display_mode *mode = &omap_crtc->pipe->crtc->mode;
-	struct omap_dss_device *dssdev = omap_crtc->pipe->output->next;
+	struct omap_dss_device *dssdev = omap_crtc->pipe->output;
 	struct drm_device *dev = omap_crtc->base.dev;
-	const struct omap_dss_driver *dssdrv;
 	int ret;
 
-	if (!dssdev) {
-		dev_err_once(dev->dev, "missing display dssdev!");
+	if (!dssdev || !dssdev->dsi_ops || !dssdev->dsi_ops->update)
 		return;
-	}
-
-	dssdrv = dssdev->driver;
-	if (!dssdrv || !dssdrv->update) {
-		dev_err_once(dev->dev, "missing or incorrect dssdrv!");
-		return;
-	}
-
-	if (dssdrv->sync)
-		dssdrv->sync(dssdev);
 
-	ret = dssdrv->update(dssdev, 0, 0, mode->hdisplay, mode->vdisplay);
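+	/* Trigger a frame transfer through the panel's DSI update callback. */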
+	ret = dssdev->dsi_ops->update(dssdev);
 	if (ret < 0) {
 		spin_lock_irq(&dev->event_lock);
 		omap_crtc->pending = false;
@@ -391,6 +366,33 @@ static void omap_crtc_manual_display_update(struct work_struct *data)
 	}
 }
 
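+/*
+ * DRM CTM coefficients are S31.32 sign-magnitude values; keep 8 fractional
+ * bits, saturate the magnitude to 9 bits and reapply the sign to get the
+ * DISPC s2.8 format.
+ */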
+static s16 omap_crtc_s31_32_to_s2_8(s64 coef)
+{
+	u64 sign_bit = 1ULL << 63;
+	u64 cbits = (u64)coef;
+
+	s16 ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x1ff);
+
+	if (cbits & sign_bit)
+		ret = -ret;
+
+	return ret;
+}
+
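+/* Map the 3x3 DRM CTM matrix onto the DISPC color phase rotation coefficients. */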
+static void omap_crtc_cpr_coefs_from_ctm(const struct drm_color_ctm *ctm,
+					 struct omap_dss_cpr_coefs *cpr)
+{
+	cpr->rr = omap_crtc_s31_32_to_s2_8(ctm->matrix[0]);
+	cpr->rg = omap_crtc_s31_32_to_s2_8(ctm->matrix[1]);
+	cpr->rb = omap_crtc_s31_32_to_s2_8(ctm->matrix[2]);
+	cpr->gr = omap_crtc_s31_32_to_s2_8(ctm->matrix[3]);
+	cpr->gg = omap_crtc_s31_32_to_s2_8(ctm->matrix[4]);
+	cpr->gb = omap_crtc_s31_32_to_s2_8(ctm->matrix[5]);
+	cpr->br = omap_crtc_s31_32_to_s2_8(ctm->matrix[6]);
+	cpr->bg = omap_crtc_s31_32_to_s2_8(ctm->matrix[7]);
+	cpr->bb = omap_crtc_s31_32_to_s2_8(ctm->matrix[8]);
+}
+
 static void omap_crtc_write_crtc_properties(struct drm_crtc *crtc)
 {
 	struct omap_drm_private *priv = crtc->dev->dev_private;
@@ -402,9 +404,17 @@ static void omap_crtc_write_crtc_properties(struct drm_crtc *crtc)
 	info.default_color = 0x000000;
 	info.trans_enabled = false;
 	info.partial_alpha_enabled = false;
-	info.cpr_enable = false;
 
-	priv->dispc_ops->mgr_setup(priv->dispc, omap_crtc->channel, &info);
+	if (crtc->state->ctm) {
+		struct drm_color_ctm *ctm = crtc->state->ctm->data;
+
+		info.cpr_enable = true;
+		omap_crtc_cpr_coefs_from_ctm(ctm, &info.cpr_coefs);
+	} else {
+		info.cpr_enable = false;
+	}
+
+	dispc_mgr_setup(priv->dispc, omap_crtc->channel, &info);
 }
 
 /* -----------------------------------------------------------------------------
@@ -445,7 +455,7 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
 
 	DBG("%s", omap_crtc->name);
 
-	priv->dispc_ops->runtime_get(priv->dispc);
+	dispc_runtime_get(priv->dispc);
 
 	/* manually updated displays do not trigger a vsync irq */
 	if (omap_state->manually_updated)
@@ -484,7 +494,7 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
 
 	drm_crtc_vblank_off(crtc);
 
-	priv->dispc_ops->runtime_put(priv->dispc);
+	dispc_runtime_put(priv->dispc);
 }
 
 static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
@@ -502,9 +512,8 @@ static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
 	 * valid DISPC mode. DSI will calculate and configure the
 	 * proper DISPC mode later.
 	 */
-	if (omap_crtc->pipe->output->next == NULL ||
-	    omap_crtc->pipe->output->next->type != OMAP_DISPLAY_TYPE_DSI) {
-		r = priv->dispc_ops->mgr_check_timings(priv->dispc,
+	if (omap_crtc->pipe->output->type != OMAP_DISPLAY_TYPE_DSI) {
+		r = dispc_mgr_check_timings(priv->dispc,
 						       omap_crtc->channel,
 						       &vm);
 		if (r)
@@ -555,17 +564,16 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
 static bool omap_crtc_is_manually_updated(struct drm_crtc *crtc)
 {
 	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
-	struct omap_dss_device *display = omap_crtc->pipe->output->next;
+	struct omap_dss_device *dssdev = omap_crtc->pipe->output;
 
-	if (!display)
+	if (!dssdev || !dssdev->dsi_ops || !dssdev->dsi_ops->is_video_mode)
 		return false;
 
-	if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
-		DBG("detected manually updated display!");
-		return true;
-	}
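+	/*
+	 * Video-mode DSI panels refresh on their own; only command-mode
+	 * panels need manual updates.
+	 */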
+	if (dssdev->dsi_ops->is_video_mode(dssdev))
+		return false;
 
-	return false;
+	DBG("detected manually updated display!");
+	return true;
 }
 
 static int omap_crtc_atomic_check(struct drm_crtc *crtc,
@@ -575,8 +583,8 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc,
 									  crtc);
 	struct drm_plane_state *pri_state;
 
-	if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) {
-		unsigned int length = crtc_state->gamma_lut->length /
+	if (crtc_state->color_mgmt_changed && crtc_state->degamma_lut) {
+		unsigned int length = crtc_state->degamma_lut->length /
 			sizeof(struct drm_color_lut);
 
 		if (length < 2)
@@ -617,13 +625,13 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
 		struct drm_color_lut *lut = NULL;
 		unsigned int length = 0;
 
-		if (crtc->state->gamma_lut) {
+		if (crtc->state->degamma_lut) {
 			lut = (struct drm_color_lut *)
-				crtc->state->gamma_lut->data;
-			length = crtc->state->gamma_lut->length /
+				crtc->state->degamma_lut->data;
+			length = crtc->state->degamma_lut->length /
 				sizeof(*lut);
 		}
-		priv->dispc_ops->mgr_set_gamma(priv->dispc, omap_crtc->channel,
+		dispc_mgr_set_gamma(priv->dispc, omap_crtc->channel,
 					       lut, length);
 	}
 
@@ -648,7 +656,7 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
 	WARN_ON(ret != 0);
 
 	spin_lock_irq(&crtc->dev->event_lock);
-	priv->dispc_ops->mgr_go(priv->dispc, omap_crtc->channel);
+	dispc_mgr_go(priv->dispc, omap_crtc->channel);
 	omap_crtc_arm_event(crtc);
 	spin_unlock_irq(&crtc->dev->event_lock);
 }
@@ -741,7 +749,6 @@ static const struct drm_crtc_funcs omap_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
 	.destroy = omap_crtc_destroy,
 	.page_flip = drm_atomic_helper_page_flip,
-	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 	.atomic_duplicate_state = omap_crtc_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 	.atomic_set_property = omap_crtc_atomic_set_property,
@@ -771,16 +778,6 @@ static const char *channel_names[] = {
 	[OMAP_DSS_CHANNEL_LCD3] = "lcd3",
 };
 
-void omap_crtc_pre_init(struct omap_drm_private *priv)
-{
-	dss_install_mgr_ops(priv->dss, &mgr_ops, priv);
-}
-
-void omap_crtc_pre_uninit(struct omap_drm_private *priv)
-{
-	dss_uninstall_mgr_ops(priv->dss);
-}
-
 /* initialize crtc */
 struct drm_crtc *omap_crtc_init(struct drm_device *dev,
 				struct omap_drm_pipeline *pipe,
@@ -839,10 +836,10 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
 	 * extracted with dispc_mgr_gamma_size(). If it returns 0
 	 * gamma table is not supported.
 	 */
-	if (priv->dispc_ops->mgr_gamma_size(priv->dispc, channel)) {
+	if (dispc_mgr_gamma_size(priv->dispc, channel)) {
 		unsigned int gamma_lut_size = 256;
 
-		drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size);
+		drm_crtc_enable_color_mgmt(crtc, gamma_lut_size, true, 0);
 		drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
 	}
 
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.h b/drivers/gpu/drm/omapdrm/omap_crtc.h
index 2fd57751ae2b16f52812f232c540ce9a9cd82466..a8b9cbee86e07c6eab1b6b8889b73a8013e448e7 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.h
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.h
@@ -22,8 +22,6 @@ struct videomode;
 
 struct videomode *omap_crtc_timings(struct drm_crtc *crtc);
 enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
-void omap_crtc_pre_init(struct omap_drm_private *priv);
-void omap_crtc_pre_uninit(struct omap_drm_private *priv);
 struct drm_crtc *omap_crtc_init(struct drm_device *dev,
 				struct omap_drm_pipeline *pipe,
 				struct drm_plane *plane);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 42c2ed7520950dd61ed3dac24eefebd600fbf68d..28bbad1353eeac8fc3d85724d345b8b0f84d2cab 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -69,7 +69,7 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
 	struct drm_device *dev = old_state->dev;
 	struct omap_drm_private *priv = dev->dev_private;
 
-	priv->dispc_ops->runtime_get(priv->dispc);
+	dispc_runtime_get(priv->dispc);
 
 	/* Apply the atomic update. */
 	drm_atomic_helper_commit_modeset_disables(dev, old_state);
@@ -113,7 +113,7 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
 
 	drm_atomic_helper_cleanup_planes(dev, old_state);
 
-	priv->dispc_ops->runtime_put(priv->dispc);
+	dispc_runtime_put(priv->dispc);
 }
 
 static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = {
@@ -192,7 +192,7 @@ static int omap_compare_pipelines(const void *a, const void *b)
 static int omap_modeset_init_properties(struct drm_device *dev)
 {
 	struct omap_drm_private *priv = dev->dev_private;
-	unsigned int num_planes = priv->dispc_ops->get_num_ovls(priv->dispc);
+	unsigned int num_planes = dispc_get_num_ovls(priv->dispc);
 
 	priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0,
 						      num_planes - 1);
@@ -206,14 +206,7 @@ static int omap_display_id(struct omap_dss_device *output)
 {
 	struct device_node *node = NULL;
 
-	if (output->next) {
-		struct omap_dss_device *display = output;
-
-		while (display->next)
-			display = display->next;
-
-		node = display->dev->of_node;
-	} else if (output->bridge) {
+	if (output->bridge) {
 		struct drm_bridge *bridge = output->bridge;
 
 		while (drm_bridge_get_next_bridge(bridge))
@@ -228,8 +221,8 @@ static int omap_display_id(struct omap_dss_device *output)
 static int omap_modeset_init(struct drm_device *dev)
 {
 	struct omap_drm_private *priv = dev->dev_private;
-	int num_ovls = priv->dispc_ops->get_num_ovls(priv->dispc);
-	int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc);
+	int num_ovls = dispc_get_num_ovls(priv->dispc);
+	int num_mgrs = dispc_get_num_mgrs(priv->dispc);
 	unsigned int i;
 	int ret;
 	u32 plane_crtc_mask;
@@ -332,19 +325,12 @@ static int omap_modeset_init(struct drm_device *dev)
 		struct drm_encoder *encoder = pipe->encoder;
 		struct drm_crtc *crtc;
 
-		if (pipe->output->next) {
-			pipe->connector = omap_connector_init(dev, pipe->output,
-							      encoder);
-			if (!pipe->connector)
-				return -ENOMEM;
-		} else {
-			pipe->connector = drm_bridge_connector_init(dev, encoder);
-			if (IS_ERR(pipe->connector)) {
-				dev_err(priv->dev,
-					"unable to create bridge connector for %s\n",
-					pipe->output->name);
-				return PTR_ERR(pipe->connector);
-			}
+		pipe->connector = drm_bridge_connector_init(dev, encoder);
+		if (IS_ERR(pipe->connector)) {
+			dev_err(priv->dev,
+				"unable to create bridge connector for %s\n",
+				pipe->output->name);
+			return PTR_ERR(pipe->connector);
 		}
 
 		drm_connector_attach_encoder(pipe->connector, encoder);
@@ -568,6 +554,7 @@ static const struct soc_device_attribute omapdrm_soc_devices[] = {
 static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
 {
 	const struct soc_device_attribute *soc;
+	struct dss_pdata *pdata = dev->platform_data;
 	struct drm_device *ddev;
 	int ret;
 
@@ -582,11 +569,10 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
 	ddev->dev_private = priv;
 
 	priv->dev = dev;
-	priv->dss = omapdss_get_dss();
+	priv->dss = pdata->dss;
 	priv->dispc = dispc_get_dispc(priv->dss);
-	priv->dispc_ops = dispc_get_ops(priv->dss);
 
-	omap_crtc_pre_init(priv);
+	priv->dss->mgr_ops_priv = priv;
 
 	soc = soc_device_match(omapdrm_soc_devices);
 	priv->omaprev = soc ? (unsigned int)soc->data : 0;
@@ -596,9 +582,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
 	INIT_LIST_HEAD(&priv->obj_list);
 
 	/* Get memory bandwidth limits */
-	if (priv->dispc_ops->get_memory_bandwidth_limit)
-		priv->max_bandwidth =
-			priv->dispc_ops->get_memory_bandwidth_limit(priv->dispc);
+	priv->max_bandwidth = dispc_get_memory_bandwidth_limit(priv->dispc);
 
 	omap_gem_init(ddev);
 
@@ -641,7 +625,6 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
 	omap_gem_deinit(ddev);
 	destroy_workqueue(priv->wq);
 	omap_disconnect_pipelines(ddev);
-	omap_crtc_pre_uninit(priv);
 	drm_dev_put(ddev);
 	return ret;
 }
@@ -667,7 +650,6 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
 	destroy_workqueue(priv->wq);
 
 	omap_disconnect_pipelines(ddev);
-	omap_crtc_pre_uninit(priv);
 
 	drm_dev_put(ddev);
 }
@@ -677,9 +659,6 @@ static int pdev_probe(struct platform_device *pdev)
 	struct omap_drm_private *priv;
 	int ret;
 
-	if (omapdss_is_initialized() == false)
-		return -EPROBE_DEFER;
-
 	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to set the DMA mask\n");
@@ -748,9 +727,21 @@ static struct platform_driver * const drivers[] = {
 
 static int __init omap_drm_init(void)
 {
+	int r;
+
 	DBG("init");
 
-	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
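+	/* The DSS drivers must be registered before the omapdrm device probes. */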
+	r = omap_dss_init();
+	if (r)
+		return r;
+
+	r = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+	if (r) {
+		omap_dss_exit();
+		return r;
+	}
+
+	return 0;
 }
 
 static void __exit omap_drm_fini(void)
@@ -758,13 +749,15 @@ static void __exit omap_drm_fini(void)
 	DBG("fini");
 
 	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+
+	omap_dss_exit();
 }
 
-/* need late_initcall() so we load after dss_driver's are loaded */
-late_initcall(omap_drm_init);
+module_init(omap_drm_init);
 module_exit(omap_drm_fini);
 
 MODULE_AUTHOR("Rob Clark <rob@ti.com>");
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
 MODULE_DESCRIPTION("OMAP DRM Display Driver");
 MODULE_ALIAS("platform:" DRIVER_NAME);
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index ae57e7ada87698bcfe95eff705a6d6fb2caa985d..d6f136984da9434f774be3348680287668072354 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -12,11 +12,11 @@
 #include <linux/workqueue.h>
 
 #include "dss/omapdss.h"
+#include "dss/dss.h"
 
 #include <drm/drm_gem.h>
 #include <drm/omap_drm.h>
 
-#include "omap_connector.h"
 #include "omap_crtc.h"
 #include "omap_encoder.h"
 #include "omap_fb.h"
@@ -47,7 +47,6 @@ struct omap_drm_private {
 
 	struct dss_device *dss;
 	struct dispc_device *dispc;
-	const struct dispc_ops *dispc_ops;
 
 	unsigned int num_pipes;
 	struct omap_drm_pipeline pipes[8];
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 57e92a4d5937309f7ba2750137d40fb6e68b3f54..4dd05bc732daebcedbfcbbc9ba7dffee7415bdfc 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -75,7 +75,6 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
 {
 	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
 	struct omap_dss_device *output = omap_encoder->output;
-	struct omap_dss_device *dssdev;
 	struct drm_device *dev = encoder->dev;
 	struct drm_connector *connector;
 	struct drm_bridge *bridge;
@@ -98,9 +97,6 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
 	 *
 	 * A better solution is to use DRM's bus-flags through the whole driver.
 	 */
-	for (dssdev = output; dssdev; dssdev = dssdev->next)
-		omap_encoder_update_videomode_flags(&vm, dssdev->bus_flags);
-
 	for (bridge = output->bridge; bridge;
 	     bridge = drm_bridge_get_next_bridge(bridge)) {
 		if (!bridge->timings)
@@ -113,65 +109,12 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
 	bus_flags = connector->display_info.bus_flags;
 	omap_encoder_update_videomode_flags(&vm, bus_flags);
 
-	/* Set timings for the dss manager. */
+	/* Set timings for all devices in the display pipeline. */
 	dss_mgr_set_timings(output, &vm);
 }
 
-static void omap_encoder_disable(struct drm_encoder *encoder)
-{
-	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-	struct omap_dss_device *dssdev = omap_encoder->output;
-	struct drm_device *dev = encoder->dev;
-
-	dev_dbg(dev->dev, "disable(%s)\n", dssdev->name);
-
-	/*
-	 * Disable the chain of external devices, starting at the one at the
-	 * internal encoder's output. This is used for DSI outputs only, as
-	 * dssdev->next is NULL for all other outputs.
-	 */
-	omapdss_device_disable(dssdev->next);
-}
-
-static void omap_encoder_enable(struct drm_encoder *encoder)
-{
-	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-	struct omap_dss_device *dssdev = omap_encoder->output;
-	struct drm_device *dev = encoder->dev;
-
-	dev_dbg(dev->dev, "enable(%s)\n", dssdev->name);
-
-	/*
-	 * Enable the chain of external devices, starting at the one at the
-	 * internal encoder's output. This is used for DSI outputs only, as
-	 * dssdev->next is NULL for all other outputs.
-	 */
-	omapdss_device_enable(dssdev->next);
-}
-
-static int omap_encoder_atomic_check(struct drm_encoder *encoder,
-				     struct drm_crtc_state *crtc_state,
-				     struct drm_connector_state *conn_state)
-{
-	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-	enum drm_mode_status status;
-
-	status = omap_connector_mode_fixup(omap_encoder->output,
-					   &crtc_state->mode,
-					   &crtc_state->adjusted_mode);
-	if (status != MODE_OK) {
-		dev_err(encoder->dev->dev, "invalid timings: %d\n", status);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
 	.mode_set = omap_encoder_mode_set,
-	.disable = omap_encoder_disable,
-	.enable = omap_encoder_enable,
-	.atomic_check = omap_encoder_atomic_check,
 };
 
 /* initialize encoder */
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 30d299ca8795d6a945287b2c335d88b77f1e671a..38af6195d95936c03aba364b12d31cb3d3c7adb5 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1324,8 +1324,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
 		}
 
 		omap_obj->pages = pages;
-		ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL,
-						       npages);
+		ret = drm_prime_sg_to_page_array(sgt, pages, npages);
 		if (ret) {
 			omap_gem_free_object(obj);
 			obj = ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 97c83b959f7e4926cd69f08abcbe9a1694f0a089..15148d4b35b575963a99654e10d00a01d1ca5d1f 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -29,7 +29,7 @@ static void omap_irq_update(struct drm_device *dev)
 
 	DBG("irqmask=%08x", irqmask);
 
-	priv->dispc_ops->write_irqenable(priv->dispc, irqmask);
+	dispc_write_irqenable(priv->dispc, irqmask);
 }
 
 static void omap_irq_wait_handler(struct omap_irq_wait *wait)
@@ -83,7 +83,7 @@ int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable)
 	unsigned long flags;
 	enum omap_channel channel = omap_crtc_channel(crtc);
 	int framedone_irq =
-		priv->dispc_ops->mgr_get_framedone_irq(priv->dispc, channel);
+		dispc_mgr_get_framedone_irq(priv->dispc, channel);
 
 	DBG("dev=%p, crtc=%u, enable=%d", dev, channel, enable);
 
@@ -120,7 +120,7 @@ int omap_irq_enable_vblank(struct drm_crtc *crtc)
 	DBG("dev=%p, crtc=%u", dev, channel);
 
 	spin_lock_irqsave(&priv->wait_lock, flags);
-	priv->irq_mask |= priv->dispc_ops->mgr_get_vsync_irq(priv->dispc,
+	priv->irq_mask |= dispc_mgr_get_vsync_irq(priv->dispc,
 							     channel);
 	omap_irq_update(dev);
 	spin_unlock_irqrestore(&priv->wait_lock, flags);
@@ -146,7 +146,7 @@ void omap_irq_disable_vblank(struct drm_crtc *crtc)
 	DBG("dev=%p, crtc=%u", dev, channel);
 
 	spin_lock_irqsave(&priv->wait_lock, flags);
-	priv->irq_mask &= ~priv->dispc_ops->mgr_get_vsync_irq(priv->dispc,
+	priv->irq_mask &= ~dispc_mgr_get_vsync_irq(priv->dispc,
 							      channel);
 	omap_irq_update(dev);
 	spin_unlock_irqrestore(&priv->wait_lock, flags);
@@ -211,9 +211,9 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
 	unsigned int id;
 	u32 irqstatus;
 
-	irqstatus = priv->dispc_ops->read_irqstatus(priv->dispc);
-	priv->dispc_ops->clear_irqstatus(priv->dispc, irqstatus);
-	priv->dispc_ops->read_irqstatus(priv->dispc);	/* flush posted write */
+	irqstatus = dispc_read_irqstatus(priv->dispc);
+	dispc_clear_irqstatus(priv->dispc, irqstatus);
+	dispc_read_irqstatus(priv->dispc);	/* flush posted write */
 
 	VERB("irqs: %08x", irqstatus);
 
@@ -221,15 +221,15 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
 		struct drm_crtc *crtc = priv->pipes[id].crtc;
 		enum omap_channel channel = omap_crtc_channel(crtc);
 
-		if (irqstatus & priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel)) {
+		if (irqstatus & dispc_mgr_get_vsync_irq(priv->dispc, channel)) {
 			drm_handle_vblank(dev, id);
 			omap_crtc_vblank_irq(crtc);
 		}
 
-		if (irqstatus & priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, channel))
+		if (irqstatus & dispc_mgr_get_sync_lost_irq(priv->dispc, channel))
 			omap_crtc_error_irq(crtc, irqstatus);
 
-		if (irqstatus & priv->dispc_ops->mgr_get_framedone_irq(priv->dispc, channel))
+		if (irqstatus & dispc_mgr_get_framedone_irq(priv->dispc, channel))
 			omap_crtc_framedone_irq(crtc, irqstatus);
 	}
 
@@ -263,7 +263,7 @@ static const u32 omap_underflow_irqs[] = {
 int omap_drm_irq_install(struct drm_device *dev)
 {
 	struct omap_drm_private *priv = dev->dev_private;
-	unsigned int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc);
+	unsigned int num_mgrs = dispc_get_num_mgrs(priv->dispc);
 	unsigned int max_planes;
 	unsigned int i;
 	int ret;
@@ -281,13 +281,13 @@ int omap_drm_irq_install(struct drm_device *dev)
 	}
 
 	for (i = 0; i < num_mgrs; ++i)
-		priv->irq_mask |= priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, i);
+		priv->irq_mask |= dispc_mgr_get_sync_lost_irq(priv->dispc, i);
 
-	priv->dispc_ops->runtime_get(priv->dispc);
-	priv->dispc_ops->clear_irqstatus(priv->dispc, 0xffffffff);
-	priv->dispc_ops->runtime_put(priv->dispc);
+	dispc_runtime_get(priv->dispc);
+	dispc_clear_irqstatus(priv->dispc, 0xffffffff);
+	dispc_runtime_put(priv->dispc);
 
-	ret = priv->dispc_ops->request_irq(priv->dispc, omap_irq_handler, dev);
+	ret = dispc_request_irq(priv->dispc, omap_irq_handler, dev);
 	if (ret < 0)
 		return ret;
 
@@ -305,5 +305,5 @@ void omap_drm_irq_uninstall(struct drm_device *dev)
 
 	dev->irq_enabled = false;
 
-	priv->dispc_ops->free_irq(priv->dispc, dev);
+	dispc_free_irq(priv->dispc, dev);
 }
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 21e0b97855991a5606dde91ff642d04406c96a44..51dc24acea73e149cf699eb352e98d89590a80d0 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -59,6 +59,8 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
 		info.pre_mult_alpha = 1;
 	else
 		info.pre_mult_alpha = 0;
+	info.color_encoding = state->color_encoding;
+	info.color_range = state->color_range;
 
 	/* update scanout: */
 	omap_framebuffer_update_scanout(state->fb, state, &info);
@@ -70,17 +72,17 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
 			&info.paddr, &info.p_uv_addr);
 
 	/* and finally, update omapdss: */
-	ret = priv->dispc_ops->ovl_setup(priv->dispc, omap_plane->id, &info,
+	ret = dispc_ovl_setup(priv->dispc, omap_plane->id, &info,
 			      omap_crtc_timings(state->crtc), false,
 			      omap_crtc_channel(state->crtc));
 	if (ret) {
 		dev_err(plane->dev->dev, "Failed to setup plane %s\n",
 			omap_plane->name);
-		priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, false);
+		dispc_ovl_enable(priv->dispc, omap_plane->id, false);
 		return;
 	}
 
-	priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, true);
+	dispc_ovl_enable(priv->dispc, omap_plane->id, true);
 }
 
 static void omap_plane_atomic_disable(struct drm_plane *plane,
@@ -93,7 +95,7 @@ static void omap_plane_atomic_disable(struct drm_plane *plane,
 	plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY
 			   ? 0 : omap_plane->id;
 
-	priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, false);
+	dispc_ovl_enable(priv->dispc, omap_plane->id, false);
 }
 
 static int omap_plane_atomic_check(struct drm_plane *plane,
@@ -189,6 +191,8 @@ static void omap_plane_reset(struct drm_plane *plane)
 	 */
 	plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY
 			   ? 0 : omap_plane->id;
+	plane->state->color_encoding = DRM_COLOR_YCBCR_BT601;
+	plane->state->color_range = DRM_COLOR_YCBCR_FULL_RANGE;
 }
 
 static int omap_plane_atomic_set_property(struct drm_plane *plane,
@@ -232,6 +236,22 @@ static const struct drm_plane_funcs omap_plane_funcs = {
 	.atomic_get_property = omap_plane_atomic_get_property,
 };
 
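+/* Check whether this overlay's supported color modes include any YUV format. */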
+static bool omap_plane_supports_yuv(struct drm_plane *plane)
+{
+	struct omap_drm_private *priv = plane->dev->dev_private;
+	struct omap_plane *omap_plane = to_omap_plane(plane);
+	const u32 *formats = dispc_ovl_get_color_modes(priv->dispc, omap_plane->id);
+	u32 i;
+
+	for (i = 0; formats[i]; i++)
+		if (formats[i] == DRM_FORMAT_YUYV ||
+		    formats[i] == DRM_FORMAT_UYVY ||
+		    formats[i] == DRM_FORMAT_NV12)
+			return true;
+
+	return false;
+}
+
 static const char *plane_id_to_name[] = {
 	[OMAP_DSS_GFX] = "gfx",
 	[OMAP_DSS_VIDEO1] = "vid1",
@@ -252,7 +272,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
 		u32 possible_crtcs)
 {
 	struct omap_drm_private *priv = dev->dev_private;
-	unsigned int num_planes = priv->dispc_ops->get_num_ovls(priv->dispc);
+	unsigned int num_planes = dispc_get_num_ovls(priv->dispc);
 	struct drm_plane *plane;
 	struct omap_plane *omap_plane;
 	enum omap_plane_id id;
@@ -271,7 +291,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
 	if (!omap_plane)
 		return ERR_PTR(-ENOMEM);
 
-	formats = priv->dispc_ops->ovl_get_color_modes(priv->dispc, id);
+	formats = dispc_ovl_get_color_modes(priv->dispc, id);
 	for (nformats = 0; formats[nformats]; ++nformats)
 		;
 	omap_plane->id = id;
@@ -293,6 +313,15 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
 	drm_plane_create_blend_mode_property(plane, BIT(DRM_MODE_BLEND_PREMULTI) |
 					     BIT(DRM_MODE_BLEND_COVERAGE));
 
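+	/* Expose YCbCr encoding/range properties only on YUV-capable planes. */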
+	if (omap_plane_supports_yuv(plane))
+		drm_plane_create_color_properties(plane,
+						  BIT(DRM_COLOR_YCBCR_BT601) |
+						  BIT(DRM_COLOR_YCBCR_BT709),
+						  BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
+						  BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
+						  DRM_COLOR_YCBCR_BT601,
+						  DRM_COLOR_YCBCR_FULL_RANGE);
+
 	return plane;
 
 error:
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index 9e1acbd2c7aa2e9632a03a694034d7ec4f76eab0..8338dc66530153b760b2b43a978c5281f4699360 100644
--- a/drivers/gpu/drm/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
@@ -254,6 +254,5 @@ struct tcm *sita_init(u16 width, u16 height)
 	return tcm;
 
 error:
-	kfree(tcm);
 	return NULL;
 }
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index b4e021ea30f9ce3e00d4f06cac8dc2e3c7681939..4894913936e9712b88dc1fdfca2ed7e9450327e0 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -57,6 +57,15 @@ config DRM_PANEL_BOE_TV101WUM_NL6
 	  Say Y here if you want support for the BOE TV101WUM and AUO KD101N80
 	  45NA WUXGA DSI video mode panels.
 
+config DRM_PANEL_DSI_CM
+	tristate "Generic DSI command mode panels"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  DRM panel driver for DSI command mode panels with support for
+	  embedded and external backlights.
+
 config DRM_PANEL_LVDS
 	tristate "Generic LVDS panel driver"
 	depends on OF
@@ -145,6 +154,17 @@ config DRM_PANEL_JDI_LT070ME05000
 	  The panel has a 1200(RGB)×1920 (WUXGA) resolution and uses
 	  24 bit per pixel.
 
+config DRM_PANEL_KHADAS_TS050
+	tristate "Khadas TS050 panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for the Khadas TS050 TFT-LCD
+	  panel module. The panel has a 1080x1920 resolution and uses
+	  24-bit RGB per pixel. It provides a MIPI DSI interface to
+	  the host, a built-in LED backlight and a touch controller.
+
 config DRM_PANEL_KINGDISPLAY_KD097D04
 	tristate "Kingdisplay kd097d04 panel"
 	depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index ebbf488c7eac4bce595ea5b203e33ca032c44ea0..cae4d976c06959ef53b64d1e5ea1d2823228926e 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
 obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.o
 obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
 obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
+obj-$(CONFIG_DRM_PANEL_DSI_CM) += panel-dsi-cm.o
 obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
 obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
 obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
@@ -13,6 +14,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
 obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
 obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
 obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
+obj-$(CONFIG_DRM_PANEL_KHADAS_TS050) += panel-khadas-ts050.o
 obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
 obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
 obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
new file mode 100644
index 0000000000000000000000000000000000000000..af381d756ac1f6c196f998999194a58858b4f0b4
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic DSI Command Mode panel driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#define DCS_GET_ID1		0xda
+#define DCS_GET_ID2		0xdb
+#define DCS_GET_ID3		0xdc
+
+#define DCS_REGULATOR_SUPPLY_NUM 2
+
+static const struct of_device_id dsicm_of_match[];
+
+struct dsic_panel_data {
+	u32 xres;
+	u32 yres;
+	u32 refresh;
+	u32 width_mm;
+	u32 height_mm;
+	u32 max_hs_rate;
+	u32 max_lp_rate;
+};
+
+struct panel_drv_data {
+	struct mipi_dsi_device *dsi;
+	struct drm_panel panel;
+	struct drm_display_mode mode;
+
+	struct mutex lock;
+
+	struct backlight_device *bldev;
+	struct backlight_device *extbldev;
+
+	unsigned long	hw_guard_end;	/* next value of jiffies when we can
+					 * issue the next sleep in/out command
+					 */
+	unsigned long	hw_guard_wait;	/* max guard time in jiffies */
+
+	const struct dsic_panel_data *panel_data;
+
+	struct gpio_desc *reset_gpio;
+
+	struct regulator_bulk_data supplies[DCS_REGULATOR_SUPPLY_NUM];
+
+	bool use_dsi_backlight;
+
+	/* runtime variables */
+	bool enabled;
+
+	bool intro_printed;
+};
+
+static inline struct panel_drv_data *panel_to_ddata(struct drm_panel *panel)
+{
+	return container_of(panel, struct panel_drv_data, panel);
+}
+
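+/* Power the native DSI backlight if registered, the external one otherwise. */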
+static void dsicm_bl_power(struct panel_drv_data *ddata, bool enable)
+{
+	struct backlight_device *backlight;
+
+	if (ddata->bldev)
+		backlight = ddata->bldev;
+	else if (ddata->extbldev)
+		backlight = ddata->extbldev;
+	else
+		return;
+
+	if (enable) {
+		backlight->props.fb_blank = FB_BLANK_UNBLANK;
+		backlight->props.state &= ~(BL_CORE_FBBLANK | BL_CORE_SUSPENDED);
+		backlight->props.power = FB_BLANK_UNBLANK;
+	} else {
+		backlight->props.fb_blank = FB_BLANK_NORMAL;
+		backlight->props.power = FB_BLANK_POWERDOWN;
+		backlight->props.state |= BL_CORE_FBBLANK | BL_CORE_SUSPENDED;
+	}
+
+	backlight_update_status(backlight);
+}
+
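+/*
+ * DCS sleep in/out commands must be separated by a guard interval (120 ms
+ * for this panel); record the deadline and wait it out before the next one.
+ */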
+static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
+{
+	ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
+	ddata->hw_guard_end = jiffies + ddata->hw_guard_wait;
+}
+
+static void hw_guard_wait(struct panel_drv_data *ddata)
+{
+	unsigned long wait = ddata->hw_guard_end - jiffies;
+
+	if ((long)wait > 0 && wait <= ddata->hw_guard_wait) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(wait);
+	}
+}
+
+static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
+{
+	return mipi_dsi_dcs_read(ddata->dsi, dcs_cmd, data, 1);
+}
+
+static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
+{
+	return mipi_dsi_dcs_write(ddata->dsi, dcs_cmd, &param, 1);
+}
+
+static int dsicm_sleep_in(struct panel_drv_data *ddata)
+{
+	int r;
+
+	hw_guard_wait(ddata);
+
+	r = mipi_dsi_dcs_enter_sleep_mode(ddata->dsi);
+	if (r)
+		return r;
+
+	hw_guard_start(ddata, 120);
+
+	usleep_range(5000, 10000);
+
+	return 0;
+}
+
+static int dsicm_sleep_out(struct panel_drv_data *ddata)
+{
+	int r;
+
+	hw_guard_wait(ddata);
+
+	r = mipi_dsi_dcs_exit_sleep_mode(ddata->dsi);
+	if (r)
+		return r;
+
+	hw_guard_start(ddata, 120);
+
+	usleep_range(5000, 10000);
+
+	return 0;
+}
+
+static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3)
+{
+	int r;
+
+	r = dsicm_dcs_read_1(ddata, DCS_GET_ID1, id1);
+	if (r)
+		return r;
+	r = dsicm_dcs_read_1(ddata, DCS_GET_ID2, id2);
+	if (r)
+		return r;
+	r = dsicm_dcs_read_1(ddata, DCS_GET_ID3, id3);
+	if (r)
+		return r;
+
+	return 0;
+}
+
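+/* Program the full panel area as the DCS column/page update window. */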
+static int dsicm_set_update_window(struct panel_drv_data *ddata)
+{
+	struct mipi_dsi_device *dsi = ddata->dsi;
+	int r;
+
+	r = mipi_dsi_dcs_set_column_address(dsi, 0, ddata->mode.hdisplay - 1);
+	if (r < 0)
+		return r;
+
+	r = mipi_dsi_dcs_set_page_address(dsi, 0, ddata->mode.vdisplay - 1);
+	if (r < 0)
+		return r;
+
+	return 0;
+}
+
+static int dsicm_bl_update_status(struct backlight_device *dev)
+{
+	struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
+	int r = 0;
+	int level;
+
+	if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+			dev->props.power == FB_BLANK_UNBLANK)
+		level = dev->props.brightness;
+	else
+		level = 0;
+
+	dev_dbg(&ddata->dsi->dev, "update brightness to %d\n", level);
+
+	mutex_lock(&ddata->lock);
+
+	if (ddata->enabled)
+		r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+				      level);
+
+	mutex_unlock(&ddata->lock);
+
+	return r;
+}
+
+static int dsicm_bl_get_intensity(struct backlight_device *dev)
+{
+	if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+			dev->props.power == FB_BLANK_UNBLANK)
+		return dev->props.brightness;
+
+	return 0;
+}
+
+static const struct backlight_ops dsicm_bl_ops = {
+	.get_brightness = dsicm_bl_get_intensity,
+	.update_status  = dsicm_bl_update_status,
+};
+
+static ssize_t num_dsi_errors_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct panel_drv_data *ddata = dev_get_drvdata(dev);
+	u8 errors = 0;
+	int r = -ENODEV;
+
+	mutex_lock(&ddata->lock);
+
+	if (ddata->enabled)
+		r = dsicm_dcs_read_1(ddata, MIPI_DCS_GET_ERROR_COUNT_ON_DSI, &errors);
+
+	mutex_unlock(&ddata->lock);
+
+	if (r)
+		return r;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", errors);
+}
+
+static ssize_t hw_revision_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct panel_drv_data *ddata = dev_get_drvdata(dev);
+	u8 id1, id2, id3;
+	int r = -ENODEV;
+
+	mutex_lock(&ddata->lock);
+
+	if (ddata->enabled)
+		r = dsicm_get_id(ddata, &id1, &id2, &id3);
+
+	mutex_unlock(&ddata->lock);
+
+	if (r)
+		return r;
+
+	return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
+}
+
+static DEVICE_ATTR_RO(num_dsi_errors);
+static DEVICE_ATTR_RO(hw_revision);
+
+static struct attribute *dsicm_attrs[] = {
+	&dev_attr_num_dsi_errors.attr,
+	&dev_attr_hw_revision.attr,
+	NULL,
+};
+
+static const struct attribute_group dsicm_attr_group = {
+	.attrs = dsicm_attrs,
+};
+
+static void dsicm_hw_reset(struct panel_drv_data *ddata)
+{
+	gpiod_set_value(ddata->reset_gpio, 1);
+	udelay(10);
+	/* reset the panel */
+	gpiod_set_value(ddata->reset_gpio, 0);
+	/* assert reset */
+	udelay(10);
+	gpiod_set_value(ddata->reset_gpio, 1);
+	/* wait after releasing reset */
+	usleep_range(5000, 10000);
+}
+
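+/*
+ * Bring the panel up: HW reset, sleep out, then the DCS init sequence, all
+ * in LP mode; switch back to HS once the panel is running.
+ */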
+static int dsicm_power_on(struct panel_drv_data *ddata)
+{
+	u8 id1, id2, id3;
+	int r;
+
+	dsicm_hw_reset(ddata);
+
+	ddata->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+	r = dsicm_sleep_out(ddata);
+	if (r)
+		goto err;
+
+	r = dsicm_get_id(ddata, &id1, &id2, &id3);
+	if (r)
+		goto err;
+
+	r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0xff);
+	if (r)
+		goto err;
+
+	r = dsicm_dcs_write_1(ddata, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+			(1<<2) | (1<<5));	/* BL | BCTRL */
+	if (r)
+		goto err;
+
+	r = mipi_dsi_dcs_set_pixel_format(ddata->dsi, MIPI_DCS_PIXEL_FMT_24BIT);
+	if (r)
+		goto err;
+
+	r = dsicm_set_update_window(ddata);
+	if (r)
+		goto err;
+
+	r = mipi_dsi_dcs_set_display_on(ddata->dsi);
+	if (r)
+		goto err;
+
+	r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+	if (r)
+		goto err;
+
+	/* possible panel bug */
+	msleep(100);
+
+	ddata->enabled = true;
+
+	if (!ddata->intro_printed) {
+		dev_info(&ddata->dsi->dev, "panel revision %02x.%02x.%02x\n",
+			id1, id2, id3);
+		ddata->intro_printed = true;
+	}
+
+	ddata->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+	return 0;
+err:
+	dev_err(&ddata->dsi->dev, "error while enabling panel, issuing HW reset\n");
+
+	dsicm_hw_reset(ddata);
+
+	return r;
+}
+
+static int dsicm_power_off(struct panel_drv_data *ddata)
+{
+	int r;
+
+	ddata->enabled = false;
+
+	r = mipi_dsi_dcs_set_display_off(ddata->dsi);
+	if (!r)
+		r = dsicm_sleep_in(ddata);
+
+	if (r) {
+		dev_err(&ddata->dsi->dev,
+				"error disabling panel, issuing HW reset\n");
+		dsicm_hw_reset(ddata);
+	}
+
+	return r;
+}
+
+static int dsicm_prepare(struct drm_panel *panel)
+{
+	struct panel_drv_data *ddata = panel_to_ddata(panel);
+	int r;
+
+	r = regulator_bulk_enable(ARRAY_SIZE(ddata->supplies), ddata->supplies);
+	if (r)
+		dev_err(&ddata->dsi->dev, "failed to enable supplies: %d\n", r);
+
+	return r;
+}
+
+static int dsicm_enable(struct drm_panel *panel)
+{
+	struct panel_drv_data *ddata = panel_to_ddata(panel);
+	int r;
+
+	mutex_lock(&ddata->lock);
+
+	r = dsicm_power_on(ddata);
+	if (r)
+		goto err;
+
+	mutex_unlock(&ddata->lock);
+
+	dsicm_bl_power(ddata, true);
+
+	return 0;
+err:
+	dev_err(&ddata->dsi->dev, "enable failed (%d)\n", r);
+	mutex_unlock(&ddata->lock);
+	return r;
+}
+
+static int dsicm_unprepare(struct drm_panel *panel)
+{
+	struct panel_drv_data *ddata = panel_to_ddata(panel);
+	int r;
+
+	r = regulator_bulk_disable(ARRAY_SIZE(ddata->supplies), ddata->supplies);
+	if (r)
+		dev_err(&ddata->dsi->dev, "failed to disable supplies: %d\n", r);
+
+	return r;
+}
+
+static int dsicm_disable(struct drm_panel *panel)
+{
+	struct panel_drv_data *ddata = panel_to_ddata(panel);
+	int r;
+
+	dsicm_bl_power(ddata, false);
+
+	mutex_lock(&ddata->lock);
+
+	r = dsicm_power_off(ddata);
+
+	mutex_unlock(&ddata->lock);
+
+	return r;
+}
+
+static int dsicm_get_modes(struct drm_panel *panel,
+			   struct drm_connector *connector)
+{
+	struct panel_drv_data *ddata = panel_to_ddata(panel);
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(connector->dev, &ddata->mode);
+	if (!mode) {
+		dev_err(&ddata->dsi->dev, "failed to add mode %ux%u@%u kHz\n",
+			ddata->mode.hdisplay, ddata->mode.vdisplay,
+			ddata->mode.clock);
+		return -ENOMEM;
+	}
+
+	connector->display_info.width_mm = ddata->panel_data->width_mm;
+	connector->display_info.height_mm = ddata->panel_data->height_mm;
+
+	drm_mode_probed_add(connector, mode);
+
+	return 1;
+}
+
+static const struct drm_panel_funcs dsicm_panel_funcs = {
+	.unprepare = dsicm_unprepare,
+	.disable = dsicm_disable,
+	.prepare = dsicm_prepare,
+	.enable = dsicm_enable,
+	.get_modes = dsicm_get_modes,
+};
+
+static int dsicm_probe_of(struct mipi_dsi_device *dsi)
+{
+	struct backlight_device *backlight;
+	struct panel_drv_data *ddata = mipi_dsi_get_drvdata(dsi);
+	int err;
+	struct drm_display_mode *mode = &ddata->mode;
+
+	ddata->reset_gpio = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(ddata->reset_gpio)) {
+		err = PTR_ERR(ddata->reset_gpio);
+		dev_err(&dsi->dev, "reset gpio request failed: %d\n", err);
+		return err;
+	}
+
+	mode->hdisplay = mode->hsync_start = mode->hsync_end = mode->htotal =
+		ddata->panel_data->xres;
+	mode->vdisplay = mode->vsync_start = mode->vsync_end = mode->vtotal =
+		ddata->panel_data->yres;
+	mode->clock = ddata->panel_data->xres * ddata->panel_data->yres *
+		ddata->panel_data->refresh / 1000;
+	mode->width_mm = ddata->panel_data->width_mm;
+	mode->height_mm = ddata->panel_data->height_mm;
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	drm_mode_set_name(mode);
+
+	ddata->supplies[0].supply = "vpnl";
+	ddata->supplies[1].supply = "vddi";
+	err = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ddata->supplies),
+				      ddata->supplies);
+	if (err)
+		return err;
+
+	backlight = devm_of_find_backlight(&dsi->dev);
+	if (IS_ERR(backlight))
+		return PTR_ERR(backlight);
+
+	/* If no backlight device is found assume native backlight support */
+	if (backlight)
+		ddata->extbldev = backlight;
+	else
+		ddata->use_dsi_backlight = true;
+
+	return 0;
+}
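+
+/*
+ * Worked example (illustrative): the mode synthesized above has zero
+ * blanking (htotal == hdisplay, vtotal == vdisplay), as is typical for
+ * command-mode panels where pixels are pushed on demand rather than
+ * streamed. For the droid4_data timings below this gives a pixel clock
+ * of 540 * 960 * 60 / 1000 = 31104 kHz, i.e. roughly 31.1 MHz.
+ */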
+
+static int dsicm_probe(struct mipi_dsi_device *dsi)
+{
+	struct panel_drv_data *ddata;
+	struct backlight_device *bldev = NULL;
+	struct device *dev = &dsi->dev;
+	int r;
+
+	dev_dbg(dev, "probe\n");
+
+	ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+	if (!ddata)
+		return -ENOMEM;
+
+	mipi_dsi_set_drvdata(dsi, ddata);
+	ddata->dsi = dsi;
+
+	ddata->panel_data = of_device_get_match_data(dev);
+	if (!ddata->panel_data)
+		return -ENODEV;
+
+	r = dsicm_probe_of(dsi);
+	if (r)
+		return r;
+
+	mutex_init(&ddata->lock);
+
+	dsicm_hw_reset(ddata);
+
+	drm_panel_init(&ddata->panel, dev, &dsicm_panel_funcs,
+		       DRM_MODE_CONNECTOR_DSI);
+
+	if (ddata->use_dsi_backlight) {
+		struct backlight_properties props = { 0 };
+		props.max_brightness = 255;
+		props.type = BACKLIGHT_RAW;
+
+		bldev = devm_backlight_device_register(dev, dev_name(dev),
+			dev, ddata, &dsicm_bl_ops, &props);
+		if (IS_ERR(bldev)) {
+			r = PTR_ERR(bldev);
+			goto err_bl;
+		}
+
+		ddata->bldev = bldev;
+	}
+
+	r = sysfs_create_group(&dev->kobj, &dsicm_attr_group);
+	if (r) {
+		dev_err(dev, "failed to create sysfs files\n");
+		goto err_bl;
+	}
+
+	dsi->lanes = 2;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+			  MIPI_DSI_MODE_EOT_PACKET;
+	dsi->hs_rate = ddata->panel_data->max_hs_rate;
+	dsi->lp_rate = ddata->panel_data->max_lp_rate;
+
+	drm_panel_add(&ddata->panel);
+
+	r = mipi_dsi_attach(dsi);
+	if (r < 0)
+		goto err_dsi_attach;
+
+	return 0;
+
+err_dsi_attach:
+	drm_panel_remove(&ddata->panel);
+	sysfs_remove_group(&dsi->dev.kobj, &dsicm_attr_group);
+err_bl:
+	if (ddata->extbldev)
+		put_device(&ddata->extbldev->dev);
+
+	return r;
+}
+
+static int dsicm_remove(struct mipi_dsi_device *dsi)
+{
+	struct panel_drv_data *ddata = mipi_dsi_get_drvdata(dsi);
+
+	dev_dbg(&dsi->dev, "remove\n");
+
+	mipi_dsi_detach(dsi);
+
+	drm_panel_remove(&ddata->panel);
+
+	sysfs_remove_group(&dsi->dev.kobj, &dsicm_attr_group);
+
+	if (ddata->extbldev)
+		put_device(&ddata->extbldev->dev);
+
+	return 0;
+}
+
+static const struct dsic_panel_data taal_data = {
+	.xres = 864,
+	.yres = 480,
+	.refresh = 60,
+	.width_mm = 0,
+	.height_mm = 0,
+	.max_hs_rate = 300000000,
+	.max_lp_rate = 10000000,
+};
+
+static const struct dsic_panel_data himalaya_data = {
+	.xres = 480,
+	.yres = 864,
+	.refresh = 60,
+	.width_mm = 49,
+	.height_mm = 88,
+	.max_hs_rate = 300000000,
+	.max_lp_rate = 10000000,
+};
+
+static const struct dsic_panel_data droid4_data = {
+	.xres = 540,
+	.yres = 960,
+	.refresh = 60,
+	.width_mm = 50,
+	.height_mm = 89,
+	.max_hs_rate = 300000000,
+	.max_lp_rate = 10000000,
+};
+
+static const struct of_device_id dsicm_of_match[] = {
+	{ .compatible = "tpo,taal", .data = &taal_data },
+	{ .compatible = "nokia,himalaya", &himalaya_data },
+	{ .compatible = "motorola,droid4-panel", &droid4_data },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, dsicm_of_match);
+
+static struct mipi_dsi_driver dsicm_driver = {
+	.probe = dsicm_probe,
+	.remove = dsicm_remove,
+	.driver = {
+		.name = "panel-dsi-cm",
+		.of_match_table = dsicm_of_match,
+	},
+};
+module_mipi_dsi_driver(dsicm_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("Generic DSI Command Mode Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c
new file mode 100644
index 0000000000000000000000000000000000000000..8f6ac1a40c31bd1bed85711b40eaa1213f337ee2
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c
@@ -0,0 +1,870 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct khadas_ts050_panel {
+	struct drm_panel base;
+	struct mipi_dsi_device *link;
+
+	struct regulator *supply;
+	struct gpio_desc *reset_gpio;
+	struct gpio_desc *enable_gpio;
+
+	bool prepared;
+	bool enabled;
+};
+
+struct khadas_ts050_panel_cmd {
+	u8 cmd;
+	u8 data;
+};
+
+/* Only the CMD1 User Command set is documented */
+static const struct khadas_ts050_panel_cmd init_code[] = {
+	/* Select Unknown CMD Page (Undocumented) */
+	{0xff, 0xee},
+	/* Reload CMD1: Don't reload default value to register */
+	{0xfb, 0x01},
+	{0x1f, 0x45},
+	{0x24, 0x4f},
+	{0x38, 0xc8},
+	{0x39, 0x27},
+	{0x1e, 0x77},
+	{0x1d, 0x0f},
+	{0x7e, 0x71},
+	{0x7c, 0x03},
+	{0xff, 0x00},
+	{0xfb, 0x01},
+	{0x35, 0x01},
+	/* Select CMD2 Page0 (Undocumented) */
+	{0xff, 0x01},
+	/* Reload CMD1: Don't reload default value to register */
+	{0xfb, 0x01},
+	{0x00, 0x01},
+	{0x01, 0x55},
+	{0x02, 0x40},
+	{0x05, 0x40},
+	{0x06, 0x4a},
+	{0x07, 0x24},
+	{0x08, 0x0c},
+	{0x0b, 0x7d},
+	{0x0c, 0x7d},
+	{0x0e, 0xb0},
+	{0x0f, 0xae},
+	{0x11, 0x10},
+	{0x12, 0x10},
+	{0x13, 0x03},
+	{0x14, 0x4a},
+	{0x15, 0x12},
+	{0x16, 0x12},
+	{0x18, 0x00},
+	{0x19, 0x77},
+	{0x1a, 0x55},
+	{0x1b, 0x13},
+	{0x1c, 0x00},
+	{0x1d, 0x00},
+	{0x1e, 0x13},
+	{0x1f, 0x00},
+	{0x23, 0x00},
+	{0x24, 0x00},
+	{0x25, 0x00},
+	{0x26, 0x00},
+	{0x27, 0x00},
+	{0x28, 0x00},
+	{0x35, 0x00},
+	{0x66, 0x00},
+	{0x58, 0x82},
+	{0x59, 0x02},
+	{0x5a, 0x02},
+	{0x5b, 0x02},
+	{0x5c, 0x82},
+	{0x5d, 0x82},
+	{0x5e, 0x02},
+	{0x5f, 0x02},
+	{0x72, 0x31},
+	/* Select CMD2 Page4 (Undocumented) */
+	{0xff, 0x05},
+	/* Reload CMD1: Don't reload default value to register */
+	{0xfb, 0x01},
+	{0x00, 0x01},
+	{0x01, 0x0b},
+	{0x02, 0x0c},
+	{0x03, 0x09},
+	{0x04, 0x0a},
+	{0x05, 0x00},
+	{0x06, 0x0f},
+	{0x07, 0x10},
+	{0x08, 0x00},
+	{0x09, 0x00},
+	{0x0a, 0x00},
+	{0x0b, 0x00},
+	{0x0c, 0x00},
+	{0x0d, 0x13},
+	{0x0e, 0x15},
+	{0x0f, 0x17},
+	{0x10, 0x01},
+	{0x11, 0x0b},
+	{0x12, 0x0c},
+	{0x13, 0x09},
+	{0x14, 0x0a},
+	{0x15, 0x00},
+	{0x16, 0x0f},
+	{0x17, 0x10},
+	{0x18, 0x00},
+	{0x19, 0x00},
+	{0x1a, 0x00},
+	{0x1b, 0x00},
+	{0x1c, 0x00},
+	{0x1d, 0x13},
+	{0x1e, 0x15},
+	{0x1f, 0x17},
+	{0x20, 0x00},
+	{0x21, 0x03},
+	{0x22, 0x01},
+	{0x23, 0x40},
+	{0x24, 0x40},
+	{0x25, 0xed},
+	{0x29, 0x58},
+	{0x2a, 0x12},
+	{0x2b, 0x01},
+	{0x4b, 0x06},
+	{0x4c, 0x11},
+	{0x4d, 0x20},
+	{0x4e, 0x02},
+	{0x4f, 0x02},
+	{0x50, 0x20},
+	{0x51, 0x61},
+	{0x52, 0x01},
+	{0x53, 0x63},
+	{0x54, 0x77},
+	{0x55, 0xed},
+	{0x5b, 0x00},
+	{0x5c, 0x00},
+	{0x5d, 0x00},
+	{0x5e, 0x00},
+	{0x5f, 0x15},
+	{0x60, 0x75},
+	{0x61, 0x00},
+	{0x62, 0x00},
+	{0x63, 0x00},
+	{0x64, 0x00},
+	{0x65, 0x00},
+	{0x66, 0x00},
+	{0x67, 0x00},
+	{0x68, 0x04},
+	{0x69, 0x00},
+	{0x6a, 0x00},
+	{0x6c, 0x40},
+	{0x75, 0x01},
+	{0x76, 0x01},
+	{0x7a, 0x80},
+	{0x7b, 0xa3},
+	{0x7c, 0xd8},
+	{0x7d, 0x60},
+	{0x7f, 0x15},
+	{0x80, 0x81},
+	{0x83, 0x05},
+	{0x93, 0x08},
+	{0x94, 0x10},
+	{0x8a, 0x00},
+	{0x9b, 0x0f},
+	{0xea, 0xff},
+	{0xec, 0x00},
+	/* Select CMD2 Page0 (Undocumented) */
+	{0xff, 0x01},
+	/* Reload CMD1: Don't reload default value to register */
+	{0xfb, 0x01},
+	{0x75, 0x00},
+	{0x76, 0xdf},
+	{0x77, 0x00},
+	{0x78, 0xe4},
+	{0x79, 0x00},
+	{0x7a, 0xed},
+	{0x7b, 0x00},
+	{0x7c, 0xf6},
+	{0x7d, 0x00},
+	{0x7e, 0xff},
+	{0x7f, 0x01},
+	{0x80, 0x07},
+	{0x81, 0x01},
+	{0x82, 0x10},
+	{0x83, 0x01},
+	{0x84, 0x18},
+	{0x85, 0x01},
+	{0x86, 0x20},
+	{0x87, 0x01},
+	{0x88, 0x3d},
+	{0x89, 0x01},
+	{0x8a, 0x56},
+	{0x8b, 0x01},
+	{0x8c, 0x84},
+	{0x8d, 0x01},
+	{0x8e, 0xab},
+	{0x8f, 0x01},
+	{0x90, 0xec},
+	{0x91, 0x02},
+	{0x92, 0x22},
+	{0x93, 0x02},
+	{0x94, 0x23},
+	{0x95, 0x02},
+	{0x96, 0x55},
+	{0x97, 0x02},
+	{0x98, 0x8b},
+	{0x99, 0x02},
+	{0x9a, 0xaf},
+	{0x9b, 0x02},
+	{0x9c, 0xdf},
+	{0x9d, 0x03},
+	{0x9e, 0x01},
+	{0x9f, 0x03},
+	{0xa0, 0x2c},
+	{0xa2, 0x03},
+	{0xa3, 0x39},
+	{0xa4, 0x03},
+	{0xa5, 0x47},
+	{0xa6, 0x03},
+	{0xa7, 0x56},
+	{0xa9, 0x03},
+	{0xaa, 0x66},
+	{0xab, 0x03},
+	{0xac, 0x76},
+	{0xad, 0x03},
+	{0xae, 0x85},
+	{0xaf, 0x03},
+	{0xb0, 0x90},
+	{0xb1, 0x03},
+	{0xb2, 0xcb},
+	{0xb3, 0x00},
+	{0xb4, 0xdf},
+	{0xb5, 0x00},
+	{0xb6, 0xe4},
+	{0xb7, 0x00},
+	{0xb8, 0xed},
+	{0xb9, 0x00},
+	{0xba, 0xf6},
+	{0xbb, 0x00},
+	{0xbc, 0xff},
+	{0xbd, 0x01},
+	{0xbe, 0x07},
+	{0xbf, 0x01},
+	{0xc0, 0x10},
+	{0xc1, 0x01},
+	{0xc2, 0x18},
+	{0xc3, 0x01},
+	{0xc4, 0x20},
+	{0xc5, 0x01},
+	{0xc6, 0x3d},
+	{0xc7, 0x01},
+	{0xc8, 0x56},
+	{0xc9, 0x01},
+	{0xca, 0x84},
+	{0xcb, 0x01},
+	{0xcc, 0xab},
+	{0xcd, 0x01},
+	{0xce, 0xec},
+	{0xcf, 0x02},
+	{0xd0, 0x22},
+	{0xd1, 0x02},
+	{0xd2, 0x23},
+	{0xd3, 0x02},
+	{0xd4, 0x55},
+	{0xd5, 0x02},
+	{0xd6, 0x8b},
+	{0xd7, 0x02},
+	{0xd8, 0xaf},
+	{0xd9, 0x02},
+	{0xda, 0xdf},
+	{0xdb, 0x03},
+	{0xdc, 0x01},
+	{0xdd, 0x03},
+	{0xde, 0x2c},
+	{0xdf, 0x03},
+	{0xe0, 0x39},
+	{0xe1, 0x03},
+	{0xe2, 0x47},
+	{0xe3, 0x03},
+	{0xe4, 0x56},
+	{0xe5, 0x03},
+	{0xe6, 0x66},
+	{0xe7, 0x03},
+	{0xe8, 0x76},
+	{0xe9, 0x03},
+	{0xea, 0x85},
+	{0xeb, 0x03},
+	{0xec, 0x90},
+	{0xed, 0x03},
+	{0xee, 0xcb},
+	{0xef, 0x00},
+	{0xf0, 0xbb},
+	{0xf1, 0x00},
+	{0xf2, 0xc0},
+	{0xf3, 0x00},
+	{0xf4, 0xcc},
+	{0xf5, 0x00},
+	{0xf6, 0xd6},
+	{0xf7, 0x00},
+	{0xf8, 0xe1},
+	{0xf9, 0x00},
+	{0xfa, 0xea},
+	/* Select CMD2 Page2 (Undocumented) */
+	{0xff, 0x02},
+	/* Reload CMD1: Don't reload default value to register */
+	{0xfb, 0x01},
+	{0x00, 0x00},
+	{0x01, 0xf4},
+	{0x02, 0x00},
+	{0x03, 0xef},
+	{0x04, 0x01},
+	{0x05, 0x07},
+	{0x06, 0x01},
+	{0x07, 0x28},
+	{0x08, 0x01},
+	{0x09, 0x44},
+	{0x0a, 0x01},
+	{0x0b, 0x76},
+	{0x0c, 0x01},
+	{0x0d, 0xa0},
+	{0x0e, 0x01},
+	{0x0f, 0xe7},
+	{0x10, 0x02},
+	{0x11, 0x1f},
+	{0x12, 0x02},
+	{0x13, 0x22},
+	{0x14, 0x02},
+	{0x15, 0x54},
+	{0x16, 0x02},
+	{0x17, 0x8b},
+	{0x18, 0x02},
+	{0x19, 0xaf},
+	{0x1a, 0x02},
+	{0x1b, 0xe0},
+	{0x1c, 0x03},
+	{0x1d, 0x01},
+	{0x1e, 0x03},
+	{0x1f, 0x2d},
+	{0x20, 0x03},
+	{0x21, 0x39},
+	{0x22, 0x03},
+	{0x23, 0x47},
+	{0x24, 0x03},
+	{0x25, 0x57},
+	{0x26, 0x03},
+	{0x27, 0x65},
+	{0x28, 0x03},
+	{0x29, 0x77},
+	{0x2a, 0x03},
+	{0x2b, 0x85},
+	{0x2d, 0x03},
+	{0x2f, 0x8f},
+	{0x30, 0x03},
+	{0x31, 0xcb},
+	{0x32, 0x00},
+	{0x33, 0xbb},
+	{0x34, 0x00},
+	{0x35, 0xc0},
+	{0x36, 0x00},
+	{0x37, 0xcc},
+	{0x38, 0x00},
+	{0x39, 0xd6},
+	{0x3a, 0x00},
+	{0x3b, 0xe1},
+	{0x3d, 0x00},
+	{0x3f, 0xea},
+	{0x40, 0x00},
+	{0x41, 0xf4},
+	{0x42, 0x00},
+	{0x43, 0xfe},
+	{0x44, 0x01},
+	{0x45, 0x07},
+	{0x46, 0x01},
+	{0x47, 0x28},
+	{0x48, 0x01},
+	{0x49, 0x44},
+	{0x4a, 0x01},
+	{0x4b, 0x76},
+	{0x4c, 0x01},
+	{0x4d, 0xa0},
+	{0x4e, 0x01},
+	{0x4f, 0xe7},
+	{0x50, 0x02},
+	{0x51, 0x1f},
+	{0x52, 0x02},
+	{0x53, 0x22},
+	{0x54, 0x02},
+	{0x55, 0x54},
+	{0x56, 0x02},
+	{0x58, 0x8b},
+	{0x59, 0x02},
+	{0x5a, 0xaf},
+	{0x5b, 0x02},
+	{0x5c, 0xe0},
+	{0x5d, 0x03},
+	{0x5e, 0x01},
+	{0x5f, 0x03},
+	{0x60, 0x2d},
+	{0x61, 0x03},
+	{0x62, 0x39},
+	{0x63, 0x03},
+	{0x64, 0x47},
+	{0x65, 0x03},
+	{0x66, 0x57},
+	{0x67, 0x03},
+	{0x68, 0x65},
+	{0x69, 0x03},
+	{0x6a, 0x77},
+	{0x6b, 0x03},
+	{0x6c, 0x85},
+	{0x6d, 0x03},
+	{0x6e, 0x8f},
+	{0x6f, 0x03},
+	{0x70, 0xcb},
+	{0x71, 0x00},
+	{0x72, 0x00},
+	{0x73, 0x00},
+	{0x74, 0x21},
+	{0x75, 0x00},
+	{0x76, 0x4c},
+	{0x77, 0x00},
+	{0x78, 0x6b},
+	{0x79, 0x00},
+	{0x7a, 0x85},
+	{0x7b, 0x00},
+	{0x7c, 0x9a},
+	{0x7d, 0x00},
+	{0x7e, 0xad},
+	{0x7f, 0x00},
+	{0x80, 0xbe},
+	{0x81, 0x00},
+	{0x82, 0xcd},
+	{0x83, 0x01},
+	{0x84, 0x01},
+	{0x85, 0x01},
+	{0x86, 0x29},
+	{0x87, 0x01},
+	{0x88, 0x68},
+	{0x89, 0x01},
+	{0x8a, 0x98},
+	{0x8b, 0x01},
+	{0x8c, 0xe5},
+	{0x8d, 0x02},
+	{0x8e, 0x1e},
+	{0x8f, 0x02},
+	{0x90, 0x30},
+	{0x91, 0x02},
+	{0x92, 0x52},
+	{0x93, 0x02},
+	{0x94, 0x88},
+	{0x95, 0x02},
+	{0x96, 0xaa},
+	{0x97, 0x02},
+	{0x98, 0xd7},
+	{0x99, 0x02},
+	{0x9a, 0xf7},
+	{0x9b, 0x03},
+	{0x9c, 0x21},
+	{0x9d, 0x03},
+	{0x9e, 0x2e},
+	{0x9f, 0x03},
+	{0xa0, 0x3d},
+	{0xa2, 0x03},
+	{0xa3, 0x4c},
+	{0xa4, 0x03},
+	{0xa5, 0x5e},
+	{0xa6, 0x03},
+	{0xa7, 0x71},
+	{0xa9, 0x03},
+	{0xaa, 0x86},
+	{0xab, 0x03},
+	{0xac, 0x94},
+	{0xad, 0x03},
+	{0xae, 0xfa},
+	{0xaf, 0x00},
+	{0xb0, 0x00},
+	{0xb1, 0x00},
+	{0xb2, 0x21},
+	{0xb3, 0x00},
+	{0xb4, 0x4c},
+	{0xb5, 0x00},
+	{0xb6, 0x6b},
+	{0xb7, 0x00},
+	{0xb8, 0x85},
+	{0xb9, 0x00},
+	{0xba, 0x9a},
+	{0xbb, 0x00},
+	{0xbc, 0xad},
+	{0xbd, 0x00},
+	{0xbe, 0xbe},
+	{0xbf, 0x00},
+	{0xc0, 0xcd},
+	{0xc1, 0x01},
+	{0xc2, 0x01},
+	{0xc3, 0x01},
+	{0xc4, 0x29},
+	{0xc5, 0x01},
+	{0xc6, 0x68},
+	{0xc7, 0x01},
+	{0xc8, 0x98},
+	{0xc9, 0x01},
+	{0xca, 0xe5},
+	{0xcb, 0x02},
+	{0xcc, 0x1e},
+	{0xcd, 0x02},
+	{0xce, 0x20},
+	{0xcf, 0x02},
+	{0xd0, 0x52},
+	{0xd1, 0x02},
+	{0xd2, 0x88},
+	{0xd3, 0x02},
+	{0xd4, 0xaa},
+	{0xd5, 0x02},
+	{0xd6, 0xd7},
+	{0xd7, 0x02},
+	{0xd8, 0xf7},
+	{0xd9, 0x03},
+	{0xda, 0x21},
+	{0xdb, 0x03},
+	{0xdc, 0x2e},
+	{0xdd, 0x03},
+	{0xde, 0x3d},
+	{0xdf, 0x03},
+	{0xe0, 0x4c},
+	{0xe1, 0x03},
+	{0xe2, 0x5e},
+	{0xe3, 0x03},
+	{0xe4, 0x71},
+	{0xe5, 0x03},
+	{0xe6, 0x86},
+	{0xe7, 0x03},
+	{0xe8, 0x94},
+	{0xe9, 0x03},
+	{0xea, 0xfa},
+	/* Select CMD2 Page0 (Undocumented) */
+	{0xff, 0x01},
+	/* Reload CMD1: Don't reload default value to register */
+	{0xfb, 0x01},
+	/* Select CMD2 Page1 (Undocumented) */
+	{0xff, 0x02},
+	/* Reload CMD1: Don't reload default value to register */
+	{0xfb, 0x01},
+	/* Select CMD2 Page3 (Undocumented) */
+	{0xff, 0x04},
+	/* Reload CMD1: Don't reload default value to register */
+	{0xfb, 0x01},
+	/* Select CMD1 */
+	{0xff, 0x00},
+	{0xd3, 0x05}, /* RGBMIPICTRL: VSYNC back porch = 5 */
+	{0xd4, 0x04}, /* RGBMIPICTRL: VSYNC front porch = 4 */
+};
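+
+/*
+ * Illustrative sketch (mirrors the replay loop in the prepare hook below):
+ * each table entry is sent as a one-parameter DCS write, and the recurring
+ * 0xff entries switch the controller's register page, so the same register
+ * offset addresses different hardware depending on the last page select:
+ *
+ *	const struct khadas_ts050_panel_cmd *c = &init_code[i];
+ *
+ *	err = mipi_dsi_dcs_write(khadas_ts050->link, c->cmd, &c->data, 1);
+ */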
+
+static inline
+struct khadas_ts050_panel *to_khadas_ts050_panel(struct drm_panel *panel)
+{
+	return container_of(panel, struct khadas_ts050_panel, base);
+}
+
+static int khadas_ts050_panel_prepare(struct drm_panel *panel)
+{
+	struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
+	unsigned int i;
+	int err;
+
+	if (khadas_ts050->prepared)
+		return 0;
+
+	gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0);
+
+	err = regulator_enable(khadas_ts050->supply);
+	if (err < 0)
+		return err;
+
+	gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 1);
+
+	msleep(60);
+
+	gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1);
+
+	usleep_range(10000, 11000);
+
+	gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 0);
+
+	/* Select CMD2 page 4 (Undocumented) */
+	mipi_dsi_dcs_write(khadas_ts050->link, 0xff, (u8[]){ 0x05 }, 1);
+
+	/* Reload CMD1: Don't reload default value to register */
+	mipi_dsi_dcs_write(khadas_ts050->link, 0xfb, (u8[]){ 0x01 }, 1);
+
+	mipi_dsi_dcs_write(khadas_ts050->link, 0xc5, (u8[]){ 0x01 }, 1);
+
+	msleep(100);
+
+	for (i = 0; i < ARRAY_SIZE(init_code); i++) {
+		err = mipi_dsi_dcs_write(khadas_ts050->link,
+					 init_code[i].cmd,
+					 &init_code[i].data, 1);
+		if (err < 0) {
+			dev_err(panel->dev, "failed to write cmds: %d\n", err);
+			goto poweroff;
+		}
+	}
+
+	err = mipi_dsi_dcs_exit_sleep_mode(khadas_ts050->link);
+	if (err < 0) {
+		dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
+		goto poweroff;
+	}
+
+	msleep(120);
+
+	/* Select CMD1 */
+	mipi_dsi_dcs_write(khadas_ts050->link, 0xff, (u8[]){ 0x00 }, 1);
+
+	err = mipi_dsi_dcs_set_tear_on(khadas_ts050->link,
+				       MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+	if (err < 0) {
+		dev_err(panel->dev, "failed to set tear on: %d\n", err);
+		goto poweroff;
+	}
+
+	err = mipi_dsi_dcs_set_display_on(khadas_ts050->link);
+	if (err < 0) {
+		dev_err(panel->dev, "failed to set display on: %d\n", err);
+		goto poweroff;
+	}
+
+	usleep_range(10000, 11000);
+
+	khadas_ts050->prepared = true;
+
+	return 0;
+
+poweroff:
+	gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0);
+	gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1);
+
+	regulator_disable(khadas_ts050->supply);
+
+	return err;
+}
+
+static int khadas_ts050_panel_unprepare(struct drm_panel *panel)
+{
+	struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
+	int err;
+
+	if (!khadas_ts050->prepared)
+		return 0;
+
+	khadas_ts050->prepared = false;
+
+	err = mipi_dsi_dcs_enter_sleep_mode(khadas_ts050->link);
+	if (err < 0)
+		dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);
+
+	msleep(150);
+
+	gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0);
+	gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1);
+
+	err = regulator_disable(khadas_ts050->supply);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int khadas_ts050_panel_enable(struct drm_panel *panel)
+{
+	struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
+
+	khadas_ts050->enabled = true;
+
+	return 0;
+}
+
+static int khadas_ts050_panel_disable(struct drm_panel *panel)
+{
+	struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
+	int err;
+
+	if (!khadas_ts050->enabled)
+		return 0;
+
+	err = mipi_dsi_dcs_set_display_off(khadas_ts050->link);
+	if (err < 0)
+		dev_err(panel->dev, "failed to set display off: %d\n", err);
+
+	usleep_range(10000, 11000);
+
+	khadas_ts050->enabled = false;
+
+	return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+	.clock = 120000,
+	.hdisplay = 1088,
+	.hsync_start = 1088 + 104,
+	.hsync_end = 1088 + 104 + 4,
+	.htotal = 1088 + 104 + 4 + 127,
+	.vdisplay = 1920,
+	.vsync_start = 1920 + 4,
+	.vsync_end = 1920 + 4 + 2,
+	.vtotal = 1920 + 4 + 2 + 3,
+	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static int khadas_ts050_panel_get_modes(struct drm_panel *panel,
+					struct drm_connector *connector)
+{
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
+	if (!mode) {
+		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+			default_mode.hdisplay, default_mode.vdisplay,
+			drm_mode_vrefresh(&default_mode));
+		return -ENOMEM;
+	}
+
+	drm_mode_set_name(mode);
+
+	drm_mode_probed_add(connector, mode);
+
+	connector->display_info.width_mm = 64;
+	connector->display_info.height_mm = 118;
+	connector->display_info.bpc = 8;
+
+	return 1;
+}
+
+static const struct drm_panel_funcs khadas_ts050_panel_funcs = {
+	.prepare = khadas_ts050_panel_prepare,
+	.unprepare = khadas_ts050_panel_unprepare,
+	.enable = khadas_ts050_panel_enable,
+	.disable = khadas_ts050_panel_disable,
+	.get_modes = khadas_ts050_panel_get_modes,
+};
+
+static const struct of_device_id khadas_ts050_of_match[] = {
+	{ .compatible = "khadas,ts050", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, khadas_ts050_of_match);
+
+static int khadas_ts050_panel_add(struct khadas_ts050_panel *khadas_ts050)
+{
+	struct device *dev = &khadas_ts050->link->dev;
+	int err;
+
+	khadas_ts050->supply = devm_regulator_get(dev, "power");
+	if (IS_ERR(khadas_ts050->supply))
+		return dev_err_probe(dev, PTR_ERR(khadas_ts050->supply),
+				     "failed to get power supply");
+
+	khadas_ts050->reset_gpio = devm_gpiod_get(dev, "reset",
+						  GPIOD_OUT_LOW);
+	if (IS_ERR(khadas_ts050->reset_gpio))
+		return dev_err_probe(dev, PTR_ERR(khadas_ts050->reset_gpio),
+				     "failed to get reset gpio");
+
+	khadas_ts050->enable_gpio = devm_gpiod_get(dev, "enable",
+						   GPIOD_OUT_HIGH);
+	if (IS_ERR(khadas_ts050->enable_gpio))
+		return dev_err_probe(dev, PTR_ERR(khadas_ts050->enable_gpio),
+				     "failed to get enable gpio");
+
+	drm_panel_init(&khadas_ts050->base, &khadas_ts050->link->dev,
+		       &khadas_ts050_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+	err = drm_panel_of_backlight(&khadas_ts050->base);
+	if (err)
+		return err;
+
+	drm_panel_add(&khadas_ts050->base);
+
+	return 0;
+}
+
+static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi)
+{
+	struct khadas_ts050_panel *khadas_ts050;
+	int err;
+
+	dsi->lanes = 4;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+	khadas_ts050 = devm_kzalloc(&dsi->dev, sizeof(*khadas_ts050),
+				    GFP_KERNEL);
+	if (!khadas_ts050)
+		return -ENOMEM;
+
+	mipi_dsi_set_drvdata(dsi, khadas_ts050);
+	khadas_ts050->link = dsi;
+
+	err = khadas_ts050_panel_add(khadas_ts050);
+	if (err < 0)
+		return err;
+
+	err = mipi_dsi_attach(dsi);
+	if (err)
+		drm_panel_remove(&khadas_ts050->base);
+
+	return err;
+}
+
+static int khadas_ts050_panel_remove(struct mipi_dsi_device *dsi)
+{
+	struct khadas_ts050_panel *khadas_ts050 = mipi_dsi_get_drvdata(dsi);
+	int err;
+
+	err = mipi_dsi_detach(dsi);
+	if (err < 0)
+		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+	drm_panel_remove(&khadas_ts050->base);
+	drm_panel_disable(&khadas_ts050->base);
+	drm_panel_unprepare(&khadas_ts050->base);
+
+	return 0;
+}
+
+static void khadas_ts050_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+	struct khadas_ts050_panel *khadas_ts050 = mipi_dsi_get_drvdata(dsi);
+
+	drm_panel_disable(&khadas_ts050->base);
+	drm_panel_unprepare(&khadas_ts050->base);
+}
+
+static struct mipi_dsi_driver khadas_ts050_panel_driver = {
+	.driver = {
+		.name = "panel-khadas-ts050",
+		.of_match_table = khadas_ts050_of_match,
+	},
+	.probe = khadas_ts050_panel_probe,
+	.remove = khadas_ts050_panel_remove,
+	.shutdown = khadas_ts050_panel_shutdown,
+};
+module_mipi_dsi_driver(khadas_ts050_panel_driver);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Khadas TS050 panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index 0c5f22e95c2dbbafb9850faf2aeb4771788d09ad..30f28ad4df6b60249fe3abb6a5bd11e3df51525c 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -9,6 +9,7 @@
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/regulator/consumer.h>
 
 #include <video/mipi_display.h>
@@ -22,6 +23,7 @@
 /* Manufacturer specific Commands send via DSI */
 #define MANTIX_CMD_OTP_STOP_RELOAD_MIPI 0x41
 #define MANTIX_CMD_INT_CANCEL           0x4C
+#define MANTIX_CMD_SPI_FINISH           0x90
 
 struct mantix {
 	struct device *dev;
@@ -33,6 +35,8 @@ struct mantix {
 	struct regulator *avdd;
 	struct regulator *avee;
 	struct regulator *vddi;
+
+	const struct drm_display_mode *default_mode;
 };
 
 static inline struct mantix *panel_to_mantix(struct drm_panel *panel)
@@ -66,6 +70,10 @@ static int mantix_init_sequence(struct mantix *ctx)
 	dsi_generic_write_seq(dsi, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00);
 	msleep(20);
 
+	dsi_generic_write_seq(dsi, MANTIX_CMD_SPI_FINISH, 0xA5);
+	dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x00, 0x2F);
+	msleep(20);
+
 	dev_dbg(dev, "Panel init sequence done\n");
 	return 0;
 }
@@ -182,7 +190,7 @@ static int mantix_prepare(struct drm_panel *panel)
 	return 0;
 }
 
-static const struct drm_display_mode default_mode = {
+static const struct drm_display_mode default_mode_mantix = {
 	.hdisplay    = 720,
 	.hsync_start = 720 + 45,
 	.hsync_end   = 720 + 45 + 14,
@@ -197,17 +205,32 @@ static const struct drm_display_mode default_mode = {
 	.height_mm   = 130,
 };
 
+static const struct drm_display_mode default_mode_ys = {
+	.hdisplay    = 720,
+	.hsync_start = 720 + 45,
+	.hsync_end   = 720 + 45 + 14,
+	.htotal	     = 720 + 45 + 14 + 25,
+	.vdisplay    = 1440,
+	.vsync_start = 1440 + 175,
+	.vsync_end   = 1440 + 175 + 8,
+	.vtotal	     = 1440 + 175 + 8 + 50,
+	.clock	     = 85298,
+	.flags	     = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+	.width_mm    = 65,
+	.height_mm   = 130,
+};
+
 static int mantix_get_modes(struct drm_panel *panel,
 			    struct drm_connector *connector)
 {
 	struct mantix *ctx = panel_to_mantix(panel);
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(connector->dev, &default_mode);
+	mode = drm_mode_duplicate(connector->dev, ctx->default_mode);
 	if (!mode) {
 		dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n",
-			default_mode.hdisplay, default_mode.vdisplay,
-			drm_mode_vrefresh(&default_mode));
+			ctx->default_mode->hdisplay, ctx->default_mode->vdisplay,
+			drm_mode_vrefresh(ctx->default_mode));
 		return -ENOMEM;
 	}
 
@@ -238,6 +261,7 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;
+	ctx->default_mode = of_device_get_match_data(dev);
 
 	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
 	if (IS_ERR(ctx->reset_gpio)) {
@@ -288,8 +312,8 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
 	}
 
 	dev_info(dev, "%ux%u@%u %ubpp dsi %udl - ready\n",
-		 default_mode.hdisplay, default_mode.vdisplay,
-		 drm_mode_vrefresh(&default_mode),
+		 ctx->default_mode->hdisplay, ctx->default_mode->vdisplay,
+		 drm_mode_vrefresh(ctx->default_mode),
 		 mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
 
 	return 0;
@@ -316,7 +340,8 @@ static int mantix_remove(struct mipi_dsi_device *dsi)
 }
 
 static const struct of_device_id mantix_of_match[] = {
-	{ .compatible = "mantix,mlaf057we51-x" },
+	{ .compatible = "mantix,mlaf057we51-x", .data = &default_mode_mantix },
+	{ .compatible = "ys,ys57pss36bh5gq", .data = &default_mode_ys },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, mantix_of_match);
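of_device_get_match_data() returns the .data pointer of the of_device_id
entry whose compatible string matched, which is what lets one probe path
serve both panels above. A minimal sketch of the retrieval; the NULL check
is a defensive suggestion, not part of this patch:

	ctx->default_mode = of_device_get_match_data(dev);
	if (!ctx->default_mode)
		return -ENODEV;	/* matched entry carried no mode data */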
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 6b4e97bfd46ee01e3c0b8db6d338663af37ecc7e..603c5dfe87682b70c33bcbda9d67e98662071d64 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -25,6 +25,14 @@
 /* Manufacturer Command Set */
 #define MCS_ELVSS_ON		0xb1
 #define MCS_TEMP_SWIRE		0xb2
+#define MCS_PENTILE_1		0xb3
+#define MCS_PENTILE_2		0xb4
+#define MCS_GAMMA_DELTA_Y_RED	0xb5
+#define MCS_GAMMA_DELTA_X_RED	0xb6
+#define MCS_GAMMA_DELTA_Y_GREEN	0xb7
+#define MCS_GAMMA_DELTA_X_GREEN	0xb8
+#define MCS_GAMMA_DELTA_Y_BLUE	0xb9
+#define MCS_GAMMA_DELTA_X_BLUE	0xba
 #define MCS_MIECTL1		0xc0
 #define MCS_BCMODE		0xc1
 #define MCS_ERROR_CHECK		0xd5
@@ -281,6 +289,7 @@ struct s6e63m0 {
 	struct backlight_device *bl_dev;
 	u8 lcd_type;
 	u8 elvss_pulse;
+	bool dsi_mode;
 
 	struct regulator_bulk_data supplies[2];
 	struct gpio_desc *reset_gpio;
@@ -395,9 +404,21 @@ static int s6e63m0_check_lcd_type(struct s6e63m0 *ctx)
 
 static void s6e63m0_init(struct s6e63m0 *ctx)
 {
-	s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
-				     0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f,
-				     0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00);
+	/*
+	 * We do not know why there is a difference in the DSI mode.
+	 * (No datasheet.)
+	 *
+	 * In the vendor driver this sequence is called
+	 * "SEQ_PANEL_CONDITION_SET" or "DCS_CMD_SEQ_PANEL_COND_SET".
+	 */
+	if (ctx->dsi_mode)
+		s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
+					     0x01, 0x2c, 0x2c, 0x07, 0x07, 0x5f, 0xb3,
+					     0x6d, 0x97, 0x1d, 0x3a, 0x0f, 0x00, 0x00);
+	else
+		s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
+					     0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f,
+					     0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00);
 
 	s6e63m0_dcs_write_seq_static(ctx, MCS_DISCTL,
 				     0x02, 0x03, 0x1c, 0x10, 0x10);
@@ -414,40 +435,40 @@ static void s6e63m0_init(struct s6e63m0 *ctx)
 
 	s6e63m0_dcs_write_seq_static(ctx, MCS_SRCCTL,
 				     0x00, 0x8e, 0x07);
-	s6e63m0_dcs_write_seq_static(ctx, 0xb3, 0x6c);
+	s6e63m0_dcs_write_seq_static(ctx, MCS_PENTILE_1, 0x6c);
 
-	s6e63m0_dcs_write_seq_static(ctx, 0xb5,
+	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_RED,
 				     0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
 				     0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
 				     0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
 				     0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
 				     0x21, 0x20, 0x1e, 0x1e);
 
-	s6e63m0_dcs_write_seq_static(ctx, 0xb6,
+	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_RED,
 				     0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
 				     0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
 				     0x66, 0x66);
 
-	s6e63m0_dcs_write_seq_static(ctx, 0xb7,
+	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_GREEN,
 				     0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
 				     0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
 				     0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
 				     0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
 				     0x21, 0x20, 0x1e, 0x1e);
 
-	s6e63m0_dcs_write_seq_static(ctx, 0xb8,
+	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_GREEN,
 				     0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
 				     0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
 				     0x66, 0x66);
 
-	s6e63m0_dcs_write_seq_static(ctx, 0xb9,
+	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_BLUE,
 				     0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
 				     0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
 				     0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
 				     0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
 				     0x21, 0x20, 0x1e, 0x1e);
 
-	s6e63m0_dcs_write_seq_static(ctx, 0xba,
+	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_BLUE,
 				     0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
 				     0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
 				     0x66, 0x66);
@@ -671,12 +692,12 @@ static const struct backlight_ops s6e63m0_backlight_ops = {
 	.update_status	= s6e63m0_set_brightness,
 };
 
-static int s6e63m0_backlight_register(struct s6e63m0 *ctx)
+static int s6e63m0_backlight_register(struct s6e63m0 *ctx, u32 max_brightness)
 {
 	struct backlight_properties props = {
 		.type		= BACKLIGHT_RAW,
-		.brightness	= MAX_BRIGHTNESS,
-		.max_brightness = MAX_BRIGHTNESS
+		.brightness	= max_brightness,
+		.max_brightness = max_brightness,
 	};
 	struct device *dev = ctx->dev;
 	int ret = 0;
@@ -698,12 +719,14 @@ int s6e63m0_probe(struct device *dev,
 		  bool dsi_mode)
 {
 	struct s6e63m0 *ctx;
+	u32 max_brightness;
 	int ret;
 
 	ctx = devm_kzalloc(dev, sizeof(struct s6e63m0), GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;
 
+	ctx->dsi_mode = dsi_mode;
 	ctx->dcs_read = dcs_read;
 	ctx->dcs_write = dcs_write;
 	dev_set_drvdata(dev, ctx);
@@ -712,6 +735,14 @@ int s6e63m0_probe(struct device *dev,
 	ctx->enabled = false;
 	ctx->prepared = false;
 
+	ret = device_property_read_u32(dev, "max-brightness", &max_brightness);
+	if (ret)
+		max_brightness = MAX_BRIGHTNESS;
+	if (max_brightness > MAX_BRIGHTNESS) {
+		dev_err(dev, "illegal max brightness specified\n");
+		max_brightness = MAX_BRIGHTNESS;
+	}
+
 	ctx->supplies[0].supply = "vdd3";
 	ctx->supplies[1].supply = "vci";
 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
@@ -731,7 +762,7 @@ int s6e63m0_probe(struct device *dev,
 		       dsi_mode ? DRM_MODE_CONNECTOR_DSI :
 		       DRM_MODE_CONNECTOR_DPI);
 
-	ret = s6e63m0_backlight_register(ctx);
+	ret = s6e63m0_backlight_register(ctx, max_brightness);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 41bbec72b2dad99dbb04306a3c2f0aae10528b13..4e2dad314c795342ff2e11076c641bca52f69552 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -39,72 +39,145 @@
 #include <drm/drm_panel.h>
 
 /**
- * struct panel_desc
- * @modes: Pointer to array of fixed modes appropriate for this panel.  If
- *         only one mode then this can just be the address of this the mode.
- *         NOTE: cannot be used with "timings" and also if this is specified
- *         then you cannot override the mode in the device tree.
- * @num_modes: Number of elements in modes array.
- * @timings: Pointer to array of display timings.  NOTE: cannot be used with
- *           "modes" and also these will be used to validate a device tree
- *           override if one is present.
- * @num_timings: Number of elements in timings array.
- * @bpc: Bits per color.
- * @size: Structure containing the physical size of this panel.
- * @delay: Structure containing various delay values for this panel.
- * @bus_format: See MEDIA_BUS_FMT_... defines.
- * @bus_flags: See DRM_BUS_FLAG_... defines.
- * @connector_type: LVDS, eDP, DSI, DPI, etc.
+ * struct panel_desc - Describes a simple panel.
  */
 struct panel_desc {
+	/**
+	 * @modes: Pointer to array of fixed modes appropriate for this panel.
+	 *
+	 * If only one mode then this can just be the address of the mode.
+	 * NOTE: cannot be used with "timings" and also if this is specified
+	 * then you cannot override the mode in the device tree.
+	 */
 	const struct drm_display_mode *modes;
+
+	/** @num_modes: Number of elements in modes array. */
 	unsigned int num_modes;
+
+	/**
+	 * @timings: Pointer to array of display timings
+	 *
+	 * NOTE: cannot be used with "modes" and also these will be used to
+	 * validate a device tree override if one is present.
+	 */
 	const struct display_timing *timings;
+
+	/** @num_timings: Number of elements in timings array. */
 	unsigned int num_timings;
 
+	/** @bpc: Bits per color. */
 	unsigned int bpc;
 
-	/**
-	 * @width: width (in millimeters) of the panel's active display area
-	 * @height: height (in millimeters) of the panel's active display area
-	 */
+	/** @size: Structure containing the physical size of this panel. */
 	struct {
+		/**
+		 * @size.width: Width (in mm) of the active display area.
+		 */
 		unsigned int width;
+
+		/**
+		 * @size.height: Height (in mm) of the active display area.
+		 */
 		unsigned int height;
 	} size;
 
-	/**
-	 * @prepare: the time (in milliseconds) that it takes for the panel to
-	 *           become ready and start receiving video data
-	 * @hpd_absent_delay: Add this to the prepare delay if we know Hot
-	 *                    Plug Detect isn't used.
-	 * @enable: the time (in milliseconds) that it takes for the panel to
-	 *          display the first valid frame after starting to receive
-	 *          video data
-	 * @disable: the time (in milliseconds) that it takes for the panel to
-	 *           turn the display off (no content is visible)
-	 * @unprepare: the time (in milliseconds) that it takes for the panel
-	 *             to power itself down completely
-	 */
+	/** @delay: Structure containing various delay values for this panel. */
 	struct {
+		/**
+		 * @delay.prepare: Time for the panel to become ready.
+		 *
+		 * The time (in milliseconds) that it takes for the panel to
+		 * become ready and start receiving video data.
+		 */
 		unsigned int prepare;
+
+		/**
+		 * @delay.hpd_absent_delay: Time to wait if HPD isn't hooked up.
+		 *
+		 * Add this to the prepare delay if we know Hot Plug Detect
+		 * isn't used.
+		 */
 		unsigned int hpd_absent_delay;
+
+		/**
+		 * @delay.prepare_to_enable: Time between prepare and enable.
+		 *
+		 * The minimum time, in milliseconds, that needs to have passed
+		 * between when prepare finished and enable may begin. If at
+		 * enable time less time has passed since prepare finished,
+		 * the driver waits for the remaining time.
+		 *
+		 * If a fixed enable delay is also specified, we'll start
+		 * counting before delaying for the fixed delay.
+		 *
+		 * If a fixed prepare delay is also specified, we won't start
+		 * counting until after the fixed delay. We can't overlap this
+		 * fixed delay with the min time because the fixed delay
+		 * doesn't happen at the end of the function if a HPD GPIO was
+		 * specified.
+		 *
+		 * In other words:
+		 *   prepare()
+		 *     ...
+		 *     // do fixed prepare delay
+		 *     // wait for HPD GPIO if applicable
+		 *     // start counting for prepare_to_enable
+		 *
+		 *   enable()
+		 *     // do fixed enable delay
+		 *     // enforce prepare_to_enable min time
+		 */
+		unsigned int prepare_to_enable;
+
+		/**
+		 * @delay.enable: Time for the panel to display a valid frame.
+		 *
+		 * The time (in milliseconds) that it takes for the panel to
+		 * display the first valid frame after starting to receive
+		 * video data.
+		 */
 		unsigned int enable;
+
+		/**
+		 * @delay.disable: Time for the panel to turn the display off.
+		 *
+		 * The time (in milliseconds) that it takes for the panel to
+		 * turn the display off (no content is visible).
+		 */
 		unsigned int disable;
+
+		/**
+		 * @delay.unprepare: Time to power down completely.
+		 *
+		 * The time (in milliseconds) that it takes for the panel
+		 * to power itself down completely.
+		 *
+		 * This time is used to prevent a future "prepare" from
+		 * starting until at least this many milliseconds have passed.
+		 * If at prepare time less time has passed since unprepare
+		 * finished, the driver waits for the remaining time.
+		 */
 		unsigned int unprepare;
 	} delay;
 
+	/** @bus_format: See MEDIA_BUS_FMT_... defines. */
 	u32 bus_format;
+
+	/** @bus_flags: See DRM_BUS_FLAG_... defines. */
 	u32 bus_flags;
+
+	/** @connector_type: LVDS, eDP, DSI, DPI, etc. */
 	int connector_type;
 };
 
 struct panel_simple {
 	struct drm_panel base;
-	bool prepared;
 	bool enabled;
 	bool no_hpd;
 
+	ktime_t prepared_time;
+	ktime_t unprepared_time;
+
 	const struct panel_desc *desc;
 
 	struct regulator *supply;
@@ -232,6 +305,20 @@ static int panel_simple_get_non_edid_modes(struct panel_simple *panel,
 	return num;
 }
 
+static void panel_simple_wait(ktime_t start_ktime, unsigned int min_ms)
+{
+	ktime_t now_ktime, min_ktime;
+
+	if (!min_ms)
+		return;
+
+	min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
+	now_ktime = ktime_get();
+
+	if (ktime_before(now_ktime, min_ktime))
+		msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
+}
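+
+/*
+ * Worked example (values from the boe_nv110wtm_n61 entry added below:
+ * unprepare = 500 ms, prepare_to_enable = 80 ms): if unprepare() finished
+ * at t = 0 and prepare() is called at t = 120 ms, panel_simple_wait()
+ * sleeps about 500 - 120 + 1 = 381 ms before powering back up; if
+ * enable() runs 80 ms or more after prepare() finished, the
+ * prepare_to_enable wait is a no-op.
+ */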
+
 static int panel_simple_disable(struct drm_panel *panel)
 {
 	struct panel_simple *p = to_panel_simple(panel);
@@ -251,17 +338,15 @@ static int panel_simple_unprepare(struct drm_panel *panel)
 {
 	struct panel_simple *p = to_panel_simple(panel);
 
-	if (!p->prepared)
+	if (p->prepared_time == 0)
 		return 0;
 
 	gpiod_set_value_cansleep(p->enable_gpio, 0);
 
 	regulator_disable(p->supply);
 
-	if (p->desc->delay.unprepare)
-		msleep(p->desc->delay.unprepare);
-
-	p->prepared = false;
+	p->prepared_time = 0;
+	p->unprepared_time = ktime_get();
 
 	return 0;
 }
@@ -298,9 +383,11 @@ static int panel_simple_prepare(struct drm_panel *panel)
 	int err;
 	int hpd_asserted;
 
-	if (p->prepared)
+	if (p->prepared_time != 0)
 		return 0;
 
+	panel_simple_wait(p->unprepared_time, p->desc->delay.unprepare);
+
 	err = regulator_enable(p->supply);
 	if (err < 0) {
 		dev_err(panel->dev, "failed to enable supply: %d\n", err);
@@ -335,7 +422,7 @@ static int panel_simple_prepare(struct drm_panel *panel)
 		}
 	}
 
-	p->prepared = true;
+	p->prepared_time = ktime_get();
 
 	return 0;
 }
@@ -350,6 +437,8 @@ static int panel_simple_enable(struct drm_panel *panel)
 	if (p->desc->delay.enable)
 		msleep(p->desc->delay.enable);
 
+	panel_simple_wait(p->prepared_time, p->desc->delay.prepare_to_enable);
+
 	p->enabled = true;
 
 	return 0;
@@ -516,7 +605,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
 		return -ENOMEM;
 
 	panel->enabled = false;
-	panel->prepared = false;
+	panel->prepared_time = 0;
 	panel->desc = desc;
 
 	panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
@@ -1318,6 +1407,51 @@ static const struct panel_desc boe_nv101wxmn51 = {
 	},
 };
 
+static const struct drm_display_mode boe_nv110wtm_n61_modes[] = {
+	{
+		.clock = 207800,
+		.hdisplay = 2160,
+		.hsync_start = 2160 + 48,
+		.hsync_end = 2160 + 48 + 32,
+		.htotal = 2160 + 48 + 32 + 100,
+		.vdisplay = 1440,
+		.vsync_start = 1440 + 3,
+		.vsync_end = 1440 + 3 + 6,
+		.vtotal = 1440 + 3 + 6 + 31,
+		.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+	},
+	{
+		.clock = 138500,
+		.hdisplay = 2160,
+		.hsync_start = 2160 + 48,
+		.hsync_end = 2160 + 48 + 32,
+		.htotal = 2160 + 48 + 32 + 100,
+		.vdisplay = 1440,
+		.vsync_start = 1440 + 3,
+		.vsync_end = 1440 + 3 + 6,
+		.vtotal = 1440 + 3 + 6 + 31,
+		.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+	},
+};
+
+static const struct panel_desc boe_nv110wtm_n61 = {
+	.modes = boe_nv110wtm_n61_modes,
+	.num_modes = ARRAY_SIZE(boe_nv110wtm_n61_modes),
+	.bpc = 8,
+	.size = {
+		.width = 233,
+		.height = 155,
+	},
+	.delay = {
+		.hpd_absent_delay = 200,
+		.prepare_to_enable = 80,
+		.unprepare = 500,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+	.bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+	.connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
 /* Also used for boe_nv133fhm_n62 */
 static const struct drm_display_mode boe_nv133fhm_n61_modes = {
 	.clock = 147840,
@@ -2265,6 +2399,8 @@ static const struct panel_desc innolux_n116bge = {
 		.width = 256,
 		.height = 144,
 	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+	.connector_type = DRM_MODE_CONNECTOR_eDP,
 };
 
 static const struct drm_display_mode innolux_n125hce_gn1_mode = {
@@ -4033,6 +4169,9 @@ static const struct of_device_id platform_of_match[] = {
 	}, {
 		.compatible = "boe,nv101wxmn51",
 		.data = &boe_nv101wxmn51,
+	}, {
+		.compatible = "boe,nv110wtm-n61",
+		.data = &boe_nv110wtm_n61,
 	}, {
 		.compatible = "boe,nv133fhm-n61",
 		.data = &boe_nv133fhm_n61,
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index b30510b1696a04d078b5092966e163dce9312517..a2c303e5732c0d815d885e92b2172fb925719b39 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -530,10 +530,8 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
 		return -ENOMEM;
 
 	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
-	if (IS_ERR(ctx->reset_gpio)) {
-		dev_err(dev, "cannot get reset gpio\n");
-		return PTR_ERR(ctx->reset_gpio);
-	}
+	if (IS_ERR(ctx->reset_gpio))
+		return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), "Failed to get reset gpio\n");
 
 	mipi_dsi_set_drvdata(dsi, ctx);
 
@@ -545,19 +543,13 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
 	dsi->lanes = ctx->desc->lanes;
 
 	ctx->vcc = devm_regulator_get(dev, "vcc");
-	if (IS_ERR(ctx->vcc)) {
-		ret = PTR_ERR(ctx->vcc);
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "Failed to request vcc regulator: %d\n", ret);
-		return ret;
-	}
+	if (IS_ERR(ctx->vcc))
+		return dev_err_probe(dev, PTR_ERR(ctx->vcc), "Failed to request vcc regulator\n");
+
 	ctx->iovcc = devm_regulator_get(dev, "iovcc");
-	if (IS_ERR(ctx->iovcc)) {
-		ret = PTR_ERR(ctx->iovcc);
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "Failed to request iovcc regulator: %d\n", ret);
-		return ret;
-	}
+	if (IS_ERR(ctx->iovcc))
+		return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
+				     "Failed to request iovcc regulator\n");
 
 	drm_panel_init(&ctx->panel, dev, &st7703_drm_funcs,
 		       DRM_MODE_CONNECTOR_DSI);
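dev_err_probe() returns the error it was handed after logging it; for
-EPROBE_DEFER it logs only at debug level and records the message as the
deferral reason (readable from the devices_deferred debugfs file), which is
why the open-coded -EPROBE_DEFER checks above could be dropped. A rough,
simplified equivalent of return dev_err_probe(dev, err, "msg\n"):

	if (err != -EPROBE_DEFER)
		dev_err(dev, "error %pe: msg\n", ERR_PTR(err));
	else
		dev_dbg(dev, "error %pe: msg\n", ERR_PTR(err));
	return err;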
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index f44d28fad085bd001faa0874160a1bf4727c0549..56b3f5935703ac88d9c904041fb953dc045df349 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -76,6 +76,7 @@ static int panfrost_devfreq_get_dev_status(struct device *dev,
 }
 
 static struct devfreq_dev_profile panfrost_devfreq_profile = {
+	/* delayed, not deferrable: keep sampling even while CPUs are idle */
+	.timer = DEVFREQ_TIMER_DELAYED,
 	.polling_ms = 50, /* ~3 frames */
 	.target = panfrost_devfreq_target,
 	.get_dev_status = panfrost_devfreq_get_dev_status,
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 40e6708fbbe24280668c17f105b16cef8c936097..e4dcaef6c1431292be1d548290551779f0d816d3 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -228,7 +228,7 @@ static const struct drm_driver pl111_drm_driver = {
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_import_sg_table = pl111_gem_import_sg_table,
-	.gem_prime_mmap = drm_gem_cma_prime_mmap,
+	.gem_prime_mmap = drm_gem_prime_mmap,
 
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = pl111_debugfs_init,
diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h
index a7bc31f6d565f4a79b9bd1f39b90e11623d43268..06caa61b5d6666736e419042f543c0187212e83f 100644
--- a/drivers/gpu/drm/qxl/qxl_dev.h
+++ b/drivers/gpu/drm/qxl/qxl_dev.h
@@ -271,7 +271,7 @@ struct qxl_mode {
 /* qxl-1 compat: fixed */
 struct qxl_modes {
 	uint32_t n_modes;
-	struct qxl_mode modes[0];
+	struct qxl_mode modes[];
 };
 
 /* qxl-1 compat: append only */
@@ -382,12 +382,12 @@ struct qxl_data_chunk {
 	uint32_t data_size;
 	QXLPHYSICAL prev_chunk;
 	QXLPHYSICAL next_chunk;
-	uint8_t data[0];
+	uint8_t data[];
 };
 
 struct qxl_message {
 	union qxl_release_info release_info;
-	uint8_t data[0];
+	uint8_t data[];
 };
 
 struct qxl_compat_update_cmd {
@@ -469,7 +469,7 @@ struct qxl_raster_glyph {
 	struct qxl_point glyph_origin;
 	uint16_t width;
 	uint16_t height;
-	uint8_t data[0];
+	uint8_t data[];
 };
 
 struct qxl_string {
@@ -768,7 +768,7 @@ enum {
 struct qxl_path_seg {
 	uint32_t flags;
 	uint32_t count;
-	struct qxl_point_fix points[0];
+	struct qxl_point_fix points[];
 };
 
 struct qxl_path {
@@ -819,7 +819,7 @@ struct qxl_image_descriptor {
 struct qxl_palette {
 	uint64_t unique;
 	uint16_t num_ents;
-	uint32_t ents[0];
+	uint32_t ents[];
 };
 
 struct qxl_bitmap {
@@ -838,7 +838,7 @@ struct qxl_surface_id {
 
 struct qxl_encoder_data {
 	uint32_t data_size;
-	uint8_t data[0];
+	uint8_t data[];
 };
 
 struct qxl_image {
@@ -868,7 +868,7 @@ struct qxl_monitors_config {
 	uint16_t count;
 	uint16_t max_allowed; /* If it is 0 no fixed limit is given by the
 				 driver */
-	struct qxl_head heads[0];
+	struct qxl_head heads[];
 };
 
 #pragma pack(pop)
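The [0] to [] changes adopt C99 flexible array members, which sizeof
excludes, so allocation sizes are computed explicitly; the usual kernel
idiom is struct_size() from <linux/overflow.h>. Illustrative pattern only,
since qxl actually lays these structures out in device-visible memory
rather than kmalloc'ing them:

	struct qxl_monitors_config *cfg;

	cfg = kzalloc(struct_size(cfg, heads, count), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;
	cfg->count = count;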
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 6e7f16f4cec794dd0ec82f295dc0a203fd956eac..fb5f6a5e81d764f8107bb157d9dc377e94f7c906 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -163,7 +163,7 @@ DEFINE_DRM_GEM_FOPS(qxl_fops);
 
 static int qxl_drm_freeze(struct drm_device *dev)
 {
-	struct pci_dev *pdev = dev->pdev;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct qxl_device *qdev = to_qxl(dev);
 	int ret;
 
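For reference, to_pci_dev() is a plain container_of() cast from
<linux/pci.h>, so it takes no reference and is valid here precisely
because a qxl drm_device is always PCI-backed:

	#define to_pci_dev(n) container_of(n, struct pci_dev, dev)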
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 8bd0f916dfbc25a70f4e8088ca2061c8d6456262..83b54f0dad61886f0582b8657ce4f9b2f220b28a 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -46,7 +46,6 @@
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_execbuf_util.h>
-#include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_placement.h>
 
 #include "qxl_dev.h"
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 16e1e589508e20a57f2e2125edb4beb99748fe84..b6075f452b9e02a1294b04563432969d437528cd 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -370,13 +370,14 @@ static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv)
 {
 	struct qxl_device *qdev = to_qxl(dev);
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct drm_qxl_clientcap *param = data;
 	int byte, idx;
 
 	byte = param->index / 8;
 	idx = param->index % 8;
 
-	if (dev->pdev->revision < 4)
+	if (pdev->revision < 4)
 		return -ENOSYS;
 
 	if (byte >= 58)
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 1ba5a702d7636d862756b9b3cb2705ca16021d40..ddf6588a2a3822ff1b6cc57bdc4b1c162ef87153 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -81,6 +81,7 @@ static void qxl_client_monitors_config_work_func(struct work_struct *work)
 
 int qxl_irq_init(struct qxl_device *qdev)
 {
+	struct pci_dev *pdev = to_pci_dev(qdev->ddev.dev);
 	int ret;
 
 	init_waitqueue_head(&qdev->display_event);
@@ -93,7 +94,7 @@ int qxl_irq_init(struct qxl_device *qdev)
 	atomic_set(&qdev->irq_received_cursor, 0);
 	atomic_set(&qdev->irq_received_io_cmd, 0);
 	qdev->irq_received_error = 0;
-	ret = drm_irq_install(&qdev->ddev, qdev->ddev.pdev->irq);
+	ret = drm_irq_install(&qdev->ddev, pdev->irq);
 	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed installing irq: %d\n", ret);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 228e2b9198f102b16d42869b222d8a6e5b8863d6..4a60a52ab62ee094b2ef00f05eeb248b6525ceb3 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -111,7 +111,6 @@ int qxl_device_init(struct qxl_device *qdev,
 {
 	int r, sb;
 
-	qdev->ddev.pdev = pdev;
 	pci_set_drvdata(pdev, &qdev->ddev);
 
 	mutex_init(&qdev->gem.mutex);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index ebf24c9d2bf26e71b88a6c6aa09432288c05f23e..e60a8f88e2262e7318ac67dad52483429cb83033 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -50,7 +50,7 @@ static inline void qxl_bo_unreserve(struct qxl_bo *bo)
 
 static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
 {
-	return bo->tbo.num_pages << PAGE_SHIFT;
+	return bo->tbo.base.size;
 }
 
 static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index e75e364655b81ce8203b95ff45b662f067752f6a..0fcfc952d5e9e24ef1cd397116778b2b0cd976fb 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -456,7 +456,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 		bo = entry->bo;
 
 		dma_resv_add_shared_fence(bo->base.resv, &release->base);
-		ttm_bo_move_to_lru_tail(bo, NULL);
+		ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
 		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&ttm_bo_glob.lru_lock);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 7dd0c69baa478dc02c20f73d6ba698384f479895..33c09dc94f8b3540a1d2606d2928953fdff8c2cb 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -31,7 +31,6 @@
 #include <drm/qxl_drm.h>
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_placement.h>
 
 #include "qxl_drv.h"
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index 6ac71755c22ddf7d467dbdacfcc05c274e2afeb2..cdeb1db8722267b35e0764d666065e781a7677fb 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -1,4 +1,4 @@
-/**
+/*
  * \file r128_ioc32.c
  *
  * 32-bit ioctl compatibility routines for the R128 DRM.
@@ -170,13 +170,13 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
 };
 
 /**
- * Called whenever a 32-bit process running under a 64-bit kernel
- * performs an ioctl on /dev/dri/card<n>.
+ * r128_compat_ioctl - Called whenever a 32-bit process running under
+ *                     a 64-bit kernel performs an ioctl on /dev/dri/card<n>.
  *
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
+ * @filp: file pointer.
+ * @cmd: command.
+ * @arg: user argument.
+ * Return: zero on success or negative number on failure.
  */
 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
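With the doxygen \param markup converted to kernel-doc, the canonical shape
of such a comment is (note the capitalized Return: section, fixed above):

	/**
	 * function_name() - Short summary.
	 * @arg: What the argument means.
	 *
	 * Return: What the return value means.
	 */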
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 683de198e18d9add42ac180389dd27cc29d8a651..0fce73b9a64691ae0dbfd8b2f597d633a31d1df0 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2062,9 +2062,9 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
 
 	/* Funky macbooks */
-	if ((dev->pdev->device == 0x71C5) &&
-	    (dev->pdev->subsystem_vendor == 0x106b) &&
-	    (dev->pdev->subsystem_device == 0x0080)) {
+	if ((rdev->pdev->device == 0x71C5) &&
+	    (rdev->pdev->subsystem_vendor == 0x106b) &&
+	    (rdev->pdev->subsystem_device == 0x0080)) {
 		if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
 			uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
 
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index aef4efc692b1af59a5defaa67810e6a0e6f5a212..2955bb32d5ad675c2830ebc7cfbd251ae5f741a2 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2612,7 +2612,6 @@ int r100_asic_reset(struct radeon_device *rdev, bool hard)
 
 void r100_set_common_regs(struct radeon_device *rdev)
 {
-	struct drm_device *dev = rdev->ddev;
 	bool force_dac2 = false;
 	u32 tmp;
 
@@ -2630,7 +2629,7 @@ void r100_set_common_regs(struct radeon_device *rdev)
 	 * don't report it in the bios connector
 	 * table.
 	 */
-	switch (dev->pdev->device) {
+	switch (rdev->pdev->device) {
 		/* RN50 */
 	case 0x515e:
 	case 0x5969:
@@ -2640,17 +2639,17 @@ void r100_set_common_regs(struct radeon_device *rdev)
 	case 0x5159:
 	case 0x515a:
 		/* DELL triple head servers */
-		if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
-		    ((dev->pdev->subsystem_device == 0x016c) ||
-		     (dev->pdev->subsystem_device == 0x016d) ||
-		     (dev->pdev->subsystem_device == 0x016e) ||
-		     (dev->pdev->subsystem_device == 0x016f) ||
-		     (dev->pdev->subsystem_device == 0x0170) ||
-		     (dev->pdev->subsystem_device == 0x017d) ||
-		     (dev->pdev->subsystem_device == 0x017e) ||
-		     (dev->pdev->subsystem_device == 0x0183) ||
-		     (dev->pdev->subsystem_device == 0x018a) ||
-		     (dev->pdev->subsystem_device == 0x019a)))
+		if ((rdev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
+		    ((rdev->pdev->subsystem_device == 0x016c) ||
+		     (rdev->pdev->subsystem_device == 0x016d) ||
+		     (rdev->pdev->subsystem_device == 0x016e) ||
+		     (rdev->pdev->subsystem_device == 0x016f) ||
+		     (rdev->pdev->subsystem_device == 0x0170) ||
+		     (rdev->pdev->subsystem_device == 0x017d) ||
+		     (rdev->pdev->subsystem_device == 0x017e) ||
+		     (rdev->pdev->subsystem_device == 0x0183) ||
+		     (rdev->pdev->subsystem_device == 0x018a) ||
+		     (rdev->pdev->subsystem_device == 0x019a)))
 			force_dac2 = true;
 		break;
 	}
@@ -2798,7 +2797,7 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
 			rdev->mc.real_vram_size = 8192 * 1024;
 			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
 		}
-		/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - 
+		/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
 		 * Novell bug 204882 + along with lots of ubuntu ones
 		 */
 		if (rdev->mc.aper_size > config_aper_size)
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index dc68e538d5a97c1317939fac97167a70a6ace643..34b7c6f1647985e5138dbbbe90a7ba8c0f5294ed 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -220,7 +220,7 @@ int r600_fmt_get_nblocksx(u32 format, u32 w)
 	if (bw == 0)
 		return 0;
 
-	return (w + bw - 1) / bw;
+	return DIV_ROUND_UP(w, bw);
 }
 
 int r600_fmt_get_nblocksy(u32 format, u32 h)
@@ -234,7 +234,7 @@ int r600_fmt_get_nblocksy(u32 format, u32 h)
 	if (bh == 0)
 		return 0;
 
-	return (h + bh - 1) / bh;
+	return DIV_ROUND_UP(h, bh);
 }
 
 struct array_mode_checker {
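
The DIV_ROUND_UP() conversions above are purely mechanical: the macro from
include/linux/kernel.h effectively expands to the same open-coded rounding
expression the patch removes, so behaviour is unchanged while the intent
gets a name. A minimal illustration:

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    /* e.g. a 101-texel-wide surface with 4-texel blocks:
     * (101 + 4 - 1) / 4 == DIV_ROUND_UP(101, 4) == 26 blocks
     */
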
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5f3adba43e4786ba88cf9d6d05d94006ae1be84c..f09989bdce9840a5c955d9a53d2fa9e25d2f61ba 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -75,7 +75,6 @@
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 
 #include <drm/drm_gem.h>
@@ -2314,6 +2313,9 @@ struct radeon_device {
 	struct device			*dev;
 	struct drm_device		*ddev;
 	struct pci_dev			*pdev;
+#ifdef __alpha__
+	struct pci_controller		*hose;
+#endif
 	struct rw_semaphore		exclusive_lock;
 	/* ASIC */
 	union radeon_asic_config	config;
@@ -2623,14 +2625,14 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
 		(rdev->family == CHIP_RV410) ||			\
 		(rdev->family == CHIP_RS400) ||			\
 		(rdev->family == CHIP_RS480))
-#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
-		(rdev->ddev->pdev->device == 0x9443) || \
-		(rdev->ddev->pdev->device == 0x944B) || \
-		(rdev->ddev->pdev->device == 0x9506) || \
-		(rdev->ddev->pdev->device == 0x9509) || \
-		(rdev->ddev->pdev->device == 0x950F) || \
-		(rdev->ddev->pdev->device == 0x689C) || \
-		(rdev->ddev->pdev->device == 0x689D))
+#define ASIC_IS_X2(rdev) ((rdev->pdev->device == 0x9441) || \
+		(rdev->pdev->device == 0x9443) || \
+		(rdev->pdev->device == 0x944B) || \
+		(rdev->pdev->device == 0x9506) || \
+		(rdev->pdev->device == 0x9509) || \
+		(rdev->pdev->device == 0x950F) || \
+		(rdev->pdev->device == 0x689C) || \
+		(rdev->pdev->device == 0x689D))
 #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
 #define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600)  ||	\
 			    (rdev->family == CHIP_RS690)  ||	\
@@ -2653,14 +2655,14 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
 #define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
 			     (rdev->family == CHIP_MULLINS))
 
-#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
-			      (rdev->ddev->pdev->device == 0x6850) || \
-			      (rdev->ddev->pdev->device == 0x6858) || \
-			      (rdev->ddev->pdev->device == 0x6859) || \
-			      (rdev->ddev->pdev->device == 0x6840) || \
-			      (rdev->ddev->pdev->device == 0x6841) || \
-			      (rdev->ddev->pdev->device == 0x6842) || \
-			      (rdev->ddev->pdev->device == 0x6843))
+#define ASIC_IS_LOMBOK(rdev) ((rdev->pdev->device == 0x6849) || \
+			      (rdev->pdev->device == 0x6850) || \
+			      (rdev->pdev->device == 0x6858) || \
+			      (rdev->pdev->device == 0x6859) || \
+			      (rdev->pdev->device == 0x6840) || \
+			      (rdev->pdev->device == 0x6841) || \
+			      (rdev->pdev->device == 0x6842) || \
+			      (rdev->pdev->device == 0x6843))
 
 /*
  * BIOS helpers.
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 8becbe09af2f5c594172b061eb82f44d4b7114d3..bfacf8fe5cc16931cbc96313d00019380143180c 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2478,6 +2478,9 @@ int radeon_asic_init(struct radeon_device *rdev)
 		if (rdev->family == CHIP_HAINAN) {
 			rdev->has_uvd = false;
 			rdev->has_vce = false;
+		} else if (rdev->family == CHIP_OLAND) {
+			rdev->has_uvd = true;
+			rdev->has_vce = false;
 		} else {
 			rdev->has_uvd = true;
 			rdev->has_vce = true;
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index be96d9b64e43b787ad2d5f99bdc2d0cb99d24f19..42301b4e56f5062a854e69e7887ed8cd0881f9fb 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -284,46 +284,47 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 				     uint16_t *line_mux,
 				     struct radeon_hpd *hpd)
 {
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
-	if ((dev->pdev->device == 0x791e) &&
-	    (dev->pdev->subsystem_vendor == 0x1043) &&
-	    (dev->pdev->subsystem_device == 0x826d)) {
+	if ((pdev->device == 0x791e) &&
+	    (pdev->subsystem_vendor == 0x1043) &&
+	    (pdev->subsystem_device == 0x826d)) {
 		if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
 		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
 			*connector_type = DRM_MODE_CONNECTOR_DVID;
 	}
 
 	/* Asrock RS600 board lists the DVI port as HDMI */
-	if ((dev->pdev->device == 0x7941) &&
-	    (dev->pdev->subsystem_vendor == 0x1849) &&
-	    (dev->pdev->subsystem_device == 0x7941)) {
+	if ((pdev->device == 0x7941) &&
+	    (pdev->subsystem_vendor == 0x1849) &&
+	    (pdev->subsystem_device == 0x7941)) {
 		if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
 		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
 			*connector_type = DRM_MODE_CONNECTOR_DVID;
 	}
 
 	/* MSI K9A2GM V2/V3 board has no HDMI or DVI */
-	if ((dev->pdev->device == 0x796e) &&
-	    (dev->pdev->subsystem_vendor == 0x1462) &&
-	    (dev->pdev->subsystem_device == 0x7302)) {
+	if ((pdev->device == 0x796e) &&
+	    (pdev->subsystem_vendor == 0x1462) &&
+	    (pdev->subsystem_device == 0x7302)) {
 		if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
 		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
 			return false;
 	}
 
 	/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
-	if ((dev->pdev->device == 0x7941) &&
-	    (dev->pdev->subsystem_vendor == 0x147b) &&
-	    (dev->pdev->subsystem_device == 0x2412)) {
+	if ((pdev->device == 0x7941) &&
+	    (pdev->subsystem_vendor == 0x147b) &&
+	    (pdev->subsystem_device == 0x2412)) {
 		if (*connector_type == DRM_MODE_CONNECTOR_DVII)
 			return false;
 	}
 
 	/* Falcon NW laptop lists vga ddc line for LVDS */
-	if ((dev->pdev->device == 0x5653) &&
-	    (dev->pdev->subsystem_vendor == 0x1462) &&
-	    (dev->pdev->subsystem_device == 0x0291)) {
+	if ((pdev->device == 0x5653) &&
+	    (pdev->subsystem_vendor == 0x1462) &&
+	    (pdev->subsystem_device == 0x0291)) {
 		if (*connector_type == DRM_MODE_CONNECTOR_LVDS) {
 			i2c_bus->valid = false;
 			*line_mux = 53;
@@ -331,26 +332,26 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 	}
 
 	/* HIS X1300 is DVI+VGA, not DVI+DVI */
-	if ((dev->pdev->device == 0x7146) &&
-	    (dev->pdev->subsystem_vendor == 0x17af) &&
-	    (dev->pdev->subsystem_device == 0x2058)) {
+	if ((pdev->device == 0x7146) &&
+	    (pdev->subsystem_vendor == 0x17af) &&
+	    (pdev->subsystem_device == 0x2058)) {
 		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
 			return false;
 	}
 
 	/* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
-	if ((dev->pdev->device == 0x7142) &&
-	    (dev->pdev->subsystem_vendor == 0x1458) &&
-	    (dev->pdev->subsystem_device == 0x2134)) {
+	if ((pdev->device == 0x7142) &&
+	    (pdev->subsystem_vendor == 0x1458) &&
+	    (pdev->subsystem_device == 0x2134)) {
 		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
 			return false;
 	}
 
 
 	/* Funky macbooks */
-	if ((dev->pdev->device == 0x71C5) &&
-	    (dev->pdev->subsystem_vendor == 0x106b) &&
-	    (dev->pdev->subsystem_device == 0x0080)) {
+	if ((pdev->device == 0x71C5) &&
+	    (pdev->subsystem_vendor == 0x106b) &&
+	    (pdev->subsystem_device == 0x0080)) {
 		if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
 		    (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
 			return false;
@@ -366,27 +367,27 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 	}
 
 	/* ASUS HD 3600 XT board lists the DVI port as HDMI */
-	if ((dev->pdev->device == 0x9598) &&
-	    (dev->pdev->subsystem_vendor == 0x1043) &&
-	    (dev->pdev->subsystem_device == 0x01da)) {
+	if ((pdev->device == 0x9598) &&
+	    (pdev->subsystem_vendor == 0x1043) &&
+	    (pdev->subsystem_device == 0x01da)) {
 		if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
 			*connector_type = DRM_MODE_CONNECTOR_DVII;
 		}
 	}
 
 	/* ASUS HD 3600 board lists the DVI port as HDMI */
-	if ((dev->pdev->device == 0x9598) &&
-	    (dev->pdev->subsystem_vendor == 0x1043) &&
-	    (dev->pdev->subsystem_device == 0x01e4)) {
+	if ((pdev->device == 0x9598) &&
+	    (pdev->subsystem_vendor == 0x1043) &&
+	    (pdev->subsystem_device == 0x01e4)) {
 		if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
 			*connector_type = DRM_MODE_CONNECTOR_DVII;
 		}
 	}
 
 	/* ASUS HD 3450 board lists the DVI port as HDMI */
-	if ((dev->pdev->device == 0x95C5) &&
-	    (dev->pdev->subsystem_vendor == 0x1043) &&
-	    (dev->pdev->subsystem_device == 0x01e2)) {
+	if ((pdev->device == 0x95C5) &&
+	    (pdev->subsystem_vendor == 0x1043) &&
+	    (pdev->subsystem_device == 0x01e2)) {
 		if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
 			*connector_type = DRM_MODE_CONNECTOR_DVII;
 		}
@@ -411,9 +412,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 	 * with different crtcs which isn't possible on the hardware
 	 * side and leaves no crtcs for LVDS or VGA.
 	 */
-	if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) &&
-	    (dev->pdev->subsystem_vendor == 0x1025) &&
-	    (dev->pdev->subsystem_device == 0x013c)) {
+	if (((pdev->device == 0x95c4) || (pdev->device == 0x9591)) &&
+	    (pdev->subsystem_vendor == 0x1025) &&
+	    (pdev->subsystem_device == 0x013c)) {
 		if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
 		    (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
 			/* actually it's a DVI-D port not DVI-I */
@@ -425,9 +426,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 	/* XFX Pine Group device rv730 reports no VGA DDC lines
 	 * even though they are wired up to record 0x93
 	 */
-	if ((dev->pdev->device == 0x9498) &&
-	    (dev->pdev->subsystem_vendor == 0x1682) &&
-	    (dev->pdev->subsystem_device == 0x2452) &&
+	if ((pdev->device == 0x9498) &&
+	    (pdev->subsystem_vendor == 0x1682) &&
+	    (pdev->subsystem_device == 0x2452) &&
 	    (i2c_bus->valid == false) &&
 	    !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
 		struct radeon_device *rdev = dev->dev_private;
@@ -435,11 +436,11 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 	}
 
 	/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
-	if (((dev->pdev->device == 0x9802) ||
-	     (dev->pdev->device == 0x9805) ||
-	     (dev->pdev->device == 0x9806)) &&
-	    (dev->pdev->subsystem_vendor == 0x1734) &&
-	    (dev->pdev->subsystem_device == 0x11bd)) {
+	if (((pdev->device == 0x9802) ||
+	     (pdev->device == 0x9805) ||
+	     (pdev->device == 0x9806)) &&
+	    (pdev->subsystem_vendor == 0x1734) &&
+	    (pdev->subsystem_device == 0x11bd)) {
 		if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
 			*connector_type = DRM_MODE_CONNECTOR_DVII;
 			*line_mux = 0x3103;
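
Every dev->pdev access above becomes either the cached rdev->pdev or
to_pci_dev(dev->dev), since struct drm_device no longer duplicates the PCI
device pointer. to_pci_dev() is the standard container_of() accessor from
include/linux/pci.h; sketched here for reference:

    #define to_pci_dev(n) container_of(n, struct pci_dev, dev)

    /* dev->dev is the generic struct device the drm_device was bound to;
     * for a PCI driver it is embedded in a struct pci_dev, so walking
     * back with container_of() is safe.
     */
    struct pci_dev *pdev = to_pci_dev(dev->dev);
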
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 43bbbfd6ade87d148b029856339073d6adc1abe0..33121655d50bbe49caf1cad466cdbc5f5b3f4cd8 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -528,7 +528,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
 	fp2_gen_cntl = 0;
 
-	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+	if (rdev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
 		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
 	}
 
@@ -565,7 +565,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 		(RADEON_CRTC_SYNC_TRISTAT |
 		 RADEON_CRTC_DISPLAY_DIS)));
 
-	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+	if (rdev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
 		WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
 	}
 
@@ -583,7 +583,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
 	}
 	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
-	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+	if (rdev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
 		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
 	}
 	return r;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index ff2135059c071a287619b9511bae72b592b66064..783a6b8802d5d8f049e83287570b540e7ac635f8 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -894,13 +894,13 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
 
 	/* quirks */
 	/* Radeon 7000 (RV100) */
-	if (((dev->pdev->device == 0x5159) &&
-	    (dev->pdev->subsystem_vendor == 0x174B) &&
-	    (dev->pdev->subsystem_device == 0x7c28)) ||
+	if (((rdev->pdev->device == 0x5159) &&
+	    (rdev->pdev->subsystem_vendor == 0x174B) &&
+	    (rdev->pdev->subsystem_device == 0x7c28)) ||
 	/* Radeon 9100 (R200) */
-	   ((dev->pdev->device == 0x514D) &&
-	    (dev->pdev->subsystem_vendor == 0x174B) &&
-	    (dev->pdev->subsystem_device == 0x7149))) {
+	   ((rdev->pdev->device == 0x514D) &&
+	    (rdev->pdev->subsystem_vendor == 0x174B) &&
+	    (rdev->pdev->subsystem_device == 0x7149))) {
 		/* vbios value is bad, use the default */
 		found = 0;
 	}
@@ -2221,20 +2221,21 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
 				       struct radeon_i2c_bus_rec *ddc_i2c,
 				       struct radeon_hpd *hpd)
 {
+	struct radeon_device *rdev = dev->dev_private;
 
 	/* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
 	   one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
-	if (dev->pdev->device == 0x515e &&
-	    dev->pdev->subsystem_vendor == 0x1014) {
+	if (rdev->pdev->device == 0x515e &&
+	    rdev->pdev->subsystem_vendor == 0x1014) {
 		if (*legacy_connector == CONNECTOR_CRT_LEGACY &&
 		    ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
 			return false;
 	}
 
 	/* X300 card with extra non-existent DVI port */
-	if (dev->pdev->device == 0x5B60 &&
-	    dev->pdev->subsystem_vendor == 0x17af &&
-	    dev->pdev->subsystem_device == 0x201e && bios_index == 2) {
+	if (rdev->pdev->device == 0x5B60 &&
+	    rdev->pdev->subsystem_vendor == 0x17af &&
+	    rdev->pdev->subsystem_device == 0x201e && bios_index == 2) {
 		if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
 			return false;
 	}
@@ -2244,22 +2245,24 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
 
 static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
 {
+	struct radeon_device *rdev = dev->dev_private;
+
 	/* Acer 5102 has non-existent TV port */
-	if (dev->pdev->device == 0x5975 &&
-	    dev->pdev->subsystem_vendor == 0x1025 &&
-	    dev->pdev->subsystem_device == 0x009f)
+	if (rdev->pdev->device == 0x5975 &&
+	    rdev->pdev->subsystem_vendor == 0x1025 &&
+	    rdev->pdev->subsystem_device == 0x009f)
 		return false;
 
 	/* HP dc5750 has non-existent TV port */
-	if (dev->pdev->device == 0x5974 &&
-	    dev->pdev->subsystem_vendor == 0x103c &&
-	    dev->pdev->subsystem_device == 0x280a)
+	if (rdev->pdev->device == 0x5974 &&
+	    rdev->pdev->subsystem_vendor == 0x103c &&
+	    rdev->pdev->subsystem_device == 0x280a)
 		return false;
 
 	/* MSI S270 has non-existent TV port */
-	if (dev->pdev->device == 0x5955 &&
-	    dev->pdev->subsystem_vendor == 0x1462 &&
-	    dev->pdev->subsystem_device == 0x0131)
+	if (rdev->pdev->device == 0x5955 &&
+	    rdev->pdev->subsystem_vendor == 0x1462 &&
+	    rdev->pdev->subsystem_device == 0x0131)
 		return false;
 
 	return true;
@@ -2413,9 +2416,9 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 				/* RV100 board with external TDMS bit mis-set.
 				 * Actually uses internal TMDS, clear the bit.
 				 */
-				if (dev->pdev->device == 0x5159 &&
-				    dev->pdev->subsystem_vendor == 0x1014 &&
-				    dev->pdev->subsystem_device == 0x029A) {
+				if (rdev->pdev->device == 0x5159 &&
+				    rdev->pdev->subsystem_vendor == 0x1014 &&
+				    rdev->pdev->subsystem_device == 0x029A) {
 					tmp &= ~(1 << 4);
 				}
 				if ((tmp >> 4) & 0x1) {
@@ -2707,9 +2710,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
 		/* boards with a thermal chip, but no overdrive table */
 
 		/* Asus 9600xt has an f75375 on the monid bus */
-		if ((dev->pdev->device == 0x4152) &&
-		    (dev->pdev->subsystem_vendor == 0x1043) &&
-		    (dev->pdev->subsystem_device == 0xc002)) {
+		if ((rdev->pdev->device == 0x4152) &&
+		    (rdev->pdev->subsystem_vendor == 0x1043) &&
+		    (rdev->pdev->subsystem_device == 0xc002)) {
 			i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
 			rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
 			if (rdev->pm.i2c_bus) {
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index c6262fce744002bb78aeb1f1d9f3a6263dea422b..35e937d39b51893e270029d4b7d566ce6546c8a5 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -130,8 +130,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		 * IGP chips to avoid image corruptions
 		 */
 		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
-		    (i <= 0 || pci_find_capability(p->rdev->ddev->pdev,
-						   PCI_CAP_ID_AGP) ||
+		    (i <= 0 || pci_find_capability(p->rdev->pdev, PCI_CAP_ID_AGP) ||
 		     p->rdev->family == CHIP_RS780 ||
 		     p->rdev->family == CHIP_RS880)) {
 
@@ -401,7 +400,8 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
 	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
 
 	/* Sort A before B if A is smaller. */
-	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
+	return (int)la->robj->tbo.mem.num_pages -
+		(int)lb->robj->tbo.mem.num_pages;
 }
 
 /**
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ebccaa5b2d0ee72f79d74a6f05ba65c21b7aef72..2cbf14fc6ece5023998f49c8440895159735c827 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1562,6 +1562,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
 		       bool fbcon, bool freeze)
 {
 	struct radeon_device *rdev;
+	struct pci_dev *pdev;
 	struct drm_crtc *crtc;
 	struct drm_connector *connector;
 	int i, r;
@@ -1571,6 +1572,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
 	}
 
 	rdev = dev->dev_private;
+	pdev = to_pci_dev(dev->dev);
 
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
@@ -1636,14 +1638,14 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
 
 	radeon_agp_suspend(rdev);
 
-	pci_save_state(dev->pdev);
+	pci_save_state(pdev);
 	if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
 		rdev->asic->asic_reset(rdev, true);
-		pci_restore_state(dev->pdev);
+		pci_restore_state(pdev);
 	} else if (suspend) {
 		/* Shut down the device */
-		pci_disable_device(dev->pdev);
-		pci_set_power_state(dev->pdev, PCI_D3hot);
+		pci_disable_device(pdev);
+		pci_set_power_state(pdev, PCI_D3hot);
 	}
 
 	if (fbcon) {
@@ -1665,6 +1667,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 {
 	struct drm_connector *connector;
 	struct radeon_device *rdev = dev->dev_private;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct drm_crtc *crtc;
 	int r;
 
@@ -1675,9 +1678,9 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 		console_lock();
 	}
 	if (resume) {
-		pci_set_power_state(dev->pdev, PCI_D0);
-		pci_restore_state(dev->pdev);
-		if (pci_enable_device(dev->pdev)) {
+		pci_set_power_state(pdev, PCI_D0);
+		pci_restore_state(pdev);
+		if (pci_enable_device(pdev)) {
 			if (fbcon)
 				console_unlock();
 			return -1;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 3a6fedad002d7dd9f3b0d2e8a407ffdc94026975..652af7a134bd0bd9e92ece804430da2671c9caea 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1317,7 +1317,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
 
 	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
 	if (obj ==  NULL) {
-		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
+		dev_err(dev->dev, "No GEM object associated to handle 0x%08X, "
 			"can't create framebuffer\n", mode_cmd->handles[0]);
 		return ERR_PTR(-ENOENT);
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e45d7344ac2b4009c2f419fccf98f976bdb2cb63..efeb115ae70ece1ccb52dcfd877cb9e7e2d01820 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -342,14 +342,9 @@ static int radeon_pci_probe(struct pci_dev *pdev,
 	if (ret)
 		goto err_free;
 
-	dev->pdev = pdev;
-#ifdef __alpha__
-	dev->hose = pdev->sysdata;
-#endif
-
 	pci_set_drvdata(pdev, dev);
 
-	if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
+	if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
 		dev->agp = drm_agp_init(dev);
 	if (dev->agp) {
 		dev->agp->agp_mtrr = arch_phys_wc_add(
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index fc4212633bdf13e7e605c05eaff8536d6a9461b9..0b206b0529726307f8ce98338e62999ac48df093 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -290,7 +290,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
 	DRM_INFO("fb depth is %d\n", fb->format->depth);
 	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
 
-	vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
+	vga_switcheroo_client_fb_set(rdev->pdev, info);
 	return 0;
 
 out:
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index b6b21d2e726241627f4f6084bed24c0c08358367..941826923247329b5da554a3d35d10a6083123ff 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -651,7 +651,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
 	}
 
 	if (args->offset < RADEON_VA_RESERVED_SIZE) {
-		dev_err(&dev->pdev->dev,
+		dev_err(dev->dev,
 			"offset 0x%lX is in reserved area 0x%X\n",
 			(unsigned long)args->offset,
 			RADEON_VA_RESERVED_SIZE);
@@ -665,7 +665,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
 	 */
 	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
 	if ((args->flags & invalid_flags)) {
-		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
+		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
 			args->flags, invalid_flags);
 		args->operation = RADEON_VA_RESULT_ERROR;
 		return -EINVAL;
@@ -676,7 +676,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
 	case RADEON_VA_UNMAP:
 		break;
 	default:
-		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+		dev_err(dev->dev, "unsupported operation %d\n",
 			args->operation);
 		args->operation = RADEON_VA_RESULT_ERROR;
 		return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index e543d993f73ee2554ec5b8560f464fabb3902b6c..314d066e68e9d0dddb240f09526eedce8eac3828 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -919,7 +919,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
 	i2c->rec = *rec;
 	i2c->adapter.owner = THIS_MODULE;
 	i2c->adapter.class = I2C_CLASS_DDC;
-	i2c->adapter.dev.parent = &dev->pdev->dev;
+	i2c->adapter.dev.parent = dev->dev;
 	i2c->dev = dev;
 	i2c_set_adapdata(&i2c->adapter, i2c);
 	mutex_init(&i2c->mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index b8b7f627f0a9c175930c93b7b92a10180c7125f2..84d0b1a3355f36c427923b9c7f220e79b6a9521e 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -314,7 +314,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
 	INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
 
 	rdev->irq.installed = true;
-	r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
+	r = drm_irq_install(rdev->ddev, rdev->pdev->irq);
 	if (r) {
 		rdev->irq.installed = false;
 		flush_delayed_work(&rdev->hotplug_work);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 50cee4880bb46606638cea938d741c54b2673d6f..2479d6ab7a368c5f2d4e8fe84b1c1aa49d8a1892 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -76,7 +76,7 @@ void radeon_driver_unload_kms(struct drm_device *dev)
 	}
 
 	radeon_acpi_fini(rdev);
-	
+
 	radeon_modeset_fini(rdev);
 	radeon_device_fini(rdev);
 
@@ -105,6 +105,7 @@ void radeon_driver_unload_kms(struct drm_device *dev)
  */
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 {
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct radeon_device *rdev;
 	int r, acpi_status;
 
@@ -114,10 +115,14 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	}
 	dev->dev_private = (void *)rdev;
 
+#ifdef __alpha__
+	rdev->hose = pdev->sysdata;
+#endif
+
 	/* update BUS flag */
-	if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) {
+	if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) {
 		flags |= RADEON_IS_AGP;
-	} else if (pci_is_pcie(dev->pdev)) {
+	} else if (pci_is_pcie(pdev)) {
 		flags |= RADEON_IS_PCIE;
 	} else {
 		flags |= RADEON_IS_PCI;
@@ -126,7 +131,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	if ((radeon_runtime_pm != 0) &&
 	    radeon_has_atpx() &&
 	    ((flags & RADEON_IS_IGP) == 0) &&
-	    !pci_is_thunderbolt_attached(dev->pdev))
+	    !pci_is_thunderbolt_attached(pdev))
 		flags |= RADEON_IS_PX;
 
 	/* radeon_device_init should report only fatal error
@@ -135,9 +140,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	 * properly initialize the GPU MC controller and permit
 	 * VRAM allocation
 	 */
-	r = radeon_device_init(rdev, dev, dev->pdev, flags);
+	r = radeon_device_init(rdev, dev, pdev, flags);
 	if (r) {
-		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+		dev_err(dev->dev, "Fatal error during GPU init\n");
 		goto out;
 	}
 
@@ -147,7 +152,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	 */
 	r = radeon_modeset_init(rdev);
 	if (r)
-		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+		dev_err(dev->dev, "Fatal error during modeset init\n");
 
 	/* Call ACPI methods: require modeset init
 	 * but failure is not fatal
@@ -155,8 +160,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	if (!r) {
 		acpi_status = radeon_acpi_init(rdev);
 		if (acpi_status)
-		dev_dbg(&dev->pdev->dev,
-				"Error during ACPI methods call\n");
+			dev_dbg(dev->dev, "Error during ACPI methods call\n");
 	}
 
 	if (radeon_is_px(dev)) {
@@ -239,7 +243,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 	switch (info->request) {
 	case RADEON_INFO_DEVICE_ID:
-		*value = dev->pdev->device;
+		*value = to_pci_dev(dev->dev)->device;
 		break;
 	case RADEON_INFO_NUM_GB_PIPES:
 		*value = rdev->num_gb_pipes;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index e64fd0ce6707048f3031f5f0c5b8412618cfcc53..7fdb77d48d6a32ff41d29c8898c7e2ff7f58ba68 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -974,9 +974,9 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
 
 		/* XXX: these are oem specific */
 		if (ASIC_IS_R300(rdev)) {
-			if ((dev->pdev->device == 0x4850) &&
-			    (dev->pdev->subsystem_vendor == 0x1028) &&
-			    (dev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */
+			if ((rdev->pdev->device == 0x4850) &&
+			    (rdev->pdev->subsystem_vendor == 0x1028) &&
+			    (rdev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */
 				fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE;
 			else
 				fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 8bc5ad1d6585720c6eff1ef9cf2473fbce86dc13..9b81786782de75601fb1c1c4191fbf4aa587b397 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -53,20 +53,19 @@ static void radeon_update_memory_usage(struct radeon_bo *bo,
 				       unsigned mem_type, int sign)
 {
 	struct radeon_device *rdev = bo->rdev;
-	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
 
 	switch (mem_type) {
 	case TTM_PL_TT:
 		if (sign > 0)
-			atomic64_add(size, &rdev->gtt_usage);
+			atomic64_add(bo->tbo.base.size, &rdev->gtt_usage);
 		else
-			atomic64_sub(size, &rdev->gtt_usage);
+			atomic64_sub(bo->tbo.base.size, &rdev->gtt_usage);
 		break;
 	case TTM_PL_VRAM:
 		if (sign > 0)
-			atomic64_add(size, &rdev->vram_usage);
+			atomic64_add(bo->tbo.base.size, &rdev->vram_usage);
 		else
-			atomic64_sub(size, &rdev->vram_usage);
+			atomic64_sub(bo->tbo.base.size, &rdev->vram_usage);
 		break;
 	}
 }
@@ -255,7 +254,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
 		}
 		return 0;
 	}
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
 	if (r) {
 		return r;
 	}
@@ -609,7 +608,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 out:
 	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
 			       bo->tbo.mem.start << PAGE_SHIFT,
-			       bo->tbo.num_pages << PAGE_SHIFT);
+			       bo->tbo.base.size);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d606e9a935e3346d546d109167b2d50ce86041c1..9896d8231fe5c57d39977790250717853505f8f4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -109,12 +109,12 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
 
 static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
 {
-	return bo->tbo.num_pages << PAGE_SHIFT;
+	return bo->tbo.base.size;
 }
 
 static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
 {
-	return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+	return bo->tbo.base.size / RADEON_GPU_PAGE_SIZE;
 }
 
 static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
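
The helpers above switch from the removed ttm_buffer_object::num_pages to
the byte size kept in the embedded GEM object (tbo.base.size); callers that
need the page count of the current placement use tbo.mem.num_pages instead.
For a page-aligned BO the old and new expressions relate as in this sketch:

    unsigned long bytes = bo->tbo.base.size;    /* new, in bytes */
    unsigned long pages = bytes >> PAGE_SHIFT;  /* == old bo->tbo.num_pages */
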
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index dd482edc819c508b3a4423b346ad4b80ce207989..ab29eb9e8667596cb525ae189b193f2f329a3910 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -35,9 +35,9 @@
 struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
 	struct radeon_bo *bo = gem_to_radeon_bo(obj);
-	int npages = bo->tbo.num_pages;
 
-	return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages);
+	return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages,
+				     bo->tbo.ttm->num_pages);
 }
 
 struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index c93f3ab3c4e300dde29ea3b9021f2dfc1336f825..1729cb9a95c556f75cf1daa457546f7830c37043 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,
 
 	    TP_fast_assign(
 			   __entry->bo = bo;
-			   __entry->pages = bo->tbo.num_pages;
+			   __entry->pages = bo->tbo.mem.num_pages;
 			   ),
 	    TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
 );
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 23195d5d4e9191bce05d2eeeded4ed1196f6684b..e8c66d10478f03bed887e97caa8886d67fc57c81 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -46,7 +46,6 @@
 #include <drm/radeon_drm.h>
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_placement.h>
 
 #include "radeon_reg.h"
@@ -276,7 +275,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 
 out:
 	/* update statistics */
-	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
+	atomic64_add(bo->base.size, &rdev->num_bytes_moved);
 	radeon_bo_move_notify(bo, evict, new_mem);
 	return 0;
 }
@@ -325,7 +324,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
 		 * access, as done in ttm_bo_vm_fault().
 		 */
 		mem->bus.offset = (mem->bus.offset & 0x0ffffffffUL) +
-			rdev->ddev->hose->dense_mem_base;
+			rdev->hose->dense_mem_base;
 #endif
 		break;
 	default:
@@ -396,8 +395,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
 	if (r)
 		goto release_sg;
 
-	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
-					 gtt->ttm.dma_address, ttm->num_pages);
+	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+				       ttm->num_pages);
 
 	return 0;
 
@@ -535,7 +534,7 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
 	else
 		caching = ttm_cached;
 
-	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags, caching)) {
+	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
 		kfree(gtt);
 		return NULL;
 	}
@@ -573,8 +572,8 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
 	}
 
 	if (slave && ttm->sg) {
-		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
-						 gtt->ttm.dma_address, ttm->num_pages);
+		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+					       ttm->num_pages);
 		return 0;
 	}
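
drm_prime_sg_to_page_addr_arrays() gives way to
drm_prime_sg_to_dma_addr_array(), which fills only the DMA address array;
struct page pointers are no longer extracted from the imported sg_table.
For comparison, the two prototypes as of this series (listed from memory,
verify against include/drm/drm_prime.h):

    int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt,
                                         struct page **pages,
                                         dma_addr_t *addrs, int max_entries);
    int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt,
                                       dma_addr_t *addrs, int max_pages);
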
 
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 39c1c339be7b0f20ef9817f4467bde7322d9f960..dfa9fdbe98da25fc2360769b583419405b7f962f 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -781,7 +781,7 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
 	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
 		RADEON_GPU_PAGE_SIZE;
 
-	uint32_t *msg = rdev->uvd.cpu_addr + offs;
+	uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs);
 	uint64_t addr = rdev->uvd.gpu_addr + offs;
 
 	int r, i;
@@ -791,19 +791,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
 		return r;
 
 	/* stitch together an UVD create msg */
-	msg[0] = cpu_to_le32(0x00000de4);
-	msg[1] = cpu_to_le32(0x00000000);
-	msg[2] = cpu_to_le32(handle);
-	msg[3] = cpu_to_le32(0x00000000);
-	msg[4] = cpu_to_le32(0x00000000);
-	msg[5] = cpu_to_le32(0x00000000);
-	msg[6] = cpu_to_le32(0x00000000);
-	msg[7] = cpu_to_le32(0x00000780);
-	msg[8] = cpu_to_le32(0x00000440);
-	msg[9] = cpu_to_le32(0x00000000);
-	msg[10] = cpu_to_le32(0x01b37000);
+	writel(cpu_to_le32(0x00000de4), &msg[0]);
+	writel(0x0, &msg[1]);
+	writel(cpu_to_le32(handle), &msg[2]);
+	writel(0x0, &msg[3]);
+	writel(0x0, &msg[4]);
+	writel(0x0, &msg[5]);
+	writel(0x0, &msg[6]);
+	writel(cpu_to_le32(0x00000780), &msg[7]);
+	writel(cpu_to_le32(0x00000440), &msg[8]);
+	writel(0x0, &msg[9]);
+	writel(cpu_to_le32(0x01b37000), &msg[10]);
 	for (i = 11; i < 1024; ++i)
-		msg[i] = cpu_to_le32(0x0);
+		writel(0x0, &msg[i]);
 
 	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
 	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
@@ -817,7 +817,7 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
 	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
 		RADEON_GPU_PAGE_SIZE;
 
-	uint32_t *msg = rdev->uvd.cpu_addr + offs;
+	uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs);
 	uint64_t addr = rdev->uvd.gpu_addr + offs;
 
 	int r, i;
@@ -827,12 +827,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
 		return r;
 
 	/* stitch together an UVD destroy msg */
-	msg[0] = cpu_to_le32(0x00000de4);
-	msg[1] = cpu_to_le32(0x00000002);
-	msg[2] = cpu_to_le32(handle);
-	msg[3] = cpu_to_le32(0x00000000);
+	writel(cpu_to_le32(0x00000de4), &msg[0]);
+	writel(cpu_to_le32(0x00000002), &msg[1]);
+	writel(cpu_to_le32(handle), &msg[2]);
+	writel(0x0, &msg[3]);
 	for (i = 4; i < 1024; ++i)
-		msg[i] = cpu_to_le32(0x0);
+		writel(0x0, &msg[i]);
 
 	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
 	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
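
The UVD message buffer sits in BAR-mapped VRAM, so the CPU pointer is now
annotated __iomem and must be written through the I/O accessors instead of
plain stores, which sparse flags and which lack ordering guarantees on some
architectures. The idiom in isolation, as a sketch:

    uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs);
    int i;

    writel(cpu_to_le32(handle), &msg[2]);   /* explicit little-endian payload */
    for (i = 11; i < 1024; ++i)
        writel(0x0, &msg[i]);               /* clear the rest of the 4 KiB page */
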
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index a450497368b22462846cadd478aa810435b357bd..511a942e851db8d273a85120c66f0d14b4cb0529 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -68,7 +68,6 @@ int radeon_vce_init(struct radeon_device *rdev)
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
 	case CHIP_VERDE:
-	case CHIP_OLAND:
 	case CHIP_ARUBA:
 		fw_name = FIRMWARE_TAHITI;
 		break;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index c296f94f9700e3967901a9c056aacbd4f44fa318..7bc302a89232ae44cb4aaaeb1fb6234fb934550d 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -187,7 +187,7 @@ static void rs690_mc_init(struct radeon_device *rdev)
 		/* FastFB shall be used with UMA memory. Here it is simply disabled when sideport 
 		 * memory is present.
 		 */
-		if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+		if (!rdev->mc.igp_sideport_enabled && radeon_fastfb == 1) {
 			DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n", 
 					(unsigned long long)rdev->mc.aper_base, k8_addr);
 			rdev->mc.aper_base = (resource_size_t)k8_addr;
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 17390074277a9736c6fe7b536a2ed8d7ce9cde07..24ad124091207938d5cfe7a9d9babd771e1a9128 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -223,16 +223,15 @@ static void rs780_preset_starting_fbdiv(struct radeon_device *rdev)
 static void rs780_voltage_scaling_init(struct radeon_device *rdev)
 {
 	struct igp_power_info *pi = rs780_get_pi(rdev);
-	struct drm_device *dev = rdev->ddev;
 	u32 fv_throt_pwm_fb_div_range[3];
 	u32 fv_throt_pwm_range[4];
 
-	if (dev->pdev->device == 0x9614) {
+	if (rdev->pdev->device == 0x9614) {
 		fv_throt_pwm_fb_div_range[0] = RS780D_FVTHROTPWMFBDIVRANGEREG0_DFLT;
 		fv_throt_pwm_fb_div_range[1] = RS780D_FVTHROTPWMFBDIVRANGEREG1_DFLT;
 		fv_throt_pwm_fb_div_range[2] = RS780D_FVTHROTPWMFBDIVRANGEREG2_DFLT;
-	} else if ((dev->pdev->device == 0x9714) ||
-		   (dev->pdev->device == 0x9715)) {
+	} else if ((rdev->pdev->device == 0x9714) ||
+		   (rdev->pdev->device == 0x9715)) {
 		fv_throt_pwm_fb_div_range[0] = RS880D_FVTHROTPWMFBDIVRANGEREG0_DFLT;
 		fv_throt_pwm_fb_div_range[1] = RS880D_FVTHROTPWMFBDIVRANGEREG1_DFLT;
 		fv_throt_pwm_fb_div_range[2] = RS880D_FVTHROTPWMFBDIVRANGEREG2_DFLT;
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
index 70c5da2141d7598003704b76aba04158f7188794..bdfbcf14b86405b2684de932a34c63192e43b976 100644
--- a/drivers/gpu/drm/radeon/vce_v1_0.c
+++ b/drivers/gpu/drm/radeon/vce_v1_0.c
@@ -169,7 +169,6 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
 		chip_id = 0x01000015;
 		break;
 	case CHIP_PITCAIRN:
-	case CHIP_OLAND:
 		chip_id = 0x01000016;
 		break;
 	case CHIP_ARUBA:
diff --git a/drivers/gpu/drm/rcar-du/rcar_cmm.c b/drivers/gpu/drm/rcar-du/rcar_cmm.c
index c578095b09a53e89cc5777da7bcd800eac25ac03..382d53f8a22e85cdc13399d3c228c9839d591f37 100644
--- a/drivers/gpu/drm/rcar-du/rcar_cmm.c
+++ b/drivers/gpu/drm/rcar-du/rcar_cmm.c
@@ -122,7 +122,7 @@ int rcar_cmm_enable(struct platform_device *pdev)
 {
 	int ret;
 
-	ret = pm_runtime_get_sync(&pdev->dev);
+	ret = pm_runtime_resume_and_get(&pdev->dev);
 	if (ret < 0)
 		return ret;
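
pm_runtime_get_sync() leaves the usage counter raised even when the resume
fails, a reference that callers routinely forget to drop.
pm_runtime_resume_and_get() folds that error handling in; its core is
essentially (see include/linux/pm_runtime.h):

    static inline int pm_runtime_resume_and_get(struct device *dev)
    {
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
            pm_runtime_put_noidle(dev);     /* drop the count taken above */
            return ret;
        }

        return 0;
    }
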
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index b5fb941e0f534c590da278445620983599f3e523..ea7e39d03545728853df1cb03611700535ac6161 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -730,13 +730,10 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
 	 */
 	if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
 	    rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
-		struct rcar_du_encoder *encoder =
-			rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
+		struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
 		const struct drm_display_mode *mode =
 			&crtc->state->adjusted_mode;
-		struct drm_bridge *bridge;
 
-		bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
 		rcar_lvds_clk_enable(bridge, mode->clock * 1000);
 	}
 
@@ -764,15 +761,12 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
 
 	if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
 	    rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
-		struct rcar_du_encoder *encoder =
-			rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
-		struct drm_bridge *bridge;
+		struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
 
 		/*
 		 * Disable the LVDS clock output, see
 		 * rcar_du_crtc_atomic_enable().
 		 */
-		bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
 		rcar_lvds_clk_disable(bridge);
 	}
 
@@ -1144,7 +1138,6 @@ static const struct drm_crtc_funcs crtc_funcs_gen3 = {
 	.set_crc_source = rcar_du_crtc_set_crc_source,
 	.verify_crc_source = rcar_du_crtc_verify_crc_source,
 	.get_crc_sources = rcar_du_crtc_get_crc_sources,
-	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 };
 
 /* -----------------------------------------------------------------------------
@@ -1257,7 +1250,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
 	else
 		primary = &rgrp->planes[swindex % 2].plane;
 
-	ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, primary, NULL,
+	ret = drm_crtc_init_with_planes(&rcdu->ddev, crtc, primary, NULL,
 					rcdu->info->gen <= 2 ?
 					&crtc_funcs_gen2 : &crtc_funcs_gen3,
 					NULL);
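
Rather than re-deriving the LVDS bridge from the encoder's bridge chain on
every enable/disable, the pointer is captured once into rcdu->lvds[] at
encoder init time (see rcar_du_encoder.c further down) and indexed here.
Both sides of the idiom, condensed:

    /* producer, at encoder init: remember the bridge per LVDS output */
    if (output == RCAR_DU_OUTPUT_LVDS0 || output == RCAR_DU_OUTPUT_LVDS1)
        rcdu->lvds[output - RCAR_DU_OUTPUT_LVDS0] = bridge;

    /* consumer, at CRTC enable/disable: index it directly */
    struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
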
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 600056dff37438ba6cef39c996f3906819e2267f..bfbff90588cbf647e3d9c6f110140c48e8811574 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -18,10 +18,11 @@
 #include <linux/wait.h>
 
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fb_helper.h>
-#include <drm/drm_drv.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_probe_helper.h>
 
 #include "rcar_du_drv.h"
@@ -527,14 +528,14 @@ static int rcar_du_pm_suspend(struct device *dev)
 {
 	struct rcar_du_device *rcdu = dev_get_drvdata(dev);
 
-	return drm_mode_config_helper_suspend(rcdu->ddev);
+	return drm_mode_config_helper_suspend(&rcdu->ddev);
 }
 
 static int rcar_du_pm_resume(struct device *dev)
 {
 	struct rcar_du_device *rcdu = dev_get_drvdata(dev);
 
-	return drm_mode_config_helper_resume(rcdu->ddev);
+	return drm_mode_config_helper_resume(&rcdu->ddev);
 }
 #endif
 
@@ -549,7 +550,7 @@ static const struct dev_pm_ops rcar_du_pm_ops = {
 static int rcar_du_remove(struct platform_device *pdev)
 {
 	struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
-	struct drm_device *ddev = rcdu->ddev;
+	struct drm_device *ddev = &rcdu->ddev;
 
 	drm_dev_unregister(ddev);
 
@@ -563,14 +564,14 @@ static int rcar_du_remove(struct platform_device *pdev)
 static int rcar_du_probe(struct platform_device *pdev)
 {
 	struct rcar_du_device *rcdu;
-	struct drm_device *ddev;
 	struct resource *mem;
 	int ret;
 
 	/* Allocate and initialize the R-Car device structure. */
-	rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
-	if (rcdu == NULL)
-		return -ENOMEM;
+	rcdu = devm_drm_dev_alloc(&pdev->dev, &rcar_du_driver,
+				  struct rcar_du_device, ddev);
+	if (IS_ERR(rcdu))
+		return PTR_ERR(rcdu);
 
 	rcdu->dev = &pdev->dev;
 	rcdu->info = of_device_get_match_data(rcdu->dev);
@@ -584,13 +585,6 @@ static int rcar_du_probe(struct platform_device *pdev)
 		return PTR_ERR(rcdu->mmio);
 
 	/* DRM/KMS objects */
-	ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
-	if (IS_ERR(ddev))
-		return PTR_ERR(ddev);
-
-	rcdu->ddev = ddev;
-	ddev->dev_private = rcdu;
-
 	ret = rcar_du_modeset_init(rcdu);
 	if (ret < 0) {
 		if (ret != -EPROBE_DEFER)
@@ -599,25 +593,24 @@ static int rcar_du_probe(struct platform_device *pdev)
 		goto error;
 	}
 
-	ddev->irq_enabled = 1;
+	rcdu->ddev.irq_enabled = 1;
 
 	/*
 	 * Register the DRM device with the core and the connectors with
 	 * sysfs.
 	 */
-	ret = drm_dev_register(ddev, 0);
+	ret = drm_dev_register(&rcdu->ddev, 0);
 	if (ret)
 		goto error;
 
 	DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
 
-	drm_fbdev_generic_setup(ddev, 32);
+	drm_fbdev_generic_setup(&rcdu->ddev, 32);
 
 	return 0;
 
 error:
-	rcar_du_remove(pdev);
-
+	drm_kms_helper_poll_fini(&rcdu->ddev);
 	return ret;
 }
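
devm_drm_dev_alloc() replaces the separate devm_kzalloc() + drm_dev_alloc()
pair: it allocates the driver structure with the drm_device embedded at the
named member, initializes that device, and ties the allocation's lifetime
to the final drm_dev_put(). The macro's shape (see include/drm/drm_drv.h):

    #define devm_drm_dev_alloc(parent, driver, type, member) \
            ((type *)__devm_drm_dev_alloc(parent, driver, sizeof(type), \
                                          offsetof(type, member)))
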
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 61504c54e2ecf249ca81525ff3116b740b2cfb37..02ca2d0e1b55b0aeb20d812e878b9034cbc45cc1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -13,6 +13,8 @@
 #include <linux/kernel.h>
 #include <linux/wait.h>
 
+#include <drm/drm_device.h>
+
 #include "rcar_cmm.h"
 #include "rcar_du_crtc.h"
 #include "rcar_du_group.h"
@@ -20,10 +22,9 @@
 
 struct clk;
 struct device;
-struct drm_device;
+struct drm_bridge;
 struct drm_property;
 struct rcar_du_device;
-struct rcar_du_encoder;
 
 #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK	BIT(0)	/* Per-CRTC IRQ and clock */
 #define RCAR_DU_FEATURE_VSP1_SOURCE	BIT(1)	/* Has inputs from VSP1 */
@@ -71,6 +72,7 @@ struct rcar_du_device_info {
 #define RCAR_DU_MAX_CRTCS		4
 #define RCAR_DU_MAX_GROUPS		DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2)
 #define RCAR_DU_MAX_VSPS		4
+#define RCAR_DU_MAX_LVDS		2
 
 struct rcar_du_device {
 	struct device *dev;
@@ -78,16 +80,15 @@ struct rcar_du_device {
 
 	void __iomem *mmio;
 
-	struct drm_device *ddev;
+	struct drm_device ddev;
 
 	struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS];
 	unsigned int num_crtcs;
 
-	struct rcar_du_encoder *encoders[RCAR_DU_OUTPUT_MAX];
-
 	struct rcar_du_group groups[RCAR_DU_MAX_GROUPS];
 	struct platform_device *cmms[RCAR_DU_MAX_CRTCS];
 	struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
+	struct drm_bridge *lvds[RCAR_DU_MAX_LVDS];
 
 	struct {
 		struct drm_property *colorkey;
@@ -98,6 +99,11 @@ struct rcar_du_device {
 	unsigned int vspd1_sink;
 };
 
+static inline struct rcar_du_device *to_rcar_du_device(struct drm_device *dev)
+{
+	return container_of(dev, struct rcar_du_device, ddev);
+}
+
 static inline bool rcar_du_has(struct rcar_du_device *rcdu,
 			       unsigned int feature)
 {
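
With the drm_device embedded, dev->dev_private is no longer needed:
to_rcar_du_device() recovers the containing structure by pointer arithmetic
alone. Its container_of() expansion is roughly:

    (struct rcar_du_device *)((char *)dev - offsetof(struct rcar_du_device, ddev))
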
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index b0335da0c1614609846bbc78594e00970518e82a..ba8c6038cd63c9fac7c9738c8391a678e0415b6a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -8,12 +8,13 @@
  */
 
 #include <linux/export.h>
+#include <linux/slab.h>
 
 #include <drm/drm_bridge.h>
 #include <drm/drm_crtc.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_panel.h>
-#include <drm/drm_simple_kms_helper.h>
 
 #include "rcar_du_drv.h"
 #include "rcar_du_encoder.h"
@@ -44,26 +45,25 @@ static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
 	return num_ports;
 }
 
+static const struct drm_encoder_funcs rcar_du_encoder_funcs = {
+};
+
+static void rcar_du_encoder_release(struct drm_device *dev, void *res)
+{
+	struct rcar_du_encoder *renc = res;
+
+	drm_encoder_cleanup(&renc->base);
+	kfree(renc);
+}
+
 int rcar_du_encoder_init(struct rcar_du_device *rcdu,
 			 enum rcar_du_output output,
 			 struct device_node *enc_node)
 {
 	struct rcar_du_encoder *renc;
-	struct drm_encoder *encoder;
 	struct drm_bridge *bridge;
 	int ret;
 
-	renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
-	if (renc == NULL)
-		return -ENOMEM;
-
-	rcdu->encoders[output] = renc;
-	renc->output = output;
-	encoder = rcar_encoder_to_drm_encoder(renc);
-
-	dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
-		enc_node, output);
-
 	/*
 	 * Locate the DRM bridge from the DT node. For the DPAD outputs, if the
 	 * DT node has a single port, assume that it describes a panel and
@@ -74,57 +74,57 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
 	    rcar_du_encoder_count_ports(enc_node) == 1) {
 		struct drm_panel *panel = of_drm_find_panel(enc_node);
 
-		if (IS_ERR(panel)) {
-			ret = PTR_ERR(panel);
-			goto done;
-		}
+		if (IS_ERR(panel))
+			return PTR_ERR(panel);
 
 		bridge = devm_drm_panel_bridge_add_typed(rcdu->dev, panel,
 							 DRM_MODE_CONNECTOR_DPI);
-		if (IS_ERR(bridge)) {
-			ret = PTR_ERR(bridge);
-			goto done;
-		}
+		if (IS_ERR(bridge))
+			return PTR_ERR(bridge);
 	} else {
 		bridge = of_drm_find_bridge(enc_node);
-		if (!bridge) {
-			ret = -EPROBE_DEFER;
-			goto done;
-		}
+		if (!bridge)
+			return -EPROBE_DEFER;
+
+		if (output == RCAR_DU_OUTPUT_LVDS0 ||
+		    output == RCAR_DU_OUTPUT_LVDS1)
+			rcdu->lvds[output - RCAR_DU_OUTPUT_LVDS0] = bridge;
 	}
 
 	/*
-	 * On Gen3 skip the LVDS1 output if the LVDS1 encoder is used as a
-	 * companion for LVDS0 in dual-link mode.
+	 * Create and initialize the encoder. On Gen3 skip the LVDS1 output if
+	 * the LVDS1 encoder is used as a companion for LVDS0 in dual-link
+	 * mode.
 	 */
 	if (rcdu->info->gen >= 3 && output == RCAR_DU_OUTPUT_LVDS1) {
-		if (rcar_lvds_dual_link(bridge)) {
-			ret = -ENOLINK;
-			goto done;
-		}
+		if (rcar_lvds_dual_link(bridge))
+			return -ENOLINK;
 	}
 
-	ret = drm_simple_encoder_init(rcdu->ddev, encoder,
-				      DRM_MODE_ENCODER_NONE);
-	if (ret < 0)
-		goto done;
+	renc = kzalloc(sizeof(*renc), GFP_KERNEL);
+	if (renc == NULL)
+		return -ENOMEM;
 
-	/*
-	 * Attach the bridge to the encoder. The bridge will create the
-	 * connector.
-	 */
-	ret = drm_bridge_attach(encoder, bridge, NULL, 0);
-	if (ret) {
-		drm_encoder_cleanup(encoder);
-		return ret;
-	}
+	renc->output = output;
+
+	dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
+		enc_node, output);
 
-done:
+	ret = drm_encoder_init(&rcdu->ddev, &renc->base, &rcar_du_encoder_funcs,
+			       DRM_MODE_ENCODER_NONE, NULL);
 	if (ret < 0) {
-		if (encoder->name)
-			encoder->funcs->destroy(encoder);
-		devm_kfree(rcdu->dev, renc);
+		kfree(renc);
+		return ret;
 	}
 
-	return ret;
+	ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_encoder_release,
+				       renc);
+	if (ret)
+		return ret;
+
+	/*
+	 * Attach the bridge to the encoder. The bridge will create the
+	 * connector.
+	 */
+	return drm_bridge_attach(&renc->base, bridge, NULL, 0);
 }
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index df9be4524301942bafe388da0eed0d275fcc9a4b..73560563fb31380801640086586b5e1b240b40f0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -22,8 +22,6 @@ struct rcar_du_encoder {
 #define to_rcar_encoder(e) \
 	container_of(e, struct rcar_du_encoder, base)
 
-#define rcar_encoder_to_drm_encoder(e)	(&(e)->base)
-
 int rcar_du_encoder_init(struct rcar_du_device *rcdu,
 			 enum rcar_du_output output,
 			 struct device_node *enc_node);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 72dda446355fee897039214ce95942f7bc5d9f07..fdb8a0d127ad3058e2e9b02aea1b8184442afea6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -14,6 +14,7 @@
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
 
@@ -327,7 +328,7 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
 int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
 			struct drm_mode_create_dumb *args)
 {
-	struct rcar_du_device *rcdu = dev->dev_private;
+	struct rcar_du_device *rcdu = to_rcar_du_device(dev);
 	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 	unsigned int align;
 
@@ -349,7 +350,7 @@ static struct drm_framebuffer *
 rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		  const struct drm_mode_fb_cmd2 *mode_cmd)
 {
-	struct rcar_du_device *rcdu = dev->dev_private;
+	struct rcar_du_device *rcdu = to_rcar_du_device(dev);
 	const struct rcar_du_format_info *format;
 	unsigned int chroma_pitch;
 	unsigned int max_pitch;
@@ -421,7 +422,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 static int rcar_du_atomic_check(struct drm_device *dev,
 				struct drm_atomic_state *state)
 {
-	struct rcar_du_device *rcdu = dev->dev_private;
+	struct rcar_du_device *rcdu = to_rcar_du_device(dev);
 	int ret;
 
 	ret = drm_atomic_helper_check(dev, state);
@@ -437,7 +438,7 @@ static int rcar_du_atomic_check(struct drm_device *dev,
 static void rcar_du_atomic_commit_tail(struct drm_atomic_state *old_state)
 {
 	struct drm_device *dev = old_state->dev;
-	struct rcar_du_device *rcdu = dev->dev_private;
+	struct rcar_du_device *rcdu = to_rcar_du_device(dev);
 	struct drm_crtc_state *crtc_state;
 	struct drm_crtc *crtc;
 	unsigned int i;
@@ -583,7 +584,7 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu)
 	 * or enable source color keying (1).
 	 */
 	rcdu->props.colorkey =
-		drm_property_create_range(rcdu->ddev, 0, "colorkey",
+		drm_property_create_range(&rcdu->ddev, 0, "colorkey",
 					  0, 0x01ffffff);
 	if (rcdu->props.colorkey == NULL)
 		return -ENOMEM;
@@ -700,10 +701,10 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
 		int ret;
 
 		cmm = of_parse_phandle(np, "renesas,cmms", i);
-		if (IS_ERR(cmm)) {
+		if (!cmm) {
 			dev_err(rcdu->dev,
 				"Failed to parse 'renesas,cmms' property\n");
-			return PTR_ERR(cmm);
+			return -EINVAL;
 		}
 
 		if (!of_device_is_available(cmm)) {
@@ -713,10 +714,10 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
 		}
 
 		pdev = of_find_device_by_node(cmm);
-		if (IS_ERR(pdev)) {
+		if (!pdev) {
 			dev_err(rcdu->dev, "No device found for CMM%u\n", i);
 			of_node_put(cmm);
-			return PTR_ERR(pdev);
+			return -EINVAL;
 		}
 
 		of_node_put(cmm);
@@ -726,8 +727,12 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
 		 * disabled: return 0 and let the DU continue probing.
 		 */
 		ret = rcar_cmm_init(pdev);
-		if (ret)
+		if (ret) {
+			platform_device_put(pdev);
 			return ret == -ENODEV ? 0 : ret;
+		}
+
+		rcdu->cmms[i] = pdev;
 
 		/*
 		 * Enforce suspend/resume ordering by making the CMM a provider
@@ -739,20 +744,27 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
 				"Failed to create device link to CMM%u\n", i);
 			return -EINVAL;
 		}
-
-		rcdu->cmms[i] = pdev;
 	}
 
 	return 0;
 }
 
+static void rcar_du_modeset_cleanup(struct drm_device *dev, void *res)
+{
+	struct rcar_du_device *rcdu = to_rcar_du_device(dev);
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(rcdu->cmms); ++i)
+		platform_device_put(rcdu->cmms[i]);
+}
+
 int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 {
 	static const unsigned int mmio_offsets[] = {
 		DU0_REG_OFFSET, DU2_REG_OFFSET
 	};
 
-	struct drm_device *dev = rcdu->ddev;
+	struct drm_device *dev = &rcdu->ddev;
 	struct drm_encoder *encoder;
 	unsigned int dpad0_sources;
 	unsigned int num_encoders;
@@ -766,6 +778,10 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 	if (ret)
 		return ret;
 
+	ret = drmm_add_action(&rcdu->ddev, rcar_du_modeset_cleanup, NULL);
+	if (ret)
+		return ret;
+
 	dev->mode_config.min_width = 0;
 	dev->mode_config.min_height = 0;
 	dev->mode_config.normalize_zpos = true;
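
The CMM lookup fixes above correct two common OF API mistakes:
of_parse_phandle() and of_find_device_by_node() report failure with NULL,
never ERR_PTR(), and of_find_device_by_node() returns a reference that must
be balanced with platform_device_put(). A correct lookup follows this shape
(variable names illustrative):

    struct device_node *np;
    struct platform_device *cmm_pdev;

    np = of_parse_phandle(node, "renesas,cmms", i);
    if (!np)
        return -EINVAL;                 /* NULL on failure, not ERR_PTR() */

    cmm_pdev = of_find_device_by_node(np);
    of_node_put(np);                    /* node reference no longer needed */
    if (!cmm_pdev)
        return -EINVAL;

    /* ... use cmm_pdev ... */
    platform_device_put(cmm_pdev);      /* balance of_find_device_by_node() */
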
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index a0021fc25b27c12054b5fd29bfbcc9e9a8f9b164..02e5f11f38eb7f6d072268375fac9aec3801337b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -128,7 +128,7 @@ static int rcar_du_plane_hwalloc(struct rcar_du_plane *plane,
 int rcar_du_atomic_check_planes(struct drm_device *dev,
 				struct drm_atomic_state *state)
 {
-	struct rcar_du_device *rcdu = dev->dev_private;
+	struct rcar_du_device *rcdu = to_rcar_du_device(dev);
 	unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
 	unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
 	bool needs_realloc = false;
@@ -773,9 +773,9 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
 
 		plane->group = rgrp;
 
-		ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
-					       &rcar_du_plane_funcs, formats,
-					       ARRAY_SIZE(formats),
+		ret = drm_universal_plane_init(&rcdu->ddev, &plane->plane,
+					       crtcs, &rcar_du_plane_funcs,
+					       formats, ARRAY_SIZE(formats),
 					       NULL, type, NULL);
 		if (ret < 0)
 			return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index f6a69aa116e6cc704628c69c743fe497b9030ddf..53221d8473c15e1745be5161f93e54fdbaefd359 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -21,6 +21,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/of_platform.h>
 #include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <linux/videodev2.h>
 
 #include <media/vsp1.h>
@@ -344,6 +345,15 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
 static void rcar_du_vsp_cleanup(struct drm_device *dev, void *res)
 {
 	struct rcar_du_vsp *vsp = res;
+	unsigned int i;
+
+	for (i = 0; i < vsp->num_planes; ++i) {
+		struct rcar_du_vsp_plane *plane = &vsp->planes[i];
+
+		drm_plane_cleanup(&plane->plane);
+	}
+
+	kfree(vsp->planes);
 
 	put_device(vsp->vsp);
 }
@@ -354,6 +364,7 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
 	struct rcar_du_device *rcdu = vsp->dev;
 	struct platform_device *pdev;
 	unsigned int num_crtcs = hweight32(crtcs);
+	unsigned int num_planes;
 	unsigned int i;
 	int ret;
 
@@ -364,7 +375,7 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
 
 	vsp->vsp = &pdev->dev;
 
-	ret = drmm_add_action(rcdu->ddev, rcar_du_vsp_cleanup, vsp);
+	ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_vsp_cleanup, vsp);
 	if (ret < 0)
 		return ret;
 
@@ -376,14 +387,13 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
 	  * The VSP2D (Gen3) has 5 RPFs, but the VSP1D (Gen2) is limited to
 	  * 4 RPFs.
 	  */
-	vsp->num_planes = rcdu->info->gen >= 3 ? 5 : 4;
+	num_planes = rcdu->info->gen >= 3 ? 5 : 4;
 
-	vsp->planes = devm_kcalloc(rcdu->dev, vsp->num_planes,
-				   sizeof(*vsp->planes), GFP_KERNEL);
+	vsp->planes = kcalloc(num_planes, sizeof(*vsp->planes), GFP_KERNEL);
 	if (!vsp->planes)
 		return -ENOMEM;
 
-	for (i = 0; i < vsp->num_planes; ++i) {
+	for (i = 0; i < num_planes; ++i) {
 		enum drm_plane_type type = i < num_crtcs
 					 ? DRM_PLANE_TYPE_PRIMARY
 					 : DRM_PLANE_TYPE_OVERLAY;
@@ -392,8 +402,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
 		plane->vsp = vsp;
 		plane->index = i;
 
-		ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
-					       &rcar_du_vsp_plane_funcs,
+		ret = drm_universal_plane_init(&rcdu->ddev, &plane->plane,
+					       crtcs, &rcar_du_vsp_plane_funcs,
 					       rcar_du_vsp_formats,
 					       ARRAY_SIZE(rcar_du_vsp_formats),
 					       NULL, type, NULL);
@@ -409,8 +419,10 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
 		} else {
 			drm_plane_create_alpha_property(&plane->plane);
 			drm_plane_create_zpos_property(&plane->plane, 1, 1,
-						       vsp->num_planes - 1);
+						       num_planes - 1);
 		}
+
+		vsp->num_planes++;
 	}
 
 	return 0;
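
rcar_du_vsp.c ties the plane array's lifetime to the drm_device rather than the platform device: devm_kcalloc() becomes a plain kcalloc() freed by rcar_du_vsp_cleanup(), drmm_add_action() becomes drmm_add_action_or_reset() so the put_device() still happens if registering the action fails, and vsp->num_planes is now incremented only after each drm_universal_plane_init() succeeds, so the cleanup loop tears down exactly the planes that were initialized. The counting idiom, as a generic sketch with hypothetical names:

	/* grow the "initialized" count one step behind the loop, so a
	 * cleanup callback can iterate exactly the entries [0, count) */
	obj->count = 0;
	for (i = 0; i < n; i++) {
		ret = init_one(&obj->items[i]);
		if (ret < 0)
			return ret;	/* cleanup handles [0, count) */
		obj->count++;
	}
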
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
index 04efa78d70b6ea50bb8e354ea3697fc99d3562e6..c79d1259e49ba22b8733496b5489cae701fe2761 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -204,7 +204,7 @@ int rcar_du_writeback_init(struct rcar_du_device *rcdu,
 	drm_connector_helper_add(&wb_conn->base,
 				 &rcar_du_wb_conn_helper_funcs);
 
-	return drm_writeback_connector_init(rcdu->ddev, wb_conn,
+	return drm_writeback_connector_init(&rcdu->ddev, wb_conn,
 					    &rcar_du_wb_conn_funcs,
 					    &rcar_du_wb_enc_helper_funcs,
 					    writeback_formats,
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 310aa15468935c23d93a001bf4133205cba231e3..cb25c0e8fc9b2e6547e2c901452019b58ea991c6 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -49,7 +49,7 @@ config ROCKCHIP_DW_MIPI_DSI
 	select GENERIC_PHY_MIPI_DPHY
 	help
 	  This selects support for Rockchip SoC specific extensions
-	  for the Synopsys DesignWare HDMI driver. If you want to
+	  for the Synopsys DesignWare DSI driver. If you want to
 	  enable MIPI DSI on RK3288 or RK3399 based SoC, you should
 	  select this option.
 
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index e84325e56d9804bd739730c348f9d6b015eaa4b6..24a71091759cc917c216c1457ae35065361411a3 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -1089,7 +1089,7 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
 
 	dsi->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
 	if (IS_ERR(dsi->grf_regmap)) {
-		DRM_DEV_ERROR(dsi->dev, "Unable to get rockchip,grf\n");
+		DRM_DEV_ERROR(dev, "Unable to get rockchip,grf\n");
 		return PTR_ERR(dsi->grf_regmap);
 	}
 
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 23de359a1dec60116f29a22f045f663b2b7ec707..830bdd5e9b7ce16cbb033a33969800fc47292a78 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -202,7 +202,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
 	} else if (PTR_ERR(hdmi->vpll_clk) == -EPROBE_DEFER) {
 		return -EPROBE_DEFER;
 	} else if (IS_ERR(hdmi->vpll_clk)) {
-		DRM_DEV_ERROR(hdmi->dev, "failed to get grf clock\n");
+		DRM_DEV_ERROR(hdmi->dev, "failed to get vpll clock\n");
 		return PTR_ERR(hdmi->vpll_clk);
 	}
 
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index d1e05482641b5f9ca04a90d8507cb5d8b5f9dc12..8d15cabdcb02a8f1f8dc55a682dec0268b895ba5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1643,7 +1643,6 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
 	.disable_vblank = vop_crtc_disable_vblank,
 	.set_crc_source = vop_crtc_set_crc_source,
 	.verify_crc_source = vop_crtc_verify_crc_source,
-	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 };
 
 static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index b498d474ef9e4a91a91593c9c16e25f8aab30c0e..92637b70c9bf6fda2b17357141eb2ffae0b70f5e 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -60,8 +60,6 @@
 #define to_drm_sched_job(sched_job)		\
 		container_of((sched_job), struct drm_sched_job, queue_node)
 
-static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
-
 /**
  * drm_sched_rq_init - initialize a given run queue struct
  *
@@ -163,6 +161,40 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
 	return NULL;
 }
 
+/**
+ * drm_sched_job_done - complete a job
+ * @s_job: pointer to the job which is done
+ *
+ * Finish the job's fence and wake up the worker thread.
+ */
+static void drm_sched_job_done(struct drm_sched_job *s_job)
+{
+	struct drm_sched_fence *s_fence = s_job->s_fence;
+	struct drm_gpu_scheduler *sched = s_fence->sched;
+
+	atomic_dec(&sched->hw_rq_count);
+	atomic_dec(&sched->score);
+
+	trace_drm_sched_process_job(s_fence);
+
+	dma_fence_get(&s_fence->finished);
+	drm_sched_fence_finished(s_fence);
+	dma_fence_put(&s_fence->finished);
+	wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * drm_sched_job_done_cb - the callback for a done job
+ * @f: fence
+ * @cb: fence callbacks
+ */
+static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
+
+	drm_sched_job_done(s_job);
+}
+
 /**
  * drm_sched_dependency_optimized
  *
@@ -199,7 +231,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
 {
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-	    !list_empty(&sched->ring_mirror_list))
+	    !list_empty(&sched->pending_list))
 		schedule_delayed_work(&sched->work_tdr, sched->timeout);
 }
 
@@ -259,7 +291,7 @@ void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
 {
 	spin_lock(&sched->job_list_lock);
 
-	if (list_empty(&sched->ring_mirror_list))
+	if (list_empty(&sched->pending_list))
 		cancel_delayed_work(&sched->work_tdr);
 	else
 		mod_delayed_work(system_wq, &sched->work_tdr, remaining);
@@ -273,7 +305,7 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
 	struct drm_gpu_scheduler *sched = s_job->sched;
 
 	spin_lock(&sched->job_list_lock);
-	list_add_tail(&s_job->node, &sched->ring_mirror_list);
+	list_add_tail(&s_job->list, &sched->pending_list);
 	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
@@ -287,8 +319,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
 
 	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
 	spin_lock(&sched->job_list_lock);
-	job = list_first_entry_or_null(&sched->ring_mirror_list,
-				       struct drm_sched_job, node);
+	job = list_first_entry_or_null(&sched->pending_list,
+				       struct drm_sched_job, list);
 
 	if (job) {
 		/*
@@ -296,7 +328,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 		 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
 		 * is parked at which point it's safe.
 		 */
-		list_del_init(&job->node);
+		list_del_init(&job->list);
 		spin_unlock(&sched->job_list_lock);
 
 		job->sched->ops->timedout_job(job);
@@ -372,7 +404,7 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
  * Stop the scheduler and also removes and frees all completed jobs.
  * Note: bad job will not be freed as it might be used later and so it's
  * the caller's responsibility to release it manually if it's not part of the
- * mirror list any more.
+ * pending list any more.
  *
  */
 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
@@ -393,26 +425,27 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 		 * Add at the head of the queue to reflect it was the earliest
 		 * job extracted.
 		 */
-		list_add(&bad->node, &sched->ring_mirror_list);
+		list_add(&bad->list, &sched->pending_list);
 
 	/*
 	 * Iterate the job list from later to earlier one and either deactivate
-	 * their HW callbacks or remove them from mirror list if they already
+	 * their HW callbacks or remove them from the pending list if they already
 	 * signaled.
 	 * This iteration is thread safe as sched thread is stopped.
 	 */
-	list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
+					 list) {
 		if (s_job->s_fence->parent &&
 		    dma_fence_remove_callback(s_job->s_fence->parent,
 					      &s_job->cb)) {
 			atomic_dec(&sched->hw_rq_count);
 		} else {
 			/*
-			 * remove job from ring_mirror_list.
+			 * remove job from pending_list.
 			 * Locking here is for concurrent resume timeout
 			 */
 			spin_lock(&sched->job_list_lock);
-			list_del_init(&s_job->node);
+			list_del_init(&s_job->list);
 			spin_unlock(&sched->job_list_lock);
 
 			/*
@@ -463,7 +496,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 	 * so no new jobs are being inserted or removed. Also concurrent
 	 * GPU recovers can't run in parallel.
 	 */
-	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
 		struct dma_fence *fence = s_job->s_fence->parent;
 
 		atomic_inc(&sched->hw_rq_count);
@@ -473,14 +506,14 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 
 		if (fence) {
 			r = dma_fence_add_callback(fence, &s_job->cb,
-						   drm_sched_process_job);
+						   drm_sched_job_done_cb);
 			if (r == -ENOENT)
-				drm_sched_process_job(fence, &s_job->cb);
+				drm_sched_job_done(s_job);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
 		} else
-			drm_sched_process_job(NULL, &s_job->cb);
+			drm_sched_job_done(s_job);
 	}
 
 	if (full_recovery) {
@@ -494,7 +527,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 EXPORT_SYMBOL(drm_sched_start);
 
 /**
- * drm_sched_resubmit_jobs - helper to relunch job from mirror ring list
+ * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
  *
  * @sched: scheduler instance
  *
@@ -506,7 +539,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
 	bool found_guilty = false;
 	struct dma_fence *fence;
 
-	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 
 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
@@ -566,7 +599,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
 		return -ENOMEM;
 	job->id = atomic64_inc_return(&sched->job_id_count);
 
-	INIT_LIST_HEAD(&job->node);
+	INIT_LIST_HEAD(&job->list);
 
 	return 0;
 }
@@ -635,37 +668,12 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 	return entity;
 }
 
-/**
- * drm_sched_process_job - process a job
- *
- * @f: fence
- * @cb: fence callbacks
- *
- * Called after job has finished execution.
- */
-static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
-{
-	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
-	struct drm_sched_fence *s_fence = s_job->s_fence;
-	struct drm_gpu_scheduler *sched = s_fence->sched;
-
-	atomic_dec(&sched->hw_rq_count);
-	atomic_dec(&sched->score);
-
-	trace_drm_sched_process_job(s_fence);
-
-	dma_fence_get(&s_fence->finished);
-	drm_sched_fence_finished(s_fence);
-	dma_fence_put(&s_fence->finished);
-	wake_up_interruptible(&sched->wake_up_worker);
-}
-
 /**
  * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
  *
  * @sched: scheduler instance
  *
- * Returns the next finished job from the mirror list (if there is one)
+ * Returns the next finished job from the pending list (if there is one)
  * ready for it to be destroyed.
  */
 static struct drm_sched_job *
@@ -675,7 +683,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 
 	/*
 	 * Don't destroy jobs while the timeout worker is running OR thread
-	 * is being parked and hence assumed to not touch ring_mirror_list
+	 * is being parked and hence assumed to not touch pending_list
 	 */
 	if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
 	    !cancel_delayed_work(&sched->work_tdr)) ||
@@ -684,12 +692,12 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 
 	spin_lock(&sched->job_list_lock);
 
-	job = list_first_entry_or_null(&sched->ring_mirror_list,
-				       struct drm_sched_job, node);
+	job = list_first_entry_or_null(&sched->pending_list,
+				       struct drm_sched_job, list);
 
 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
-		/* remove job from ring_mirror_list */
-		list_del_init(&job->node);
+		/* remove job from pending_list */
+		list_del_init(&job->list);
 	} else {
 		job = NULL;
 		/* queue timeout for next job */
@@ -809,9 +817,9 @@ static int drm_sched_main(void *param)
 		if (!IS_ERR_OR_NULL(fence)) {
 			s_fence->parent = dma_fence_get(fence);
 			r = dma_fence_add_callback(fence, &sched_job->cb,
-						   drm_sched_process_job);
+						   drm_sched_job_done_cb);
 			if (r == -ENOENT)
-				drm_sched_process_job(fence, &sched_job->cb);
+				drm_sched_job_done(sched_job);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
@@ -820,7 +828,7 @@ static int drm_sched_main(void *param)
 			if (IS_ERR(fence))
 				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
 
-			drm_sched_process_job(NULL, &sched_job->cb);
+			drm_sched_job_done(sched_job);
 		}
 
 		wake_up(&sched->job_scheduled);
@@ -858,7 +866,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
-	INIT_LIST_HEAD(&sched->ring_mirror_list);
+	INIT_LIST_HEAD(&sched->pending_list);
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
@@ -891,6 +899,9 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 	if (sched->thread)
 		kthread_stop(sched->thread);
 
+	/* Confirm no work left behind accessing device structures */
+	cancel_delayed_work_sync(&sched->work_tdr);
+
 	sched->ready = false;
 }
 EXPORT_SYMBOL(drm_sched_fini);
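
Beyond the ring_mirror_list → pending_list and node → list renames, sched_main.c splits the old drm_sched_process_job() into drm_sched_job_done(), which takes the job directly, and drm_sched_job_done_cb(), a thin dma_fence callback wrapper. The dispatch convention is worth spelling out: dma_fence_add_callback() returns -ENOENT when the fence has already signaled, in which case the job must be completed synchronously, as both call sites above do:

	r = dma_fence_add_callback(fence, &s_job->cb, drm_sched_job_done_cb);
	if (r == -ENOENT)
		drm_sched_job_done(s_job);	/* fence already signaled */
	else if (r)
		DRM_ERROR("fence add callback failed (%d)\n", r);

The cancel_delayed_work_sync() added to drm_sched_fini() guarantees the timeout worker is not still running, or about to run, while the driver frees the structures it would touch.
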
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index a9805743102375bb55696c33dc2fa8f317b618f6..7476301d7142a4f6abda782615bd98a62ece085c 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -330,13 +330,6 @@ static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
 	.atomic_disable = sti_cursor_atomic_disable,
 };
 
-static void sti_cursor_destroy(struct drm_plane *drm_plane)
-{
-	DRM_DEBUG_DRIVER("\n");
-
-	drm_plane_cleanup(drm_plane);
-}
-
 static int sti_cursor_late_register(struct drm_plane *drm_plane)
 {
 	struct sti_plane *plane = to_sti_plane(drm_plane);
@@ -350,7 +343,7 @@ static int sti_cursor_late_register(struct drm_plane *drm_plane)
 static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = sti_cursor_destroy,
+	.destroy = drm_plane_cleanup,
 	.reset = sti_plane_reset,
 	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 2d5a2b5b78b8e5dd5f202dbb009b09c1615e8ef4..2f4a34f14d33e27be4b82f8befa09d3fa070875a 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -884,13 +884,6 @@ static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
 	.atomic_disable = sti_gdp_atomic_disable,
 };
 
-static void sti_gdp_destroy(struct drm_plane *drm_plane)
-{
-	DRM_DEBUG_DRIVER("\n");
-
-	drm_plane_cleanup(drm_plane);
-}
-
 static int sti_gdp_late_register(struct drm_plane *drm_plane)
 {
 	struct sti_plane *plane = to_sti_plane(drm_plane);
@@ -902,7 +895,7 @@ static int sti_gdp_late_register(struct drm_plane *drm_plane)
 static const struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = sti_gdp_destroy,
+	.destroy = drm_plane_cleanup,
 	.reset = sti_plane_reset,
 	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 5a4e12194a77dbef4c2958d2e55fd261546ed2c1..62f824cd5f2139819ec37bd2e37aa18ac6dc6b77 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1262,13 +1262,6 @@ static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
 	.atomic_disable = sti_hqvdp_atomic_disable,
 };
 
-static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
-{
-	DRM_DEBUG_DRIVER("\n");
-
-	drm_plane_cleanup(drm_plane);
-}
-
 static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
 {
 	struct sti_plane *plane = to_sti_plane(drm_plane);
@@ -1282,7 +1275,7 @@ static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
 static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = sti_hqvdp_destroy,
+	.destroy = drm_plane_cleanup,
 	.reset = sti_plane_reset,
 	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
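
The three sti wrappers removed here (sti_cursor_destroy(), sti_gdp_destroy(), sti_hqvdp_destroy()) did nothing but print a debug line and call drm_plane_cleanup(), whose signature already matches drm_plane_funcs.destroy, so the core helper can be plugged in directly:

	/* drm_plane_cleanup() is void (*)(struct drm_plane *), exactly
	 * the .destroy signature, so no wrapper is required */
	static const struct drm_plane_funcs example_plane_funcs = {
		.update_plane = drm_atomic_helper_update_plane,
		.disable_plane = drm_atomic_helper_disable_plane,
		.destroy = drm_plane_cleanup,
	};
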
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 3980677435cbf0a2df5c2af37959ace06a615caa..7812094f93d6b80f198fb59403b90383bc0ab49d 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -713,7 +713,6 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
 	.enable_vblank = ltdc_crtc_enable_vblank,
 	.disable_vblank = ltdc_crtc_disable_vblank,
 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 };
 
 /*
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 1e643bc7e786a81d8bc7a937ac9bc050f0dc6938..9f06dec0fc61d826b0cce32196d06bf8bd701432 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -569,30 +569,13 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
 	if (info->bus_flags & DRM_BUS_FLAG_DE_LOW)
 		val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
 
-	/*
-	 * On A20 and similar SoCs, the only way to achieve Positive Edge
-	 * (Rising Edge), is setting dclk clock phase to 2/3(240°).
-	 * By default TCON works in Negative Edge(Falling Edge),
-	 * this is why phase is set to 0 in that case.
-	 * Unfortunately there's no way to logically invert dclk through
-	 * IO_POL register.
-	 * The only acceptable way to work, triple checked with scope,
-	 * is using clock phase set to 0° for Negative Edge and set to 240°
-	 * for Positive Edge.
-	 * On A33 and similar SoCs there would be a 90° phase option,
-	 * but it divides also dclk by 2.
-	 * Following code is a way to avoid quirks all around TCON
-	 * and DOTCLOCK drivers.
-	 */
-	if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
-		clk_set_phase(tcon->dclk, 240);
-
 	if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
-		clk_set_phase(tcon->dclk, 0);
+		val |= SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE;
 
 	regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
 			   SUN4I_TCON0_IO_POL_HSYNC_POSITIVE |
 			   SUN4I_TCON0_IO_POL_VSYNC_POSITIVE |
+			   SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE |
 			   SUN4I_TCON0_IO_POL_DE_NEGATIVE,
 			   val);
 
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index ee555318e3c2f3513f4d25724abcd4240f25db55..e624f6977eb84b4a04a8a5bb8d33e5de241626ad 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -113,6 +113,7 @@
 #define SUN4I_TCON0_IO_POL_REG			0x88
 #define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase)		((phase & 3) << 28)
 #define SUN4I_TCON0_IO_POL_DE_NEGATIVE			BIT(27)
+#define SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE		BIT(26)
 #define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE		BIT(25)
 #define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE		BIT(24)
 
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c
index 781955dd4995d4add13845c4c40a2158ab151121..9bd62de0c288c8a3e692da3085f4d48f71a8277c 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.c
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.c
@@ -46,33 +46,6 @@ static const u32 yuv2rgb[2][2][12] = {
 	},
 };
 
-static const u32 yvu2rgb[2][2][12] = {
-	[DRM_COLOR_YCBCR_LIMITED_RANGE] = {
-		[DRM_COLOR_YCBCR_BT601] = {
-			0x000004A8, 0x00000662, 0x00000000, 0xFFFC8451,
-			0x000004A8, 0xFFFFFCC0, 0xFFFFFE6F, 0x00021E4D,
-			0x000004A8, 0x00000000, 0x00000811, 0xFFFBACA9,
-		},
-		[DRM_COLOR_YCBCR_BT709] = {
-			0x000004A8, 0x0000072B, 0x00000000, 0xFFFC1F99,
-			0x000004A8, 0xFFFFFDDF, 0xFFFFFF26, 0x00013383,
-			0x000004A8, 0x00000000, 0x00000873, 0xFFFB7BEF,
-		}
-	},
-	[DRM_COLOR_YCBCR_FULL_RANGE] = {
-		[DRM_COLOR_YCBCR_BT601] = {
-			0x00000400, 0x0000059B, 0x00000000, 0xFFFD322E,
-			0x00000400, 0xFFFFFD25, 0xFFFFFEA0, 0x00021DD5,
-			0x00000400, 0x00000000, 0x00000716, 0xFFFC74BD,
-		},
-		[DRM_COLOR_YCBCR_BT709] = {
-			0x00000400, 0x0000064C, 0x00000000, 0xFFFCD9B4,
-			0x00000400, 0xFFFFFE21, 0xFFFFFF41, 0x00014F96,
-			0x00000400, 0x00000000, 0x0000076C, 0xFFFC49EF,
-		}
-	},
-};
-
 /*
  * DE3 has a bit different CSC units. Factors are in two's complement format.
  * First three factors in a row are multiplication factors which have 17 bits
@@ -96,7 +69,7 @@ static const u32 yvu2rgb[2][2][12] = {
  * c20 c21 c22 [d2 const2]
  */
 
-static const u32 yuv2rgb_de3[2][2][12] = {
+static const u32 yuv2rgb_de3[2][3][12] = {
 	[DRM_COLOR_YCBCR_LIMITED_RANGE] = {
 		[DRM_COLOR_YCBCR_BT601] = {
 			0x0002542A, 0x00000000, 0x0003312A, 0xFFC00000,
@@ -107,6 +80,11 @@ static const u32 yuv2rgb_de3[2][2][12] = {
 			0x0002542A, 0x00000000, 0x000395E2, 0xFFC00000,
 			0x0002542A, 0xFFFF92D2, 0xFFFEEF27, 0xFE000000,
 			0x0002542A, 0x0004398C, 0x00000000, 0xFE000000,
+		},
+		[DRM_COLOR_YCBCR_BT2020] = {
+			0x0002542A, 0x00000000, 0x00035B7B, 0xFFC00000,
+			0x0002542A, 0xFFFFA017, 0xFFFEB2FC, 0xFE000000,
+			0x0002542A, 0x00044896, 0x00000000, 0xFE000000,
 		}
 	},
 	[DRM_COLOR_YCBCR_FULL_RANGE] = {
@@ -119,33 +97,11 @@ static const u32 yuv2rgb_de3[2][2][12] = {
 			0x00020000, 0x00000000, 0x0003264C, 0x00000000,
 			0x00020000, 0xFFFFA018, 0xFFFF1053, 0xFE000000,
 			0x00020000, 0x0003B611, 0x00000000, 0xFE000000,
-		}
-	},
-};
-
-static const u32 yvu2rgb_de3[2][2][12] = {
-	[DRM_COLOR_YCBCR_LIMITED_RANGE] = {
-		[DRM_COLOR_YCBCR_BT601] = {
-			0x0002542A, 0x0003312A, 0x00000000, 0xFFC00000,
-			0x0002542A, 0xFFFE5FC3, 0xFFFF376B, 0xFE000000,
-			0x0002542A, 0x00000000, 0x000408D2, 0xFE000000,
 		},
-		[DRM_COLOR_YCBCR_BT709] = {
-			0x0002542A, 0x000395E2, 0x00000000, 0xFFC00000,
-			0x0002542A, 0xFFFEEF27, 0xFFFF92D2, 0xFE000000,
-			0x0002542A, 0x00000000, 0x0004398C, 0xFE000000,
-		}
-	},
-	[DRM_COLOR_YCBCR_FULL_RANGE] = {
-		[DRM_COLOR_YCBCR_BT601] = {
-			0x00020000, 0x0002CDD2, 0x00000000, 0x00000000,
-			0x00020000, 0xFFFE925D, 0xFFFF4FCE, 0xFE000000,
-			0x00020000, 0x00000000, 0x00038B43, 0xFE000000,
-		},
-		[DRM_COLOR_YCBCR_BT709] = {
-			0x00020000, 0x0003264C, 0x00000000, 0x00000000,
-			0x00020000, 0xFFFF1053, 0xFFFFA018, 0xFE000000,
-			0x00020000, 0x00000000, 0x0003B611, 0xFE000000,
+		[DRM_COLOR_YCBCR_BT2020] = {
+			0x00020000, 0x00000000, 0x0002F2FE, 0x00000000,
+			0x00020000, 0xFFFFABC0, 0xFFFEDB78, 0xFE000000,
+			0x00020000, 0x0003C346, 0x00000000, 0xFE000000,
 		}
 	},
 };
@@ -157,21 +113,30 @@ static void sun8i_csc_set_coefficients(struct regmap *map, u32 base,
 {
 	const u32 *table;
 	u32 base_reg;
+	int i;
+
+	table = yuv2rgb[range][encoding];
 
 	switch (mode) {
 	case SUN8I_CSC_MODE_YUV2RGB:
-		table = yuv2rgb[range][encoding];
+		base_reg = SUN8I_CSC_COEFF(base, 0);
+		regmap_bulk_write(map, base_reg, table, 12);
 		break;
 	case SUN8I_CSC_MODE_YVU2RGB:
-		table = yvu2rgb[range][encoding];
+		for (i = 0; i < 12; i++) {
+			if ((i & 3) == 1)
+				base_reg = SUN8I_CSC_COEFF(base, i + 1);
+			else if ((i & 3) == 2)
+				base_reg = SUN8I_CSC_COEFF(base, i - 1);
+			else
+				base_reg = SUN8I_CSC_COEFF(base, i);
+			regmap_write(map, base_reg, table[i]);
+		}
 		break;
 	default:
 		DRM_WARN("Wrong CSC mode specified.\n");
 		return;
 	}
-
-	base_reg = SUN8I_CSC_COEFF(base, 0);
-	regmap_bulk_write(map, base_reg, table, 12);
 }
 
 static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer,
@@ -180,22 +145,36 @@ static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer,
 					    enum drm_color_range range)
 {
 	const u32 *table;
-	u32 base_reg;
+	u32 addr;
+	int i;
+
+	table = yuv2rgb_de3[range][encoding];
 
 	switch (mode) {
 	case SUN8I_CSC_MODE_YUV2RGB:
-		table = yuv2rgb_de3[range][encoding];
+		addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, layer, 0);
+		regmap_bulk_write(map, addr, table, 12);
 		break;
 	case SUN8I_CSC_MODE_YVU2RGB:
-		table = yvu2rgb_de3[range][encoding];
+		for (i = 0; i < 12; i++) {
+			if ((i & 3) == 1)
+				addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE,
+								    layer,
+								    i + 1);
+			else if ((i & 3) == 2)
+				addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE,
+								    layer,
+								    i - 1);
+			else
+				addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE,
+								    layer, i);
+			regmap_write(map, addr, table[i]);
+		}
 		break;
 	default:
 		DRM_WARN("Wrong CSC mode specified.\n");
 		return;
 	}
-
-	base_reg = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, layer, 0, 0);
-	regmap_bulk_write(map, base_reg, table, 12);
 }
 
 static void sun8i_csc_enable(struct regmap *map, u32 base, bool enable)
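
With the duplicate yvu2rgb tables gone, sun8i_csc.c derives the YVU register layout from the YUV table while writing: in each four-entry row (three multiplication factors plus a constant) the U and V factors at offsets 1 and 2 simply trade register slots. The swizzle used by both write loops, factored out as a sketch:

	/* map table index i to its register slot: swap columns 1 and 2
	 * of each 4-entry row for YVU input, identity mapping otherwise */
	static inline unsigned int csc_reg_index(unsigned int i, bool yvu)
	{
		if (!yvu)
			return i;
		if ((i & 3) == 1)
			return i + 1;
		if ((i & 3) == 2)
			return i - 1;
		return i;
	}

yuv2rgb_de3 also grows from [2][2][12] to [2][3][12] to hold BT.2020 coefficients, matching the BIT(DRM_COLOR_YCBCR_BT2020) that sun8i_vi_layer.c now advertises on DE3 hardware.
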
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 7576b523fdbb1409c720ca5874425a71be9cc411..145833a9d82d46c0d65e08797ca8c7e0e3618f26 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -50,10 +50,8 @@
 #define SUN8I_MIXER_BLEND_CK_MIN(base, x)	((base) + 0xe0 + 0x04 * (x))
 #define SUN8I_MIXER_BLEND_OUTCTL(base)		((base) + 0xfc)
 #define SUN50I_MIXER_BLEND_CSC_CTL(base)	((base) + 0x100)
-#define SUN50I_MIXER_BLEND_CSC_COEFF(base, layer, x, y) \
-	((base) + 0x110 + (layer) * 0x30 +  (x) * 0x10 + 4 * (y))
-#define SUN50I_MIXER_BLEND_CSC_CONST(base, layer, i) \
-	((base) + 0x110 + (layer) * 0x30 +  (i) * 0x10 + 0x0c)
+#define SUN50I_MIXER_BLEND_CSC_COEFF(base, layer, x) \
+	((base) + 0x110 + (layer) * 0x30 +  (x) * 4)
 
 #define SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK	GENMASK(12, 8)
 #define SUN8I_MIXER_BLEND_PIPE_CTL_EN(pipe)	BIT(8 + pipe)
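
The SUN50I_MIXER_BLEND_CSC_COEFF() rewrite folds the (x, y) row/column pair into one linear index without changing any address: with row x and column y, the old offset x * 0x10 + 4 * y equals 4 * (4 * x + y), which is exactly 4 * i for the linear i = 0..11 used by the sun8i_csc.c loops above. The separate constants macro becomes redundant because each row's constant is simply entry 3 of that row:

	/* old: base + 0x110 + layer*0x30 + x*0x10 + 4*y   (x = row, y = column)
	 * new: base + 0x110 + layer*0x30 + 4*i            (i = 4*x + y)
	 * same address, since 0x10*x + 4*y == 4*(4*x + y) */
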
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 76393fc976fee3131b1ea7d2efcb6ccd809041bf..8cc294a9969d3088118cd61d8465a80c20adef38 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -543,6 +543,8 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
 
 	supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) |
 			      BIT(DRM_COLOR_YCBCR_BT709);
+	if (mixer->cfg->is_de3)
+		supported_encodings |= BIT(DRM_COLOR_YCBCR_BT2020);
 
 	supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
 			   BIT(DRM_COLOR_YCBCR_FULL_RANGE);
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index ab699bf0ac5c0f1b5d967b8577a5dcdb693dc3e2..58c185c299f411dfcd514a4b8726a700decfc761 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -56,7 +56,7 @@ static const struct file_operations tdfx_driver_fops = {
 	.llseek = noop_llseek,
 };
 
-static struct drm_driver driver = {
+static const struct drm_driver driver = {
 	.driver_features = DRIVER_LEGACY,
 	.fops = &tdfx_driver_fops,
 	.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 85dd7131553afa065592032aa0ba9eff93785482..0ae3a025efe9d6f9566ef69a05021c627c16583a 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -2186,7 +2186,7 @@ static int tegra_dc_runtime_resume(struct host1x_client *client)
 	struct device *dev = client->dev;
 	int err;
 
-	err = pm_runtime_get_sync(dev);
+	err = pm_runtime_resume_and_get(dev);
 	if (err < 0) {
 		dev_err(dev, "failed to get runtime PM: %d\n", err);
 		return err;
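
This is the first of several identical Tegra conversions (dc, dsi, hdmi, hub, sor, vic): pm_runtime_get_sync() bumps the usage count even when resume fails, so every error path needs a matching put; pm_runtime_resume_and_get() folds that fixup in and returns 0 on success. Roughly what the helper does, sketched:

	/* approximate open-coding of pm_runtime_resume_and_get() */
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		pm_runtime_put_noidle(dev);	/* undo the count bump */
		return err;
	}
	/* note: returns 0 here even where get_sync would return 1 */
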
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e45c8414e2a33c073080c6f811dac2af1a48ff89..e9ce7d6992d2189ea736a18b63e1744c1fbf9ddb 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1301,8 +1301,10 @@ static const struct of_device_id host1x_drm_subdevs[] = {
 	{ .compatible = "nvidia,tegra30-hdmi", },
 	{ .compatible = "nvidia,tegra30-gr2d", },
 	{ .compatible = "nvidia,tegra30-gr3d", },
+	{ .compatible = "nvidia,tegra114-dc", },
 	{ .compatible = "nvidia,tegra114-dsi", },
 	{ .compatible = "nvidia,tegra114-hdmi", },
+	{ .compatible = "nvidia,tegra114-gr2d", },
 	{ .compatible = "nvidia,tegra114-gr3d", },
 	{ .compatible = "nvidia,tegra124-dc", },
 	{ .compatible = "nvidia,tegra124-sor", },
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 5691ef1b0e586e42de911ff5fb6176afdb3ff0bd..f46d377f0c3046309c2bf1d12415f593f6aed467 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1111,7 +1111,7 @@ static int tegra_dsi_runtime_resume(struct host1x_client *client)
 	struct device *dev = client->dev;
 	int err;
 
-	err = pm_runtime_get_sync(dev);
+	err = pm_runtime_resume_and_get(dev);
 	if (err < 0) {
 		dev_err(dev, "failed to get runtime PM: %d\n", err);
 		return err;
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index 56edef06c48e7f6bc7dd864ce5c01cfd207339c7..223ab2ceb7e626a8fd69e88ddbb68784dd86d2ed 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -72,7 +72,7 @@ static int falcon_parse_firmware_image(struct falcon *falcon)
 	struct falcon_fw_os_header_v1 *os;
 
 	/* endian problems would show up right here */
-	if (bin->magic != PCI_VENDOR_ID_NVIDIA) {
+	if (bin->magic != PCI_VENDOR_ID_NVIDIA && bin->magic != 0x10fe) {
 		dev_err(falcon->dev, "incorrect firmware magic\n");
 		return -EINVAL;
 	}
@@ -178,9 +178,10 @@ int falcon_boot(struct falcon *falcon)
 				  falcon->firmware.data.offset + offset,
 				  offset, FALCON_MEMORY_DATA);
 
-	/* copy the first code segment into Falcon internal memory */
-	falcon_copy_chunk(falcon, falcon->firmware.code.offset,
-			  0, FALCON_MEMORY_IMEM);
+	/* copy the code segment into Falcon internal memory */
+	for (offset = 0; offset < falcon->firmware.code.size; offset += 256)
+		falcon_copy_chunk(falcon, falcon->firmware.code.offset + offset,
+				  offset, FALCON_MEMORY_IMEM);
 
 	/* setup falcon interrupts */
 	falcon_writel(falcon, FALCON_IRQMSET_EXT(0xff) |
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 1a0d3ba6e525ae4139e1aedda54ed22a3159a897..adbe2ddcda197c7c0e26cb0b7ac94b3ef49045b2 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -161,9 +161,14 @@ static const struct gr2d_soc tegra30_gr2d_soc = {
 	.version = 0x30,
 };
 
+static const struct gr2d_soc tegra114_gr2d_soc = {
+	.version = 0x35,
+};
+
 static const struct of_device_id gr2d_match[] = {
-	{ .compatible = "nvidia,tegra30-gr2d", .data = &tegra20_gr2d_soc },
-	{ .compatible = "nvidia,tegra20-gr2d", .data = &tegra30_gr2d_soc },
+	{ .compatible = "nvidia,tegra114-gr2d", .data = &tegra114_gr2d_soc },
+	{ .compatible = "nvidia,tegra30-gr2d", .data = &tegra30_gr2d_soc },
+	{ .compatible = "nvidia,tegra20-gr2d", .data = &tegra20_gr2d_soc },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, gr2d_match);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index d09a24931c87cbb8499490fdfba287401d0fbf04..e5d2a40260288742fd956931fd48416f46bdd1b5 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1510,7 +1510,7 @@ static int tegra_hdmi_runtime_resume(struct host1x_client *client)
 	struct device *dev = client->dev;
 	int err;
 
-	err = pm_runtime_get_sync(dev);
+	err = pm_runtime_resume_and_get(dev);
 	if (err < 0) {
 		dev_err(dev, "failed to get runtime PM: %d\n", err);
 		return err;
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 22a03f7ffdc12f91cee667089dc79b9f9603bf13..5ce771cba1335f4d59466e88294e3bb993963e44 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -789,7 +789,7 @@ static int tegra_display_hub_runtime_resume(struct host1x_client *client)
 	unsigned int i;
 	int err;
 
-	err = pm_runtime_get_sync(dev);
+	err = pm_runtime_resume_and_get(dev);
 	if (err < 0) {
 		dev_err(dev, "failed to get runtime PM: %d\n", err);
 		return err;
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index cc2aa2308a515820e9c1590cd598523032f91e6e..f02a035dda4532083540cef021d0c0e90d9b96d7 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -3218,7 +3218,7 @@ static int tegra_sor_runtime_resume(struct host1x_client *client)
 	struct device *dev = client->dev;
 	int err;
 
-	err = pm_runtime_get_sync(dev);
+	err = pm_runtime_resume_and_get(dev);
 	if (err < 0) {
 		dev_err(dev, "failed to get runtime PM: %d\n", err);
 		return err;
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index ade56b860cf9de7155d390b1ef04dc5293590262..77e128832920eecb9b88cfc6e70545ed1dc74ca8 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -117,7 +117,19 @@ static int vic_boot(struct vic *vic)
 		if (spec->num_ids > 0) {
 			value = spec->ids[0] & 0xffff;
 
+			/*
+			 * STREAMID0 is used for input/output buffers.
+			 * Initialize it to SID_VIC in case context isolation
+			 * is not enabled, and SID_VIC is used for both firmware
+			 * and data buffers.
+			 *
+			 * If context isolation is enabled, it will be
+			 * overridden by the SETSTREAMID opcode as part of
+			 * each job.
+			 */
 			vic_writel(vic, value, VIC_THI_STREAMID0);
+
+			/* STREAMID1 is used for firmware loading. */
 			vic_writel(vic, value, VIC_THI_STREAMID1);
 		}
 	}
@@ -135,16 +147,21 @@ static int vic_boot(struct vic *vic)
 
 	hdr = vic->falcon.firmware.virt;
 	fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
-	hdr = vic->falcon.firmware.virt +
-		*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
-	fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
 
 	falcon_execute_method(&vic->falcon, VIC_SET_APPLICATION_ID, 1);
-	falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
-			      fce_ucode_size);
-	falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
-			      (vic->falcon.firmware.iova + fce_bin_data_offset)
-				>> 8);
+
+	/* Old VIC firmware needs kernel help with setting up FCE microcode. */
+	if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
+		hdr = vic->falcon.firmware.virt +
+			*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
+		fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
+
+		falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
+				      fce_ucode_size);
+		falcon_execute_method(
+			&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
+			(vic->falcon.firmware.iova + fce_bin_data_offset) >> 8);
+	}
 
 	err = falcon_wait_idle(&vic->falcon);
 	if (err < 0) {
@@ -314,7 +331,7 @@ static int vic_open_channel(struct tegra_drm_client *client,
 	struct vic *vic = to_vic(client);
 	int err;
 
-	err = pm_runtime_get_sync(vic->dev);
+	err = pm_runtime_resume_and_get(vic->dev);
 	if (err < 0)
 		return err;
 
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index 561c49d8657a44ba155c4531ed1804e3dbd2e0f1..a043e602199e5c4f7d4e7aecb980326899161e9d 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -602,7 +602,6 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
 
 	drm_mode_config_reset(dev);
 
-	dev->pdev = pdev;
 	pci_set_drvdata(pdev, dev);
 	ret = drm_dev_register(dev, 0);
 	if (ret)
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 03c86628e4ac4aaade39d85d5f10dd54e0edcde5..8f9fa418889723e84e01d3656107d5ca2664b9e7 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -32,7 +32,6 @@
 
 #define pr_fmt(fmt) "[TTM] " fmt
 
-#include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 #include <linux/agp_backend.h>
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9a03c7834b1eded3c7f9b26e6d3d69f3a81c1367..b65f4b12f9866995edf2fc1a2424ff33e850a403 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -31,7 +31,6 @@
 
 #define pr_fmt(fmt) "[TTM] " fmt
 
-#include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 #include <linux/jiffies.h>
@@ -43,6 +42,8 @@
 #include <linux/atomic.h>
 #include <linux/dma-resv.h>
 
+#include "ttm_module.h"
+
 static void ttm_bo_global_kobj_release(struct kobject *kobj);
 
 /*
@@ -71,9 +72,9 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 	struct ttm_resource_manager *man;
 	int i, mem_type;
 
-	drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
-		   bo, bo->mem.num_pages, bo->mem.size >> 10,
-		   bo->mem.size >> 20);
+	drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
+		   bo, bo->mem.num_pages, bo->base.size >> 10,
+		   bo->base.size >> 20);
 	for (i = 0; i < placement->num_placement; i++) {
 		mem_type = placement->placement[i].mem_type;
 		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
@@ -109,40 +110,14 @@ static struct kobj_type ttm_bo_glob_kobj_type  = {
 	.default_attrs = ttm_bo_global_attrs
 };
 
-static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
-				  struct ttm_resource *mem)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man;
-
-	if (!list_empty(&bo->lru) || bo->pin_count)
-		return;
-
-	man = ttm_manager_type(bdev, mem->mem_type);
-	list_add_tail(&bo->lru, &man->lru[bo->priority]);
-
-	if (man->use_tt && bo->ttm &&
-	    !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
-				     TTM_PAGE_FLAG_SWAPPED))) {
-		list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
-	}
-}
-
 static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	bool notify = false;
 
-	if (!list_empty(&bo->swap)) {
-		list_del_init(&bo->swap);
-		notify = true;
-	}
-	if (!list_empty(&bo->lru)) {
-		list_del_init(&bo->lru);
-		notify = true;
-	}
+	list_del_init(&bo->swap);
+	list_del_init(&bo->lru);
 
-	if (notify && bdev->driver->del_from_lru_notify)
+	if (bdev->driver->del_from_lru_notify)
 		bdev->driver->del_from_lru_notify(bo);
 }
 
@@ -155,12 +130,32 @@ static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
 }
 
 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
+			     struct ttm_resource *mem,
 			     struct ttm_lru_bulk_move *bulk)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_resource_manager *man;
+
 	dma_resv_assert_held(bo->base.resv);
 
-	ttm_bo_del_from_lru(bo);
-	ttm_bo_add_mem_to_lru(bo, &bo->mem);
+	if (bo->pin_count) {
+		ttm_bo_del_from_lru(bo);
+		return;
+	}
+
+	man = ttm_manager_type(bdev, mem->mem_type);
+	list_move_tail(&bo->lru, &man->lru[bo->priority]);
+	if (man->use_tt && bo->ttm &&
+	    !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
+				     TTM_PAGE_FLAG_SWAPPED))) {
+		struct list_head *swap;
+
+		swap = &ttm_bo_glob.swap_lru[bo->priority];
+		list_move_tail(&bo->swap, swap);
+	}
+
+	if (bdev->driver->del_from_lru_notify)
+		bdev->driver->del_from_lru_notify(bo);
 
 	if (bulk && !bo->pin_count) {
 		switch (bo->mem.mem_type) {
@@ -267,7 +262,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 		goto out_err;
 	}
 
-	ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
+	ctx->bytes_moved += bo->base.size;
 	return 0;
 
 out_err:
@@ -514,10 +509,9 @@ static void ttm_bo_release(struct kref *kref)
 		 * shrinkers, now that they are queued for
 		 * destruction.
 		 */
-		if (bo->pin_count) {
+		if (WARN_ON(bo->pin_count)) {
 			bo->pin_count = 0;
-			ttm_bo_del_from_lru(bo);
-			ttm_bo_add_mem_to_lru(bo, &bo->mem);
+			ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
 		}
 
 		kref_init(&bo->kref);
@@ -859,8 +853,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
 	mem->placement = place->flags;
 
 	spin_lock(&ttm_bo_glob.lru_lock);
-	ttm_bo_del_from_lru(bo);
-	ttm_bo_add_mem_to_lru(bo, mem);
+	ttm_bo_move_to_lru_tail(bo, mem, NULL);
 	spin_unlock(&ttm_bo_glob.lru_lock);
 
 	return 0;
@@ -937,9 +930,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	}
 
 error:
-	if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
+	if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count)
 		ttm_bo_move_to_lru_tail_unlocked(bo);
-	}
 
 	return ret;
 }
@@ -984,8 +976,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 
 	memset(&hop, 0, sizeof(hop));
 
-	mem.num_pages = bo->num_pages;
-	mem.size = mem.num_pages << PAGE_SHIFT;
+	mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
 	mem.bus.offset = 0;
 	mem.bus.addr = NULL;
@@ -1101,7 +1092,7 @@ EXPORT_SYMBOL(ttm_bo_validate);
 
 int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 			 struct ttm_buffer_object *bo,
-			 unsigned long size,
+			 size_t size,
 			 enum ttm_bo_type type,
 			 struct ttm_placement *placement,
 			 uint32_t page_alignment,
@@ -1112,9 +1103,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 			 void (*destroy) (struct ttm_buffer_object *))
 {
 	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
-	int ret = 0;
-	unsigned long num_pages;
 	bool locked;
+	int ret = 0;
 
 	ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
 	if (ret) {
@@ -1126,16 +1116,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		return -ENOMEM;
 	}
 
-	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (num_pages == 0) {
-		pr_err("Illegal buffer object size\n");
-		if (destroy)
-			(*destroy)(bo);
-		else
-			kfree(bo);
-		ttm_mem_global_free(mem_glob, acc_size);
-		return -EINVAL;
-	}
 	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
 
 	kref_init(&bo->kref);
@@ -1144,10 +1124,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->swap);
 	bo->bdev = bdev;
 	bo->type = type;
-	bo->num_pages = num_pages;
-	bo->mem.size = num_pages << PAGE_SHIFT;
 	bo->mem.mem_type = TTM_PL_SYSTEM;
-	bo->mem.num_pages = bo->num_pages;
+	bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
 	bo->mem.bus.offset = 0;
@@ -1165,9 +1143,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	}
 	if (!ttm_bo_uses_embedded_gem_object(bo)) {
 		/*
-		 * bo.gem is not initialized, so we have to setup the
+		 * bo.base is not initialized, so we have to set up the
 		 * struct elements we want to use regardless.
 		 */
+		bo->base.size = size;
 		dma_resv_init(&bo->base._resv);
 		drm_vma_node_reset(&bo->base.vma_node);
 	}
@@ -1209,7 +1188,7 @@ EXPORT_SYMBOL(ttm_bo_init_reserved);
 
 int ttm_bo_init(struct ttm_bo_device *bdev,
 		struct ttm_buffer_object *bo,
-		unsigned long size,
+		size_t size,
 		enum ttm_bo_type type,
 		struct ttm_placement *placement,
 		uint32_t page_alignment,
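
ttm_bo.c drops the buffer object's private num_pages/size pair: the byte size lives in bo->base.size (now a size_t through the ttm_bo_init() signatures) and page counts are derived on demand. The conversion used throughout, assuming PAGE_SIZE is a power of two:

	/* bytes -> pages, rounding up */
	num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* e.g. size = 5000, PAGE_SIZE = 4096:
	 * PAGE_ALIGN(5000) = 8192, 8192 >> 12 = 2 pages */

The same file also absorbs ttm_bo_add_mem_to_lru() into ttm_bo_move_to_lru_tail(), which now takes the ttm_resource explicitly; that is why the ttm_execbuf_util.c callers below pass &bo->mem.
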
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7ccb2295cac1a31226f3b85c6075808d27e21aa3..398d5013fc397f24d1938ab7a4e6ecba01a9e5a3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -310,7 +310,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	kref_init(&fbo->base.kref);
 	fbo->base.destroy = &ttm_transfered_destroy;
 	fbo->base.acc_size = 0;
-	fbo->base.pin_count = 1;
+	fbo->base.pin_count = 0;
 	if (bo->type != ttm_bo_type_sg)
 		fbo->base.base.resv = &fbo->base.base._resv;
 
@@ -319,6 +319,8 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	ret = dma_resv_trylock(&fbo->base.base._resv);
 	WARN_ON(!ret);
 
+	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);
+
 	*new_obj = &fbo->base;
 	return 0;
 }
@@ -429,9 +431,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 
 	map->virtual = NULL;
 	map->bo = bo;
-	if (num_pages > bo->num_pages)
+	if (num_pages > bo->mem.num_pages)
 		return -EINVAL;
-	if (start_page > bo->num_pages)
+	if ((start_page + num_pages) > bo->mem.num_pages)
 		return -EINVAL;
 
 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
@@ -483,14 +485,14 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 
 	if (mem->bus.is_iomem) {
 		void __iomem *vaddr_iomem;
-		size_t size = bo->num_pages << PAGE_SHIFT;
 
 		if (mem->bus.addr)
 			vaddr_iomem = (void __iomem *)mem->bus.addr;
 		else if (mem->bus.caching == ttm_write_combined)
-			vaddr_iomem = ioremap_wc(mem->bus.offset, size);
+			vaddr_iomem = ioremap_wc(mem->bus.offset,
+						 bo->base.size);
 		else
-			vaddr_iomem = ioremap(mem->bus.offset, size);
+			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
 
 		if (!vaddr_iomem)
 			return -ENOMEM;
@@ -515,7 +517,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 		 * or to make the buffer object look contiguous.
 		 */
 		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
-		vaddr = vmap(ttm->pages, bo->num_pages, 0, prot);
+		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
 		if (!vaddr)
 			return -ENOMEM;
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 2944fa0af493550e9eac609a7b17ba82a4ff6840..6dc96cf667449f1541a467f2e669f9042e94d378 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -31,7 +31,6 @@
 
 #define pr_fmt(fmt) "[TTM] " fmt
 
-#include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drm_vma_manager.h>
@@ -199,7 +198,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
 
 	/* Fault should not cross bo boundary. */
 	page_offset &= ~(fault_page_size - 1);
-	if (page_offset + fault_page_size > bo->num_pages)
+	if (page_offset + fault_page_size > bo->mem.num_pages)
 		goto out_fallback;
 
 	if (bo->mem.bus.is_iomem)
@@ -307,7 +306,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	page_last = vma_pages(vma) + vma->vm_pgoff -
 		drm_vma_node_start(&bo->base.vma_node);
 
-	if (unlikely(page_offset >= bo->num_pages))
+	if (unlikely(page_offset >= bo->mem.num_pages))
 		return VM_FAULT_SIGBUS;
 
 	prot = ttm_io_prot(bo, &bo->mem, prot);
@@ -470,7 +469,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
 		 << PAGE_SHIFT);
 	int ret;
 
-	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
+	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->mem.num_pages)
 		return -EIO;
 
 	ret = ttm_bo_reserve(bo, true, false, NULL);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 8a8f1a6a83a613cfd34bab5bfefe636ed182a215..9fa36ed59429eb486c8cdc40affce56820461dde 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -55,7 +55,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		ttm_bo_move_to_lru_tail(bo, NULL);
+		ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
 		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&ttm_bo_glob.lru_lock);
@@ -162,7 +162,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 			dma_resv_add_shared_fence(bo->base.resv, fence);
 		else
 			dma_resv_add_excl_fence(bo->base.resv, fence);
-		ttm_bo_move_to_lru_tail(bo, NULL);
+		ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
 		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&ttm_bo_glob.lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 5ed1fc8f2ace3fdc7097a718ed1a0d23dea5848d..a3bfbd9cea680d3b6bf80ec3acbf7d6bd272a35f 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -29,7 +29,6 @@
 #define pr_fmt(fmt) "[TTM] " fmt
 
 #include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
@@ -39,6 +38,8 @@
 #include <linux/swap.h>
 #include <drm/ttm/ttm_pool.h>
 
+#include "ttm_module.h"
+
 #define TTM_MEMORY_ALLOC_RETRIES 4
 
 struct ttm_mem_global ttm_mem_glob;
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 6ff40c041d799d2bb8c24eb42b5a6a513926caad..c0906437cb1c9bcae9698aa13d91b8f4ecb5a6d2 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -32,9 +32,10 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/sched.h>
-#include <drm/ttm/ttm_module.h>
 #include <drm/drm_sysfs.h>
 
+#include "ttm_module.h"
+
 static DECLARE_WAIT_QUEUE_HEAD(exit_q);
 static atomic_t device_released;
 
diff --git a/include/drm/ttm/ttm_module.h b/drivers/gpu/drm/ttm/ttm_module.h
similarity index 100%
rename from include/drm/ttm/ttm_module.h
rename to drivers/gpu/drm/ttm/ttm_module.h
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index e0952444cea930485d9f19534da36b39b4a9476d..a39305f742da209759044b53cbe560eaea6a1273 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -29,7 +29,6 @@
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
 
-#include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drm_mm.h>
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index da9eeffe0c6d75118536614dbc2c9bdd55d2dbc3..7f75a13163f08f2e5f992610a47563cdf8de9c19 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -129,7 +129,7 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
 			       uint32_t page_flags,
 			       enum ttm_caching caching)
 {
-	ttm->num_pages = bo->num_pages;
+	ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
 	ttm->caching = ttm_cached;
 	ttm->page_flags = page_flags;
 	ttm->dma_address = NULL;
@@ -162,19 +162,6 @@ void ttm_tt_fini(struct ttm_tt *ttm)
 }
 EXPORT_SYMBOL(ttm_tt_fini);
 
-int ttm_dma_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
-		    uint32_t page_flags, enum ttm_caching caching)
-{
-	ttm_tt_init_fields(ttm, bo, page_flags, caching);
-
-	if (ttm_dma_tt_alloc_page_directory(ttm)) {
-		pr_err("Failed allocating page table\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-EXPORT_SYMBOL(ttm_dma_tt_init);
-
 int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
 		   uint32_t page_flags, enum ttm_caching caching)
 {
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index 17ff24d999d18296b4c1b03bf1a700144fcbdaa5..cb0e837d3dbabb801f7fa163859c43cd25a49be0 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -11,7 +11,6 @@
  */
 
 #include <linux/clk.h>
-#include <linux/version.h>
 #include <linux/dma-buf.h>
 #include <linux/of_graph.h>
 #include <linux/delay.h>
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 07140e0b90a3c774f7c0c8ce290e42198b7e4a94..7fa71c8bb8289aaee96b69c48210ddd38480fbb9 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -35,7 +35,6 @@
 #include <linux/platform_device.h>
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
-#include <linux/version.h>
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 42d401fd244e38a248e8281816d032e5302d88fa..99e22beea90b1dedecdda0e78112ced6f3c285d7 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -232,8 +232,8 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
 		return ret;
 
 	mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
-	dev->coherent_dma_mask =
-		DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
+	dma_set_mask_and_coherent(dev,
+		DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)));
 	v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH);
 
 	ident1 = V3D_READ(V3D_HUB_IDENT1);
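
Writing dev->coherent_dma_mask directly bypasses the DMA API: it skips the architecture's validation and leaves the streaming mask untouched. dma_set_mask_and_coherent() sets both masks and reports whether the platform can honor them; a checked variant of the call the hunk adds (the 40-bit mask here is just an example value):

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
	if (ret)
		return ret;	/* mask not usable on this platform */
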
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index c88686489b88899fb3b8d8094bec02ff32b662f5..e714d5318f3095c2f3d23692d23da98f7247389f 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -178,10 +178,7 @@ v3d_hub_irq(int irq, void *arg)
 		};
 		const char *client = "?";
 
-		V3D_WRITE(V3D_MMU_CTL,
-			  V3D_READ(V3D_MMU_CTL) & (V3D_MMU_CTL_CAP_EXCEEDED |
-						   V3D_MMU_CTL_PT_INVALID |
-						   V3D_MMU_CTL_WRITE_VIOLATION));
+		V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL));
 
 		if (v3d->ver >= 41) {
 			axi_id = axi_id >> 5;
@@ -217,7 +214,7 @@ v3d_irq_init(struct v3d_dev *v3d)
 		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
 	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
 
-	irq1 = platform_get_irq(v3d_to_pdev(v3d), 1);
+	irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1);
 	if (irq1 == -EPROBE_DEFER)
 		return irq1;
 	if (irq1 > 0) {
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index f3eac72cb46ec22f23e05f3b7c6fbbb53251a650..e534896b6cfd16f1e4dcb680260f5565b35cadf2 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -51,7 +51,6 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (IS_ERR(vbox))
 		return PTR_ERR(vbox);
 
-	vbox->ddev.pdev = pdev;
 	pci_set_drvdata(pdev, vbox);
 	mutex_init(&vbox->hw_mutex);
 
@@ -109,15 +108,16 @@ static void vbox_pci_remove(struct pci_dev *pdev)
 static int vbox_pm_suspend(struct device *dev)
 {
 	struct vbox_private *vbox = dev_get_drvdata(dev);
+	struct pci_dev *pdev = to_pci_dev(dev);
 	int error;
 
 	error = drm_mode_config_helper_suspend(&vbox->ddev);
 	if (error)
 		return error;
 
-	pci_save_state(vbox->ddev.pdev);
-	pci_disable_device(vbox->ddev.pdev);
-	pci_set_power_state(vbox->ddev.pdev, PCI_D3hot);
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
 
 	return 0;
 }
@@ -125,8 +125,9 @@ static int vbox_pm_suspend(struct device *dev)
 static int vbox_pm_resume(struct device *dev)
 {
 	struct vbox_private *vbox = dev_get_drvdata(dev);
+	struct pci_dev *pdev = to_pci_dev(dev);
 
-	if (pci_enable_device(vbox->ddev.pdev))
+	if (pci_enable_device(pdev))
 		return -EIO;
 
 	return drm_mode_config_helper_resume(&vbox->ddev);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c
index 631657fa554f9a1a5b6104064697f7eec8b6c8fa..b3ded68603bad7f211dfeb43c50b7986b26c0110 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_irq.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c
@@ -170,10 +170,12 @@ static void vbox_hotplug_worker(struct work_struct *work)
 
 int vbox_irq_init(struct vbox_private *vbox)
 {
+	struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev);
+
 	INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker);
 	vbox_update_mode_hints(vbox);
 
-	return drm_irq_install(&vbox->ddev, vbox->ddev.pdev->irq);
+	return drm_irq_install(&vbox->ddev, pdev->irq);
 }
 
 void vbox_irq_fini(struct vbox_private *vbox)
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index d68d9bad76747015c446b3f5908014ca81dd3b70..f28779715ccda8332b3467a4ca27b8c1db8ac9b5 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -8,7 +8,9 @@
  *          Hans de Goede <hdegoede@redhat.com>
  */
 
+#include <linux/pci.h>
 #include <linux/vbox_err.h>
+
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_damage_helper.h>
@@ -30,6 +32,7 @@ void vbox_report_caps(struct vbox_private *vbox)
 
 static int vbox_accel_init(struct vbox_private *vbox)
 {
+	struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev);
 	struct vbva_buffer *vbva;
 	unsigned int i;
 
@@ -41,7 +44,7 @@ static int vbox_accel_init(struct vbox_private *vbox)
 	/* Take a command buffer for each screen from the end of usable VRAM. */
 	vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
 
-	vbox->vbva_buffers = pci_iomap_range(vbox->ddev.pdev, 0,
+	vbox->vbva_buffers = pci_iomap_range(pdev, 0,
 					     vbox->available_vram_size,
 					     vbox->num_crtcs *
 					     VBVA_MIN_BUFFER_SIZE);
@@ -106,6 +109,7 @@ bool vbox_check_supported(u16 id)
 
 int vbox_hw_init(struct vbox_private *vbox)
 {
+	struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev);
 	int ret = -ENOMEM;
 
 	vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
@@ -115,7 +119,7 @@ int vbox_hw_init(struct vbox_private *vbox)
 
 	/* Map guest-heap at end of vram */
 	vbox->guest_heap =
-	    pci_iomap_range(vbox->ddev.pdev, 0, GUEST_HEAP_OFFSET(vbox),
+	    pci_iomap_range(pdev, 0, GUEST_HEAP_OFFSET(vbox),
 			    GUEST_HEAP_SIZE);
 	if (!vbox->guest_heap)
 		return -ENOMEM;
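
The vboxvideo hunks replace the removed drm_device::pdev shortcut with to_pci_dev(dev), which is container_of() underneath. A self-contained illustration of that pointer recovery, using local stand-in struct definitions rather than the kernel's:

#include <stdio.h>
#include <stddef.h>

/* Standalone copy of the kernel's container_of() idea: step back from an
 * embedded member to the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device  { int id; };
struct pci_dev { unsigned int vendor; struct device dev; };

int main(void)
{
	struct pci_dev pdev = { .vendor = 0x80ee, .dev = { .id = 0 } };
	struct device *dev = &pdev.dev;      /* what the driver actually stores */

	/* to_pci_dev(dev) in the kernel expands to exactly this. */
	struct pci_dev *back = container_of(dev, struct pci_dev, dev);

	printf("recovered vendor: 0x%x\n", back->vendor);
	return 0;
}
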
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index f5a06675da43c67abcf187f7bddc9b95f88e5f3a..0066a3c1dfc96fdd720cbcfc4923ad37775890f3 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -15,8 +15,9 @@ int vbox_mm_init(struct vbox_private *vbox)
 	struct drm_vram_mm *vmm;
 	int ret;
 	struct drm_device *dev = &vbox->ddev;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
-	vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0),
+	vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(pdev, 0),
 				       vbox->available_vram_size);
 	if (IS_ERR(vmm)) {
 		ret = PTR_ERR(vmm);
@@ -24,8 +25,8 @@ int vbox_mm_init(struct vbox_private *vbox)
 		return ret;
 	}
 
-	vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
-					 pci_resource_len(dev->pdev, 0));
+	vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0),
+					 pci_resource_len(pdev, 0));
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 469d1b4f2643fb1c04680f37dcd2a8fe4cd46a97..fddaeb0b09c11764cb46b23ffff6ec345f505cd7 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -21,7 +21,7 @@
 #include "vc4_drv.h"
 #include "uapi/drm/vc4_drm.h"
 
-static vm_fault_t vc4_fault(struct vm_fault *vmf);
+static const struct drm_gem_object_funcs vc4_gem_object_funcs;
 
 static const char * const bo_type_names[] = {
 	"kernel",
@@ -376,20 +376,6 @@ static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
 	return bo;
 }
 
-static const struct vm_operations_struct vc4_vm_ops = {
-	.fault = vc4_fault,
-	.open = drm_gem_vm_open,
-	.close = drm_gem_vm_close,
-};
-
-static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
-	.free = vc4_free_object,
-	.export = vc4_prime_export,
-	.get_sg_table = drm_gem_cma_prime_get_sg_table,
-	.vmap = vc4_prime_vmap,
-	.vm_ops = &vc4_vm_ops,
-};
-
 /**
  * vc4_create_object - Implementation of driver->gem_create_object.
  * @dev: DRM device
@@ -538,7 +524,7 @@ static void vc4_bo_cache_free_old(struct drm_device *dev)
 /* Called on the last userspace/kernel unreference of the BO.  Returns
  * it to the BO cache if possible, otherwise frees it.
  */
-void vc4_free_object(struct drm_gem_object *gem_bo)
+static void vc4_free_object(struct drm_gem_object *gem_bo)
 {
 	struct drm_device *dev = gem_bo->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -673,7 +659,7 @@ static void vc4_bo_cache_time_timer(struct timer_list *t)
 	schedule_work(&vc4->bo_cache.time_work);
 }
 
-struct dma_buf * vc4_prime_export(struct drm_gem_object *obj, int flags)
+static struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
 {
 	struct vc4_bo *bo = to_vc4_bo(obj);
 	struct dma_buf *dmabuf;
@@ -718,19 +704,9 @@ static vm_fault_t vc4_fault(struct vm_fault *vmf)
 	return VM_FAULT_SIGBUS;
 }
 
-int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
+static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 {
-	struct drm_gem_object *gem_obj;
-	unsigned long vm_pgoff;
-	struct vc4_bo *bo;
-	int ret;
-
-	ret = drm_gem_mmap(filp, vma);
-	if (ret)
-		return ret;
-
-	gem_obj = vma->vm_private_data;
-	bo = to_vc4_bo(gem_obj);
+	struct vc4_bo *bo = to_vc4_bo(obj);
 
 	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
 		DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
@@ -744,72 +720,23 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
 		return -EINVAL;
 	}
 
-	/*
-	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
-	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
-	 * the whole buffer.
-	 */
-	vma->vm_flags &= ~VM_PFNMAP;
-
-	/* This ->vm_pgoff dance is needed to make all parties happy:
-	 * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
-	 *   mem-region, hence the need to set it to zero (the value set by
-	 *   the DRM core is a virtual offset encoding the GEM object-id)
-	 * - the mmap() core logic needs ->vm_pgoff to be restored to its
-	 *   initial value before returning from this function because it
-	 *   encodes the  offset of this GEM in the dev->anon_inode pseudo-file
-	 *   and this information will be used when we invalidate userspace
-	 *   mappings  with drm_vma_node_unmap() (called from vc4_gem_purge()).
-	 */
-	vm_pgoff = vma->vm_pgoff;
-	vma->vm_pgoff = 0;
-	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
-			  bo->base.paddr, vma->vm_end - vma->vm_start);
-	vma->vm_pgoff = vm_pgoff;
-
-	if (ret)
-		drm_gem_vm_close(vma);
-
-	return ret;
-}
-
-int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
-	struct vc4_bo *bo = to_vc4_bo(obj);
-
-	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
-		DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
-		return -EINVAL;
-	}
-
-	return drm_gem_cma_prime_mmap(obj, vma);
-}
-
-int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
-{
-	struct vc4_bo *bo = to_vc4_bo(obj);
-
-	if (bo->validated_shader) {
-		DRM_DEBUG("mmaping of shader BOs not allowed.\n");
-		return -EINVAL;
-	}
-
-	return drm_gem_cma_prime_vmap(obj, map);
+	return drm_gem_cma_mmap(obj, vma);
 }
 
-struct drm_gem_object *
-vc4_prime_import_sg_table(struct drm_device *dev,
-			  struct dma_buf_attachment *attach,
-			  struct sg_table *sgt)
-{
-	struct drm_gem_object *obj;
-
-	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
-	if (IS_ERR(obj))
-		return obj;
+static const struct vm_operations_struct vc4_vm_ops = {
+	.fault = vc4_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
 
-	return obj;
-}
+static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
+	.free = vc4_free_object,
+	.export = vc4_prime_export,
+	.get_sg_table = drm_gem_cma_get_sg_table,
+	.vmap = drm_gem_cma_vmap,
+	.mmap = vc4_gem_object_mmap,
+	.vm_ops = &vc4_vm_ops,
+};
 
 static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file)
 {
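
With the vc4_bo.c changes above, the mmap/prime entry points move from file-scope hooks into one drm_gem_object_funcs table attached to each object. The sketch below shows only the generic per-object ops-table dispatch pattern; the type and function names are illustrative, not the DRM API:

#include <stdio.h>

struct obj;

struct obj_funcs {
	int (*mmap)(struct obj *o);
};

struct obj {
	const struct obj_funcs *funcs;
	int validated_shader;
};

/* Mirrors the shader-BO check kept in vc4_gem_object_mmap() above. */
static int demo_mmap(struct obj *o)
{
	if (o->validated_shader) {
		printf("mmap of shader BOs refused\n");
		return -1;
	}
	printf("mmap ok\n");
	return 0;
}

static const struct obj_funcs demo_funcs = { .mmap = demo_mmap };

int main(void)
{
	struct obj o = { .funcs = &demo_funcs, .validated_shader = 1 };

	return o.funcs->mmap(&o);   /* dispatch through the per-object table */
}
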
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index ea710beb8e005d7655496df362e09e6a290e8cd1..269390bc586ed12b5a18ad3350a5ee8bcc52fcdd 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -403,7 +403,9 @@ static void require_hvs_enabled(struct drm_device *dev)
 		     SCALER_DISPCTRL_ENABLE);
 }
 
-static int vc4_crtc_disable(struct drm_crtc *crtc, unsigned int channel)
+static int vc4_crtc_disable(struct drm_crtc *crtc,
+			    struct drm_atomic_state *state,
+			    unsigned int channel)
 {
 	struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
 	struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
@@ -435,13 +437,13 @@ static int vc4_crtc_disable(struct drm_crtc *crtc, unsigned int channel)
 	mdelay(20);
 
 	if (vc4_encoder && vc4_encoder->post_crtc_disable)
-		vc4_encoder->post_crtc_disable(encoder);
+		vc4_encoder->post_crtc_disable(encoder, state);
 
 	vc4_crtc_pixelvalve_reset(crtc);
 	vc4_hvs_stop_channel(dev, channel);
 
 	if (vc4_encoder && vc4_encoder->post_crtc_powerdown)
-		vc4_encoder->post_crtc_powerdown(encoder);
+		vc4_encoder->post_crtc_powerdown(encoder, state);
 
 	return 0;
 }
@@ -468,7 +470,7 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
 	if (channel < 0)
 		return 0;
 
-	return vc4_crtc_disable(crtc, channel);
+	return vc4_crtc_disable(crtc, NULL, channel);
 }
 
 static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -484,7 +486,7 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
 	/* Disable vblank irq handling before crtc is disabled. */
 	drm_crtc_vblank_off(crtc);
 
-	vc4_crtc_disable(crtc, old_vc4_state->assigned_channel);
+	vc4_crtc_disable(crtc, state, old_vc4_state->assigned_channel);
 
 	/*
 	 * Make sure we issue a vblank event after disabling the CRTC if
@@ -503,8 +505,6 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
 static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
 				   struct drm_atomic_state *state)
 {
-	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
-									 crtc);
 	struct drm_device *dev = crtc->dev;
 	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
 	struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
@@ -517,17 +517,17 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
 	 */
 	drm_crtc_vblank_on(crtc);
 
-	vc4_hvs_atomic_enable(crtc, old_state);
+	vc4_hvs_atomic_enable(crtc, state);
 
 	if (vc4_encoder->pre_crtc_configure)
-		vc4_encoder->pre_crtc_configure(encoder);
+		vc4_encoder->pre_crtc_configure(encoder, state);
 
 	vc4_crtc_config_pv(crtc);
 
 	CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN);
 
 	if (vc4_encoder->pre_crtc_enable)
-		vc4_encoder->pre_crtc_enable(encoder);
+		vc4_encoder->pre_crtc_enable(encoder, state);
 
 	/* When feeding the transposer block the pixelvalve is unneeded and
 	 * should not be enabled.
@@ -536,7 +536,7 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
 		   CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
 
 	if (vc4_encoder->post_crtc_enable)
-		vc4_encoder->post_crtc_enable(encoder);
+		vc4_encoder->post_crtc_enable(encoder, state);
 }
 
 static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
@@ -593,7 +593,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 	struct drm_connector_state *conn_state;
 	int ret, i;
 
-	ret = vc4_hvs_atomic_check(crtc, crtc_state);
+	ret = vc4_hvs_atomic_check(crtc, state);
 	if (ret)
 		return ret;
 
@@ -697,7 +697,6 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
 		container_of(cb, struct vc4_async_flip_state, cb);
 	struct drm_crtc *crtc = flip_state->crtc;
 	struct drm_device *dev = crtc->dev;
-	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct drm_plane *plane = crtc->primary;
 
 	vc4_plane_async_set_fb(plane, flip_state->fb);
@@ -729,8 +728,6 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
 	}
 
 	kfree(flip_state);
-
-	up(&vc4->async_modeset);
 }
 
 /* Implements async (non-vblank-synced) page flips.
@@ -745,7 +742,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
 			       uint32_t flags)
 {
 	struct drm_device *dev = crtc->dev;
-	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct drm_plane *plane = crtc->primary;
 	int ret = 0;
 	struct vc4_async_flip_state *flip_state;
@@ -774,15 +770,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
 	flip_state->crtc = crtc;
 	flip_state->event = event;
 
-	/* Make sure all other async modesetes have landed. */
-	ret = down_interruptible(&vc4->async_modeset);
-	if (ret) {
-		drm_framebuffer_put(fb);
-		vc4_bo_dec_usecnt(bo);
-		kfree(flip_state);
-		return ret;
-	}
-
 	/* Save the current FB before it's replaced by the new one in
 	 * drm_atomic_set_fb_for_plane(). We'll need the old FB in
 	 * vc4_async_page_flip_complete() to decrement the BO usecnt and keep
@@ -884,7 +871,6 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
 	.reset = vc4_crtc_reset,
 	.atomic_duplicate_state = vc4_crtc_duplicate_state,
 	.atomic_destroy_state = vc4_crtc_destroy_state,
-	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 	.enable_vblank = vc4_enable_vblank,
 	.disable_vblank = vc4_disable_vblank,
 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 2cd97a39c28688ff2ca203c2605a5d8ac53ce769..556ad0f02a0db56913f3dc2ade1984b6c1308ac9 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -140,17 +140,7 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file)
 	kfree(vc4file);
 }
 
-static const struct file_operations vc4_drm_fops = {
-	.owner = THIS_MODULE,
-	.open = drm_open,
-	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
-	.mmap = vc4_mmap,
-	.poll = drm_poll,
-	.read = drm_read,
-	.compat_ioctl = drm_compat_ioctl,
-	.llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_FOPS(vc4_drm_fops);
 
 static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW),
@@ -190,12 +180,7 @@ static struct drm_driver vc4_drm_driver = {
 
 	.gem_create_object = vc4_create_object,
 
-	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-	.gem_prime_import_sg_table = vc4_prime_import_sg_table,
-	.gem_prime_mmap = vc4_prime_mmap,
-
-	.dumb_create = vc4_dumb_create,
+	DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_dumb_create),
 
 	.ioctls = vc4_drm_ioctls,
 	.num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 43a1af110b3e4106b3ce57542c5f777841dd013e..a7500716cf3f10499168b21cefeadcc61816c019 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -77,7 +77,6 @@ struct vc4_dev {
 	struct vc4_hvs *hvs;
 	struct vc4_v3d *v3d;
 	struct vc4_dpi *dpi;
-	struct vc4_dsi *dsi1;
 	struct vc4_vec *vec;
 	struct vc4_txp *txp;
 
@@ -215,8 +214,6 @@ struct vc4_dev {
 		struct work_struct reset_work;
 	} hangcheck;
 
-	struct semaphore async_modeset;
-
 	struct drm_modeset_lock ctm_state_lock;
 	struct drm_private_obj ctm_manager;
 	struct drm_private_obj hvs_channels;
@@ -444,12 +441,12 @@ struct vc4_encoder {
 	enum vc4_encoder_type type;
 	u32 clock_select;
 
-	void (*pre_crtc_configure)(struct drm_encoder *encoder);
-	void (*pre_crtc_enable)(struct drm_encoder *encoder);
-	void (*post_crtc_enable)(struct drm_encoder *encoder);
+	void (*pre_crtc_configure)(struct drm_encoder *encoder, struct drm_atomic_state *state);
+	void (*pre_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
+	void (*post_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
 
-	void (*post_crtc_disable)(struct drm_encoder *encoder);
-	void (*post_crtc_powerdown)(struct drm_encoder *encoder);
+	void (*post_crtc_disable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
+	void (*post_crtc_powerdown)(struct drm_encoder *encoder, struct drm_atomic_state *state);
 };
 
 static inline struct vc4_encoder *
@@ -785,13 +782,11 @@ struct vc4_validated_shader_info {
 
 /* vc4_bo.c */
 struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
-void vc4_free_object(struct drm_gem_object *gem_obj);
 struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
 			     bool from_cache, enum vc4_kernel_bo_type type);
 int vc4_dumb_create(struct drm_file *file_priv,
 		    struct drm_device *dev,
 		    struct drm_mode_create_dumb *args);
-struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags);
 int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
@@ -806,12 +801,6 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
 int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv);
-int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
-int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
-						 struct dma_buf_attachment *attach,
-						 struct sg_table *sgt);
-int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
 int vc4_bo_cache_init(struct drm_device *dev);
 int vc4_bo_inc_usecnt(struct vc4_bo *bo);
 void vc4_bo_dec_usecnt(struct vc4_bo *bo);
@@ -916,11 +905,10 @@ void vc4_irq_reset(struct drm_device *dev);
 extern struct platform_driver vc4_hvs_driver;
 void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int output);
 int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output);
-int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
-void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
-void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
-void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
-			  struct drm_atomic_state *state);
+int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
 void vc4_hvs_dump_state(struct drm_device *dev);
 void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
 void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 19aab4e7e2095c8fe8c0172c0dbbd4167fff0454..a55256ed0955841c620159af2611475e4ec780bd 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -306,11 +306,11 @@
 # define DSI0_PHY_AFEC0_RESET			BIT(11)
 # define DSI1_PHY_AFEC0_PD_BG			BIT(11)
 # define DSI0_PHY_AFEC0_PD			BIT(10)
-# define DSI1_PHY_AFEC0_PD_DLANE3		BIT(10)
+# define DSI1_PHY_AFEC0_PD_DLANE1		BIT(10)
 # define DSI0_PHY_AFEC0_PD_BG			BIT(9)
 # define DSI1_PHY_AFEC0_PD_DLANE2		BIT(9)
 # define DSI0_PHY_AFEC0_PD_DLANE1		BIT(8)
-# define DSI1_PHY_AFEC0_PD_DLANE1		BIT(8)
+# define DSI1_PHY_AFEC0_PD_DLANE3		BIT(8)
 # define DSI_PHY_AFEC0_PTATADJ_MASK		VC4_MASK(7, 4)
 # define DSI_PHY_AFEC0_PTATADJ_SHIFT		4
 # define DSI_PHY_AFEC0_CTATADJ_MASK		VC4_MASK(3, 0)
@@ -493,6 +493,17 @@
  */
 #define DSI1_ID			0x8c
 
+struct vc4_dsi_variant {
+	/* Whether we're on bcm2835's DSI0 or DSI1. */
+	unsigned int port;
+
+	bool broken_axi_workaround;
+
+	const char *debugfs_name;
+	const struct debugfs_reg32 *regs;
+	size_t nregs;
+};
+
 /* General DSI hardware state. */
 struct vc4_dsi {
 	struct platform_device *pdev;
@@ -509,8 +521,7 @@ struct vc4_dsi {
 	u32 *reg_dma_mem;
 	dma_addr_t reg_paddr;
 
-	/* Whether we're on bcm2835's DSI0 or DSI1. */
-	int port;
+	const struct vc4_dsi_variant *variant;
 
 	/* DSI channel for the panel we're connected to. */
 	u32 channel;
@@ -586,10 +597,10 @@ dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
 #define DSI_READ(offset) readl(dsi->regs + (offset))
 #define DSI_WRITE(offset, val) dsi_dma_workaround_write(dsi, offset, val)
 #define DSI_PORT_READ(offset) \
-	DSI_READ(dsi->port ? DSI1_##offset : DSI0_##offset)
+	DSI_READ(dsi->variant->port ? DSI1_##offset : DSI0_##offset)
 #define DSI_PORT_WRITE(offset, val) \
-	DSI_WRITE(dsi->port ? DSI1_##offset : DSI0_##offset, val)
-#define DSI_PORT_BIT(bit) (dsi->port ? DSI1_##bit : DSI0_##bit)
+	DSI_WRITE(dsi->variant->port ? DSI1_##offset : DSI0_##offset, val)
+#define DSI_PORT_BIT(bit) (dsi->variant->port ? DSI1_##bit : DSI0_##bit)
 
 /* VC4 DSI encoder KMS struct */
 struct vc4_dsi_encoder {
@@ -837,7 +848,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
 
 	ret = pm_runtime_get_sync(dev);
 	if (ret) {
-		DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->port);
+		DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
 		return;
 	}
 
@@ -871,7 +882,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
 	DSI_PORT_WRITE(STAT, DSI_PORT_READ(STAT));
 
 	/* Set AFE CTR00/CTR1 to release powerdown of analog. */
-	if (dsi->port == 0) {
+	if (dsi->variant->port == 0) {
 		u32 afec0 = (VC4_SET_FIELD(7, DSI_PHY_AFEC0_PTATADJ) |
 			     VC4_SET_FIELD(7, DSI_PHY_AFEC0_CTATADJ));
 
@@ -1017,7 +1028,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
 		       DSI_PORT_BIT(PHYC_CLANE_ENABLE) |
 		       ((dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ?
 			0 : DSI_PORT_BIT(PHYC_HS_CLK_CONTINUOUS)) |
-		       (dsi->port == 0 ?
+		       (dsi->variant->port == 0 ?
 			VC4_SET_FIELD(lpx - 1, DSI0_PHYC_ESC_CLK_LPDT) :
 			VC4_SET_FIELD(lpx - 1, DSI1_PHYC_ESC_CLK_LPDT)));
 
@@ -1043,13 +1054,13 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
 		       DSI_DISP1_ENABLE);
 
 	/* Ungate the block. */
-	if (dsi->port == 0)
+	if (dsi->variant->port == 0)
 		DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI0_CTRL_CTRL0);
 	else
 		DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI1_CTRL_EN);
 
 	/* Bring AFE out of reset. */
-	if (dsi->port == 0) {
+	if (dsi->variant->port == 0) {
 	} else {
 		DSI_PORT_WRITE(PHY_AFEC0,
 			       DSI_PORT_READ(PHY_AFEC0) &
@@ -1313,8 +1324,32 @@ static const struct drm_encoder_helper_funcs vc4_dsi_encoder_helper_funcs = {
 	.mode_fixup = vc4_dsi_encoder_mode_fixup,
 };
 
+static const struct vc4_dsi_variant bcm2711_dsi1_variant = {
+	.port			= 1,
+	.debugfs_name		= "dsi1_regs",
+	.regs			= dsi1_regs,
+	.nregs			= ARRAY_SIZE(dsi1_regs),
+};
+
+static const struct vc4_dsi_variant bcm2835_dsi0_variant = {
+	.port			= 0,
+	.debugfs_name		= "dsi0_regs",
+	.regs			= dsi0_regs,
+	.nregs			= ARRAY_SIZE(dsi0_regs),
+};
+
+static const struct vc4_dsi_variant bcm2835_dsi1_variant = {
+	.port			= 1,
+	.broken_axi_workaround	= true,
+	.debugfs_name		= "dsi1_regs",
+	.regs			= dsi1_regs,
+	.nregs			= ARRAY_SIZE(dsi1_regs),
+};
+
 static const struct of_device_id vc4_dsi_dt_match[] = {
-	{ .compatible = "brcm,bcm2835-dsi1", (void *)(uintptr_t)1 },
+	{ .compatible = "brcm,bcm2711-dsi1", &bcm2711_dsi1_variant },
+	{ .compatible = "brcm,bcm2835-dsi0", &bcm2835_dsi0_variant },
+	{ .compatible = "brcm,bcm2835-dsi1", &bcm2835_dsi1_variant },
 	{}
 };
 
@@ -1325,7 +1360,7 @@ static void dsi_handle_error(struct vc4_dsi *dsi,
 	if (!(stat & bit))
 		return;
 
-	DRM_ERROR("DSI%d: %s error\n", dsi->port, type);
+	DRM_ERROR("DSI%d: %s error\n", dsi->variant->port, type);
 	*ret = IRQ_HANDLED;
 }
 
@@ -1398,12 +1433,12 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
 	struct device *dev = &dsi->pdev->dev;
 	const char *parent_name = __clk_get_name(dsi->pll_phy_clock);
 	static const struct {
-		const char *dsi0_name, *dsi1_name;
+		const char *name;
 		int div;
 	} phy_clocks[] = {
-		{ "dsi0_byte", "dsi1_byte", 8 },
-		{ "dsi0_ddr2", "dsi1_ddr2", 4 },
-		{ "dsi0_ddr", "dsi1_ddr", 2 },
+		{ "byte", 8 },
+		{ "ddr2", 4 },
+		{ "ddr", 2 },
 	};
 	int i;
 
@@ -1419,8 +1454,12 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
 	for (i = 0; i < ARRAY_SIZE(phy_clocks); i++) {
 		struct clk_fixed_factor *fix = &dsi->phy_clocks[i];
 		struct clk_init_data init;
+		char clk_name[16];
 		int ret;
 
+		snprintf(clk_name, sizeof(clk_name),
+			 "dsi%u_%s", dsi->variant->port, phy_clocks[i].name);
+
 		/* We just use core fixed factor clock ops for the PHY
 		 * clocks.  The clocks are actually gated by the
 		 * PHY_AFEC0_DDRCLK_EN bits, which we should be
@@ -1437,10 +1476,7 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
 		memset(&init, 0, sizeof(init));
 		init.parent_names = &parent_name;
 		init.num_parents = 1;
-		if (dsi->port == 1)
-			init.name = phy_clocks[i].dsi1_name;
-		else
-			init.name = phy_clocks[i].dsi0_name;
+		init.name = clk_name;
 		init.ops = &clk_fixed_factor_ops;
 
 		ret = devm_clk_hw_register(dev, &fix->hw);
@@ -1459,7 +1495,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct drm_device *drm = dev_get_drvdata(master);
-	struct vc4_dev *vc4 = to_vc4_dev(drm);
 	struct vc4_dsi *dsi = dev_get_drvdata(dev);
 	struct vc4_dsi_encoder *vc4_dsi_encoder;
 	struct drm_panel *panel;
@@ -1471,7 +1506,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
 	if (!match)
 		return -ENODEV;
 
-	dsi->port = (uintptr_t)match->data;
+	dsi->variant = match->data;
 
 	vc4_dsi_encoder = devm_kzalloc(dev, sizeof(*vc4_dsi_encoder),
 				       GFP_KERNEL);
@@ -1488,13 +1523,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
 		return PTR_ERR(dsi->regs);
 
 	dsi->regset.base = dsi->regs;
-	if (dsi->port == 0) {
-		dsi->regset.regs = dsi0_regs;
-		dsi->regset.nregs = ARRAY_SIZE(dsi0_regs);
-	} else {
-		dsi->regset.regs = dsi1_regs;
-		dsi->regset.nregs = ARRAY_SIZE(dsi1_regs);
-	}
+	dsi->regset.regs = dsi->variant->regs;
+	dsi->regset.nregs = dsi->variant->nregs;
 
 	if (DSI_PORT_READ(ID) != DSI_ID_VALUE) {
 		dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
@@ -1502,11 +1532,11 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
 		return -ENODEV;
 	}
 
-	/* DSI1 has a broken AXI slave that doesn't respond to writes
-	 * from the ARM.  It does handle writes from the DMA engine,
+	/* DSI1 on BCM2835/6/7 has a broken AXI slave that doesn't respond to
+	 * writes from the ARM.  It does handle writes from the DMA engine,
 	 * so set up a channel for talking to it.
 	 */
-	if (dsi->port == 1) {
+	if (dsi->variant->broken_axi_workaround) {
 		dsi->reg_dma_mem = dma_alloc_coherent(dev, 4,
 						      &dsi->reg_dma_paddr,
 						      GFP_KERNEL);
@@ -1612,9 +1642,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
 	if (ret)
 		return ret;
 
-	if (dsi->port == 1)
-		vc4->dsi1 = dsi;
-
 	drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI);
 	drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
 
@@ -1630,10 +1657,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
 	 */
 	list_splice_init(&dsi->encoder->bridge_chain, &dsi->bridge_chain);
 
-	if (dsi->port == 0)
-		vc4_debugfs_add_regset32(drm, "dsi0_regs", &dsi->regset);
-	else
-		vc4_debugfs_add_regset32(drm, "dsi1_regs", &dsi->regset);
+	vc4_debugfs_add_regset32(drm, dsi->variant->debugfs_name, &dsi->regset);
 
 	pm_runtime_enable(dev);
 
@@ -1643,8 +1667,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
 static void vc4_dsi_unbind(struct device *dev, struct device *master,
 			   void *data)
 {
-	struct drm_device *drm = dev_get_drvdata(master);
-	struct vc4_dev *vc4 = to_vc4_dev(drm);
 	struct vc4_dsi *dsi = dev_get_drvdata(dev);
 
 	if (dsi->bridge)
@@ -1656,9 +1678,6 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
 	 */
 	list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
 	drm_encoder_cleanup(dsi->encoder);
-
-	if (dsi->port == 1)
-		vc4->dsi1 = NULL;
 }
 
 static const struct component_ops vc4_dsi_ops = {
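
The vc4_dsi.c conversion above keys everything off a per-compatible variant structure instead of a bare port number. A hedged standalone sketch of that compatible-string to variant lookup, with plain strcmp() standing in for the of_device_id matching:

#include <stdio.h>
#include <string.h>

struct dsi_variant {
	unsigned int port;
	int broken_axi_workaround;
	const char *debugfs_name;
};

static const struct dsi_variant bcm2711_dsi1 = { 1, 0, "dsi1_regs" };
static const struct dsi_variant bcm2835_dsi1 = { 1, 1, "dsi1_regs" };

static const struct { const char *compatible; const struct dsi_variant *data; }
matches[] = {
	{ "brcm,bcm2711-dsi1", &bcm2711_dsi1 },
	{ "brcm,bcm2835-dsi1", &bcm2835_dsi1 },
	{ NULL, NULL },
};

static const struct dsi_variant *lookup(const char *compat)
{
	for (unsigned int i = 0; matches[i].compatible; i++)
		if (!strcmp(matches[i].compatible, compat))
			return matches[i].data;
	return NULL;
}

int main(void)
{
	const struct dsi_variant *v = lookup("brcm,bcm2835-dsi1");

	if (v)
		printf("port %u, AXI workaround: %d\n",
		       v->port, v->broken_axi_workaround);
	return 0;
}
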
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index b641252939d878ee83527d9774fd91b426ababce..445d3bab89e0a27c421c2abd64164050513b362c 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -1026,7 +1026,6 @@ int vc4_queue_seqno_cb(struct drm_device *dev,
 		       void (*func)(struct vc4_seqno_cb *cb))
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
-	int ret = 0;
 	unsigned long irqflags;
 
 	cb->func = func;
@@ -1041,7 +1040,7 @@ int vc4_queue_seqno_cb(struct drm_device *dev,
 	}
 	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 
-	return ret;
+	return 0;
 }
 
 /* Scheduled when any job has been completed, this walks the list of
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 98cab0bbe92d881086f09e1b916d8d27c5c320e4..1fda574579afc326d7d7d29f8a3bd94d15133d34 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -76,12 +76,25 @@
 #define VC5_HDMI_VERTB_VSPO_SHIFT		16
 #define VC5_HDMI_VERTB_VSPO_MASK		VC4_MASK(29, 16)
 
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_SHIFT	8
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_MASK	VC4_MASK(10, 8)
+
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_SHIFT		0
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_MASK		VC4_MASK(3, 0)
+
+#define VC5_HDMI_GCP_CONFIG_GCP_ENABLE		BIT(31)
+
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_SHIFT	8
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK	VC4_MASK(15, 8)
+
 # define VC4_HD_M_SW_RST			BIT(2)
 # define VC4_HD_M_ENABLE			BIT(0)
 
 #define CEC_CLOCK_FREQ 40000
 #define VC4_HSM_MID_CLOCK 149985000
 
+#define HDMI_14_MAX_TMDS_CLK   (340 * 1000 * 1000)
+
 static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
@@ -119,24 +132,57 @@ static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
 		   HDMI_READ(HDMI_CLOCK_STOP) | VC4_DVP_HT_CLOCK_STOP_PIXEL);
 }
 
+#ifdef CONFIG_DRM_VC4_HDMI_CEC
+static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
+{
+	u16 clk_cnt;
+	u32 value;
+
+	value = HDMI_READ(HDMI_CEC_CNTRL_1);
+	value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
+
+	/*
+	 * Set the clock divider: the hsm_clock rate and this divider
+	 * setting will give a 40 kHz CEC clock.
+	 */
+	clk_cnt = clk_get_rate(vc4_hdmi->cec_clock) / CEC_CLOCK_FREQ;
+	value |= clk_cnt << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT;
+	HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
+}
+#else
+static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {}
+#endif
+
 static enum drm_connector_status
 vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
 {
 	struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
+	bool connected = false;
 
 	if (vc4_hdmi->hpd_gpio) {
 		if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^
 		    vc4_hdmi->hpd_active_low)
-			return connector_status_connected;
-		cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
-		return connector_status_disconnected;
+			connected = true;
+	} else if (drm_probe_ddc(vc4_hdmi->ddc)) {
+		connected = true;
+	} else if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) {
+		connected = true;
 	}
 
-	if (drm_probe_ddc(vc4_hdmi->ddc))
-		return connector_status_connected;
+	if (connected) {
+		if (connector->status != connector_status_connected) {
+			struct edid *edid = drm_get_edid(connector, vc4_hdmi->ddc);
+
+			if (edid) {
+				cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
+				vc4_hdmi->encoder.hdmi_monitor = drm_detect_hdmi_monitor(edid);
+				kfree(edid);
+			}
+		}
 
-	if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED)
 		return connector_status_connected;
+	}
+
 	cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
 	return connector_status_disconnected;
 }
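
vc4_hdmi_cec_update_clk_div() above replaces the hardcoded 4091 divider with one computed from the actual clock rate. A worked example of that division, using an assumed HSM rate purely for illustration:

#include <stdio.h>

#define CEC_CLOCK_FREQ 40000   /* target CEC tick rate from the driver */

int main(void)
{
	unsigned long hsm_rate = 163682864;   /* assumed example rate in Hz */
	unsigned int clk_cnt = hsm_rate / CEC_CLOCK_FREQ;

	/* The old code hardcoded 4091; this tracks whatever the clock runs at. */
	printf("divider %u -> %lu Hz effective CEC clock\n",
	       clk_cnt, hsm_rate / clk_cnt);
	return 0;
}
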
@@ -170,16 +216,48 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
 
 static void vc4_hdmi_connector_reset(struct drm_connector *connector)
 {
-	drm_atomic_helper_connector_reset(connector);
+	struct vc4_hdmi_connector_state *old_state =
+		conn_state_to_vc4_hdmi_conn_state(connector->state);
+	struct vc4_hdmi_connector_state *new_state =
+		kzalloc(sizeof(*new_state), GFP_KERNEL);
+
+	if (connector->state)
+		__drm_atomic_helper_connector_destroy_state(connector->state);
+
+	kfree(old_state);
+	__drm_atomic_helper_connector_reset(connector, &new_state->base);
+
+	if (!new_state)
+		return;
+
+	new_state->base.max_bpc = 8;
+	new_state->base.max_requested_bpc = 8;
 	drm_atomic_helper_connector_tv_reset(connector);
 }
 
+static struct drm_connector_state *
+vc4_hdmi_connector_duplicate_state(struct drm_connector *connector)
+{
+	struct drm_connector_state *conn_state = connector->state;
+	struct vc4_hdmi_connector_state *vc4_state = conn_state_to_vc4_hdmi_conn_state(conn_state);
+	struct vc4_hdmi_connector_state *new_state;
+
+	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
+	if (!new_state)
+		return NULL;
+
+	new_state->pixel_rate = vc4_state->pixel_rate;
+	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
+
+	return &new_state->base;
+}
+
 static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
 	.detect = vc4_hdmi_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = vc4_hdmi_connector_destroy,
 	.reset = vc4_hdmi_connector_reset,
-	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_duplicate_state = vc4_hdmi_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
@@ -200,12 +278,20 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
 				    vc4_hdmi->ddc);
 	drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
 
+	/*
+	 * Some of the properties below require access to state, like bpc.
+	 * Allocate some default initial connector state with our reset helper.
+	 */
+	if (connector->funcs->reset)
+		connector->funcs->reset(connector);
+
 	/* Create and attach TV margin props to this connector. */
 	ret = drm_mode_create_tv_margin_properties(dev);
 	if (ret)
 		return ret;
 
 	drm_connector_attach_tv_margin_properties(connector);
+	drm_connector_attach_max_bpc_property(connector, 8, 12);
 
 	connector->polled = (DRM_CONNECTOR_POLL_CONNECT |
 			     DRM_CONNECTOR_POLL_DISCONNECT);
@@ -219,7 +305,8 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
 }
 
 static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
-				enum hdmi_infoframe_type type)
+				enum hdmi_infoframe_type type,
+				bool poll)
 {
 	struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
 	u32 packet_id = type - 0x80;
@@ -227,6 +314,9 @@ static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
 	HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
 		   HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id));
 
+	if (!poll)
+		return 0;
+
 	return wait_for(!(HDMI_READ(HDMI_RAM_PACKET_STATUS) &
 			  BIT(packet_id)), 100);
 }
@@ -253,7 +343,7 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
 	if (len < 0)
 		return;
 
-	ret = vc4_hdmi_stop_packet(encoder, frame->any.type);
+	ret = vc4_hdmi_stop_packet(encoder, frame->any.type, true);
 	if (ret) {
 		DRM_ERROR("Failed to wait for infoframe to go idle: %d\n", ret);
 		return;
@@ -356,7 +446,8 @@ static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
 		vc4_hdmi_set_audio_infoframe(encoder);
 }
 
-static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
+					       struct drm_atomic_state *state)
 {
 	struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
 
@@ -369,7 +460,8 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder)
 		   HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX);
 }
 
-static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
+						 struct drm_atomic_state *state)
 {
 	struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
 	int ret;
@@ -468,6 +560,7 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable)
 }
 
 static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
+				 struct drm_connector_state *state,
 				 struct drm_display_mode *mode)
 {
 	bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -511,7 +604,9 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
 	HDMI_WRITE(HDMI_VERTB0, vertb_even);
 	HDMI_WRITE(HDMI_VERTB1, vertb);
 }
+
 static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
+				 struct drm_connector_state *state,
 				 struct drm_display_mode *mode)
 {
 	bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -531,6 +626,9 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
 					mode->crtc_vsync_end -
 					interlaced,
 					VC4_HDMI_VERTB_VBP));
+	unsigned char gcp;
+	bool gcp_en;
+	u32 reg;
 
 	HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021);
 	HDMI_WRITE(HDMI_HORZA,
@@ -556,6 +654,39 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
 	HDMI_WRITE(HDMI_VERTB0, vertb_even);
 	HDMI_WRITE(HDMI_VERTB1, vertb);
 
+	switch (state->max_bpc) {
+	case 12:
+		gcp = 6;
+		gcp_en = true;
+		break;
+	case 10:
+		gcp = 5;
+		gcp_en = true;
+		break;
+	case 8:
+	default:
+		gcp = 4;
+		gcp_en = false;
+		break;
+	}
+
+	reg = HDMI_READ(HDMI_DEEP_COLOR_CONFIG_1);
+	reg &= ~(VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_MASK |
+		 VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_MASK);
+	reg |= VC4_SET_FIELD(2, VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE) |
+	       VC4_SET_FIELD(gcp, VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH);
+	HDMI_WRITE(HDMI_DEEP_COLOR_CONFIG_1, reg);
+
+	reg = HDMI_READ(HDMI_GCP_WORD_1);
+	reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK;
+	reg |= VC4_SET_FIELD(gcp, VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1);
+	HDMI_WRITE(HDMI_GCP_WORD_1, reg);
+
+	reg = HDMI_READ(HDMI_GCP_CONFIG);
+	reg &= ~VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
+	reg |= gcp_en ? VC5_HDMI_GCP_CONFIG_GCP_ENABLE : 0;
+	HDMI_WRITE(HDMI_GCP_CONFIG, reg);
+
 	HDMI_WRITE(HDMI_CLOCK_STOP, 0);
 }
 
@@ -583,8 +714,29 @@ static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi)
 		  "VC4_HDMI_FIFO_CTL_RECENTER_DONE");
 }
 
-static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
+static struct drm_connector_state *
+vc4_hdmi_encoder_get_connector_state(struct drm_encoder *encoder,
+				     struct drm_atomic_state *state)
 {
+	struct drm_connector_state *conn_state;
+	struct drm_connector *connector;
+	unsigned int i;
+
+	for_each_new_connector_in_state(state, connector, conn_state, i) {
+		if (conn_state->best_encoder == encoder)
+			return conn_state;
+	}
+
+	return NULL;
+}
+
+static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
+						struct drm_atomic_state *state)
+{
+	struct drm_connector_state *conn_state =
+		vc4_hdmi_encoder_get_connector_state(encoder, state);
+	struct vc4_hdmi_connector_state *vc4_conn_state =
+		conn_state_to_vc4_hdmi_conn_state(conn_state);
 	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
 	struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
 	unsigned long pixel_rate, hsm_rate;
@@ -596,7 +748,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
 		return;
 	}
 
-	pixel_rate = mode->clock * 1000 * ((mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1);
+	pixel_rate = vc4_conn_state->pixel_rate;
 	ret = clk_set_rate(vc4_hdmi->pixel_clock, pixel_rate);
 	if (ret) {
 		DRM_ERROR("Failed to set pixel clock rate: %d\n", ret);
@@ -639,6 +791,8 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
 		return;
 	}
 
+	vc4_hdmi_cec_update_clk_div(vc4_hdmi);
+
 	/*
 	 * FIXME: When the pixel freq is 594MHz (4k60), this needs to be setup
 	 * at 300MHz.
@@ -660,11 +814,8 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
 		return;
 	}
 
-	if (vc4_hdmi->variant->reset)
-		vc4_hdmi->variant->reset(vc4_hdmi);
-
 	if (vc4_hdmi->variant->phy_init)
-		vc4_hdmi->variant->phy_init(vc4_hdmi, mode);
+		vc4_hdmi->variant->phy_init(vc4_hdmi, vc4_conn_state);
 
 	HDMI_WRITE(HDMI_SCHEDULER_CONTROL,
 		   HDMI_READ(HDMI_SCHEDULER_CONTROL) |
@@ -672,10 +823,11 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
 		   VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS);
 
 	if (vc4_hdmi->variant->set_timings)
-		vc4_hdmi->variant->set_timings(vc4_hdmi, mode);
+		vc4_hdmi->variant->set_timings(vc4_hdmi, conn_state, mode);
 }
 
-static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
+					     struct drm_atomic_state *state)
 {
 	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
 	struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
@@ -697,7 +849,8 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder)
 	HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
 }
 
-static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
+					      struct drm_atomic_state *state)
 {
 	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
 	struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
@@ -766,6 +919,7 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
 					 struct drm_crtc_state *crtc_state,
 					 struct drm_connector_state *conn_state)
 {
+	struct vc4_hdmi_connector_state *vc4_state = conn_state_to_vc4_hdmi_conn_state(conn_state);
 	struct drm_display_mode *mode = &crtc_state->adjusted_mode;
 	struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
 	unsigned long long pixel_rate = mode->clock * 1000;
@@ -790,9 +944,22 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
 		pixel_rate = mode->clock * 1000;
 	}
 
+	if (conn_state->max_bpc == 12) {
+		pixel_rate = pixel_rate * 150;
+		do_div(pixel_rate, 100);
+	} else if (conn_state->max_bpc == 10) {
+		pixel_rate = pixel_rate * 125;
+		do_div(pixel_rate, 100);
+	}
+
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		pixel_rate = pixel_rate * 2;
+
 	if (pixel_rate > vc4_hdmi->variant->max_pixel_clock)
 		return -EINVAL;
 
+	vc4_state->pixel_rate = pixel_rate;
+
 	return 0;
 }
 
@@ -936,7 +1103,7 @@ static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
 	int ret;
 
 	vc4_hdmi->audio.streaming = false;
-	ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO);
+	ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO, false);
 	if (ret)
 		dev_err(dev, "Failed to stop audio infoframe: %d\n", ret);
 
@@ -1288,15 +1455,22 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
 }
 
 #ifdef CONFIG_DRM_VC4_HDMI_CEC
-static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv)
+static irqreturn_t vc4_cec_irq_handler_rx_thread(int irq, void *priv)
+{
+	struct vc4_hdmi *vc4_hdmi = priv;
+
+	if (vc4_hdmi->cec_rx_msg.len)
+		cec_received_msg(vc4_hdmi->cec_adap,
+				 &vc4_hdmi->cec_rx_msg);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t vc4_cec_irq_handler_tx_thread(int irq, void *priv)
 {
 	struct vc4_hdmi *vc4_hdmi = priv;
 
-	if (vc4_hdmi->cec_irq_was_rx) {
-		if (vc4_hdmi->cec_rx_msg.len)
-			cec_received_msg(vc4_hdmi->cec_adap,
-					 &vc4_hdmi->cec_rx_msg);
-	} else if (vc4_hdmi->cec_tx_ok) {
+	if (vc4_hdmi->cec_tx_ok) {
 		cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_OK,
 				  0, 0, 0, 0);
 	} else {
@@ -1310,15 +1484,35 @@ static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv)
+{
+	struct vc4_hdmi *vc4_hdmi = priv;
+	irqreturn_t ret;
+
+	if (vc4_hdmi->cec_irq_was_rx)
+		ret = vc4_cec_irq_handler_rx_thread(irq, priv);
+	else
+		ret = vc4_cec_irq_handler_tx_thread(irq, priv);
+
+	return ret;
+}
+
 static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1)
 {
+	struct drm_device *dev = vc4_hdmi->connector.dev;
 	struct cec_msg *msg = &vc4_hdmi->cec_rx_msg;
 	unsigned int i;
 
 	msg->len = 1 + ((cntrl1 & VC4_HDMI_CEC_REC_WRD_CNT_MASK) >>
 					VC4_HDMI_CEC_REC_WRD_CNT_SHIFT);
+
+	if (msg->len > 16) {
+		drm_err(dev, "Attempting to read too much data (%d)\n", msg->len);
+		return;
+	}
+
 	for (i = 0; i < msg->len; i += 4) {
-		u32 val = HDMI_READ(HDMI_CEC_RX_DATA_1 + i);
+		u32 val = HDMI_READ(HDMI_CEC_RX_DATA_1 + (i >> 2));
 
 		msg->msg[i] = val & 0xff;
 		msg->msg[i + 1] = (val >> 8) & 0xff;
@@ -1327,31 +1521,55 @@ static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1)
 	}
 }
 
+static irqreturn_t vc4_cec_irq_handler_tx_bare(int irq, void *priv)
+{
+	struct vc4_hdmi *vc4_hdmi = priv;
+	u32 cntrl1;
+
+	cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
+	vc4_hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD;
+	cntrl1 &= ~VC4_HDMI_CEC_START_XMIT_BEGIN;
+	HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t vc4_cec_irq_handler_rx_bare(int irq, void *priv)
+{
+	struct vc4_hdmi *vc4_hdmi = priv;
+	u32 cntrl1;
+
+	vc4_hdmi->cec_rx_msg.len = 0;
+	cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
+	vc4_cec_read_msg(vc4_hdmi, cntrl1);
+	cntrl1 |= VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
+	HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
+	cntrl1 &= ~VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
+
+	HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
+
+	return IRQ_WAKE_THREAD;
+}
+
 static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
 {
 	struct vc4_hdmi *vc4_hdmi = priv;
 	u32 stat = HDMI_READ(HDMI_CEC_CPU_STATUS);
-	u32 cntrl1, cntrl5;
+	irqreturn_t ret;
+	u32 cntrl5;
 
 	if (!(stat & VC4_HDMI_CPU_CEC))
 		return IRQ_NONE;
-	vc4_hdmi->cec_rx_msg.len = 0;
-	cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
+
 	cntrl5 = HDMI_READ(HDMI_CEC_CNTRL_5);
 	vc4_hdmi->cec_irq_was_rx = cntrl5 & VC4_HDMI_CEC_RX_CEC_INT;
-	if (vc4_hdmi->cec_irq_was_rx) {
-		vc4_cec_read_msg(vc4_hdmi, cntrl1);
-		cntrl1 |= VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
-		HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
-		cntrl1 &= ~VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
-	} else {
-		vc4_hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD;
-		cntrl1 &= ~VC4_HDMI_CEC_START_XMIT_BEGIN;
-	}
-	HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
-	HDMI_WRITE(HDMI_CEC_CPU_CLEAR, VC4_HDMI_CPU_CEC);
+	if (vc4_hdmi->cec_irq_was_rx)
+		ret = vc4_cec_irq_handler_rx_bare(irq, priv);
+	else
+		ret = vc4_cec_irq_handler_tx_bare(irq, priv);
 
-	return IRQ_WAKE_THREAD;
+	HDMI_WRITE(HDMI_CEC_CPU_CLEAR, VC4_HDMI_CPU_CEC);
+	return ret;
 }
 
 static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
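
The i >> 2 indexing introduced above reflects that HDMI_CEC_TX_DATA_1/RX_DATA_1 are enumerated registers, each carrying four message bytes, rather than byte offsets. A standalone demonstration of the byte-to-word packing loop; the buffer is padded so the word-granular loop never reads past it:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t msg[8] = { 0x05, 0x44, 0x41, 0x00, 0x13, 0x37 }; /* len 6, padded */
	unsigned int len = 6;
	uint32_t regs[2] = { 0 };

	for (unsigned int i = 0; i < len; i += 4)
		regs[i >> 2] = msg[i] |
			       (msg[i + 1] << 8) |
			       (msg[i + 2] << 16) |
			       ((uint32_t)msg[i + 3] << 24);

	printf("word 0: 0x%08x\nword 1: 0x%08x\n", regs[0], regs[1]);
	return 0;
}
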
@@ -1388,9 +1606,11 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
 			   ((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) |
 			   ((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT));
 
-		HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC);
+		if (!vc4_hdmi->variant->external_irq_controller)
+			HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC);
 	} else {
-		HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC);
+		if (!vc4_hdmi->variant->external_irq_controller)
+			HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC);
 		HDMI_WRITE(HDMI_CEC_CNTRL_5, val |
 			   VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
 	}
@@ -1411,11 +1631,17 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
 				      u32 signal_free_time, struct cec_msg *msg)
 {
 	struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+	struct drm_device *dev = vc4_hdmi->connector.dev;
 	u32 val;
 	unsigned int i;
 
+	if (msg->len > 16) {
+		drm_err(dev, "Attempting to transmit too much data (%d)\n", msg->len);
+		return -ENOMEM;
+	}
+
 	for (i = 0; i < msg->len; i += 4)
-		HDMI_WRITE(HDMI_CEC_TX_DATA_1 + i,
+		HDMI_WRITE(HDMI_CEC_TX_DATA_1 + (i >> 2),
 			   (msg->msg[i]) |
 			   (msg->msg[i + 1] << 8) |
 			   (msg->msg[i + 2] << 16) |
@@ -1442,11 +1668,14 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
 {
 	struct cec_connector_info conn_info;
 	struct platform_device *pdev = vc4_hdmi->pdev;
+	struct device *dev = &pdev->dev;
 	u32 value;
 	int ret;
 
-	if (!vc4_hdmi->variant->cec_available)
+	if (!of_find_property(dev->of_node, "interrupts", NULL)) {
+		dev_warn(dev, "'interrupts' DT property is missing, no CEC\n");
 		return 0;
+	}
 
 	vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
 						  vc4_hdmi, "vc4",
@@ -1459,23 +1688,39 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
 	cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector);
 	cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info);
 
-	HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
 	value = HDMI_READ(HDMI_CEC_CNTRL_1);
-	value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
-	/*
-	 * Set the logical address to Unregistered and set the clock
-	 * divider: the hsm_clock rate and this divider setting will
-	 * give a 40 kHz CEC clock.
-	 */
-	value |= VC4_HDMI_CEC_ADDR_MASK |
-		 (4091 << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT);
+	/* Set the logical address to Unregistered */
+	value |= VC4_HDMI_CEC_ADDR_MASK;
 	HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
-	ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0),
-					vc4_cec_irq_handler,
-					vc4_cec_irq_handler_thread, 0,
-					"vc4 hdmi cec", vc4_hdmi);
-	if (ret)
-		goto err_delete_cec_adap;
+
+	vc4_hdmi_cec_update_clk_div(vc4_hdmi);
+
+	if (vc4_hdmi->variant->external_irq_controller) {
+		ret = devm_request_threaded_irq(&pdev->dev,
+						platform_get_irq_byname(pdev, "cec-rx"),
+						vc4_cec_irq_handler_rx_bare,
+						vc4_cec_irq_handler_rx_thread, 0,
+						"vc4 hdmi cec rx", vc4_hdmi);
+		if (ret)
+			goto err_delete_cec_adap;
+
+		ret = devm_request_threaded_irq(&pdev->dev,
+						platform_get_irq_byname(pdev, "cec-tx"),
+						vc4_cec_irq_handler_tx_bare,
+						vc4_cec_irq_handler_tx_thread, 0,
+						"vc4 hdmi cec tx", vc4_hdmi);
+		if (ret)
+			goto err_delete_cec_adap;
+	} else {
+		HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
+
+		ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0),
+						vc4_cec_irq_handler,
+						vc4_cec_irq_handler_thread, 0,
+						"vc4 hdmi cec", vc4_hdmi);
+		if (ret)
+			goto err_delete_cec_adap;
+	}
 
 	ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev);
 	if (ret < 0)
@@ -1575,6 +1820,7 @@ static int vc4_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
 		return PTR_ERR(vc4_hdmi->hsm_clock);
 	}
 	vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock;
+	vc4_hdmi->cec_clock = vc4_hdmi->hsm_clock;
 
 	return 0;
 }
@@ -1668,6 +1914,12 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
 		return PTR_ERR(vc4_hdmi->audio_clock);
 	}
 
+	vc4_hdmi->cec_clock = devm_clk_get(dev, "cec");
+	if (IS_ERR(vc4_hdmi->cec_clock)) {
+		DRM_ERROR("Failed to get CEC clock\n");
+		return PTR_ERR(vc4_hdmi->cec_clock);
+	}
+
 	vc4_hdmi->reset = devm_reset_control_get(dev, NULL);
 	if (IS_ERR(vc4_hdmi->reset)) {
 		DRM_ERROR("Failed to get HDMI reset line\n");
@@ -1740,6 +1992,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 	vc4_hdmi->disable_wifi_frequencies =
 		of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");
 
+	if (vc4_hdmi->variant->reset)
+		vc4_hdmi->variant->reset(vc4_hdmi);
+
 	pm_runtime_enable(dev);
 
 	drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
@@ -1835,7 +2090,6 @@ static const struct vc4_hdmi_variant bcm2835_variant = {
 	.debugfs_name		= "hdmi_regs",
 	.card_name		= "vc4-hdmi",
 	.max_pixel_clock	= 162000000,
-	.cec_available		= true,
 	.registers		= vc4_hdmi_fields,
 	.num_registers		= ARRAY_SIZE(vc4_hdmi_fields),
 
@@ -1854,7 +2108,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
 	.encoder_type		= VC4_ENCODER_TYPE_HDMI0,
 	.debugfs_name		= "hdmi0_regs",
 	.card_name		= "vc4-hdmi-0",
-	.max_pixel_clock	= 297000000,
+	.max_pixel_clock	= HDMI_14_MAX_TMDS_CLK,
 	.registers		= vc5_hdmi_hdmi0_fields,
 	.num_registers		= ARRAY_SIZE(vc5_hdmi_hdmi0_fields),
 	.phy_lane_mapping	= {
@@ -1864,6 +2118,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
 		PHY_LANE_CK,
 	},
 	.unsupported_odd_h_timings	= true,
+	.external_irq_controller	= true,
 
 	.init_resources		= vc5_hdmi_init_resources,
 	.csc_setup		= vc5_hdmi_csc_setup,
@@ -1880,7 +2135,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
 	.encoder_type		= VC4_ENCODER_TYPE_HDMI1,
 	.debugfs_name		= "hdmi1_regs",
 	.card_name		= "vc4-hdmi-1",
-	.max_pixel_clock	= 297000000,
+	.max_pixel_clock	= HDMI_14_MAX_TMDS_CLK,
 	.registers		= vc5_hdmi_hdmi1_fields,
 	.num_registers		= ARRAY_SIZE(vc5_hdmi_hdmi1_fields),
 	.phy_lane_mapping	= {
@@ -1890,6 +2145,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
 		PHY_LANE_2,
 	},
 	.unsupported_odd_h_timings	= true,
+	.external_irq_controller	= true,
 
 	.init_resources		= vc5_hdmi_init_resources,
 	.csc_setup		= vc5_hdmi_csc_setup,
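
The atomic_check changes above scale the pixel rate for deep-color output before comparing against max_pixel_clock: 12 bpc needs 1.5x the 8 bpc TMDS rate and 10 bpc needs 1.25x, with a further doubling for DBLCLK modes. A small sketch of that arithmetic, with plain integer division in place of do_div():

#include <stdio.h>

static unsigned long long scale_pixel_rate(unsigned long long rate,
					   unsigned int max_bpc, int dblclk)
{
	if (max_bpc == 12)
		rate = rate * 150 / 100;   /* 36 bits per pixel vs 24 */
	else if (max_bpc == 10)
		rate = rate * 125 / 100;   /* 30 bits per pixel vs 24 */

	if (dblclk)
		rate *= 2;

	return rate;
}

int main(void)
{
	/* 4k30 (297 MHz) at 12 bpc -> 445.5 MHz, over the 340 MHz
	 * HDMI_14_MAX_TMDS_CLK limit above, so atomic_check rejects it. */
	printf("%llu\n", scale_pixel_rate(297000000ULL, 12, 0)); /* 445500000 */
	printf("%llu\n", scale_pixel_rate(148500000ULL, 10, 0)); /* 185625000 */
	return 0;
}
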
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 0526a9cf608a3b5fb242de70353a91199ad1b40f..3cebd1fd00fc6e5be022c56419998864ce6c0276 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -21,10 +21,9 @@ to_vc4_hdmi_encoder(struct drm_encoder *encoder)
 	return container_of(encoder, struct vc4_hdmi_encoder, base.base);
 }
 
-struct drm_display_mode;
-
 struct vc4_hdmi;
 struct vc4_hdmi_register;
+struct vc4_hdmi_connector_state;
 
 enum vc4_hdmi_phy_channel {
 	PHY_LANE_0 = 0,
@@ -43,9 +42,6 @@ struct vc4_hdmi_variant {
 	/* Filename to expose the registers in debugfs */
 	const char *debugfs_name;
 
-	/* Set to true when the CEC support is available */
-	bool cec_available;
-
 	/* Maximum pixel clock supported by the controller (in Hz) */
 	unsigned long long max_pixel_clock;
 
@@ -65,6 +61,13 @@ struct vc4_hdmi_variant {
 	/* The BCM2711 cannot deal with odd horizontal pixel timings */
 	bool unsupported_odd_h_timings;
 
+	/*
+	 * The BCM2711 CEC/hotplug IRQ controller is shared between the
+	 * two HDMI controllers, and we have a proper irqchip driver for
+	 * it.
+	 */
+	bool external_irq_controller;
+
 	/* Callback to get the resources (memory region, interrupts,
 	 * clocks, etc) for that variant.
 	 */
@@ -78,11 +81,12 @@ struct vc4_hdmi_variant {
 
 	/* Callback to configure the video timings in the HDMI block */
 	void (*set_timings)(struct vc4_hdmi *vc4_hdmi,
+			    struct drm_connector_state *state,
 			    struct drm_display_mode *mode);
 
-	/* Callback to initialize the PHY according to the mode */
+	/* Callback to initialize the PHY according to the connector state */
 	void (*phy_init)(struct vc4_hdmi *vc4_hdmi,
-			 struct drm_display_mode *mode);
+			 struct vc4_hdmi_connector_state *vc4_conn_state);
 
 	/* Callback to disable the PHY */
 	void (*phy_disable)(struct vc4_hdmi *vc4_hdmi);
@@ -155,6 +159,7 @@ struct vc4_hdmi {
 	bool cec_tx_ok;
 	bool cec_irq_was_rx;
 
+	struct clk *cec_clock;
 	struct clk *pixel_clock;
 	struct clk *hsm_clock;
 	struct clk *audio_clock;
@@ -180,14 +185,25 @@ encoder_to_vc4_hdmi(struct drm_encoder *encoder)
 	return container_of(_encoder, struct vc4_hdmi, encoder);
 }
 
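+/* Subclassed connector state carrying the pixel rate the PHY must be
+ * programmed for (consumed by the phy_init callbacks).
+ */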
+struct vc4_hdmi_connector_state {
+	struct drm_connector_state	base;
+	unsigned long long		pixel_rate;
+};
+
+static inline struct vc4_hdmi_connector_state *
+conn_state_to_vc4_hdmi_conn_state(struct drm_connector_state *conn_state)
+{
+	return container_of(conn_state, struct vc4_hdmi_connector_state, base);
+}
+
 void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
-		       struct drm_display_mode *mode);
+		       struct vc4_hdmi_connector_state *vc4_conn_state);
 void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
 void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi);
 void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi);
 
 void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
-		       struct drm_display_mode *mode);
+		       struct vc4_hdmi_connector_state *vc4_conn_state);
 void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
 void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi);
 void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
index 057796b54c51ab59f76486eca88bb78e428d982b..36535480f8e2b62302ff3eafce83b1facb69e16a 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
@@ -127,7 +127,8 @@
 
 #define OSCILLATOR_FREQUENCY	54000000
 
-void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode)
+void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+		       struct vc4_hdmi_connector_state *conn_state)
 {
 	/* PHY should be in reset, like
 	 * vc4_hdmi_encoder_disable() does.
@@ -339,11 +340,12 @@ static void vc5_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi)
 	HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, BIT(10));
 }
 
-void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode)
+void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+		       struct vc4_hdmi_connector_state *conn_state)
 {
 	const struct phy_lane_settings *chan0_settings, *chan1_settings, *chan2_settings, *clock_settings;
 	const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
-	unsigned long long pixel_freq = mode->clock * 1000;
+	unsigned long long pixel_freq = conn_state->pixel_rate;
 	unsigned long long vco_freq;
 	unsigned char word_sel;
 	u8 vco_sel, vco_div;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
index 96d764ebfe67546bd9eb37b3883887b1a8833fe1..e1b58eac766f853add952dd9d18adbe58ffb964e 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
@@ -29,6 +29,7 @@ enum vc4_hdmi_field {
 	HDMI_CEC_CPU_MASK_SET,
 	HDMI_CEC_CPU_MASK_STATUS,
 	HDMI_CEC_CPU_STATUS,
+	HDMI_CEC_CPU_SET,
 
 	/*
 	 * Transmit data, first byte is low byte of the 32-bit reg.
@@ -59,9 +60,12 @@ enum vc4_hdmi_field {
 	 */
 	HDMI_CTS_0,
 	HDMI_CTS_1,
+	HDMI_DEEP_COLOR_CONFIG_1,
 	HDMI_DVP_CTL,
 	HDMI_FIFO_CTL,
 	HDMI_FRAME_COUNT,
+	HDMI_GCP_CONFIG,
+	HDMI_GCP_WORD_1,
 	HDMI_HORZA,
 	HDMI_HORZB,
 	HDMI_HOTPLUG,
@@ -196,9 +200,10 @@ static const struct vc4_hdmi_register __maybe_unused vc4_hdmi_fields[] = {
 	VC4_HDMI_REG(HDMI_TX_PHY_RESET_CTL, 0x02c0),
 	VC4_HDMI_REG(HDMI_TX_PHY_CTL_0, 0x02c4),
 	VC4_HDMI_REG(HDMI_CEC_CPU_STATUS, 0x0340),
+	VC4_HDMI_REG(HDMI_CEC_CPU_SET, 0x0344),
 	VC4_HDMI_REG(HDMI_CEC_CPU_CLEAR, 0x0348),
 	VC4_HDMI_REG(HDMI_CEC_CPU_MASK_STATUS, 0x034c),
-	VC4_HDMI_REG(HDMI_CEC_CPU_MASK_SET, 0x034c),
+	VC4_HDMI_REG(HDMI_CEC_CPU_MASK_SET, 0x0350),
 	VC4_HDMI_REG(HDMI_CEC_CPU_MASK_CLEAR, 0x0354),
 	VC4_HDMI_REG(HDMI_RAM_PACKET_START, 0x0400),
 };
@@ -229,6 +234,9 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi0_fields[] = {
 	VC4_HDMI_REG(HDMI_VERTB1, 0x0f8),
 	VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c),
 	VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0),
+	VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x170),
+	VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x178),
+	VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x17c),
 	VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8),
 
 	VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
@@ -305,6 +313,9 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi1_fields[] = {
 	VC4_HDMI_REG(HDMI_VERTB1, 0x0f8),
 	VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c),
 	VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0),
+	VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x170),
+	VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x178),
+	VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x17c),
 	VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8),
 
 	VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 3b722252d1fbe1090ecba3d4059a2fd7765c476e..c239045e05d6f1efd65c6501a3cb7f99ff0e5b1b 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -326,10 +326,10 @@ void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int chan)
 		     SCALER_DISPSTATX_EMPTY);
 }
 
-int vc4_hvs_atomic_check(struct drm_crtc *crtc,
-			 struct drm_crtc_state *state)
+int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
 {
-	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
 	struct drm_device *dev = crtc->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct drm_plane *plane;
@@ -341,10 +341,10 @@ int vc4_hvs_atomic_check(struct drm_crtc *crtc,
 	/* The pixelvalve can only feed one encoder (and encoders are
 	 * 1:1 with connectors.)
 	 */
-	if (hweight32(state->connector_mask) > 1)
+	if (hweight32(crtc_state->connector_mask) > 1)
 		return -EINVAL;
 
-	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state)
+	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state)
 		dlist_count += vc4_plane_dlist_size(plane_state);
 
 	dlist_count++; /* Account for SCALER_CTL0_END. */
@@ -391,11 +391,12 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
 }
 
 void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
-			   struct drm_crtc_state *old_state)
+			   struct drm_atomic_state *state)
 {
 	struct drm_device *dev = crtc->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
-	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+	struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(new_crtc_state);
 	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
 	bool oneshot = vc4_state->feed_txp;
 
@@ -404,9 +405,10 @@ void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
 }
 
 void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
-			    struct drm_crtc_state *old_state)
+			    struct drm_atomic_state *state)
 {
 	struct drm_device *dev = crtc->dev;
+	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
 	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(old_state);
 	unsigned int chan = vc4_state->assigned_channel;
 
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index ba310c0ab5f69e6b779e5a7058ef6741b8476d2b..f09254c2497d5e0b46431ce68587b832db8092fc 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -39,7 +39,11 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
 
 struct vc4_hvs_state {
 	struct drm_private_state base;
-	unsigned int unassigned_channels;
+
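+	/* Per-FIFO bookkeeping: whether a CRTC has claimed the FIFO, and
+	 * the commit (if any) currently in flight on it.
+	 */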
+	struct {
+		unsigned int in_use : 1;
+		struct drm_crtc_commit *pending_commit;
+	} fifo_state[HVS_NUM_CHANNELS];
 };
 
 static struct vc4_hvs_state *
@@ -182,6 +186,32 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
 		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
 }
 
+static struct vc4_hvs_state *
+vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+	struct drm_private_state *priv_state;
+
+	priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
+	if (IS_ERR(priv_state))
+		return ERR_CAST(priv_state);
+
+	return to_vc4_hvs_state(priv_state);
+}
+
+static struct vc4_hvs_state *
+vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+	struct drm_private_state *priv_state;
+
+	priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
+	if (IS_ERR(priv_state))
+		return ERR_CAST(priv_state);
+
+	return to_vc4_hvs_state(priv_state);
+}
+
 static struct vc4_hvs_state *
 vc4_hvs_get_global_state(struct drm_atomic_state *state)
 {
@@ -302,14 +332,15 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
 	}
 }
 
-static void
-vc4_atomic_complete_commit(struct drm_atomic_state *state)
+static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_hvs *hvs = vc4->hvs;
+	struct drm_crtc_state *old_crtc_state;
 	struct drm_crtc_state *new_crtc_state;
 	struct drm_crtc *crtc;
+	struct vc4_hvs_state *old_hvs_state;
 	int i;
 
 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -325,9 +356,35 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
 	if (vc4->hvs->hvs5)
 		clk_set_min_rate(hvs->core_clk, 500000000);
 
-	drm_atomic_helper_wait_for_fences(dev, state, false);
+	old_hvs_state = vc4_hvs_get_old_global_state(state);
+	if (IS_ERR(old_hvs_state))
+		return;
+
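+	/*
+	 * Wait for the previous commit on each FIFO we are about to use
+	 * to finish its hardware programming and page flip first.
+	 */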
+	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
+		struct vc4_crtc_state *vc4_crtc_state =
+			to_vc4_crtc_state(old_crtc_state);
+		struct drm_crtc_commit *commit;
+		unsigned int channel = vc4_crtc_state->assigned_channel;
+		unsigned long done;
+
+		if (channel == VC4_HVS_CHANNEL_DISABLED)
+			continue;
+
+		if (!old_hvs_state->fifo_state[channel].in_use)
+			continue;
+
+		commit = old_hvs_state->fifo_state[channel].pending_commit;
+		if (!commit)
+			continue;
 
-	drm_atomic_helper_wait_for_dependencies(state);
+		done = wait_for_completion_timeout(&commit->hw_done, 10 * HZ);
+		if (!done)
+			drm_err(dev, "Timed out waiting for hw_done\n");
+
+		done = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
+		if (!done)
+			drm_err(dev, "Timed out waiting for flip_done\n");
+	}
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
@@ -350,125 +407,37 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
 
 	drm_atomic_helper_cleanup_planes(dev, state);
 
-	drm_atomic_helper_commit_cleanup_done(state);
-
 	if (vc4->hvs->hvs5)
 		clk_set_min_rate(hvs->core_clk, 0);
-
-	drm_atomic_state_put(state);
-
-	up(&vc4->async_modeset);
-}
-
-static void commit_work(struct work_struct *work)
-{
-	struct drm_atomic_state *state = container_of(work,
-						      struct drm_atomic_state,
-						      commit_work);
-	vc4_atomic_complete_commit(state);
 }
 
-/**
- * vc4_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails. For
- * now this doesn't implement asynchronous commits.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-static int vc4_atomic_commit(struct drm_device *dev,
-			     struct drm_atomic_state *state,
-			     bool nonblock)
+static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
 {
-	struct vc4_dev *vc4 = to_vc4_dev(dev);
-	int ret;
-
-	if (state->async_update) {
-		ret = down_interruptible(&vc4->async_modeset);
-		if (ret)
-			return ret;
-
-		ret = drm_atomic_helper_prepare_planes(dev, state);
-		if (ret) {
-			up(&vc4->async_modeset);
-			return ret;
-		}
-
-		drm_atomic_helper_async_commit(dev, state);
-
-		drm_atomic_helper_cleanup_planes(dev, state);
-
-		up(&vc4->async_modeset);
-
-		return 0;
-	}
+	struct drm_crtc_state *crtc_state;
+	struct vc4_hvs_state *hvs_state;
+	struct drm_crtc *crtc;
+	unsigned int i;
 
-	/* We know for sure we don't want an async update here. Set
-	 * state->legacy_cursor_update to false to prevent
-	 * drm_atomic_helper_setup_commit() from auto-completing
-	 * commit->flip_done.
-	 */
-	state->legacy_cursor_update = false;
-	ret = drm_atomic_helper_setup_commit(state, nonblock);
-	if (ret)
-		return ret;
+	hvs_state = vc4_hvs_get_new_global_state(state);
+	if (IS_ERR(hvs_state))
+		return PTR_ERR(hvs_state);
 
-	INIT_WORK(&state->commit_work, commit_work);
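+	/*
+	 * Stash a reference to this commit in every FIFO it claims, so
+	 * the next commit touching the same FIFO can wait for it in its
+	 * commit tail.
+	 */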
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+		struct vc4_crtc_state *vc4_crtc_state =
+			to_vc4_crtc_state(crtc_state);
+		unsigned int channel =
+			vc4_crtc_state->assigned_channel;
 
-	ret = down_interruptible(&vc4->async_modeset);
-	if (ret)
-		return ret;
+		if (channel == VC4_HVS_CHANNEL_DISABLED)
+			continue;
 
-	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (ret) {
-		up(&vc4->async_modeset);
-		return ret;
-	}
+		if (!hvs_state->fifo_state[channel].in_use)
+			continue;
 
-	if (!nonblock) {
-		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
-		if (ret) {
-			drm_atomic_helper_cleanup_planes(dev, state);
-			up(&vc4->async_modeset);
-			return ret;
-		}
+		hvs_state->fifo_state[channel].pending_commit =
+			drm_crtc_commit_get(crtc_state->commit);
 	}
 
-	/*
-	 * This is the point of no return - everything below never fails except
-	 * when the hw goes bonghits. Which means we can commit the new state on
-	 * the software side now.
-	 */
-
-	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
-
-	/*
-	 * Everything below can be run asynchronously without the need to grab
-	 * any modeset locks at all under one condition: It must be guaranteed
-	 * that the asynchronous work has either been cancelled (if the driver
-	 * supports it, which at least requires that the framebuffers get
-	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
-	 * before the new state gets committed on the software side with
-	 * drm_atomic_helper_swap_state().
-	 *
-	 * This scheme allows new atomic state updates to be prepared and
-	 * checked in parallel to the asynchronous completion of the previous
-	 * update. Which is important since compositors need to figure out the
-	 * composition of the next frame right after having submitted the
-	 * current layout.
-	 */
-
-	drm_atomic_state_get(state);
-	if (nonblock)
-		queue_work(system_unbound_wq, &state->commit_work);
-	else
-		vc4_atomic_complete_commit(state);
-
 	return 0;
 }
 
@@ -697,6 +666,7 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
 {
 	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
 	struct vc4_hvs_state *state;
+	unsigned int i;
 
 	state = kzalloc(sizeof(*state), GFP_KERNEL);
 	if (!state)
@@ -704,7 +674,16 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
 
 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
 
-	state->unassigned_channels = old_state->unassigned_channels;
+	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
+		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
+
+		if (!old_state->fifo_state[i].pending_commit)
+			continue;
+
+		state->fifo_state[i].pending_commit =
+			drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
+	}
 
 	return &state->base;
 }
@@ -713,6 +692,14 @@ static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
 					   struct drm_private_state *state)
 {
 	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
+	unsigned int i;
+
+	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
+		if (!hvs_state->fifo_state[i].pending_commit)
+			continue;
+
+		drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
+	}
 
 	kfree(hvs_state);
 }
@@ -737,7 +724,6 @@ static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
 	if (!state)
 		return -ENOMEM;
 
-	state->unassigned_channels = GENMASK(HVS_NUM_CHANNELS - 1, 0);
 	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
 				    &state->base,
 				    &vc4_hvs_state_funcs);
@@ -781,12 +767,17 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
 	struct vc4_hvs_state *hvs_new_state;
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	struct drm_crtc *crtc;
+	unsigned int unassigned_channels = 0;
 	unsigned int i;
 
 	hvs_new_state = vc4_hvs_get_global_state(state);
 	if (!hvs_new_state)
 		return -EINVAL;
 
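+	/* Rebuild the free-channel mask from the per-FIFO in_use flags. */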
+	for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
+		if (!hvs_new_state->fifo_state[i].in_use)
+			unassigned_channels |= BIT(i);
+
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		struct vc4_crtc_state *old_vc4_crtc_state =
 			to_vc4_crtc_state(old_crtc_state);
@@ -794,6 +785,7 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
 			to_vc4_crtc_state(new_crtc_state);
 		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
 		unsigned int matching_channels;
+		unsigned int channel;
 
 		/* Nothing to do here, let's skip it */
 		if (old_crtc_state->enable == new_crtc_state->enable)
@@ -804,7 +796,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
 
 		/* If we're disabling our CRTC, we put back our channel */
 		if (!new_crtc_state->enable) {
-			hvs_new_state->unassigned_channels |= BIT(old_vc4_crtc_state->assigned_channel);
+			channel = old_vc4_crtc_state->assigned_channel;
+			hvs_new_state->fifo_state[channel].in_use = false;
 			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
 			continue;
 		}
@@ -833,15 +826,14 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
 		 * the future, we will need to have something smarter,
 		 * but it works so far.
 		 */
-		matching_channels = hvs_new_state->unassigned_channels & vc4_crtc->data->hvs_available_channels;
-		if (matching_channels) {
-			unsigned int channel = ffs(matching_channels) - 1;
-
-			new_vc4_crtc_state->assigned_channel = channel;
-			hvs_new_state->unassigned_channels &= ~BIT(channel);
-		} else {
+		matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
+		if (!matching_channels)
 			return -EINVAL;
-		}
+
+		channel = ffs(matching_channels) - 1;
+		new_vc4_crtc_state->assigned_channel = channel;
+		unassigned_channels &= ~BIT(channel);
+		hvs_new_state->fifo_state[channel].in_use = true;
 	}
 
 	return 0;
@@ -867,9 +859,14 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
 	return vc4_load_tracker_atomic_check(state);
 }
 
+static const struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
+	.atomic_commit_setup	= vc4_atomic_commit_setup,
+	.atomic_commit_tail	= vc4_atomic_commit_tail,
+};
+
 static const struct drm_mode_config_funcs vc4_mode_funcs = {
 	.atomic_check = vc4_atomic_check,
-	.atomic_commit = vc4_atomic_commit,
+	.atomic_commit = drm_atomic_helper_commit,
 	.fb_create = vc4_fb_create,
 };
 
@@ -889,8 +886,6 @@ int vc4_kms_load(struct drm_device *dev)
 		vc4->load_tracker_enabled = true;
 	}
 
-	sema_init(&vc4->async_modeset, 1);
-
 	/* Set support for vblank irq fast disable, before drm_vblank_init() */
 	dev->vblank_disable_immediate = true;
 
@@ -910,6 +905,7 @@ int vc4_kms_load(struct drm_device *dev)
 	}
 
 	dev->mode_config.funcs = &vc4_mode_funcs;
+	dev->mode_config.helper_private = &vc4_mode_config_helpers;
 	dev->mode_config.preferred_depth = 24;
 	dev->mode_config.async_page_flip = true;
 	dev->mode_config.allow_fb_modifiers = true;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index af4b8944a60327f4f5da0f34d2bb4e7b48c58e08..7322169c0682fe8564f014ed1e56b5916aab2c2c 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -1283,11 +1283,6 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
 	.atomic_async_update = vc4_plane_atomic_async_update,
 };
 
-static void vc4_plane_destroy(struct drm_plane *plane)
-{
-	drm_plane_cleanup(plane);
-}
-
 static bool vc4_format_mod_supported(struct drm_plane *plane,
 				     uint32_t format,
 				     uint64_t modifier)
@@ -1338,7 +1333,7 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
 static const struct drm_plane_funcs vc4_plane_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = vc4_plane_destroy,
+	.destroy = drm_plane_cleanup,
 	.set_property = NULL,
 	.reset = vc4_plane_reset,
 	.atomic_duplicate_state = vc4_plane_duplicate_state,
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 8aa5220885f4a72bbcb669514a0c36a27b9c29d2..c0122d83b6511c35c71de5ae4d30ffba254502db 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -382,7 +382,6 @@ static const struct drm_crtc_funcs vc4_txp_crtc_funcs = {
 	.reset			= vc4_crtc_reset,
 	.atomic_duplicate_state	= vc4_crtc_duplicate_state,
 	.atomic_destroy_state	= vc4_crtc_destroy_state,
-	.gamma_set		= drm_atomic_helper_legacy_gamma_set,
 	.enable_vblank		= vc4_txp_enable_vblank,
 	.disable_vblank		= vc4_txp_disable_vblank,
 };
@@ -395,7 +394,7 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc,
 	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
 	int ret;
 
-	ret = vc4_hvs_atomic_check(crtc, crtc_state);
+	ret = vc4_hvs_atomic_check(crtc, state);
 	if (ret)
 		return ret;
 
@@ -408,23 +407,19 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc,
 static void vc4_txp_atomic_enable(struct drm_crtc *crtc,
 				  struct drm_atomic_state *state)
 {
-	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
-									 crtc);
 	drm_crtc_vblank_on(crtc);
-	vc4_hvs_atomic_enable(crtc, old_state);
+	vc4_hvs_atomic_enable(crtc, state);
 }
 
 static void vc4_txp_atomic_disable(struct drm_crtc *crtc,
 				   struct drm_atomic_state *state)
 {
-	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
-									 crtc);
 	struct drm_device *dev = crtc->dev;
 
 	/* Disable vblank irq handling before crtc is disabled. */
 	drm_crtc_vblank_off(crtc);
 
-	vc4_hvs_atomic_disable(crtc, old_state);
+	vc4_hvs_atomic_disable(crtc, state);
 
 	/*
 	 * Make sure we issue a vblank event after disabling the CRTC if
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index f8635ccaf9a1703338aa633ff1507b5611e0d473..a0e75f1d5d016cee0832bf9185f68d700f9ab702 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -356,8 +356,7 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
 	}
 
 	obj->pages_pin_count++; /* perma-pinned */
-	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
-					npages);
+	drm_prime_sg_to_page_array(obj->table, obj->pages, npages);
 	return &obj->base;
 }
 
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index a3e0fb5b86713347aec015101a931a379c15da41..faeae5d881fb3dacb8f201efb734f525b9084780 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -308,7 +308,7 @@ int via_driver_irq_postinstall(struct drm_device *dev)
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	u32 status;
 
-	DRM_DEBUG("via_driver_irq_postinstall\n");
+	DRM_DEBUG("fun: %s\n", __func__);
 	if (!dev_priv)
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index b925b8b1da1677128df9005cc817764add8aa8c4..51ec7c3240c91c2b0540a29d0feac31ff1267cfe 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config DRM_VIRTIO_GPU
 	tristate "Virtio GPU driver"
-	depends on DRM && VIRTIO && VIRTIO_MENU && MMU
+	depends on DRM && VIRTIO_MENU && MMU
+	select VIRTIO
 	select DRM_KMS_HELPER
 	select DRM_GEM_SHMEM_HELPER
 	select VIRTIO_DMA_SHARED_BUFFER
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 5fefc88d47e45e03a2acb9c6deee19f69fa53d05..c2b20e0ee030684e848292861e3cf979cb52cae7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -28,14 +28,13 @@
 
 #include "virtgpu_drv.h"
 
-static void virtio_add_bool(struct seq_file *m, const char *name,
-				    bool value)
+static void virtio_gpu_add_bool(struct seq_file *m, const char *name,
+				bool value)
 {
 	seq_printf(m, "%-16s : %s\n", name, value ? "yes" : "no");
 }
 
-static void virtio_add_int(struct seq_file *m, const char *name,
-				   int value)
+static void virtio_gpu_add_int(struct seq_file *m, const char *name, int value)
 {
 	seq_printf(m, "%-16s : %d\n", name, value);
 }
@@ -45,13 +44,16 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
 	struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
 
-	virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
-	virtio_add_bool(m, "edid", vgdev->has_edid);
-	virtio_add_bool(m, "indirect", vgdev->has_indirect);
-	virtio_add_bool(m, "resource uuid", vgdev->has_resource_assign_uuid);
-	virtio_add_bool(m, "blob resources", vgdev->has_resource_blob);
-	virtio_add_int(m, "cap sets", vgdev->num_capsets);
-	virtio_add_int(m, "scanouts", vgdev->num_scanouts);
+	virtio_gpu_add_bool(m, "virgl", vgdev->has_virgl_3d);
+	virtio_gpu_add_bool(m, "edid", vgdev->has_edid);
+	virtio_gpu_add_bool(m, "indirect", vgdev->has_indirect);
+
+	virtio_gpu_add_bool(m, "resource uuid",
+			    vgdev->has_resource_assign_uuid);
+
+	virtio_gpu_add_bool(m, "blob resources", vgdev->has_resource_blob);
+	virtio_gpu_add_int(m, "cap sets", vgdev->num_capsets);
+	virtio_gpu_add_int(m, "scanouts", vgdev->num_scanouts);
 	if (vgdev->host_visible_region.len) {
 		seq_printf(m, "%-16s : 0x%lx +0x%lx\n", "host visible region",
 			   (unsigned long)vgdev->host_visible_region.addr,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 27f13bd29c13c1a3f301fb7c926b5d0eb019bdad..a21dc3ad6f88802ffa72ecb75ecaee70b387bcdc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -54,7 +54,6 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vd
 	DRM_INFO("pci: %s detected at %s\n",
 		 vga ? "virtio-vga" : "virtio-gpu-pci",
 		 pname);
-	dev->pdev = pdev;
 	if (vga)
 		drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
 								  "virtiodrmfb");
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 6a232553c99baaed7343e7a97448f62809703f71..d9dbc4f258f3a2f1653cf9ab4b88d71bd13c1e92 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -136,6 +136,7 @@ struct virtio_gpu_fence_driver {
 
 struct virtio_gpu_fence {
 	struct dma_fence f;
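+	/* Fence ID sent to the host in the command header; set together
+	 * with f.seqno in virtio_gpu_fence_emit().
+	 */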
+	uint64_t fence_id;
 	struct virtio_gpu_fence_driver *drv;
 	struct list_head node;
 };
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 728ca36f63274e2eb735db382b64158778e13ff1..d28e25e8409b0ff4e2ecb173bcf93de812163eb6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -27,51 +27,48 @@
 
 #include "virtgpu_drv.h"
 
-#define to_virtio_fence(x) \
+#define to_virtio_gpu_fence(x) \
 	container_of(x, struct virtio_gpu_fence, f)
 
-static const char *virtio_get_driver_name(struct dma_fence *f)
+static const char *virtio_gpu_get_driver_name(struct dma_fence *f)
 {
 	return "virtio_gpu";
 }
 
-static const char *virtio_get_timeline_name(struct dma_fence *f)
+static const char *virtio_gpu_get_timeline_name(struct dma_fence *f)
 {
 	return "controlq";
 }
 
-static bool virtio_fence_signaled(struct dma_fence *f)
+static bool virtio_gpu_fence_signaled(struct dma_fence *f)
 {
-	struct virtio_gpu_fence *fence = to_virtio_fence(f);
-
-	if (WARN_ON_ONCE(fence->f.seqno == 0))
-		/* leaked fence outside driver before completing
-		 * initialization with virtio_gpu_fence_emit */
-		return false;
-	if (atomic64_read(&fence->drv->last_fence_id) >= fence->f.seqno)
-		return true;
+	/* A seqno of 0 means the fence leaked out of the driver before
+	 * virtio_gpu_fence_emit() finished initializing it.
+	 */
+	WARN_ON_ONCE(f->seqno == 0);
 	return false;
 }
 
-static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
+static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
 {
-	snprintf(str, size, "%llu", f->seqno);
+	snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
 }
 
-static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
+static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
+					  int size)
 {
-	struct virtio_gpu_fence *fence = to_virtio_fence(f);
+	struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
 
 	snprintf(str, size, "%llu",
 		 (u64)atomic64_read(&fence->drv->last_fence_id));
 }
 
-static const struct dma_fence_ops virtio_fence_ops = {
-	.get_driver_name     = virtio_get_driver_name,
-	.get_timeline_name   = virtio_get_timeline_name,
-	.signaled            = virtio_fence_signaled,
-	.fence_value_str     = virtio_fence_value_str,
-	.timeline_value_str  = virtio_timeline_value_str,
+static const struct dma_fence_ops virtio_gpu_fence_ops = {
+	.get_driver_name     = virtio_gpu_get_driver_name,
+	.get_timeline_name   = virtio_gpu_get_timeline_name,
+	.signaled            = virtio_gpu_fence_signaled,
+	.fence_value_str     = virtio_gpu_fence_value_str,
+	.timeline_value_str  = virtio_gpu_timeline_value_str,
 };
 
 struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
@@ -88,7 +85,8 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
 	 * unknown yet.  The fence must not be used outside of the driver
 	 * until virtio_gpu_fence_emit is called.
 	 */
-	dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);
+	dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock, drv->context,
+		       0);
 
 	return fence;
 }
@@ -101,7 +99,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&drv->lock, irq_flags);
-	fence->f.seqno = ++drv->current_fence_id;
+	fence->fence_id = fence->f.seqno = ++drv->current_fence_id;
 	dma_fence_get(&fence->f);
 	list_add_tail(&fence->node, &drv->fences);
 	spin_unlock_irqrestore(&drv->lock, irq_flags);
@@ -109,24 +107,45 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 	trace_dma_fence_emit(&fence->f);
 
 	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
-	cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);
+	cmd_hdr->fence_id = cpu_to_le64(fence->fence_id);
 }
 
 void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
 				    u64 fence_id)
 {
 	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
-	struct virtio_gpu_fence *fence, *tmp;
+	struct virtio_gpu_fence *signaled, *curr, *tmp;
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&drv->lock, irq_flags);
 	atomic64_set(&vgdev->fence_drv.last_fence_id, fence_id);
-	list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
-		if (fence_id < fence->f.seqno)
+	list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
+		if (fence_id != curr->fence_id)
 			continue;
-		dma_fence_signal_locked(&fence->f);
-		list_del(&fence->node);
-		dma_fence_put(&fence->f);
+
+		signaled = curr;
+
+		/*
+		 * Signal any fences with a strictly smaller sequence number
+		 * than the current signaled fence.
+		 */
+		list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
+			/* dma-fence contexts must match */
+			if (signaled->f.context != curr->f.context)
+				continue;
+
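+			/* Only signal fences strictly older than the one
+			 * the host just completed.
+			 */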
+			if (!dma_fence_is_later(&signaled->f, &curr->f))
+				continue;
+
+			dma_fence_signal_locked(&curr->f);
+			list_del(&curr->node);
+			dma_fence_put(&curr->f);
+		}
+
+		dma_fence_signal_locked(&signaled->f);
+		list_del(&signaled->node);
+		dma_fence_put(&signaled->f);
+		break;
 	}
 	spin_unlock_irqrestore(&drv->lock, irq_flags);
 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index c30c75ee83fce0c378a03168694210d6e14d4123..8502400b2f9c910c7d4451025c8f3ca10accdc22 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -39,9 +39,6 @@ static int virtio_gpu_gem_create(struct drm_file *file,
 	int ret;
 	u32 handle;
 
-	if (vgdev->has_virgl_3d)
-		virtio_gpu_create_context(dev, file);
-
 	ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
 	if (ret < 0)
 		return ret;
@@ -119,6 +116,11 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
 	if (!vgdev->has_virgl_3d)
 		goto out_notify;
 
+	/* The context might still be missing when the first ioctl is
+	 * DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE.
+	 */
+	virtio_gpu_create_context(obj->dev, file);
+
 	objs = virtio_gpu_array_alloc(1);
 	if (!objs)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index b4ec479c32cda13cfd5e125c70878104afb5b877..b375394193be822dcb1d655078e47396c9787b4c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -163,6 +163,7 @@ int virtio_gpu_init(struct drm_device *dev)
 					     vgdev->host_visible_region.len,
 					     dev_name(&vgdev->vdev->dev))) {
 			DRM_ERROR("Could not reserve host visible region\n");
+			ret = -EBUSY;
 			goto err_vqs;
 		}
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
index 23c21bc4d01e2d1a83857aa450d6ca920c377839..5cc34e7330fa5ab45fbbfa412c6d9adef6bca1cf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vram.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -69,6 +69,7 @@ static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
 	.close = virtio_gpu_gem_object_close,
 	.free = virtio_gpu_vram_free,
 	.mmap = virtio_gpu_vram_mmap,
+	.export = virtgpu_gem_prime_export,
 };
 
 bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
@@ -134,6 +135,8 @@ int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
 
 	obj = &vram->base.base.base;
 	obj->funcs = &virtio_gpu_vram_funcs;
+
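+	/* Mappings are page-granular, so round the object size up. */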
+	params->size = PAGE_ALIGN(params->size);
 	drm_gem_private_object_init(vgdev->ddev, obj, params->size);
 
 	/* Create fake offset */
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index d4d39227f2ed47ee82cfd57cd2d36aed0e8edeed..2173b82606f661313870aa0643bb505ba79400d3 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -34,12 +34,16 @@
 #define DRIVER_MAJOR	1
 #define DRIVER_MINOR	0
 
-static struct vkms_device *vkms_device;
+static struct vkms_config *default_config;
 
-bool enable_cursor = true;
+static bool enable_cursor = true;
 module_param_named(enable_cursor, enable_cursor, bool, 0444);
 MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support");
 
+static bool enable_writeback = true;
+module_param_named(enable_writeback, enable_writeback, bool, 0444);
+MODULE_PARM_DESC(enable_writeback, "Enable/Disable writeback connector support");
+
 DEFINE_DRM_GEM_FOPS(vkms_driver_fops);
 
 static void vkms_release(struct drm_device *dev)
@@ -113,16 +117,20 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
 	dev->mode_config.max_height = YRES_MAX;
 	dev->mode_config.cursor_width = 512;
 	dev->mode_config.cursor_height = 512;
-	dev->mode_config.preferred_depth = 32;
+	/* FIXME: bpp and depth are confused between this driver and the
+	 * fbdev helpers. We have to go with 0, meaning "pick the default",
+	 * which is XRGB8888 in all cases.
+	 */
+	dev->mode_config.preferred_depth = 0;
 	dev->mode_config.helper_private = &vkms_mode_config_helpers;
 
 	return vkms_output_init(vkmsdev, 0);
 }
 
-static int __init vkms_init(void)
+static int vkms_create(struct vkms_config *config)
 {
 	int ret;
 	struct platform_device *pdev;
+	struct vkms_device *vkms_device;
 
 	pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
 	if (IS_ERR(pdev))
@@ -140,6 +148,8 @@ static int __init vkms_init(void)
 		goto out_devres;
 	}
 	vkms_device->platform = pdev;
+	vkms_device->config = config;
+	config->dev = vkms_device;
 
 	ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
 					   DMA_BIT_MASK(64));
@@ -176,21 +186,47 @@ static int __init vkms_init(void)
 	return ret;
 }
 
-static void __exit vkms_exit(void)
+static int __init vkms_init(void)
+{
+	struct vkms_config *config;
+
+	config = kmalloc(sizeof(*config), GFP_KERNEL);
+	if (!config)
+		return -ENOMEM;
+
+	default_config = config;
+
+	config->cursor = enable_cursor;
+	config->writeback = enable_writeback;
+
+	return vkms_create(config);
+}
+
+static void vkms_destroy(struct vkms_config *config)
 {
 	struct platform_device *pdev;
 
-	if (!vkms_device) {
+	if (!config->dev) {
 		DRM_INFO("vkms_device is NULL.\n");
 		return;
 	}
 
-	pdev = vkms_device->platform;
+	pdev = config->dev->platform;
 
-	drm_dev_unregister(&vkms_device->drm);
-	drm_atomic_helper_shutdown(&vkms_device->drm);
+	drm_dev_unregister(&config->dev->drm);
+	drm_atomic_helper_shutdown(&config->dev->drm);
 	devres_release_group(&pdev->dev, NULL);
 	platform_device_unregister(pdev);
+
+	config->dev = NULL;
+}
+
+static void __exit vkms_exit(void)
+{
+	if (default_config->dev)
+		vkms_destroy(default_config);
+
+	kfree(default_config);
 }
 
 module_init(vkms_init);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 5ed91ff08cb326c05ffab3de855d35300578e6ed..35540c7c4416925695bb7cbda716a2598bec28c3 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -19,8 +19,6 @@
 #define XRES_MAX  8192
 #define YRES_MAX  8192
 
-extern bool enable_cursor;
-
 struct vkms_composer {
 	struct drm_framebuffer fb;
 	struct drm_rect src, dst;
@@ -82,10 +80,20 @@ struct vkms_output {
 	spinlock_t composer_lock;
 };
 
+struct vkms_device;
+
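+/* Configuration for a vkms instance, filled in before vkms_create(). */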
+struct vkms_config {
+	bool writeback;
+	bool cursor;
+	/* only set when instantiated */
+	struct vkms_device *dev;
+};
+
 struct vkms_device {
 	struct drm_device drm;
 	struct platform_device *platform;
 	struct vkms_output output;
+	const struct vkms_config *config;
 };
 
 #define drm_crtc_to_vkms_output(target) \
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 4a1848b0318fb2ca2af2918d7e967dabe8fdda0b..f5f6f15c362c0011b4f0bbc32314c177f4e378ef 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -41,12 +41,13 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
 	struct drm_crtc *crtc = &output->crtc;
 	struct drm_plane *primary, *cursor = NULL;
 	int ret;
+	int writeback;
 
 	primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index);
 	if (IS_ERR(primary))
 		return PTR_ERR(primary);
 
-	if (enable_cursor) {
+	if (vkmsdev->config->cursor) {
 		cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index);
 		if (IS_ERR(cursor)) {
 			ret = PTR_ERR(cursor);
@@ -80,9 +81,11 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
 		goto err_attach;
 	}
 
-	ret = vkms_enable_writeback_connector(vkmsdev);
-	if (ret)
-		DRM_ERROR("Failed to init writeback connector\n");
+	if (vkmsdev->config->writeback) {
+		writeback = vkms_enable_writeback_connector(vkmsdev);
+		if (writeback)
+			DRM_ERROR("Failed to init writeback connector\n");
+	}
 
 	drm_mode_config_reset(dev);
 
@@ -98,7 +101,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
 	drm_crtc_cleanup(crtc);
 
 err_crtc:
-	if (enable_cursor)
+	if (vkmsdev->config->cursor)
 		drm_plane_cleanup(cursor);
 
 err_cursor:
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 31f85f09f1fcaa7105b755e7a03d4b40c2ef8b32..cc4cdca7176e5652e6bdaa44ebf59af89e9b1431 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
-	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
-	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
-	    vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
+	    vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
+	    vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
+	    vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
 	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
 	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
 	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 16077785ad47eeb0d6d7d53d0881ce978af62500..0fe869d0fad12692cbde574a0642d5421de9a313 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -59,7 +59,6 @@
 
 #define pr_fmt(fmt) "[TTM] " fmt
 
-#include <drm/ttm/ttm_module.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index f41550797970b5f7cc39ea20fa2eec18da69367e..180f6dbc9460da1c691dc4319f1f8ea06340cb7a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -555,7 +555,7 @@ static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
 		SVGA3dCmdSetShader body;
 	} *cmd;
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -564,7 +564,7 @@ static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
 	cmd->body.cid = bi->ctx->id;
 	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
 	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
 }
@@ -587,7 +587,7 @@ static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
 		SVGA3dCmdSetRenderTarget body;
 	} *cmd;
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -598,7 +598,7 @@ static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
 	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	cmd->body.target.face = 0;
 	cmd->body.target.mipmap = 0;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
 }
@@ -626,7 +626,7 @@ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
 		} body;
 	} *cmd;
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -636,7 +636,7 @@ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
 	cmd->body.s1.stage = binding->texture_stage;
 	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
 	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
 }
@@ -657,7 +657,7 @@ static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
 		SVGA3dCmdDXSetShader body;
 	} *cmd;
 
-	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -665,7 +665,7 @@ static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
 	cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
 }
@@ -686,7 +686,7 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
 		SVGA3dCmdDXSetSingleConstantBuffer body;
 	} *cmd;
 
-	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -703,7 +703,7 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
 		cmd->body.sizeInBytes = 0;
 		cmd->body.sid = SVGA3D_INVALID_ID;
 	}
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
 }
@@ -810,7 +810,7 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
 
 	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
 	cmd_size = sizeof(*cmd) + view_id_size;
-	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -821,7 +821,7 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
 
 	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
 
-	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+	vmw_cmd_commit(ctx->dev_priv, cmd_size);
 	bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
 		     cbs->bind_first_slot, cbs->bind_cmd_count);
 
@@ -846,7 +846,7 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
 	vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
 	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
 	cmd_size = sizeof(*cmd) + view_id_size;
-	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -860,7 +860,7 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
 
 	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
 
-	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+	vmw_cmd_commit(ctx->dev_priv, cmd_size);
 
 	return 0;
 
@@ -930,7 +930,7 @@ static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
 
 	so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
 	cmd_size = sizeof(*cmd) + so_target_size;
-	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -938,7 +938,7 @@ static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
 	cmd->header.size = sizeof(cmd->body) + so_target_size;
 	memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
 
-	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+	vmw_cmd_commit(ctx->dev_priv, cmd_size);
 
 	return 0;
 
@@ -1044,7 +1044,7 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
 
 	set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
 	cmd_size = sizeof(*cmd) + set_vb_size;
-	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -1054,7 +1054,7 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
 
 	memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
 
-	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+	vmw_cmd_commit(ctx->dev_priv, cmd_size);
 	bitmap_clear(cbs->dirty_vb,
 		     cbs->bind_first_slot, cbs->bind_cmd_count);
 
@@ -1074,7 +1074,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
 	vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
 	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
 	cmd_size = sizeof(*cmd) + view_id_size;
-	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
 	if (!cmd)
 		return -ENOMEM;
 
@@ -1086,7 +1086,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
 
 	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
 
-	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+	vmw_cmd_commit(ctx->dev_priv, cmd_size);
 
 	return 0;
 }
@@ -1104,7 +1104,7 @@ static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
 	vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
 	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
 	cmd_size = sizeof(*cmd) + view_id_size;
-	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
 	if (!cmd)
 		return -ENOMEM;
 
@@ -1116,7 +1116,7 @@ static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
 
 	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
 
-	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+	vmw_cmd_commit(ctx->dev_priv, cmd_size);
 
 	return 0;
 }
@@ -1263,7 +1263,7 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
 		SVGA3dCmdDXSetIndexBuffer body;
 	} *cmd;
 
-	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -1279,7 +1279,7 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
 		cmd->body.offset = 0;
 	}
 
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
 }
@@ -1315,14 +1315,14 @@ static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
 		SVGA3dCmdDXSetStreamOutput body;
 	} *cmd;
 
-	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
 	if (!cmd)
 		return -ENOMEM;
 
 	cmd->header.id = SVGA_3D_CMD_DX_SET_STREAMOUTPUT;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.soid = rebind ? bi->res->id : SVGA3D_INVALID_ID;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index f21881e087dbb53678a9165124a584beab6e04f4..9f2779ddcf083c37f44c6529a753b1e3ce719bf9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -482,8 +482,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 	d.src_addr = NULL;
 	d.dst_pages = dst->ttm->pages;
 	d.src_pages = src->ttm->pages;
-	d.dst_num_pages = dst->num_pages;
-	d.src_num_pages = src->num_pages;
+	d.dst_num_pages = dst->mem.num_pages;
+	d.src_num_pages = src->mem.num_pages;
 	d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
 	d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
 	d.diff = diff;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 263d76ae43f02d870a0670d57fc4b7efab908088..63dbc44eebe0b85e0a911637e0c30e92774b0da5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -223,7 +223,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
 	uint32_t new_flags;
 
 	place = vmw_vram_placement.placement[0];
-	place.lpfn = bo->num_pages;
+	place.lpfn = bo->mem.num_pages;
 	placement.num_placement = 1;
 	placement.placement = &place;
 	placement.num_busy_placement = 1;
@@ -244,7 +244,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
 	 * that situation.
 	 */
 	if (bo->mem.mem_type == TTM_PL_VRAM &&
-	    bo->mem.start < bo->num_pages &&
+	    bo->mem.start < bo->mem.num_pages &&
 	    bo->mem.start > 0 &&
 	    buf->base.pin_count == 0) {
 		ctx.interruptible = false;
@@ -391,7 +391,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
 	if (virtual)
 		return virtual;
 
-	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+	ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map);
 	if (ret)
 		DRM_ERROR("Buffer object map failed: %d.\n", ret);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
similarity index 82%
rename from drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
rename to drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index a95156fc5db73ebfaceda3f3cd1111bf836cd73d..7400d617ae3cc209fff43c7d77d165a8504a5ac9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2020 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -36,9 +36,8 @@ struct vmw_temp_set_context {
 	SVGA3dCmdDXTempSetContext body;
 };
 
-bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
+bool vmw_supports_3d(struct vmw_private *dev_priv)
 {
-	u32 *fifo_mem = dev_priv->mmio_virt;
 	uint32_t fifo_min, hwversion;
 	const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
@@ -62,15 +61,15 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
 		return false;
 
-	fifo_min = vmw_mmio_read(fifo_mem  + SVGA_FIFO_MIN);
+	fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
 	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
 		return false;
 
-	hwversion = vmw_mmio_read(fifo_mem +
-				  ((fifo->capabilities &
-				    SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
-				   SVGA_FIFO_3D_HWVERSION_REVISED :
-				   SVGA_FIFO_3D_HWVERSION));
+	hwversion = vmw_fifo_mem_read(dev_priv,
+				      ((fifo->capabilities &
+					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+					       SVGA_FIFO_3D_HWVERSION_REVISED :
+					       SVGA_FIFO_3D_HWVERSION));
 
 	if (hwversion == 0)
 		return false;
@@ -87,13 +86,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 
 bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
 {
-	u32  *fifo_mem = dev_priv->mmio_virt;
 	uint32_t caps;
 
 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
 		return false;
 
-	caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
+	caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
 	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
 		return true;
 
@@ -102,7 +100,6 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
 
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-	u32  *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max;
 	uint32_t min;
 
@@ -129,6 +126,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
 	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
 		  SVGA_REG_ENABLE_HIDE);
+
 	vmw_write(dev_priv, SVGA_REG_TRACES, 0);
 
 	min = 4;
@@ -139,19 +137,19 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	if (min < PAGE_SIZE)
 		min = PAGE_SIZE;
 
-	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
-	vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
+	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min);
+	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size);
 	wmb();
-	vmw_mmio_write(min,  fifo_mem + SVGA_FIFO_NEXT_CMD);
-	vmw_mmio_write(min,  fifo_mem + SVGA_FIFO_STOP);
-	vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
+	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min);
+	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min);
+	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0);
 	mb();
 
 	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
 
-	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
-	min = vmw_mmio_read(fifo_mem  + SVGA_FIFO_MIN);
-	fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
+	max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
+	min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
+	fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
 
 	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
 		 (unsigned int) max,
@@ -159,15 +157,14 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 		 (unsigned int) fifo->capabilities);
 
 	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
-	vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
-	vmw_marker_queue_init(&fifo->marker_queue);
+	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, dev_priv->last_read_seqno);
 
 	return 0;
 }
 
 void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
-	u32 *fifo_mem = dev_priv->mmio_virt;
+	u32 *fifo_mem = dev_priv->fifo_mem;
 
 	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
 		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
@@ -175,13 +172,11 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 
 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-	u32  *fifo_mem = dev_priv->mmio_virt;
-
 	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
 	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
 		;
 
-	dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+	dev_priv->last_read_seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
 
 	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
 		  dev_priv->config_done_state);
@@ -190,8 +185,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	vmw_write(dev_priv, SVGA_REG_TRACES,
 		  dev_priv->traces_state);
 
-	vmw_marker_queue_takedown(&fifo->marker_queue);
-
 	if (likely(fifo->static_buffer != NULL)) {
 		vfree(fifo->static_buffer);
 		fifo->static_buffer = NULL;
@@ -205,11 +198,10 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
 static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
 {
-	u32  *fifo_mem = dev_priv->mmio_virt;
-	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
-	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
-	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
-	uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
+	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
+	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
+	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
+	uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
 
 	return ((max - next_cmd) + (stop - min) <= bytes);
 }
@@ -298,7 +290,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 				    uint32_t bytes)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-	u32  *fifo_mem = dev_priv->mmio_virt;
+	u32  *fifo_mem = dev_priv->fifo_mem;
 	uint32_t max;
 	uint32_t min;
 	uint32_t next_cmd;
@@ -306,9 +298,9 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 	int ret;
 
 	mutex_lock(&fifo_state->fifo_mutex);
-	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
-	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
-	next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
+	max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
+	min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
+	next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
 
 	if (unlikely(bytes >= (max - min)))
 		goto out_err;
@@ -319,7 +311,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 	fifo_state->reserved_size = bytes;
 
 	while (1) {
-		uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
+		uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
 		bool need_bounce = false;
 		bool reserve_in_place = false;
 
@@ -353,8 +345,9 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 				fifo_state->using_bounce_buffer = false;
 
 				if (reserveable)
-					vmw_mmio_write(bytes, fifo_mem +
-						       SVGA_FIFO_RESERVED);
+					vmw_fifo_mem_write(dev_priv,
+							   SVGA_FIFO_RESERVED,
+							   bytes);
 				return (void __force *) (fifo_mem +
 							 (next_cmd >> 2));
 			} else {
@@ -381,7 +374,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 	return NULL;
 }
 
-void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
+void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes,
 			  int ctx_id)
 {
 	void *ret;
@@ -402,10 +395,11 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
 }
 
 static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
-			      u32  *fifo_mem,
+			      struct vmw_private *vmw,
 			      uint32_t next_cmd,
 			      uint32_t max, uint32_t min, uint32_t bytes)
 {
+	u32 *fifo_mem = vmw->fifo_mem;
 	uint32_t chunk_size = max - next_cmd;
 	uint32_t rest;
 	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
@@ -414,7 +408,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
 	if (bytes < chunk_size)
 		chunk_size = bytes;
 
-	vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
+	vmw_fifo_mem_write(vmw, SVGA_FIFO_RESERVED, bytes);
 	mb();
 	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
 	rest = bytes - chunk_size;
@@ -423,7 +417,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
 }
 
 static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
-			       u32  *fifo_mem,
+			       struct vmw_private *vmw,
 			       uint32_t next_cmd,
 			       uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -431,12 +425,12 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
 	    fifo_state->dynamic_buffer : fifo_state->static_buffer;
 
 	while (bytes > 0) {
-		vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
+		vmw_fifo_mem_write(vmw, (next_cmd >> 2), *buffer++);
 		next_cmd += sizeof(uint32_t);
 		if (unlikely(next_cmd == max))
 			next_cmd = min;
 		mb();
-		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+		vmw_fifo_mem_write(vmw, SVGA_FIFO_NEXT_CMD, next_cmd);
 		mb();
 		bytes -= sizeof(uint32_t);
 	}
@@ -445,10 +439,9 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
 static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-	u32  *fifo_mem = dev_priv->mmio_virt;
-	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
-	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
-	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
+	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
+	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
 	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 
 	if (fifo_state->dx)
@@ -462,10 +455,10 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 
 	if (fifo_state->using_bounce_buffer) {
 		if (reserveable)
-			vmw_fifo_res_copy(fifo_state, fifo_mem,
+			vmw_fifo_res_copy(fifo_state, dev_priv,
 					  next_cmd, max, min, bytes);
 		else
-			vmw_fifo_slow_copy(fifo_state, fifo_mem,
+			vmw_fifo_slow_copy(fifo_state, dev_priv,
 					   next_cmd, max, min, bytes);
 
 		if (fifo_state->dynamic_buffer) {
@@ -481,18 +474,18 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 		if (next_cmd >= max)
 			next_cmd -= max - min;
 		mb();
-		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd);
 	}
 
 	if (reserveable)
-		vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
+		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0);
 	mb();
 	up_write(&fifo_state->rwsem);
 	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 	mutex_unlock(&fifo_state->fifo_mutex);
 }
 
-void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)
 {
 	if (dev_priv->cman)
 		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
@@ -507,7 +500,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
  * @dev_priv: Pointer to device private structure.
  * @bytes: Number of bytes to commit.
  */
-void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
+void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
 {
 	if (dev_priv->cman)
 		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
@@ -522,7 +515,7 @@ void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
  * @dev_priv: Pointer to device private structure.
  * @interruptible: Whether to wait interruptible if function needs to sleep.
  */
-int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
+int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)
 {
 	might_sleep();
 
@@ -532,7 +525,7 @@ int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
 		return 0;
 }
 
-int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
 	struct svga_fifo_cmd_fence *cmd_fence;
@@ -540,7 +533,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 	int ret = 0;
 	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
 
-	fm = VMW_FIFO_RESERVE(dev_priv, bytes);
+	fm = VMW_CMD_RESERVE(dev_priv, bytes);
 	if (unlikely(fm == NULL)) {
 		*seqno = atomic_read(&dev_priv->marker_seq);
 		ret = -ENOMEM;
@@ -560,15 +553,14 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 		 * waiting code in vmwgfx_irq.c will emulate this.
 		 */
 
-		vmw_fifo_commit(dev_priv, 0);
+		vmw_cmd_commit(dev_priv, 0);
 		return 0;
 	}
 
 	*fm++ = SVGA_CMD_FENCE;
 	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
 	cmd_fence->fence = *seqno;
-	vmw_fifo_commit_flush(dev_priv, bytes);
-	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
+	vmw_cmd_commit_flush(dev_priv, bytes);
 	vmw_update_seqno(dev_priv, fifo_state);
 
 out_err:
@@ -599,7 +591,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
 		SVGA3dCmdWaitForQuery body;
 	} *cmd;
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -616,7 +608,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
 		cmd->body.guestResult.offset = 0;
 	}
 
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
 }
@@ -645,7 +637,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
 		SVGA3dCmdWaitForGBQuery body;
 	} *cmd;
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -657,7 +649,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
 	cmd->body.mobid = bo->mem.start;
 	cmd->body.offset = 0;
 
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
 }
@@ -681,7 +673,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
  *
  * Returns -ENOMEM on failure to reserve fifo space.
  */
-int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
 			      uint32_t cid)
 {
 	if (dev_priv->has_mob)
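
The reserve/fill/commit pattern behind the renamed helpers looks roughly like
this, as a minimal sketch built from the interfaces above (the wrapper name is
hypothetical; the command layout mirrors vmw_hw_context_destroy() further
down in this patch):

/*
 * Reserve space in the command stream, fill in a fixed-size SVGA
 * command, then commit it. VMW_CMD_RESERVE() logs an error and
 * returns NULL when the reservation fails.
 */
static int vmw_emit_example_cmd(struct vmw_private *dev_priv, uint32_t cid)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DESTROY_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	return 0;
}
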
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 9a9fe10d829b892c8407bbe755a84c9b70210c3b..45fbc41440f1e397587c4f352d23e4872bc91b40 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -610,7 +610,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
 
 	/* Send a new fence in case one was removed */
 	if (send_fence) {
-		vmw_fifo_send_fence(man->dev_priv, &dummy);
+		vmw_cmd_send_fence(man->dev_priv, &dummy);
 		wake_up_all(&man->idle_queue);
 	}
 
@@ -1208,18 +1208,14 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
  *
  * @man: The command buffer manager.
  * @size: The size of the main space pool.
- * @default_size: The default size of the command buffer for small kernel
- * submissions.
  *
- * Set the size and allocate the main command buffer space pool,
- * as well as the default size of the command buffer for
- * small kernel submissions. If successful, this enables large command
- * submissions. Note that this function requires that rudimentary command
+ * Set the size and allocate the main command buffer space pool.
+ * If successful, this enables large command submissions.
+ * Note that this function requires that rudimentary command
  * submission is already available and that the MOB memory manager is alive.
  * Returns 0 on success. Negative error code on failure.
  */
-int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
-			     size_t size, size_t default_size)
+int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
 {
 	struct vmw_private *dev_priv = man->dev_priv;
 	bool dummy;
@@ -1230,7 +1226,7 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
 
 	/* First, try to allocate a huge chunk of DMA memory */
 	size = PAGE_ALIGN(size);
-	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
+	man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
 				      &man->handle, GFP_KERNEL);
 	if (man->map) {
 		man->using_mob = false;
@@ -1313,7 +1309,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
 	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
 		2 : 1;
 	man->headers = dma_pool_create("vmwgfx cmdbuf",
-				       &dev_priv->dev->pdev->dev,
+				       dev_priv->drm.dev,
 				       sizeof(SVGACBHeader),
 				       64, PAGE_SIZE);
 	if (!man->headers) {
@@ -1322,7 +1318,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
 	}
 
 	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
-					&dev_priv->dev->pdev->dev,
+					dev_priv->drm.dev,
 					sizeof(struct vmw_cmdbuf_dheader),
 					64, PAGE_SIZE);
 	if (!man->dheaders) {
@@ -1387,7 +1383,7 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
 		ttm_bo_put(man->cmd_space);
 		man->cmd_space = NULL;
 	} else {
-		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
+		dma_free_coherent(man->dev_priv->drm.dev,
 				  man->size, man->map, man->handle);
 	}
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 61c246335e66f608578df3167c74a62c318ff1a3..6f4d0da11ad877050d7709d93e0ad9e8e8a052a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -163,7 +163,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 	}
 
 	vmw_execbuf_release_pinned_bo(dev_priv);
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return;
 
@@ -171,7 +171,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
 
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	vmw_fifo_resource_dec(dev_priv);
 }
 
@@ -265,7 +265,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
 		return -ENOMEM;
 	}
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		vmw_resource_unreference(&res);
 		return -ENOMEM;
@@ -275,7 +275,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
 
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	vmw_fifo_resource_inc(dev_priv);
 	res->hw_destroy = vmw_hw_context_destroy;
 	return 0;
@@ -316,7 +316,7 @@ static int vmw_gb_context_create(struct vmw_resource *res)
 		goto out_no_fifo;
 	}
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		ret = -ENOMEM;
 		goto out_no_fifo;
@@ -325,7 +325,7 @@ static int vmw_gb_context_create(struct vmw_resource *res)
 	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	vmw_fifo_resource_inc(dev_priv);
 
 	return 0;
@@ -348,7 +348,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
 
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -358,7 +358,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
 	cmd->body.mobid = bo->mem.start;
 	cmd->body.validContents = res->backup_dirty;
 	res->backup_dirty = false;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
 }
@@ -392,7 +392,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 
 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
 	if (unlikely(cmd == NULL)) {
 		mutex_unlock(&dev_priv->binding_mutex);
 		return -ENOMEM;
@@ -411,7 +411,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 	cmd2->body.cid = res->id;
 	cmd2->body.mobid = SVGA3D_INVALID_ID;
 
-	vmw_fifo_commit(dev_priv, submit_size);
+	vmw_cmd_commit(dev_priv, submit_size);
 	mutex_unlock(&dev_priv->binding_mutex);
 
 	/*
@@ -440,14 +440,14 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
 	if (likely(res->id == -1))
 		return 0;
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
 	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	if (dev_priv->query_cid == res->id)
 		dev_priv->query_cid_valid = false;
 	vmw_resource_release_id(res);
@@ -483,7 +483,7 @@ static int vmw_dx_context_create(struct vmw_resource *res)
 		goto out_no_fifo;
 	}
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		ret = -ENOMEM;
 		goto out_no_fifo;
@@ -492,7 +492,7 @@ static int vmw_dx_context_create(struct vmw_resource *res)
 	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	vmw_fifo_resource_inc(dev_priv);
 
 	return 0;
@@ -515,7 +515,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
 
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -525,7 +525,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
 	cmd->body.mobid = bo->mem.start;
 	cmd->body.validContents = res->backup_dirty;
 	res->backup_dirty = false;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 
 	return 0;
@@ -608,7 +608,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
 
 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
 	if (unlikely(cmd == NULL)) {
 		mutex_unlock(&dev_priv->binding_mutex);
 		return -ENOMEM;
@@ -627,7 +627,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
 	cmd2->body.cid = res->id;
 	cmd2->body.mobid = SVGA3D_INVALID_ID;
 
-	vmw_fifo_commit(dev_priv, submit_size);
+	vmw_cmd_commit(dev_priv, submit_size);
 	mutex_unlock(&dev_priv->binding_mutex);
 
 	/*
@@ -656,14 +656,14 @@ static int vmw_dx_context_destroy(struct vmw_resource *res)
 	if (likely(res->id == -1))
 		return 0;
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
 	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	if (dev_priv->query_cid == res->id)
 		dev_priv->query_cid_valid = false;
 	vmw_resource_release_id(res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 984d8884357d9088565d1ee1e506ebfc11ee42bf..ba658fa9cf6c6eb29c17978d4fa90d42336296f4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -175,7 +175,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
 	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
 	dma_resv_assert_held(bo->base.resv);
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (!cmd)
 		return -ENOMEM;
 
@@ -188,7 +188,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
 	cmd->body.mobid = bo->mem.start;
 	cmd->body.validSizeInBytes = vcotbl->size_read_back;
 
-	vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
 	vcotbl->scrubbed = false;
 
 	return 0;
@@ -263,7 +263,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
 	if (readback)
 		submit_size += sizeof(*cmd0);
 
-	cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
+	cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
 	if (!cmd1)
 		return -ENOMEM;
 
@@ -283,7 +283,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
 	cmd1->body.type = vcotbl->type;
 	cmd1->body.mobid = SVGA3D_INVALID_ID;
 	cmd1->body.validSizeInBytes = 0;
-	vmw_fifo_commit_flush(dev_priv, submit_size);
+	vmw_cmd_commit_flush(dev_priv, submit_size);
 	vcotbl->scrubbed = true;
 
 	/* Trigger a create() on next validate. */
@@ -349,7 +349,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
 	struct vmw_fence_obj *fence;
 
 	if (!vcotbl->scrubbed) {
-		cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+		cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 		if (!cmd)
 			return -ENOMEM;
 
@@ -358,7 +358,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
 		cmd->body.cid = vcotbl->ctx->id;
 		cmd->body.type = vcotbl->type;
 		vcotbl->size_read_back = res->backup_size;
-		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+		vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	}
 
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
@@ -430,7 +430,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	 * Do a page by page copy of COTables. This eliminates slow vmap()s.
 	 * This should really be a TTM utility.
 	 */
-	for (i = 0; i < old_bo->num_pages; ++i) {
+	for (i = 0; i < old_bo->mem.num_pages; ++i) {
 		bool dummy;
 
 		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
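
The loop bound now comes from the resource (old_bo->mem.num_pages) rather
than the buffer object. For reference, the page-by-page copy around it works
roughly as below, a sketch with error handling elided; old_bo and new_bo
stand for the source and destination buffer objects:

struct ttm_bo_kmap_obj old_map, new_map;
bool is_iomem;
unsigned long i;

for (i = 0; i < old_bo->mem.num_pages; ++i) {
	if (ttm_bo_kmap(old_bo, i, 1, &old_map))
		break;
	if (ttm_bo_kmap(new_bo, i, 1, &new_map)) {
		ttm_bo_kunmap(&old_map);
		break;
	}
	/* Copy one page from the old backing store to the new one. */
	memcpy(ttm_kmap_obj_virtual(&new_map, &is_iomem),
	       ttm_kmap_obj_virtual(&old_map, &is_iomem),
	       PAGE_SIZE);
	ttm_bo_kunmap(&new_map);
	ttm_bo_kunmap(&old_map);
}
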
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 216daf93022c6f4b57e56cf0ae9b318c2625ff8c..dd69b51c40e418a4c7ca90656bb42f61b41552e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -32,10 +32,10 @@
 #include <linux/mem_encrypt.h>
 
 #include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_sysfs.h>
 #include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_placement.h>
 
 #include "ttm_object.h"
@@ -43,8 +43,6 @@
 #include "vmwgfx_drv.h"
 
 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
-#define VMWGFX_CHIP_SVGAII 0
-#define VMW_FB_RESERVATION 0
 
 #define VMW_MIN_INITIAL_WIDTH 800
 #define VMW_MIN_INITIAL_HEIGHT 600
@@ -253,8 +251,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 };
 
 static const struct pci_device_id vmw_pci_id_list[] = {
-	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
-	{0, 0, 0}
+	{ PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) },
+	{ }
 };
 MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
 
@@ -425,8 +423,7 @@ static int vmw_request_device_late(struct vmw_private *dev_priv)
 	}
 
 	if (dev_priv->cman) {
-		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
-					       256*4096, 2*4096);
+		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096);
 		if (ret) {
 			struct vmw_cmdbuf_man *man = dev_priv->cman;
 
@@ -609,7 +606,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
  */
 static int vmw_dma_masks(struct vmw_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	int ret = 0;
 
 	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
@@ -644,26 +641,84 @@ static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
 #endif
 }
 
-static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+static int vmw_setup_pci_resources(struct vmw_private *dev,
+				   unsigned long pci_id)
 {
-	struct vmw_private *dev_priv;
+	resource_size_t fifo_start;
+	resource_size_t fifo_size;
 	int ret;
+	struct pci_dev *pdev = to_pci_dev(dev->drm.dev);
+
+	pci_set_master(pdev);
+
+	ret = pci_request_regions(pdev, "vmwgfx probe");
+	if (ret)
+		return ret;
+
+	dev->io_start = pci_resource_start(pdev, 0);
+	dev->vram_start = pci_resource_start(pdev, 1);
+	dev->vram_size = pci_resource_len(pdev, 1);
+	fifo_start = pci_resource_start(pdev, 2);
+	fifo_size = pci_resource_len(pdev, 2);
+
+	DRM_INFO("FIFO at %pa size is %llu kiB\n",
+		 &fifo_start, (uint64_t)fifo_size / 1024);
+	dev->fifo_mem = devm_memremap(dev->drm.dev,
+				      fifo_start,
+				      fifo_size,
+				      MEMREMAP_WB);
+
+	if (IS_ERR(dev->fifo_mem)) {
+		DRM_ERROR("Failed mapping FIFO memory.\n");
+		pci_release_regions(pdev);
+		return PTR_ERR(dev->fifo_mem);
+	}
+
+	/*
+	 * This is the approximate size of the vram; the exact size will
+	 * only be known after we read SVGA_REG_VRAM_SIZE. The PCI resource
+	 * size will be equal to or bigger than the size reported by
+	 * SVGA_REG_VRAM_SIZE.
+	 */
+	DRM_INFO("VRAM at %pa size is %llu kiB\n",
+		 &dev->vram_start, (uint64_t)dev->vram_size / 1024);
+
+	return 0;
+}
+
+static int vmw_detect_version(struct vmw_private *dev)
+{
 	uint32_t svga_id;
+
+	vmw_write(dev, SVGA_REG_ID, SVGA_ID_2);
+	svga_id = vmw_read(dev, SVGA_REG_ID);
+	if (svga_id != SVGA_ID_2) {
+		DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n",
+			  svga_id, dev->vmw_chipset);
+		return -ENOSYS;
+	}
+	return 0;
+}
+
+static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
+{
+	int ret;
 	enum vmw_res_type i;
 	bool refuse_dma = false;
 	char host_log[100] = {0};
+	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
 
-	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
-	if (unlikely(!dev_priv)) {
-		DRM_ERROR("Failed allocating a device private struct.\n");
-		return -ENOMEM;
-	}
+	dev_priv->vmw_chipset = pci_id;
+	dev_priv->last_read_seqno = (uint32_t) -100;
+	dev_priv->drm.dev_private = dev_priv;
 
-	pci_set_master(dev->pdev);
+	ret = vmw_setup_pci_resources(dev_priv, pci_id);
+	if (ret)
+		return ret;
+	ret = vmw_detect_version(dev_priv);
+	if (ret)
+		goto out_no_pci_or_version;
 
-	dev_priv->dev = dev;
-	dev_priv->vmw_chipset = chipset;
-	dev_priv->last_read_seqno = (uint32_t) -100;
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
 	mutex_init(&dev_priv->binding_mutex);
@@ -673,7 +728,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	spin_lock_init(&dev_priv->hw_lock);
 	spin_lock_init(&dev_priv->waiter_lock);
 	spin_lock_init(&dev_priv->cap_lock);
-	spin_lock_init(&dev_priv->svga_lock);
 	spin_lock_init(&dev_priv->cursor_lock);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i) {
@@ -688,21 +742,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	dev_priv->used_memory_size = 0;
 
-	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
-	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
-	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
-
 	dev_priv->assume_16bpp = !!vmw_assume_16bpp;
 
 	dev_priv->enable_fb = enable_fbdev;
 
-	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
-	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
-	if (svga_id != SVGA_ID_2) {
-		ret = -ENOSYS;
-		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
-		goto out_err0;
-	}
 
 	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
 
@@ -720,7 +763,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	}
 
 	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
-	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
+	dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
 	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
 	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
 
@@ -794,7 +837,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (unlikely(ret != 0))
 		goto out_err0;
 
-	dma_set_max_seg_size(dev->dev, U32_MAX);
+	dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);
 
 	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 		DRM_INFO("Max GMR ids is %u\n",
@@ -804,21 +847,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
 			 (unsigned)dev_priv->memory_size / 1024);
 	}
-	DRM_INFO("Maximum display memory size is %u kiB\n",
-		 dev_priv->prim_bb_mem / 1024);
-	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
-		 dev_priv->vram_start, dev_priv->vram_size / 1024);
-	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
-		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
-
-	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
-				       dev_priv->mmio_size, MEMREMAP_WB);
-
-	if (unlikely(dev_priv->mmio_virt == NULL)) {
-		ret = -ENOMEM;
-		DRM_ERROR("Failed mapping MMIO.\n");
-		goto out_err0;
-	}
+	DRM_INFO("Maximum display memory size is %llu kiB\n",
+		 (uint64_t)dev_priv->prim_bb_mem / 1024);
 
 	/* Need mmio memory to check for fifo pitchlock cap. */
 	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
@@ -826,7 +856,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	    !vmw_fifo_have_pitchlock(dev_priv)) {
 		ret = -ENOSYS;
 		DRM_ERROR("Hardware has no pitchlock\n");
-		goto out_err4;
+		goto out_err0;
 	}
 
 	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
@@ -835,29 +865,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (unlikely(dev_priv->tdev == NULL)) {
 		DRM_ERROR("Unable to initialize TTM object management.\n");
 		ret = -ENOMEM;
-		goto out_err4;
-	}
-
-	dev->dev_private = dev_priv;
-
-	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
-	dev_priv->stealth = (ret != 0);
-	if (dev_priv->stealth) {
-		/**
-		 * Request at least the mmio PCI resource.
-		 */
-
-		DRM_INFO("It appears like vesafb is loaded. "
-			 "Ignore above error if any.\n");
-		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
-			goto out_no_device;
-		}
+		goto out_err0;
 	}
 
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
-		ret = vmw_irq_install(dev, dev->pdev->irq);
+		ret = vmw_irq_install(&dev_priv->drm, pdev->irq);
 		if (ret != 0) {
 			DRM_ERROR("Failed installing irq: %d\n", ret);
 			goto out_no_irq;
@@ -874,8 +886,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 				    DRM_FILE_PAGE_OFFSET_START,
 				    DRM_FILE_PAGE_OFFSET_SIZE);
 	ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver,
-				 dev_priv->dev->dev,
-				 dev->anon_inode->i_mapping,
+				 dev_priv->drm.dev,
+				 dev_priv->drm.anon_inode->i_mapping,
 				 &dev_priv->vma_manager,
 				 dev_priv->map_mode == vmw_dma_alloc_coherent,
 				 false);
@@ -955,7 +967,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (ret)
 		goto out_no_fifo;
 
-	DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
+	DRM_INFO("Atomic: %s\n", (dev_priv->drm.driver->driver_features & DRIVER_ATOMIC)
 		 ? "yes." : "no.");
 	if (dev_priv->sm_type == VMW_SM_5)
 		DRM_INFO("SM5 support available.\n");
@@ -1000,29 +1012,24 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	vmw_fence_manager_takedown(dev_priv->fman);
 out_no_fman:
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-		vmw_irq_uninstall(dev_priv->dev);
+		vmw_irq_uninstall(&dev_priv->drm);
 out_no_irq:
-	if (dev_priv->stealth)
-		pci_release_region(dev->pdev, 2);
-	else
-		pci_release_regions(dev->pdev);
-out_no_device:
 	ttm_object_device_release(&dev_priv->tdev);
-out_err4:
-	memunmap(dev_priv->mmio_virt);
 out_err0:
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
 		idr_destroy(&dev_priv->res_idr[i]);
 
 	if (dev_priv->ctx.staged_bindings)
 		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
-	kfree(dev_priv);
+out_no_pci_or_version:
+	pci_release_regions(pdev);
 	return ret;
 }
 
 static void vmw_driver_unload(struct drm_device *dev)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	enum vmw_res_type i;
 
 	unregister_pm_notifier(&dev_priv->pm_nb);
@@ -1052,21 +1059,16 @@ static void vmw_driver_unload(struct drm_device *dev)
 	vmw_release_device_late(dev_priv);
 	vmw_fence_manager_takedown(dev_priv->fman);
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-		vmw_irq_uninstall(dev_priv->dev);
-	if (dev_priv->stealth)
-		pci_release_region(dev->pdev, 2);
-	else
-		pci_release_regions(dev->pdev);
+		vmw_irq_uninstall(&dev_priv->drm);
 
 	ttm_object_device_release(&dev_priv->tdev);
-	memunmap(dev_priv->mmio_virt);
 	if (dev_priv->ctx.staged_bindings)
 		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
 		idr_destroy(&dev_priv->res_idr[i]);
 
-	kfree(dev_priv);
+	pci_release_regions(pdev);
 }
 
 static void vmw_postclose(struct drm_device *dev,
@@ -1190,12 +1192,10 @@ static void __vmw_svga_enable(struct vmw_private *dev_priv)
 {
 	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
 
-	spin_lock(&dev_priv->svga_lock);
 	if (!ttm_resource_manager_used(man)) {
 		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
 		ttm_resource_manager_set_used(man, true);
 	}
-	spin_unlock(&dev_priv->svga_lock);
 }
 
 /**
@@ -1221,14 +1221,12 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv)
 {
 	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
 
-	spin_lock(&dev_priv->svga_lock);
 	if (ttm_resource_manager_used(man)) {
 		ttm_resource_manager_set_used(man, false);
 		vmw_write(dev_priv, SVGA_REG_ENABLE,
 			  SVGA_REG_ENABLE_HIDE |
 			  SVGA_REG_ENABLE_ENABLE);
 	}
-	spin_unlock(&dev_priv->svga_lock);
 }
 
 /**
@@ -1253,19 +1251,16 @@ void vmw_svga_disable(struct vmw_private *dev_priv)
 	 * to be inconsistent with the device, causing modesetting problems.
 	 *
 	 */
-	vmw_kms_lost_device(dev_priv->dev);
+	vmw_kms_lost_device(&dev_priv->drm);
 	ttm_write_lock(&dev_priv->reservation_sem, false);
-	spin_lock(&dev_priv->svga_lock);
 	if (ttm_resource_manager_used(man)) {
-		ttm_resource_manager_set_used(man, false);
-		spin_unlock(&dev_priv->svga_lock);
 		if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
 			DRM_ERROR("Failed evicting VRAM buffers.\n");
+		ttm_resource_manager_set_used(man, false);
 		vmw_write(dev_priv, SVGA_REG_ENABLE,
 			  SVGA_REG_ENABLE_HIDE |
 			  SVGA_REG_ENABLE_ENABLE);
-	} else
-		spin_unlock(&dev_priv->svga_lock);
+	}
 	ttm_write_unlock(&dev_priv->reservation_sem);
 }
 
@@ -1275,8 +1270,6 @@ static void vmw_remove(struct pci_dev *pdev)
 
 	drm_dev_unregister(dev);
 	vmw_driver_unload(dev);
-	drm_dev_put(dev);
-	pci_disable_device(pdev);
 }
 
 static unsigned long
@@ -1377,7 +1370,7 @@ static int vmw_pm_freeze(struct device *kdev)
 	 * No user-space processes should be running now.
 	 */
 	ttm_suspend_unlock(&dev_priv->reservation_sem);
-	ret = vmw_kms_suspend(dev_priv->dev);
+	ret = vmw_kms_suspend(&dev_priv->drm);
 	if (ret) {
 		ttm_suspend_lock(&dev_priv->reservation_sem);
 		DRM_ERROR("Failed to freeze modesetting.\n");
@@ -1409,7 +1402,7 @@ static int vmw_pm_freeze(struct device *kdev)
 
 	vmw_fence_fifo_down(dev_priv->fman);
 	__vmw_svga_disable(dev_priv);
-	
+
 	vmw_release_device_late(dev_priv);
 	return 0;
 }
@@ -1438,7 +1431,7 @@ static int vmw_pm_restore(struct device *kdev)
 	dev_priv->suspend_locked = false;
 	ttm_suspend_unlock(&dev_priv->reservation_sem);
 	if (dev_priv->suspend_state)
-		vmw_kms_resume(dev_priv->dev);
+		vmw_kms_resume(&dev_priv->drm);
 
 	if (dev_priv->enable_fb)
 		vmw_fb_on(dev_priv);
@@ -1507,39 +1500,36 @@ static struct pci_driver vmw_pci_driver = {
 
 static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	struct drm_device *dev;
+	struct vmw_private *vmw;
 	int ret;
 
-	ret = pci_enable_device(pdev);
+	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb");
 	if (ret)
 		return ret;
 
-	dev = drm_dev_alloc(&driver, &pdev->dev);
-	if (IS_ERR(dev)) {
-		ret = PTR_ERR(dev);
-		goto err_pci_disable_device;
-	}
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return ret;
 
-	dev->pdev = pdev;
-	pci_set_drvdata(pdev, dev);
+	vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
+				 struct vmw_private, drm);
+	if (IS_ERR(vmw))
+		return PTR_ERR(vmw);
 
-	ret = vmw_driver_load(dev, ent->driver_data);
-	if (ret)
-		goto err_drm_dev_put;
+	vmw->drm.pdev = pdev;
+	pci_set_drvdata(pdev, &vmw->drm);
 
-	ret = drm_dev_register(dev, ent->driver_data);
+	ret = vmw_driver_load(vmw, ent->device);
 	if (ret)
-		goto err_vmw_driver_unload;
+		return ret;
 
-	return 0;
+	ret = drm_dev_register(&vmw->drm, 0);
+	if (ret) {
+		vmw_driver_unload(&vmw->drm);
+		return ret;
+	}
 
-err_vmw_driver_unload:
-	vmw_driver_unload(dev);
-err_drm_dev_put:
-	drm_dev_put(dev);
-err_pci_disable_device:
-	pci_disable_device(pdev);
-	return ret;
+	return 0;
 }
 
 static int __init vmwgfx_init(void)
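
With pcim_enable_device() and devm_drm_dev_alloc(), the PCI enable, the
drm_device allocation and the devm_memremap()'d fifo are all released
automatically on unbind, which is why the explicit drm_dev_put() and
pci_disable_device() unwind paths disappear above. The general shape of the
managed pattern, as a sketch with hypothetical names:

struct example_private {
	struct drm_device drm;
	/* driver state follows */
};

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct example_private *priv;
	int ret;

	/* Undone automatically when the driver unbinds. */
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* drm_device is embedded; freed with the final drm_dev_put(). */
	priv = devm_drm_dev_alloc(&pdev->dev, &example_driver,
				  struct example_private, drm);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	pci_set_drvdata(pdev, &priv->drm);

	return drm_dev_register(&priv->drm, 0);
}
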
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index b45becbb00f8e2e1b8148379eb7089d74c30e8da..5fa5bcd20cc5c1abf7a30fa7cc8f05bd29d5549d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -39,7 +39,6 @@
 
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_execbuf_util.h>
-#include <drm/ttm/ttm_module.h>
 
 #include "ttm_lock.h"
 #include "ttm_object.h"
@@ -67,6 +66,8 @@
 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
 #define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
 
+#define VMWGFX_PCI_ID_SVGA2              0x0405
+
 /*
  * Perhaps we should have sysfs entries for these.
  */
@@ -275,13 +276,6 @@ struct vmw_surface {
 	struct list_head view_list;
 };
 
-struct vmw_marker_queue {
-	struct list_head head;
-	u64 lag;
-	u64 lag_time;
-	spinlock_t lock;
-};
-
 struct vmw_fifo_state {
 	unsigned long reserved_size;
 	u32 *dynamic_buffer;
@@ -291,7 +285,6 @@ struct vmw_fifo_state {
 	uint32_t capabilities;
 	struct mutex fifo_mutex;
 	struct rw_semaphore rwsem;
-	struct vmw_marker_queue marker_queue;
 	bool dx;
 };
 
@@ -490,19 +483,19 @@ enum vmw_sm_type {
 };
 
 struct vmw_private {
+	struct drm_device drm;
 	struct ttm_bo_device bdev;
 
 	struct vmw_fifo_state fifo;
 
-	struct drm_device *dev;
 	struct drm_vma_offset_manager vma_manager;
-	unsigned long vmw_chipset;
-	unsigned int io_start;
-	uint32_t vram_start;
-	uint32_t vram_size;
-	uint32_t prim_bb_mem;
-	uint32_t mmio_start;
-	uint32_t mmio_size;
+	u32 vmw_chipset;
+	resource_size_t io_start;
+	resource_size_t vram_start;
+	resource_size_t vram_size;
+	resource_size_t prim_bb_mem;
+	u32 *fifo_mem;
+	resource_size_t fifo_mem_size;
 	uint32_t fb_max_width;
 	uint32_t fb_max_height;
 	uint32_t texture_max_width;
@@ -511,7 +504,6 @@ struct vmw_private {
 	uint32_t stdu_max_height;
 	uint32_t initial_width;
 	uint32_t initial_height;
-	u32 *mmio_virt;
 	uint32_t capabilities;
 	uint32_t capabilities2;
 	uint32_t max_gmr_ids;
@@ -591,13 +583,7 @@ struct vmw_private {
 	struct mutex cmdbuf_mutex;
 	struct mutex binding_mutex;
 
-	/**
-	 * Operating mode.
-	 */
-
-	bool stealth;
 	bool enable_fb;
-	spinlock_t svga_lock;
 
 	/**
 	 * PM management.
@@ -967,30 +953,29 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv,
 extern void vmw_fifo_release(struct vmw_private *dev_priv,
 			     struct vmw_fifo_state *fifo);
 extern void *
-vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
-extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
-extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
-extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
-			       uint32_t *seqno);
+vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
+extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
+extern int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno);
+extern bool vmw_supports_3d(struct vmw_private *dev_priv);
 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
-extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
-extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
-				     uint32_t cid);
-extern int vmw_fifo_flush(struct vmw_private *dev_priv,
-			  bool interruptible);
+extern int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
+				    uint32_t cid);
+extern int vmw_cmd_flush(struct vmw_private *dev_priv,
+			 bool interruptible);
 
-#define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id)                        \
+#define VMW_CMD_CTX_RESERVE(__priv, __bytes, __ctx_id)                        \
 ({                                                                            \
-	vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id) ? : ({                 \
+	vmw_cmd_ctx_reserve(__priv, __bytes, __ctx_id) ? : ({                 \
 		DRM_ERROR("FIFO reserve failed at %s for %u bytes\n",         \
 			  __func__, (unsigned int) __bytes);                  \
 		NULL;                                                         \
 	});                                                                   \
 })
 
-#define VMW_FIFO_RESERVE(__priv, __bytes)                                     \
-	VMW_FIFO_RESERVE_DX(__priv, __bytes, SVGA3D_INVALID_ID)
+#define VMW_CMD_RESERVE(__priv, __bytes)                                     \
+	VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID)
 
 /**
  * TTM glue - vmwgfx_ttm_glue.c
@@ -1125,19 +1110,6 @@ extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
 extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
 				      u32 flag, int *waiter_count);
 
-/**
- * Rudimentary fence-like objects currently used only for throttling -
- * vmwgfx_marker.c
- */
-
-extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
-extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
-extern int vmw_marker_push(struct vmw_marker_queue *queue,
-			   uint32_t seqno);
-extern int vmw_marker_pull(struct vmw_marker_queue *queue,
-			   uint32_t signaled_seqno);
-extern int vmw_wait_lag(struct vmw_private *dev_priv,
-			struct vmw_marker_queue *queue, uint32_t us);
 
 /**
  * Kernel framebuffer - vmwgfx_fb.c
@@ -1411,8 +1383,7 @@ struct vmw_cmdbuf_header;
 
 extern struct vmw_cmdbuf_man *
 vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
-extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
-				    size_t size, size_t default_size);
+extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size);
 extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
 extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
 extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
@@ -1581,28 +1552,32 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
 }
 
 /**
- * vmw_mmio_read - Perform a MMIO read from volatile memory
+ * vmw_fifo_mem_read - Perform an MMIO read from the fifo memory
  *
- * @addr: The address to read from
+ * @vmw: The vmw device private structure
+ * @fifo_reg: The fifo register to read from
  *
  * This function is intended to be equivalent to ioread32() on
  * memremap'd memory, but without byteswapping.
  */
-static inline u32 vmw_mmio_read(u32 *addr)
+static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, u32 fifo_reg)
 {
-	return READ_ONCE(*addr);
+	return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
 }
 
 /**
- * vmw_mmio_write - Perform a MMIO write to volatile memory
+ * vmw_fifo_mem_write - Perform a MMIO write to volatile memory
  *
- * @addr: The address to write to
+ * @addr: The fifo register to write to
  *
  * This function is intended to be equivalent to iowrite32 on
  * memremap'd memory, but without byteswapping.
  */
-static inline void vmw_mmio_write(u32 value, u32 *addr)
+static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
+				      u32 value)
 {
-	WRITE_ONCE(*addr, value);
+	WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
 }
 #endif
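
Call sites now address the fifo by register index instead of doing pointer
arithmetic on a mapped base, for example (a sketch mirroring vmw_fifo_init()
earlier in this patch):

/* Read back the ring geometry and mark the fifo empty. */
u32 min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);

vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min);
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min);
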
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index e67e2e8f6e6fa7299c7f6057a9955ea818c5c6a5..462f1732070859a06226d66dfe7b1e3e0776dcb7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -724,7 +724,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
 	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
 		return 0;
 
-	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
+	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
 	if (cmd == NULL)
 		return -ENOMEM;
 
@@ -732,7 +732,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = ctx_res->id;
 	cmd->body.mobid = dx_query_mob->base.mem.start;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
 
@@ -1042,7 +1042,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
-		if (unlikely(new_query_bo->base.num_pages > 4)) {
+		if (unlikely(new_query_bo->base.mem.num_pages > 4)) {
 			VMW_DEBUG_USER("Query buffer too large.\n");
 			return -EINVAL;
 		}
@@ -1100,7 +1100,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 		BUG_ON(!ctx_entry->valid);
 		ctx = ctx_entry->res;
 
-		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
+		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);
 
 		if (unlikely(ret != 0))
 			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
@@ -1541,7 +1541,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		return ret;
 
 	/* Make sure DMA doesn't cross BO boundaries. */
-	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+	bo_size = vmw_bo->base.base.size;
 	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
 		VMW_DEBUG_USER("Invalid DMA offset.\n");
 		return -EINVAL;
@@ -3762,7 +3762,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
 	/* p_handle implies file_priv. */
 	BUG_ON(p_handle != NULL && file_priv == NULL);
 
-	ret = vmw_fifo_send_fence(dev_priv, &sequence);
+	ret = vmw_cmd_send_fence(dev_priv, &sequence);
 	if (unlikely(ret != 0)) {
 		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
 		synced = true;
@@ -3876,10 +3876,10 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
 	void *cmd;
 
 	if (sw_context->dx_ctx_node)
-		cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
+		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
 					  sw_context->dx_ctx_node->ctx->id);
 	else
-		cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
+		cmd = VMW_CMD_RESERVE(dev_priv, command_size);
 
 	if (!cmd)
 		return -ENOMEM;
@@ -3888,7 +3888,7 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
 	memcpy(cmd, kernel_commands, command_size);
 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
 	vmw_resource_relocations_free(&sw_context->res_relocations);
-	vmw_fifo_commit(dev_priv, command_size);
+	vmw_cmd_commit(dev_priv, command_size);
 
 	return 0;
 }
@@ -4046,11 +4046,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	}
 
 	if (throttle_us) {
-		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
-				   throttle_us);
-
-		if (ret)
-			goto out_free_fence_fd;
+		VMW_DEBUG_USER("Throttling is no longer supported.\n");
 	}
 
 	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
@@ -4329,7 +4325,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 
 	if (dev_priv->query_cid_valid) {
 		BUG_ON(fence != NULL);
-		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
 		if (ret)
 			goto out_no_emit;
 		dev_priv->query_cid_valid = false;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 4d60201037d150c211d610ea41ddd573ada98185..33f07abfc3ae8e0138ee81aa0b3e750bb37c980e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -258,7 +258,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
 	if (w && h) {
 		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
 						       &clip, 1));
-		vmw_fifo_flush(vmw_priv, false);
+		vmw_cmd_flush(vmw_priv, false);
 	}
 out_unlock:
 	mutex_unlock(&par->bo_mutex);
@@ -481,7 +481,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
 			DRM_ERROR("Could not unset a mode.\n");
 			return ret;
 		}
-		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
+		drm_mode_destroy(&par->vmw_priv->drm, par->set_mode);
 		par->set_mode = NULL;
 	}
 
@@ -567,7 +567,7 @@ static int vmw_fb_set_par(struct fb_info *info)
 	struct drm_display_mode *mode;
 	int ret;
 
-	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
+	mode = drm_mode_duplicate(&vmw_priv->drm, &new_mode);
 	if (!mode) {
 		DRM_ERROR("Could not create new fb mode.\n");
 		return -ENOMEM;
@@ -581,7 +581,7 @@ static int vmw_fb_set_par(struct fb_info *info)
 					mode->hdisplay *
 					DIV_ROUND_UP(var->bits_per_pixel, 8),
 					mode->vdisplay)) {
-		drm_mode_destroy(vmw_priv->dev, mode);
+		drm_mode_destroy(&vmw_priv->drm, mode);
 		return -EINVAL;
 	}
 
@@ -615,7 +615,7 @@ static int vmw_fb_set_par(struct fb_info *info)
 
 out_unlock:
 	if (par->set_mode)
-		drm_mode_destroy(vmw_priv->dev, par->set_mode);
+		drm_mode_destroy(&vmw_priv->drm, par->set_mode);
 	par->set_mode = mode;
 
 	mutex_unlock(&par->bo_mutex);
@@ -638,7 +638,7 @@ static const struct fb_ops vmw_fb_ops = {
 
 int vmw_fb_init(struct vmw_private *vmw_priv)
 {
-	struct device *device = &vmw_priv->dev->pdev->dev;
+	struct device *device = vmw_priv->drm.dev;
 	struct vmw_fb_par *par;
 	struct fb_info *info;
 	unsigned fb_width, fb_height;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 0f8d293971576a4f6d2faf78c6e24e8825457a58..378ec7600154b1d4e0afd59d189c342fb636ba93 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -141,8 +141,7 @@ static bool vmw_fence_enable_signaling(struct dma_fence *f)
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	struct vmw_private *dev_priv = fman->dev_priv;
 
-	u32 *fifo_mem = dev_priv->mmio_virt;
-	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+	u32 seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
 	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
 		return false;
 
@@ -401,14 +400,12 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
 				      u32 passed_seqno)
 {
 	u32 goal_seqno;
-	u32 *fifo_mem;
 	struct vmw_fence_obj *fence;
 
 	if (likely(!fman->seqno_valid))
 		return false;
 
-	fifo_mem = fman->dev_priv->mmio_virt;
-	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
 	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
 		return false;
 
@@ -416,8 +413,9 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
 	list_for_each_entry(fence, &fman->fence_list, head) {
 		if (!list_empty(&fence->seq_passed_actions)) {
 			fman->seqno_valid = true;
-			vmw_mmio_write(fence->base.seqno,
-				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
+			vmw_fifo_mem_write(fman->dev_priv,
+					   SVGA_FIFO_FENCE_GOAL,
+					   fence->base.seqno);
 			break;
 		}
 	}
@@ -445,18 +443,17 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
 {
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	u32 goal_seqno;
-	u32 *fifo_mem;
 
 	if (dma_fence_is_signaled_locked(&fence->base))
 		return false;
 
-	fifo_mem = fman->dev_priv->mmio_virt;
-	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
 	if (likely(fman->seqno_valid &&
 		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
 		return false;
 
-	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
+			   fence->base.seqno);
 	fman->seqno_valid = true;
 
 	return true;
@@ -468,9 +465,8 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
 	struct list_head action_list;
 	bool needs_rerun;
 	uint32_t seqno, new_seqno;
-	u32 *fifo_mem = fman->dev_priv->mmio_virt;
 
-	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+	seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
 rerun:
 	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
 		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
@@ -492,7 +488,7 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
 
 	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
 	if (unlikely(needs_rerun)) {
-		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+		new_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
 		if (new_seqno != seqno) {
 			seqno = new_seqno;
 			goto rerun;
@@ -1033,7 +1029,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
 	eaction->action.type = VMW_ACTION_EVENT;
 
 	eaction->fence = vmw_fence_obj_reference(fence);
-	eaction->dev = fman->dev_priv->dev;
+	eaction->dev = &fman->dev_priv->drm;
 	eaction->tv_sec = tv_sec;
 	eaction->tv_usec = tv_usec;
 
@@ -1055,7 +1051,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
 {
 	struct vmw_event_fence_pending *event;
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
-	struct drm_device *dev = fman->dev_priv->dev;
+	struct drm_device *dev = &fman->dev_priv->drm;
 	int ret;
 
 	event = kzalloc(sizeof(*event), GFP_KERNEL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 83c0d5a3e4fd74f1d4b5bbdef0a2d76a636fcaa2..964ddf1ca57a50ffa1bb08cd1cf30039d1e868d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -51,7 +51,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 	uint32_t cmd_size = define_size + remap_size;
 	uint32_t i;
 
-	cmd_orig = cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
+	cmd_orig = cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -98,7 +98,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 
 	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
 
-	vmw_fifo_commit(dev_priv, cmd_size);
+	vmw_cmd_commit(dev_priv, cmd_size);
 
 	return 0;
 }
@@ -110,7 +110,7 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
 	uint32_t define_size = sizeof(define_cmd) + 4;
 	uint32_t *cmd;
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, define_size);
+	cmd = VMW_CMD_RESERVE(dev_priv, define_size);
 	if (unlikely(cmd == NULL))
 		return;
 
@@ -120,7 +120,7 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
 	*cmd++ = SVGA_CMD_DEFINE_GMR2;
 	memcpy(cmd, &define_cmd, sizeof(define_cmd));
 
-	vmw_fifo_commit(dev_priv, define_size);
+	vmw_cmd_commit(dev_priv, define_size);
 }
 
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index be325a62c1781c17329fd6fbccafb12b859999e1..1774960d1b89daa6ec473b16b1ce1c2e6e93637e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -29,7 +29,6 @@
  */
 
 #include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 #include <linux/idr.h>
@@ -65,20 +64,19 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 	spin_lock(&gman->lock);
 
 	if (gman->max_gmr_pages > 0) {
-		gman->used_gmr_pages += bo->num_pages;
+		gman->used_gmr_pages += mem->num_pages;
 		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
 			goto nospace;
 	}
 
 	mem->mm_node = gman;
 	mem->start = id;
-	mem->num_pages = bo->num_pages;
 
 	spin_unlock(&gman->lock);
 	return 0;
 
 nospace:
-	gman->used_gmr_pages -= bo->num_pages;
+	gman->used_gmr_pages -= mem->num_pages;
 	spin_unlock(&gman->lock);
 	ida_free(&gman->gmr_ida, id);
 	return -ENOSPC;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index f681b7b4df1b87f996f46a5c488f7c3279e641ee..80af8772b8c248c164dd55eef40c08c84a5d334d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -51,7 +51,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		param->value = vmw_overlay_num_free_overlays(dev_priv);
 		break;
 	case DRM_VMW_PARAM_3D:
-		param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
+		param->value = vmw_supports_3d(dev_priv) ? 1 : 0;
 		break;
 	case DRM_VMW_PARAM_HW_CAPS:
 		param->value = dev_priv->capabilities;
@@ -67,7 +67,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		break;
 	case DRM_VMW_PARAM_FIFO_HW_VERSION:
 	{
-		u32 *fifo_mem = dev_priv->mmio_virt;
 		const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
 		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -76,11 +75,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		}
 
 		param->value =
-			vmw_mmio_read(fifo_mem +
-				      ((fifo->capabilities &
-					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
-				       SVGA_FIFO_3D_HWVERSION_REVISED :
-				       SVGA_FIFO_3D_HWVERSION));
+			vmw_fifo_mem_read(dev_priv,
+					  ((fifo->capabilities &
+					    SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+						   SVGA_FIFO_3D_HWVERSION_REVISED :
+						   SVGA_FIFO_3D_HWVERSION));
 		break;
 	}
 	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
@@ -235,7 +234,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 		if (unlikely(ret != 0))
 			goto out_err;
 	} else {
-		fifo_mem = dev_priv->mmio_virt;
+		fifo_mem = dev_priv->fifo_mem;
 		memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
 	}
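
DRM_VMW_PARAM_FIFO_HW_VERSION now goes through vmw_fifo_mem_read() instead of open-coded pointer arithmetic on a cached mmio_virt pointer. A sketch of the offset-indexed accessor shape, modeled over a plain array; the register index value and struct name here are made up, and the real helpers wrap accesses to the mapped FIFO window:

#include <stdint.h>
#include <stdio.h>

#define SVGA_FIFO_FENCE 8	/* register index; the value here is illustrative */

struct vmw_model {
	uint32_t fifo_mem[64];	/* stands in for the mapped FIFO window */
};

static uint32_t vmw_fifo_mem_read(struct vmw_model *p, uint32_t idx)
{
	return p->fifo_mem[idx];	/* the driver reads through MMIO here */
}

static void vmw_fifo_mem_write(struct vmw_model *p, uint32_t idx, uint32_t val)
{
	p->fifo_mem[idx] = val;		/* likewise an MMIO write */
}

int main(void)
{
	struct vmw_model p = { { 0 } };

	vmw_fifo_mem_write(&p, SVGA_FIFO_FENCE, 42);
	printf("fence: %u\n", (unsigned)vmw_fifo_mem_read(&p, SVGA_FIFO_FENCE));
	return 0;
}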
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 75f3efee21a42f8f5f4a32f5643e644ddef22d98..6c2a569f1fcb187c63bf80aa73a30cda8ee6e0fd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -117,12 +117,10 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
 void vmw_update_seqno(struct vmw_private *dev_priv,
 			 struct vmw_fifo_state *fifo_state)
 {
-	u32 *fifo_mem = dev_priv->mmio_virt;
-	uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+	uint32_t seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
 
 	if (dev_priv->last_read_seqno != seqno) {
 		dev_priv->last_read_seqno = seqno;
-		vmw_marker_pull(&fifo_state->marker_queue, seqno);
 		vmw_fences_update(dev_priv->fman);
 	}
 }
@@ -222,11 +220,9 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 		}
 	}
 	finish_wait(&dev_priv->fence_queue, &__wait);
-	if (ret == 0 && fifo_idle) {
-		u32 *fifo_mem = dev_priv->mmio_virt;
+	if (ret == 0 && fifo_idle)
+		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, signal_seq);
 
-		vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
-	}
 	wake_up_all(&dev_priv->fence_queue);
 out_err:
 	if (fifo_idle)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bc67f2b930e1f17e8e4356f09385b980133d02b8..9a89f658e501cae9aad927d7b878c1854a5c1f02 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -36,9 +36,6 @@
 
 #include "vmwgfx_kms.h"
 
-/* Might need a hrtimer here? */
-#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
-
 void vmw_du_cleanup(struct vmw_display_unit *du)
 {
 	drm_plane_cleanup(&du->primary);
@@ -68,7 +65,7 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
 	if (!image)
 		return -EINVAL;
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
+	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -83,7 +80,7 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
 	cmd->cursor.hotspotX = hotspotX;
 	cmd->cursor.hotspotY = hotspotY;
 
-	vmw_fifo_commit_flush(dev_priv, cmd_size);
+	vmw_cmd_commit_flush(dev_priv, cmd_size);
 
 	return 0;
 }
@@ -128,15 +125,14 @@ static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
 static void vmw_cursor_update_position(struct vmw_private *dev_priv,
 				       bool show, int x, int y)
 {
-	u32 *fifo_mem = dev_priv->mmio_virt;
 	uint32_t count;
 
 	spin_lock(&dev_priv->cursor_lock);
-	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
-	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
-	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
-	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
-	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, show ? 1 : 0);
+	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
+	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
+	count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
+	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
 	spin_unlock(&dev_priv->cursor_lock);
 }
 
@@ -236,7 +232,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
  */
 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct vmw_display_unit *du;
 	struct drm_crtc *crtc;
 
@@ -252,7 +248,7 @@ void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
 
 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct vmw_display_unit *du;
 	struct drm_crtc *crtc;
 
@@ -891,7 +887,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 					   bool is_bo_proxy)
 
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct vmw_framebuffer_surface *vfbs;
 	enum SVGA3dSurfaceFormat format;
 	int ret;
@@ -1003,11 +999,11 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
 	struct drm_clip_rect norect;
 	int ret, increment = 1;
 
-	drm_modeset_lock_all(dev_priv->dev);
+	drm_modeset_lock_all(&dev_priv->drm);
 
 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0)) {
-		drm_modeset_unlock_all(dev_priv->dev);
+		drm_modeset_unlock_all(&dev_priv->drm);
 		return ret;
 	}
 
@@ -1033,10 +1029,10 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
 		break;
 	}
 
-	vmw_fifo_flush(dev_priv, false);
+	vmw_cmd_flush(dev_priv, false);
 	ttm_read_unlock(&dev_priv->reservation_sem);
 
-	drm_modeset_unlock_all(dev_priv->dev);
+	drm_modeset_unlock_all(&dev_priv->drm);
 
 	return ret;
 }
@@ -1213,14 +1209,14 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
 				      *mode_cmd)
 
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct vmw_framebuffer_bo *vfbd;
 	unsigned int requested_size;
 	struct drm_format_name_buf format_name;
 	int ret;
 
 	requested_size = mode_cmd->height * mode_cmd->pitches[0];
-	if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
+	if (unlikely(requested_size > bo->base.base.size)) {
 		DRM_ERROR("Screen buffer object size is too small "
 			  "for requested mode.\n");
 		return -EINVAL;
@@ -1319,7 +1315,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
 	    bo && only_2d &&
 	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
 	    dev_priv->active_display_unit == vmw_du_screen_target) {
-		ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
+		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
 					  bo, &surface);
 		if (ret)
 			return ERR_PTR(ret);
@@ -1768,7 +1764,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
 	if (ret)
 		return ret;
 
-	vmw_fifo_flush(dev_priv, false);
+	vmw_cmd_flush(dev_priv, false);
 
 	return 0;
 }
@@ -1780,7 +1776,7 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
 		return;
 
 	dev_priv->hotplug_mode_update_property =
-		drm_property_create_range(dev_priv->dev,
+		drm_property_create_range(&dev_priv->drm,
 					  DRM_MODE_PROP_IMMUTABLE,
 					  "hotplug_mode_update", 0, 1);
 
@@ -1791,7 +1787,7 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
 
 int vmw_kms_init(struct vmw_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	int ret;
 
 	drm_mode_config_init(dev);
@@ -1823,7 +1819,7 @@ int vmw_kms_close(struct vmw_private *dev_priv)
 	 * but since it destroys encoders and our destructor calls
 	 * drm_encoder_cleanup, which takes the lock, we deadlock.
 	 */
-	drm_mode_config_cleanup(dev_priv->dev);
+	drm_mode_config_cleanup(&dev_priv->drm);
 	if (dev_priv->active_display_unit == vmw_du_legacy)
 		ret = vmw_kms_ldu_close_display(dev_priv);
 
@@ -1876,11 +1872,11 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
 	else if (vmw_fifo_have_pitchlock(vmw_priv))
-		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
-			       SVGA_FIFO_PITCHLOCK);
+		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
 	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
-	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
+	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
+		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
 
 	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
 		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
@@ -1934,7 +1930,7 @@ void vmw_disable_vblank(struct drm_crtc *crtc)
 static int vmw_du_update_layout(struct vmw_private *dev_priv,
 				unsigned int num_rects, struct drm_rect *rects)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct vmw_display_unit *du;
 	struct drm_connector *con;
 	struct drm_connector_list_iter conn_iter;
@@ -2366,7 +2362,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 	if (dirty->crtc) {
 		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
 	} else {
-		list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
+		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
 				    head) {
 			struct drm_plane *plane = crtc->primary;
 
@@ -2386,7 +2382,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 
 		dirty->unit = unit;
 		if (dirty->fifo_reserve_size > 0) {
-			dirty->cmd = VMW_FIFO_RESERVE(dev_priv,
+			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
 						      dirty->fifo_reserve_size);
 			if (!dirty->cmd)
 				return -ENOMEM;
@@ -2520,7 +2516,7 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
 	if (!clips)
 		return 0;
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
 	if (!cmd)
 		return -ENOMEM;
 
@@ -2549,7 +2545,7 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
 		copy_size += sizeof(*cmd);
 	}
 
-	vmw_fifo_commit(dev_priv, copy_size);
+	vmw_cmd_commit(dev_priv, copy_size);
 
 	return 0;
 }
@@ -2568,8 +2564,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
 	int i = 0;
 	int ret = 0;
 
-	mutex_lock(&dev_priv->dev->mode_config.mutex);
-	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
+	mutex_lock(&dev_priv->drm.mode_config.mutex);
+	list_for_each_entry(con, &dev_priv->drm.mode_config.connector_list,
 			    head) {
 		if (i == unit)
 			break;
@@ -2577,7 +2573,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
 		++i;
 	}
 
-	if (&con->head == &dev_priv->dev->mode_config.connector_list) {
+	if (&con->head == &dev_priv->drm.mode_config.connector_list) {
 		DRM_ERROR("Could not find initial display unit.\n");
 		ret = -EINVAL;
 		goto out_unlock;
@@ -2611,7 +2607,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
 	}
 
  out_unlock:
-	mutex_unlock(&dev_priv->dev->mode_config.mutex);
+	mutex_unlock(&dev_priv->drm.mode_config.mutex);
 
 	return ret;
 }
@@ -2631,7 +2627,7 @@ vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
 		return;
 
 	dev_priv->implicit_placement_property =
-		drm_property_create_range(dev_priv->dev,
+		drm_property_create_range(&dev_priv->drm,
 					  DRM_MODE_PROP_IMMUTABLE,
 					  "implicit_placement", 0, 1);
 }
@@ -2752,7 +2748,7 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
 		goto out_unref;
 
 	reserved_size = update->calc_fifo_size(update, num_hits);
-	cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size);
+	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
 	if (!cmd_start) {
 		ret = -ENOMEM;
 		goto out_revert;
@@ -2801,7 +2797,7 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
 	if (reserved_size < submit_size)
 		submit_size = 0;
 
-	vmw_fifo_commit(update->dev_priv, submit_size);
+	vmw_cmd_commit(update->dev_priv, submit_size);
 
 	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
 					 update->out_fence, NULL);
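
Every "dev_priv->dev" in this file becomes "&dev_priv->drm" because the drm_device is now embedded in vmw_private instead of pointed to; the inverse lookup is a container_of(). A standalone model of why the embedding works both ways (struct names and fields are illustrative):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_device_model { int registered; };	/* illustrative stand-in */

struct vmw_private_model {
	struct drm_device_model drm;	/* embedded, no longer a pointer */
	int fifo_depth;			/* illustrative driver state */
};

int main(void)
{
	struct vmw_private_model priv = { .fifo_depth = 7 };
	struct drm_device_model *dev = &priv.drm;	/* what &dev_priv->drm yields */
	struct vmw_private_model *back =
		container_of(dev, struct vmw_private_model, drm);

	printf("%d\n", back->fifo_depth);	/* 7: the round trip works */
	return 0;
}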
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 9d1de5b5cc6a7f42f97a00b1e1ea9695fbf4d5ad..9a9508edbc9ed6385b176071cc38c67aeb50cd1e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -125,7 +125,6 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
 		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
 		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
 
 		i++;
 	}
@@ -355,7 +354,7 @@ static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = {
 static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 {
 	struct vmw_legacy_display_unit *ldu;
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
 	struct drm_plane *primary, *cursor;
@@ -479,7 +478,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 
 int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	int i, ret;
 
 	if (dev_priv->ldu_priv) {
@@ -554,7 +553,7 @@ int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
 	} *cmd;
 
 	fifo_size = sizeof(*cmd) * num_clips;
-	cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+	cmd = VMW_CMD_RESERVE(dev_priv, fifo_size);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -567,6 +566,6 @@ int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
 		cmd[i].body.height = clips->y2 - clips->y1;
 	}
 
-	vmw_fifo_commit(dev_priv, fifo_size);
+	vmw_cmd_commit(dev_priv, fifo_size);
 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
deleted file mode 100644
index e53bc639a7549a5d4f7bff2e26997de7e1f6b481..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/**************************************************************************
- *
- * Copyright 2010 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-#include "vmwgfx_drv.h"
-
-struct vmw_marker {
-	struct list_head head;
-	uint32_t seqno;
-	u64 submitted;
-};
-
-void vmw_marker_queue_init(struct vmw_marker_queue *queue)
-{
-	INIT_LIST_HEAD(&queue->head);
-	queue->lag = 0;
-	queue->lag_time = ktime_get_raw_ns();
-	spin_lock_init(&queue->lock);
-}
-
-void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
-{
-	struct vmw_marker *marker, *next;
-
-	spin_lock(&queue->lock);
-	list_for_each_entry_safe(marker, next, &queue->head, head) {
-		kfree(marker);
-	}
-	spin_unlock(&queue->lock);
-}
-
-int vmw_marker_push(struct vmw_marker_queue *queue,
-		   uint32_t seqno)
-{
-	struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);
-
-	if (unlikely(!marker))
-		return -ENOMEM;
-
-	marker->seqno = seqno;
-	marker->submitted = ktime_get_raw_ns();
-	spin_lock(&queue->lock);
-	list_add_tail(&marker->head, &queue->head);
-	spin_unlock(&queue->lock);
-
-	return 0;
-}
-
-int vmw_marker_pull(struct vmw_marker_queue *queue,
-		   uint32_t signaled_seqno)
-{
-	struct vmw_marker *marker, *next;
-	bool updated = false;
-	u64 now;
-
-	spin_lock(&queue->lock);
-	now = ktime_get_raw_ns();
-
-	if (list_empty(&queue->head)) {
-		queue->lag = 0;
-		queue->lag_time = now;
-		updated = true;
-		goto out_unlock;
-	}
-
-	list_for_each_entry_safe(marker, next, &queue->head, head) {
-		if (signaled_seqno - marker->seqno > (1 << 30))
-			continue;
-
-		queue->lag = now - marker->submitted;
-		queue->lag_time = now;
-		updated = true;
-		list_del(&marker->head);
-		kfree(marker);
-	}
-
-out_unlock:
-	spin_unlock(&queue->lock);
-
-	return (updated) ? 0 : -EBUSY;
-}
-
-static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
-{
-	u64 now;
-
-	spin_lock(&queue->lock);
-	now = ktime_get_raw_ns();
-	queue->lag += now - queue->lag_time;
-	queue->lag_time = now;
-	spin_unlock(&queue->lock);
-	return queue->lag;
-}
-
-
-static bool vmw_lag_lt(struct vmw_marker_queue *queue,
-		       uint32_t us)
-{
-	u64 cond = (u64) us * NSEC_PER_USEC;
-
-	return vmw_fifo_lag(queue) <= cond;
-}
-
-int vmw_wait_lag(struct vmw_private *dev_priv,
-		 struct vmw_marker_queue *queue, uint32_t us)
-{
-	struct vmw_marker *marker;
-	uint32_t seqno;
-	int ret;
-
-	while (!vmw_lag_lt(queue, us)) {
-		spin_lock(&queue->lock);
-		if (list_empty(&queue->head))
-			seqno = atomic_read(&dev_priv->marker_seq);
-		else {
-			marker = list_first_entry(&queue->head,
-						 struct vmw_marker, head);
-			seqno = marker->seqno;
-		}
-		spin_unlock(&queue->lock);
-
-		ret = vmw_wait_seqno(dev_priv, false, seqno, true,
-					3*HZ);
-
-		if (unlikely(ret != 0))
-			return ret;
-
-		(void) vmw_marker_pull(queue, seqno);
-	}
-	return 0;
-}
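
The deleted marker queue decided whether a marker had signaled with a wrap-safe unsigned comparison, "signaled_seqno - marker->seqno > (1 << 30)"; the same idiom survives elsewhere in the fence code. A standalone demonstration of why the subtraction stays correct across 32-bit wrap-around:

#include <stdint.h>
#include <stdio.h>

static int seqno_passed(uint32_t signaled, uint32_t marker)
{
	/* True when marker <= signaled in wrap-around arithmetic:
	 * the unsigned difference stays small unless marker is ahead. */
	return (uint32_t)(signaled - marker) <= (1u << 30);
}

int main(void)
{
	printf("%d\n", seqno_passed(10, 5));		/* 1: already signaled */
	printf("%d\n", seqno_passed(5, 10));		/* 0: not yet */
	printf("%d\n", seqno_passed(3, 0xfffffffdu));	/* 1: signaled across wrap */
	return 0;
}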
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 7f95ed6aa2241c9eb0512054c98dad94ddd71b8e..a372980fe6a54f18b5bf2e6f0ee37d0f37fd56c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -148,7 +148,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
 	}
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		ret = -ENOMEM;
 		goto out_no_fifo;
@@ -170,7 +170,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 	 */
 	BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);
 
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	otable->page_table = mob;
 
 	return 0;
@@ -203,7 +203,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
 		return;
 
 	bo = otable->page_table->pt_bo;
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return;
 
@@ -215,7 +215,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
 	cmd->body.sizeInBytes = 0;
 	cmd->body.validSizeInBytes = 0;
 	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	if (bo) {
 		int ret;
@@ -558,12 +558,12 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
 		BUG_ON(ret != 0);
 	}
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (cmd) {
 		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
 		cmd->header.size = sizeof(cmd->body);
 		cmd->body.mobid = mob->id;
-		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+		vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	}
 
 	if (bo) {
@@ -625,7 +625,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
 
 	vmw_fifo_resource_inc(dev_priv);
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		goto out_no_cmd_space;
 
@@ -636,7 +636,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
 	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
 	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
 
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index cd7ed1650d60c8af9613f9d944b814665044e8d2..d6d282c13b7f7cd5f5a1a10745d2565676e17f09 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -122,7 +122,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
 
 	fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;
 
-	cmds = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+	cmds = VMW_CMD_RESERVE(dev_priv, fifo_size);
 	/* hardware has hung, can't do anything here */
 	if (!cmds)
 		return -ENOMEM;
@@ -169,7 +169,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
 
 	fill_flush(flush, arg->stream_id);
 
-	vmw_fifo_commit(dev_priv, fifo_size);
+	vmw_cmd_commit(dev_priv, fifo_size);
 
 	return 0;
 }
@@ -192,7 +192,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
 	int ret;
 
 	for (;;) {
-		cmds = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmds));
+		cmds = VMW_CMD_RESERVE(dev_priv, sizeof(*cmds));
 		if (cmds)
 			break;
 
@@ -211,7 +211,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
 	cmds->body.items[0].value = false;
 	fill_flush(&cmds->flush, stream_id);
 
-	vmw_fifo_commit(dev_priv, sizeof(*cmds));
+	vmw_cmd_commit(dev_priv, sizeof(*cmds));
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 0b76b3d17d4ce448cdb6a84c621d4f3079413623..0a900afc66ffd095b5883e6430243586c2866f8d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -232,7 +232,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
 int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
 {
 	struct vmw_bo_dirty *dirty = vbo->dirty;
-	pgoff_t num_pages = vbo->base.num_pages;
+	pgoff_t num_pages = vbo->base.mem.num_pages;
 	size_t size, acc_size;
 	int ret;
 	static struct ttm_operation_ctx ctx = {
@@ -413,7 +413,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
 		return ret;
 
 	page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
-	if (unlikely(page_offset >= bo->num_pages)) {
+	if (unlikely(page_offset >= bo->mem.num_pages)) {
 		ret = VM_FAULT_SIGBUS;
 		goto out_unlock;
 	}
@@ -456,7 +456,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
 
 		page_offset = vmf->pgoff -
 			drm_vma_node_start(&bo->base.vma_node);
-		if (page_offset >= bo->num_pages ||
+		if (page_offset >= bo->mem.num_pages ||
 		    vmw_resources_clean(vbo, page_offset,
 					page_offset + PAGE_SIZE,
 					&allowed_prefault)) {
@@ -531,7 +531,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
 
 		page_offset = vmf->pgoff -
 			drm_vma_node_start(&bo->base.vma_node);
-		if (page_offset >= bo->num_pages ||
+		if (page_offset >= bo->mem.num_pages ||
 		    vmw_resources_clean(vbo, page_offset,
 					page_offset + PAGE_SIZE,
 					&allowed_prefault)) {
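
The fault handlers now take the page count from bo->mem rather than from the BO itself; the bounds test on the faulting offset is otherwise unchanged. Modeled standalone with illustrative parameter values:

#include <stdint.h>
#include <stdio.h>

/* Returns 1 (the driver returns VM_FAULT_SIGBUS) when the faulting page
 * lies past the end of the backing resource. */
static int fault_out_of_range(uint64_t pgoff, uint64_t vma_node_start,
			      uint64_t mem_num_pages)
{
	uint64_t page_offset = pgoff - vma_node_start;

	return page_offset >= mem_num_pages;
}

int main(void)
{
	printf("%d\n", fault_out_of_range(10, 8, 4));	/* 0: page 2 of 4 */
	printf("%d\n", fault_out_of_range(13, 8, 4));	/* 1: past the end */
	return 0;
}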
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 00b535831a7a7767b36ded6f6d6957f8b18dc6ee..d1e7b9608145bd048b59b652febef57538e5a94e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -360,7 +360,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
 	int ret;
 
 	if (likely(res->backup)) {
-		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+		BUG_ON(res->backup->base.base.size < size);
 		return 0;
 	}
 
@@ -827,7 +827,7 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
 	dx_query_ctx = dx_query_mob->dx_query_ctx;
 	dev_priv     = dx_query_ctx->dev_priv;
 
-	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -835,7 +835,7 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid    = dx_query_ctx->id;
 
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	/* Triggers a rebind the next time the affected context is bound */
 	dx_query_mob->dx_query_ctx = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 4bdad2f2d13089bba2c2e56392f3bca3974d4011..b0db059b8cfbe4f9ead6c87b1e8d91f947a772f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -132,7 +132,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
 	BUG_ON(!sou->buffer);
 
 	fifo_size = sizeof(*cmd);
-	cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+	cmd = VMW_CMD_RESERVE(dev_priv, fifo_size);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -153,7 +153,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
 	vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
 	cmd->obj.backingStore.pitch = mode->hdisplay * 4;
 
-	vmw_fifo_commit(dev_priv, fifo_size);
+	vmw_cmd_commit(dev_priv, fifo_size);
 
 	sou->defined = true;
 
@@ -181,7 +181,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
 		return 0;
 
 	fifo_size = sizeof(*cmd);
-	cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+	cmd = VMW_CMD_RESERVE(dev_priv, fifo_size);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -189,7 +189,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
 	cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
 	cmd->body.screenId = sou->base.unit;
 
-	vmw_fifo_commit(dev_priv, fifo_size);
+	vmw_cmd_commit(dev_priv, fifo_size);
 
 	/* Force sync */
 	ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
@@ -829,7 +829,7 @@ static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
 static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
 {
 	struct vmw_screen_object_unit *sou;
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
 	struct drm_plane *primary, *cursor;
@@ -946,7 +946,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
 
 int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	int i, ret;
 
 	if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
@@ -992,7 +992,7 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
 	if (depth == 32)
 		depth = 24;
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (!cmd)
 		return -ENOMEM;
 
@@ -1003,7 +1003,7 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
 	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
 	/* Buffer is reserved in vram or GMR */
 	vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
 }
@@ -1029,7 +1029,7 @@ static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
 	int i;
 
 	if (!dirty->num_hits) {
-		vmw_fifo_commit(dirty->dev_priv, 0);
+		vmw_cmd_commit(dirty->dev_priv, 0);
 		return;
 	}
 
@@ -1061,7 +1061,7 @@ static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
 		blit->bottom -= sdirty->top;
 	}
 
-	vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));
+	vmw_cmd_commit(dirty->dev_priv, region_size + sizeof(*cmd));
 
 	sdirty->left = sdirty->top = S32_MAX;
 	sdirty->right = sdirty->bottom = S32_MIN;
@@ -1185,11 +1185,11 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty)
 {
 	if (!dirty->num_hits) {
-		vmw_fifo_commit(dirty->dev_priv, 0);
+		vmw_cmd_commit(dirty->dev_priv, 0);
 		return;
 	}
 
-	vmw_fifo_commit(dirty->dev_priv,
+	vmw_cmd_commit(dirty->dev_priv,
 			sizeof(struct vmw_kms_sou_bo_blit) *
 			dirty->num_hits);
 }
@@ -1295,11 +1295,11 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
 static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
 {
 	if (!dirty->num_hits) {
-		vmw_fifo_commit(dirty->dev_priv, 0);
+		vmw_cmd_commit(dirty->dev_priv, 0);
 		return;
 	}
 
-	vmw_fifo_commit(dirty->dev_priv,
+	vmw_cmd_commit(dirty->dev_priv,
 			sizeof(struct vmw_kms_sou_readback_blit) *
 			dirty->num_hits);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index f328aa5839a222e024dd76a2da6efe3cdda8af55..905ae50aaa2ae46fbde14d6cdcaa9a2fab43d0f0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -222,7 +222,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
 		goto out_no_fifo;
 	}
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		ret = -ENOMEM;
 		goto out_no_fifo;
@@ -233,7 +233,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
 	cmd->body.shid = res->id;
 	cmd->body.type = shader->type;
 	cmd->body.sizeInBytes = shader->size;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	vmw_fifo_resource_inc(dev_priv);
 
 	return 0;
@@ -256,7 +256,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
 
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -266,7 +266,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
 	cmd->body.mobid = bo->mem.start;
 	cmd->body.offsetInBytes = res->backup_offset;
 	res->backup_dirty = false;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
 }
@@ -284,7 +284,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
 
 	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -293,7 +293,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
 	cmd->body.shid = res->id;
 	cmd->body.mobid = SVGA3D_INVALID_ID;
 	cmd->body.offsetInBytes = 0;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	/*
 	 * Create a fence object and fence the backup buffer.
@@ -324,7 +324,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
 	mutex_lock(&dev_priv->binding_mutex);
 	vmw_binding_res_list_scrub(&res->binding_head);
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		mutex_unlock(&dev_priv->binding_mutex);
 		return -ENOMEM;
@@ -333,7 +333,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
 	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.shid = res->id;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	mutex_unlock(&dev_priv->binding_mutex);
 	vmw_resource_release_id(res);
 	vmw_fifo_resource_dec(dev_priv);
@@ -394,7 +394,7 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
 	if (!list_empty(&shader->cotable_head) || !shader->committed)
 		return 0;
 
-	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), shader->ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), shader->ctx->id);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -404,7 +404,7 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
 	cmd->body.shid = shader->id;
 	cmd->body.mobid = res->backup->base.mem.start;
 	cmd->body.offsetInBytes = res->backup_offset;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
 
@@ -481,7 +481,7 @@ static int vmw_dx_shader_scrub(struct vmw_resource *res)
 		return 0;
 
 	WARN_ON_ONCE(!shader->committed);
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -491,7 +491,7 @@ static int vmw_dx_shader_scrub(struct vmw_resource *res)
 	cmd->body.shid = res->id;
 	cmd->body.mobid = SVGA3D_INVALID_ID;
 	cmd->body.offsetInBytes = 0;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	res->id = -1;
 	list_del_init(&shader->cotable_head);
 
@@ -856,8 +856,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
 			return ret;
 		}
 
-		if ((u64)buffer->base.num_pages * PAGE_SIZE <
-		    (u64)size + (u64)offset) {
+		if ((u64)buffer->base.base.size < (u64)size + (u64)offset) {
 			VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
 			ret = -EINVAL;
 			goto out_bad_arg;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 3f97b61dd5d83bf87c437ccf28ec45e95c497728..7369dd86d3a9cfe4c41a50b53daa3260968ca3ad 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -170,7 +170,7 @@ static int vmw_view_create(struct vmw_resource *res)
 		return 0;
 	}
 
-	cmd = VMW_FIFO_RESERVE_DX(res->dev_priv, view->cmd_size, view->ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(res->dev_priv, view->cmd_size, view->ctx->id);
 	if (!cmd) {
 		mutex_unlock(&dev_priv->binding_mutex);
 		return -ENOMEM;
@@ -181,7 +181,7 @@ static int vmw_view_create(struct vmw_resource *res)
 	/* Sid may have changed due to surface eviction. */
 	WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
 	cmd->body.sid = view->srf->id;
-	vmw_fifo_commit(res->dev_priv, view->cmd_size);
+	vmw_cmd_commit(res->dev_priv, view->cmd_size);
 	res->id = view->view_id;
 	list_add_tail(&view->srf_head, &srf->view_list);
 	vmw_cotable_add_resource(view->cotable, &view->cotable_head);
@@ -213,14 +213,14 @@ static int vmw_view_destroy(struct vmw_resource *res)
 	if (!view->committed || res->id == -1)
 		return 0;
 
-	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), view->ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), view->ctx->id);
 	if (!cmd)
 		return -ENOMEM;
 
 	cmd->header.id = vmw_view_destroy_cmds[view->view_type];
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.view_id = view->view_id;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	res->id = -1;
 	list_del_init(&view->cotable_head);
 	list_del_init(&view->srf_head);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 5b04ec047ef36c23d0cdb430d25fa405927376bf..fbe9778813648298fb757c9e98be32099c8e3a65 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -170,7 +170,7 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
 		SVGA3dCmdDefineGBScreenTarget body;
 	} *cmd;
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -188,7 +188,7 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
 	stdu->base.set_gui_x = cmd->body.xRoot;
 	stdu->base.set_gui_y = cmd->body.yRoot;
 
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	stdu->defined = true;
 	stdu->display_width  = mode->hdisplay;
@@ -229,7 +229,7 @@ static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
 	memset(&image, 0, sizeof(image));
 	image.sid = res ? res->id : SVGA3D_INVALID_ID;
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -239,7 +239,7 @@ static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
 	cmd->body.stid   = stdu->base.unit;
 	cmd->body.image  = image;
 
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
 }
@@ -293,7 +293,7 @@ static int vmw_stdu_update_st(struct vmw_private *dev_priv,
 		return -EINVAL;
 	}
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -301,7 +301,7 @@ static int vmw_stdu_update_st(struct vmw_private *dev_priv,
 				 0, stdu->display_width,
 				 0, stdu->display_height);
 
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
 }
@@ -329,7 +329,7 @@ static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
 	if (unlikely(!stdu->defined))
 		return 0;
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -338,7 +338,7 @@ static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
 
 	cmd->body.stid   = stdu->base.unit;
 
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	/* Force sync */
 	ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
@@ -499,7 +499,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
 	size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
 
 	if (!dirty->num_hits) {
-		vmw_fifo_commit(dirty->dev_priv, 0);
+		vmw_cmd_commit(dirty->dev_priv, 0);
 		return;
 	}
 
@@ -512,7 +512,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
 	cmd->body.host.mipmap = 0;
 	cmd->body.transfer = ddirty->transfer;
 	suffix->suffixSize = sizeof(*suffix);
-	suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
+	suffix->maximumOffset = ddirty->buf->base.base.size;
 
 	if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
 		blit_size += sizeof(struct vmw_stdu_update);
@@ -522,7 +522,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
 					 ddirty->top, ddirty->bottom);
 	}
 
-	vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
+	vmw_cmd_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
 
 	stdu->display_srf->res.res_dirty = true;
 	ddirty->left = ddirty->top = S32_MAX;
@@ -628,7 +628,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
 
 
 		dev_priv = vmw_priv(stdu->base.crtc.dev);
-		cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+		cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 		if (!cmd)
 			goto out_cleanup;
 
@@ -636,7 +636,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
 					 region.x1, region.x2,
 					 region.y1, region.y2);
 
-		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+		vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	}
 
 out_cleanup:
@@ -795,7 +795,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
 	size_t commit_size;
 
 	if (!dirty->num_hits) {
-		vmw_fifo_commit(dirty->dev_priv, 0);
+		vmw_cmd_commit(dirty->dev_priv, 0);
 		return;
 	}
 
@@ -817,7 +817,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
 	vmw_stdu_populate_update(update, stdu->base.unit, sdirty->left,
 				 sdirty->right, sdirty->top, sdirty->bottom);
 
-	vmw_fifo_commit(dirty->dev_priv, commit_size);
+	vmw_cmd_commit(dirty->dev_priv, commit_size);
 
 	sdirty->left = sdirty->top = S32_MAX;
 	sdirty->right = sdirty->bottom = S32_MIN;
@@ -1238,7 +1238,7 @@ static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane  *update,
 	vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
 
 	suffix->suffixSize = sizeof(*suffix);
-	suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE;
+	suffix->maximumOffset = vfbbo->buffer->base.base.size;
 
 	vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
 				 bb->y1, bb->y2);
@@ -1713,7 +1713,7 @@ static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = {
 static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
 {
 	struct vmw_screen_target_display_unit *stdu;
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
 	struct drm_plane *primary, *cursor;
@@ -1861,7 +1861,7 @@ static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu)
  */
 int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	int i, ret;
 
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
index 193192456663efb29782656fe28656c9bbe63377..1dd042a20a66ce6f2e8d773e3733595bd405db57 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
@@ -99,7 +99,7 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
 	if (!list_empty(&so->cotable_head) || !so->committed)
 		return 0;
 
-	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id);
 	if (!cmd)
 		return -ENOMEM;
 
@@ -109,7 +109,7 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
 	cmd->body.mobid = res->backup->base.mem.start;
 	cmd->body.offsetInBytes = res->backup_offset;
 	cmd->body.sizeInBytes = so->size;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	vmw_cotable_add_resource(so->cotable, &so->cotable_head);
 
@@ -172,7 +172,7 @@ static int vmw_dx_streamoutput_scrub(struct vmw_resource *res)
 
 	WARN_ON_ONCE(!so->committed);
 
-	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id);
 	if (!cmd)
 		return -ENOMEM;
 
@@ -182,7 +182,7 @@ static int vmw_dx_streamoutput_scrub(struct vmw_resource *res)
 	cmd->body.mobid = SVGA3D_INVALID_ID;
 	cmd->body.offsetInBytes = 0;
 	cmd->body.sizeInBytes = so->size;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	res->id = -1;
 	list_del_init(&so->cotable_head);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3914bfee0533b578a176a2de9de51ceb33a2ea82..f6cab77075a0467623545c17a7232747dc74c455 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -372,12 +372,12 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
 
 	if (res->id != -1) {
 
-		cmd = VMW_FIFO_RESERVE(dev_priv, vmw_surface_destroy_size());
+		cmd = VMW_CMD_RESERVE(dev_priv, vmw_surface_destroy_size());
 		if (unlikely(!cmd))
 			return;
 
 		vmw_surface_destroy_encode(res->id, cmd);
-		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+		vmw_cmd_commit(dev_priv, vmw_surface_destroy_size());
 
 		/*
 		 * used_memory_size_atomic, or separate lock
@@ -440,14 +440,14 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
 	 */
 
 	submit_size = vmw_surface_define_size(srf);
-	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
 	if (unlikely(!cmd)) {
 		ret = -ENOMEM;
 		goto out_no_fifo;
 	}
 
 	vmw_surface_define_encode(srf, cmd);
-	vmw_fifo_commit(dev_priv, submit_size);
+	vmw_cmd_commit(dev_priv, submit_size);
 	vmw_fifo_resource_inc(dev_priv);
 
 	/*
@@ -492,14 +492,14 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
 
 	BUG_ON(!val_buf->bo);
 	submit_size = vmw_surface_dma_size(srf);
-	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
 	if (unlikely(!cmd))
 		return -ENOMEM;
 
 	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
 	vmw_surface_dma_encode(srf, cmd, &ptr, bind);
 
-	vmw_fifo_commit(dev_priv, submit_size);
+	vmw_cmd_commit(dev_priv, submit_size);
 
 	/*
 	 * Create a fence object and fence the backup buffer.
@@ -578,12 +578,12 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
 	 */
 
 	submit_size = vmw_surface_destroy_size();
-	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
 	if (unlikely(!cmd))
 		return -ENOMEM;
 
 	vmw_surface_destroy_encode(res->id, cmd);
-	vmw_fifo_commit(dev_priv, submit_size);
+	vmw_cmd_commit(dev_priv, submit_size);
 
 	/*
 	 * Surface memory usage accounting.
@@ -1121,7 +1121,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 		submit_len = sizeof(*cmd);
 	}
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, submit_len);
+	cmd = VMW_CMD_RESERVE(dev_priv, submit_len);
 	cmd2 = (typeof(cmd2))cmd;
 	cmd3 = (typeof(cmd3))cmd;
 	cmd4 = (typeof(cmd4))cmd;
@@ -1188,7 +1188,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 		cmd->body.size.depth = metadata->base_size.depth;
 	}
 
-	vmw_fifo_commit(dev_priv, submit_len);
+	vmw_cmd_commit(dev_priv, submit_len);
 
 	return 0;
 
@@ -1219,7 +1219,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
 
 	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
 
-	cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
+	cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
 	if (unlikely(!cmd1))
 		return -ENOMEM;
 
@@ -1233,7 +1233,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
 		cmd2->header.size = sizeof(cmd2->body);
 		cmd2->body.sid = res->id;
 	}
-	vmw_fifo_commit(dev_priv, submit_size);
+	vmw_cmd_commit(dev_priv, submit_size);
 
 	if (res->backup->dirty && res->backup_dirty) {
 		/* We've just made a full upload. Clear dirty regions. */
@@ -1272,7 +1272,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
 	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
-	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
 	if (unlikely(!cmd))
 		return -ENOMEM;
 
@@ -1295,7 +1295,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
 	cmd3->body.sid = res->id;
 	cmd3->body.mobid = SVGA3D_INVALID_ID;
 
-	vmw_fifo_commit(dev_priv, submit_size);
+	vmw_cmd_commit(dev_priv, submit_size);
 
 	/*
 	 * Create a fence object and fence the backup buffer.
@@ -1328,7 +1328,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
 	vmw_binding_res_list_scrub(&res->binding_head);
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(!cmd)) {
 		mutex_unlock(&dev_priv->binding_mutex);
 		return -ENOMEM;
@@ -1337,7 +1337,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.sid = res->id;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	mutex_unlock(&dev_priv->binding_mutex);
 	vmw_resource_release_id(res);
 	vmw_fifo_resource_dec(dev_priv);
@@ -1550,8 +1550,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 					 &res->backup,
 					 &user_srf->backup_base);
 		if (ret == 0) {
-			if (res->backup->base.num_pages * PAGE_SIZE <
-			    res->backup_size) {
+			if (res->backup->base.base.size < res->backup_size) {
 				VMW_DEBUG_USER("Surface backup buffer too small.\n");
 				vmw_bo_unreference(&res->backup);
 				ret = -EINVAL;
@@ -1614,7 +1613,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 	if (res->backup) {
 		rep->buffer_map_handle =
 			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
-		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+		rep->buffer_size = res->backup->base.base.size;
 		rep->buffer_handle = backup_handle;
 	} else {
 		rep->buffer_map_handle = 0;
@@ -1692,7 +1691,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
 	rep->crep.buffer_handle = backup_handle;
 	rep->crep.buffer_map_handle =
 		drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
-	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+	rep->crep.buffer_size = srf->res.backup->base.base.size;
 
 	rep->creq.version = drm_vmw_gb_surface_v1;
 	rep->creq.svga3d_flags_upper_32_bits =
@@ -1896,7 +1895,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res)
 		goto out;
 
 	alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2));
-	cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
+	cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
 	if (!cmd)
 		return -ENOMEM;
 
@@ -1932,7 +1931,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res)
 		}
 
 	}
-	vmw_fifo_commit(dev_priv, alloc_size);
+	vmw_cmd_commit(dev_priv, alloc_size);
  out:
 	memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
 	       dirty->num_subres);
@@ -2032,14 +2031,14 @@ static int vmw_surface_clean(struct vmw_resource *res)
 	} *cmd;
 
 	alloc_size = sizeof(*cmd);
-	cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
+	cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
 	if (!cmd)
 		return -ENOMEM;
 
 	cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.sid = res->id;
-	vmw_fifo_commit(dev_priv, alloc_size);
+	vmw_cmd_commit(dev_priv, alloc_size);
 
 	return 0;
 }
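
The size checks in this file move from "num_pages * PAGE_SIZE" to the GEM object's byte size (base.base.size). The two agree only up to page rounding, as the quick standalone illustration below shows; in practice BO sizes are page-aligned, so the checks are equivalent there:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* illustrative; the real value is arch-dependent */

int main(void)
{
	size_t size = 5000;	/* object size in bytes */
	size_t num_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;

	printf("base.size           = %zu\n", size);			/* 5000 */
	printf("num_pages*PAGE_SIZE = %zu\n", num_pages * PAGE_SIZE);	/* 8192 */
	return 0;
}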
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index 155ca3a5c7e55400596bec864bb0cc1d0ba5aebc..e8e79de255cf73c136b4ca283c2697f0a0b315df 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -5,7 +5,6 @@
  * Copyright (C) 2007-2019 VMware, Inc. All rights reserved.
  */
 #include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 6a04261ce760e4fb191f8fe6026d1969b17c1d43..dbb068830d80015a5ec724d84f2bc66ec9df6bf6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -309,7 +309,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
  */
 static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
 {
-	struct device *dev = vmw_tt->dev_priv->dev->dev;
+	struct device *dev = vmw_tt->dev_priv->drm.dev;
 
 	dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
 	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
@@ -330,7 +330,7 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
  */
 static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
 {
-	struct device *dev = vmw_tt->dev_priv->dev->dev;
+	struct device *dev = vmw_tt->dev_priv->drm.dev;
 
 	return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
 }
@@ -385,7 +385,7 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 		sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
 				vsgt->num_pages, 0,
 				(unsigned long) vsgt->num_pages << PAGE_SHIFT,
-				dma_get_max_seg_size(dev_priv->dev->dev),
+				dma_get_max_seg_size(dev_priv->drm.dev),
 				NULL, 0, GFP_KERNEL);
 		if (IS_ERR(sg)) {
 			ret = PTR_ERR(sg);
@@ -611,8 +611,8 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
 	vmw_be->mob = NULL;
 
 	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
-		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags,
-				      ttm_cached);
+		ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
+				     ttm_cached);
 	else
 		ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
 				  ttm_cached);
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 74db5a840bed07b5f53eeca2fa58a508a54cb7d0..b293c67230eff50a499c68177f5be0d71b160cd7 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -220,8 +220,8 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
 
 	xen_obj->sgt_imported = sgt;
 
-	ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
-					       NULL, xen_obj->num_pages);
+	ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
+					 xen_obj->num_pages);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index c8f7b21fa09e6fe1fb0a095eb631631eaa5a3944..78d787afe5949fbfcc04ccad9713c0181f5a0dbb 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -438,15 +438,10 @@ static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
 	.atomic_disable = zx_plane_atomic_disable,
 };
 
-static void zx_plane_destroy(struct drm_plane *plane)
-{
-	drm_plane_cleanup(plane);
-}
-
 static const struct drm_plane_funcs zx_plane_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = zx_plane_destroy,
+	.destroy = drm_plane_cleanup,
 	.reset = drm_atomic_helper_plane_reset,
 	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
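
zx_plane_destroy only forwarded to drm_plane_cleanup, whose signature already matches the .destroy callback, so the funcs table can point at the helper directly. A minimal model of the wrapper-removal pattern (all types here are illustrative):

#include <stdio.h>

struct plane;

struct plane_funcs {
	void (*destroy)(struct plane *p);
};

struct plane {
	const struct plane_funcs *funcs;
};

static void plane_cleanup(struct plane *p)	/* stands in for drm_plane_cleanup */
{
	printf("cleaned up %p\n", (void *)p);
}

/* Before: a wrapper that only forwarded the call. After: point at the
 * helper directly, as the patch does. */
static const struct plane_funcs funcs = {
	.destroy = plane_cleanup,
};

int main(void)
{
	struct plane p = { .funcs = &funcs };

	p.funcs->destroy(&p);
	return 0;
}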
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index b4a31d506fccf0cf733e91ea5c39ed39f959af45..e617f60afeea3d3e7ad36058feb2eb525cb14bb9 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -310,10 +310,6 @@ static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
 			/* unused */
 		} , {
 			/* unused */
-		} , {
-			/* unused */
-		} , {
-			/* unused */
 		},
 	};
 	/* can't use #7 and #8 for line active and pixel active counters */
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index 9ae9669e46eae92d12906d86bcf2ef80c75488aa..3506a3534294aff11e3ae719176cf55e4207e377 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -569,8 +569,7 @@ static int mei_hdcp_verify_mprime(struct device *dev,
 	verify_mprime_in->header.api_version = HDCP_API_VERSION;
 	verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
 	verify_mprime_in->header.status = ME_HDCP_STATUS_SUCCESS;
-	verify_mprime_in->header.buffer_len =
-			WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN;
+	verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header);
 
 	verify_mprime_in->port.integrated_port_type = data->port_type;
 	verify_mprime_in->port.physical_port = (u8)data->fw_ddi;
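
The fix derives header.buffer_len from the actual allocation (cmd_size) instead of the fixed single-stream minimum, so repeater requests carrying several streams are described correctly. A standalone model with illustrative types and sizes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct hdcp_cmd_header { uint32_t buffer_len; };	/* illustrative layout */
struct hdcp_stream { uint8_t data[8]; };		/* illustrative layout */

int main(void)
{
	size_t n_streams = 3;	/* a repeater request may carry several */
	size_t cmd_size = sizeof(struct hdcp_cmd_header) +
			  n_streams * sizeof(struct hdcp_stream);
	struct hdcp_cmd_header *hdr = calloc(1, cmd_size);

	if (!hdr)
		return 1;
	/* Count everything after the header, however many streams were
	 * actually allocated, instead of a fixed one-stream minimum: */
	hdr->buffer_len = cmd_size - sizeof(*hdr);
	printf("buffer_len = %u\n", (unsigned)hdr->buffer_len);	/* 24 */
	free(hdr);
	return 0;
}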
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 790393d1e31896d9d18f33e2f872c18f3e794e8e..b67c4327d307f0ec1d79381b9bc61e60244399f9 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1643,7 +1643,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
 		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
 		res = pdev->resource + bar_idx;
-		size = ilog2(resource_size(res)) - 20;
+		size = pci_rebar_bytes_to_size(resource_size(res));
 		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
 		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
 		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
@@ -3596,8 +3596,16 @@ u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
 		return 0;
 
 	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
-	return (cap & PCI_REBAR_CAP_SIZES) >> 4;
+	cap &= PCI_REBAR_CAP_SIZES;
+
+	/* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
+	if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
+	    bar == 0 && cap == 0x7000)
+		cap = 0x3f000;
+
+	return cap >> 4;
 }
+EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
 
 /**
  * pci_rebar_get_current_size - get the current size of a BAR
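
pci_rebar_bytes_to_size() replaces the open-coded "ilog2(...) - 20"; both encode a resizable-BAR size as log2 of the size in MiB. The quirk above widens that one device's advertised-sizes bitmap from 0x7000 (256 MiB to 1 GiB) to 0x3f000 (256 MiB to 8 GiB), each bit n standing for size n after the >> 4. A standalone check of the size encoding:

#include <stdint.h>
#include <stdio.h>

static int ilog2_u64(uint64_t v)	/* userspace stand-in for ilog2() */
{
	int log = 0;

	while (v >>= 1)
		log++;
	return log;
}

static int rebar_bytes_to_size(uint64_t bytes)
{
	return ilog2_u64(bytes) - 20;	/* 2^20 bytes (1 MiB) encodes as 0 */
}

int main(void)
{
	printf("%d\n", rebar_bytes_to_size(1ULL << 20));	/*  0:   1 MiB */
	printf("%d\n", rebar_bytes_to_size(256ULL << 20));	/*  8: 256 MiB */
	printf("%d\n", rebar_bytes_to_size(8ULL << 30));	/* 13:   8 GiB */
	return 0;
}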
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index a7bdf0b1d45db8de894ef0f4a20b9d155c17e122..ef7c4661314faaa1b217b958ca4848b256effc68 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -626,7 +626,6 @@ int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
 			  struct resource *res);
 #endif
 
-u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size);
 static inline u64 pci_rebar_size_to_bytes(int size)
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index b6908db534c257e65118a4550b08c1793447e6f3..90270f8114ed70dea7b58d1e86da5632cb13385a 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
 obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
 obj-$(CONFIG_MTK_SCPSYS_PM_DOMAINS) += mtk-pm-domains.o
 obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o
+obj-$(CONFIG_MTK_MMSYS) += mtk-mutex.o
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/soc/mediatek/mtk-mutex.c
similarity index 53%
rename from drivers/gpu/drm/mediatek/mtk_drm_ddp.c
rename to drivers/soc/mediatek/mtk-mutex.c
index 1f99db6b1a4223985c13cc850955aa24110b5493..f531b119da7a9840406cd7508968b7d754bd60b4 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -9,12 +9,11 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
+#include <linux/soc/mediatek/mtk-mutex.h>
 
-#include "mtk_drm_ddp.h"
-#include "mtk_drm_ddp_comp.h"
-
-#define MT2701_DISP_MUTEX0_MOD0			0x2c
-#define MT2701_DISP_MUTEX0_SOF0			0x30
+#define MT2701_MUTEX0_MOD0			0x2c
+#define MT2701_MUTEX0_SOF0			0x30
 
 #define DISP_REG_MUTEX_EN(n)			(0x20 + 0x20 * (n))
 #define DISP_REG_MUTEX(n)			(0x24 + 0x20 * (n))
@@ -79,33 +78,32 @@
 #define MT2701_MUTEX_MOD_DISP_RDMA0		10
 #define MT2701_MUTEX_MOD_DISP_RDMA1		12
 
-#define MUTEX_SOF_SINGLE_MODE		0
-#define MUTEX_SOF_DSI0			1
-#define MUTEX_SOF_DSI1			2
-#define MUTEX_SOF_DPI0			3
-#define MUTEX_SOF_DPI1			4
-#define MUTEX_SOF_DSI2			5
-#define MUTEX_SOF_DSI3			6
-#define MT8167_MUTEX_SOF_DPI0		2
-#define MT8167_MUTEX_SOF_DPI1		3
-
-
-struct mtk_disp_mutex {
+#define MT2712_MUTEX_SOF_SINGLE_MODE		0
+#define MT2712_MUTEX_SOF_DSI0			1
+#define MT2712_MUTEX_SOF_DSI1			2
+#define MT2712_MUTEX_SOF_DPI0			3
+#define MT2712_MUTEX_SOF_DPI1			4
+#define MT2712_MUTEX_SOF_DSI2			5
+#define MT2712_MUTEX_SOF_DSI3			6
+#define MT8167_MUTEX_SOF_DPI0			2
+#define MT8167_MUTEX_SOF_DPI1			3
+
+struct mtk_mutex {
 	int id;
 	bool claimed;
 };
 
-enum mtk_ddp_mutex_sof_id {
-	DDP_MUTEX_SOF_SINGLE_MODE,
-	DDP_MUTEX_SOF_DSI0,
-	DDP_MUTEX_SOF_DSI1,
-	DDP_MUTEX_SOF_DPI0,
-	DDP_MUTEX_SOF_DPI1,
-	DDP_MUTEX_SOF_DSI2,
-	DDP_MUTEX_SOF_DSI3,
+enum mtk_mutex_sof_id {
+	MUTEX_SOF_SINGLE_MODE,
+	MUTEX_SOF_DSI0,
+	MUTEX_SOF_DSI1,
+	MUTEX_SOF_DPI0,
+	MUTEX_SOF_DPI1,
+	MUTEX_SOF_DSI2,
+	MUTEX_SOF_DSI3,
 };
 
-struct mtk_ddp_data {
+struct mtk_mutex_data {
 	const unsigned int *mutex_mod;
 	const unsigned int *mutex_sof;
 	const unsigned int mutex_mod_reg;
@@ -113,12 +111,12 @@ struct mtk_ddp_data {
 	const bool no_clk;
 };
 
-struct mtk_ddp {
+struct mtk_mutex_ctx {
 	struct device			*dev;
 	struct clk			*clk;
 	void __iomem			*regs;
-	struct mtk_disp_mutex		mutex[10];
-	const struct mtk_ddp_data	*data;
+	struct mtk_mutex		mutex[10];
+	const struct mtk_mutex_data	*data;
 };
 
 static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
@@ -183,150 +181,155 @@ static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
 	[DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1,
 };
 
-static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = {
-	[DDP_MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
-	[DDP_MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
-	[DDP_MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
-	[DDP_MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0,
-	[DDP_MUTEX_SOF_DPI1] = MUTEX_SOF_DPI1,
-	[DDP_MUTEX_SOF_DSI2] = MUTEX_SOF_DSI2,
-	[DDP_MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
+static const unsigned int mt2712_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
+	[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+	[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
+	[MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
+	[MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0,
+	[MUTEX_SOF_DPI1] = MUTEX_SOF_DPI1,
+	[MUTEX_SOF_DSI2] = MUTEX_SOF_DSI2,
+	[MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
 };
 
-static const unsigned int mt8167_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = {
-	[DDP_MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
-	[DDP_MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
-	[DDP_MUTEX_SOF_DPI0] = MT8167_MUTEX_SOF_DPI0,
-	[DDP_MUTEX_SOF_DPI1] = MT8167_MUTEX_SOF_DPI1,
+static const unsigned int mt8167_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
+	[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+	[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
+	[MUTEX_SOF_DPI0] = MT8167_MUTEX_SOF_DPI0,
+	[MUTEX_SOF_DPI1] = MT8167_MUTEX_SOF_DPI1,
 };
 
-static const struct mtk_ddp_data mt2701_ddp_driver_data = {
+static const struct mtk_mutex_data mt2701_mutex_driver_data = {
 	.mutex_mod = mt2701_mutex_mod,
 	.mutex_sof = mt2712_mutex_sof,
-	.mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
-	.mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+	.mutex_mod_reg = MT2701_MUTEX0_MOD0,
+	.mutex_sof_reg = MT2701_MUTEX0_SOF0,
 };
 
-static const struct mtk_ddp_data mt2712_ddp_driver_data = {
+static const struct mtk_mutex_data mt2712_mutex_driver_data = {
 	.mutex_mod = mt2712_mutex_mod,
 	.mutex_sof = mt2712_mutex_sof,
-	.mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
-	.mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+	.mutex_mod_reg = MT2701_MUTEX0_MOD0,
+	.mutex_sof_reg = MT2701_MUTEX0_SOF0,
 };
 
-static const struct mtk_ddp_data mt8167_ddp_driver_data = {
+static const struct mtk_mutex_data mt8167_mutex_driver_data = {
 	.mutex_mod = mt8167_mutex_mod,
 	.mutex_sof = mt8167_mutex_sof,
-	.mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
-	.mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+	.mutex_mod_reg = MT2701_MUTEX0_MOD0,
+	.mutex_sof_reg = MT2701_MUTEX0_SOF0,
 	.no_clk = true,
 };
 
-static const struct mtk_ddp_data mt8173_ddp_driver_data = {
+static const struct mtk_mutex_data mt8173_mutex_driver_data = {
 	.mutex_mod = mt8173_mutex_mod,
 	.mutex_sof = mt2712_mutex_sof,
-	.mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
-	.mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+	.mutex_mod_reg = MT2701_MUTEX0_MOD0,
+	.mutex_sof_reg = MT2701_MUTEX0_SOF0,
 };
 
-struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id)
+struct mtk_mutex *mtk_mutex_get(struct device *dev)
 {
-	struct mtk_ddp *ddp = dev_get_drvdata(dev);
-
-	if (id >= 10)
-		return ERR_PTR(-EINVAL);
-	if (ddp->mutex[id].claimed)
-		return ERR_PTR(-EBUSY);
+	struct mtk_mutex_ctx *mtx = dev_get_drvdata(dev);
+	int i;
 
-	ddp->mutex[id].claimed = true;
+	for (i = 0; i < 10; i++)
+		if (!mtx->mutex[i].claimed) {
+			mtx->mutex[i].claimed = true;
+			return &mtx->mutex[i];
+		}
 
-	return &ddp->mutex[id];
+	return ERR_PTR(-EBUSY);
 }
+EXPORT_SYMBOL_GPL(mtk_mutex_get);
 
-void mtk_disp_mutex_put(struct mtk_disp_mutex *mutex)
+void mtk_mutex_put(struct mtk_mutex *mutex)
 {
-	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
-					   mutex[mutex->id]);
+	struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+						 mutex[mutex->id]);
 
-	WARN_ON(&ddp->mutex[mutex->id] != mutex);
+	WARN_ON(&mtx->mutex[mutex->id] != mutex);
 
 	mutex->claimed = false;
 }
+EXPORT_SYMBOL_GPL(mtk_mutex_put);
 
-int mtk_disp_mutex_prepare(struct mtk_disp_mutex *mutex)
+int mtk_mutex_prepare(struct mtk_mutex *mutex)
 {
-	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
-					   mutex[mutex->id]);
-	return clk_prepare_enable(ddp->clk);
+	struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+						 mutex[mutex->id]);
+	return clk_prepare_enable(mtx->clk);
 }
+EXPORT_SYMBOL_GPL(mtk_mutex_prepare);
 
-void mtk_disp_mutex_unprepare(struct mtk_disp_mutex *mutex)
+void mtk_mutex_unprepare(struct mtk_mutex *mutex)
 {
-	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
-					   mutex[mutex->id]);
-	clk_disable_unprepare(ddp->clk);
+	struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+						 mutex[mutex->id]);
+	clk_disable_unprepare(mtx->clk);
 }
+EXPORT_SYMBOL_GPL(mtk_mutex_unprepare);
 
-void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
-			     enum mtk_ddp_comp_id id)
+void mtk_mutex_add_comp(struct mtk_mutex *mutex,
+			enum mtk_ddp_comp_id id)
 {
-	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
-					   mutex[mutex->id]);
+	struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+						 mutex[mutex->id]);
 	unsigned int reg;
 	unsigned int sof_id;
 	unsigned int offset;
 
-	WARN_ON(&ddp->mutex[mutex->id] != mutex);
+	WARN_ON(&mtx->mutex[mutex->id] != mutex);
 
 	switch (id) {
 	case DDP_COMPONENT_DSI0:
-		sof_id = DDP_MUTEX_SOF_DSI0;
+		sof_id = MUTEX_SOF_DSI0;
 		break;
 	case DDP_COMPONENT_DSI1:
-		sof_id = DDP_MUTEX_SOF_DSI0;
+		sof_id = MUTEX_SOF_DSI0;
 		break;
 	case DDP_COMPONENT_DSI2:
-		sof_id = DDP_MUTEX_SOF_DSI2;
+		sof_id = MUTEX_SOF_DSI2;
 		break;
 	case DDP_COMPONENT_DSI3:
-		sof_id = DDP_MUTEX_SOF_DSI3;
+		sof_id = MUTEX_SOF_DSI3;
 		break;
 	case DDP_COMPONENT_DPI0:
-		sof_id = DDP_MUTEX_SOF_DPI0;
+		sof_id = MUTEX_SOF_DPI0;
 		break;
 	case DDP_COMPONENT_DPI1:
-		sof_id = DDP_MUTEX_SOF_DPI1;
+		sof_id = MUTEX_SOF_DPI1;
 		break;
 	default:
-		if (ddp->data->mutex_mod[id] < 32) {
-			offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
+		if (mtx->data->mutex_mod[id] < 32) {
+			offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg,
 						    mutex->id);
-			reg = readl_relaxed(ddp->regs + offset);
-			reg |= 1 << ddp->data->mutex_mod[id];
-			writel_relaxed(reg, ddp->regs + offset);
+			reg = readl_relaxed(mtx->regs + offset);
+			reg |= 1 << mtx->data->mutex_mod[id];
+			writel_relaxed(reg, mtx->regs + offset);
 		} else {
 			offset = DISP_REG_MUTEX_MOD2(mutex->id);
-			reg = readl_relaxed(ddp->regs + offset);
-			reg |= 1 << (ddp->data->mutex_mod[id] - 32);
-			writel_relaxed(reg, ddp->regs + offset);
+			reg = readl_relaxed(mtx->regs + offset);
+			reg |= 1 << (mtx->data->mutex_mod[id] - 32);
+			writel_relaxed(reg, mtx->regs + offset);
 		}
 		return;
 	}
 
-	writel_relaxed(ddp->data->mutex_sof[sof_id],
-		       ddp->regs +
-		       DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg, mutex->id));
+	writel_relaxed(mtx->data->mutex_sof[sof_id],
+		       mtx->regs +
+		       DISP_REG_MUTEX_SOF(mtx->data->mutex_sof_reg, mutex->id));
 }
+EXPORT_SYMBOL_GPL(mtk_mutex_add_comp);
 
-void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
-				enum mtk_ddp_comp_id id)
+void mtk_mutex_remove_comp(struct mtk_mutex *mutex,
+			   enum mtk_ddp_comp_id id)
 {
-	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
-					   mutex[mutex->id]);
+	struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+						 mutex[mutex->id]);
 	unsigned int reg;
 	unsigned int offset;
 
-	WARN_ON(&ddp->mutex[mutex->id] != mutex);
+	WARN_ON(&mtx->mutex[mutex->id] != mutex);
 
 	switch (id) {
 	case DDP_COMPONENT_DSI0:
@@ -336,129 +339,136 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
 	case DDP_COMPONENT_DPI0:
 	case DDP_COMPONENT_DPI1:
 		writel_relaxed(MUTEX_SOF_SINGLE_MODE,
-			       ddp->regs +
-			       DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg,
+			       mtx->regs +
+			       DISP_REG_MUTEX_SOF(mtx->data->mutex_sof_reg,
 						  mutex->id));
 		break;
 	default:
-		if (ddp->data->mutex_mod[id] < 32) {
-			offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
+		if (mtx->data->mutex_mod[id] < 32) {
+			offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg,
 						    mutex->id);
-			reg = readl_relaxed(ddp->regs + offset);
-			reg &= ~(1 << ddp->data->mutex_mod[id]);
-			writel_relaxed(reg, ddp->regs + offset);
+			reg = readl_relaxed(mtx->regs + offset);
+			reg &= ~(1 << mtx->data->mutex_mod[id]);
+			writel_relaxed(reg, mtx->regs + offset);
 		} else {
 			offset = DISP_REG_MUTEX_MOD2(mutex->id);
-			reg = readl_relaxed(ddp->regs + offset);
-			reg &= ~(1 << (ddp->data->mutex_mod[id] - 32));
-			writel_relaxed(reg, ddp->regs + offset);
+			reg = readl_relaxed(mtx->regs + offset);
+			reg &= ~(1 << (mtx->data->mutex_mod[id] - 32));
+			writel_relaxed(reg, mtx->regs + offset);
 		}
 		break;
 	}
 }
+EXPORT_SYMBOL_GPL(mtk_mutex_remove_comp);
 
-void mtk_disp_mutex_enable(struct mtk_disp_mutex *mutex)
+void mtk_mutex_enable(struct mtk_mutex *mutex)
 {
-	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
-					   mutex[mutex->id]);
+	struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+						 mutex[mutex->id]);
 
-	WARN_ON(&ddp->mutex[mutex->id] != mutex);
+	WARN_ON(&mtx->mutex[mutex->id] != mutex);
 
-	writel(1, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
+	writel(1, mtx->regs + DISP_REG_MUTEX_EN(mutex->id));
 }
+EXPORT_SYMBOL_GPL(mtk_mutex_enable);
 
-void mtk_disp_mutex_disable(struct mtk_disp_mutex *mutex)
+void mtk_mutex_disable(struct mtk_mutex *mutex)
 {
-	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
-					   mutex[mutex->id]);
+	struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+						 mutex[mutex->id]);
 
-	WARN_ON(&ddp->mutex[mutex->id] != mutex);
+	WARN_ON(&mtx->mutex[mutex->id] != mutex);
 
-	writel(0, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
+	writel(0, mtx->regs + DISP_REG_MUTEX_EN(mutex->id));
 }
+EXPORT_SYMBOL_GPL(mtk_mutex_disable);
 
-void mtk_disp_mutex_acquire(struct mtk_disp_mutex *mutex)
+void mtk_mutex_acquire(struct mtk_mutex *mutex)
 {
-	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
-					   mutex[mutex->id]);
+	struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+						 mutex[mutex->id]);
 	u32 tmp;
 
-	writel(1, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
-	writel(1, ddp->regs + DISP_REG_MUTEX(mutex->id));
-	if (readl_poll_timeout_atomic(ddp->regs + DISP_REG_MUTEX(mutex->id),
+	writel(1, mtx->regs + DISP_REG_MUTEX_EN(mutex->id));
+	writel(1, mtx->regs + DISP_REG_MUTEX(mutex->id));
+	if (readl_poll_timeout_atomic(mtx->regs + DISP_REG_MUTEX(mutex->id),
 				      tmp, tmp & INT_MUTEX, 1, 10000))
 		pr_err("could not acquire mutex %d\n", mutex->id);
 }
+EXPORT_SYMBOL_GPL(mtk_mutex_acquire);
 
-void mtk_disp_mutex_release(struct mtk_disp_mutex *mutex)
+void mtk_mutex_release(struct mtk_mutex *mutex)
 {
-	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
-					   mutex[mutex->id]);
+	struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+						 mutex[mutex->id]);
 
-	writel(0, ddp->regs + DISP_REG_MUTEX(mutex->id));
+	writel(0, mtx->regs + DISP_REG_MUTEX(mutex->id));
 }
+EXPORT_SYMBOL_GPL(mtk_mutex_release);
 
-static int mtk_ddp_probe(struct platform_device *pdev)
+static int mtk_mutex_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct mtk_ddp *ddp;
+	struct mtk_mutex_ctx *mtx;
 	struct resource *regs;
 	int i;
 
-	ddp = devm_kzalloc(dev, sizeof(*ddp), GFP_KERNEL);
-	if (!ddp)
+	mtx = devm_kzalloc(dev, sizeof(*mtx), GFP_KERNEL);
+	if (!mtx)
 		return -ENOMEM;
 
 	for (i = 0; i < 10; i++)
-		ddp->mutex[i].id = i;
+		mtx->mutex[i].id = i;
 
-	ddp->data = of_device_get_match_data(dev);
+	mtx->data = of_device_get_match_data(dev);
 
-	if (!ddp->data->no_clk) {
-		ddp->clk = devm_clk_get(dev, NULL);
-		if (IS_ERR(ddp->clk)) {
-			if (PTR_ERR(ddp->clk) != -EPROBE_DEFER)
+	if (!mtx->data->no_clk) {
+		mtx->clk = devm_clk_get(dev, NULL);
+		if (IS_ERR(mtx->clk)) {
+			if (PTR_ERR(mtx->clk) != -EPROBE_DEFER)
 				dev_err(dev, "Failed to get clock\n");
-			return PTR_ERR(ddp->clk);
+			return PTR_ERR(mtx->clk);
 		}
 	}
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	ddp->regs = devm_ioremap_resource(dev, regs);
-	if (IS_ERR(ddp->regs)) {
+	mtx->regs = devm_ioremap_resource(dev, regs);
+	if (IS_ERR(mtx->regs)) {
 		dev_err(dev, "Failed to map mutex registers\n");
-		return PTR_ERR(ddp->regs);
+		return PTR_ERR(mtx->regs);
 	}
 
-	platform_set_drvdata(pdev, ddp);
+	platform_set_drvdata(pdev, mtx);
 
 	return 0;
 }
 
-static int mtk_ddp_remove(struct platform_device *pdev)
+static int mtk_mutex_remove(struct platform_device *pdev)
 {
 	return 0;
 }
 
-static const struct of_device_id ddp_driver_dt_match[] = {
+static const struct of_device_id mutex_driver_dt_match[] = {
 	{ .compatible = "mediatek,mt2701-disp-mutex",
-	  .data = &mt2701_ddp_driver_data},
+	  .data = &mt2701_mutex_driver_data},
 	{ .compatible = "mediatek,mt2712-disp-mutex",
-	  .data = &mt2712_ddp_driver_data},
+	  .data = &mt2712_mutex_driver_data},
 	{ .compatible = "mediatek,mt8167-disp-mutex",
-	  .data = &mt8167_ddp_driver_data},
+	  .data = &mt8167_mutex_driver_data},
 	{ .compatible = "mediatek,mt8173-disp-mutex",
-	  .data = &mt8173_ddp_driver_data},
+	  .data = &mt8173_mutex_driver_data},
 	{},
 };
-MODULE_DEVICE_TABLE(of, ddp_driver_dt_match);
+MODULE_DEVICE_TABLE(of, mutex_driver_dt_match);
 
-struct platform_driver mtk_ddp_driver = {
-	.probe		= mtk_ddp_probe,
-	.remove		= mtk_ddp_remove,
+struct platform_driver mtk_mutex_driver = {
+	.probe		= mtk_mutex_probe,
+	.remove		= mtk_mutex_remove,
 	.driver		= {
-		.name	= "mediatek-ddp",
+		.name	= "mediatek-mutex",
 		.owner	= THIS_MODULE,
-		.of_match_table = ddp_driver_dt_match,
+		.of_match_table = mutex_driver_dt_match,
 	},
 };
+
+builtin_platform_driver(mtk_mutex_driver);
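
Note the API change buried in the rename: mtk_mutex_get() no longer takes an index and instead claims the first free mutex, so callers drop their hard-coded ids. A consumer sketch using only the functions exported above (the surrounding driver code, including mutex_dev and the component choices, is assumed):

	struct mtk_mutex *mutex;

	mutex = mtk_mutex_get(mutex_dev);
	if (IS_ERR(mutex))
		return PTR_ERR(mutex);

	mtk_mutex_prepare(mutex);
	mtk_mutex_add_comp(mutex, DDP_COMPONENT_OVL0);
	mtk_mutex_add_comp(mutex, DDP_COMPONENT_DSI0);
	mtk_mutex_enable(mutex);

	/* ... scanout runs ... */

	mtk_mutex_disable(mutex);
	mtk_mutex_remove_comp(mutex, DDP_COMPONENT_DSI0);
	mtk_mutex_remove_comp(mutex, DDP_COMPONENT_OVL0);
	mtk_mutex_unprepare(mutex);
	mtk_mutex_put(mutex);
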
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index cfb7f5612ef0f7b47929a4c7ca7b85e5087d896c..4f02db65dedec2fadc25d1f1f24ef6abdde66729 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1269,6 +1269,7 @@ config FB_ATY
 	select FB_CFB_IMAGEBLIT
 	select FB_BACKLIGHT if FB_ATY_BACKLIGHT
 	select FB_MACMODES if PPC
+	select FB_ATY_CT if SPARC64 && PCI
 	help
 	  This driver supports graphics boards with the ATI Mach64 chips.
 	  Say Y if you have such a graphics board.
@@ -1279,7 +1280,6 @@ config FB_ATY
 config FB_ATY_CT
 	bool "Mach64 CT/VT/GT/LT (incl. 3D RAGE) support"
 	depends on PCI && FB_ATY
-	default y if SPARC64 && PCI
 	help
 	  Say Y here to support use of ATI's 64-bit Rage boards (or other
 	  boards based on the Mach64 CT, VT, GT, and LT chipsets) as a
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index c8feff0ee8da99716013ad19fb22c587e7cf3167..83c8e809955a2bec5b6e2328e536bd854e49e998 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -2353,6 +2353,9 @@ static int aty_init(struct fb_info *info)
 	int gtb_memsize, has_var = 0;
 	struct fb_var_screeninfo var;
 	int ret;
+#ifdef CONFIG_ATARI
+	u8 dac_type;
+#endif
 
 	init_waitqueue_head(&par->vblank.wait);
 	spin_lock_init(&par->int_lock);
@@ -2360,13 +2363,12 @@ static int aty_init(struct fb_info *info)
 #ifdef CONFIG_FB_ATY_GX
 	if (!M64_HAS(INTEGRATED)) {
 		u32 stat0;
-		u8 dac_type, dac_subtype, clk_type;
+		u8 dac_subtype, clk_type;
 		stat0 = aty_ld_le32(CNFG_STAT0, par);
 		par->bus_type = (stat0 >> 0) & 0x07;
 		par->ram_type = (stat0 >> 3) & 0x07;
 		ramname = aty_gx_ram[par->ram_type];
 		/* FIXME: clockchip/RAMDAC probing? */
-		dac_type = (aty_ld_le32(DAC_CNTL, par) >> 16) & 0x07;
 #ifdef CONFIG_ATARI
 		clk_type = CLK_ATI18818_1;
 		dac_type = (stat0 >> 9) & 0x07;
@@ -2375,7 +2377,6 @@ static int aty_init(struct fb_info *info)
 		else
 			dac_subtype = (aty_ld_8(SCRATCH_REG1 + 1, par) & 0xF0) | dac_type;
 #else
-		dac_type = DAC_IBMRGB514;
 		dac_subtype = DAC_IBMRGB514;
 		clk_type = CLK_IBMRGB514;
 #endif
@@ -3062,7 +3063,6 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
 	if (dp == of_console_device) {
 		struct fb_var_screeninfo *var = &default_var;
 		unsigned int N, P, Q, M, T, R;
-		u32 v_total, h_total;
 		struct crtc crtc;
 		u8 pll_regs[16];
 		u8 clock_cntl;
@@ -3078,9 +3078,6 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
 		crtc.gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par);
 		aty_crtc_to_var(&crtc, var);
 
-		h_total = var->xres + var->right_margin + var->hsync_len + var->left_margin;
-		v_total = var->yres + var->lower_margin + var->vsync_len + var->upper_margin;
-
 		/*
 		 * Read the PLL to figure actual Refresh Rate.
 		 */
diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
index f87cc81f4fa2b767ccb3ec4a5e963159d80e2aa2..011b07e44e0dfead323ee2f8ba0fecd0af5fd91a 100644
--- a/drivers/video/fbdev/aty/mach64_ct.c
+++ b/drivers/video/fbdev/aty/mach64_ct.c
@@ -281,10 +281,13 @@ static u32 aty_pll_to_var_ct(const struct fb_info *info, const union aty_pll *pl
 void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll)
 {
 	struct atyfb_par *par = (struct atyfb_par *) info->par;
-	u32 crtc_gen_cntl, lcd_gen_cntrl;
+	u32 crtc_gen_cntl;
 	u8 tmp, tmp2;
 
-	lcd_gen_cntrl = 0;
+#ifdef CONFIG_FB_ATY_GENERIC_LCD
+	u32 lcd_gen_cntrl = 0;
+#endif
+
 #ifdef DEBUG
 	printk("atyfb(%s): about to program:\n"
 		"pll_ext_cntl=0x%02x pll_gen_cntl=0x%02x pll_vclk_cntl=0x%02x\n",
@@ -402,7 +405,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
 	struct atyfb_par *par = (struct atyfb_par *) info->par;
 	u8 mpost_div, xpost_div, sclk_post_div_real;
 	u32 q, memcntl, trp;
-	u32 dsp_config, dsp_on_off, vga_dsp_config, vga_dsp_on_off;
+	u32 dsp_config;
 #ifdef DEBUG
 	int pllmclk, pllsclk;
 #endif
@@ -488,9 +491,9 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
 
 	/* Allow BIOS to override */
 	dsp_config = aty_ld_le32(DSP_CONFIG, par);
-	dsp_on_off = aty_ld_le32(DSP_ON_OFF, par);
-	vga_dsp_config = aty_ld_le32(VGA_DSP_CONFIG, par);
-	vga_dsp_on_off = aty_ld_le32(VGA_DSP_ON_OFF, par);
+	aty_ld_le32(DSP_ON_OFF, par);
+	aty_ld_le32(VGA_DSP_CONFIG, par);
+	aty_ld_le32(VGA_DSP_ON_OFF, par);
 
 	if (dsp_config)
 		pll->ct.dsp_loop_latency = (dsp_config & DSP_LOOP_LATENCY) >> 16;
diff --git a/drivers/video/fbdev/aty/radeon_monitor.c b/drivers/video/fbdev/aty/radeon_monitor.c
index 9966c58aa26cd372adb92935af34f7bbb1dd7988..df55e23b7a5a15b4c719a3c6506dbaaea1316f4f 100644
--- a/drivers/video/fbdev/aty/radeon_monitor.c
+++ b/drivers/video/fbdev/aty/radeon_monitor.c
@@ -488,12 +488,10 @@ void radeon_probe_screens(struct radeonfb_info *rinfo,
 #if defined(DEBUG) && defined(CONFIG_FB_RADEON_I2C)
 		{
 			u8 *EDIDs[4] = { NULL, NULL, NULL, NULL };
-			int mon_types[4] = {MT_NONE, MT_NONE, MT_NONE, MT_NONE};
 			int i;
 
 			for (i = 0; i < 4; i++)
-				mon_types[i] = radeon_probe_i2c_connector(rinfo,
-									  i+1, &EDIDs[i]);
+				radeon_probe_i2c_connector(rinfo, i + 1, &EDIDs[i]);
 		}
 #endif /* DEBUG */
 		/*
diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c
index 0d9a6bb57a09be9b7a1896c9feb4e3b9c9c0c1b8..e7702fe1fe7d76eca07d9e62738c385c03ac9382 100644
--- a/drivers/video/fbdev/bw2.c
+++ b/drivers/video/fbdev/bw2.c
@@ -116,7 +116,7 @@ struct bw2_par {
 
 /**
  *      bw2_blank - Optional function.  Blanks the display.
- *      @blank_mode: the blank mode we want.
+ *      @blank: the blank mode we want.
  *      @info: frame buffer structure that represents a single frame buffer
  */
 static int
diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c
index 77f6470ce665744375b4b390a0af99fa6ebb3aa7..bdcc3f6ab6665b6b31984f0e05b0c0bf4e594695 100644
--- a/drivers/video/fbdev/cg3.c
+++ b/drivers/video/fbdev/cg3.c
@@ -179,7 +179,7 @@ static int cg3_setcolreg(unsigned regno,
 
 /**
  *      cg3_blank - Optional function.  Blanks the display.
- *      @blank_mode: the blank mode we want.
+ *      @blank: the blank mode we want.
  *      @info: frame buffer structure that represents a single frame buffer
  */
 static int cg3_blank(int blank, struct fb_info *info)
diff --git a/drivers/video/fbdev/cg6.c b/drivers/video/fbdev/cg6.c
index a1c68cd48d7ed2ad54510accdc26e1ae27dac608..97ef43c259742b82f678237bb1bfe237a90b550f 100644
--- a/drivers/video/fbdev/cg6.c
+++ b/drivers/video/fbdev/cg6.c
@@ -511,7 +511,7 @@ static int cg6_setcolreg(unsigned regno,
 /**
  *	cg6_blank - Blanks the display.
  *
- *	@blank_mode: the blank mode we want.
+ *	@blank: the blank mode we want.
  *	@info: frame buffer structure that represents a single frame buffer
  */
 static int cg6_blank(int blank, struct fb_info *info)
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index e9027172c0f552163c30c19d68f7233407766adf..93802abbbc72a5618d1bde35a59892bae72c6ec6 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -2463,8 +2463,6 @@ static void AttrOn(const struct cirrusfb_info *cinfo)
  */
 static void WHDR(const struct cirrusfb_info *cinfo, unsigned char val)
 {
-	unsigned char dummy;
-
 	if (is_laguna(cinfo))
 		return;
 	if (cinfo->btype == BT_PICASSO) {
@@ -2473,18 +2471,18 @@ static void WHDR(const struct cirrusfb_info *cinfo, unsigned char val)
 		WGen(cinfo, VGA_PEL_MSK, 0x00);
 		udelay(200);
 		/* next read dummy from pixel address (3c8) */
-		dummy = RGen(cinfo, VGA_PEL_IW);
+		RGen(cinfo, VGA_PEL_IW);
 		udelay(200);
 	}
 	/* now do the usual stuff to access the HDR */
 
-	dummy = RGen(cinfo, VGA_PEL_MSK);
+	RGen(cinfo, VGA_PEL_MSK);
 	udelay(200);
-	dummy = RGen(cinfo, VGA_PEL_MSK);
+	RGen(cinfo, VGA_PEL_MSK);
 	udelay(200);
-	dummy = RGen(cinfo, VGA_PEL_MSK);
+	RGen(cinfo, VGA_PEL_MSK);
 	udelay(200);
-	dummy = RGen(cinfo, VGA_PEL_MSK);
+	RGen(cinfo, VGA_PEL_MSK);
 	udelay(200);
 
 	WGen(cinfo, VGA_PEL_MSK, val);
@@ -2492,7 +2490,7 @@ static void WHDR(const struct cirrusfb_info *cinfo, unsigned char val)
 
 	if (cinfo->btype == BT_PICASSO) {
 		/* now first reset HDR access counter */
-		dummy = RGen(cinfo, VGA_PEL_IW);
+		RGen(cinfo, VGA_PEL_IW);
 		udelay(200);
 
 		/* and at the end, restore the mask value */
@@ -2800,9 +2798,9 @@ static void bestclock(long freq, int *nom, int *den, int *div)
 
 #ifdef CIRRUSFB_DEBUG
 
-/**
+/*
  * cirrusfb_dbg_print_regs
- * @base: If using newmmio, the newmmio base address, otherwise %NULL
+ * @regbase: If using newmmio, the newmmio base address, otherwise %NULL
  * @reg_class: type of registers to read: %CRT, or %SEQ
  *
  * DESCRIPTION:
@@ -2847,7 +2845,7 @@ static void cirrusfb_dbg_print_regs(struct fb_info *info,
 	va_end(list);
 }
 
-/**
+/*
  * cirrusfb_dbg_reg_dump
  * @base: If using newmmio, the newmmio base address, otherwise %NULL
  *
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 2df56bd303d25d125c6a7813ed0bf2787abc1ec9..509311471d515a55ee248a45c89bf5d47b3761b3 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -64,9 +64,9 @@
 #undef in_le32
 #undef out_le32
 #define in_8(addr)		0
-#define out_8(addr, val)
+#define out_8(addr, val)	(void)(val)
 #define in_le32(addr)		0
-#define out_le32(addr, val)
+#define out_le32(addr, val)	(void)(val)
 #define pgprot_cached_wthru(prot) (prot)
 #else
 static void invalid_vram_cache(void __force *addr)
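
In the stubbed (non-PowerMac) build, out_8() and out_le32() previously discarded their value argument entirely; evaluating it with (void)(val) keeps variables that exist only to feed these accessors from warning as set-but-unused. A sketch (variable and register names are illustrative):

	u32 ctrl = par->saved_ctrl;	/* saved_ctrl is illustrative */

	out_le32(&regs->ctrl, ctrl);	/* expands to (void)(ctrl): no
					 * -Wunused-but-set-variable */
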
diff --git a/drivers/video/fbdev/core/fb_notify.c b/drivers/video/fbdev/core/fb_notify.c
index 74c2da5288848a99a410958c6bca1a7922d3dbd0..10e3b9a74adcb267aad17cf59e88399cf904a7a7 100644
--- a/drivers/video/fbdev/core/fb_notify.c
+++ b/drivers/video/fbdev/core/fb_notify.c
@@ -19,6 +19,8 @@ static BLOCKING_NOTIFIER_HEAD(fb_notifier_list);
 /**
  *	fb_register_client - register a client notifier
  *	@nb: notifier block to callback on events
+ *
+ *	Return: 0 on success, negative error code on failure.
  */
 int fb_register_client(struct notifier_block *nb)
 {
@@ -29,6 +31,8 @@ EXPORT_SYMBOL(fb_register_client);
 /**
  *	fb_unregister_client - unregister a client notifier
  *	@nb: notifier block to callback on events
+ *
+ *	Return: 0 on success, negative error code on failure.
  */
 int fb_unregister_client(struct notifier_block *nb)
 {
@@ -38,7 +42,10 @@ EXPORT_SYMBOL(fb_unregister_client);
 
 /**
  * fb_notifier_call_chain - notify clients of fb_events
+ * @val: value passed to callback
+ * @v: pointer passed to callback
  *
+ * Return: The return value of the last notifier function
  */
 int fb_notifier_call_chain(unsigned long val, void *v)
 {
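
With the return values now documented, a minimal client sketch (names here are illustrative, not from this patch):

	#include <linux/fb.h>
	#include <linux/notifier.h>

	static int my_fb_notify(struct notifier_block *nb,
				unsigned long val, void *v)
	{
		struct fb_event *event = v;

		if (val == FB_EVENT_BLANK)
			pr_info("fb%d blank event\n", event->info->node);

		return NOTIFY_OK;
	}

	static struct notifier_block my_fb_nb = {
		.notifier_call	= my_fb_notify,
	};

	/* fb_register_client(&my_fb_nb) on init,
	 * fb_unregister_client(&my_fb_nb) on teardown */
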
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index bf61598bf1c39ff4c1a6dd5bc94bb1a584ad327a..44a5cd2f54ccce9acb164685910c2e78d3a04823 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -56,8 +56,6 @@
  *  more details.
  */
 
-#undef FBCONDEBUG
-
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/fs.h>
@@ -82,12 +80,6 @@
 
 #include "fbcon.h"
 
-#ifdef FBCONDEBUG
-#  define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
-#else
-#  define DPRINTK(fmt, args...)
-#endif
-
 /*
  * FIXME: Locking
  *
@@ -1015,11 +1007,11 @@ static const char *fbcon_startup(void)
 	rows /= vc->vc_font.height;
 	vc_resize(vc, cols, rows);
 
-	DPRINTK("mode:   %s\n", info->fix.id);
-	DPRINTK("visual: %d\n", info->fix.visual);
-	DPRINTK("res:    %dx%d-%d\n", info->var.xres,
-		info->var.yres,
-		info->var.bits_per_pixel);
+	pr_debug("mode:   %s\n", info->fix.id);
+	pr_debug("visual: %d\n", info->fix.visual);
+	pr_debug("res:    %dx%d-%d\n", info->var.xres,
+		 info->var.yres,
+		 info->var.bits_per_pixel);
 
 	fbcon_add_cursor_timer(info);
 	return display_desc;
@@ -2013,7 +2005,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
 	    y_diff < 0 || y_diff > virt_fh) {
 		const struct fb_videomode *mode;
 
-		DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
+		pr_debug("attempting resize %ix%i\n", var.xres, var.yres);
 		mode = fb_find_best_mode(&var, &info->modelist);
 		if (mode == NULL)
 			return -EINVAL;
@@ -2023,7 +2015,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
 		if (virt_w > var.xres/virt_fw || virt_h > var.yres/virt_fh)
 			return -EINVAL;
 
-		DPRINTK("resize now %ix%i\n", var.xres, var.yres);
+		pr_debug("resize now %ix%i\n", var.xres, var.yres);
 		if (con_is_visible(vc)) {
 			var.activate = FB_ACTIVATE_NOW |
 				FB_ACTIVATE_FORCE;
@@ -3299,8 +3291,7 @@ static void fbcon_exit(void)
 
 		if (info->queue.func)
 			pending = cancel_work_sync(&info->queue);
-		DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
-			"no"));
+		pr_debug("fbcon: %s pending work\n", (pending ? "canceled" : "no"));
 
 		for (j = first_fb_vc; j <= last_fb_vc; j++) {
 			if (con2fb_map[j] == i) {
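
Unlike the removed DPRINTK(), pr_debug() compiles to nothing unless DEBUG is defined for the file, or CONFIG_DYNAMIC_DEBUG is enabled, in which case each call site can be toggled at run time. A sketch of both knobs (the debugfs path assumes debugfs is mounted):

	/* run time:
	 *   echo 'file fbcon.c +p' > /sys/kernel/debug/dynamic_debug/control
	 *
	 * compile time, the equivalent of the old FBCONDEBUG switch:
	 */
	#define DEBUG
	#include <linux/printk.h>
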
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 1bf82dbc9e3cfdedec0c2734576de05ceea97e2a..b0e690f41025a23e2b9798f7675f5991f3797ad1 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -605,6 +605,7 @@ static void get_detailed_timing(unsigned char *block,
  * fb_create_modedb - create video mode database
  * @edid: EDID data
  * @dbsize: database size
+ * @specs: monitor specifications, may be NULL
  *
  * RETURNS: struct fb_videomode, @dbsize contains length of database
  *
@@ -1100,7 +1101,6 @@ static u32 fb_get_hblank_by_hfreq(u32 hfreq, u32 xres)
  *                                    2 * M
  *        M = 300;
  *        C = 30;
-
  */
 static u32 fb_get_hblank_by_dclk(u32 dclk, u32 xres)
 {
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index e57c00824965ca6656b456ab733b0a5ef7ee5c93..b80ba3d2a9b8befe43905074317ebe08dfb4fecf 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -139,7 +139,7 @@ static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
 
 static void efifb_show_boot_graphics(struct fb_info *info)
 {
-	u32 bmp_width, bmp_height, bmp_pitch, screen_pitch, dst_x, y, src_y;
+	u32 bmp_width, bmp_height, bmp_pitch, dst_x, y, src_y;
 	struct screen_info *si = &screen_info;
 	struct bmp_file_header *file_header;
 	struct bmp_dib_header *dib_header;
@@ -193,7 +193,6 @@ static void efifb_show_boot_graphics(struct fb_info *info)
 	bmp_width = dib_header->width;
 	bmp_height = abs(dib_header->height);
 	bmp_pitch = round_up(3 * bmp_width, 4);
-	screen_pitch = si->lfb_linelength;
 
 	if ((file_header->bitmap_offset + bmp_pitch * bmp_height) >
 				bgrt_image_size)
diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c
index 948b731844334a8e6d746b3fee68dc73439f94e7..b3d580e57221ebedd14e65bf9f68d1b84cc67a0a 100644
--- a/drivers/video/fbdev/ffb.c
+++ b/drivers/video/fbdev/ffb.c
@@ -667,7 +667,7 @@ static int ffb_setcolreg(unsigned regno,
 
 /**
  *	ffb_blank - Optional function.  Blanks the display.
- *	@blank_mode: the blank mode we want.
+ *	@blank: the blank mode we want.
  *	@info: frame buffer structure that represents a single frame buffer
  */
 static int ffb_blank(int blank, struct fb_info *info)
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index 31270a8986e8e4b4866b053744b1089b00ce1ccc..c5b99a4861e8709db4e96e7d683dc1731a945491 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -198,7 +198,7 @@ static void gbe_reset(void)
 static void gbe_turn_off(void)
 {
 	int i;
-	unsigned int val, x, y, vpixen_off;
+	unsigned int val, y, vpixen_off;
 
 	gbe_turned_on = 0;
 
@@ -249,7 +249,6 @@ static void gbe_turn_off(void)
 
 	for (i = 0; i < 100000; i++) {
 		val = gbe->vt_xy;
-		x = GET_GBE_FIELD(VT_XY, X, val);
 		y = GET_GBE_FIELD(VT_XY, Y, val);
 		if (y < vpixen_off)
 			break;
@@ -260,7 +259,6 @@ static void gbe_turn_off(void)
 		       "gbefb: wait for vpixen_off timed out\n");
 	for (i = 0; i < 10000; i++) {
 		val = gbe->vt_xy;
-		x = GET_GBE_FIELD(VT_XY, X, val);
 		y = GET_GBE_FIELD(VT_XY, Y, val);
 		if (y > vpixen_off)
 			break;
diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c
index 9c83ec3f8e1f82f0d8bebc31ddfe9045450c072b..2b885cd046fe7918ab3956d65457a4322bfb76b2 100644
--- a/drivers/video/fbdev/goldfishfb.c
+++ b/drivers/video/fbdev/goldfishfb.c
@@ -305,11 +305,13 @@ static const struct of_device_id goldfish_fb_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, goldfish_fb_of_match);
 
+#ifdef CONFIG_ACPI
 static const struct acpi_device_id goldfish_fb_acpi_match[] = {
 	{ "GFSH0004", 0 },
 	{ },
 };
 MODULE_DEVICE_TABLE(acpi, goldfish_fb_acpi_match);
+#endif
 
 static struct platform_driver goldfish_fb_driver = {
 	.probe		= goldfish_fb_probe,
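
With the table guarded, the truncated driver declaration above is expected to reference it through ACPI_PTR(), which evaluates to NULL when CONFIG_ACPI is off and so pairs with the new #ifdef to avoid a defined-but-unused warning. A sketch of the pattern (field values assumed):

	static struct platform_driver goldfish_fb_driver = {
		.probe		= goldfish_fb_probe,
		.remove		= goldfish_fb_remove,
		.driver = {
			.name			= "goldfish_fb",
			.of_match_table		= goldfish_fb_of_match,
			.acpi_match_table	= ACPI_PTR(goldfish_fb_acpi_match),
		},
	};
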
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index a45fcff1461fb8aef98d8d0a6fc0b790fa54ebeb..8bbac7182ad32a7e32f62142ba48612e83586834 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -357,8 +357,8 @@ static int hga_card_detect(void)
 
 /**
  *	hgafb_open - open the framebuffer device
- *	@info:pointer to fb_info object containing info for current hga board
- *	@int:open by console system or userland.
+ *	@info: pointer to fb_info object containing info for current hga board
+ *	@init: open by console system or userland.
  */
 
 static int hgafb_open(struct fb_info *info, int init)
@@ -370,9 +370,9 @@ static int hgafb_open(struct fb_info *info, int init)
 }
 
 /**
- *	hgafb_open - open the framebuffer device
- *	@info:pointer to fb_info object containing info for current hga board
- *	@int:open by console system or userland.
+ *	hgafb_release - release the framebuffer device
+ *	@info: pointer to fb_info object containing info for current hga board
+ *	@init: open by console system or userland.
  */
 
 static int hgafb_release(struct fb_info *info, int init)
diff --git a/drivers/video/fbdev/leo.c b/drivers/video/fbdev/leo.c
index 40b11cce0ad6b4e04533c008f693ea361b1cba6d..3eb0f3583f4f3e1cd2ddfa77fa09dc01f214ef25 100644
--- a/drivers/video/fbdev/leo.c
+++ b/drivers/video/fbdev/leo.c
@@ -308,7 +308,7 @@ static int leo_setcolreg(unsigned regno,
 
 /**
  *      leo_blank - Optional function.  Blanks the display.
- *      @blank_mode: the blank mode we want.
+ *      @blank: the blank mode we want.
  *      @info: frame buffer structure that represents a single frame buffer
  */
 static int leo_blank(int blank, struct fb_info *info)
diff --git a/drivers/video/fbdev/mmp/hw/mmp_spi.c b/drivers/video/fbdev/mmp/hw/mmp_spi.c
index 1911a47769b6958b98e7ed2bc124779a2fb8eb13..16401eb95c6cac964a47540a132223fe5e47fec8 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_spi.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_spi.c
@@ -17,8 +17,8 @@
 
 /**
  * spi_write - write command to the SPI port
+ * @spi:  the SPI device.
  * @data: can be 8/16/32-bit, MSB justified data to write.
- * @len:  data length.
  *
  * Wait bus transfer complete IRQ.
  * The caller is expected to perform the necessary locking.
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index 894617ddabcb6b3f8d8f6f210cfa87ea7fc1d265..fabb271337ed22c35dd5003191c80896cef6b989 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -445,7 +445,6 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
 static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
 {
 	struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
-	uint32_t enabled;
 	unsigned long flags;
 
 	if (mx3_fbi->txd == NULL)
@@ -453,7 +452,7 @@ static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
 
 	spin_lock_irqsave(&mx3fb->lock, flags);
 
-	enabled = sdc_fb_uninit(mx3_fbi);
+	sdc_fb_uninit(mx3_fbi);
 
 	spin_unlock_irqrestore(&mx3fb->lock, flags);
 
@@ -732,7 +731,7 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi);
 
 /**
  * mx3fb_set_fix() - set fixed framebuffer parameters from variable settings.
- * @info:	framebuffer information pointer
+ * @fbi:	framebuffer information pointer
  * @return:	0 on success or negative error code on failure.
  */
 static int mx3fb_set_fix(struct fb_info *fbi)
@@ -740,7 +739,7 @@ static int mx3fb_set_fix(struct fb_info *fbi)
 	struct fb_fix_screeninfo *fix = &fbi->fix;
 	struct fb_var_screeninfo *var = &fbi->var;
 
-	strncpy(fix->id, "DISP3 BG", 8);
+	memcpy(fix->id, "DISP3 BG", 8);
 
 	fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
 
@@ -1105,6 +1104,8 @@ static void __blank(int blank, struct fb_info *fbi)
 
 /**
  * mx3fb_blank() - blank the display.
+ * @blank:	blank value for the panel
+ * @fbi:	framebuffer information pointer
  */
 static int mx3fb_blank(int blank, struct fb_info *fbi)
 {
@@ -1126,7 +1127,7 @@ static int mx3fb_blank(int blank, struct fb_info *fbi)
 /**
  * mx3fb_pan_display() - pan or wrap the display
  * @var:	variable screen buffer information.
- * @info:	framebuffer information pointer.
+ * @fbi:	framebuffer information pointer.
  *
  * We look only at xoffset, yoffset and the FB_VMODE_YWRAP flag
  */
@@ -1387,6 +1388,8 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi)
 
 /**
  * mx3fb_init_fbinfo() - initialize framebuffer information object.
+ * @dev: the device
+ * @ops:	framebuffer device operations
  * @return:	initialized framebuffer structure.
  */
 static struct fb_info *mx3fb_init_fbinfo(struct device *dev,
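
The strncpy() replacement above avoids gcc's -Wstringop-truncation: fix->id is char[16] and "DISP3 BG" is exactly eight characters, so strncpy(dst, src, 8) copies no terminating NUL and the compiler flags it. memcpy() states the intent, a fixed-size copy into a larger buffer that is assumed already zeroed (fb_info is kzalloc'd). Sketch:

	char id[16] = "";		/* fb_fix_screeninfo.id is char[16] */

	memcpy(id, "DISP3 BG", 8);	/* id[8..15] remain NUL from init */
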
diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c
index 09a20d4ab35f21aebc1db61baa0f22110775f0e6..c0f4f402da3f49e49d68490c4cc4a6742b5cdde7 100644
--- a/drivers/video/fbdev/neofb.c
+++ b/drivers/video/fbdev/neofb.c
@@ -1843,7 +1843,6 @@ static int neo_init_hw(struct fb_info *info)
 	struct neofb_par *par = info->par;
 	int videoRam = 896;
 	int maxClock = 65000;
-	int CursorMem = 1024;
 	int CursorOff = 0x100;
 
 	DBG("neo_init_hw");
@@ -1895,19 +1894,16 @@ static int neo_init_hw(struct fb_info *info)
 	case FB_ACCEL_NEOMAGIC_NM2070:
 	case FB_ACCEL_NEOMAGIC_NM2090:
 	case FB_ACCEL_NEOMAGIC_NM2093:
-		CursorMem = 2048;
 		CursorOff = 0x100;
 		break;
 	case FB_ACCEL_NEOMAGIC_NM2097:
 	case FB_ACCEL_NEOMAGIC_NM2160:
-		CursorMem = 1024;
 		CursorOff = 0x100;
 		break;
 	case FB_ACCEL_NEOMAGIC_NM2200:
 	case FB_ACCEL_NEOMAGIC_NM2230:
 	case FB_ACCEL_NEOMAGIC_NM2360:
 	case FB_ACCEL_NEOMAGIC_NM2380:
-		CursorMem = 1024;
 		CursorOff = 0x1000;
 
 		par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase;
diff --git a/drivers/video/fbdev/nvidia/nv_setup.c b/drivers/video/fbdev/nvidia/nv_setup.c
index 2fa68669613afb52a811ffe3547a422cdd158a62..5404017e6957bd9fb97da403c1990f95a077aae0 100644
--- a/drivers/video/fbdev/nvidia/nv_setup.c
+++ b/drivers/video/fbdev/nvidia/nv_setup.c
@@ -89,9 +89,8 @@ u8 NVReadSeq(struct nvidia_par *par, u8 index)
 }
 void NVWriteAttr(struct nvidia_par *par, u8 index, u8 value)
 {
-	volatile u8 tmp;
 
-	tmp = VGA_RD08(par->PCIO, par->IOBase + 0x0a);
+	VGA_RD08(par->PCIO, par->IOBase + 0x0a);
 	if (par->paletteEnabled)
 		index &= ~0x20;
 	else
@@ -101,9 +100,7 @@ void NVWriteAttr(struct nvidia_par *par, u8 index, u8 value)
 }
 u8 NVReadAttr(struct nvidia_par *par, u8 index)
 {
-	volatile u8 tmp;
-
-	tmp = VGA_RD08(par->PCIO, par->IOBase + 0x0a);
+	VGA_RD08(par->PCIO, par->IOBase + 0x0a);
 	if (par->paletteEnabled)
 		index &= ~0x20;
 	else
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/Kconfig b/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
index 744416dc530e3f8baac6b7ab36cf2c071616991e..3ca1bd7bb92fb69dfec7264b193872005d4c17cf 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
+++ b/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
@@ -43,6 +43,7 @@ config FB_OMAP2_PANEL_DPI
 config FB_OMAP2_PANEL_DSI_CM
 	tristate "Generic DSI Command Mode Panel"
 	depends on BACKLIGHT_CLASS_DEVICE
+	depends on DRM_PANEL_DSI_CM = n
 	help
 	  Driver for generic DSI command mode panels.
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
index 3417618310ff81e0e6a96f6df58790b13873b1b6..cc2ad787d493fb1e55ebe0ac1363cdb04de378c5 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
@@ -75,7 +75,7 @@ static void dispc_dump_irqs(struct seq_file *s)
 
 	seq_printf(s, "irqs %d\n", stats.irq_count);
 #define PIS(x) \
-	seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1]);
+	seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1])
 
 	PIS(FRAMEDONE);
 	PIS(VSYNC);
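
The dropped semicolon is not cosmetic: a macro body that ends in ';' expands to two statements, which breaks single-statement contexts. A contrived sketch of the failure mode (PIS_BAD/PIS_GOOD are illustrative names):

	#define PIS_BAD(x)  seq_printf(s, "%s\n", #x);	/* trailing ';' */
	#define PIS_GOOD(x) seq_printf(s, "%s\n", #x)

	if (verbose)
		PIS_GOOD(FRAMEDONE);	/* one statement: fine */
	else
		seq_puts(s, "quiet\n");

	/*
	 * With PIS_BAD() the macro's own ';' terminates the if-body,
	 * leaving the 'else' without a matching 'if': a compile error.
	 */
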
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index 6f9c25fec994694495ef666360b7090acc3252aa..58c7aa279ab15ae0c962c0989436dd5a8f225f06 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -1178,13 +1178,12 @@ static int dsi_regulator_init(struct platform_device *dsidev)
 
 static void _dsi_print_reset_status(struct platform_device *dsidev)
 {
-	u32 l;
 	int b0, b1, b2;
 
 	/* A dummy read using the SCP interface to any DSIPHY register is
 	 * required after DSIPHY reset to complete the reset of the DSI complex
 	 * I/O. */
-	l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
+	dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
 
 	if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
 		b0 = 28;
@@ -1554,7 +1553,7 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
 
 	seq_printf(s, "irqs %d\n", stats.irq_count);
 #define PIS(x) \
-	seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
+	seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1])
 
 	seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
 	PIS(VC0);
@@ -3627,7 +3626,7 @@ static int dsi_proto_config(struct platform_device *dsidev)
 static void dsi_proto_timings(struct platform_device *dsidev)
 {
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-	unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
+	unsigned tlpx, tclk_zero, tclk_prepare;
 	unsigned tclk_pre, tclk_post;
 	unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
 	unsigned ths_trail, ths_exit;
@@ -3646,7 +3645,6 @@ static void dsi_proto_timings(struct platform_device *dsidev)
 
 	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
 	tlpx = FLD_GET(r, 20, 16) * 2;
-	tclk_trail = FLD_GET(r, 15, 8);
 	tclk_zero = FLD_GET(r, 7, 0);
 
 	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
@@ -4040,7 +4038,6 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
 {
 	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-	u16 dw, dh;
 
 	dsi_perf_mark_setup(dsidev);
 
@@ -4049,11 +4046,8 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
 	dsi->framedone_callback = callback;
 	dsi->framedone_data = data;
 
-	dw = dsi->timings.x_res;
-	dh = dsi->timings.y_res;
-
 #ifdef DSI_PERF_MEASURE
-	dsi->update_bytes = dw * dh *
+	dsi->update_bytes = dsi->timings.x_res * dsi->timings.y_res *
 		dsi_get_pixel_size(dsi->pix_fmt) / 8;
 #endif
 	dsi_update_screen_dispc(dsidev);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
index 726c190862d40ff8fdbf31abe07fafca2a36399f..e6363a4209338014a78608436c9177f3d173c09e 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
@@ -679,7 +679,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
 	struct hdmi_audio_format audio_format;
 	struct hdmi_audio_dma audio_dma;
 	struct hdmi_core_audio_config acore;
-	int err, n, cts, channel_count;
+	int n, cts, channel_count;
 	unsigned int fs_nr;
 	bool word_length_16b = false;
 
@@ -741,7 +741,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
 		return -EINVAL;
 	}
 
-	err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+	hdmi_compute_acr(pclk, fs_nr, &n, &cts);
 
 	/* Audio clock regeneration settings */
 	acore.n = n;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
index eda29d3032e1e4336647104959aea7853b60a0ea..cb63bc0e92caa529169205b9c2b00e1268a87336 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
@@ -790,7 +790,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
 	struct hdmi_audio_format audio_format;
 	struct hdmi_audio_dma audio_dma;
 	struct hdmi_core_audio_config core_cfg;
-	int err, n, cts, channel_count;
+	int n, cts, channel_count;
 	unsigned int fs_nr;
 	bool word_length_16b = false;
 
@@ -833,7 +833,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
 		return -EINVAL;
 	}
 
-	err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+	hdmi_compute_acr(pclk, fs_nr, &n, &cts);
 	core_cfg.n = n;
 	core_cfg.cts = cts;
 
diff --git a/drivers/video/fbdev/p9100.c b/drivers/video/fbdev/p9100.c
index 6da672e92643b8d4a38682ec29fec31339b2ade9..4e88a0a195ad14cb3e3a3fb6c340ba506d4c9a3d 100644
--- a/drivers/video/fbdev/p9100.c
+++ b/drivers/video/fbdev/p9100.c
@@ -175,7 +175,7 @@ static int p9100_setcolreg(unsigned regno,
 
 /**
  *      p9100_blank - Optional function.  Blanks the display.
- *      @blank_mode: the blank mode we want.
+ *      @blank: the blank mode we want.
  *      @info: frame buffer structure that represents a single frame buffer
  */
 static int
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index 27893fa139b0802be1d19124fa16d2e3f2f81faf..c68725eebee3bd0cc1c6fa502d9b694a3668f74b 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -1508,8 +1508,8 @@ static const struct fb_ops pm2fb_ops = {
  *
  * Initialise and allocate resource for PCI device.
  *
- * @param	pdev	PCI device.
- * @param	id	PCI device ID.
+ * @pdev:	PCI device.
+ * @id:		PCI device ID.
  */
 static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
@@ -1715,7 +1715,7 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  *
  * Release all device resources.
  *
- * @param	pdev	PCI device to clean up.
+ * @pdev:	PCI device to clean up.
  */
 static void pm2fb_remove(struct pci_dev *pdev)
 {
@@ -1756,7 +1756,7 @@ MODULE_DEVICE_TABLE(pci, pm2fb_id_table);
 
 
 #ifndef MODULE
-/**
+/*
  * Parse user specified options.
  *
  * This is, comma-separated options following `video=pm2fb:'.
diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
index ce55b9d2e862bbfa8e4bc9a80d1806f9cbb23ccc..55554b0433cb4d73a14f96aac5d559187dae6adf 100644
--- a/drivers/video/fbdev/riva/fbdev.c
+++ b/drivers/video/fbdev/riva/fbdev.c
@@ -464,7 +464,7 @@ static inline void reverse_order(u32 *l)
 
 /**
  * rivafb_load_cursor_image - load cursor image to hardware
- * @data: address to monochrome bitmap (1 = foreground color, 0 = background)
+ * @data8: address to monochrome bitmap (1 = foreground color, 0 = background)
  * @par:  pointer to private data
  * @w:    width of cursor image in pixels
  * @h:    height of cursor image in scanlines
@@ -843,9 +843,9 @@ static void riva_update_var(struct fb_var_screeninfo *var,
 /**
  * rivafb_do_maximize - 
  * @info: pointer to fb_info object containing info for current riva board
- * @var:
- * @nom:
- * @den:
+ * @var: standard kernel fb changeable data
+ * @nom: numerator of the bytes-per-pixel scaling ratio
+ * @den: denominator of the bytes-per-pixel scaling ratio
  *
  * DESCRIPTION:
  * .
@@ -1214,7 +1214,6 @@ static int rivafb_set_par(struct fb_info *info)
 /**
  * rivafb_pan_display
  * @var: standard kernel fb changeable data
- * @con: TODO
  * @info: pointer to fb_info object containing info for current riva board
  *
  * DESCRIPTION:
diff --git a/drivers/video/fbdev/riva/riva_hw.c b/drivers/video/fbdev/riva/riva_hw.c
index bcf9c4b4de311b2abe03ad144f9133e23b0380f8..8b829b7200642f56fd744e3505fc829ffb54b6bb 100644
--- a/drivers/video/fbdev/riva/riva_hw.c
+++ b/drivers/video/fbdev/riva/riva_hw.c
@@ -836,17 +836,17 @@ static void nv10CalcArbitration
     nv10_sim_state *arb
 )
 {
-    int data, pagemiss, cas,width, video_enable, bpp;
-    int nvclks, mclks, pclks, vpagemiss, crtpagemiss, vbs;
-    int nvclk_fill, us_extra;
+    int data, pagemiss, width, video_enable, bpp;
+    int nvclks, mclks, pclks, vpagemiss, crtpagemiss;
+    int nvclk_fill;
     int found, mclk_extra, mclk_loop, cbs, m1;
     int mclk_freq, pclk_freq, nvclk_freq, mp_enable;
-    int us_m, us_m_min, us_n, us_p, video_drain_rate, crtc_drain_rate;
-    int vus_m, vus_n, vus_p;
-    int vpm_us, us_video, vlwm, cpm_us, us_crt,clwm;
+    int us_m, us_m_min, us_n, us_p, crtc_drain_rate;
+    int vus_m;
+    int vpm_us, us_video, cpm_us, us_crt,clwm;
     int clwm_rnd_down;
-    int craw, m2us, us_pipe, us_pipe_min, vus_pipe, p1clk, p2;
-    int pclks_2_top_fifo, min_mclk_extra;
+    int m2us, us_pipe_min, p1clk, p2;
+    int min_mclk_extra;
     int us_min_mclk_extra;
 
     fifo->valid = 1;
@@ -854,16 +854,13 @@ static void nv10CalcArbitration
     mclk_freq = arb->mclk_khz;
     nvclk_freq = arb->nvclk_khz;
     pagemiss = arb->mem_page_miss;
-    cas = arb->mem_latency;
     width = arb->memory_width/64;
     video_enable = arb->enable_video;
     bpp = arb->pix_bpp;
     mp_enable = arb->enable_mp;
     clwm = 0;
-    vlwm = 1024;
 
     cbs = 512;
-    vbs = 512;
 
     pclks = 4; /* lwm detect. */
 
@@ -924,17 +921,11 @@ static void nv10CalcArbitration
       us_min_mclk_extra = min_mclk_extra *1000*1000 / mclk_freq;
       us_n = nvclks*1000*1000 / nvclk_freq;/* nvclk latency in us */
       us_p = pclks*1000*1000 / pclk_freq;/* nvclk latency in us */
-      us_pipe = us_m + us_n + us_p;
       us_pipe_min = us_m_min + us_n + us_p;
-      us_extra = 0;
 
       vus_m = mclk_loop *1000*1000 / mclk_freq; /* Mclk latency in us */
-      vus_n = (4)*1000*1000 / nvclk_freq;/* nvclk latency in us */
-      vus_p = 0*1000*1000 / pclk_freq;/* pclk latency in us */
-      vus_pipe = vus_m + vus_n + vus_p;
 
       if(video_enable) {
-        video_drain_rate = pclk_freq * 4; /* MB/s */
         crtc_drain_rate = pclk_freq * bpp/8; /* MB/s */
 
         vpagemiss = 1; /* self generating page miss */
@@ -993,7 +984,6 @@ static void nv10CalcArbitration
               else if(crtc_drain_rate * 100  >= nvclk_fill * 98) {
                   clwm = 1024;
                   cbs = 512;
-                  us_extra = (cbs * 1000 * 1000)/ (8*width)/mclk_freq ;
               }
           }
       }
@@ -1010,7 +1000,6 @@ static void nv10CalcArbitration
 
       m1 = clwm + cbs -  1024; /* Amount of overfill */
       m2us = us_pipe_min + us_min_mclk_extra;
-      pclks_2_top_fifo = (1024-clwm)/(8*width);
 
       /* pclk cycles to drain */
       p1clk = m2us * pclk_freq/(1000*1000); 
@@ -1038,7 +1027,6 @@ static void nv10CalcArbitration
               min_mclk_extra--;
         }
       }
-      craw = clwm;
 
       if(clwm < (1024-cbs+8)) clwm = 1024-cbs+8;
       data = (int)(clwm);
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index 4541afcf9386ec25d640772de014c02d3f4d2869..d1b5f965bc96666a352e450529a7a31ff797dd3b 100644
--- a/drivers/video/fbdev/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
@@ -45,7 +45,7 @@
 #if 0
 #define dbg(fmt, args...) do { printk(KERN_INFO fmt, ## args); } while(0)
 #else
-#define dbg(fmt, args...) do { } while (0)
+#define dbg(fmt, args...) do { no_printk(KERN_INFO fmt, ## args); } while (0)
 #endif
 
 /*
@@ -512,7 +512,6 @@ s1d13xxxfb_bitblt_copyarea(struct fb_info *info, const struct fb_copyarea *area)
 }
 
 /**
- *
  *	s1d13xxxfb_bitblt_solidfill - accelerated solidfill function
  *	@info : framebuffer structure
  *	@rect : fb_fillrect structure
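
The dbg() change above trades an empty do/while for no_printk(), which generates no code but keeps printf-style format checking and marks its arguments as used. Illustrative calls:

	int frames = 0;

	dbg("frames=%d\n", frames);	/* frames now counts as used */
	dbg("frames=%d\n", "oops");	/* format mismatch still warned */
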
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index ba316bd56efd7afabd5d1f050b6939acb9e64b1f..3b134e1bbc384b8e65f52767fe26e30bfdad5f93 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -75,6 +75,7 @@ struct s3c_fb;
  * @buf_size: Offset of buffer size registers.
  * @buf_end: Offset of buffer end registers.
  * @osd: The base for the OSD registers.
+ * @osd_stride: stride of osd
  * @palette: Address of palette memory, or 0 if none.
  * @has_prtcon: Set if has PRTCON register.
  * @has_shadowcon: Set if has SHADOWCON register.
@@ -155,7 +156,7 @@ struct s3c_fb_palette {
  * @windata: The platform data supplied for the window configuration.
  * @parent: The hardware that this window is part of.
  * @fbinfo: Pointer pack to the framebuffer info for this window.
- * @varint: The variant information for this window.
+ * @variant: The variant information for this window.
  * @palette_buffer: Buffer/cache to hold palette entries.
  * @pseudo_palette: For use in TRUECOLOUR modes for entries 0..15/
  * @index: The window number of this window.
@@ -336,7 +337,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
 /**
  * s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock.
  * @sfb: The hardware state.
- * @pixclock: The pixel clock wanted, in picoseconds.
+ * @pixclk: The pixel clock wanted, in picoseconds.
  *
  * Given the specified pixel clock, work out the necessary divider to get
  * close to the output frequency.
@@ -733,7 +734,7 @@ static inline unsigned int chan_to_field(unsigned int chan,
  * @red: The red field for the palette data.
  * @green: The green field for the palette data.
  * @blue: The blue field for the palette data.
- * @trans: The transparency (alpha) field for the palette data.
+ * @transp: The transparency (alpha) field for the palette data.
  * @info: The framebuffer being changed.
  */
 static int s3c_fb_setcolreg(unsigned regno,
@@ -1133,6 +1134,7 @@ static void s3c_fb_free_memory(struct s3c_fb *sfb, struct s3c_fb_win *win)
 
 /**
  * s3c_fb_release_win() - release resources for a framebuffer window.
+ * @sfb: The base resources for the hardware.
  * @win: The window to cleanup the resources for.
  *
  * Release the resources that where claimed for the hardware window,
@@ -1160,6 +1162,7 @@ static void s3c_fb_release_win(struct s3c_fb *sfb, struct s3c_fb_win *win)
 /**
  * s3c_fb_probe_win() - register an hardware window
  * @sfb: The base resources for the hardware
+ * @win_no: The window number
  * @variant: The variant information for this window.
  * @res: Pointer to where to place the resultant window.
  *
@@ -1170,7 +1173,6 @@ static int s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
 			    struct s3c_fb_win_variant *variant,
 			    struct s3c_fb_win **res)
 {
-	struct fb_var_screeninfo *var;
 	struct fb_videomode initmode;
 	struct s3c_fb_pd_win *windata;
 	struct s3c_fb_win *win;
@@ -1198,7 +1200,6 @@ static int s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
 
 	win = fbinfo->par;
 	*res = win;
-	var = &fbinfo->var;
 	win->variant = *variant;
 	win->fbinfo = fbinfo;
 	win->parent = sfb;
diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
index fde27feae5d0c29bfd581939e66a475ddcc515e4..b568c646a76c2273081f8ff2ded26847cef09aae 100644
--- a/drivers/video/fbdev/sis/init.c
+++ b/drivers/video/fbdev/sis/init.c
@@ -2648,7 +2648,7 @@ static void
 SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 		unsigned short ModeIdIndex, unsigned short RRTI)
 {
-   unsigned short data, infoflag = 0, modeflag, resindex;
+   unsigned short data, infoflag = 0, modeflag;
 #ifdef CONFIG_FB_SIS_315
    unsigned char  *ROMAddr  = SiS_Pr->VirtualRomBase;
    unsigned short data2, data3;
@@ -2659,7 +2659,6 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
    if(SiS_Pr->UseCustomMode) {
       infoflag = SiS_Pr->CInfoFlag;
    } else {
-      resindex = SiS_GetResInfo(SiS_Pr, ModeNo, ModeIdIndex);
       if(ModeNo > 0x13) {
 	 infoflag = SiS_Pr->SiS_RefIndex[RRTI].Ext_InfoFlag;
       }
@@ -3538,17 +3537,13 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
 			struct fb_var_screeninfo *var, bool writeres
 )
 {
-   unsigned short HRE, HBE, HRS, HBS, HDE, HT;
-   unsigned short VRE, VBE, VRS, VBS, VDE, VT;
-   unsigned char  sr_data, cr_data, cr_data2;
-   int            A, B, C, D, E, F, temp;
+   unsigned short HRE, HBE, HRS, HDE;
+   unsigned short VRE, VBE, VRS, VDE;
+   unsigned char  sr_data, cr_data;
+   int            B, C, D, E, F, temp;
 
    sr_data = crdata[14];
 
-   /* Horizontal total */
-   HT =  crdata[0] | ((unsigned short)(sr_data & 0x03) << 8);
-   A = HT + 5;
-
    /* Horizontal display enable end */
    HDE = crdata[1] | ((unsigned short)(sr_data & 0x0C) << 6);
    E = HDE + 1;
@@ -3557,9 +3552,6 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
    HRS = crdata[4] | ((unsigned short)(sr_data & 0xC0) << 2);
    F = HRS - E - 3;
 
-   /* Horizontal blank start */
-   HBS = crdata[2] | ((unsigned short)(sr_data & 0x30) << 4);
-
    sr_data = crdata[15];
    cr_data = crdata[5];
 
@@ -3588,13 +3580,6 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
    sr_data = crdata[13];
    cr_data = crdata[7];
 
-   /* Vertical total */
-   VT  = crdata[6] |
-	 ((unsigned short)(cr_data & 0x01) << 8) |
-	 ((unsigned short)(cr_data & 0x20) << 4) |
-	 ((unsigned short)(sr_data & 0x01) << 10);
-   A = VT + 2;
-
    /* Vertical display enable end */
    VDE = crdata[10] |
 	 ((unsigned short)(cr_data & 0x02) << 7) |
@@ -3609,14 +3594,6 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
 	 ((unsigned short)(sr_data & 0x08) << 7);
    F = VRS + 1 - E;
 
-   cr_data2 = (crdata[16] & 0x01) << 5;
-
-   /* Vertical blank start */
-   VBS = crdata[11] |
-	 ((unsigned short)(cr_data  & 0x08) << 5) |
-	 ((unsigned short)(cr_data2 & 0x20) << 4) |
-	 ((unsigned short)(sr_data  & 0x04) << 8);
-
    /* Vertical blank end */
    VBE = crdata[12] | ((unsigned short)(sr_data & 0x10) << 4);
    temp = VBE - ((E - 1) & 511);
diff --git a/drivers/video/fbdev/sis/oem310.h b/drivers/video/fbdev/sis/oem310.h
index 8fce56e4482c098253bd7fe679e1c3532f3da2dd..ed28755715ce313d0ff3e55c018114c6b3968069 100644
--- a/drivers/video/fbdev/sis/oem310.h
+++ b/drivers/video/fbdev/sis/oem310.h
@@ -200,6 +200,7 @@ static const unsigned char SiS310_TVDelayCompensation_651302LV[] =	/* M650, 651,
 	0x33,0x33
 };
 
+#if 0 /* Not used */
 static const unsigned char SiS_TVDelay661_301[] =			/* 661, 301 */
 {
 	0x44,0x44,
@@ -219,6 +220,7 @@ static const unsigned char SiS_TVDelay661_301B[] =			/* 661, 301B et al */
 	0x44,0x44,
 	0x44,0x44
 };
+#endif
 
 static const unsigned char SiS310_TVDelayCompensation_LVDS[] =		/* LVDS */
 {
diff --git a/drivers/video/fbdev/sis/sis.h b/drivers/video/fbdev/sis/sis.h
index 9f4c3093ccb36c988a1be46fb4c97e100886ef6d..d632f096083b35aff05e7e1ad4e5be6e63a21760 100644
--- a/drivers/video/fbdev/sis/sis.h
+++ b/drivers/video/fbdev/sis/sis.h
@@ -15,7 +15,6 @@
 
 #include "vgatypes.h"
 #include "vstruct.h"
-#include "init.h"
 
 #define VER_MAJOR		1
 #define VER_MINOR		8
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 03c736f6f3d086d2febcdd900b47d1a9171f185e..266a5582f94d3fcb29cd1811fd32cdfddc379029 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -5029,7 +5029,6 @@ static void sisfb_post_xgi_ddr2(struct sis_video_info *ivideo, u8 regb)
 	static const u8 cs168[8] = {
 		0x48, 0x78, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00
 	};
-	u8 reg;
 	u8 v1;
 	u8 v2;
 	u8 v3;
@@ -5037,9 +5036,9 @@ static void sisfb_post_xgi_ddr2(struct sis_video_info *ivideo, u8 regb)
 	SiS_SetReg(SISCR, 0xb0, 0x80); /* DDR2 dual frequency mode */
 	SiS_SetReg(SISCR, 0x82, 0x77);
 	SiS_SetReg(SISCR, 0x86, 0x00);
-	reg = SiS_GetReg(SISCR, 0x86);
+	SiS_GetReg(SISCR, 0x86);
 	SiS_SetReg(SISCR, 0x86, 0x88);
-	reg = SiS_GetReg(SISCR, 0x86);
+	SiS_GetReg(SISCR, 0x86);
 	v1 = cs168[regb]; v2 = cs160[regb]; v3 = cs158[regb];
 	if (ivideo->haveXGIROM) {
 		v1 = bios[regb + 0x168];
@@ -5049,9 +5048,9 @@ static void sisfb_post_xgi_ddr2(struct sis_video_info *ivideo, u8 regb)
 	SiS_SetReg(SISCR, 0x86, v1);
 	SiS_SetReg(SISCR, 0x82, 0x77);
 	SiS_SetReg(SISCR, 0x85, 0x00);
-	reg = SiS_GetReg(SISCR, 0x85);
+	SiS_GetReg(SISCR, 0x85);
 	SiS_SetReg(SISCR, 0x85, 0x88);
-	reg = SiS_GetReg(SISCR, 0x85);
+	SiS_GetReg(SISCR, 0x85);
 	SiS_SetReg(SISCR, 0x85, v2);
 	SiS_SetReg(SISCR, 0x82, v3);
 	SiS_SetReg(SISCR, 0x98, 0x01);
diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c
index c05cdabeb11c2d543cfe40821d75b57e446b594a..27d4b0ace2d61bcf2b3ec84dd081535449eee92f 100644
--- a/drivers/video/fbdev/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
@@ -1390,7 +1390,7 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	        fix->smem_start, info->screen_base,
 	        fix->smem_len >> 20);
 
-	f_ddprintk("regbase_virt: %#lx\n", par->mmio_vbase);
+	f_ddprintk("regbase_virt: %p\n", par->mmio_vbase);
 	f_ddprintk("membase_phys: %#lx\n", fix->smem_start);
 	f_ddprintk("fbbase_virt: %p\n", info->screen_base);
 
diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c
index 34b2e5b6e84a13d68760b069892c4011b53f55b4..1638a40fed2254c74b2807812317942c182dcf2f 100644
--- a/drivers/video/fbdev/tcx.c
+++ b/drivers/video/fbdev/tcx.c
@@ -196,7 +196,7 @@ static int tcx_setcolreg(unsigned regno,
 
 /**
  *      tcx_blank - Optional function.  Blanks the display.
- *      @blank_mode: the blank mode we want.
+ *      @blank: the blank mode we want.
  *      @info: frame buffer structure that represents a single frame buffer
  */
 static int
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index f056d80f6359f2dfd82c57fb9e753367bdfa1e45..67e37a62b07c3fec02ac9b4518453d87c2ef746f 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -206,9 +206,7 @@ static inline u8 crt_inb(struct tdfx_par *par, u32 idx)
 
 static inline void att_outb(struct tdfx_par *par, u32 idx, u8 val)
 {
-	unsigned char tmp;
-
-	tmp = vga_inb(par, IS1_R);
+	vga_inb(par, IS1_R);
 	vga_outb(par, ATT_IW, idx);
 	vga_outb(par, ATT_IW, val);
 }
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index 666fbe2f671c9358bb469737f2820779a88a1f1b..ae0cf55406369c44c22865ab8afdf41d13c4ae4f 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -555,7 +555,7 @@ tgafb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue,
 
 /**
  *      tgafb_blank - Optional function.  Blanks the display.
- *      @blank_mode: the blank mode we want.
+ *      @blank: the blank mode we want.
  *      @info: frame buffer structure that represents a single frame buffer
  */
 static int
@@ -837,7 +837,7 @@ tgafb_clut_imageblit(struct fb_info *info, const struct fb_image *image)
 	u32 *palette = ((u32 *)info->pseudo_palette);
 	unsigned long pos, line_length, i, j;
 	const unsigned char *data;
-	void __iomem *regs_base, *fb_base;
+	void __iomem *fb_base;
 
 	dx = image->dx;
 	dy = image->dy;
@@ -855,7 +855,6 @@ tgafb_clut_imageblit(struct fb_info *info, const struct fb_image *image)
 	if (dy + height > vyres)
 		height = vyres - dy;
 
-	regs_base = par->tga_regs_base;
 	fb_base = par->tga_fb_base;
 
 	pos = dy * line_length + (dx * 4);
@@ -1034,7 +1033,7 @@ tgafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 		     regs_base + TGA_MODE_REG);
 }
 
-/**
+/*
  *      tgafb_copyarea - REQUIRED function. Can use generic routines if
  *                       non-accelerated hardware and packed pixel based.
  *                       Copies one area of the screen to another area.
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index f9b3c1cb9530f3119cae58603e77548edcced963..b9cdd02c100095d99ef929571115f82c28290f29 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1017,6 +1017,7 @@ static void dlfb_ops_destroy(struct fb_info *info)
 	}
 	vfree(dlfb->backing_buffer);
 	kfree(dlfb->edid);
+	dlfb_free_urb_list(dlfb);
 	usb_put_dev(dlfb->udev);
 	kfree(dlfb);
 
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index def14ac0ebe14ce85ce9681bc648abe2251bb34e..4df6772802d78b1bd2ea1650f7ee6b16a719af4f 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -423,7 +423,7 @@ static int uvesafb_vbe_getinfo(struct uvesafb_ktask *task,
 	task->t.flags = TF_VBEIB;
 	task->t.buf_len = sizeof(struct vbe_ib);
 	task->buf = &par->vbe_ib;
-	strncpy(par->vbe_ib.vbe_signature, "VBE2", 4);
+	memcpy(par->vbe_ib.vbe_signature, "VBE2", 4);
 
 	err = uvesafb_exec(task);
 	if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
@@ -560,6 +560,8 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
 	task->t.regs.eax = 0x4f0a;
 	task->t.regs.ebx = 0x0;
 	err = uvesafb_exec(task);
+	if (err)
+		return err;
 
 	if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
 		par->pmi_setpal = par->ypan = 0;
@@ -1871,7 +1873,7 @@ static ssize_t v86d_show(struct device_driver *dev, char *buf)
 static ssize_t v86d_store(struct device_driver *dev, const char *buf,
 		size_t count)
 {
-	strncpy(v86d_path, buf, PATH_MAX);
+	strncpy(v86d_path, buf, PATH_MAX - 1);
 	return count;
 }
 static DRIVER_ATTR_RW(v86d);
diff --git a/drivers/video/fbdev/via/lcd.c b/drivers/video/fbdev/via/lcd.c
index 4a869402d120d37b1d2476f5e06d08baee1dcd96..088b962076b51e27a7724d96ee4c46082fda7b94 100644
--- a/drivers/video/fbdev/via/lcd.c
+++ b/drivers/video/fbdev/via/lcd.c
@@ -537,11 +537,9 @@ void viafb_lcd_set_mode(const struct fb_var_screeninfo *var, u16 cxres,
 	u32 clock;
 	struct via_display_timing timing;
 	struct fb_var_screeninfo panel_var;
-	const struct fb_videomode *mode_crt_table, *panel_crt_table;
+	const struct fb_videomode *panel_crt_table;
 
 	DEBUG_MSG(KERN_INFO "viafb_lcd_set_mode!!\n");
-	/* Get mode table */
-	mode_crt_table = viafb_get_best_mode(set_hres, set_vres, 60);
 	/* Get panel table Pointer */
 	panel_crt_table = viafb_get_best_mode(panel_hres, panel_vres, 60);
 	viafb_fill_var_timing_info(&panel_var, panel_crt_table);
diff --git a/drivers/video/fbdev/wmt_ge_rops.c b/drivers/video/fbdev/wmt_ge_rops.c
index 2445cfe617a96eff06f00ffc0776674fde875c13..42255d27a1dbbe86b75ec2d4bc95a9aacc20bc85 100644
--- a/drivers/video/fbdev/wmt_ge_rops.c
+++ b/drivers/video/fbdev/wmt_ge_rops.c
@@ -11,6 +11,7 @@
 #include <linux/fb.h>
 #include <linux/platform_device.h>
 #include "core/fb_draw.h"
+#include "wmt_ge_rops.h"
 
 #define GE_COMMAND_OFF		0x00
 #define GE_DEPTH_OFF		0x04
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index abc9ada798ee86ceef950422fd50a8208ffa3be7..f93b6abbe258193a64e2bfaada6d6554196b2e22 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -52,6 +52,7 @@ static int parse_timing_property(const struct device_node *np, const char *name,
 /**
  * of_parse_display_timing - parse display_timing entry from device_node
  * @np: device_node with the properties
+ * @dt: display_timing that contains the result. It may be partially written in case of errors.
  **/
 static int of_parse_display_timing(const struct device_node *np,
 		struct display_timing *dt)
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
index 67aff2421c29e86be71d9435215b4723df7b3f39..e7d10ffd3b6603a6e4ff81bb8eb53ff57d5c0475 100644
--- a/drivers/video/of_videomode.c
+++ b/drivers/video/of_videomode.c
@@ -14,9 +14,9 @@
 
 /**
  * of_get_videomode - get the videomode #<index> from devicetree
- * @np - devicenode with the display_timings
- * @vm - set to return value
- * @index - index into list of display_timings
+ * @np: devicenode with the display_timings
+ * @vm: set to return value
+ * @index: index into list of display_timings
  *	    (Set this to OF_USE_NATIVE_MODE to use whatever mode is
  *	     specified as native mode in the DT.)
  *
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
index 664e120b93e6042120a782fe14c217bc4ccd0e0e..f3136750c490270029d1d1e0cd63d76b4af8a24b 100644
--- a/include/drm/drm_agpsupport.h
+++ b/include/drm/drm_agpsupport.h
@@ -28,10 +28,6 @@ struct drm_agp_head {
 
 #if IS_ENABLED(CONFIG_AGP)
 
-void drm_free_agp(struct agp_memory * handle, int pages);
-int drm_bind_agp(struct agp_memory * handle, unsigned int start);
-int drm_unbind_agp(struct agp_memory * handle);
-
 struct drm_agp_head *drm_agp_init(struct drm_device *dev);
 void drm_legacy_agp_clear(struct drm_device *dev);
 int drm_agp_acquire(struct drm_device *dev);
@@ -61,20 +57,6 @@ int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
 
 #else /* CONFIG_AGP */
 
-static inline void drm_free_agp(struct agp_memory * handle, int pages)
-{
-}
-
-static inline int drm_bind_agp(struct agp_memory * handle, unsigned int start)
-{
-	return -ENODEV;
-}
-
-static inline int drm_unbind_agp(struct agp_memory * handle)
-{
-	return -ENODEV;
-}
-
 static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
 {
 	return NULL;
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 54e051a957dfc18d3254b9404d2cfd65a58021f9..ce7023e9115d789dd2ddc4f951a33531d6f2175d 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -248,6 +248,26 @@ struct drm_private_state_funcs {
  *    drm_dev_register()
  * 2/ all calls to drm_atomic_private_obj_fini() must be done after calling
  *    drm_dev_unregister()
+ *
+ * If that private object is used to store a state shared by multiple
+ * CRTCs, proper care must be taken to ensure that non-blocking commits are
+ * properly ordered to avoid a use-after-free issue.
+ *
+ * Indeed, assuming a sequence of two non-blocking &drm_atomic_commit on two
+ * different &drm_crtc using different &drm_plane and &drm_connector, and thus
+ * with no shared resources, there is no guarantee as to which commit will
+ * happen first. However, the second &drm_atomic_commit will consider the first
+ * &drm_private_obj its old state, and will be in charge of freeing it whenever
+ * the second &drm_atomic_commit is done.
+ *
+ * If the first &drm_atomic_commit happens after it, it will consider its
+ * &drm_private_obj the new state and will be likely to access it, resulting in
+ * an access to a freed memory region. Drivers should store (and get a reference
+ * to) the &drm_crtc_commit structure in their private state in
+ * &drm_mode_config_helper_funcs.atomic_commit_setup, and then wait for that
+ * commit to complete as the first step of
+ * &drm_mode_config_helper_funcs.atomic_commit_tail, similar to
+ * drm_atomic_helper_wait_for_dependencies().
  */
 struct drm_private_obj {
 	/**
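
As a usage sketch of the scheme described above (not part of this patch):
my_private_state and the get_new_my_state()/get_old_my_state() helpers, which
would wrap drm_atomic_get_new_private_obj_state() and
drm_atomic_get_old_private_obj_state(), are hypothetical driver code. The two
hooks are the &drm_mode_config_helper_funcs ones documented further down in
this series.

struct my_private_state {
	struct drm_private_state base;
	struct drm_crtc_commit *commit;	/* reference taken in setup */
};

static int my_atomic_commit_setup(struct drm_atomic_state *state)
{
	struct my_private_state *my_state = get_new_my_state(state);
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	/* Remember (and take a reference to) the commit creating this state. */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		drm_crtc_commit_get(crtc_state->commit);
		my_state->commit = crtc_state->commit;
	}

	return 0;
}

static void my_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct my_private_state *old_state = get_old_my_state(state);

	/*
	 * Wait for the commit that produced the old private state before
	 * touching it; the reference is dropped with drm_crtc_commit_put()
	 * when the private state itself is destroyed.
	 */
	if (old_state->commit)
		wait_for_completion(&old_state->commit->hw_done);

	drm_atomic_helper_commit_tail(state);
}

A real driver sharing state across several CRTCs may need one commit
reference per shared resource rather than the single pointer used here.
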
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 5f47720440fa67af4945bcfcdf739fcb02aab31f..4045e2507e11c438340f2ec61662fc5571c53a21 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -147,10 +147,6 @@ int drm_atomic_helper_page_flip_target(
 				uint32_t flags,
 				uint32_t target,
 				struct drm_modeset_acquire_ctx *ctx);
-int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
-				       u16 *red, u16 *green, u16 *blue,
-				       uint32_t size,
-				       struct drm_modeset_acquire_ctx *ctx);
 
 /**
  * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index fcdc58d8b88b2c4563bd7479a7a7695566feab40..1922b278ffadfdb185312b979f617b3f1f328286 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -175,6 +175,46 @@ struct drm_scdc {
 	struct drm_scrambling scrambling;
 };
 
+/**
+ * struct drm_hdmi_dsc_cap - DSC capabilities of HDMI sink
+ *
+ * Describes the DSC support provided by an HDMI 2.1 sink.
+ * The information is fetched from the additional HFVSDB blocks defined
+ * for HDMI 2.1.
+ */
+struct drm_hdmi_dsc_cap {
+	/** @v_1p2: flag for DSC 1.2 version support by sink */
+	bool v_1p2;
+
+	/** @native_420: Does sink support DSC with 4:2:0 compression */
+	bool native_420;
+
+	/**
+	 * @all_bpp: Does sink support all bpp with 4:4:4 or 4:2:2
+	 * compressed formats
+	 */
+	bool all_bpp;
+
+	/**
+	 * @bpc_supported: compressed bpc supported by sink: 10, 12 or 16 bpc
+	 */
+	u8 bpc_supported;
+
+	/** @max_slices: maximum number of horizontal slices supported by the sink */
+	u8 max_slices;
+
+	/** @clk_per_slice: max pixel clock in MHz supported per slice */
+	int clk_per_slice;
+
+	/** @max_lanes: DSC max lanes supported for Fixed Rate Link training */
+	u8 max_lanes;
+
+	/** @max_frl_rate_per_lane: maximum FRL rate with DSC per lane */
+	u8 max_frl_rate_per_lane;
+
+	/** @total_chunk_kbytes: max size of chunks in KBs supported per line */
+	u8 total_chunk_kbytes;
+};
 
 /**
  * struct drm_hdmi_info - runtime information about the connected HDMI sink
@@ -207,6 +247,15 @@ struct drm_hdmi_info {
 
 	/** @y420_dc_modes: bitmap of deep color support index */
 	u8 y420_dc_modes;
+
+	/** @max_frl_rate_per_lane: maximum FRL rate per lane supported by the sink */
+	u8 max_frl_rate_per_lane;
+
+	/** @max_lanes: maximum number of FRL lanes supported by the sink */
+	u8 max_lanes;
+
+	/** @dsc_cap: DSC capabilities of the sink */
+	struct drm_hdmi_dsc_cap dsc_cap;
 };
 
 /**
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 5f43d64d2a074225df6005917dae4ed5a2a71858..13eeba2a750af91f0102057c4ce4778812d15777 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -1223,6 +1223,39 @@ int drm_crtc_init_with_planes(struct drm_device *dev,
 			      const char *name, ...);
 void drm_crtc_cleanup(struct drm_crtc *crtc);
 
+__printf(7, 8)
+void *__drmm_crtc_alloc_with_planes(struct drm_device *dev,
+				    size_t size, size_t offset,
+				    struct drm_plane *primary,
+				    struct drm_plane *cursor,
+				    const struct drm_crtc_funcs *funcs,
+				    const char *name, ...);
+
+/**
+ * drmm_crtc_alloc_with_planes - Allocate and initialize a new CRTC object with
+ *    specified primary and cursor planes.
+ * @dev: DRM device
+ * @type: the type of the struct which contains struct &drm_crtc
+ * @member: the name of the &drm_crtc within @type.
+ * @primary: Primary plane for CRTC
+ * @cursor: Cursor plane for CRTC
+ * @funcs: callbacks for the new CRTC
+ * @name: printf style format string for the CRTC name, or NULL for default name
+ *
+ * Allocates and initializes a new crtc object. Cleanup is automatically
+ * handled through registering drmm_crtc_cleanup() with drmm_add_action().
+ *
+ * The @drm_crtc_funcs.destroy hook must be NULL.
+ *
+ * Returns:
+ * Pointer to new crtc, or ERR_PTR on failure.
+ */
+#define drmm_crtc_alloc_with_planes(dev, type, member, primary, cursor, funcs, name, ...) \
+	((type *)__drmm_crtc_alloc_with_planes(dev, sizeof(type), \
+					       offsetof(type, member), \
+					       primary, cursor, funcs, \
+					       name, ##__VA_ARGS__))
+
 /**
  * drm_crtc_index - find the index of a registered CRTC
  * @crtc: CRTC to find index for
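
A usage sketch for drmm_crtc_alloc_with_planes() (not part of this patch;
struct my_crtc, my_crtc_funcs and the previously created @primary plane are
hypothetical driver code):

struct my_crtc {
	struct drm_crtc base;
	/* driver-private CRTC fields follow */
};

static int my_crtc_create(struct drm_device *drm, struct drm_plane *primary)
{
	struct my_crtc *crtc;

	/* my_crtc_funcs must leave .destroy NULL: cleanup is managed. */
	crtc = drmm_crtc_alloc_with_planes(drm, struct my_crtc, base,
					   primary, NULL, &my_crtc_funcs,
					   NULL);
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	return 0;
}
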
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 283a93ce4617261470a43354a9466852ab149240..d647223e8390872afc4951a609547fa50ef6406d 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -51,13 +51,6 @@ enum switch_power_state {
  * may contain multiple heads.
  */
 struct drm_device {
-	/**
-	 * @legacy_dev_list:
-	 *
-	 * List of devices per driver for stealth attach cleanup
-	 */
-	struct list_head legacy_dev_list;
-
 	/** @if_version: Highest interface version set */
 	int if_version;
 
@@ -83,11 +76,7 @@ struct drm_device {
 	} managed;
 
 	/** @driver: DRM driver managing the device */
-#ifdef CONFIG_DRM_LEGACY
-	struct drm_driver *driver;
-#else
 	const struct drm_driver *driver;
-#endif
 
 	/**
 	 * @dev_private:
@@ -293,10 +282,6 @@ struct drm_device {
 	/** @pdev: PCI device structure */
 	struct pci_dev *pdev;
 
-#ifdef __alpha__
-	/** @hose: PCI hose, only used on ALPHA platforms. */
-	struct pci_controller *hose;
-#endif
 	/** @num_crtcs: Number of CRTCs on this device */
 	unsigned int num_crtcs;
 
@@ -336,6 +321,14 @@ struct drm_device {
 	/* Everything below here is for legacy driver, never use! */
 	/* private: */
 #if IS_ENABLED(CONFIG_DRM_LEGACY)
+	/* List of devices per driver for stealth attach cleanup */
+	struct list_head legacy_dev_list;
+
+#ifdef __alpha__
+	/** @hose: PCI hose, only used on ALPHA platforms. */
+	struct pci_controller *hose;
+#endif
+
 	/* Context handle management - linked list of context handles */
 	struct list_head ctxlist;
 
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 6b40258927bf588d8005bb4c39f1a50526d1b21b..edffd1dcca3ed88f4a932eb26304a21cfa21cbea 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -411,6 +411,17 @@ struct drm_device;
 # define DP_DS_10BPC		            1
 # define DP_DS_12BPC		            2
 # define DP_DS_16BPC		            3
+/* HDMI2.1 PCON FRL CONFIGURATION */
+# define DP_PCON_MAX_FRL_BW                 (7 << 2)
+# define DP_PCON_MAX_0GBPS                  (0 << 2)
+# define DP_PCON_MAX_9GBPS                  (1 << 2)
+# define DP_PCON_MAX_18GBPS                 (2 << 2)
+# define DP_PCON_MAX_24GBPS                 (3 << 2)
+# define DP_PCON_MAX_32GBPS                 (4 << 2)
+# define DP_PCON_MAX_40GBPS                 (5 << 2)
+# define DP_PCON_MAX_48GBPS                 (6 << 2)
+# define DP_PCON_SOURCE_CTL_MODE            (1 << 5)
+
 /* offset 3 for DVI */
 # define DP_DS_DVI_DUAL_LINK		    (1 << 1)
 # define DP_DS_DVI_HIGH_COLOR_DEPTH	    (1 << 2)
@@ -421,6 +432,17 @@ struct drm_device;
 # define DP_DS_HDMI_YCBCR444_TO_422_CONV    (1 << 3)
 # define DP_DS_HDMI_YCBCR444_TO_420_CONV    (1 << 4)
 
+/*
+ * VESA DP-to-HDMI PCON Specification adds caps for colorspace
+ * conversion in DFP cap DPCD 83h, Sec 6.1 Table-3.
+ * Based on the available support, the source can enable
+ * color conversion by writing into PROTOCOL_CONVERTER_CONTROL_2,
+ * DPCD 3052h.
+ */
+# define DP_DS_HDMI_BT601_RGB_YCBCR_CONV    (1 << 5)
+# define DP_DS_HDMI_BT709_RGB_YCBCR_CONV    (1 << 6)
+# define DP_DS_HDMI_BT2020_RGB_YCBCR_CONV   (1 << 7)
+
 #define DP_MAX_DOWNSTREAM_PORTS		    0x10
 
 /* DP Forward error Correction Registers */
@@ -430,6 +452,84 @@ struct drm_device;
 # define DP_FEC_CORR_BLK_ERROR_COUNT_CAP    (1 << 2)
 # define DP_FEC_BIT_ERROR_COUNT_CAP	    (1 << 3)
 
+/* DP-HDMI2.1 PCON DSC ENCODER SUPPORT */
+#define DP_PCON_DSC_ENCODER_CAP_SIZE        0xC	/* 0x9E - 0x92 */
+#define DP_PCON_DSC_ENCODER                 0x092
+# define DP_PCON_DSC_ENCODER_SUPPORTED      (1 << 0)
+# define DP_PCON_DSC_PPS_ENC_OVERRIDE       (1 << 1)
+
+/* DP-HDMI2.1 PCON DSC Version */
+#define DP_PCON_DSC_VERSION                 0x093
+# define DP_PCON_DSC_MAJOR_MASK		    (0xF << 0)
+# define DP_PCON_DSC_MINOR_MASK		    (0xF << 4)
+# define DP_PCON_DSC_MAJOR_SHIFT	    0
+# define DP_PCON_DSC_MINOR_SHIFT	    4
+
+/* DP-HDMI2.1 PCON DSC RC Buffer block size */
+#define DP_PCON_DSC_RC_BUF_BLK_INFO	    0x094
+# define DP_PCON_DSC_RC_BUF_BLK_SIZE	    (0x3 << 0)
+# define DP_PCON_DSC_RC_BUF_BLK_1KB	    0
+# define DP_PCON_DSC_RC_BUF_BLK_4KB	    1
+# define DP_PCON_DSC_RC_BUF_BLK_16KB	    2
+# define DP_PCON_DSC_RC_BUF_BLK_64KB	    3
+
+/* DP-HDMI2.1 PCON DSC RC Buffer size */
+#define DP_PCON_DSC_RC_BUF_SIZE		    0x095
+
+/* DP-HDMI2.1 PCON DSC Slice capabilities-1 */
+#define DP_PCON_DSC_SLICE_CAP_1		    0x096
+# define DP_PCON_DSC_1_PER_DSC_ENC     (0x1 << 0)
+# define DP_PCON_DSC_2_PER_DSC_ENC     (0x1 << 1)
+# define DP_PCON_DSC_4_PER_DSC_ENC     (0x1 << 3)
+# define DP_PCON_DSC_6_PER_DSC_ENC     (0x1 << 4)
+# define DP_PCON_DSC_8_PER_DSC_ENC     (0x1 << 5)
+# define DP_PCON_DSC_10_PER_DSC_ENC    (0x1 << 6)
+# define DP_PCON_DSC_12_PER_DSC_ENC    (0x1 << 7)
+
+#define DP_PCON_DSC_BUF_BIT_DEPTH	    0x097
+# define DP_PCON_DSC_BIT_DEPTH_MASK	    (0xF << 0)
+# define DP_PCON_DSC_DEPTH_9_BITS	    0
+# define DP_PCON_DSC_DEPTH_10_BITS	    1
+# define DP_PCON_DSC_DEPTH_11_BITS	    2
+# define DP_PCON_DSC_DEPTH_12_BITS	    3
+# define DP_PCON_DSC_DEPTH_13_BITS	    4
+# define DP_PCON_DSC_DEPTH_14_BITS	    5
+# define DP_PCON_DSC_DEPTH_15_BITS	    6
+# define DP_PCON_DSC_DEPTH_16_BITS	    7
+# define DP_PCON_DSC_DEPTH_8_BITS	    8
+
+#define DP_PCON_DSC_BLOCK_PREDICTION	    0x098
+# define DP_PCON_DSC_BLOCK_PRED_SUPPORT	    (0x1 << 0)
+
+#define DP_PCON_DSC_ENC_COLOR_FMT_CAP	    0x099
+# define DP_PCON_DSC_ENC_RGB		    (0x1 << 0)
+# define DP_PCON_DSC_ENC_YUV444		    (0x1 << 1)
+# define DP_PCON_DSC_ENC_YUV422_S	    (0x1 << 2)
+# define DP_PCON_DSC_ENC_YUV422_N	    (0x1 << 3)
+# define DP_PCON_DSC_ENC_YUV420_N	    (0x1 << 4)
+
+#define DP_PCON_DSC_ENC_COLOR_DEPTH_CAP	    0x09A
+# define DP_PCON_DSC_ENC_8BPC		    (0x1 << 1)
+# define DP_PCON_DSC_ENC_10BPC		    (0x1 << 2)
+# define DP_PCON_DSC_ENC_12BPC		    (0x1 << 3)
+
+#define DP_PCON_DSC_MAX_SLICE_WIDTH	    0x09B
+
+/* DP-HDMI2.1 PCON DSC Slice capabilities-2 */
+#define DP_PCON_DSC_SLICE_CAP_2             0x09C
+# define DP_PCON_DSC_16_PER_DSC_ENC	    (0x1 << 0)
+# define DP_PCON_DSC_20_PER_DSC_ENC         (0x1 << 1)
+# define DP_PCON_DSC_24_PER_DSC_ENC         (0x1 << 2)
+
+/* DP-HDMI2.1 PCON HDMI TX Encoder Bits/pixel increment */
+#define DP_PCON_DSC_BPP_INCR		    0x09E
+# define DP_PCON_DSC_BPP_INCR_MASK	    (0x7 << 0)
+# define DP_PCON_DSC_ONE_16TH_BPP	    0
+# define DP_PCON_DSC_ONE_8TH_BPP	    1
+# define DP_PCON_DSC_ONE_4TH_BPP	    2
+# define DP_PCON_DSC_ONE_HALF_BPP	    3
+# define DP_PCON_DSC_ONE_BPP		    4
+
 /* DP Extended DSC Capabilities */
 #define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0  0x0a0   /* DP 1.4a SCR */
 #define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1  0x0a1
@@ -935,6 +1035,11 @@ struct drm_device;
 # define DP_CEC_IRQ                          (1 << 2)
 
 #define DP_LINK_SERVICE_IRQ_VECTOR_ESI0     0x2005   /* 1.2 */
+# define RX_CAP_CHANGED                      (1 << 0)
+# define LINK_STATUS_CHANGED                 (1 << 1)
+# define STREAM_STATUS_CHANGED               (1 << 2)
+# define HDMI_LINK_STATUS_CHANGED            (1 << 3)
+# define CONNECTED_OFF_ENTRY_REQUESTED       (1 << 4)
 
 #define DP_PSR_ERROR_STATUS                 0x2006  /* XXX 1.2? */
 # define DP_PSR_LINK_CRC_ERROR              (1 << 0)
@@ -1054,6 +1159,51 @@ struct drm_device;
 #define DP_CEC_TX_MESSAGE_BUFFER               0x3020
 #define DP_CEC_MESSAGE_BUFFER_LENGTH             0x10
 
+/* PCON CONFIGURE-1 FRL FOR HDMI SINK */
+#define DP_PCON_HDMI_LINK_CONFIG_1             0x305A
+# define DP_PCON_ENABLE_MAX_FRL_BW             (7 << 0)
+# define DP_PCON_ENABLE_MAX_BW_0GBPS	       0
+# define DP_PCON_ENABLE_MAX_BW_9GBPS	       1
+# define DP_PCON_ENABLE_MAX_BW_18GBPS	       2
+# define DP_PCON_ENABLE_MAX_BW_24GBPS	       3
+# define DP_PCON_ENABLE_MAX_BW_32GBPS	       4
+# define DP_PCON_ENABLE_MAX_BW_40GBPS	       5
+# define DP_PCON_ENABLE_MAX_BW_48GBPS	       6
+# define DP_PCON_ENABLE_SOURCE_CTL_MODE       (1 << 3)
+# define DP_PCON_ENABLE_CONCURRENT_LINK       (1 << 4)
+# define DP_PCON_ENABLE_LINK_FRL_MODE         (1 << 5)
+# define DP_PCON_ENABLE_HPD_READY	      (1 << 6)
+# define DP_PCON_ENABLE_HDMI_LINK             (1 << 7)
+
+/* PCON CONFIGURE-2 FRL FOR HDMI SINK */
+#define DP_PCON_HDMI_LINK_CONFIG_2            0x305B
+# define DP_PCON_MAX_LINK_BW_MASK             (0x3F << 0)
+# define DP_PCON_FRL_BW_MASK_9GBPS            (1 << 0)
+# define DP_PCON_FRL_BW_MASK_18GBPS           (1 << 1)
+# define DP_PCON_FRL_BW_MASK_24GBPS           (1 << 2)
+# define DP_PCON_FRL_BW_MASK_32GBPS           (1 << 3)
+# define DP_PCON_FRL_BW_MASK_40GBPS           (1 << 4)
+# define DP_PCON_FRL_BW_MASK_48GBPS           (1 << 5)
+# define DP_PCON_FRL_LINK_TRAIN_EXTENDED      (1 << 6)
+
+/* PCON HDMI LINK STATUS */
+#define DP_PCON_HDMI_TX_LINK_STATUS           0x303B
+# define DP_PCON_HDMI_TX_LINK_ACTIVE          (1 << 0)
+# define DP_PCON_FRL_READY		      (1 << 1)
+
+/* PCON HDMI POST FRL STATUS */
+#define DP_PCON_HDMI_POST_FRL_STATUS          0x3036
+# define DP_PCON_HDMI_LINK_MODE               (1 << 0)
+# define DP_PCON_HDMI_MODE_TMDS               0
+# define DP_PCON_HDMI_MODE_FRL                1
+# define DP_PCON_HDMI_FRL_TRAINED_BW          (0x3F << 1)
+# define DP_PCON_FRL_TRAINED_BW_9GBPS	      (1 << 1)
+# define DP_PCON_FRL_TRAINED_BW_18GBPS	      (1 << 2)
+# define DP_PCON_FRL_TRAINED_BW_24GBPS	      (1 << 3)
+# define DP_PCON_FRL_TRAINED_BW_32GBPS	      (1 << 4)
+# define DP_PCON_FRL_TRAINED_BW_40GBPS	      (1 << 5)
+# define DP_PCON_FRL_TRAINED_BW_48GBPS	      (1 << 6)
+
 #define DP_PROTOCOL_CONVERTER_CONTROL_0		0x3050 /* DP 1.3 */
 # define DP_HDMI_DVI_OUTPUT_CONFIG		(1 << 0) /* DP 1.3 */
 #define DP_PROTOCOL_CONVERTER_CONTROL_1		0x3051 /* DP 1.3 */
@@ -1063,6 +1213,48 @@ struct drm_device;
 # define DP_HDMI_FORCE_SCRAMBLING		(1 << 3) /* DP 1.4 */
 #define DP_PROTOCOL_CONVERTER_CONTROL_2		0x3052 /* DP 1.3 */
 # define DP_CONVERSION_TO_YCBCR422_ENABLE	(1 << 0) /* DP 1.3 */
+# define DP_PCON_ENABLE_DSC_ENCODER	        (1 << 1)
+# define DP_PCON_ENCODER_PPS_OVERRIDE_MASK	(0x3 << 2)
+# define DP_PCON_ENC_PPS_OVERRIDE_DISABLED      0
+# define DP_PCON_ENC_PPS_OVERRIDE_EN_PARAMS     1
+# define DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER     2
+# define DP_CONVERSION_RGB_YCBCR_MASK	       (7 << 4)
+# define DP_CONVERSION_BT601_RGB_YCBCR_ENABLE  (1 << 4)
+# define DP_CONVERSION_BT709_RGB_YCBCR_ENABLE  (1 << 5)
+# define DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE (1 << 6)
+
+/* PCON Downstream HDMI ERROR Status per Lane */
+#define DP_PCON_HDMI_ERROR_STATUS_LN0          0x3037
+#define DP_PCON_HDMI_ERROR_STATUS_LN1          0x3038
+#define DP_PCON_HDMI_ERROR_STATUS_LN2          0x3039
+#define DP_PCON_HDMI_ERROR_STATUS_LN3          0x303A
+# define DP_PCON_HDMI_ERROR_COUNT_MASK         (0x7 << 0)
+# define DP_PCON_HDMI_ERROR_COUNT_THREE_PLUS   (1 << 0)
+# define DP_PCON_HDMI_ERROR_COUNT_TEN_PLUS     (1 << 1)
+# define DP_PCON_HDMI_ERROR_COUNT_HUNDRED_PLUS (1 << 2)
+
+/* PCON HDMI CONFIG PPS Override Buffer
+ * Valid offsets to be added to base: 0-127
+ */
+#define DP_PCON_HDMI_PPS_OVERRIDE_BASE        0x3100
+
+/* PCON HDMI CONFIG PPS Override Parameter: Slice height
+ * Offset-0 8LSBs of the Slice height.
+ * Offset-1 8MSBs of the Slice height.
+ */
+#define DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT    0x3180
+
+/* PCON HDMI CONFIG PPS Override Parameter: Slice width
+ * Offset-0 8LSBs of the Slice width.
+ * Offset-1 8MSBs of the Slice width.
+ */
+#define DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH    0x3182
+
+/* PCON HDMI CONFIG PPS Override Parameter: bits_per_pixel
+ * Offset-0 8LSBs of the bits_per_pixel.
+ * Offset-1 2MSBs of the bits_per_pixel.
+ */
+#define DP_PCON_HDMI_PPS_OVRD_BPP	     0x3184
 
 /* HDCP 1.3 and HDCP 2.2 */
 #define DP_AUX_HDCP_BKSV		0x68000
@@ -1837,16 +2029,13 @@ struct drm_dp_desc {
 
 int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
 		     bool is_branch);
-u32 drm_dp_get_edid_quirks(const struct edid *edid);
 
 /**
  * enum drm_dp_quirk - Display Port sink/branch device specific quirks
  *
  * Display Port sink and branch devices in the wild have a variety of bugs, try
  * to collect them here. The quirks are shared, but it's up to the drivers to
- * implement workarounds for them. Note that because some devices have
- * unreliable OUIDs, the EDID of sinks should also be checked for quirks using
- * drm_dp_get_edid_quirks().
+ * implement workarounds for them.
  */
 enum drm_dp_quirk {
 	/**
@@ -1878,16 +2067,6 @@ enum drm_dp_quirk {
 	 * The DSC caps can be read from the physical aux instead.
 	 */
 	DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
-	/**
-	 * @DP_QUIRK_FORCE_DPCD_BACKLIGHT:
-	 *
-	 * The device is telling the truth when it says that it uses DPCD
-	 * backlight controls, even if the system's firmware disagrees. This
-	 * quirk should be checked against both the ident and panel EDID.
-	 * When present, the driver should honor the DPCD backlight
-	 * capabilities advertised.
-	 */
-	DP_QUIRK_FORCE_DPCD_BACKLIGHT,
 	/**
 	 * @DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS:
 	 *
@@ -1900,16 +2079,14 @@ enum drm_dp_quirk {
 /**
  * drm_dp_has_quirk() - does the DP device have a specific quirk
  * @desc: Device descriptor filled by drm_dp_read_desc()
- * @edid_quirks: Optional quirk bitmask filled by drm_dp_get_edid_quirks()
  * @quirk: Quirk to query for
  *
  * Return true if DP device identified by @desc has @quirk.
  */
 static inline bool
-drm_dp_has_quirk(const struct drm_dp_desc *desc, u32 edid_quirks,
-		 enum drm_dp_quirk quirk)
+drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
 {
-	return (desc->quirks | edid_quirks) & BIT(quirk);
+	return desc->quirks & BIT(quirk);
 }
 
 #ifdef CONFIG_DRM_DP_CEC
@@ -1967,4 +2144,30 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
 				struct drm_dp_phy_test_params *data);
 int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
 				struct drm_dp_phy_test_params *data, u8 dp_rev);
+int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+			       const u8 port_cap[4]);
+int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd);
+bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux);
+int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
+				bool concurrent_mode);
+int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
+				bool extended_train_mode);
+int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux);
+int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux);
+
+bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux);
+int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask);
+void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
+					   struct drm_connector *connector);
+bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_pps_default(struct drm_dp_aux *aux);
+int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128]);
+int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6]);
+bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+					       const u8 port_cap[4], u8 color_spc);
+int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc);
+
 #endif /* _DRM_DP_HELPER_H_ */
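
A hedged sketch of how a source driver might chain the new PCON FRL helpers
during HDMI 2.1 link bring-up (not part of this patch; the 24 Gbps cap and
the sequential, non-extended training mode are illustrative only):

static int my_pcon_start_frl(struct drm_dp_aux *aux)
{
	int ret;

	ret = drm_dp_pcon_frl_prepare(aux, false);
	if (ret < 0)
		return ret;

	if (!drm_dp_pcon_is_frl_ready(aux))
		return -EBUSY;

	ret = drm_dp_pcon_frl_configure_1(aux, 24, false);
	if (ret < 0)
		return ret;

	ret = drm_dp_pcon_frl_configure_2(aux, DP_PCON_FRL_BW_MASK_24GBPS,
					  false);
	if (ret < 0)
		return ret;

	return drm_dp_pcon_frl_enable(aux);
}
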
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 02787319246a9b80c83474bbe3fb971e88877b0d..827838e0a97e6c2d2f50ae92a2715a227919ce29 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -499,8 +499,6 @@ struct drm_driver {
 	/* Everything below here is for legacy driver, never use! */
 	/* private: */
 
-	/* List of devices hanging off this driver with stealth attach. */
-	struct list_head legacy_dev_list;
 	int (*firstopen) (struct drm_device *);
 	void (*preclose) (struct drm_device *, struct drm_file *file_priv);
 	int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
diff --git a/include/drm/drm_dsc.h b/include/drm/drm_dsc.h
index 53c51231b31c40b10b6d3768a10129e586dd58d1..cf43561e60fade84fc9059f80fe63e9f9c7ac811 100644
--- a/include/drm/drm_dsc.h
+++ b/include/drm/drm_dsc.h
@@ -603,6 +603,7 @@ struct drm_dsc_pps_infoframe {
 } __packed;
 
 void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header);
+int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size);
 void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp,
 				const struct drm_dsc_config *dsc_cfg);
 int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index e97daf6ffbb193a74505026492b9f29825ee365d..a158f585f658b4a89ff06f840ad6ea30ae3a70e8 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -229,6 +229,36 @@ struct detailed_timing {
 				    DRM_EDID_YCBCR420_DC_36 | \
 				    DRM_EDID_YCBCR420_DC_30)
 
+/* HDMI 2.1 additional fields */
+#define DRM_EDID_MAX_FRL_RATE_MASK		0xf0
+#define DRM_EDID_FAPA_START_LOCATION		(1 << 0)
+#define DRM_EDID_ALLM				(1 << 1)
+#define DRM_EDID_FVA				(1 << 2)
+
+/* Deep Color specific */
+#define DRM_EDID_DC_30BIT_420			(1 << 0)
+#define DRM_EDID_DC_36BIT_420			(1 << 1)
+#define DRM_EDID_DC_48BIT_420			(1 << 2)
+
+/* VRR specific */
+#define DRM_EDID_CNMVRR				(1 << 3)
+#define DRM_EDID_CINEMA_VRR			(1 << 4)
+#define DRM_EDID_MDELTA				(1 << 5)
+#define DRM_EDID_VRR_MAX_UPPER_MASK		0xc0
+#define DRM_EDID_VRR_MAX_LOWER_MASK		0xff
+#define DRM_EDID_VRR_MIN_MASK			0x3f
+
+/* DSC specific */
+#define DRM_EDID_DSC_10BPC			(1 << 0)
+#define DRM_EDID_DSC_12BPC			(1 << 1)
+#define DRM_EDID_DSC_16BPC			(1 << 2)
+#define DRM_EDID_DSC_ALL_BPP			(1 << 3)
+#define DRM_EDID_DSC_NATIVE_420			(1 << 6)
+#define DRM_EDID_DSC_1P2			(1 << 7)
+#define DRM_EDID_DSC_MAX_FRL_RATE_MASK		0xf0
+#define DRM_EDID_DSC_MAX_SLICES			0xf
+#define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES		0x3f
+
 /* ELD Header Block */
 #define DRM_ELD_HEADER_BLOCK_SIZE	4
 
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 5dfa5f7a80a7d8d72402f48ccbc0afd2373f273a..5bf78b5bcb2b7d1d6a507680dabf78d6d8f31311 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -89,7 +89,7 @@ struct drm_encoder_funcs {
  * @head: list management
  * @base: base KMS object
  * @name: human readable name, can be overwritten by the driver
- * @funcs: control functions
+ * @funcs: control functions, can be NULL for simple managed encoders
  * @helper_private: mid-layer private data
  *
  * CRTCs drive pixels to encoders, which convert them into signals
@@ -194,6 +194,36 @@ int drm_encoder_init(struct drm_device *dev,
 		     const struct drm_encoder_funcs *funcs,
 		     int encoder_type, const char *name, ...);
 
+__printf(6, 7)
+void *__drmm_encoder_alloc(struct drm_device *dev,
+			   size_t size, size_t offset,
+			   const struct drm_encoder_funcs *funcs,
+			   int encoder_type,
+			   const char *name, ...);
+
+/**
+ * drmm_encoder_alloc - Allocate and initialize an encoder
+ * @dev: drm device
+ * @type: the type of the struct which contains struct &drm_encoder
+ * @member: the name of the &drm_encoder within @type
+ * @funcs: callbacks for this encoder (optional)
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * Allocates and initializes an encoder. Encoder should be subclassed as part of
+ * driver encoder objects. Cleanup is automatically handled through registering
+ * drm_encoder_cleanup() with drmm_add_action().
+ *
+ * The @drm_encoder_funcs.destroy hook must be NULL.
+ *
+ * Returns:
+ * Pointer to new encoder, or ERR_PTR on failure.
+ */
+#define drmm_encoder_alloc(dev, type, member, funcs, encoder_type, name, ...) \
+	((type *)__drmm_encoder_alloc(dev, sizeof(type), \
+				      offsetof(type, member), funcs, \
+				      encoder_type, name, ##__VA_ARGS__))
+
 /**
  * drm_encoder_index - find the index of a registered encoder
  * @encoder: encoder to find index for
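
A usage sketch for drmm_encoder_alloc() (not part of this patch; struct
my_output is hypothetical). Since @funcs may be NULL, a managed encoder with
no special callbacks needs no funcs table at all:

struct my_output {
	struct drm_encoder encoder;
};

static int my_output_create(struct drm_device *drm)
{
	struct my_output *out;

	out = drmm_encoder_alloc(drm, struct my_output, encoder,
				 NULL, DRM_MODE_ENCODER_DSI, NULL);
	if (IS_ERR(out))
		return PTR_ERR(out);

	return 0;
}
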
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 5e6daa1c982fe9a37cef9ed9fe9a475745fd0535..240049566592d1b053d2c500f9d11792c0dbe951 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -416,8 +416,5 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
 				     bool write);
 int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 			    u32 handle, u64 *offset);
-int drm_gem_dumb_destroy(struct drm_file *file,
-			 struct drm_device *dev,
-			 uint32_t handle);
 
 #endif /* __DRM_GEM_H__ */
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 5605c1b8f7790710c0b24bf5a6b9dd8f0c302caa..0a9711caa3e88877741ae6be87803b93d1d78d43 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -59,7 +59,7 @@ struct drm_gem_cma_object {
 		.poll		= drm_poll,\
 		.read		= drm_read,\
 		.llseek		= noop_llseek,\
-		.mmap		= drm_gem_cma_mmap,\
+		.mmap		= drm_gem_mmap,\
 		DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
 	}
 
@@ -76,9 +76,6 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv,
 			    struct drm_device *drm,
 			    struct drm_mode_create_dumb *args);
 
-/* set vm_flags and we can change the VM attribute to other one at here */
-int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
-
 /* allocate physical memory */
 struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
 					      size_t size);
@@ -96,14 +93,13 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
 void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
 			    const struct drm_gem_object *obj);
 
-struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj);
+struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *
 drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
 				  struct dma_buf_attachment *attach,
 				  struct sg_table *sgt);
-int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
-			   struct vm_area_struct *vma);
-int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
+int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
+int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 
 /**
  * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations
@@ -123,7 +119,7 @@ int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
 	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, \
 	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle, \
 	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, \
-	.gem_prime_mmap		= drm_gem_cma_prime_mmap
+	.gem_prime_mmap		= drm_gem_prime_mmap
 
 /**
  * DRM_GEM_CMA_DRIVER_OPS - CMA GEM driver operations
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index fe58dbb46962ac09f980cfc3078e3167fe5643d9..ac22c246542af149aaf4108a0b211633fcd0a91f 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -101,11 +101,11 @@
 
 /* Following Macros take a byte at a time for bit(s) masking */
 /*
- * TODO: This has to be changed for DP MST, as multiple stream on
- * same port is possible.
- * For HDCP2.2 on HDMI and DP SST this value is always 1.
+ * TODO: HDCP_2_2_MAX_CONTENT_STREAMS_CNT is based upon actual
+ * H/W MST streams capacity.
+ * This needs to be moved out to a platform-specific header.
  */
-#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT	1
+#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT	4
 #define HDCP_2_2_TXCAP_MASK_LEN			2
 #define HDCP_2_2_RXCAPS_LEN			3
 #define HDCP_2_2_RX_REPEATER(x)			((x) & BIT(0))
diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h
index d77f6e65b1c610073dfb579329e961bd18842965..631b22f9757dd9cc982f131c5a407576ef7c0464 100644
--- a/include/drm/drm_irq.h
+++ b/include/drm/drm_irq.h
@@ -28,5 +28,5 @@ struct drm_device;
 
 int drm_irq_install(struct drm_device *dev, int irq);
 int drm_irq_uninstall(struct drm_device *dev);
-
+int devm_drm_irq_install(struct drm_device *dev, int irq);
 #endif
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index 852d7451eeb12943d9be4f77126d8a043df8f5fe..8ed04e9be997000be0837e4af3514b6abda384a9 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -198,8 +198,10 @@ struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
 				     size_t align);
 void drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah);
 
-int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
-void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+int drm_legacy_pci_init(const struct drm_driver *driver,
+			struct pci_driver *pdriver);
+void drm_legacy_pci_exit(const struct drm_driver *driver,
+			 struct pci_driver *pdriver);
 
 #else
 
@@ -214,13 +216,13 @@ static inline void drm_pci_free(struct drm_device *dev,
 {
 }
 
-static inline int drm_legacy_pci_init(struct drm_driver *driver,
+static inline int drm_legacy_pci_init(const struct drm_driver *driver,
 				      struct pci_driver *pdriver)
 {
 	return -EINVAL;
 }
 
-static inline void drm_legacy_pci_exit(struct drm_driver *driver,
+static inline void drm_legacy_pci_exit(const struct drm_driver *driver,
 				       struct pci_driver *pdriver)
 {
 }
diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h
index ca4114633bf943d3f2bb33d659f0dc55459714ce..b45c6fbf53ac4923fbf20dff794fde3333b0159e 100644
--- a/include/drm/drm_managed.h
+++ b/include/drm/drm_managed.h
@@ -44,8 +44,6 @@ int __must_check __drmm_add_action_or_reset(struct drm_device *dev,
 					    drmres_release_t action,
 					    void *data, const char *name);
 
-void drmm_add_final_kfree(struct drm_device *dev, void *container);
-
 void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) __malloc;
 
 /**
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index c2827ceaba0d2b4bd6829e75a9f18b5fd5a8888f..f543d6e3e822c2921848981126eb6119377987e3 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -172,7 +172,7 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
  * mipi_dbi_command - MIPI DCS command with optional parameter(s)
  * @dbi: MIPI DBI structure
  * @cmd: Command
- * @seq...: Optional parameter(s)
+ * @seq: Optional parameter(s)
  *
  * Send MIPI DCS command to the controller. Use mipi_dbi_command_read() for
  * get/read.
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index a0d79d1c51e2fa47eced58e142d9a19a276b4c6c..29ba4adf0c536b5bd5778667c72b749bc3e2082c 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -461,9 +461,19 @@ void drm_display_mode_from_videomode(const struct videomode *vm,
 void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
 				   struct videomode *vm);
 void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags);
+
+#if defined(CONFIG_OF)
 int of_get_drm_display_mode(struct device_node *np,
 			    struct drm_display_mode *dmode, u32 *bus_flags,
 			    int index);
+#else
+static inline int of_get_drm_display_mode(struct device_node *np,
+					  struct drm_display_mode *dmode,
+					  u32 *bus_flags, int index)
+{
+	return -EINVAL;
+}
+#endif
 
 void drm_mode_set_name(struct drm_display_mode *mode);
 int drm_mode_vrefresh(const struct drm_display_mode *mode);
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 16ff3fa148f5ce42daaab7db457549aa6819d6bd..eb706342861d4dd8eb2a227d5f1407f019895cb7 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -866,13 +866,19 @@ struct drm_connector_helper_funcs {
 	 * The usual way to implement this is to cache the EDID retrieved in the
 	 * probe callback somewhere in the driver-private connector structure.
 	 * In this function drivers then parse the modes in the EDID and add
-	 * them by calling drm_add_edid_modes(). But connectors that driver a
+	 * them by calling drm_add_edid_modes(). But connectors that drive a
 	 * fixed panel can also manually add specific modes using
 	 * drm_mode_probed_add(). Drivers which manually add modes should also
 	 * make sure that the &drm_connector.display_info,
 	 * &drm_connector.width_mm and &drm_connector.height_mm fields are
 	 * filled in.
 	 *
+	 * Note that the caller function will automatically add standard VESA
+	 * DMT modes up to 1024x768 if the .get_modes() helper operation returns
+	 * no mode and if the connector status is connector_status_connected or
+	 * connector_status_unknown. There is no need to call
+	 * drm_add_modes_noedid() manually in that case.
+	 *
 	 * Virtual drivers that just want some standard VESA mode with a given
 	 * resolution can call drm_add_modes_noedid(), and mark the preferred
 	 * one using drm_set_preferred_mode().
@@ -1395,6 +1401,27 @@ struct drm_mode_config_helper_funcs {
 	 * drm_atomic_helper_commit_tail().
 	 */
 	void (*atomic_commit_tail)(struct drm_atomic_state *state);
+
+	/**
+	 * @atomic_commit_setup:
+	 *
+	 * This hook is used by the default atomic_commit() hook implemented in
+	 * drm_atomic_helper_commit() together with the nonblocking helpers (see
+	 * drm_atomic_helper_setup_commit()) to extend the DRM commit setup. It
+	 * is not used by the atomic helpers.
+	 *
+	 * This function is called at the end of
+	 * drm_atomic_helper_setup_commit(), once the commit has been
+	 * properly set up across the generic DRM object states. It allows
+	 * drivers to do some additional commit tracking that isn't related to a
+	 * CRTC, plane or connector, tracked in a &drm_private_obj structure.
+	 *
+	 * Note that the documentation of &drm_private_obj has more details on
+	 * how one should implement this.
+	 *
+	 * This hook is optional.
+	 */
+	int (*atomic_commit_setup)(struct drm_atomic_state *state);
 };
 
 #endif
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 1d82b264e5e481d9d2b8dac8c3008fb05979feee..8ef06ee1c8ebdd59c08dcd441156cda0aebcd14f 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -764,6 +764,48 @@ int drm_plane_init(struct drm_device *dev,
 		   bool is_primary);
 void drm_plane_cleanup(struct drm_plane *plane);
 
+__printf(10, 11)
+void *__drmm_universal_plane_alloc(struct drm_device *dev,
+				   size_t size, size_t offset,
+				   uint32_t possible_crtcs,
+				   const struct drm_plane_funcs *funcs,
+				   const uint32_t *formats,
+				   unsigned int format_count,
+				   const uint64_t *format_modifiers,
+				   enum drm_plane_type plane_type,
+				   const char *name, ...);
+
+/**
+ * drmm_universal_plane_alloc - Allocate and initialize a universal plane object
+ * @dev: DRM device
+ * @type: the type of the struct which contains struct &drm_plane
+ * @member: the name of the &drm_plane within @type
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @format_modifiers: array of struct drm_format modifiers terminated by
+ *                    DRM_FORMAT_MOD_INVALID
+ * @plane_type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
+ *
+ * Allocates and initializes a plane object of type @type. Cleanup is
+ * automatically handled through registering drm_plane_cleanup() with
+ * drmm_add_action().
+ *
+ * The @drm_plane_funcs.destroy hook must be NULL.
+ *
+ * Returns:
+ * Pointer to new plane, or ERR_PTR on failure.
+ */
+#define drmm_universal_plane_alloc(dev, type, member, possible_crtcs, funcs, formats, \
+				   format_count, format_modifiers, plane_type, name, ...) \
+	((type *)__drmm_universal_plane_alloc(dev, sizeof(type), \
+					      offsetof(type, member), \
+					      possible_crtcs, funcs, formats, \
+					      format_count, format_modifiers, \
+					      plane_type, name, ##__VA_ARGS__))
+
 /**
  * drm_plane_index - find the index of a registered plane
  * @plane: plane to find index for
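
A usage sketch for drmm_universal_plane_alloc() (not part of this patch;
struct my_plane and my_plane_funcs are hypothetical), with the modifier array
terminated by DRM_FORMAT_MOD_INVALID as required above:

static const u32 my_plane_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
};

static const u64 my_plane_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID,
};

struct my_plane {
	struct drm_plane base;
};

static int my_plane_create(struct drm_device *drm)
{
	struct my_plane *plane;

	/* my_plane_funcs must leave .destroy NULL: cleanup is managed. */
	plane = drmm_universal_plane_alloc(drm, struct my_plane, base,
					   BIT(0), &my_plane_funcs,
					   my_plane_formats,
					   ARRAY_SIZE(my_plane_formats),
					   my_plane_modifiers,
					   DRM_PLANE_TYPE_PRIMARY, NULL);
	if (IS_ERR(plane))
		return PTR_ERR(plane);

	return 0;
}
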
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 0991a47a1567014f3f133e05d0215901055841ca..54f2c58305d2b95a758684514683b9219909d9b2 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -105,8 +105,9 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
 
 void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
 
-int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
-				     dma_addr_t *addrs, int max_pages);
-
+int drm_prime_sg_to_page_array(struct sg_table *sgt, struct page **pages,
+			       int max_pages);
+int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
+				   int max_pages);
 
 #endif /* __DRM_PRIME_H__ */
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 4a0a80d658c723db7d8ff5d71627e2f5d32c2765..bbf5c1fdd7b0aa0a34a13b8dc6f98f77a7abae51 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -114,7 +114,7 @@ struct drm_property {
 	 *     by the property. Bitmask properties are created using
 	 *     drm_property_create_bitmask().
 	 *
-	 * DRM_MODE_PROB_OBJECT
+	 * DRM_MODE_PROP_OBJECT
 	 *     Object properties are used to link modeset objects. This is used
 	 *     extensively in the atomic support to create the display pipeline,
 	 *     by linking &drm_framebuffer to &drm_plane, &drm_plane to
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index e7f4d24cdd00524c0c992d86dab73168ca84c689..39f2deee709c2669b736d527bb7b8463dcfcd2cd 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -206,6 +206,19 @@ static inline bool drm_rect_equals(const struct drm_rect *r1,
 		r1->y1 == r2->y1 && r1->y2 == r2->y2;
 }
 
+/**
+ * drm_rect_fp_to_int - Convert a rect in 16.16 fixed point form to int form.
+ * @dst: rect to be stored the converted value
+ * @src: rect in 16.16 fixed point form
+ */
+static inline void drm_rect_fp_to_int(struct drm_rect *dst,
+				      const struct drm_rect *src)
+{
+	drm_rect_init(dst, src->x1 >> 16, src->y1 >> 16,
+		      drm_rect_width(src) >> 16,
+		      drm_rect_height(src) >> 16);
+}
+
 bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
 bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
 			  const struct drm_rect *clip);
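
A worked example of the truncation done by drm_rect_fp_to_int() (values are
illustrative):

	/* A 123.5 x 67.25 source rect at (1.0, 2.0), in 16.16 fixed point. */
	struct drm_rect src = DRM_RECT_INIT(1 << 16, 2 << 16,
					    (123 << 16) | (1 << 15),
					    (67 << 16) | (1 << 14));
	struct drm_rect dst;

	drm_rect_fp_to_int(&dst, &src);
	/* dst is now (1, 2) with width 123 and height 67: fractions truncate. */
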
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index a026375464ff2be55acf6e11cc8c60e2a9b514b0..e6dbf3161c2fea32b83cdb6921d640759b61a401 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -185,4 +185,28 @@ int drm_simple_encoder_init(struct drm_device *dev,
 			    struct drm_encoder *encoder,
 			    int encoder_type);
 
+void *__drmm_simple_encoder_alloc(struct drm_device *dev, size_t size,
+				  size_t offset, int encoder_type);
+
+/**
+ * drmm_simple_encoder_alloc - Allocate and initialize an encoder with basic
+ *                             functionality.
+ * @dev: drm device
+ * @type: the type of the struct which contains struct &drm_encoder
+ * @member: the name of the &drm_encoder within @type.
+ * @encoder_type: user visible type of the encoder
+ *
+ * Allocates and initializes an encoder that has no further functionality.
+ * Settings for possible CRTC and clones are left to their initial values.
+ * Cleanup is automatically handled through registering drm_encoder_cleanup()
+ * with drmm_add_action().
+ *
+ * Returns:
+ * Pointer to new encoder, or ERR_PTR on failure.
+ */
+#define drmm_simple_encoder_alloc(dev, type, member, encoder_type) \
+	((type *)__drmm_simple_encoder_alloc(dev, sizeof(type), \
+					     offsetof(type, member), \
+					     encoder_type))
+
 #endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */
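
A usage sketch for drmm_simple_encoder_alloc() (not part of this patch;
struct my_encoder is hypothetical):

struct my_encoder {
	struct drm_encoder base;
};

static int my_encoder_create(struct drm_device *drm)
{
	struct my_encoder *enc;

	enc = drmm_simple_encoder_alloc(drm, struct my_encoder, base,
					DRM_MODE_ENCODER_TMDS);
	if (IS_ERR(enc))
		return PTR_ERR(enc);

	/* possible_crtcs is left at its initial value: set it as needed. */
	enc->base.possible_crtcs = BIT(0);

	return 0;
}
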
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 92436553fd6a8124f25be33a49df7c5bb4881e4b..975e8a67947f01cf9c91cba0c7efd94af2069eba 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -171,10 +171,10 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
  * struct drm_sched_job - A job to be run by an entity.
  *
  * @queue_node: used to append this struct to the queue of jobs in an entity.
+ * @list: a job participates in the "pending" and "done" lists.
  * @sched: the scheduler instance on which this job is scheduled.
  * @s_fence: contains the fences for the scheduling of job.
  * @finish_cb: the callback for the finished fence.
- * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
  * @id: a unique id assigned to each job scheduled on the scheduler.
  * @karma: increment on every hang caused by this job. If this exceeds the hang
  *         limit of the scheduler then the job is marked guilty and will not
@@ -189,21 +189,21 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
  */
 struct drm_sched_job {
 	struct spsc_node		queue_node;
+	struct list_head		list;
 	struct drm_gpu_scheduler	*sched;
 	struct drm_sched_fence		*s_fence;
 	struct dma_fence_cb		finish_cb;
-	struct list_head		node;
 	uint64_t			id;
 	atomic_t			karma;
 	enum drm_sched_priority		s_priority;
-	struct drm_sched_entity  *entity;
+	struct drm_sched_entity         *entity;
 	struct dma_fence_cb		cb;
 };
 
 static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
 					    int threshold)
 {
-	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
+	return s_job && atomic_inc_return(&s_job->karma) > threshold;
 }
 
 /**
@@ -260,8 +260,8 @@ struct drm_sched_backend_ops {
  * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
  *            timeout interval is over.
  * @thread: the kthread on which the scheduler runs.
- * @ring_mirror_list: the list of jobs which are currently in the job queue.
- * @job_list_lock: lock to protect the ring_mirror_list.
+ * @pending_list: the list of jobs which are currently in the job queue.
+ * @job_list_lock: lock to protect the pending_list.
  * @hang_limit: once the hangs by a job crosses this limit then it is marked
  *              guilty and it will no longer be considered for scheduling.
  * @score: score to help loadbalancer pick an idle sched
@@ -282,7 +282,7 @@ struct drm_gpu_scheduler {
 	atomic64_t			job_id_count;
 	struct delayed_work		work_tdr;
 	struct task_struct		*thread;
-	struct list_head		ring_mirror_list;
+	struct list_head		pending_list;
 	spinlock_t			job_list_lock;
 	int				hang_limit;
 	atomic_t                        score;
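
For illustration, a hypothetical consumer walking the renamed list under the
lock documented above:

    static void foo_dump_pending_jobs(struct drm_gpu_scheduler *sched)
    {
        struct drm_sched_job *job;

        spin_lock(&sched->job_list_lock);
        list_for_each_entry(job, &sched->pending_list, list)
            pr_info("pending job id=%llu karma=%d\n",
                    (unsigned long long)job->id,
                    atomic_read(&job->karma));
        spin_unlock(&sched->job_list_lock);
    }
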
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 2564e66e67d747cfb07bb6b0b5e3cd4a6f4fe7f6..e17be324d95f5d47aa00af1242192febdbc63dff 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -125,7 +125,6 @@ struct ttm_buffer_object {
 	struct ttm_bo_device *bdev;
 	enum ttm_bo_type type;
 	void (*destroy) (struct ttm_buffer_object *);
-	unsigned long num_pages;
 	size_t acc_size;
 
 	/**
@@ -310,6 +309,7 @@ void ttm_bo_put(struct ttm_buffer_object *bo);
  * ttm_bo_move_to_lru_tail
  *
  * @bo: The buffer object.
+ * @mem: Resource object.
  * @bulk: optional bulk move structure to remember BO positions
  *
  * Move this BO to the tail of all lru lists used to lookup and reserve an
@@ -317,6 +317,7 @@ void ttm_bo_put(struct ttm_buffer_object *bo);
  * held, and is used to make a BO less likely to be considered for eviction.
  */
 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
+			     struct ttm_resource *mem,
 			     struct ttm_lru_bulk_move *bulk);
 
 /**
@@ -397,13 +398,11 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
 
 int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 			 struct ttm_buffer_object *bo,
-			 unsigned long size,
-			 enum ttm_bo_type type,
+			 size_t size, enum ttm_bo_type type,
 			 struct ttm_placement *placement,
 			 uint32_t page_alignment,
 			 struct ttm_operation_ctx *ctx,
-			 size_t acc_size,
-			 struct sg_table *sg,
+			 size_t acc_size, struct sg_table *sg,
 			 struct dma_resv *resv,
 			 void (*destroy) (struct ttm_buffer_object *));
 
@@ -445,7 +444,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
  * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
  */
 int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
-		unsigned long size, enum ttm_bo_type type,
+		size_t size, enum ttm_bo_type type,
 		struct ttm_placement *placement,
 		uint32_t page_alignment, bool interruptible, size_t acc_size,
 		struct sg_table *sg, struct dma_resv *resv,
@@ -600,6 +599,7 @@ static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
 static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
 {
 	dma_resv_assert_held(bo->base.resv);
+	WARN_ON_ONCE(!kref_read(&bo->kref));
 	++bo->pin_count;
 }
 
@@ -613,6 +613,7 @@ static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
 {
 	dma_resv_assert_held(bo->base.resv);
 	WARN_ON_ONCE(!bo->pin_count);
+	WARN_ON_ONCE(!kref_read(&bo->kref));
 	--bo->pin_count;
 }
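
The new WARN_ON_ONCE checks expect pin and unpin calls only on objects that
still hold a reference, with the reservation lock held; a hedged caller-side
sketch (the foo_* name is illustrative):

    static int foo_pin_bo(struct ttm_buffer_object *bo)
    {
        int ret;

        /* ttm_bo_pin()/ttm_bo_unpin() require bo->base.resv to be held */
        ret = ttm_bo_reserve(bo, true, false, NULL);
        if (ret)
            return ret;

        ttm_bo_pin(bo);         /* now also warns if bo->kref hit zero */
        ttm_bo_unreserve(bo);
        return 0;
    }
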
 
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index f02f7cf9ae9063b195602c0a1af456e24e1a886e..423348414c59b82dd253a013300ccd39a228e9bc 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -39,7 +39,6 @@
 
 #include "ttm_bo_api.h"
 #include "ttm_memory.h"
-#include "ttm_module.h"
 #include "ttm_placement.h"
 #include "ttm_tt.h"
 #include "ttm_pool.h"
@@ -492,10 +491,11 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 	return 0;
 }
 
-static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
+static inline void
+ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
 {
 	spin_lock(&ttm_bo_glob.lru_lock);
-	ttm_bo_move_to_lru_tail(bo, NULL);
+	ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
 	spin_unlock(&ttm_bo_glob.lru_lock);
 }
 
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index f48a70d39ac56c1b106654bd28dd8f49deb58dd1..da0ed7e8c9155bdce58d25c95aaadc19861d1c25 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -171,7 +171,6 @@ struct ttm_bus_placement {
 struct ttm_resource {
 	void *mm_node;
 	unsigned long start;
-	unsigned long size;
 	unsigned long num_pages;
 	uint32_t page_alignment;
 	uint32_t mem_type;
@@ -191,6 +190,10 @@ struct ttm_resource {
 static inline void
 ttm_resource_manager_set_used(struct ttm_resource_manager *man, bool used)
 {
+	int i;
+
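+	/* a manager may only be toggled while its LRU lists are empty */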
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; i++)
+		WARN_ON(!list_empty(&man->lru[i]));
 	man->use_type = used;
 }
 
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index da27e9d8fa64ee509c7a5a208e69a86dbaf51ad0..6c8eb9a4de81ad2ae505065301d01c9fb8c7e70b 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -99,8 +99,6 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
  */
 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
 		uint32_t page_flags, enum ttm_caching caching);
-int ttm_dma_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
-		    uint32_t page_flags, enum ttm_caching caching);
 int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
 		   uint32_t page_flags, enum ttm_caching caching);
 
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index cf72699cb2bc89c0218c4b87cae61967ba44a15e..efdc56b9d95f4d6d9dccc0885c08535a4711a760 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -85,14 +85,16 @@ struct dma_buf_ops {
 	/**
 	 * @pin:
 	 *
-	 * This is called by dma_buf_pin and lets the exporter know that the
-	 * DMA-buf can't be moved any more.
+	 * This is called by dma_buf_pin() and lets the exporter know that the
+	 * DMA-buf can't be moved any more. The exporter should pin the buffer
+	 * into system memory to make sure it is generally accessible by other
+	 * devices.
 	 *
-	 * This is called with the dmabuf->resv object locked and is mutual
+	 * This is called with the &dmabuf.resv object locked and is mutually
 	 * exclusive with @cache_sgt_mapping.
 	 *
-	 * This callback is optional and should only be used in limited use
-	 * cases like scanout and not for temporary pin operations.
+	 * This is called automatically for non-dynamic importers from
+	 * dma_buf_attach().
 	 *
 	 * Returns:
 	 *
@@ -103,7 +105,7 @@ struct dma_buf_ops {
 	/**
 	 * @unpin:
 	 *
-	 * This is called by dma_buf_unpin and lets the exporter know that the
+	 * This is called by dma_buf_unpin() and lets the exporter know that the
 	 * DMA-buf can be moved again.
 	 *
 	 * This is called with the dmabuf->resv object locked and is mutually
@@ -152,6 +154,12 @@ struct dma_buf_ops {
 	 * On failure, returns a negative error value wrapped into a pointer.
 	 * May also return -EINTR when a signal was received while being
 	 * blocked.
+	 *
+	 * Note that exporters should not try to cache the scatter list, or
+	 * return the same one for multiple calls. Caching is done either by the
+	 * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
+	 * of the scatter list is transferred to the caller, and returned by
+	 * @unmap_dma_buf.
 	 */
 	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
 					 enum dma_data_direction);
@@ -183,24 +191,19 @@ struct dma_buf_ops {
 	 * @begin_cpu_access:
 	 *
 	 * This is called from dma_buf_begin_cpu_access() and allows the
-	 * exporter to ensure that the memory is actually available for cpu
-	 * access - the exporter might need to allocate or swap-in and pin the
-	 * backing storage. The exporter also needs to ensure that cpu access is
-	 * coherent for the access direction. The direction can be used by the
-	 * exporter to optimize the cache flushing, i.e. access with a different
+	 * exporter to ensure that the memory is actually coherent for cpu
+	 * access. The exporter also needs to ensure that cpu access is coherent
+	 * for the access direction. The direction can be used by the exporter
+	 * to optimize the cache flushing, i.e. access with a different
 	 * direction (read instead of write) might return stale or even bogus
 	 * data (e.g. when the exporter needs to copy the data to temporary
 	 * storage).
 	 *
-	 * This callback is optional.
+	 * Note that this is called both through the DMA_BUF_IOCTL_SYNC IOCTL
+	 * command for userspace mappings established through @mmap, and for
+	 * kernel mappings established with @vmap.
 	 *
-	 * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
-	 * from userspace (where storage shouldn't be pinned to avoid handing
-	 * de-factor mlock rights to userspace) and for the kernel-internal
-	 * users of the various kmap interfaces, where the backing storage must
-	 * be pinned to guarantee that the atomic kmap calls can succeed. Since
-	 * there's no in-kernel users of the kmap interfaces yet this isn't a
-	 * real problem.
+	 * This callback is optional.
 	 *
 	 * Returns:
 	 *
@@ -216,9 +219,7 @@ struct dma_buf_ops {
 	 *
 	 * This is called from dma_buf_end_cpu_access() when the importer is
 	 * done accessing the CPU. The exporter can use this to flush caches and
-	 * unpin any resources pinned in @begin_cpu_access.
-	 * The result of any dma_buf kmap calls after end_cpu_access is
-	 * undefined.
+	 * undo anything else done in @begin_cpu_access.
 	 *
 	 * This callback is optional.
 	 *
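
A minimal exporter-side sketch of the clarified @pin contract; the foo_*
buffer type and migration helper are assumptions, not from the patch:

    static int foo_dmabuf_pin(struct dma_buf_attachment *attach)
    {
        struct foo_buffer *buf = attach->dmabuf->priv;

        /* called with the reservation lock held; make the buffer
         * generally accessible, e.g. resident in system memory */
        dma_resv_assert_held(attach->dmabuf->resv);
        return foo_buffer_migrate_to_system(buf);   /* hypothetical */
    }
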
diff --git a/include/linux/pci.h b/include/linux/pci.h
index b32126d26997e2eae6b53543cf40f9fd97f4db8b..53f4904ee83d784b939c5c2f179b751876b3be2a 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1232,6 +1232,15 @@ void pci_update_resource(struct pci_dev *dev, int resno);
 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
 int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
 void pci_release_resource(struct pci_dev *dev, int resno);
+static inline int pci_rebar_bytes_to_size(u64 bytes)
+{
+	bytes = roundup_pow_of_two(bytes);
+
+	/* Return BAR size as defined in the resizable BAR specification */
+	return max(ilog2(bytes), 20) - 20;
+}
+
+u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
 int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
 bool pci_device_is_present(struct pci_dev *pdev);
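
Worked example of the encoding: the byte size is rounded up to a power of
two, log2 is taken and floored at 20 (1 MiB), then 20 is subtracted, so
1 MiB maps to 0 and 256 MiB to 8. A hedged caller sketch combining the two
new interfaces (foo_* is illustrative):

    static int foo_resize_bar0_to_256m(struct pci_dev *pdev)
    {
        int size = pci_rebar_bytes_to_size(256 * 1024 * 1024);  /* -> 8 */
        u32 sizes = pci_rebar_get_possible_sizes(pdev, 0);

        if (!(sizes & BIT(size)))
            return -EOPNOTSUPP;     /* BAR 0 can't grow to 256 MiB */

        return pci_resize_resource(pdev, 0, size);
    }
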
diff --git a/include/linux/soc/mediatek/mtk-mutex.h b/include/linux/soc/mediatek/mtk-mutex.h
new file mode 100644
index 0000000000000000000000000000000000000000..6fe4ffbde290a1438a36f52462f8463289e7f3bf
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk-mutex.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ */
+
+#ifndef MTK_MUTEX_H
+#define MTK_MUTEX_H
+
+struct regmap;
+struct device;
+struct mtk_mutex;
+
+struct mtk_mutex *mtk_mutex_get(struct device *dev);
+int mtk_mutex_prepare(struct mtk_mutex *mutex);
+void mtk_mutex_add_comp(struct mtk_mutex *mutex,
+			enum mtk_ddp_comp_id id);
+void mtk_mutex_enable(struct mtk_mutex *mutex);
+void mtk_mutex_disable(struct mtk_mutex *mutex);
+void mtk_mutex_remove_comp(struct mtk_mutex *mutex,
+			   enum mtk_ddp_comp_id id);
+void mtk_mutex_unprepare(struct mtk_mutex *mutex);
+void mtk_mutex_put(struct mtk_mutex *mutex);
+void mtk_mutex_acquire(struct mtk_mutex *mutex);
+void mtk_mutex_release(struct mtk_mutex *mutex);
+
+#endif /* MTK_MUTEX_H */
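
A hypothetical call sequence for the new API; the component ID and the error
handling are illustrative assumptions:

    static int foo_enable_path(struct device *dev)
    {
        struct mtk_mutex *mutex = mtk_mutex_get(dev);
        int ret;

        if (IS_ERR(mutex))
            return PTR_ERR(mutex);

        ret = mtk_mutex_prepare(mutex);
        if (ret)
            goto err_put;

        mtk_mutex_add_comp(mutex, DDP_COMPONENT_OVL0);  /* assumed ID */
        mtk_mutex_enable(mutex);
        return 0;

    err_put:
        mtk_mutex_put(mutex);
        return ret;
    }
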
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 977caf96c8d2a389a00f86573338eb2af414c86c..fc6dfeba04a5ed4e2bae354e74dc2f246846740a 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -121,9 +121,9 @@ extern struct pci_dev *vga_default_device(void);
 extern void vga_set_default_device(struct pci_dev *pdev);
 extern int vga_remove_vgacon(struct pci_dev *pdev);
 #else
-static inline struct pci_dev *vga_default_device(void) { return NULL; };
-static inline void vga_set_default_device(struct pci_dev *pdev) { };
-static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; };
+static inline struct pci_dev *vga_default_device(void) { return NULL; }
+static inline void vga_set_default_device(struct pci_dev *pdev) { }
+static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; }
 #endif
 
 /*
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 808b48a93330bad83759a3cd81331f812296a12a..0827037c54847898c6771146c5ac285d57849ede 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -1,11 +1,10 @@
-/**
- * \file drm.h
+/*
  * Header for the Direct Rendering Manager
  *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
  *
- * \par Acknowledgments:
- * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
+ * Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.
  */
 
 /*
@@ -85,7 +84,7 @@ typedef unsigned int drm_context_t;
 typedef unsigned int drm_drawable_t;
 typedef unsigned int drm_magic_t;
 
-/**
+/*
  * Cliprect.
  *
  * \warning: If you change this structure, make sure you change
@@ -101,7 +100,7 @@ struct drm_clip_rect {
 	unsigned short y2;
 };
 
-/**
+/*
  * Drawable information.
  */
 struct drm_drawable_info {
@@ -109,7 +108,7 @@ struct drm_drawable_info {
 	struct drm_clip_rect *rects;
 };
 
-/**
+/*
  * Texture region,
  */
 struct drm_tex_region {
@@ -120,7 +119,7 @@ struct drm_tex_region {
 	unsigned int age;
 };
 
-/**
+/*
  * Hardware lock.
  *
  * The lock structure is a simple cache-line aligned integer.  To avoid
@@ -132,7 +131,7 @@ struct drm_hw_lock {
 	char padding[60];			/**< Pad to cache line */
 };
 
-/**
+/*
  * DRM_IOCTL_VERSION ioctl argument type.
  *
  * \sa drmGetVersion().
@@ -149,7 +148,7 @@ struct drm_version {
 	char __user *desc;	  /**< User-space buffer to hold desc */
 };
 
-/**
+/*
  * DRM_IOCTL_GET_UNIQUE ioctl argument type.
  *
  * \sa drmGetBusid() and drmSetBusId().
@@ -168,7 +167,7 @@ struct drm_block {
 	int unused;
 };
 
-/**
+/*
  * DRM_IOCTL_CONTROL ioctl argument type.
  *
  * \sa drmCtlInstHandler() and drmCtlUninstHandler().
@@ -183,7 +182,7 @@ struct drm_control {
 	int irq;
 };
 
-/**
+/*
  * Type of memory to map.
  */
 enum drm_map_type {
@@ -195,7 +194,7 @@ enum drm_map_type {
 	_DRM_CONSISTENT = 5	  /**< Consistent memory for PCI DMA */
 };
 
-/**
+/*
  * Memory mapping flags.
  */
 enum drm_map_flags {
@@ -214,7 +213,7 @@ struct drm_ctx_priv_map {
 	void *handle;		 /**< Handle of map */
 };
 
-/**
+/*
  * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
  * argument type.
  *
@@ -231,7 +230,7 @@ struct drm_map {
 	/*   Private data */
 };
 
-/**
+/*
  * DRM_IOCTL_GET_CLIENT ioctl argument type.
  */
 struct drm_client {
@@ -263,7 +262,7 @@ enum drm_stat_type {
 	    /* Add to the *END* of the list */
 };
 
-/**
+/*
  * DRM_IOCTL_GET_STATS ioctl argument type.
  */
 struct drm_stats {
@@ -274,7 +273,7 @@ struct drm_stats {
 	} data[15];
 };
 
-/**
+/*
  * Hardware locking flags.
  */
 enum drm_lock_flags {
@@ -289,7 +288,7 @@ enum drm_lock_flags {
 	_DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
 };
 
-/**
+/*
  * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
  *
  * \sa drmGetLock() and drmUnlock().
@@ -299,7 +298,7 @@ struct drm_lock {
 	enum drm_lock_flags flags;
 };
 
-/**
+/*
  * DMA flags
  *
  * \warning
@@ -328,7 +327,7 @@ enum drm_dma_flags {
 	_DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
 };
 
-/**
+/*
  * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
  *
  * \sa drmAddBufs().
@@ -351,7 +350,7 @@ struct drm_buf_desc {
 				  */
 };
 
-/**
+/*
  * DRM_IOCTL_INFO_BUFS ioctl argument type.
  */
 struct drm_buf_info {
@@ -359,7 +358,7 @@ struct drm_buf_info {
 	struct drm_buf_desc __user *list;
 };
 
-/**
+/*
  * DRM_IOCTL_FREE_BUFS ioctl argument type.
  */
 struct drm_buf_free {
@@ -367,7 +366,7 @@ struct drm_buf_free {
 	int __user *list;
 };
 
-/**
+/*
  * Buffer information
  *
  * \sa drm_buf_map.
@@ -379,7 +378,7 @@ struct drm_buf_pub {
 	void __user *address;	       /**< Address of buffer */
 };
 
-/**
+/*
  * DRM_IOCTL_MAP_BUFS ioctl argument type.
  */
 struct drm_buf_map {
@@ -392,7 +391,7 @@ struct drm_buf_map {
 	struct drm_buf_pub __user *list;	/**< Buffer information */
 };
 
-/**
+/*
  * DRM_IOCTL_DMA ioctl argument type.
  *
  * Indices here refer to the offset into the buffer list in drm_buf_get.
@@ -417,7 +416,7 @@ enum drm_ctx_flags {
 	_DRM_CONTEXT_2DONLY = 0x02
 };
 
-/**
+/*
  * DRM_IOCTL_ADD_CTX ioctl argument type.
  *
  * \sa drmCreateContext() and drmDestroyContext().
@@ -427,7 +426,7 @@ struct drm_ctx {
 	enum drm_ctx_flags flags;
 };
 
-/**
+/*
  * DRM_IOCTL_RES_CTX ioctl argument type.
  */
 struct drm_ctx_res {
@@ -435,14 +434,14 @@ struct drm_ctx_res {
 	struct drm_ctx __user *contexts;
 };
 
-/**
+/*
  * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
  */
 struct drm_draw {
 	drm_drawable_t handle;
 };
 
-/**
+/*
  * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
  */
 typedef enum {
@@ -456,14 +455,14 @@ struct drm_update_draw {
 	unsigned long long data;
 };
 
-/**
+/*
  * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
  */
 struct drm_auth {
 	drm_magic_t magic;
 };
 
-/**
+/*
  * DRM_IOCTL_IRQ_BUSID ioctl argument type.
  *
  * \sa drmGetInterruptFromBusID().
@@ -505,7 +504,7 @@ struct drm_wait_vblank_reply {
 	long tval_usec;
 };
 
-/**
+/*
  * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
  *
  * \sa drmWaitVBlank().
@@ -518,7 +517,7 @@ union drm_wait_vblank {
 #define _DRM_PRE_MODESET 1
 #define _DRM_POST_MODESET 2
 
-/**
+/*
  * DRM_IOCTL_MODESET_CTL ioctl argument type
  *
  * \sa drmModesetCtl().
@@ -528,7 +527,7 @@ struct drm_modeset_ctl {
 	__u32 cmd;
 };
 
-/**
+/*
  * DRM_IOCTL_AGP_ENABLE ioctl argument type.
  *
  * \sa drmAgpEnable().
@@ -537,7 +536,7 @@ struct drm_agp_mode {
 	unsigned long mode;	/**< AGP mode */
 };
 
-/**
+/*
  * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
  *
  * \sa drmAgpAlloc() and drmAgpFree().
@@ -549,7 +548,7 @@ struct drm_agp_buffer {
 	unsigned long physical;	/**< Physical used by i810 */
 };
 
-/**
+/*
  * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
  *
  * \sa drmAgpBind() and drmAgpUnbind().
@@ -559,7 +558,7 @@ struct drm_agp_binding {
 	unsigned long offset;	/**< In bytes -- will round to page boundary */
 };
 
-/**
+/*
  * DRM_IOCTL_AGP_INFO ioctl argument type.
  *
  * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
@@ -580,7 +579,7 @@ struct drm_agp_info {
 	unsigned short id_device;
 };
 
-/**
+/*
  * DRM_IOCTL_SG_ALLOC ioctl argument type.
  */
 struct drm_scatter_gather {
@@ -588,7 +587,7 @@ struct drm_scatter_gather {
 	unsigned long handle;	/**< Used for mapping / unmapping */
 };
 
-/**
+/*
  * DRM_IOCTL_SET_VERSION ioctl argument type.
  */
 struct drm_set_version {
@@ -598,14 +597,14 @@ struct drm_set_version {
 	int drm_dd_minor;
 };
 
-/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
+/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
 struct drm_gem_close {
 	/** Handle of the object to be closed. */
 	__u32 handle;
 	__u32 pad;
 };
 
-/** DRM_IOCTL_GEM_FLINK ioctl argument type */
+/* DRM_IOCTL_GEM_FLINK ioctl argument type */
 struct drm_gem_flink {
 	/** Handle for the object being named */
 	__u32 handle;
@@ -614,7 +613,7 @@ struct drm_gem_flink {
 	__u32 name;
 };
 
-/** DRM_IOCTL_GEM_OPEN ioctl argument type */
+/* DRM_IOCTL_GEM_OPEN ioctl argument type */
 struct drm_gem_open {
 	/** Name of object being opened */
 	__u32 name;
@@ -652,7 +651,7 @@ struct drm_gem_open {
 #define DRM_CAP_SYNCOBJ		0x13
 #define DRM_CAP_SYNCOBJ_TIMELINE	0x14
 
-/** DRM_IOCTL_GET_CAP ioctl argument type */
+/* DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
 	__u64 capability;
 	__u64 value;
@@ -678,7 +677,9 @@ struct drm_get_cap {
 /**
  * DRM_CLIENT_CAP_ATOMIC
  *
- * If set to 1, the DRM core will expose atomic properties to userspace
+ * If set to 1, the DRM core will expose atomic properties to userspace. This
+ * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
+ * &DRM_CLIENT_CAP_ASPECT_RATIO.
  */
 #define DRM_CLIENT_CAP_ATOMIC	3
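
A short userspace sketch, assuming libdrm's drmSetClientCap() wrapper around
DRM_IOCTL_SET_CLIENT_CAP:

    #include <xf86drm.h>

    int foo_enable_atomic(int fd)
    {
        /* per the doc above, this also implies UNIVERSAL_PLANES
         * and ASPECT_RATIO */
        return drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1);
    }
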
 
@@ -698,7 +699,7 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS	5
 
-/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
 	__u64 capability;
 	__u64 value;
@@ -950,7 +951,7 @@ extern "C" {
 
 #define DRM_IOCTL_MODE_GETFB2		DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
 
-/**
+/*
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
  * Generic IOCTLS restart at 0xA0.
@@ -961,7 +962,7 @@ extern "C" {
 #define DRM_COMMAND_BASE                0x40
 #define DRM_COMMAND_END			0xA0
 
-/**
+/*
  * Header for events written back to userspace on the drm fd.  The
  * type defines the type of event, the length specifies the total
  * length of the event (including the header), and user_data is
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 723c8e23ca87d1e2ce27a2b006ccb748ae0561d6..f76de49c768f87275e988d946d904fab2f1b10a0 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -527,6 +527,25 @@ extern "C" {
  */
 #define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7)
 
+/*
+ * Intel Color Control Surface with Clear Color (CCS) for Gen-12 render
+ * compression.
+ *
+ * The main surface is Y-tiled and is at plane index 0 whereas CCS is linear
+ * and at index 1. The clear color is stored at index 2, and the pitch should
+ * be ignored. The clear color structure is 256 bits. The first 128 bits
+ * represent the Raw Clear Color Red, Green, Blue and Alpha values, each 32
+ * bits wide. The raw clear color is consumed by the 3d engine, which generates
+ * the 64-bit converted clear color. The first 32 bits store the Lower
+ * Converted Clear Color value and the next 32 bits store the Higher Converted
+ * Clear Color value when applicable. The Converted Clear Color values are
+ * consumed by the DE. The last 64 bits are used to store Color Discard Enable
+ * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
+ * corresponds to an area of 4x1 tiles in the main surface. The main surface
+ * pitch is required to be a multiple of 4 tile widths.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8)
+
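
An illustrative C view of the 256-bit clear-color block described above
(field names assumed; this struct is not part of the kernel headers):

    struct gen12_ccs_clear_color {      /* plane index 2, pitch ignored */
        __u32 raw_rgba[4];      /* 128 bits: raw R, G, B, A, 32 bits each */
        __u32 converted_lo;     /* Lower Converted Clear Color (for the DE) */
        __u32 converted_hi;     /* Higher Converted Clear Color (for the DE) */
        __u64 de_ignored;       /* discard enable + depth clear valid */
    };
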
 /*
  * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
  *
@@ -1036,9 +1055,9 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
  * Not all combinations are valid, and different SoCs may support different
  * combinations of layout and options.
  */
-#define __fourcc_mod_amlogic_layout_mask 0xf
+#define __fourcc_mod_amlogic_layout_mask 0xff
 #define __fourcc_mod_amlogic_options_shift 8
-#define __fourcc_mod_amlogic_options_mask 0xf
+#define __fourcc_mod_amlogic_options_mask 0xff
 
 #define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \
 	fourcc_mod_code(AMLOGIC, \
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index b49fbf2bdc408d914b4839d0e3e5f7fadc776c8d..1c064627e6c333a4c853bc0ebf43a4770ca22bd6 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -414,15 +414,12 @@ enum drm_mode_subconnector {
  *
  * If the @count_modes field is set to zero, the kernel will perform a forced
  * probe on the connector to refresh the connector status, modes and EDID.
- * A forced-probe can be slow and the ioctl will block. A force-probe can cause
- * flickering and temporary freezes, so it should not be performed
- * automatically.
+ * A forced-probe can be slow, might cause flickering, and the ioctl will block.
  *
- * User-space shouldn't need to force-probe connectors in general: the kernel
- * will automatically take care of probing connectors that don't support
- * hot-plug detection when appropriate. However, user-space may force-probe
- * connectors on user request (e.g. clicking a "Scan connectors" button, or
- * opening a UI to manage screens).
+ * User-space needs to force-probe connectors to ensure their metadata is
+ * up-to-date at startup and after receiving a hot-plug event. User-space
+ * may perform a forced-probe when the user explicitly requests it. User-space
+ * shouldn't perform a forced-probe in other situations.
  */
 struct drm_mode_get_connector {
 	/** @encoders_ptr: Pointer to ``__u32`` array of object IDs. */
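
A userspace sketch of when to pay for a forced probe, assuming libdrm, where
drmModeGetConnector() forces a probe and drmModeGetConnectorCurrent() reads
the current state without one:

    #include <stdbool.h>
    #include <stdint.h>
    #include <xf86drmMode.h>

    static drmModeConnector *foo_get_connector(int fd, uint32_t conn_id,
                                               bool hotplug_or_startup)
    {
        /* forced probe (slow, may flicker) only when the state
         * may actually be stale */
        if (hotplug_or_startup)
            return drmModeGetConnector(fd, conn_id);
        return drmModeGetConnectorCurrent(fd, conn_id);
    }
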
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index fa1f3d62f9a6cebb300c63bf82955af91ab2a6ed..1987e2ea79a3b86263529b5d3207ac89c767e6fd 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -177,8 +177,9 @@ enum drm_i915_pmu_engine_sample {
 #define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
 #define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
 #define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)
+#define I915_PMU_SOFTWARE_GT_AWAKE_TIME	__I915_PMU_OTHER(4)
 
-#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
+#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
 
 /* Each region is a minimum of 16k, and there are at most 255 of them.
  */
diff --git a/include/video/sstfb.h b/include/video/sstfb.h
index 28384f35477313e68f66565320cd27a0c2f10843..d4a5e41d11739ac387af554dee3aeebc24ad2c50 100644
--- a/include/video/sstfb.h
+++ b/include/video/sstfb.h
@@ -23,7 +23,7 @@
 #  define SST_DEBUG_FUNC 1
 #  define SST_DEBUG_VAR  1
 #else
-#  define dprintk(X...)
+#  define dprintk(X...)		no_printk(X)
 #  define SST_DEBUG_REG  0
 #  define SST_DEBUG_FUNC 0
 #  define SST_DEBUG_VAR  0
@@ -48,7 +48,7 @@
 #if (SST_DEBUG_FUNC > 1)
 #  define f_ddprintk(X...)	dprintk(" " X)
 #else
-#  define f_ddprintk(X...)
+#  define f_ddprintk(X...)	no_printk(X)
 #endif
 #if (SST_DEBUG_FUNC > 2)
 #  define f_dddprintk(X...)	dprintk(" " X)