diff --git a/Documentation/ABI/testing/sysfs-driver-habanalabs b/Documentation/ABI/testing/sysfs-driver-habanalabs index 13b5b2ec3be769..1b98b6503b23f5 100644 --- a/Documentation/ABI/testing/sysfs-driver-habanalabs +++ b/Documentation/ABI/testing/sysfs-driver-habanalabs @@ -201,7 +201,19 @@ What: /sys/class/habanalabs/hl/status Date: Jan 2019 KernelVersion: 5.1 Contact: ogabbay@kernel.org -Description: Status of the card: "Operational", "Malfunction", "In reset". +Description: Status of the card: + + * "operational" - Device is available for work. + * "in reset" - Device is going through reset, will be + available shortly. + * "disabled" - Device is not usable. + * "needs reset" - Device is not usable until a hard reset + is initiated. + * "in device creation" - Device is not available yet, as it + is still initializing. + * "in reset after device release" - Device is going through + a compute-reset which is executed after a device release + (relevant for Gaudi2 only). What: /sys/class/habanalabs/hl/thermal_ver Date: Jan 2019 diff --git a/Documentation/accel/introduction.rst b/Documentation/accel/introduction.rst index 11cade51f387a6..89984dfececf0b 100644 --- a/Documentation/accel/introduction.rst +++ b/Documentation/accel/introduction.rst @@ -67,9 +67,9 @@ tree - drivers/accel/. The accelerator devices will be exposed to the user space with the dedicated 261 major number and will have the following convention: -- device char files - /dev/accel/accel* -- sysfs - /sys/class/accel/accel*/ -- debugfs - /sys/kernel/debug/accel/*/ +- device char files - /dev/accel/accel\* +- sysfs - /sys/class/accel/accel\*/ +- debugfs - /sys/kernel/debug/accel/\*/ Getting Started =============== diff --git a/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt b/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt deleted file mode 100644 index 525a4bfd863407..00000000000000 --- a/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt +++ /dev/null @@ -1,112 +0,0 @@ -Cadence DSI bridge -================== - -The Cadence DSI bridge is a DPI to DSI bridge supporting up to 4 DSI lanes. - -Required properties: -- compatible: should be set to "cdns,dsi". -- reg: physical base address and length of the controller's registers. -- interrupts: interrupt line connected to the DSI bridge. -- clocks: DSI bridge clocks. -- clock-names: must contain "dsi_p_clk" and "dsi_sys_clk". -- phys: phandle link to the MIPI D-PHY controller. -- phy-names: must contain "dphy". -- #address-cells: must be set to 1. -- #size-cells: must be set to 0. - -Optional properties: -- resets: DSI reset lines. -- reset-names: can contain "dsi_p_rst". - -Required subnodes: -- ports: Ports as described in Documentation/devicetree/bindings/graph.txt. - 2 ports are available: - * port 0: this port is only needed if some of your DSI devices are - controlled through an external bus like I2C or SPI. Can have at - most 4 endpoints. The endpoint number is directly encoding the - DSI virtual channel used by this device. - * port 1: represents the DPI input. - Other ports will be added later to support the new kind of inputs. - -- one subnode per DSI device connected on the DSI bus. Each DSI device should - contain a reg property encoding its virtual channel. 
- -Example: - dsi0: dsi@fd0c0000 { - compatible = "cdns,dsi"; - reg = <0x0 0xfd0c0000 0x0 0x1000>; - clocks = <&pclk>, <&sysclk>; - clock-names = "dsi_p_clk", "dsi_sys_clk"; - interrupts = <1>; - phys = <&dphy0>; - phy-names = "dphy"; - #address-cells = <1>; - #size-cells = <0>; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@1 { - reg = <1>; - dsi0_dpi_input: endpoint { - remote-endpoint = <&xxx_dpi_output>; - }; - }; - }; - - panel: dsi-dev@0 { - compatible = ""; - reg = <0>; - }; - }; - -or - - dsi0: dsi@fd0c0000 { - compatible = "cdns,dsi"; - reg = <0x0 0xfd0c0000 0x0 0x1000>; - clocks = <&pclk>, <&sysclk>; - clock-names = "dsi_p_clk", "dsi_sys_clk"; - interrupts = <1>; - phys = <&dphy1>; - phy-names = "dphy"; - #address-cells = <1>; - #size-cells = <0>; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <0>; - - dsi0_output: endpoint@0 { - reg = <0>; - remote-endpoint = <&dsi_panel_input>; - }; - }; - - port@1 { - reg = <1>; - dsi0_dpi_input: endpoint { - remote-endpoint = <&xxx_dpi_output>; - }; - }; - }; - }; - - i2c@xxx { - panel: panel@59 { - compatible = ""; - reg = <0x59>; - - port { - dsi_panel_input: endpoint { - remote-endpoint = <&dsi0_output>; - }; - }; - }; - }; diff --git a/Documentation/devicetree/bindings/display/bridge/cdns,dsi.yaml b/Documentation/devicetree/bindings/display/bridge/cdns,dsi.yaml new file mode 100644 index 00000000000000..23060324d16e61 --- /dev/null +++ b/Documentation/devicetree/bindings/display/bridge/cdns,dsi.yaml @@ -0,0 +1,180 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/bridge/cdns,dsi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Cadence DSI bridge + +maintainers: + - Boris Brezillon + +description: | + CDNS DSI is a bridge device which converts DPI to DSI + +properties: + compatible: + enum: + - cdns,dsi + - ti,j721e-dsi + + reg: + minItems: 1 + items: + - description: + Register block for controller's registers. + - description: + Register block for wrapper settings registers in case of TI J7 SoCs. + + clocks: + items: + - description: PSM clock, used by the IP + - description: sys clock, used by the IP + + clock-names: + items: + - const: dsi_p_clk + - const: dsi_sys_clk + + phys: + maxItems: 1 + + phy-names: + const: dphy + + interrupts: + maxItems: 1 + + resets: + maxItems: 1 + + reset-names: + const: dsi_p_rst + + ports: + $ref: /schemas/graph.yaml#/properties/ports + + properties: + port@0: + $ref: /schemas/graph.yaml#/properties/port + description: + Output port representing the DSI output. It can have + at most 4 endpoints. The endpoint number is directly encoding + the DSI virtual channel used by this device. + + port@1: + $ref: /schemas/graph.yaml#/properties/port + description: + Input port representing the DPI input. 
+ + required: + - port@1 + +allOf: + - $ref: ../dsi-controller.yaml# + + - if: + properties: + compatible: + contains: + const: ti,j721e-dsi + then: + properties: + reg: + minItems: 2 + maxItems: 2 + power-domains: + maxItems: 1 + else: + properties: + reg: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - phys + - phy-names + - ports + +unevaluatedProperties: false + +examples: + - | + bus { + #address-cells = <2>; + #size-cells = <2>; + + dsi@fd0c0000 { + compatible = "cdns,dsi"; + reg = <0x0 0xfd0c0000 0x0 0x1000>; + clocks = <&pclk>, <&sysclk>; + clock-names = "dsi_p_clk", "dsi_sys_clk"; + interrupts = <1>; + phys = <&dphy0>; + phy-names = "dphy"; + + #address-cells = <1>; + #size-cells = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + reg = <1>; + endpoint { + remote-endpoint = <&xxx_dpi_output>; + }; + }; + }; + + panel@0 { + compatible = "panasonic,vvx10f034n00"; + reg = <0>; + power-supply = <&vcc_lcd_reg>; + }; + }; + }; + + - | + bus { + #address-cells = <2>; + #size-cells = <2>; + + dsi@fd0c0000 { + compatible = "cdns,dsi"; + reg = <0x0 0xfd0c0000 0x0 0x1000>; + clocks = <&pclk>, <&sysclk>; + clock-names = "dsi_p_clk", "dsi_sys_clk"; + interrupts = <1>; + phys = <&dphy1>; + phy-names = "dphy"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + endpoint@0 { + reg = <0>; + remote-endpoint = <&dsi_panel_input>; + }; + }; + + port@1 { + reg = <1>; + endpoint { + remote-endpoint = <&xxx_dpi_output>; + }; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/bridge/fsl,ldb.yaml b/Documentation/devicetree/bindings/display/bridge/fsl,ldb.yaml index b19be0804abe59..6e0e3ba9b49eef 100644 --- a/Documentation/devicetree/bindings/display/bridge/fsl,ldb.yaml +++ b/Documentation/devicetree/bindings/display/bridge/fsl,ldb.yaml @@ -16,7 +16,9 @@ description: | properties: compatible: - const: fsl,imx8mp-ldb + enum: + - fsl,imx8mp-ldb + - fsl,imx93-ldb clocks: maxItems: 1 @@ -57,6 +59,18 @@ required: - clocks - ports +allOf: + - if: + properties: + compatible: + contains: + const: fsl,imx93-ldb + then: + properties: + ports: + properties: + port@2: false + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml index b697c42399ea3a..c9a882ee6d98f2 100644 --- a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml +++ b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml @@ -52,9 +52,49 @@ properties: maxItems: 1 description: extcon specifier for the Power Delivery - port: - $ref: /schemas/graph.yaml#/properties/port - description: A port node pointing to DPI host port node + ports: + $ref: /schemas/graph.yaml#/properties/ports + + properties: + port@0: + $ref: /schemas/graph.yaml#/$defs/port-base + unevaluatedProperties: false + description: A port node pointing to DPI host port node + + properties: + endpoint: + $ref: /schemas/graph.yaml#/$defs/endpoint-base + unevaluatedProperties: false + + properties: + link-frequencies: + minItems: 1 + maxItems: 1 + description: Allowed max link frequencies in Hz + + port@1: + $ref: /schemas/graph.yaml#/$defs/port-base + unevaluatedProperties: false + description: Video port for DP output + + properties: + endpoint: + $ref: /schemas/graph.yaml#/$defs/endpoint-base + unevaluatedProperties: false + + properties: + data-lanes: + minItems: 
1 + uniqueItems: true + items: + - enum: [ 0, 1 ] + - const: 1 + - const: 2 + - const: 3 + + required: + - port@0 + - port@1 required: - compatible @@ -63,6 +103,7 @@ required: - interrupts - reset-gpios - extcon + - ports additionalProperties: false @@ -85,9 +126,24 @@ examples: reset-gpios = <&pio 179 1>; extcon = <&usbc_extcon>; - port { - it6505_in: endpoint { - remote-endpoint = <&dpi_out>; + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + it6505_in: endpoint { + remote-endpoint = <&dpi_out>; + link-frequencies = /bits/ 64 <150000000>; + }; + }; + + port@1 { + reg = <1>; + it6505_out: endpoint { + remote-endpoint = <&dp_in>; + data-lanes = <0 1>; + }; }; }; }; diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml b/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml index d3454da1247a0f..a7eb2603691fbe 100644 --- a/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml +++ b/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml @@ -17,7 +17,9 @@ description: | properties: compatible: - const: ite,it66121 + enum: + - ite,it66121 + - ite,it6610 reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml b/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml index afeeb967393d9e..d33026f85e1913 100644 --- a/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml +++ b/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml @@ -11,13 +11,14 @@ maintainers: description: | This binding describes the MIPI DSI/CSI-2 encoder embedded in the Renesas - R-Car V3U SoC. The encoder can operate in either DSI or CSI-2 mode, with up + R-Car Gen4 SoCs. The encoder can operate in either DSI or CSI-2 mode, with up to four data lanes. 
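+# For example, a V4H (R8A779G0) design would use
+#     compatible = "renesas,r8a779g0-dsi-csi2-tx";
+# with the same register, clock and port layout as the V3U variant.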
properties: compatible: enum: - renesas,r8a779a0-dsi-csi2-tx # for V3U + - renesas,r8a779g0-dsi-csi2-tx # for V4H reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,dsi.yaml b/Documentation/devicetree/bindings/display/bridge/renesas,dsi.yaml index 131d5b63ec4f7b..e08c24633926b2 100644 --- a/Documentation/devicetree/bindings/display/bridge/renesas,dsi.yaml +++ b/Documentation/devicetree/bindings/display/bridge/renesas,dsi.yaml @@ -22,6 +22,7 @@ properties: items: - enum: - renesas,r9a07g044-mipi-dsi # RZ/G2{L,LC} + - renesas,r9a07g054-mipi-dsi # RZ/V2L - const: renesas,rzg2l-mipi-dsi reg: diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml index d4d585485e7b4e..92741486c24dd3 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml @@ -31,6 +31,7 @@ properties: - items: - enum: - mediatek,mt8186-disp-aal + - mediatek,mt8188-disp-aal - mediatek,mt8192-disp-aal - mediatek,mt8195-disp-aal - const: mediatek,mt8183-disp-aal diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,ccorr.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,ccorr.yaml index 63fb02014a56a9..b04820c95b222d 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,ccorr.yaml +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,ccorr.yaml @@ -27,12 +27,13 @@ properties: - const: mediatek,mt8192-disp-ccorr - items: - enum: + - mediatek,mt8188-disp-ccorr - mediatek,mt8195-disp-ccorr - const: mediatek,mt8192-disp-ccorr - items: - enum: - mediatek,mt8186-disp-ccorr - - const: mediatek,mt8183-disp-ccorr + - const: mediatek,mt8192-disp-ccorr reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,color.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,color.yaml index d2f89ee7996f1b..62306c88f48571 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,color.yaml +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,color.yaml @@ -37,6 +37,7 @@ properties: - enum: - mediatek,mt8183-disp-color - mediatek,mt8186-disp-color + - mediatek,mt8188-disp-color - mediatek,mt8192-disp-color - mediatek,mt8195-disp-color - const: mediatek,mt8173-disp-color diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dither.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,dither.yaml index 8ad8187c02d162..5c7445c174e526 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dither.yaml +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dither.yaml @@ -27,6 +27,7 @@ properties: - items: - enum: - mediatek,mt8186-disp-dither + - mediatek,mt8188-disp-dither - mediatek,mt8192-disp-dither - mediatek,mt8195-disp-dither - const: mediatek,mt8183-disp-dither diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml index a89ea0ea754279..a5c6a91fac7104 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml @@ -28,6 +28,7 @@ properties: - items: - enum: - mediatek,mt8186-disp-gamma + - mediatek,mt8188-disp-gamma - mediatek,mt8192-disp-gamma - mediatek,mt8195-disp-gamma - const: mediatek,mt8183-disp-gamma diff --git 
a/Documentation/devicetree/bindings/display/mediatek/mediatek,ovl.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,ovl.yaml index a2a27d0ca038cc..065e526f950e13 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,ovl.yaml +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,ovl.yaml @@ -36,6 +36,7 @@ properties: - const: mediatek,mt2701-disp-ovl - items: - enum: + - mediatek,mt8188-disp-ovl - mediatek,mt8195-disp-ovl - const: mediatek,mt8183-disp-ovl - items: diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,postmask.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,postmask.yaml index 654080bfbdfb41..27de644954010a 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,postmask.yaml +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,postmask.yaml @@ -26,6 +26,7 @@ properties: - items: - enum: - mediatek,mt8186-disp-postmask + - mediatek,mt8188-disp-postmask - const: mediatek,mt8192-disp-postmask reg: diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,rdma.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,rdma.yaml index 0882ae86e6c4ca..3ade2ece3fedc2 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,rdma.yaml +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,rdma.yaml @@ -31,6 +31,10 @@ properties: - const: mediatek,mt8183-disp-rdma - items: - const: mediatek,mt8195-disp-rdma + - items: + - enum: + - mediatek,mt8188-disp-rdma + - const: mediatek,mt8195-disp-rdma - items: - enum: - mediatek,mt7623-disp-rdma diff --git a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml index f2515af8256f0f..efe4257c031fe1 100644 --- a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml +++ b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml @@ -21,6 +21,9 @@ properties: - qcom,sc7280-edp - qcom,sc8180x-dp - qcom,sc8180x-edp + - qcom,sc8280xp-dp + - qcom,sc8280xp-edp + - qcom,sdm845-dp - qcom,sm8350-dp reg: @@ -81,6 +84,7 @@ properties: data-lanes: $ref: /schemas/types.yaml#/definitions/uint32-array + deprecated: true minItems: 1 maxItems: 4 items: @@ -102,8 +106,28 @@ properties: description: Input endpoint of the controller port@1: - $ref: /schemas/graph.yaml#/properties/port + $ref: /schemas/graph.yaml#/$defs/port-base description: Output endpoint of the controller + properties: + endpoint: + $ref: /schemas/media/video-interfaces.yaml# + unevaluatedProperties: false + properties: + data-lanes: + minItems: 1 + maxItems: 4 + items: + enum: [ 0, 1, 2, 3 ] + + link-frequencies: + minItems: 1 + maxItems: 4 + items: + enum: [ 1620000000, 2700000000, 5400000000, 8100000000 ] + + required: + - port@0 + - port@1 required: - compatible @@ -127,11 +151,10 @@ allOf: enum: - qcom,sc7280-edp - qcom,sc8180x-edp + - qcom,sc8280xp-edp then: properties: "#sound-dai-cells": false - reg: - maxItems: 4 else: properties: aux-bus: false @@ -193,6 +216,8 @@ examples: reg = <1>; endpoint { remote-endpoint = <&typec>; + data-lanes = <0 1>; + link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>; }; }; }; diff --git a/Documentation/devicetree/bindings/display/msm/dpu-common.yaml b/Documentation/devicetree/bindings/display/msm/dpu-common.yaml index 8ffbc30c6b7f20..3f953aa5e69497 100644 --- a/Documentation/devicetree/bindings/display/msm/dpu-common.yaml +++ 
b/Documentation/devicetree/bindings/display/msm/dpu-common.yaml @@ -13,7 +13,15 @@ maintainers: description: | Common properties for QCom DPU display controller. +# Do not select this by default, otherwise it is also selected for all +# display-controller@ nodes +select: + false + properties: + $nodename: + pattern: '^display-controller@[0-9a-f]+$' + interrupts: maxItems: 1 @@ -40,10 +48,6 @@ properties: - port@0 required: - - compatible - - reg - - reg-names - - clocks - interrupts - power-domains - operating-points-v2 diff --git a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml index 6e2fd6e9fa7f0a..e75a3efe4dace9 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml +++ b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml @@ -9,14 +9,33 @@ title: Qualcomm Display DSI controller maintainers: - Krishna Manikandan -allOf: - - $ref: "../dsi-controller.yaml#" - properties: compatible: - enum: - - qcom,mdss-dsi-ctrl - - qcom,dsi-ctrl-6g-qcm2290 + oneOf: + - items: + - enum: + - qcom,apq8064-dsi-ctrl + - qcom,msm8916-dsi-ctrl + - qcom,msm8953-dsi-ctrl + - qcom,msm8974-dsi-ctrl + - qcom,msm8996-dsi-ctrl + - qcom,msm8998-dsi-ctrl + - qcom,qcm2290-dsi-ctrl + - qcom,sc7180-dsi-ctrl + - qcom,sc7280-dsi-ctrl + - qcom,sdm660-dsi-ctrl + - qcom,sdm845-dsi-ctrl + - qcom,sm8150-dsi-ctrl + - qcom,sm8250-dsi-ctrl + - qcom,sm8350-dsi-ctrl + - qcom,sm8450-dsi-ctrl + - qcom,sm8550-dsi-ctrl + - const: qcom,mdss-dsi-ctrl + - items: + - enum: + - dsi-ctrl-6g-qcm2290 + - const: qcom,mdss-dsi-ctrl + deprecated: true reg: maxItems: 1 @@ -28,22 +47,23 @@ properties: maxItems: 1 clocks: - items: - - description: Display byte clock - - description: Display byte interface clock - - description: Display pixel clock - - description: Display core clock - - description: Display AHB clock - - description: Display AXI clock + description: | + Several clocks are used, depending on the variant. Typical ones are:: + - bus:: Display AHB clock. + - byte:: Display byte clock. + - byte_intf:: Display byte interface clock. + - core:: Display core clock. + - core_mss:: Core MultiMedia SubSystem clock. + - iface:: Display AXI clock. + - mdp_core:: MDP Core clock. + - mnoc:: MNOC clock + - pixel:: Display pixel clock. + minItems: 3 + maxItems: 9 clock-names: - items: - - const: byte - - const: byte_intf - - const: pixel - - const: core - - const: iface - - const: bus + minItems: 3 + maxItems: 9 phys: maxItems: 1 @@ -52,10 +72,6 @@ properties: deprecated: true const: dsi - "#address-cells": true - - "#size-cells": true - syscon-sfpb: description: A phandle to mmss_sfpb syscon node (only for DSIv2). $ref: "/schemas/types.yaml#/definitions/phandle" @@ -67,12 +83,16 @@ properties: 2 DSI links. assigned-clocks: - maxItems: 2 + minItems: 2 + maxItems: 4 description: | Parents of "byte" and "pixel" for the given platform. + For DSIv2 platforms this should contain "byte", "esc", "src" and + "pixel_src" clocks. assigned-clock-parents: - maxItems: 2 + minItems: 2 + maxItems: 4 description: | The Byte clock and Pixel clock PLL outputs provided by a DSI PHY block. 
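In practice the two (non-DSIv2) entries pair the byte and pixel clock sources with the PLL outputs of the attached DSI PHY. A minimal sketch, assuming the usual QCOM PHY clock-cell convention (0 = byte PLL, 1 = pixel PLL) and a hypothetical &dsi0_phy label:

    assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
                      <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
    assigned-clock-parents = <&dsi0_phy 0>, <&dsi0_phy 1>;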
@@ -103,7 +123,7 @@ properties: properties: data-lanes: maxItems: 4 - minItems: 4 + minItems: 1 items: enum: [ 0, 1, 2, 3 ] @@ -119,7 +139,7 @@ properties: properties: data-lanes: maxItems: 4 - minItems: 4 + minItems: 1 items: enum: [ 0, 1, 2, 3 ] @@ -127,6 +147,26 @@ properties: - port@0 - port@1 + avdd-supply: + description: + Phandle to vdd regulator device node + + vcca-supply: + description: + Phandle to vdd regulator device node + + vdd-supply: + description: + VDD regulator + + vddio-supply: + description: + VDD-IO regulator + + vdda-supply: + description: + VDDA regulator + required: - compatible - reg @@ -139,7 +179,192 @@ required: - assigned-clock-parents - ports -additionalProperties: false +allOf: + - $ref: ../dsi-controller.yaml# + - if: + properties: + compatible: + contains: + enum: + - qcom,apq8064-dsi-ctrl + then: + properties: + clocks: + maxItems: 7 + clock-names: + items: + - const: iface + - const: bus + - const: core_mmss + - const: src + - const: byte + - const: pixel + - const: core + + - if: + properties: + compatible: + contains: + enum: + - qcom,msm8916-dsi-ctrl + then: + properties: + clocks: + maxItems: 6 + clock-names: + items: + - const: mdp_core + - const: iface + - const: bus + - const: byte + - const: pixel + - const: core + + - if: + properties: + compatible: + contains: + enum: + - qcom,msm8953-dsi-ctrl + then: + properties: + clocks: + maxItems: 6 + clock-names: + items: + - const: mdp_core + - const: iface + - const: bus + - const: byte + - const: pixel + - const: core + + - if: + properties: + compatible: + contains: + enum: + - qcom,msm8974-dsi-ctrl + then: + properties: + clocks: + maxItems: 7 + clock-names: + items: + - const: mdp_core + - const: iface + - const: bus + - const: byte + - const: pixel + - const: core + - const: core_mmss + + - if: + properties: + compatible: + contains: + enum: + - qcom,msm8996-dsi-ctrl + then: + properties: + clocks: + maxItems: 7 + clock-names: + items: + - const: mdp_core + - const: byte + - const: iface + - const: bus + - const: core_mmss + - const: pixel + - const: core + + - if: + properties: + compatible: + contains: + enum: + - qcom,msm8998-dsi-ctrl + then: + properties: + clocks: + maxItems: 6 + clock-names: + items: + - const: byte + - const: byte_intf + - const: pixel + - const: core + - const: iface + - const: bus + + - if: + properties: + compatible: + contains: + enum: + - qcom,sc7180-dsi-ctrl + - qcom,sc7280-dsi-ctrl + - qcom,sm8150-dsi-ctrl + - qcom,sm8250-dsi-ctrl + - qcom,sm8350-dsi-ctrl + - qcom,sm8450-dsi-ctrl + - qcom,sm8550-dsi-ctrl + then: + properties: + clocks: + maxItems: 6 + clock-names: + items: + - const: byte + - const: byte_intf + - const: pixel + - const: core + - const: iface + - const: bus + + - if: + properties: + compatible: + contains: + enum: + - qcom,sdm660-dsi-ctrl + then: + properties: + clocks: + maxItems: 9 + clock-names: + items: + - const: mdp_core + - const: byte + - const: byte_intf + - const: mnoc + - const: iface + - const: bus + - const: core_mmss + - const: pixel + - const: core + + - if: + properties: + compatible: + contains: + enum: + - qcom,sdm845-dsi-ctrl + then: + properties: + clocks: + maxItems: 6 + clock-names: + items: + - const: byte + - const: byte_intf + - const: pixel + - const: core + - const: iface + - const: bus + +unevaluatedProperties: false examples: - | @@ -149,7 +374,7 @@ examples: #include dsi@ae94000 { - compatible = "qcom,mdss-dsi-ctrl"; + compatible = "qcom,sc7180-dsi-ctrl", "qcom,mdss-dsi-ctrl"; reg = <0x0ae94000 0x400>; reg-names = 
"dsi_ctrl"; diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml index 2f1fd140c87df9..cf4a338c466102 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml +++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml @@ -16,6 +16,7 @@ properties: compatible: enum: - qcom,dsi-phy-28nm-hpm + - qcom,dsi-phy-28nm-hpm-fam-b - qcom,dsi-phy-28nm-lp - qcom,dsi-phy-28nm-8960 diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml index c851770bbdf270..8e9031bbde731f 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml +++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml @@ -18,6 +18,10 @@ properties: - qcom,dsi-phy-7nm - qcom,dsi-phy-7nm-8150 - qcom,sc7280-dsi-phy-7nm + - qcom,sm6375-dsi-phy-7nm + - qcom,sm8350-dsi-phy-5nm + - qcom,sm8450-dsi-phy-5nm + - qcom,sm8550-dsi-phy-4nm reg: items: @@ -44,7 +48,6 @@ required: - compatible - reg - reg-names - - vdds-supply unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml index 76d40f7933ddea..0f6f08890e7e08 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml +++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml @@ -4,14 +4,13 @@ $id: http://devicetree.org/schemas/display/msm/dsi-phy-common.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Description of Qualcomm Display DSI PHY common dt properties +title: Qualcomm Display DSI PHY Common Properties maintainers: - Krishna Manikandan -description: | - This defines the DSI PHY dt properties which are common for all - dsi phy versions. +description: + Common properties for Qualcomm Display DSI PHY. properties: "#clock-cells": diff --git a/Documentation/devicetree/bindings/display/msm/gpu.yaml b/Documentation/devicetree/bindings/display/msm/gpu.yaml index c5f49842dc7b56..db8afc636576b5 100644 --- a/Documentation/devicetree/bindings/display/msm/gpu.yaml +++ b/Documentation/devicetree/bindings/display/msm/gpu.yaml @@ -149,6 +149,8 @@ allOf: description: GPU 3D engine clock - const: rbbmtimer description: GPU RBBM Timer for Adreno 5xx series + - const: rbcpr + description: GPU RB Core Power Reduction clock minItems: 2 maxItems: 7 diff --git a/Documentation/devicetree/bindings/display/msm/mdp5.txt b/Documentation/devicetree/bindings/display/msm/mdp5.txt deleted file mode 100644 index 65d03c58dee6f1..00000000000000 --- a/Documentation/devicetree/bindings/display/msm/mdp5.txt +++ /dev/null @@ -1,132 +0,0 @@ -Qualcomm adreno/snapdragon MDP5 display controller - -Description: - -This is the bindings documentation for the MDP5 display -controller found in SoCs like MSM8974, APQ8084, MSM8916, MSM8994 and MSM8996. - -MDP5: -Required properties: -- compatible: - * "qcom,mdp5" - MDP5 -- reg: Physical base address and length of the controller's registers. -- reg-names: The names of register regions. The following regions are required: - * "mdp_phys" -- interrupts: Interrupt line from MDP5 to MDSS interrupt controller. -- clocks: device clocks. See ../clocks/clock-bindings.txt for details. -- clock-names: the following clocks are required. -- * "bus" -- * "iface" -- * "core" -- * "vsync" -- ports: contains the list of output ports from MDP. 
These connect to interfaces - that are external to the MDP hardware, such as HDMI, DSI, EDP etc (LVDS is a - special case since it is a part of the MDP block itself). - - Each output port contains an endpoint that describes how it is connected to an - external interface. These are described by the standard properties documented - here: - Documentation/devicetree/bindings/graph.txt - Documentation/devicetree/bindings/media/video-interfaces.txt - - The availability of output ports can vary across SoC revisions: - - For MSM8974 and APQ8084: - Port 0 -> MDP_INTF0 (eDP) - Port 1 -> MDP_INTF1 (DSI1) - Port 2 -> MDP_INTF2 (DSI2) - Port 3 -> MDP_INTF3 (HDMI) - - For MSM8916: - Port 0 -> MDP_INTF1 (DSI1) - - For MSM8994 and MSM8996: - Port 0 -> MDP_INTF1 (DSI1) - Port 1 -> MDP_INTF2 (DSI2) - Port 2 -> MDP_INTF3 (HDMI) - -Optional properties: -- clock-names: the following clocks are optional: - * "lut" - * "tbu" - * "tbu_rt" - -Example: - -/ { - ... - - mdss: mdss@1a00000 { - compatible = "qcom,mdss"; - reg = <0x1a00000 0x1000>, - <0x1ac8000 0x3000>; - reg-names = "mdss_phys", "vbif_phys"; - - power-domains = <&gcc MDSS_GDSC>; - - clocks = <&gcc GCC_MDSS_AHB_CLK>, - <&gcc GCC_MDSS_AXI_CLK>, - <&gcc GCC_MDSS_VSYNC_CLK>; - clock-names = "iface", - "bus", - "vsync" - - interrupts = <0 72 0>; - - interrupt-controller; - #interrupt-cells = <1>; - - #address-cells = <1>; - #size-cells = <1>; - ranges; - - mdp: mdp@1a01000 { - compatible = "qcom,mdp5"; - reg = <0x1a01000 0x90000>; - reg-names = "mdp_phys"; - - interrupt-parent = <&mdss>; - interrupts = <0 0>; - - clocks = <&gcc GCC_MDSS_AHB_CLK>, - <&gcc GCC_MDSS_AXI_CLK>, - <&gcc GCC_MDSS_MDP_CLK>, - <&gcc GCC_MDSS_VSYNC_CLK>; - clock-names = "iface", - "bus", - "core", - "vsync"; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@0 { - reg = <0>; - mdp5_intf1_out: endpoint { - remote-endpoint = <&dsi0_in>; - }; - }; - }; - }; - - dsi0: dsi@1a98000 { - ... - ports { - ... - port@0 { - reg = <0>; - dsi0_in: endpoint { - remote-endpoint = <&mdp5_intf1_out>; - }; - }; - ... - }; - ... - }; - - dsi_phy0: dsi-phy@1a98300 { - ... - }; - }; -}; diff --git a/Documentation/devicetree/bindings/display/msm/mdss-common.yaml b/Documentation/devicetree/bindings/display/msm/mdss-common.yaml index 27d7242657b2a9..ccd7d641752368 100644 --- a/Documentation/devicetree/bindings/display/msm/mdss-common.yaml +++ b/Documentation/devicetree/bindings/display/msm/mdss-common.yaml @@ -15,7 +15,15 @@ description: Device tree bindings for MSM Mobile Display Subsystem(MDSS) that encapsulates sub-blocks like DPU display controller, DSI and DP interfaces etc. +# Do not select this by default, otherwise it is also selected for qcom,mdss +# devices. 
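+# (Each SoC-specific MDSS schema instead matches through its own compatible
+# string while referencing this file.)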
+select: + false + properties: + $nodename: + pattern: "^display-subsystem@[0-9a-f]+$" + reg: maxItems: 1 @@ -70,7 +78,6 @@ properties: - description: MDSS_CORE reset required: - - compatible - reg - reg-names - power-domains diff --git a/Documentation/devicetree/bindings/display/msm/qcom,mdp5.yaml b/Documentation/devicetree/bindings/display/msm/qcom,mdp5.yaml new file mode 100644 index 00000000000000..ef461ad6ce4a60 --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/qcom,mdp5.yaml @@ -0,0 +1,156 @@ +# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/msm/qcom,mdp5.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm Adreno/Snapdragon Mobile Display controller (MDP5) + +description: + MDP5 display controller found in SoCs like MSM8974, APQ8084, MSM8916, MSM8994 + and MSM8996. + +maintainers: + - Dmitry Baryshkov + - Rob Clark + +properties: + compatible: + oneOf: + - const: qcom,mdp5 + deprecated: true + - items: + - enum: + - qcom,apq8084-mdp5 + - qcom,msm8916-mdp5 + - qcom,msm8917-mdp5 + - qcom,msm8953-mdp5 + - qcom,msm8974-mdp5 + - qcom,msm8976-mdp5 + - qcom,msm8994-mdp5 + - qcom,msm8996-mdp5 + - qcom,sdm630-mdp5 + - qcom,sdm660-mdp5 + - const: qcom,mdp5 + + $nodename: + pattern: '^display-controller@[0-9a-f]+$' + + reg: + maxItems: 1 + + reg-names: + items: + - const: mdp_phys + + interrupts: + maxItems: 1 + + clocks: + minItems: 4 + maxItems: 7 + + clock-names: + oneOf: + - minItems: 4 + items: + - const: iface + - const: bus + - const: core + - const: vsync + - const: lut + - const: tbu + - const: tbu_rt + #MSM8996 has additional iommu clock + - items: + - const: iface + - const: bus + - const: core + - const: iommu + - const: vsync + + interconnects: + minItems: 1 + items: + - description: Interconnect path from mdp0 (or a single mdp) port to the data bus + - description: Interconnect path from mdp1 port to the data bus + - description: Interconnect path from rotator port to the data bus + + interconnect-names: + minItems: 1 + items: + - const: mdp0-mem + - const: mdp1-mem + - const: rotator-mem + + iommus: + items: + - description: apps SMMU with the Stream-ID mask for Hard-Fail port0 + + power-domains: + maxItems: 1 + + operating-points-v2: true + opp-table: + type: object + + ports: + $ref: /schemas/graph.yaml#/properties/ports + description: > + Contains the list of output ports from DPU device. These ports + connect to interfaces that are external to the DPU hardware, + such as DSI, DP etc. MDP5 devices support up to 4 ports: + one or two DSI ports, HDMI and eDP. + + patternProperties: + "^port@[0-3]+$": + $ref: /schemas/graph.yaml#/properties/port + + # at least one port is required + required: + - port@0 + +required: + - compatible + - reg + - reg-names + - clocks + - clock-names + - ports + +additionalProperties: false + +examples: + - | + #include + #include + display-controller@1a01000 { + compatible = "qcom,mdp5"; + reg = <0x1a01000 0x90000>; + reg-names = "mdp_phys"; + + interrupt-parent = <&mdss>; + interrupts = <0>; + + clocks = <&gcc GCC_MDSS_AHB_CLK>, + <&gcc GCC_MDSS_AXI_CLK>, + <&gcc GCC_MDSS_MDP_CLK>, + <&gcc GCC_MDSS_VSYNC_CLK>; + clock-names = "iface", + "bus", + "core", + "vsync"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + endpoint { + remote-endpoint = <&dsi0_in>; + }; + }; + }; + }; +... 
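The bare "qcom,mdp5" compatible is kept only as a deprecated fallback; new device trees are expected to pair it with the SoC-specific entry, as in this msm8916 sketch (consistent with the updated qcom,mdss example below):

    display-controller@1a01000 {
        compatible = "qcom,msm8916-mdp5", "qcom,mdp5";
        /* remaining properties as in the example above */
    };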
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml index ba0460268731b3..20889e40943009 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml @@ -15,6 +15,9 @@ description: encapsulates sub-blocks like MDP5, DSI, HDMI, eDP, etc. properties: + $nodename: + pattern: "^display-subsystem@[0-9a-f]+$" + compatible: enum: - qcom,mdss @@ -44,18 +47,30 @@ properties: The MDSS power domain provided by GCC clocks: - minItems: 1 - items: - - description: Display abh clock - - description: Display axi clock - - description: Display vsync clock + oneOf: + - minItems: 3 + items: + - description: Display abh clock + - description: Display axi clock + - description: Display vsync clock + - description: Display core clock + - minItems: 1 + items: + - description: Display abh clock + - description: Display core clock clock-names: - minItems: 1 - items: - - const: iface - - const: bus - - const: vsync + oneOf: + - minItems: 3 + items: + - const: iface + - const: bus + - const: vsync + - const: core + - minItems: 1 + items: + - const: iface + - const: core "#address-cells": const: 1 @@ -84,17 +99,19 @@ required: - ranges patternProperties: - "^mdp@[1-9a-f][0-9a-f]*$": + "^display-controller@[1-9a-f][0-9a-f]*$": type: object properties: compatible: - const: qcom,mdp5 + contains: + const: qcom,mdp5 "^dsi@[1-9a-f][0-9a-f]*$": type: object properties: compatible: - const: qcom,mdss-dsi-ctrl + contains: + const: qcom,mdss-dsi-ctrl "^phy@[1-9a-f][0-9a-f]*$": type: object @@ -107,12 +124,6 @@ patternProperties: - qcom,dsi-phy-20nm - qcom,dsi-phy-28nm-hpm - qcom,dsi-phy-28nm-lp - - "^hdmi-phy@[1-9a-f][0-9a-f]*$": - type: object - properties: - compatible: - enum: - qcom,hdmi-phy-8084 - qcom,hdmi-phy-8660 - qcom,hdmi-phy-8960 @@ -137,7 +148,7 @@ examples: - | #include #include - mdss@1a00000 { + display-subsystem@1a00000 { compatible = "qcom,mdss"; reg = <0x1a00000 0x1000>, <0x1ac8000 0x3000>; @@ -161,8 +172,8 @@ examples: #size-cells = <1>; ranges; - mdp@1a01000 { - compatible = "qcom,mdp5"; + display-controller@1a01000 { + compatible = "qcom,msm8916-mdp5", "qcom,mdp5"; reg = <0x01a01000 0x89000>; reg-names = "mdp_phys"; diff --git a/Documentation/devicetree/bindings/display/msm/qcom,msm8998-dpu.yaml b/Documentation/devicetree/bindings/display/msm/qcom,msm8998-dpu.yaml index b02adba36e9ece..8d3cd46260fb60 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,msm8998-dpu.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,msm8998-dpu.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/msm/qcom,msm8998-dpu.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Display DPU dt properties for MSM8998 target +title: Qualcomm Display DPU on MSM8998 maintainers: - AngeloGioacchino Del Regno @@ -13,8 +13,7 @@ $ref: /schemas/display/msm/dpu-common.yaml# properties: compatible: - items: - - const: qcom,msm8998-dpu + const: qcom,msm8998-dpu reg: items: @@ -46,6 +45,13 @@ properties: - const: core - const: vsync +required: + - compatible + - reg + - reg-names + - clocks + - clock-names + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/display/msm/qcom,msm8998-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,msm8998-mdss.yaml index cf52ff77a41aa6..3c2b6ed98a568e 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,msm8998-mdss.yaml +++ 
b/Documentation/devicetree/bindings/display/msm/qcom,msm8998-mdss.yaml @@ -18,8 +18,7 @@ $ref: /schemas/display/msm/mdss-common.yaml# properties: compatible: - items: - - const: qcom,msm8998-mdss + const: qcom,msm8998-mdss clocks: items: @@ -47,7 +46,9 @@ patternProperties: type: object properties: compatible: - const: qcom,mdss-dsi-ctrl + items: + - const: qcom,msm8998-dsi-ctrl + - const: qcom,mdss-dsi-ctrl "^phy@[0-9a-f]+$": type: object @@ -55,6 +56,9 @@ patternProperties: compatible: const: qcom,dsi-phy-10nm-8998 +required: + - compatible + unevaluatedProperties: false examples: @@ -126,7 +130,7 @@ examples: }; dsi@c994000 { - compatible = "qcom,mdss-dsi-ctrl"; + compatible = "qcom,msm8998-dsi-ctrl", "qcom,mdss-dsi-ctrl"; reg = <0x0c994000 0x400>; reg-names = "dsi_ctrl"; @@ -196,7 +200,7 @@ examples: }; dsi@c996000 { - compatible = "qcom,mdss-dsi-ctrl"; + compatible = "qcom,msm8998-dsi-ctrl", "qcom,mdss-dsi-ctrl"; reg = <0x0c996000 0x400>; reg-names = "dsi_ctrl"; diff --git a/Documentation/devicetree/bindings/display/msm/qcom,qcm2290-dpu.yaml b/Documentation/devicetree/bindings/display/msm/qcom,qcm2290-dpu.yaml index a7b382f01b5690..414f4e7ebdf1d5 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,qcm2290-dpu.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,qcm2290-dpu.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/msm/qcom,qcm2290-dpu.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Display DPU dt properties for QCM2290 target +title: Qualcomm Display DPU on QCM2290 maintainers: - Loic Poulain @@ -13,8 +13,7 @@ $ref: /schemas/display/msm/dpu-common.yaml# properties: compatible: - items: - - const: qcom,qcm2290-dpu + const: qcom,qcm2290-dpu reg: items: @@ -42,6 +41,13 @@ properties: - const: lut - const: vsync +required: + - compatible + - reg + - reg-names + - clocks + - clock-names + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/display/msm/qcom,qcm2290-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,qcm2290-mdss.yaml index 4795e13c7b597b..2995b84b2cd4eb 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,qcm2290-mdss.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,qcm2290-mdss.yaml @@ -18,8 +18,7 @@ $ref: /schemas/display/msm/mdss-common.yaml# properties: compatible: - items: - - const: qcom,qcm2290-mdss + const: qcom,qcm2290-mdss clocks: items: @@ -61,6 +60,9 @@ patternProperties: compatible: const: qcom,dsi-phy-14nm-2290 +required: + - compatible + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sc7180-dpu.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sc7180-dpu.yaml index bd590a6b5b96ad..1fb8321d9ee80c 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sc7180-dpu.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sc7180-dpu.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/msm/qcom,sc7180-dpu.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Display DPU dt properties for SC7180 target +title: Qualcomm Display DPU on SC7180 maintainers: - Krishna Manikandan @@ -13,8 +13,7 @@ $ref: /schemas/display/msm/dpu-common.yaml# properties: compatible: - items: - - const: qcom,sc7180-dpu + const: qcom,sc7180-dpu reg: items: @@ -44,6 +43,13 @@ properties: - const: core - const: vsync +required: + - compatible + - reg + - reg-names + - clocks + - clock-names + unevaluatedProperties: false examples: diff --git 
a/Documentation/devicetree/bindings/display/msm/qcom,sc7180-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sc7180-mdss.yaml index 13e396d61a5129..42ef06edddc429 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sc7180-mdss.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sc7180-mdss.yaml @@ -18,8 +18,7 @@ $ref: /schemas/display/msm/mdss-common.yaml# properties: compatible: - items: - - const: qcom,sc7180-mdss + const: qcom,sc7180-mdss clocks: items: @@ -59,7 +58,9 @@ patternProperties: type: object properties: compatible: - const: qcom,mdss-dsi-ctrl + items: + - const: qcom,sc7180-dsi-ctrl + - const: qcom,mdss-dsi-ctrl "^phy@[0-9a-f]+$": type: object @@ -67,6 +68,9 @@ patternProperties: compatible: const: qcom,dsi-phy-10nm +required: + - compatible + unevaluatedProperties: false examples: @@ -142,7 +146,7 @@ examples: }; dsi@ae94000 { - compatible = "qcom,mdss-dsi-ctrl"; + compatible = "qcom,sc7180-dsi-ctrl", "qcom,mdss-dsi-ctrl"; reg = <0x0ae94000 0x400>; reg-names = "dsi_ctrl"; diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sc7280-dpu.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sc7280-dpu.yaml index 924059b387b6ba..26dc073bd19a19 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sc7280-dpu.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sc7280-dpu.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/msm/qcom,sc7280-dpu.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Display DPU dt properties for SC7280 +title: Qualcomm Display DPU on SC7280 maintainers: - Krishna Manikandan @@ -43,6 +43,13 @@ properties: - const: core - const: vsync +required: + - compatible + - reg + - reg-names + - clocks + - clock-names + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sc7280-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sc7280-mdss.yaml index a3de1744ba119c..078e1d1a7d2fca 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sc7280-mdss.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sc7280-mdss.yaml @@ -58,7 +58,9 @@ patternProperties: type: object properties: compatible: - const: qcom,mdss-dsi-ctrl + items: + - const: qcom,sc7280-dsi-ctrl + - const: qcom,mdss-dsi-ctrl "^edp@[0-9a-f]+$": type: object @@ -74,6 +76,9 @@ patternProperties: - qcom,sc7280-dsi-phy-7nm - qcom,sc7280-edp-phy +required: + - compatible + unevaluatedProperties: false examples: @@ -162,7 +167,7 @@ examples: }; dsi@ae94000 { - compatible = "qcom,mdss-dsi-ctrl"; + compatible = "qcom,sc7280-dsi-ctrl", "qcom,mdss-dsi-ctrl"; reg = <0x0ae94000 0x400>; reg-names = "dsi_ctrl"; diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sc8280xp-dpu.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sc8280xp-dpu.yaml new file mode 100644 index 00000000000000..f2c8e16cf0675c --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/qcom,sc8280xp-dpu.yaml @@ -0,0 +1,122 @@ +# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/msm/qcom,sc8280xp-dpu.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm SC8280XP Display Processing Unit + +maintainers: + - Bjorn Andersson + +description: + Device tree bindings for SC8280XP Display Processing Unit. 
+ +$ref: /schemas/display/msm/dpu-common.yaml# + +properties: + compatible: + const: qcom,sc8280xp-dpu + + reg: + items: + - description: Address offset and size for mdp register set + - description: Address offset and size for vbif register set + + reg-names: + items: + - const: mdp + - const: vbif + + clocks: + items: + - description: Display hf axi clock + - description: Display sf axi clock + - description: Display ahb clock + - description: Display lut clock + - description: Display core clock + - description: Display vsync clock + + clock-names: + items: + - const: bus + - const: nrt_bus + - const: iface + - const: lut + - const: core + - const: vsync + +unevaluatedProperties: false + +examples: + - | + #include + #include + #include + #include + #include + + display-controller@ae01000 { + compatible = "qcom,sc8280xp-dpu"; + reg = <0x0ae01000 0x8f000>, + <0x0aeb0000 0x2008>; + reg-names = "mdp", "vbif"; + + clocks = <&gcc GCC_DISP_HF_AXI_CLK>, + <&gcc GCC_DISP_SF_AXI_CLK>, + <&dispcc0 DISP_CC_MDSS_AHB_CLK>, + <&dispcc0 DISP_CC_MDSS_MDP_LUT_CLK>, + <&dispcc0 DISP_CC_MDSS_MDP_CLK>, + <&dispcc0 DISP_CC_MDSS_VSYNC_CLK>; + clock-names = "bus", + "nrt_bus", + "iface", + "lut", + "core", + "vsync"; + + assigned-clocks = <&dispcc0 DISP_CC_MDSS_MDP_CLK>, + <&dispcc0 DISP_CC_MDSS_VSYNC_CLK>; + assigned-clock-rates = <460000000>, + <19200000>; + + operating-points-v2 = <&mdp_opp_table>; + power-domains = <&rpmhpd SC8280XP_MMCX>; + + interrupt-parent = <&mdss0>; + interrupts = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + endpoint { + remote-endpoint = <&mdss0_dp0_in>; + }; + }; + + port@4 { + reg = <4>; + endpoint { + remote-endpoint = <&mdss0_dp1_in>; + }; + }; + + port@5 { + reg = <5>; + endpoint { + remote-endpoint = <&mdss0_dp3_in>; + }; + }; + + port@6 { + reg = <6>; + endpoint { + remote-endpoint = <&mdss0_dp2_in>; + }; + }; + }; + }; +... diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sc8280xp-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sc8280xp-mdss.yaml new file mode 100644 index 00000000000000..c239544bc37f2b --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/qcom,sc8280xp-mdss.yaml @@ -0,0 +1,151 @@ +# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/msm/qcom,sc8280xp-mdss.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm SC8280XP Mobile Display Subsystem + +maintainers: + - Bjorn Andersson + +description: + Device tree bindings for MSM Mobile Display Subsystem (MDSS) that encapsulates + sub-blocks like DPU display controller, DSI and DP interfaces etc. 
+ +$ref: /schemas/display/msm/mdss-common.yaml# + +properties: + compatible: + const: qcom,sc8280xp-mdss + + clocks: + items: + - description: Display AHB clock from gcc + - description: Display AHB clock from dispcc + - description: Display core clock + + clock-names: + items: + - const: iface + - const: ahb + - const: core + +patternProperties: + "^display-controller@[0-9a-f]+$": + type: object + properties: + compatible: + const: qcom,sc8280xp-dpu + + "^displayport-controller@[0-9a-f]+$": + type: object + properties: + compatible: + enum: + - qcom,sc8280xp-dp + - qcom,sc8280xp-edp + +unevaluatedProperties: false + +examples: + - | + #include + #include + #include + #include + #include + + display-subsystem@ae00000 { + compatible = "qcom,sc8280xp-mdss"; + reg = <0x0ae00000 0x1000>; + reg-names = "mdss"; + + power-domains = <&dispcc0 MDSS_GDSC>; + + clocks = <&gcc GCC_DISP_AHB_CLK>, + <&dispcc0 DISP_CC_MDSS_AHB_CLK>, + <&dispcc0 DISP_CC_MDSS_MDP_CLK>; + clock-names = "iface", + "ahb", + "core"; + + resets = <&dispcc0 DISP_CC_MDSS_CORE_BCR>; + + interrupts = ; + interrupt-controller; + #interrupt-cells = <1>; + + interconnects = <&mmss_noc MASTER_MDP0 0 &mc_virt SLAVE_EBI1 0>, + <&mmss_noc MASTER_MDP1 0 &mc_virt SLAVE_EBI1 0>; + interconnect-names = "mdp0-mem", "mdp1-mem"; + + iommus = <&apps_smmu 0x1000 0x402>; + + #address-cells = <1>; + #size-cells = <1>; + ranges; + + display-controller@ae01000 { + compatible = "qcom,sc8280xp-dpu"; + reg = <0x0ae01000 0x8f000>, + <0x0aeb0000 0x2008>; + reg-names = "mdp", "vbif"; + + clocks = <&gcc GCC_DISP_HF_AXI_CLK>, + <&gcc GCC_DISP_SF_AXI_CLK>, + <&dispcc0 DISP_CC_MDSS_AHB_CLK>, + <&dispcc0 DISP_CC_MDSS_MDP_LUT_CLK>, + <&dispcc0 DISP_CC_MDSS_MDP_CLK>, + <&dispcc0 DISP_CC_MDSS_VSYNC_CLK>; + clock-names = "bus", + "nrt_bus", + "iface", + "lut", + "core", + "vsync"; + + assigned-clocks = <&dispcc0 DISP_CC_MDSS_VSYNC_CLK>; + assigned-clock-rates = <19200000>; + + operating-points-v2 = <&mdss0_mdp_opp_table>; + power-domains = <&rpmhpd SC8280XP_MMCX>; + + interrupt-parent = <&mdss0>; + interrupts = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + endpoint { + remote-endpoint = <&mdss0_dp0_in>; + }; + }; + + port@4 { + reg = <4>; + endpoint { + remote-endpoint = <&mdss0_dp1_in>; + }; + }; + + port@5 { + reg = <5>; + endpoint { + remote-endpoint = <&mdss0_dp3_in>; + }; + }; + + port@6 { + reg = <6>; + endpoint { + remote-endpoint = <&mdss0_dp2_in>; + }; + }; + }; + }; + }; +... 
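A displayport-controller child accepted by the schema above would be filled in along these lines; the unit address and the omitted resources are illustrative assumptions, not taken from the binding:

    displayport-controller@ae90000 {
        compatible = "qcom,sc8280xp-dp";
        /* reg, clocks, phys, ports etc. per the dp-controller binding */
    };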
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sdm845-dpu.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sdm845-dpu.yaml index 5719b45f28602d..0f7765d832e7d1 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sdm845-dpu.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sdm845-dpu.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/msm/qcom,sdm845-dpu.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Display DPU dt properties for SDM845 target +title: Qualcomm Display DPU on SDM845 maintainers: - Krishna Manikandan @@ -13,8 +13,7 @@ $ref: /schemas/display/msm/dpu-common.yaml# properties: compatible: - items: - - const: qcom,sdm845-dpu + const: qcom,sdm845-dpu reg: items: @@ -42,6 +41,13 @@ properties: - const: core - const: vsync +required: + - compatible + - reg + - reg-names + - clocks + - clock-names + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sdm845-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sdm845-mdss.yaml index 31ca6f99fc2237..6ecb00920d7f69 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sdm845-mdss.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sdm845-mdss.yaml @@ -18,8 +18,7 @@ $ref: /schemas/display/msm/mdss-common.yaml# properties: compatible: - items: - - const: qcom,sdm845-mdss + const: qcom,sdm845-mdss clocks: items: @@ -47,11 +46,19 @@ patternProperties: compatible: const: qcom,sdm845-dpu + "^displayport-controller@[0-9a-f]+$": + type: object + properties: + compatible: + const: qcom,sdm845-dp + "^dsi@[0-9a-f]+$": type: object properties: compatible: - const: qcom,mdss-dsi-ctrl + items: + - const: qcom,sdm845-dsi-ctrl + - const: qcom,mdss-dsi-ctrl "^phy@[0-9a-f]+$": type: object @@ -59,6 +66,9 @@ patternProperties: compatible: const: qcom,dsi-phy-10nm +required: + - compatible + unevaluatedProperties: false examples: @@ -128,7 +138,7 @@ examples: }; dsi@ae94000 { - compatible = "qcom,mdss-dsi-ctrl"; + compatible = "qcom,sdm845-dsi-ctrl", "qcom,mdss-dsi-ctrl"; reg = <0x0ae94000 0x400>; reg-names = "dsi_ctrl"; @@ -198,7 +208,7 @@ examples: }; dsi@ae96000 { - compatible = "qcom,mdss-dsi-ctrl"; + compatible = "qcom,sdm845-dsi-ctrl", "qcom,mdss-dsi-ctrl"; reg = <0x0ae96000 0x400>; reg-names = "dsi_ctrl"; diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm6115-dpu.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm6115-dpu.yaml index 4a39a303140998..bf62c2f5325ad5 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sm6115-dpu.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm6115-dpu.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/msm/qcom,sm6115-dpu.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Display DPU dt properties for SM6115 target +title: Qualcomm Display DPU on SM6115 maintainers: - Dmitry Baryshkov @@ -13,8 +13,7 @@ $ref: /schemas/display/msm/dpu-common.yaml# properties: compatible: - items: - - const: qcom,sm6115-dpu + const: qcom,sm6115-dpu reg: items: diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm6115-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm6115-mdss.yaml index 886858ef670005..2491cb100b3388 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sm6115-mdss.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm6115-mdss.yaml @@ -18,8 +18,7 @@ $ref: /schemas/display/msm/mdss-common.yaml# properties: 
compatible: - items: - - const: qcom,sm6115-mdss + const: qcom,sm6115-mdss clocks: items: diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8150-dpu.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8150-dpu.yaml new file mode 100644 index 00000000000000..2b3f3fe9bdf7ee --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8150-dpu.yaml @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/msm/qcom,sm8150-dpu.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm SM8150 Display DPU + +maintainers: + - Dmitry Baryshkov + +$ref: /schemas/display/msm/dpu-common.yaml# + +properties: + compatible: + const: qcom,sm8150-dpu + + reg: + items: + - description: Address offset and size for mdp register set + - description: Address offset and size for vbif register set + + reg-names: + items: + - const: mdp + - const: vbif + + clocks: + items: + - description: Display ahb clock + - description: Display hf axi clock + - description: Display core clock + - description: Display vsync clock + + clock-names: + items: + - const: iface + - const: bus + - const: core + - const: vsync + +unevaluatedProperties: false + +examples: + - | + #include + #include + #include + #include + #include + + display-controller@ae01000 { + compatible = "qcom,sm8150-dpu"; + reg = <0x0ae01000 0x8f000>, + <0x0aeb0000 0x2008>; + reg-names = "mdp", "vbif"; + + clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&gcc GCC_DISP_HF_AXI_CLK>, + <&dispcc DISP_CC_MDSS_MDP_CLK>, + <&dispcc DISP_CC_MDSS_VSYNC_CLK>; + clock-names = "iface", "bus", "core", "vsync"; + + assigned-clocks = <&dispcc DISP_CC_MDSS_VSYNC_CLK>; + assigned-clock-rates = <19200000>; + + operating-points-v2 = <&mdp_opp_table>; + power-domains = <&rpmhpd SM8150_MMCX>; + + interrupt-parent = <&mdss>; + interrupts = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + endpoint { + remote-endpoint = <&dsi0_in>; + }; + }; + + port@1 { + reg = <1>; + endpoint { + remote-endpoint = <&dsi1_in>; + }; + }; + }; + }; +... diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8150-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8150-mdss.yaml new file mode 100644 index 00000000000000..5182e958e0691d --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8150-mdss.yaml @@ -0,0 +1,332 @@ +# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/msm/qcom,sm8150-mdss.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm SM8150 Display MDSS + +maintainers: + - Dmitry Baryshkov + +description: + Device tree bindings for MSM Mobile Display Subsystem(MDSS) that encapsulates + sub-blocks like DPU display controller, DSI and DP interfaces etc. Device tree + bindings of MDSS are mentioned for SM8150 target. 
+ +$ref: /schemas/display/msm/mdss-common.yaml# + +properties: + compatible: + items: + - const: qcom,sm8150-mdss + + clocks: + items: + - description: Display AHB clock from gcc + - description: Display hf axi clock + - description: Display sf axi clock + - description: Display core clock + + clock-names: + items: + - const: iface + - const: bus + - const: nrt_bus + - const: core + + iommus: + maxItems: 1 + + interconnects: + maxItems: 2 + + interconnect-names: + maxItems: 2 + +patternProperties: + "^display-controller@[0-9a-f]+$": + type: object + properties: + compatible: + const: qcom,sm8150-dpu + + "^dsi@[0-9a-f]+$": + type: object + properties: + compatible: + items: + - const: qcom,sm8150-dsi-ctrl + - const: qcom,mdss-dsi-ctrl + + "^phy@[0-9a-f]+$": + type: object + properties: + compatible: + const: qcom,dsi-phy-7nm + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/clock/qcom,dispcc-sm8150.h> + #include <dt-bindings/clock/qcom,gcc-sm8150.h> + #include <dt-bindings/clock/qcom,rpmh.h> + #include <dt-bindings/interconnect/qcom,sm8150.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/power/qcom-rpmpd.h> + + display-subsystem@ae00000 { + compatible = "qcom,sm8150-mdss"; + reg = <0x0ae00000 0x1000>; + reg-names = "mdss"; + + interconnects = <&mmss_noc MASTER_MDP_PORT0 &mc_virt SLAVE_EBI_CH0>, + <&mmss_noc MASTER_MDP_PORT1 &mc_virt SLAVE_EBI_CH0>; + interconnect-names = "mdp0-mem", "mdp1-mem"; + + power-domains = <&dispcc MDSS_GDSC>; + + clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&gcc GCC_DISP_HF_AXI_CLK>, + <&gcc GCC_DISP_SF_AXI_CLK>, + <&dispcc DISP_CC_MDSS_MDP_CLK>; + clock-names = "iface", "bus", "nrt_bus", "core"; + + interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>; + interrupt-controller; + #interrupt-cells = <1>; + + iommus = <&apps_smmu 0x800 0x420>; + + #address-cells = <1>; + #size-cells = <1>; + ranges; + + display-controller@ae01000 { + compatible = "qcom,sm8150-dpu"; + reg = <0x0ae01000 0x8f000>, + <0x0aeb0000 0x2008>; + reg-names = "mdp", "vbif"; + + clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&gcc GCC_DISP_HF_AXI_CLK>, + <&dispcc DISP_CC_MDSS_MDP_CLK>, + <&dispcc DISP_CC_MDSS_VSYNC_CLK>; + clock-names = "iface", "bus", "core", "vsync"; + + assigned-clocks = <&dispcc DISP_CC_MDSS_VSYNC_CLK>; + assigned-clock-rates = <19200000>; + + operating-points-v2 = <&mdp_opp_table>; + power-domains = <&rpmhpd SM8150_MMCX>; + + interrupt-parent = <&mdss>; + interrupts = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dpu_intf1_out: endpoint { + remote-endpoint = <&dsi0_in>; + }; + }; + + port@1 { + reg = <1>; + dpu_intf2_out: endpoint { + remote-endpoint = <&dsi1_in>; + }; + }; + }; + + mdp_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-171428571 { + opp-hz = /bits/ 64 <171428571>; + required-opps = <&rpmhpd_opp_low_svs>; + }; + + opp-300000000 { + opp-hz = /bits/ 64 <300000000>; + required-opps = <&rpmhpd_opp_svs>; + }; + + opp-345000000 { + opp-hz = /bits/ 64 <345000000>; + required-opps = <&rpmhpd_opp_svs_l1>; + }; + + opp-460000000 { + opp-hz = /bits/ 64 <460000000>; + required-opps = <&rpmhpd_opp_nom>; + }; + }; + }; + + dsi@ae94000 { + compatible = "qcom,sm8150-dsi-ctrl", "qcom,mdss-dsi-ctrl"; + reg = <0x0ae94000 0x400>; + reg-names = "dsi_ctrl"; + + interrupt-parent = <&mdss>; + interrupts = <4>; + + clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>, + <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>, + <&dispcc DISP_CC_MDSS_PCLK0_CLK>, + <&dispcc DISP_CC_MDSS_ESC0_CLK>, + <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&gcc GCC_DISP_HF_AXI_CLK>; + clock-names = "byte", + "byte_intf", + "pixel", + "core", + "iface", + "bus"; + + assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, + <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>; + assigned-clock-parents = 
<&dsi0_phy 0>, <&dsi0_phy 1>; + + operating-points-v2 = <&dsi_opp_table>; + power-domains = <&rpmhpd SM8150_MMCX>; + + phys = <&dsi0_phy>; + phy-names = "dsi"; + + #address-cells = <1>; + #size-cells = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dsi0_in: endpoint { + remote-endpoint = <&dpu_intf1_out>; + }; + }; + + port@1 { + reg = <1>; + dsi0_out: endpoint { + }; + }; + }; + + dsi_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-187500000 { + opp-hz = /bits/ 64 <187500000>; + required-opps = <&rpmhpd_opp_low_svs>; + }; + + opp-300000000 { + opp-hz = /bits/ 64 <300000000>; + required-opps = <&rpmhpd_opp_svs>; + }; + + opp-358000000 { + opp-hz = /bits/ 64 <358000000>; + required-opps = <&rpmhpd_opp_svs_l1>; + }; + }; + }; + + dsi0_phy: phy@ae94400 { + compatible = "qcom,dsi-phy-7nm"; + reg = <0x0ae94400 0x200>, + <0x0ae94600 0x280>, + <0x0ae94900 0x260>; + reg-names = "dsi_phy", + "dsi_phy_lane", + "dsi_pll"; + + #clock-cells = <1>; + #phy-cells = <0>; + + clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&rpmhcc RPMH_CXO_CLK>; + clock-names = "iface", "ref"; + vdds-supply = <&vreg_dsi_phy>; + }; + + dsi@ae96000 { + compatible = "qcom,sm8150-dsi-ctrl", "qcom,mdss-dsi-ctrl"; + reg = <0x0ae96000 0x400>; + reg-names = "dsi_ctrl"; + + interrupt-parent = <&mdss>; + interrupts = <5>; + + clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK>, + <&dispcc DISP_CC_MDSS_BYTE1_INTF_CLK>, + <&dispcc DISP_CC_MDSS_PCLK1_CLK>, + <&dispcc DISP_CC_MDSS_ESC1_CLK>, + <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&gcc GCC_DISP_HF_AXI_CLK>; + clock-names = "byte", + "byte_intf", + "pixel", + "core", + "iface", + "bus"; + + assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>, + <&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>; + assigned-clock-parents = <&dsi1_phy 0>, <&dsi1_phy 1>; + + operating-points-v2 = <&dsi_opp_table>; + power-domains = <&rpmhpd SM8150_MMCX>; + + phys = <&dsi1_phy>; + phy-names = "dsi"; + + #address-cells = <1>; + #size-cells = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dsi1_in: endpoint { + remote-endpoint = <&dpu_intf2_out>; + }; + }; + + port@1 { + reg = <1>; + dsi1_out: endpoint { + }; + }; + }; + }; + + dsi1_phy: phy@ae96400 { + compatible = "qcom,dsi-phy-7nm"; + reg = <0x0ae96400 0x200>, + <0x0ae96600 0x280>, + <0x0ae96900 0x260>; + reg-names = "dsi_phy", + "dsi_phy_lane", + "dsi_pll"; + + #clock-cells = <1>; + #phy-cells = <0>; + + clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&rpmhcc RPMH_CXO_CLK>; + clock-names = "iface", "ref"; + vdds-supply = <&vreg_dsi_phy>; + }; + }; +... 
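The SM8150 binding pair above is easiest to digest as a shape: the MDSS wrapper node owns the IOMMU, the bus clocks and the interrupt controller, while the DPU, the DSI hosts and their PHYs sit underneath it as children matched by the patternProperties. A schematic sketch of that hierarchy, keeping only the compatibles and unit addresses from the example above (all required properties deliberately omitted, so this is not a valid node by itself)::

    display-subsystem@ae00000 {              /* MDSS wrapper */
        compatible = "qcom,sm8150-mdss";

        display-controller@ae01000 {         /* DPU, drives the INTFs */
            compatible = "qcom,sm8150-dpu";
        };

        dsi@ae94000 {                        /* one node per DSI host... */
            compatible = "qcom,sm8150-dsi-ctrl", "qcom,mdss-dsi-ctrl";
        };

        phy@ae94400 {                        /* ...plus its DSI PHY */
            compatible = "qcom,dsi-phy-7nm";
        };
    };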
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8250-dpu.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8250-dpu.yaml index 9ff8a265c85f68..687c8c170cd425 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sm8250-dpu.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8250-dpu.yaml @@ -39,6 +39,13 @@ properties: - const: core - const: vsync +required: + - compatible + - reg + - reg-names + - clocks + - clock-names + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8250-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8250-mdss.yaml index 0d3be5386b3f46..368d3db0ce9679 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sm8250-mdss.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8250-mdss.yaml @@ -18,8 +18,7 @@ $ref: /schemas/display/msm/mdss-common.yaml# properties: compatible: - items: - - const: qcom,sm8250-mdss + const: qcom,sm8250-mdss clocks: items: @@ -55,7 +54,9 @@ patternProperties: type: object properties: compatible: - const: qcom,mdss-dsi-ctrl + items: + - const: qcom,sm8250-dsi-ctrl + - const: qcom,mdss-dsi-ctrl "^phy@[0-9a-f]+$": type: object @@ -63,6 +64,9 @@ compatible: const: qcom,dsi-phy-7nm +required: + - compatible + unevaluatedProperties: false examples: @@ -167,7 +171,7 @@ }; dsi@ae94000 { - compatible = "qcom,mdss-dsi-ctrl"; + compatible = "qcom,sm8250-dsi-ctrl", "qcom,mdss-dsi-ctrl"; reg = <0x0ae94000 0x400>; reg-names = "dsi_ctrl"; @@ -257,7 +261,7 @@ }; dsi@ae96000 { - compatible = "qcom,mdss-dsi-ctrl"; + compatible = "qcom,sm8250-dsi-ctrl", "qcom,mdss-dsi-ctrl"; reg = <0x0ae96000 0x400>; reg-names = "dsi_ctrl"; diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8350-dpu.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8350-dpu.yaml new file mode 100644 index 00000000000000..120500395c9a98 --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8350-dpu.yaml @@ -0,0 +1,120 @@ +# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/msm/qcom,sm8350-dpu.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm SM8350 Display DPU + +maintainers: + - Robert Foss <robert.foss@linaro.org> + +$ref: /schemas/display/msm/dpu-common.yaml# + +properties: + compatible: + const: qcom,sm8350-dpu + + reg: + items: + - description: Address offset and size for mdp register set + - description: Address offset and size for vbif register set + + reg-names: + items: + - const: mdp + - const: vbif + + clocks: + items: + - description: Display hf axi clock + - description: Display sf axi clock + - description: Display ahb clock + - description: Display lut clock + - description: Display core clock + - description: Display vsync clock + + clock-names: + items: + - const: bus + - const: nrt_bus + - const: iface + - const: lut + - const: core + - const: vsync + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/clock/qcom,dispcc-sm8350.h> + #include <dt-bindings/clock/qcom,gcc-sm8350.h> + #include <dt-bindings/interconnect/qcom,sm8350.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/power/qcom-rpmpd.h> + + display-controller@ae01000 { + compatible = "qcom,sm8350-dpu"; + reg = <0x0ae01000 0x8f000>, + <0x0aeb0000 0x2008>; + reg-names = "mdp", "vbif"; + + clocks = <&gcc GCC_DISP_HF_AXI_CLK>, + <&gcc GCC_DISP_SF_AXI_CLK>, + <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&dispcc DISP_CC_MDSS_MDP_LUT_CLK>, + <&dispcc DISP_CC_MDSS_MDP_CLK>, + <&dispcc DISP_CC_MDSS_VSYNC_CLK>; + clock-names = "bus", + "nrt_bus", + "iface", + "lut", + "core", + "vsync"; + + 
assigned-clocks = <&dispcc DISP_CC_MDSS_VSYNC_CLK>; + assigned-clock-rates = <19200000>; + + operating-points-v2 = <&mdp_opp_table>; + power-domains = <&rpmhpd SM8350_MMCX>; + + interrupt-parent = <&mdss>; + interrupts = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dpu_intf1_out: endpoint { + remote-endpoint = <&dsi0_in>; + }; + }; + }; + + mdp_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-200000000 { + opp-hz = /bits/ 64 <200000000>; + required-opps = <&rpmhpd_opp_low_svs>; + }; + + opp-300000000 { + opp-hz = /bits/ 64 <300000000>; + required-opps = <&rpmhpd_opp_svs>; + }; + + opp-345000000 { + opp-hz = /bits/ 64 <345000000>; + required-opps = <&rpmhpd_opp_svs_l1>; + }; + + opp-460000000 { + opp-hz = /bits/ 64 <460000000>; + required-opps = <&rpmhpd_opp_nom>; + }; + }; + }; +... diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8350-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8350-mdss.yaml new file mode 100644 index 00000000000000..4d94dbff305415 --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8350-mdss.yaml @@ -0,0 +1,223 @@ +# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/msm/qcom,sm8350-mdss.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm SM8350 Display MDSS + +maintainers: + - Robert Foss <robert.foss@linaro.org> + +description: + MSM Mobile Display Subsystem (MDSS) that encapsulates sub-blocks like + the DPU display controller, DSI and DP interfaces etc. + +$ref: /schemas/display/msm/mdss-common.yaml# + +properties: + compatible: + items: + - const: qcom,sm8350-mdss + + clocks: + items: + - description: Display AHB clock from gcc + - description: Display hf axi clock + - description: Display sf axi clock + - description: Display core clock + + clock-names: + items: + - const: iface + - const: bus + - const: nrt_bus + - const: core + + iommus: + maxItems: 1 + + interconnects: + maxItems: 2 + + interconnect-names: + items: + - const: mdp0-mem + - const: mdp1-mem + +patternProperties: + "^display-controller@[0-9a-f]+$": + type: object + properties: + compatible: + const: qcom,sm8350-dpu + + "^dsi@[0-9a-f]+$": + type: object + properties: + compatible: + items: + - const: qcom,sm8350-dsi-ctrl + - const: qcom,mdss-dsi-ctrl + + "^phy@[0-9a-f]+$": + type: object + properties: + compatible: + const: qcom,dsi-phy-5nm-8350 + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/clock/qcom,dispcc-sm8350.h> + #include <dt-bindings/clock/qcom,gcc-sm8350.h> + #include <dt-bindings/clock/qcom,rpmh.h> + #include <dt-bindings/interconnect/qcom,sm8350.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/power/qcom-rpmpd.h> + + display-subsystem@ae00000 { + compatible = "qcom,sm8350-mdss"; + reg = <0x0ae00000 0x1000>; + reg-names = "mdss"; + + interconnects = <&mmss_noc MASTER_MDP0 0 &mc_virt SLAVE_EBI1 0>, + <&mmss_noc MASTER_MDP1 0 &mc_virt SLAVE_EBI1 0>; + interconnect-names = "mdp0-mem", "mdp1-mem"; + + power-domains = <&dispcc MDSS_GDSC>; + resets = <&dispcc DISP_CC_MDSS_CORE_BCR>; + + clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&gcc GCC_DISP_HF_AXI_CLK>, + <&gcc GCC_DISP_SF_AXI_CLK>, + <&dispcc DISP_CC_MDSS_MDP_CLK>; + clock-names = "iface", "bus", "nrt_bus", "core"; + + iommus = <&apps_smmu 0x820 0x402>; + + interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>; + interrupt-controller; + #interrupt-cells = <1>; + + #address-cells = <1>; + #size-cells = <1>; + ranges; + + display-controller@ae01000 { + compatible = "qcom,sm8350-dpu"; + reg = <0x0ae01000 0x8f000>, + <0x0aeb0000 0x2008>; + reg-names = "mdp", "vbif"; + + clocks = <&gcc GCC_DISP_HF_AXI_CLK>, + <&gcc GCC_DISP_SF_AXI_CLK>, + <&dispcc 
DISP_CC_MDSS_AHB_CLK>, + <&dispcc DISP_CC_MDSS_MDP_LUT_CLK>, + <&dispcc DISP_CC_MDSS_MDP_CLK>, + <&dispcc DISP_CC_MDSS_VSYNC_CLK>; + clock-names = "bus", + "nrt_bus", + "iface", + "lut", + "core", + "vsync"; + + assigned-clocks = <&dispcc DISP_CC_MDSS_VSYNC_CLK>; + assigned-clock-rates = <19200000>; + + operating-points-v2 = <&mdp_opp_table>; + power-domains = <&rpmhpd SM8350_MMCX>; + + interrupt-parent = <&mdss>; + interrupts = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dpu_intf1_out: endpoint { + remote-endpoint = <&dsi0_in>; + }; + }; + }; + + mdp_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-200000000 { + opp-hz = /bits/ 64 <200000000>; + required-opps = <&rpmhpd_opp_low_svs>; + }; + + opp-300000000 { + opp-hz = /bits/ 64 <300000000>; + required-opps = <&rpmhpd_opp_svs>; + }; + + opp-345000000 { + opp-hz = /bits/ 64 <345000000>; + required-opps = <&rpmhpd_opp_svs_l1>; + }; + + opp-460000000 { + opp-hz = /bits/ 64 <460000000>; + required-opps = <&rpmhpd_opp_nom>; + }; + }; + }; + + dsi0: dsi@ae94000 { + compatible = "qcom,sm8350-dsi-ctrl", "qcom,mdss-dsi-ctrl"; + reg = <0x0ae94000 0x400>; + reg-names = "dsi_ctrl"; + + interrupt-parent = <&mdss>; + interrupts = <4>; + + clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>, + <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>, + <&dispcc DISP_CC_MDSS_PCLK0_CLK>, + <&dispcc DISP_CC_MDSS_ESC0_CLK>, + <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&gcc GCC_DISP_HF_AXI_CLK>; + clock-names = "byte", + "byte_intf", + "pixel", + "core", + "iface", + "bus"; + + assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, + <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>; + assigned-clock-parents = <&mdss_dsi0_phy 0>, + <&mdss_dsi0_phy 1>; + + operating-points-v2 = <&dsi_opp_table>; + power-domains = <&rpmhpd SM8350_MMCX>; + + phys = <&mdss_dsi0_phy>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dsi0_in: endpoint { + remote-endpoint = <&dpu_intf1_out>; + }; + }; + + port@1 { + reg = <1>; + dsi0_out: endpoint { + }; + }; + }; + }; + }; +... 
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8450-dpu.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8450-dpu.yaml new file mode 100644 index 00000000000000..0d17ece1c45337 --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8450-dpu.yaml @@ -0,0 +1,139 @@ +# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/msm/qcom,sm8450-dpu.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm SM8450 Display DPU + +maintainers: + - Dmitry Baryshkov <dmitry.baryshkov@linaro.org> + +$ref: /schemas/display/msm/dpu-common.yaml# + +properties: + compatible: + const: qcom,sm8450-dpu + + reg: + items: + - description: Address offset and size for mdp register set + - description: Address offset and size for vbif register set + + reg-names: + items: + - const: mdp + - const: vbif + + clocks: + items: + - description: Display hf axi + - description: Display sf axi + - description: Display ahb + - description: Display lut + - description: Display core + - description: Display vsync + + clock-names: + items: + - const: bus + - const: nrt_bus + - const: iface + - const: lut + - const: core + - const: vsync + +required: + - compatible + - reg + - reg-names + - clocks + - clock-names + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/clock/qcom,sm8450-dispcc.h> + #include <dt-bindings/clock/qcom,gcc-sm8450.h> + #include <dt-bindings/interconnect/qcom,sm8450.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/power/qcom-rpmpd.h> + + display-controller@ae01000 { + compatible = "qcom,sm8450-dpu"; + reg = <0x0ae01000 0x8f000>, + <0x0aeb0000 0x2008>; + reg-names = "mdp", "vbif"; + + clocks = <&gcc GCC_DISP_HF_AXI_CLK>, + <&gcc GCC_DISP_SF_AXI_CLK>, + <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&dispcc DISP_CC_MDSS_MDP_LUT_CLK>, + <&dispcc DISP_CC_MDSS_MDP_CLK>, + <&dispcc DISP_CC_MDSS_VSYNC_CLK>; + clock-names = "bus", + "nrt_bus", + "iface", + "lut", + "core", + "vsync"; + + assigned-clocks = <&dispcc DISP_CC_MDSS_VSYNC_CLK>; + assigned-clock-rates = <19200000>; + + operating-points-v2 = <&mdp_opp_table>; + power-domains = <&rpmhpd SM8450_MMCX>; + + interrupt-parent = <&mdss>; + interrupts = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dpu_intf1_out: endpoint { + remote-endpoint = <&dsi0_in>; + }; + }; + + port@1 { + reg = <1>; + dpu_intf2_out: endpoint { + remote-endpoint = <&dsi1_in>; + }; + }; + }; + + mdp_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-172000000 { + opp-hz = /bits/ 64 <172000000>; + required-opps = <&rpmhpd_opp_low_svs_d1>; + }; + + opp-200000000 { + opp-hz = /bits/ 64 <200000000>; + required-opps = <&rpmhpd_opp_low_svs>; + }; + + opp-325000000 { + opp-hz = /bits/ 64 <325000000>; + required-opps = <&rpmhpd_opp_svs>; + }; + + opp-375000000 { + opp-hz = /bits/ 64 <375000000>; + required-opps = <&rpmhpd_opp_svs_l1>; + }; + + opp-500000000 { + opp-hz = /bits/ 64 <500000000>; + required-opps = <&rpmhpd_opp_nom>; + }; + }; + }; +... 
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8450-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8450-mdss.yaml new file mode 100644 index 00000000000000..4c6929e2534c26 --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8450-mdss.yaml @@ -0,0 +1,345 @@ +# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/msm/qcom,sm8450-mdss.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm SM8450 Display MDSS + +maintainers: + - Dmitry Baryshkov <dmitry.baryshkov@linaro.org> + +description: + SM8450 MSM Mobile Display Subsystem (MDSS), which encapsulates sub-blocks + like the DPU display controller, DSI and DP interfaces etc. + +$ref: /schemas/display/msm/mdss-common.yaml# + +properties: + compatible: + const: qcom,sm8450-mdss + + clocks: + items: + - description: Display AHB + - description: Display hf AXI + - description: Display sf AXI + - description: Display core + + iommus: + maxItems: 1 + + interconnects: + maxItems: 2 + + interconnect-names: + maxItems: 2 + +patternProperties: + "^display-controller@[0-9a-f]+$": + type: object + properties: + compatible: + const: qcom,sm8450-dpu + + "^dsi@[0-9a-f]+$": + type: object + properties: + compatible: + items: + - const: qcom,sm8450-dsi-ctrl + - const: qcom,mdss-dsi-ctrl + + "^phy@[0-9a-f]+$": + type: object + properties: + compatible: + const: qcom,dsi-phy-5nm-8450 + +required: + - compatible + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/clock/qcom,sm8450-dispcc.h> + #include <dt-bindings/clock/qcom,gcc-sm8450.h> + #include <dt-bindings/clock/qcom,rpmh.h> + #include <dt-bindings/interconnect/qcom,sm8450.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/power/qcom-rpmpd.h> + + display-subsystem@ae00000 { + compatible = "qcom,sm8450-mdss"; + reg = <0x0ae00000 0x1000>; + reg-names = "mdss"; + + interconnects = <&mmss_noc MASTER_MDP_DISP 0 &mc_virt SLAVE_EBI1_DISP 0>, + <&mmss_noc MASTER_MDP_DISP 0 &mc_virt SLAVE_EBI1_DISP 0>; + interconnect-names = "mdp0-mem", "mdp1-mem"; + + resets = <&dispcc DISP_CC_MDSS_CORE_BCR>; + + power-domains = <&dispcc MDSS_GDSC>; + + clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&gcc GCC_DISP_HF_AXI_CLK>, + <&gcc GCC_DISP_SF_AXI_CLK>, + <&dispcc DISP_CC_MDSS_MDP_CLK>; + clock-names = "iface", "bus", "nrt_bus", "core"; + + interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>; + interrupt-controller; + #interrupt-cells = <1>; + + iommus = <&apps_smmu 0x2800 0x402>; + + #address-cells = <1>; + #size-cells = <1>; + ranges; + + display-controller@ae01000 { + compatible = "qcom,sm8450-dpu"; + reg = <0x0ae01000 0x8f000>, + <0x0aeb0000 0x2008>; + reg-names = "mdp", "vbif"; + + clocks = <&gcc GCC_DISP_HF_AXI_CLK>, + <&gcc GCC_DISP_SF_AXI_CLK>, + <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&dispcc DISP_CC_MDSS_MDP_LUT_CLK>, + <&dispcc DISP_CC_MDSS_MDP_CLK>, + <&dispcc DISP_CC_MDSS_VSYNC_CLK>; + clock-names = "bus", + "nrt_bus", + "iface", + "lut", + "core", + "vsync"; + + assigned-clocks = <&dispcc DISP_CC_MDSS_VSYNC_CLK>; + assigned-clock-rates = <19200000>; + + operating-points-v2 = <&mdp_opp_table>; + power-domains = <&rpmhpd SM8450_MMCX>; + + interrupt-parent = <&mdss>; + interrupts = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dpu_intf1_out: endpoint { + remote-endpoint = <&dsi0_in>; + }; + }; + + port@1 { + reg = <1>; + dpu_intf2_out: endpoint { + remote-endpoint = <&dsi1_in>; + }; + }; + }; + + mdp_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-172000000 { + opp-hz = /bits/ 64 <172000000>; + required-opps = <&rpmhpd_opp_low_svs_d1>; + }; + + opp-200000000 { + opp-hz = /bits/ 64 <200000000>; + required-opps = <&rpmhpd_opp_low_svs>; + }; + + opp-325000000 { + 
opp-hz = /bits/ 64 <325000000>; + required-opps = <&rpmhpd_opp_svs>; + }; + + opp-375000000 { + opp-hz = /bits/ 64 <375000000>; + required-opps = <&rpmhpd_opp_svs_l1>; + }; + + opp-500000000 { + opp-hz = /bits/ 64 <500000000>; + required-opps = <&rpmhpd_opp_nom>; + }; + }; + }; + + dsi@ae94000 { + compatible = "qcom,sm8450-dsi-ctrl", "qcom,mdss-dsi-ctrl"; + reg = <0x0ae94000 0x400>; + reg-names = "dsi_ctrl"; + + interrupt-parent = <&mdss>; + interrupts = <4>; + + clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>, + <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>, + <&dispcc DISP_CC_MDSS_PCLK0_CLK>, + <&dispcc DISP_CC_MDSS_ESC0_CLK>, + <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&gcc GCC_DISP_HF_AXI_CLK>; + clock-names = "byte", + "byte_intf", + "pixel", + "core", + "iface", + "bus"; + + assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, + <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>; + assigned-clock-parents = <&dsi0_phy 0>, <&dsi0_phy 1>; + + operating-points-v2 = <&dsi_opp_table>; + power-domains = <&rpmhpd SM8450_MMCX>; + + phys = <&dsi0_phy>; + phy-names = "dsi"; + + #address-cells = <1>; + #size-cells = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dsi0_in: endpoint { + remote-endpoint = <&dpu_intf1_out>; + }; + }; + + port@1 { + reg = <1>; + dsi0_out: endpoint { + }; + }; + }; + + dsi_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-160310000{ + opp-hz = /bits/ 64 <160310000>; + required-opps = <&rpmhpd_opp_low_svs_d1>; + }; + + opp-187500000 { + opp-hz = /bits/ 64 <187500000>; + required-opps = <&rpmhpd_opp_low_svs>; + }; + + opp-300000000 { + opp-hz = /bits/ 64 <300000000>; + required-opps = <&rpmhpd_opp_svs>; + }; + + opp-358000000 { + opp-hz = /bits/ 64 <358000000>; + required-opps = <&rpmhpd_opp_svs_l1>; + }; + }; + }; + + dsi0_phy: phy@ae94400 { + compatible = "qcom,dsi-phy-5nm-8450"; + reg = <0x0ae94400 0x200>, + <0x0ae94600 0x280>, + <0x0ae94900 0x260>; + reg-names = "dsi_phy", + "dsi_phy_lane", + "dsi_pll"; + + #clock-cells = <1>; + #phy-cells = <0>; + + clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&rpmhcc RPMH_CXO_CLK>; + clock-names = "iface", "ref"; + vdds-supply = <&vreg_dsi_phy>; + }; + + dsi@ae96000 { + compatible = "qcom,sm8450-dsi-ctrl", "qcom,mdss-dsi-ctrl"; + reg = <0x0ae96000 0x400>; + reg-names = "dsi_ctrl"; + + interrupt-parent = <&mdss>; + interrupts = <5>; + + clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK>, + <&dispcc DISP_CC_MDSS_BYTE1_INTF_CLK>, + <&dispcc DISP_CC_MDSS_PCLK1_CLK>, + <&dispcc DISP_CC_MDSS_ESC1_CLK>, + <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&gcc GCC_DISP_HF_AXI_CLK>; + clock-names = "byte", + "byte_intf", + "pixel", + "core", + "iface", + "bus"; + + assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>, + <&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>; + assigned-clock-parents = <&dsi1_phy 0>, <&dsi1_phy 1>; + + operating-points-v2 = <&dsi_opp_table>; + power-domains = <&rpmhpd SM8450_MMCX>; + + phys = <&dsi1_phy>; + phy-names = "dsi"; + + #address-cells = <1>; + #size-cells = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dsi1_in: endpoint { + remote-endpoint = <&dpu_intf2_out>; + }; + }; + + port@1 { + reg = <1>; + dsi1_out: endpoint { + }; + }; + }; + }; + + dsi1_phy: phy@ae96400 { + compatible = "qcom,dsi-phy-5nm-8450"; + reg = <0x0ae96400 0x200>, + <0x0ae96600 0x280>, + <0x0ae96900 0x260>; + reg-names = "dsi_phy", + "dsi_phy_lane", + "dsi_pll"; + + #clock-cells = <1>; + #phy-cells = <0>; + + clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&rpmhcc RPMH_CXO_CLK>; + clock-names = "iface", 
"ref"; + vdds-supply = <&vreg_dsi_phy>; + }; + }; +... diff --git a/Documentation/devicetree/bindings/display/panel/auo,a030jtn01.yaml b/Documentation/devicetree/bindings/display/panel/auo,a030jtn01.yaml new file mode 100644 index 00000000000000..86c834eb4d98fb --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/auo,a030jtn01.yaml @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/auo,a030jtn01.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: AUO A030JTN01 3.0" (320x480 pixels) 24-bit TFT LCD panel + +description: | + Delta RGB 8-bit panel found in some Retrogame handhelds + +maintainers: + - Paul Cercueil + - Christophe Branchereau + +allOf: + - $ref: panel-common.yaml# + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +properties: + compatible: + const: auo,a030jtn01 + + reg: + maxItems: 1 + +required: + - compatible + - reg + - power-supply + - reset-gpios + +unevaluatedProperties: false + +examples: + - | + #include + + spi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "auo,a030jtn01"; + reg = <0>; + + spi-max-frequency = <10000000>; + + reset-gpios = <&gpe 4 GPIO_ACTIVE_LOW>; + power-supply = <&lcd_power>; + + backlight = <&backlight>; + + port { + panel_input: endpoint { + remote-endpoint = <&panel_output>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/panel/focaltech,gpt3.yaml b/Documentation/devicetree/bindings/display/panel/focaltech,gpt3.yaml new file mode 100644 index 00000000000000..d54e96b2a9e155 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/focaltech,gpt3.yaml @@ -0,0 +1,56 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/focaltech,gpt3.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Focaltech GPT3 3.0" (640x480 pixels) IPS LCD panel + +maintainers: + - Christophe Branchereau + +allOf: + - $ref: panel-common.yaml# + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +properties: + compatible: + const: focaltech,gpt3 + + reg: + maxItems: 1 + +required: + - compatible + - reg + - power-supply + - reset-gpios + +unevaluatedProperties: false + +examples: + - | + #include + + spi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "focaltech,gpt3"; + reg = <0>; + + spi-max-frequency = <3125000>; + + reset-gpios = <&gpe 2 GPIO_ACTIVE_LOW>; + + backlight = <&backlight>; + power-supply = <&vcc>; + + port { + panel_input: endpoint { + remote-endpoint = <&panel_output>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml new file mode 100644 index 00000000000000..1b2a1baa26f917 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml @@ -0,0 +1,76 @@ +# SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/himax,hx8394.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Himax HX8394 MIPI-DSI LCD panel controller + +maintainers: + - Ondrej Jirman + - Javier Martinez Canillas + +description: + Device tree bindings for panels based on the Himax HX8394 controller, + such as the HannStar HSD060BHW4 720x1440 TFT LCD panel connected with + a MIPI-DSI video interface. 
+ +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + items: + - enum: + - hannstar,hsd060bhw4 + - const: himax,hx8394 + + reg: true + + reset-gpios: true + + backlight: true + + port: true + + vcc-supply: + description: Panel power supply + + iovcc-supply: + description: I/O voltage supply + +required: + - compatible + - reg + - reset-gpios + - backlight + - port + - vcc-supply + - iovcc-supply + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + dsi { + #address-cells = <1>; + #size-cells = <0>; + panel@0 { + compatible = "hannstar,hsd060bhw4", "himax,hx8394"; + reg = <0>; + vcc-supply = <&reg_2v8_p>; + iovcc-supply = <&reg_1v8_p>; + reset-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>; + backlight = <&backlight>; + + port { + mipi_in_panel: endpoint { + remote-endpoint = <&mipi_out_panel>; + }; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml b/Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml index c2df8d28aaf5f5..9b701df5e9d28f 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml @@ -22,8 +22,9 @@ description: | The standard defines the following interface signals for type C: - Power: - Vdd: Power supply for display module + Called power-supply in this binding. - Vddi: Logic level supply for interface signals - Combined into one in this binding called: power-supply + Called io-supply in this binding. - Interface: - CSx: Chip select - SCL: Serial clock @@ -80,6 +81,11 @@ properties: Controller data/command selection (D/CX) in 4-line SPI mode. If not set, the controller is in 3-line SPI mode. + io-supply: + description: | + Logic level supply for interface signals (Vddi). + No need to set if this is the same as power-supply. + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/display/panel/visionox,vtdr6130.yaml b/Documentation/devicetree/bindings/display/panel/visionox,vtdr6130.yaml new file mode 100644 index 00000000000000..84562a5b710ae2 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/visionox,vtdr6130.yaml @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/visionox,vtdr6130.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Visionox VTDR6130 AMOLED DSI Panel + +maintainers: + - Neil Armstrong <neil.armstrong@linaro.org> + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + const: visionox,vtdr6130 + + reg: + maxItems: 1 + description: DSI virtual channel + + vddio-supply: true + vci-supply: true + vdd-supply: true + port: true + reset-gpios: true + +additionalProperties: false + +required: + - compatible + - reg + - vddio-supply + - vci-supply + - vdd-supply + - reset-gpios + - port + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + dsi { + #address-cells = <1>; + #size-cells = <0>; + panel@0 { + compatible = "visionox,vtdr6130"; + reg = <0>; + + vddio-supply = <&vreg_l12b_1p8>; + vci-supply = <&vreg_l13b_3p0>; + vdd-supply = <&vreg_l11b_1p2>; + + reset-gpios = <&tlmm 133 GPIO_ACTIVE_LOW>; + + port { + panel0_in: endpoint { + remote-endpoint = <&dsi0_out>; + }; + }; + }; + }; +... 
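To see the power-supply/io-supply split from the panel-mipi-dbi-spi change above in context: a minimal sketch of a type C panel fed from two separate rails might look as follows (the regulator phandles are illustrative; the compatible is the sainsmart18 example already used by that binding)::

    panel@0 {
        compatible = "sainsmart18", "panel-mipi-dbi-spi";
        reg = <0>;
        spi-max-frequency = <40000000>;
        dc-gpios = <&gpio 24 GPIO_ACTIVE_HIGH>;
        reset-gpios = <&gpio 25 GPIO_ACTIVE_HIGH>;
        power-supply = <&vdd_panel>;    /* Vdd: display module supply */
        io-supply = <&vddi_panel>;      /* Vddi: only if it is a separate rail */
    };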
diff --git a/Documentation/devicetree/bindings/display/renesas,du.yaml b/Documentation/devicetree/bindings/display/renesas,du.yaml index b3e588022082d8..d4830f52c5122d 100644 --- a/Documentation/devicetree/bindings/display/renesas,du.yaml +++ b/Documentation/devicetree/bindings/display/renesas,du.yaml @@ -40,6 +40,7 @@ properties: - renesas,du-r8a77990 # for R-Car E3 compatible DU - renesas,du-r8a77995 # for R-Car D3 compatible DU - renesas,du-r8a779a0 # for R-Car V3U compatible DU + - renesas,du-r8a779g0 # for R-Car V4H compatible DU reg: maxItems: 1 @@ -762,6 +763,7 @@ allOf: contains: enum: - renesas,du-r8a779a0 + - renesas,du-r8a779g0 then: properties: clocks: diff --git a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml index dd64f70b5014d5..3c9f29e428a4f6 100644 --- a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml +++ b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml @@ -63,6 +63,11 @@ properties: reg: description: Location and size of the framebuffer memory + memory-region: + maxItems: 1 + description: Phandle to a node describing the memory to be used for the + framebuffer. If present, overrides the "reg" property (if one exists). + clocks: description: List of clocks used by the framebuffer. @@ -94,6 +99,7 @@ properties: * `x1r5g5b5` - 16-bit pixels, d[14:10]=r, d[9:5]=g, d[4:0]=b * `x2r10g10b10` - 32-bit pixels, d[29:20]=r, d[19:10]=g, d[9:0]=b * `x8r8g8b8` - 32-bit pixels, d[23:16]=r, d[15:8]=g, d[7:0]=b + * `x8b8g8r8` - 32-bit pixels, d[23:16]=b, d[15:8]=g, d[7:0]=r enum: - a1r5g5b5 - a2r10g10b10 @@ -105,6 +111,7 @@ properties: - x1r5g5b5 - x2r10g10b10 - x8r8g8b8 + - x8b8g8r8 display: $ref: /schemas/types.yaml#/definitions/phandle diff --git a/Documentation/devicetree/bindings/phy/qcom,hdmi-phy-other.yaml b/Documentation/devicetree/bindings/phy/qcom,hdmi-phy-other.yaml index fdb277edebebbf..0c8f03b78608fc 100644 --- a/Documentation/devicetree/bindings/phy/qcom,hdmi-phy-other.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,hdmi-phy-other.yaml @@ -43,6 +43,9 @@ properties: vddio-supply: description: phandle to VDD I/O supply regulator + '#clock-cells': + const: 0 + '#phy-cells': const: 0 @@ -53,7 +56,6 @@ allOf: contains: enum: - qcom,hdmi-phy-8660 - - qcom,hdmi-phy-8960 then: properties: clocks: @@ -63,6 +65,24 @@ allOf: - const: slave_iface vddio-supply: false + - if: + properties: + compatible: + contains: + enum: + - qcom,hdmi-phy-8960 + then: + properties: + clocks: + minItems: 1 + maxItems: 2 + clock-names: + minItems: 1 + items: + - const: slave_iface + - const: pxo + vddio-supply: false + - if: properties: compatible: @@ -96,9 +116,10 @@ examples: "hdmi_pll"; reg = <0x4a00400 0x60>, <0x4a00500 0x100>; + #clock-cells = <0>; #phy-cells = <0>; power-domains = <&mmcc 1>; - clock-names = "slave_iface"; - clocks = <&clk 21>; + clock-names = "slave_iface", "pxo"; + clocks = <&clk 21>, <&pxo_board>; core-vdda-supply = <&pm8921_hdmi_mvs>; }; diff --git a/Documentation/devicetree/bindings/reserved-memory/framebuffer.yaml b/Documentation/devicetree/bindings/reserved-memory/framebuffer.yaml new file mode 100644 index 00000000000000..05b6648b3458e6 --- /dev/null +++ b/Documentation/devicetree/bindings/reserved-memory/framebuffer.yaml @@ -0,0 +1,52 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/reserved-memory/framebuffer.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: 
/reserved-memory framebuffer node bindings + +maintainers: + - devicetree-spec@vger.kernel.org + +allOf: + - $ref: reserved-memory.yaml + +properties: + compatible: + const: framebuffer + description: > + This indicates a region of memory meant to be used as a framebuffer for + a set of display devices. It can be used by an operating system to keep + the framebuffer from being overwritten and use it as the backing memory + for a display device (such as simple-framebuffer). + +unevaluatedProperties: false + +examples: + - | + / { + compatible = "foo"; + model = "foo"; + #address-cells = <1>; + #size-cells = <1>; + + chosen { + framebuffer { + compatible = "simple-framebuffer"; + memory-region = <&fb>; + }; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + fb: framebuffer@80000000 { + compatible = "framebuffer"; + reg = <0x80000000 0x007e9000>; + }; + }; + }; +... diff --git a/Documentation/fb/modedb.rst b/Documentation/fb/modedb.rst index e5337503314605..bb2889c6ea27ee 100644 --- a/Documentation/fb/modedb.rst +++ b/Documentation/fb/modedb.rst @@ -29,7 +29,10 @@ Things between square brackets are optional. Valid names are:: - NTSC: 480i output, with the CCIR System-M TV mode and NTSC color encoding + - NTSC-J: 480i output, with the CCIR System-M TV mode, the NTSC color + encoding, and a black level equal to the blanking level. - PAL: 576i output, with the CCIR System-B TV mode and PAL color encoding + - PAL-M: 480i output, with the CCIR System-M TV mode and PAL color encoding If 'M' is specified in the mode_option argument (after <yres> and before <bpp> and <refresh>, if specified) the timings will be calculated using @@ -70,6 +73,8 @@ Valid options are:: - reflect_y (boolean): Perform an axial symmetry on the Y axis - rotate (integer): Rotate the initial framebuffer by x degrees. Valid values are 0, 90, 180 and 270. + - tv_mode: Analog TV mode. One of "NTSC", "NTSC-443", "NTSC-J", "PAL", + "PAL-M", "PAL-N", or "SECAM". - panel_orientation, one of "normal", "upside_down", "left_side_up", or "right_side_up". For KMS drivers only, this sets the "panel orientation" property on the kms connector as hint for kms users.
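Taken together with the named modes above, the new tv_mode option lets an analog TV variant be forced from the kernel command line. Something like the following should select Japanese NTSC on a composite output; the connector name is driver-specific and is shown here purely as an illustration::

    video=Composite-1:720x480i,tv_mode=NTSC-J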
diff --git a/Documentation/gpu/amdgpu/apu-asic-info-table.csv b/Documentation/gpu/amdgpu/apu-asic-info-table.csv index 98c6988e424e65..395a7b7bfaefb9 100644 --- a/Documentation/gpu/amdgpu/apu-asic-info-table.csv +++ b/Documentation/gpu/amdgpu/apu-asic-info-table.csv @@ -1,8 +1,10 @@ -Product Name, Code Reference, DCN/DCE version, GC version, VCE/UVD/VCN version, SDMA version -Radeon R* Graphics, CARRIZO/STONEY, DCE 11, 8, VCE 3 / UVD 6, 3 -Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN/PICASSO, DCN 1.0, 9.1.0, VCN 1.0, 4.1.0 -Ryzen 4000 series, RENOIR, DCN 2.1, 9.3, VCN 2.2, 4.1.2 -Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN2, DCN 1.0, 9.2.2, VCN 1.0.1, 4.1.1 -SteamDeck, VANGOGH, DCN 3.0.1, 10.3.1, VCN 3.1.0, 5.2.1 -Ryzen 5000 series, GREEN SARDINE, DCN 2.1, 9.3, VCN 2.2, 4.1.1 -Ryzen 6000 Zen, YELLOW CARP, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3 +Product Name, Code Reference, DCN/DCE version, GC version, VCE/UVD/VCN version, SDMA version, MP0 version +Radeon R* Graphics, CARRIZO/STONEY, DCE 11, 8, VCE 3 / UVD 6, 3, n/a +Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN/PICASSO, DCN 1.0, 9.1.0, VCN 1.0, 4.1.0, 10.0.0 +Ryzen 4000 series, RENOIR, DCN 2.1, 9.3, VCN 2.2, 4.1.2, 11.0.3 +Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN2, DCN 1.0, 9.2.2, VCN 1.0.1, 4.1.1, 10.0.1 +SteamDeck, VANGOGH, DCN 3.0.1, 10.3.1, VCN 3.1.0, 5.2.1, 11.5.0 +Ryzen 5000 series / Ryzen 7x30 series, GREEN SARDINE / Cezanne / Barcelo / Barcelo-R, DCN 2.1, 9.3, VCN 2.2, 4.1.1, 12.0.1 +Ryzen 6000 series / Ryzen 7x35 series, YELLOW CARP / Rembrandt / Rembrandt+, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3, 13.0.3 +Ryzen 7000 series (AM5), Raphael, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5 +Ryzen 7x20 series, Mendocino, 3.1.6, 10.3.7, 3.1.1, 5.2.7, 13.0.8 diff --git a/Documentation/gpu/amdgpu/dgpu-asic-info-table.csv b/Documentation/gpu/amdgpu/dgpu-asic-info-table.csv index 84617aa35dab84..882d2518f8ed26 100644 --- a/Documentation/gpu/amdgpu/dgpu-asic-info-table.csv +++ b/Documentation/gpu/amdgpu/dgpu-asic-info-table.csv @@ -22,3 +22,5 @@ AMD Radeon RX 6800(XT) /6900(XT) /W6800, SIENNA_CICHLID, DCN 3.0.0, 10.3.0, VCN AMD Radeon RX 6700 XT / 6800M / 6700M, NAVY_FLOUNDER, DCN 3.0.0, 10.3.2, VCN 3.0.0, 5.2.2 AMD Radeon RX 6600(XT) /6600M /W6600 /W6600M, DIMGREY_CAVEFISH, DCN 3.0.2, 10.3.4, VCN 3.0.16, 5.2.4 AMD Radeon RX 6500M /6300M /W6500M /W6300M, BEIGE_GOBY, DCN 3.0.3, 10.3.5, VCN 3.0.33, 5.2.5 +AMD Radeon RX 7900 XT /XTX, , DCN 3.2.0, 11.0.0, VCN 4.0.0, 6.0.0 +AMD Radeon RX 7600M (XT) /7700S /7600S, , DCN 3.2.1, 11.0.2, VCN 4.0.4, 6.0.2 diff --git a/Documentation/gpu/amdgpu/driver-misc.rst b/Documentation/gpu/amdgpu/driver-misc.rst index 1800543d45f740..be131e963d87c3 100644 --- a/Documentation/gpu/amdgpu/driver-misc.rst +++ b/Documentation/gpu/amdgpu/driver-misc.rst @@ -37,7 +37,7 @@ Accelerated Processing Units (APU) Info .. csv-table:: :header-rows: 1 - :widths: 3, 2, 2, 1, 1, 1 + :widths: 3, 2, 2, 1, 1, 1, 1 :file: ./apu-asic-info-table.csv Discrete GPU Info diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst index a4860ffd6e862d..b8ab05e42dbb5d 100644 --- a/Documentation/gpu/drm-kms-helpers.rst +++ b/Documentation/gpu/drm-kms-helpers.rst @@ -188,6 +188,13 @@ Bridge Helper Reference .. kernel-doc:: drivers/gpu/drm/drm_bridge.c :export: +MIPI-DSI bridge operation +------------------------- + +.. 
kernel-doc:: drivers/gpu/drm/drm_bridge.c + :doc: dsi bridge operations + + Bridge Connector Helper Reference --------------------------------- diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst index b4377a545425b1..c92d425cb2dd2f 100644 --- a/Documentation/gpu/drm-kms.rst +++ b/Documentation/gpu/drm-kms.rst @@ -520,6 +520,12 @@ HDMI Specific Connector Properties .. kernel-doc:: drivers/gpu/drm/drm_connector.c :doc: HDMI connector properties +Analog TV Specific Connector Properties +--------------------------------------- + +.. kernel-doc:: drivers/gpu/drm/drm_connector.c + :doc: Analog TV Connector Properties + Standard CRTC Properties ------------------------ diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst index ce47b42924816c..65fb3036a58039 100644 --- a/Documentation/gpu/drm-uapi.rst +++ b/Documentation/gpu/drm-uapi.rst @@ -402,19 +402,19 @@ It's possible to run the IGT-tests in a VM in two ways: 1. Use IGT inside a VM 2. Use IGT from the host machine and write the results in a shared directory. -As follow, there is an example of using a VM with a shared directory with -the host machine to run igt-tests. As an example it's used virtme:: +Following is an example of using a VM with a shared directory with +the host machine to run igt-tests. This example uses virtme:: $ virtme-run --rwdir /path/for/shared_dir --kdir=path/for/kernel/directory --mods=auto -Run the igt-tests in the guest machine, as example it's ran the 'kms_flip' +Run the igt-tests in the guest machine. This example runs the 'kms_flip' tests:: $ /path/for/igt-gpu-tools/scripts/run-tests.sh -p -s -t "kms_flip.*" -v -In this example, instead of build the igt_runner, Piglit is used -(-p option); it's created html summary of the tests results and it's saved -in the folder "igt-gpu-tools/results"; it's executed only the igt-tests +In this example, instead of building the igt_runner, Piglit is used +(-p option). It creates an HTML summary of the test results and saves +them in the folder "igt-gpu-tools/results". It executes only the igt-tests matching the -t option. Display CRC Support diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index b2c6aaf1edf275..1f8a5ebe188eb6 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -508,17 +508,18 @@ Clean up the debugfs support There's a bunch of issues with it: -- The drm_info_list ->show() function doesn't even bother to cast to the drm - structure for you. This is lazy. +- Convert drivers to support the drm_debugfs_add_files() function instead of + the drm_debugfs_create_files() function. + +- Improve late-register debugfs by rolling out the same debugfs pre-register + infrastructure for connector and crtc too. That way, the drivers won't need to + split their setup code into init and register anymore. - We probably want to have some support for debugfs files on crtc/connectors and maybe other kms objects directly in core. There's even drm_print support in the funcs for these objects to dump kms state, so it's all there. And then the ->show() functions should obviously give you a pointer to the right object. -- The drm_info_list stuff is centered on drm_minor instead of drm_device. For - anything we want to print drm_device (or maybe drm_file) is the right thing. - - The drm_driver->debugfs_init hooks we have is just an artifact of the old midlayered load sequence. 
DRM debugfs should work more like sysfs, where you can create properties/files for an object anytime you want, and the core @@ -527,8 +528,6 @@ There's a bunch of issues with it: this (together with the drm_minor->drm_device move) would allow us to remove debugfs_init. -Previous RFC that hasn't landed yet: https://lore.kernel.org/dri-devel/20200513114130.28641-2-wambui.karugax@gmail.com/ - Contact: Daniel Vetter Level: Intermediate diff --git a/Documentation/gpu/vc4.rst b/Documentation/gpu/vc4.rst index 5df1d98b954461..5e5e92e4091959 100644 --- a/Documentation/gpu/vc4.rst +++ b/Documentation/gpu/vc4.rst @@ -54,6 +54,25 @@ VEC (Composite TV out) encoder .. kernel-doc:: drivers/gpu/drm/vc4/vc4_vec.c :doc: VC4 SDTV module +KUnit Tests +=========== + +The VC4 driver uses KUnit to perform driver-specific unit and +integration tests. + +These tests use a mock driver and can be run using the +command below, on either arm or arm64 architectures: + +.. code-block:: bash + + $ ./tools/testing/kunit/kunit.py run \ + --kunitconfig=drivers/gpu/drm/vc4/tests/.kunitconfig \ + --cross_compile aarch64-linux-gnu- --arch arm64 + +Parts of the driver that are currently covered by tests are: + * The HVS to PixelValve dynamic FIFO assignment, for the BCM2835-7 + and BCM2711. + Memory Management and 3D Command Submission =========================================== diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst index 362bee0be8abd8..0a1882e296ae08 100644 --- a/Documentation/userspace-api/ioctl/ioctl-number.rst +++ b/Documentation/userspace-api/ioctl/ioctl-number.rst @@ -221,6 +221,7 @@ Code Seq# Include File Comments 'a' 00-0F drivers/crypto/qat/qat_common/adf_cfg_common.h conflict! qat driver 'b' 00-FF conflict! bit3 vme host bridge +'b' 00-0F linux/dma-buf.h conflict! 'c' all linux/cm4000_cs.h conflict! 'c' 00-7F linux/comstats.h conflict! 'c' 00-7F linux/coda.h conflict! diff --git a/Documentation/userspace-api/media/v4l/pixfmt-packed-yuv.rst b/Documentation/userspace-api/media/v4l/pixfmt-packed-yuv.rst index bf283a1b5581da..24a7715420594a 100644 --- a/Documentation/userspace-api/media/v4l/pixfmt-packed-yuv.rst +++ b/Documentation/userspace-api/media/v4l/pixfmt-packed-yuv.rst @@ -262,7 +262,12 @@ the second byte and Y'\ :sub:`7-0` in the third byte. ================= These formats, commonly referred to as YUYV or YUY2, subsample the chroma -components horizontally by 2, storing 2 pixels in 4 bytes. +components horizontally by 2, storing 2 pixels in a container. The container +is 32 bits for 8-bit formats, and 64 bits for formats with more than 8 bits +per component. + +The packed YUYV formats with more than 8 bits per component are stored as four +16-bit little-endian words. Each word's most significant bits contain one +component, and the least significant bits are zero padding. .. raw:: latex .. tabularcolumns:: |p{3.4cm}|p{1.2cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}| -.. flat-table:: Packed YUV 4:2:2 Formats +.. flat-table:: Packed YUV 4:2:2 Formats in 32-bit container :header-rows: 1 :stub-columns: 0 @@ -337,6 +342,46 @@ components horizontally by 2, storing 2 pixels in 4 bytes. - Y'\ :sub:`3` - Cb\ :sub:`2` +.. tabularcolumns:: |p{3.4cm}|p{1.2cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}| + +.. 
flat-table:: Packed YUV 4:2:2 Formats in 64-bit container + :header-rows: 1 + :stub-columns: 0 + + * - Identifier + - Code + - Word 0 + - Word 1 + - Word 2 + - Word 3 + * .. _V4L2-PIX-FMT-Y210: + + - ``V4L2_PIX_FMT_Y210`` + - 'Y210' + + - Y'\ :sub:`0` (bits 15-6) + - Cb\ :sub:`0` (bits 15-6) + - Y'\ :sub:`1` (bits 15-6) + - Cr\ :sub:`0` (bits 15-6) + * .. _V4L2-PIX-FMT-Y212: + + - ``V4L2_PIX_FMT_Y212`` + - 'Y212' + + - Y'\ :sub:`0` (bits 15-4) + - Cb\ :sub:`0` (bits 15-4) + - Y'\ :sub:`1` (bits 15-4) + - Cr\ :sub:`0` (bits 15-4) + * .. _V4L2-PIX-FMT-Y216: + + - ``V4L2_PIX_FMT_Y216`` + - 'Y216' + + - Y'\ :sub:`0` (bits 15-0) + - Cb\ :sub:`0` (bits 15-0) + - Y'\ :sub:`1` (bits 15-0) + - Cr\ :sub:`0` (bits 15-0) + .. raw:: latex \normalsize diff --git a/Documentation/userspace-api/media/v4l/pixfmt-rgb.rst b/Documentation/userspace-api/media/v4l/pixfmt-rgb.rst index 30f51cd33f99a8..d330aeb4d3ebf7 100644 --- a/Documentation/userspace-api/media/v4l/pixfmt-rgb.rst +++ b/Documentation/userspace-api/media/v4l/pixfmt-rgb.rst @@ -763,6 +763,200 @@ nomenclature that instead use the order of components as seen in a 24- or \normalsize +10 Bits Per Component +===================== + +These formats store a 30-bit RGB triplet with an optional 2 bit alpha in four +bytes. They are named based on the order of the RGB components as seen in a +32-bit word, which is then stored in memory in little endian byte order +(unless otherwise noted by the presence of bit 31 in the 4CC value), and on the +number of bits for each component. + +.. raw:: latex + + \begingroup + \tiny + \setlength{\tabcolsep}{2pt} + +.. tabularcolumns:: |p{2.8cm}|p{2.0cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}| + + +.. flat-table:: RGB Formats 10 Bits Per Color Component + :header-rows: 2 + :stub-columns: 0 + + * - Identifier + - Code + - :cspan:`7` Byte 0 in memory + - :cspan:`7` Byte 1 + - :cspan:`7` Byte 2 + - :cspan:`7` Byte 3 + * - + - + - 7 + - 6 + - 5 + - 4 + - 3 + - 2 + - 1 + - 0 + + - 7 + - 6 + - 5 + - 4 + - 3 + - 2 + - 1 + - 0 + + - 7 + - 6 + - 5 + - 4 + - 3 + - 2 + - 1 + - 0 + + - 7 + - 6 + - 5 + - 4 + - 3 + - 2 + - 1 + - 0 + * .. _V4L2-PIX-FMT-RGBX1010102: + + - ``V4L2_PIX_FMT_RGBX1010102`` + - 'RX30' + + - b\ :sub:`5` + - b\ :sub:`4` + - b\ :sub:`3` + - b\ :sub:`2` + - b\ :sub:`1` + - b\ :sub:`0` + - x + - x + + - g\ :sub:`3` + - g\ :sub:`2` + - g\ :sub:`1` + - g\ :sub:`0` + - b\ :sub:`9` + - b\ :sub:`8` + - b\ :sub:`7` + - b\ :sub:`6` + + - r\ :sub:`1` + - r\ :sub:`0` + - g\ :sub:`9` + - g\ :sub:`8` + - g\ :sub:`7` + - g\ :sub:`6` + - g\ :sub:`5` + - g\ :sub:`4` + + - r\ :sub:`9` + - r\ :sub:`8` + - r\ :sub:`7` + - r\ :sub:`6` + - r\ :sub:`5` + - r\ :sub:`4` + - r\ :sub:`3` + - r\ :sub:`2` + - + * .. 
_V4L2-PIX-FMT-RGBA1010102: + + - ``V4L2_PIX_FMT_RGBA1010102`` + - 'RA30' + + - b\ :sub:`5` + - b\ :sub:`4` + - b\ :sub:`3` + - b\ :sub:`2` + - b\ :sub:`1` + - b\ :sub:`0` + - a\ :sub:`1` + - a\ :sub:`0` + + - g\ :sub:`3` + - g\ :sub:`2` + - g\ :sub:`1` + - g\ :sub:`0` + - b\ :sub:`9` + - b\ :sub:`8` + - b\ :sub:`7` + - b\ :sub:`6` + + - r\ :sub:`1` + - r\ :sub:`0` + - g\ :sub:`9` + - g\ :sub:`8` + - g\ :sub:`7` + - g\ :sub:`6` + - g\ :sub:`5` + - g\ :sub:`4` + + - r\ :sub:`9` + - r\ :sub:`8` + - r\ :sub:`7` + - r\ :sub:`6` + - r\ :sub:`5` + - r\ :sub:`4` + - r\ :sub:`3` + - r\ :sub:`2` + - + * .. _V4L2-PIX-FMT-ARGB2101010: + + - ``V4L2_PIX_FMT_ARGB2101010`` + - 'AR30' + + - b\ :sub:`7` + - b\ :sub:`6` + - b\ :sub:`5` + - b\ :sub:`4` + - b\ :sub:`3` + - b\ :sub:`2` + - b\ :sub:`1` + - b\ :sub:`0` + + - g\ :sub:`5` + - g\ :sub:`4` + - g\ :sub:`3` + - g\ :sub:`2` + - g\ :sub:`1` + - g\ :sub:`0` + - b\ :sub:`9` + - b\ :sub:`8` + + - r\ :sub:`3` + - r\ :sub:`2` + - r\ :sub:`1` + - r\ :sub:`0` + - g\ :sub:`9` + - g\ :sub:`8` + - g\ :sub:`7` + - g\ :sub:`6` + + - a\ :sub:`1` + - a\ :sub:`0` + - r\ :sub:`9` + - r\ :sub:`8` + - r\ :sub:`7` + - r\ :sub:`6` + - r\ :sub:`5` + - r\ :sub:`4` + - + +.. raw:: latex + + \endgroup + + Deprecated RGB Formats ====================== diff --git a/Documentation/userspace-api/media/v4l/subdev-formats.rst b/Documentation/userspace-api/media/v4l/subdev-formats.rst index 16ef3b41932e76..a3a35eeed70846 100644 --- a/Documentation/userspace-api/media/v4l/subdev-formats.rst +++ b/Documentation/userspace-api/media/v4l/subdev-formats.rst @@ -949,6 +949,43 @@ The following tables list existing packed RGB formats. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` + * .. _MEDIA-BUS-FMT-BGR666-1X18: + + - MEDIA_BUS_FMT_BGR666_1X18 + - 0x1023 + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - b\ :sub:`5` + - b\ :sub:`4` + - b\ :sub:`3` + - b\ :sub:`2` + - b\ :sub:`1` + - b\ :sub:`0` + - g\ :sub:`5` + - g\ :sub:`4` + - g\ :sub:`3` + - g\ :sub:`2` + - g\ :sub:`1` + - g\ :sub:`0` + - r\ :sub:`5` + - r\ :sub:`4` + - r\ :sub:`3` + - r\ :sub:`2` + - r\ :sub:`1` + - r\ :sub:`0` * .. _MEDIA-BUS-FMT-RBG888-1X24: - MEDIA_BUS_FMT_RBG888_1X24 @@ -1023,6 +1060,80 @@ The following tables list existing packed RGB formats. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` + * .. _MEDIA-BUS-FMT-BGR666-1X24_CPADHI: + + - MEDIA_BUS_FMT_BGR666_1X24_CPADHI + - 0x1024 + - + - + - + - + - + - + - + - + - + - 0 + - 0 + - b\ :sub:`5` + - b\ :sub:`4` + - b\ :sub:`3` + - b\ :sub:`2` + - b\ :sub:`1` + - b\ :sub:`0` + - 0 + - 0 + - g\ :sub:`5` + - g\ :sub:`4` + - g\ :sub:`3` + - g\ :sub:`2` + - g\ :sub:`1` + - g\ :sub:`0` + - 0 + - 0 + - r\ :sub:`5` + - r\ :sub:`4` + - r\ :sub:`3` + - r\ :sub:`2` + - r\ :sub:`1` + - r\ :sub:`0` + * .. _MEDIA-BUS-FMT-RGB565-1X24_CPADHI: + + - MEDIA_BUS_FMT_RGB565_1X24_CPADHI + - 0x1022 + - + - + - + - + - + - + - + - + - + - 0 + - 0 + - 0 + - r\ :sub:`4` + - r\ :sub:`3` + - r\ :sub:`2` + - r\ :sub:`1` + - r\ :sub:`0` + - 0 + - 0 + - g\ :sub:`5` + - g\ :sub:`4` + - g\ :sub:`3` + - g\ :sub:`2` + - g\ :sub:`1` + - g\ :sub:`0` + - 0 + - 0 + - 0 + - b\ :sub:`4` + - b\ :sub:`3` + - b\ :sub:`2` + - b\ :sub:`1` + - b\ :sub:`0` * .. 
_MEDIA-BUS-FMT-BGR888-1X24: - MEDIA_BUS_FMT_BGR888_1X24 diff --git a/MAINTAINERS b/MAINTAINERS index 6a0723b7abc276..de0be1a92bfcd1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6417,6 +6417,14 @@ S: Maintained T: git git://anongit.freedesktop.org/drm/drm-misc F: drivers/gpu/drm/tiny/gm12u320.c +DRM DRIVER FOR HIMAX HX8394 MIPI-DSI LCD panels +M: Ondrej Jirman <megi@xff.cz> +M: Javier Martinez Canillas <javierm@redhat.com> +S: Maintained +T: git git://anongit.freedesktop.org/drm/drm-misc +F: Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml +F: drivers/gpu/drm/panel/panel-himax-hx8394.c + DRM DRIVER FOR HX8357D PANELS M: Emma Anholt <emma@anholt.net> S: Maintained @@ -6438,11 +6446,6 @@ T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/devicetree/bindings/display/ilitek,ili9486.yaml F: drivers/gpu/drm/tiny/ili9486.c -DRM DRIVER FOR INTEL I810 VIDEO CARDS -S: Orphan / Obsolete -F: drivers/gpu/drm/i810/ -F: include/uapi/drm/i810_drm.h - DRM DRIVER FOR JADARD JD9365DA-H3 MIPI-DSI LCD PANELS M: Jagan Teki <jagan@amarulasolutions.com> S: Maintained @@ -6471,11 +6474,6 @@ S: Maintained F: Documentation/devicetree/bindings/display/panel/mantix,mlaf057we51-x.yaml F: drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c -DRM DRIVER FOR MATROX G200/G400 GRAPHICS CARDS -S: Orphan / Obsolete -F: drivers/gpu/drm/mga/ -F: include/uapi/drm/mga_drm.h - DRM DRIVER FOR MGA G200 GRAPHICS CHIPS M: Dave Airlie <airlied@redhat.com> R: Thomas Zimmermann <tzimmermann@suse.de> @@ -6594,11 +6592,6 @@ T: git git://anongit.freedesktop.org/drm/drm-misc F: drivers/gpu/drm/qxl/ F: include/uapi/drm/qxl_drm.h -DRM DRIVER FOR RAGE 128 VIDEO CARDS -S: Orphan / Obsolete -F: drivers/gpu/drm/r128/ -F: include/uapi/drm/r128_drm.h - DRM DRIVER FOR RAYDIUM RM67191 PANELS M: Robert Chiras <robert.chiras@nxp.com> S: Maintained @@ -6626,11 +6619,6 @@ S: Maintained F: Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml F: drivers/gpu/drm/panel/panel-sitronix-st7703.c -DRM DRIVER FOR SAVAGE VIDEO CARDS -S: Orphan / Obsolete -F: drivers/gpu/drm/savage/ -F: include/uapi/drm/savage_drm.h - DRM DRIVER FOR FIRMWARE FRAMEBUFFERS M: Thomas Zimmermann <tzimmermann@suse.de> M: Javier Martinez Canillas <javierm@redhat.com> @@ -6646,11 +6634,6 @@ F: include/drm/drm_aperture.h F: include/linux/aperture.h F: include/video/nomodeset.h -DRM DRIVER FOR SIS VIDEO CARDS -S: Orphan / Obsolete -F: drivers/gpu/drm/sis/ -F: include/uapi/drm/sis_drm.h - DRM DRIVER FOR SITRONIX ST7586 PANELS M: David Lechner <david@lechnology.com> S: Maintained @@ -6678,10 +6661,6 @@ T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/devicetree/bindings/display/ste,mcde.yaml F: drivers/gpu/drm/mcde/ -DRM DRIVER FOR TDFX VIDEO CARDS -S: Orphan / Obsolete -F: drivers/gpu/drm/tdfx/ - DRM DRIVER FOR TI DLPC3433 MIPI DSI TO DMD BRIDGE M: Jagan Teki <jagan@amarulasolutions.com> S: Maintained @@ -6781,6 +6760,16 @@ C: irc://irc.oftc.net/dri-devel T: git https://git.kernel.org/pub/scm/linux/kernel/git/ogabbay/accel.git F: Documentation/accel/ F: drivers/accel/ +F: include/drm/drm_accel.h + +DRM ACCEL DRIVERS FOR INTEL VPU +M: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com> +M: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com> +L: dri-devel@lists.freedesktop.org +S: Supported +T: git git://anongit.freedesktop.org/drm/drm-misc +F: drivers/accel/ivpu/ +F: include/uapi/drm/ivpu_accel.h DRM DRIVERS FOR ALLWINNER A10 M: Maxime Ripard <mripard@kernel.org> @@ -6851,7 +6840,7 @@ M: Philipp Zabel <p.zabel@pengutronix.de> L: dri-devel@lists.freedesktop.org S: Maintained F: Documentation/devicetree/bindings/display/imx/ -F: drivers/gpu/drm/imx/ +F: drivers/gpu/drm/imx/ipuv3/ F: drivers/gpu/ipu-v3/ DRM DRIVERS FOR FREESCALE IMX BRIDGE @@ -6874,9 +6863,10 @@ F: drivers/gpu/drm/gma500/ DRM DRIVERS FOR HISILICON M: Xinliang Liu <xinliang.liu@linaro.org> M: Tian Tao <tiantao6@hisilicon.com> -R: John Stultz <jstultz@google.com> R: Xinwei Kong <kong.kongxinwei@hisilicon.com> -R: 
Chen Feng +R: Sumit Semwal +R: Yongqin Liu +R: John Stultz L: dri-devel@lists.freedesktop.org S: Maintained T: git git://anongit.freedesktop.org/drm/drm-misc @@ -6917,7 +6907,7 @@ M: Thierry Reding L: dri-devel@lists.freedesktop.org L: linux-tegra@vger.kernel.org S: Supported -T: git git://anongit.freedesktop.org/tegra/linux.git +T: git https://gitlab.freedesktop.org/drm/tegra.git F: Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.yaml F: Documentation/devicetree/bindings/gpu/host1x/ F: drivers/gpu/drm/tegra/ @@ -8915,13 +8905,15 @@ F: block/partitions/efi.* HABANALABS PCI DRIVER M: Oded Gabbay +L: dri-devel@lists.freedesktop.org S: Supported +C: irc://irc.oftc.net/dri-devel T: git https://git.kernel.org/pub/scm/linux/kernel/git/ogabbay/linux.git F: Documentation/ABI/testing/debugfs-driver-habanalabs F: Documentation/ABI/testing/sysfs-driver-habanalabs -F: drivers/misc/habanalabs/ +F: drivers/accel/habanalabs/ F: include/trace/events/habanalabs.h -F: include/uapi/misc/habanalabs.h +F: include/uapi/drm/habanalabs_accel.h HACKRF MEDIA DRIVER M: Antti Palosaari diff --git a/drivers/Makefile b/drivers/Makefile index 62b2acf2a7647e..b1a714686050cb 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -188,3 +188,4 @@ obj-$(CONFIG_COUNTER) += counter/ obj-$(CONFIG_MOST) += most/ obj-$(CONFIG_PECI) += peci/ obj-$(CONFIG_HTE) += hte/ +obj-$(CONFIG_DRM_ACCEL) += accel/ diff --git a/drivers/accel/Kconfig b/drivers/accel/Kconfig index c9ce849b2984af..c437206aa3f11a 100644 --- a/drivers/accel/Kconfig +++ b/drivers/accel/Kconfig @@ -6,9 +6,10 @@ # as, but not limited to, Machine-Learning and Deep-Learning acceleration # devices # +if DRM + menuconfig DRM_ACCEL bool "Compute Acceleration Framework" - depends on DRM help Framework for device drivers of compute acceleration devices, such as, but not limited to, Machine-Learning and Deep-Learning @@ -22,3 +23,8 @@ menuconfig DRM_ACCEL major number than GPUs, and will be exposed to user-space using different device files, called accel/accel* (in /dev, sysfs and debugfs). + +source "drivers/accel/habanalabs/Kconfig" +source "drivers/accel/ivpu/Kconfig" + +endif diff --git a/drivers/accel/Makefile b/drivers/accel/Makefile new file mode 100644 index 00000000000000..07aa77aed1c8dd --- /dev/null +++ b/drivers/accel/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-y += habanalabs/ +obj-y += ivpu/ diff --git a/drivers/misc/habanalabs/Kconfig b/drivers/accel/habanalabs/Kconfig similarity index 81% rename from drivers/misc/habanalabs/Kconfig rename to drivers/accel/habanalabs/Kconfig index bd01d0d940c018..be85336107f9fa 100644 --- a/drivers/misc/habanalabs/Kconfig +++ b/drivers/accel/habanalabs/Kconfig @@ -3,8 +3,10 @@ # HabanaLabs AI accelerators driver # -config HABANA_AI - tristate "HabanaAI accelerators (habanalabs)" +config DRM_ACCEL_HABANALABS + tristate "HabanaLabs AI accelerators" + depends on DRM_ACCEL + depends on X86_64 depends on PCI && HAS_IOMEM select GENERIC_ALLOCATOR select HWMON @@ -19,7 +21,7 @@ config HABANA_AI the user to submit workloads to the devices. The user-space interface is described in - include/uapi/misc/habanalabs.h + include/uapi/drm/habanalabs_accel.h If unsure, say N. 
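Taken together, the Kconfig fragments above spell out the user-visible contract of the move: drivers gated by DRM_ACCEL expose their nodes as accel/accel* in /dev, sysfs and debugfs, and the habanalabs uapi header now lives under include/uapi/drm/. A minimal userspace sketch of that convention follows; it only demonstrates the accel device naming and the relocated header path — the accel0 index and the installed header location are illustrative, and whether a particular driver has already switched its char devices over to the accel nodes is outside these hunks:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
/* previously installed from include/uapi/misc/; the habanalabs uapi
 * header now installs from include/uapi/drm/, per the Kconfig help text */
#include <drm/habanalabs_accel.h>

int main(void)
{
	/* DRM_ACCEL devices use a dedicated major and accel/accel* naming */
	int fd = open("/dev/accel/accel0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/accel/accel0");
		return 1;
	}

	close(fd);
	return 0;
}
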
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/accel/habanalabs/Makefile similarity index 88% rename from drivers/misc/habanalabs/Makefile rename to drivers/accel/habanalabs/Makefile index a48a9e0969ed2d..98510cdd506643 100644 --- a/drivers/misc/habanalabs/Makefile +++ b/drivers/accel/habanalabs/Makefile @@ -3,7 +3,7 @@ # Makefile for HabanaLabs AI accelerators driver # -obj-$(CONFIG_HABANA_AI) := habanalabs.o +obj-$(CONFIG_DRM_ACCEL_HABANALABS) := habanalabs.o include $(src)/common/Makefile habanalabs-y += $(HL_COMMON_FILES) diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/accel/habanalabs/common/Makefile similarity index 100% rename from drivers/misc/habanalabs/common/Makefile rename to drivers/accel/habanalabs/common/Makefile diff --git a/drivers/misc/habanalabs/common/asid.c b/drivers/accel/habanalabs/common/asid.c similarity index 100% rename from drivers/misc/habanalabs/common/asid.c rename to drivers/accel/habanalabs/common/asid.c diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/accel/habanalabs/common/command_buffer.c similarity index 95% rename from drivers/misc/habanalabs/common/command_buffer.c rename to drivers/accel/habanalabs/common/command_buffer.c index 2b332991ac6a75..3a0535ac28b1ba 100644 --- a/drivers/misc/habanalabs/common/command_buffer.c +++ b/drivers/accel/habanalabs/common/command_buffer.c @@ -5,7 +5,7 @@ * All Rights Reserved. */ -#include <uapi/misc/habanalabs.h> +#include <uapi/drm/habanalabs_accel.h> #include "habanalabs.h" #include @@ -88,6 +88,7 @@ static void cb_fini(struct hl_device *hdev, struct hl_cb *cb) static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb) { if (cb->is_pool) { + atomic_set(&cb->is_handle_destroyed, 0); spin_lock(&hdev->cb_pool_lock); list_add(&cb->pool_list, &hdev->cb_pool); spin_unlock(&hdev->cb_pool_lock); @@ -298,8 +299,25 @@ int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg, int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle) { + struct hl_cb *cb; int rc; + cb = hl_cb_get(mmg, cb_handle); + if (!cb) { + dev_dbg(mmg->dev, "CB destroy failed, no CB was found for handle %#llx\n", + cb_handle); + return -EINVAL; + } + + /* Make sure that CB handle isn't destroyed more than once */ + rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1); + hl_cb_put(cb); + if (rc) { + dev_dbg(mmg->dev, "CB destroy failed, handle %#llx was already destroyed\n", + cb_handle); + return -EINVAL; + } + rc = hl_mmap_mem_buf_put_handle(mmg, cb_handle); if (rc < 0) return rc; /* Invalid handle */ @@ -350,7 +368,7 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data) int rc; if (!hl_device_operational(hdev, &status)) { - dev_warn_ratelimited(hdev->dev, + dev_dbg_ratelimited(hdev->dev, "Device is %s. Can't execute CB IOCTL\n", hdev->status[status]); return -EBUSY; diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/accel/habanalabs/common/command_submission.c similarity index 96% rename from drivers/misc/habanalabs/common/command_submission.c rename to drivers/accel/habanalabs/common/command_submission.c index ea0e5101c10ed6..8270db0a72a26b 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/accel/habanalabs/common/command_submission.c @@ -5,7 +5,7 @@ * All Rights Reserved. 
*/ -#include <uapi/misc/habanalabs.h> +#include <uapi/drm/habanalabs_accel.h> #include "habanalabs.h" #include @@ -13,7 +13,8 @@ #define HL_CS_FLAGS_TYPE_MASK (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \ HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \ - HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND) + HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND | \ + HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES) #define MAX_TS_ITER_NUM 10 @@ -397,8 +398,16 @@ static void hl_complete_job(struct hl_device *hdev, struct hl_cs_job *job) * flow by calling 'hl_hw_queue_update_ci'. */ if (cs_needs_completion(cs) && - (job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW)) + (job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW)) { + + /* In CS-based completions, the timestamp is already available, + * so there is no need to extract it from the job + */ + if (hdev->asic_prop.completion_mode == HL_COMPLETION_MODE_JOB) + cs->completion_timestamp = job->timestamp; + cs_put(cs); + } hl_cs_job_put(job); } @@ -775,7 +784,7 @@ static void cs_do_release(struct kref *ref) } if (cs->timestamp) { - cs->fence->timestamp = ktime_get(); + cs->fence->timestamp = cs->completion_timestamp; hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence, cs->fence->timestamp, cs->fence->error); } @@ -1117,6 +1126,27 @@ void hl_release_pending_user_interrupts(struct hl_device *hdev) wake_pending_user_interrupt_threads(interrupt); } +static void force_complete_cs(struct hl_device *hdev) +{ + struct hl_cs *cs; + + spin_lock(&hdev->cs_mirror_lock); + + list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node) { + cs->fence->error = -EIO; + complete_all(&cs->fence->completion); + } + + spin_unlock(&hdev->cs_mirror_lock); +} + +void hl_abort_waitings_for_completion(struct hl_device *hdev) +{ + force_complete_cs(hdev); + force_complete_multi_cs(hdev); + hl_release_pending_user_interrupts(hdev); +} + static void job_wq_completion(struct work_struct *work) { struct hl_cs_job *job = container_of(work, struct hl_cs_job, @@ -1274,6 +1304,8 @@ static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags) return CS_UNRESERVE_SIGNALS; else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND) return CS_TYPE_ENGINE_CORE; + else if (cs_type_flags & HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES) + return CS_TYPE_FLUSH_PCI_HBW_WRITES; else return CS_TYPE_DEFAULT; } @@ -1286,6 +1318,13 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args) enum hl_device_status status; enum hl_cs_type cs_type; bool is_sync_stream; + int i; + + for (i = 0 ; i < sizeof(args->in.pad) ; i++) + if (args->in.pad[i]) { + dev_dbg(hdev->dev, "Padding bytes must be 0\n"); + return -EINVAL; + } if (!hl_device_operational(hdev, &status)) { return -EBUSY; @@ -2422,6 +2461,21 @@ static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores, return rc; } +static int cs_ioctl_flush_pci_hbw_writes(struct hl_fpriv *hpriv) +{ + struct hl_device *hdev = hpriv->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + + if (!prop->hbw_flush_reg) { + dev_dbg(hdev->dev, "HBW flush is not supported\n"); + return -EOPNOTSUPP; + } + + RREG32(prop->hbw_flush_reg); + + return 0; +} + int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data) { union hl_cs_args *args = data; @@ -2478,6 +2532,9 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data) rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores, args->in.num_engine_cores, args->in.core_command); break; + case CS_TYPE_FLUSH_PCI_HBW_WRITES: + rc = cs_ioctl_flush_pci_hbw_writes(hpriv); + break; 
default: rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq, args->in.cs_flags, @@ -2569,7 +2626,9 @@ static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence *status = CS_WAIT_STATUS_BUSY; } - if (error == -ETIMEDOUT || error == -EIO) + if (completion_rc == -ERESTARTSYS) + rc = completion_rc; + else if (error == -ETIMEDOUT || error == -EIO) rc = error; return rc; @@ -2699,7 +2758,8 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_com break; default: dev_err(hdev->dev, "Invalid fence status\n"); - return -EINVAL; + rc = -EINVAL; + break; } } @@ -2828,6 +2888,9 @@ static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data, if (completion_rc > 0) mcs_data->timestamp = mcs_compl->timestamp; + if (completion_rc == -ERESTARTSYS) + return completion_rc; + mcs_data->wait_status = completion_rc; return 0; @@ -2870,7 +2933,13 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) u32 size_to_copy; u64 *cs_seq_arr; u8 seq_arr_len; - int rc; + int rc, i; + + for (i = 0 ; i < sizeof(args->in.pad) ; i++) + if (args->in.pad[i]) { + dev_dbg(hdev->dev, "Padding bytes must be 0\n"); + return -EINVAL; + } if (!hdev->supports_wait_for_multi_cs) { dev_err(hdev->dev, "Wait for multi CS is not supported\n"); @@ -2973,15 +3042,15 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) free_seq_arr: kfree(cs_seq_arr); - if (rc) - return rc; - - if (mcs_data.wait_status == -ERESTARTSYS) { + if (rc == -ERESTARTSYS) { dev_err_ratelimited(hdev->dev, "user process got signal while waiting for Multi-CS\n"); - return -EINTR; + rc = -EINTR; } + if (rc) + return rc; + /* update output args */ memset(args, 0, sizeof(*args)); @@ -3119,19 +3188,18 @@ static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf, goto start_over; } } else { + /* Fill up the new registration node info */ + requested_offset_record->ts_reg_info.buf = buf; + requested_offset_record->ts_reg_info.cq_cb = cq_cb; + requested_offset_record->ts_reg_info.timestamp_kernel_addr = + (u64 *) ts_buff->user_buff_address + ts_offset; + requested_offset_record->cq_kernel_addr = + (u64 *) cq_cb->kernel_address + cq_offset; + requested_offset_record->cq_target_value = target_value; + spin_unlock_irqrestore(wait_list_lock, flags); } - /* Fill up the new registration node info */ - requested_offset_record->ts_reg_info.in_use = 1; - requested_offset_record->ts_reg_info.buf = buf; - requested_offset_record->ts_reg_info.cq_cb = cq_cb; - requested_offset_record->ts_reg_info.timestamp_kernel_addr = - (u64 *) ts_buff->user_buff_address + ts_offset; - requested_offset_record->cq_kernel_addr = - (u64 *) cq_cb->kernel_address + cq_offset; - requested_offset_record->cq_target_value = target_value; - *pend = requested_offset_record; dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB %p\n", @@ -3179,7 +3247,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, goto put_cq_cb; } - /* Find first available record */ + /* get ts buffer record */ rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset, cq_counters_offset, target_value, &interrupt->wait_list_lock, &pend); @@ -3227,7 +3295,19 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, * Note that we cannot have sorted list by target value, * in order to shorten the list pass loop, since * same list could have nodes for different cq counter handle. 
+ * Note: + * Mark the ts buff offset as in use here, inside the spinlock protection area, + * to avoid hitting the re-use path in ts_buff_get_kernel_ts_record + * before the node is added to the list. This scenario can happen when + * multiple threads race on the same offset: one thread sets up the ts buff + * in ts_buff_get_kernel_ts_record, then another thread takes over, reaches + * ts_buff_get_kernel_ts_record as well, re-uses the same ts buff offset, and + * then tries to delete a node that was never added to the list. */ + if (register_ts_record) + pend->ts_reg_info.in_use = 1; + list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head); spin_unlock_irqrestore(&interrupt->wait_list_lock, flags); @@ -3489,14 +3569,15 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data) int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data) { + struct hl_device *hdev = hpriv->hdev; union hl_wait_cs_args *args = data; u32 flags = args->in.flags; int rc; - /* If the device is not operational, no point in waiting for any command submission or - * user interrupt + /* If the device is not operational, or if an error has happened and the user should release + * the device, there is no point in waiting for any command submission or user interrupt. */ - if (!hl_device_operational(hpriv->hdev, NULL) + if (!hl_device_operational(hpriv->hdev, NULL) || hdev->reset_info.watchdog_active) return -EBUSY; if (flags & HL_WAIT_CS_FLAGS_INTERRUPT) diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/accel/habanalabs/common/context.c similarity index 100% rename from drivers/misc/habanalabs/common/context.c rename to drivers/accel/habanalabs/common/context.c diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/accel/habanalabs/common/debugfs.c similarity index 100% rename from drivers/misc/habanalabs/common/debugfs.c rename to drivers/accel/habanalabs/common/debugfs.c diff --git a/drivers/misc/habanalabs/common/decoder.c b/drivers/accel/habanalabs/common/decoder.c similarity index 100% rename from drivers/misc/habanalabs/common/decoder.c rename to drivers/accel/habanalabs/common/decoder.c diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c similarity index 95% rename from drivers/misc/habanalabs/common/device.c rename to drivers/accel/habanalabs/common/device.c index 87ab329e65d49d..9933e5858a3633 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/accel/habanalabs/common/device.c @@ -7,7 +7,7 @@ #define pr_fmt(fmt) "habanalabs: " fmt -#include <uapi/misc/habanalabs.h> +#include <uapi/drm/habanalabs_accel.h> #include "habanalabs.h" #include @@ -428,8 +428,10 @@ static void hpriv_release(struct kref *ref) */ reset_device = hdev->reset_upon_device_release || hdev->reset_info.watchdog_active; - /* Unless device is reset in any case, check idle status and reset if device is not idle */ - if (!reset_device && hdev->pdev && !hdev->pldm) + /* Check the device idle status and reset if not idle. + * Skip it if already in reset, or if device is going to be reset in any case. + */ + if (!hdev->reset_info.in_reset && !reset_device && hdev->pdev && !hdev->pldm) device_is_idle = hdev->asic_funcs->is_device_idle(hdev, idle_mask, HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL); if (!device_is_idle) { @@ -511,11 +513,6 @@ static int hl_device_release(struct inode *inode, struct file *filp) return 0; } - /* Each pending user interrupt holds the user's context, hence we - * must release them all before calling hl_ctx_mgr_fini(). 
- */ - hl_release_pending_user_interrupts(hpriv->hdev); - hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr); hl_mem_mgr_fini(&hpriv->mem_mgr); @@ -1428,8 +1425,8 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags) int hl_device_reset(struct hl_device *hdev, u32 flags) { bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false, - reset_upon_device_release = false, schedule_hard_reset = false, delay_reset, - from_dev_release, from_watchdog_thread; + reset_upon_device_release = false, schedule_hard_reset = false, + delay_reset, from_dev_release, from_watchdog_thread; u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0}; struct hl_ctx *ctx; int i, rc; @@ -1446,12 +1443,17 @@ int hl_device_reset(struct hl_device *hdev, u32 flags) delay_reset = !!(flags & HL_DRV_RESET_DELAY); from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR); + if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) { + dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n"); + return 0; + } + if (!hard_reset && !hdev->asic_prop.supports_compute_reset) { hard_instead_soft = true; hard_reset = true; } - if (hdev->reset_upon_device_release && (flags & HL_DRV_RESET_DEV_RELEASE)) { + if (hdev->reset_upon_device_release && from_dev_release) { if (hard_reset) { dev_crit(hdev->dev, "Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n"); @@ -1512,6 +1514,7 @@ int hl_device_reset(struct hl_device *hdev, u32 flags) &hdev->device_release_watchdog_work.reset_work); if (from_dev_release) { + hdev->reset_info.in_compute_reset = 0; flags |= HL_DRV_RESET_HARD; flags &= ~HL_DRV_RESET_DEV_RELEASE; hard_reset = true; @@ -1566,7 +1569,8 @@ int hl_device_reset(struct hl_device *hdev, u32 flags) if (rc == -EBUSY) { if (hdev->device_fini_pending) { dev_crit(hdev->dev, - "Failed to kill all open processes, stopping hard reset\n"); + "%s Failed to kill all open processes, stopping hard reset\n", + dev_name(&(hdev)->pdev->dev)); goto out_err; } @@ -1576,7 +1580,8 @@ int hl_device_reset(struct hl_device *hdev, u32 flags) if (rc) { dev_crit(hdev->dev, - "Failed to kill all open processes, stopping hard reset\n"); + "%s Failed to kill all open processes, stopping hard reset\n", + dev_name(&(hdev)->pdev->dev)); goto out_err; } @@ -1627,14 +1632,16 @@ int hl_device_reset(struct hl_device *hdev, u32 flags) * ensure driver puts the driver in a unusable state */ dev_crit(hdev->dev, - "Consecutive FW fatal errors received, stopping hard reset\n"); + "%s Consecutive FW fatal errors received, stopping hard reset\n", + dev_name(&(hdev)->pdev->dev)); rc = -EIO; goto out_err; } if (hdev->kernel_ctx) { dev_crit(hdev->dev, - "kernel ctx was alive during hard reset, something is terribly wrong\n"); + "%s kernel ctx was alive during hard reset, something is terribly wrong\n", + dev_name(&(hdev)->pdev->dev)); rc = -EBUSY; goto out_err; } @@ -1732,7 +1739,7 @@ int hl_device_reset(struct hl_device *hdev, u32 flags) rc = hdev->asic_funcs->scrub_device_mem(hdev); if (rc) { dev_err(hdev->dev, "scrub mem failed from device reset (%d)\n", rc); - return rc; + goto out_err; } spin_lock(&hdev->reset_info.lock); @@ -1752,9 +1759,13 @@ int hl_device_reset(struct hl_device *hdev, u32 flags) hdev->reset_info.needs_reset = false; if (hard_reset) - dev_info(hdev->dev, "Successfully finished resetting the device\n"); + dev_info(hdev->dev, + "Successfully finished resetting the %s device\n", + dev_name(&(hdev)->pdev->dev)); else - dev_dbg(hdev->dev, "Successfully finished 
resetting the device\n"); + dev_dbg(hdev->dev, + "Successfully finished resetting the %s device\n", + dev_name(&(hdev)->pdev->dev)); if (hard_reset) { hdev->reset_info.hard_reset_cnt++; @@ -1789,7 +1800,9 @@ int hl_device_reset(struct hl_device *hdev, u32 flags) hdev->reset_info.in_compute_reset = 0; if (hard_reset) { - dev_err(hdev->dev, "Failed to reset! Device is NOT usable\n"); + dev_err(hdev->dev, + "%s Failed to reset! Device is NOT usable\n", + dev_name(&(hdev)->pdev->dev)); hdev->reset_info.hard_reset_cnt++; } else if (reset_upon_device_release) { spin_unlock(&hdev->reset_info.lock); @@ -1870,6 +1883,8 @@ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask) hl_ctx_put(ctx); + hl_abort_waitings_for_completion(hdev); + return 0; device_reset: @@ -2186,7 +2201,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) } dev_notice(hdev->dev, - "Successfully added device to habanalabs driver\n"); + "Successfully added device %s to habanalabs driver\n", + dev_name(&(hdev)->pdev->dev)); hdev->init_done = true; @@ -2235,11 +2251,11 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) device_cdev_sysfs_add(hdev); if (hdev->pdev) dev_err(&hdev->pdev->dev, - "Failed to initialize hl%d. Device is NOT usable !\n", - hdev->cdev_idx); + "Failed to initialize hl%d. Device %s is NOT usable !\n", + hdev->cdev_idx, dev_name(&(hdev)->pdev->dev)); else - pr_err("Failed to initialize hl%d. Device is NOT usable !\n", - hdev->cdev_idx); + pr_err("Failed to initialize hl%d. Device %s is NOT usable !\n", + hdev->cdev_idx, dev_name(&(hdev)->pdev->dev)); return rc; } @@ -2295,7 +2311,8 @@ void hl_device_fini(struct hl_device *hdev) if (ktime_compare(ktime_get(), timeout) > 0) { dev_crit(hdev->dev, - "Failed to remove device because reset function did not finish\n"); + "%s Failed to remove device because reset function did not finish\n", + dev_name(&(hdev)->pdev->dev)); return; } } @@ -2363,7 +2380,7 @@ void hl_device_fini(struct hl_device *hdev) hl_mmu_fini(hdev); - vfree(hdev->captured_err_info.pgf_info.user_mappings); + vfree(hdev->captured_err_info.page_fault_info.user_mappings); hl_eq_fini(hdev, &hdev->event_queue); @@ -2402,7 +2419,12 @@ void hl_device_fini(struct hl_device *hdev) */ inline u32 hl_rreg(struct hl_device *hdev, u32 reg) { - return readl(hdev->rmmio + reg); + u32 val = readl(hdev->rmmio + reg); + + if (unlikely(trace_habanalabs_rreg32_enabled())) + trace_habanalabs_rreg32(hdev->dev, reg, val); + + return val; } /* @@ -2417,12 +2439,17 @@ inline u32 hl_rreg(struct hl_device *hdev, u32 reg) */ inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val) { + if (unlikely(trace_habanalabs_wreg32_enabled())) + trace_habanalabs_wreg32(hdev->dev, reg, val); + writel(val, hdev->rmmio + reg); } void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines, u8 flags) { + struct razwi_info *razwi_info = &hdev->captured_err_info.razwi_info; + if (num_of_engines > HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR) { dev_err(hdev->dev, "Number of possible razwi initiators (%u) exceeded limit (%u)\n", @@ -2431,15 +2458,17 @@ void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_ } /* In case it's the first razwi since the device was opened, capture its parameters */ - if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info_recorded, 0, 1)) + if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info.razwi_detected, 0, 1)) return; - hdev->captured_err_info.razwi.timestamp = ktime_to_ns(ktime_get()); - 
hdev->captured_err_info.razwi.addr = addr; - hdev->captured_err_info.razwi.num_of_possible_engines = num_of_engines; - memcpy(&hdev->captured_err_info.razwi.engine_id[0], &engine_id[0], + razwi_info->razwi.timestamp = ktime_to_ns(ktime_get()); + razwi_info->razwi.addr = addr; + razwi_info->razwi.num_of_possible_engines = num_of_engines; + memcpy(&razwi_info->razwi.engine_id[0], &engine_id[0], num_of_engines * sizeof(u16)); - hdev->captured_err_info.razwi.flags = flags; + razwi_info->razwi.flags = flags; + + razwi_info->razwi_info_available = true; } void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines, @@ -2453,7 +2482,7 @@ void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_o static void hl_capture_user_mappings(struct hl_device *hdev, bool is_pmmu) { - struct page_fault_info *pgf_info = &hdev->captured_err_info.pgf_info; + struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info; struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; struct hl_vm_hash_node *hnode; struct hl_userptr *userptr; @@ -2515,14 +2544,18 @@ static void hl_capture_user_mappings(struct hl_device *hdev, bool is_pmmu) void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu) { + struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info; + /* Capture only the first page fault */ - if (atomic_cmpxchg(&hdev->captured_err_info.pgf_info_recorded, 0, 1)) + if (atomic_cmpxchg(&pgf_info->page_fault_detected, 0, 1)) return; - hdev->captured_err_info.pgf_info.pgf.timestamp = ktime_to_ns(ktime_get()); - hdev->captured_err_info.pgf_info.pgf.addr = addr; - hdev->captured_err_info.pgf_info.pgf.engine_id = eng_id; + pgf_info->page_fault.timestamp = ktime_to_ns(ktime_get()); + pgf_info->page_fault.addr = addr; + pgf_info->page_fault.engine_id = eng_id; hl_capture_user_mappings(hdev, is_pmmu); + + pgf_info->page_fault_info_available = true; } void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu, diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/accel/habanalabs/common/firmware_if.c similarity index 93% rename from drivers/misc/habanalabs/common/firmware_if.c rename to drivers/accel/habanalabs/common/firmware_if.c index 228b92278e480b..da892d8fb3d6db 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/accel/habanalabs/common/firmware_if.c @@ -14,8 +14,32 @@ #include #include +#include + #define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */ +static char *comms_cmd_str_arr[COMMS_INVLD_LAST] = { + [COMMS_NOOP] = __stringify(COMMS_NOOP), + [COMMS_CLR_STS] = __stringify(COMMS_CLR_STS), + [COMMS_RST_STATE] = __stringify(COMMS_RST_STATE), + [COMMS_PREP_DESC] = __stringify(COMMS_PREP_DESC), + [COMMS_DATA_RDY] = __stringify(COMMS_DATA_RDY), + [COMMS_EXEC] = __stringify(COMMS_EXEC), + [COMMS_RST_DEV] = __stringify(COMMS_RST_DEV), + [COMMS_GOTO_WFE] = __stringify(COMMS_GOTO_WFE), + [COMMS_SKIP_BMC] = __stringify(COMMS_SKIP_BMC), + [COMMS_PREP_DESC_ELBI] = __stringify(COMMS_PREP_DESC_ELBI), +}; + +static char *comms_sts_str_arr[COMMS_STS_INVLD_LAST] = { + [COMMS_STS_NOOP] = __stringify(COMMS_STS_NOOP), + [COMMS_STS_ACK] = __stringify(COMMS_STS_ACK), + [COMMS_STS_OK] = __stringify(COMMS_STS_OK), + [COMMS_STS_ERR] = __stringify(COMMS_STS_ERR), + [COMMS_STS_VALID_ERR] = __stringify(COMMS_STS_VALID_ERR), + [COMMS_STS_TIMEOUT_ERR] = __stringify(COMMS_STS_TIMEOUT_ERR), +}; + static char *extract_fw_ver_from_str(const char *fw_str) { char *str, *fw_ver, 
*whitespace; @@ -311,7 +335,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, dev_dbg(hdev->dev, "Device CPU packet timeout (0x%x) due to FW reset\n", tmp); else - dev_err(hdev->dev, "Device CPU packet timeout (0x%x)\n", tmp); + dev_err(hdev->dev, "Device CPU packet timeout (status = 0x%x)\n", tmp); hdev->device_cpu_disabled = true; goto out; } @@ -1322,13 +1346,12 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status) break; default: dev_err(hdev->dev, - "Device boot progress - Invalid status code %d\n", - status); + "Device boot progress - Invalid or unexpected status code %d\n", status); break; } } -static int hl_fw_wait_preboot_ready(struct hl_device *hdev) +int hl_fw_wait_preboot_ready(struct hl_device *hdev) { struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load; u32 status; @@ -1353,8 +1376,8 @@ static int hl_fw_wait_preboot_ready(struct hl_device *hdev) pre_fw_load->wait_for_preboot_timeout); if (rc) { - dev_err(hdev->dev, "CPU boot ready status timeout\n"); detect_cpu_boot_status(hdev, status); + dev_err(hdev->dev, "CPU boot ready timeout (status = %d)\n", status); /* If we read all FF, then something is totally wrong, no point * of reading specific errors @@ -1634,6 +1657,7 @@ static void hl_fw_dynamic_send_cmd(struct hl_device *hdev, val = FIELD_PREP(COMMS_COMMAND_CMD_MASK, cmd); val |= FIELD_PREP(COMMS_COMMAND_SIZE_MASK, size); + trace_habanalabs_comms_send_cmd(hdev->dev, comms_cmd_str_arr[cmd]); WREG32(le32_to_cpu(dyn_regs->kmd_msg_to_cpu), val); } @@ -1691,6 +1715,8 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev, dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs; + trace_habanalabs_comms_wait_status(hdev->dev, comms_sts_str_arr[expected_status]); + /* Wait for expected status */ rc = hl_poll_timeout( hdev, @@ -1706,6 +1732,8 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev, return -EIO; } + trace_habanalabs_comms_wait_status_done(hdev->dev, comms_sts_str_arr[expected_status]); + /* * skip storing FW response for NOOP to preserve the actual desired * FW status @@ -1778,6 +1806,8 @@ int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev, { int rc; + trace_habanalabs_comms_protocol_cmd(hdev->dev, comms_cmd_str_arr[cmd]); + /* first send clear command to clean former commands */ rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader); if (rc) @@ -1884,7 +1914,7 @@ static int hl_fw_dynamic_validate_memory_bound(struct hl_device *hdev, * * @hdev: pointer to the habanalabs device structure * @fw_loader: managing structure for loading device's FW - * @fw_desc: the descriptor form FW + * @fw_desc: the descriptor from FW * * @return 0 on success, otherwise non-zero error code */ @@ -1901,11 +1931,11 @@ static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev, int rc; if (le32_to_cpu(fw_desc->header.magic) != HL_COMMS_DESC_MAGIC) - dev_warn(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n", + dev_dbg(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n", fw_desc->header.magic); if (fw_desc->header.version != HL_COMMS_DESC_VER) - dev_warn(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n", + dev_dbg(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n", fw_desc->header.version); /* @@ -1976,6 +2006,43 @@ static int hl_fw_dynamic_validate_response(struct hl_device *hdev, return rc; } +/* + * hl_fw_dynamic_read_descriptor_msg - read and print the ASCII messages sent by the FW + * + * @hdev: pointer to the habanalabs device structure + 
* @fw_desc: the descriptor from FW + */ +static void hl_fw_dynamic_read_descriptor_msg(struct hl_device *hdev, + struct lkd_fw_comms_desc *fw_desc) +{ + int i; + char *msg; + + for (i = 0 ; i < LKD_FW_ASCII_MSG_MAX ; i++) { + if (!fw_desc->ascii_msg[i].valid) + return; + + /* force NULL termination */ + msg = fw_desc->ascii_msg[i].msg; + msg[LKD_FW_ASCII_MSG_MAX_LEN - 1] = '\0'; + + switch (fw_desc->ascii_msg[i].msg_lvl) { + case LKD_FW_ASCII_MSG_ERR: + dev_err(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg); + break; + case LKD_FW_ASCII_MSG_WRN: + dev_warn(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg); + break; + case LKD_FW_ASCII_MSG_INF: + dev_info(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg); + break; + default: + dev_dbg(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg); + break; + } + } +} + /** * hl_fw_dynamic_read_and_validate_descriptor - read and validate FW descriptor * @@ -1988,9 +2055,10 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev, struct fw_load_mgr *fw_loader) { struct lkd_fw_comms_desc *fw_desc; - void __iomem *src, *temp_fw_desc; struct pci_mem_region *region; struct fw_response *response; + void *temp_fw_desc; + void __iomem *src; u16 fw_data_size; enum pci_region region_id; int rc; @@ -2039,6 +2107,10 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev, rc = hl_fw_dynamic_validate_descriptor(hdev, fw_loader, (struct lkd_fw_comms_desc *) temp_fw_desc); + + if (!rc) + hl_fw_dynamic_read_descriptor_msg(hdev, temp_fw_desc); + vfree(temp_fw_desc); return rc; @@ -2354,7 +2426,7 @@ static int hl_fw_dynamic_wait_for_boot_fit_active(struct hl_device *hdev, hdev->fw_poll_interval_usec, dyn_loader->wait_for_bl_timeout); if (rc) { - dev_err(hdev->dev, "failed to wait for boot\n"); + dev_err(hdev->dev, "failed to wait for boot (status = %d)\n", status); return rc; } @@ -2381,7 +2453,7 @@ static int hl_fw_dynamic_wait_for_linux_active(struct hl_device *hdev, hdev->fw_poll_interval_usec, fw_loader->cpu_timeout); if (rc) { - dev_err(hdev->dev, "failed to wait for Linux\n"); + dev_err(hdev->dev, "failed to wait for Linux (status = %d)\n", status); return rc; } @@ -2459,51 +2531,54 @@ static void hl_fw_linux_update_state(struct hl_device *hdev, static int hl_fw_dynamic_send_msg(struct hl_device *hdev, struct fw_load_mgr *fw_loader, u8 msg_type, void *data) { - struct lkd_msg_comms msg; + struct lkd_msg_comms *msg; int rc; - memset(&msg, 0, sizeof(msg)); + msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; /* create message to be sent */ - msg.header.type = msg_type; - msg.header.size = cpu_to_le16(sizeof(struct comms_msg_header)); - msg.header.magic = cpu_to_le32(HL_COMMS_MSG_MAGIC); + msg->header.type = msg_type; + msg->header.size = cpu_to_le16(sizeof(struct comms_msg_header)); + msg->header.magic = cpu_to_le32(HL_COMMS_MSG_MAGIC); switch (msg_type) { case HL_COMMS_RESET_CAUSE_TYPE: - msg.reset_cause = *(__u8 *) data; + msg->reset_cause = *(__u8 *) data; break; default: dev_err(hdev->dev, "Send COMMS message - invalid message type %u\n", msg_type); - return -EINVAL; + rc = -EINVAL; + goto out; } rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, sizeof(struct lkd_msg_comms)); if (rc) - return rc; + goto out; /* copy message to space allocated by FW */ - rc = hl_fw_dynamic_copy_msg(hdev, &msg, fw_loader); + rc = hl_fw_dynamic_copy_msg(hdev, msg, fw_loader); if (rc) - return rc; + goto out; rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY, 0, true, fw_loader->cpu_timeout); if 
(rc) - return rc; + goto out; rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC, 0, true, fw_loader->cpu_timeout); - if (rc) - return rc; - return 0; +out: + kfree(msg); + return rc; } /** @@ -2560,13 +2635,43 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev, } if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) { + struct lkd_fw_binning_info *binning_info; + rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, 0); if (rc) goto protocol_err; /* read preboot version */ - return hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT, + rc = hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT, fw_loader->dynamic_loader.comm_desc.cur_fw_ver); + + if (rc) + return rc; + + /* read binning info from preboot */ + if (hdev->support_preboot_binning) { + binning_info = &fw_loader->dynamic_loader.comm_desc.binning_info; + hdev->tpc_binning = le64_to_cpu(binning_info->tpc_mask_l); + hdev->dram_binning = le32_to_cpu(binning_info->dram_mask); + hdev->edma_binning = le32_to_cpu(binning_info->edma_mask); + hdev->decoder_binning = le32_to_cpu(binning_info->dec_mask); + hdev->rotator_binning = le32_to_cpu(binning_info->rot_mask); + + rc = hdev->asic_funcs->set_dram_properties(hdev); + if (rc) + return rc; + + rc = hdev->asic_funcs->set_binning_masks(hdev); + if (rc) + return rc; + + dev_dbg(hdev->dev, + "Read binning masks: tpc: 0x%llx, dram: 0x%llx, edma: 0x%x, dec: 0x%x, rot:0x%x\n", + hdev->tpc_binning, hdev->dram_binning, hdev->edma_binning, + hdev->decoder_binning, hdev->rotator_binning); + } + + return 0; } /* load boot fit to FW */ @@ -2687,7 +2792,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev, if (rc) { dev_dbg(hdev->dev, - "No boot fit request received, resuming boot\n"); + "No boot fit request received (status = %d), resuming boot\n", status); } else { rc = hdev->asic_funcs->load_boot_fit_to_device(hdev); if (rc) @@ -2710,7 +2815,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev, if (rc) { dev_err(hdev->dev, - "Timeout waiting for boot fit load ack\n"); + "Timeout waiting for boot fit load ack (status = %d)\n", status); goto out; } @@ -2788,7 +2893,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev, if (rc) { dev_err(hdev->dev, - "Failed to get ACK on skipping BMC, %d\n", + "Failed to get ACK on skipping BMC (status = %d)\n", status); WREG32(msg_to_cpu_reg, KMD_MSG_NA); rc = -EIO; @@ -2815,7 +2920,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev, "Device reports FIT image is corrupted\n"); else dev_err(hdev->dev, - "Failed to load firmware to device, %d\n", + "Failed to load firmware to device (status = %d)\n", status); rc = -EIO; @@ -3043,3 +3148,27 @@ int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_in sizeof(struct cpucp_sec_attest_info), nonce, HL_CPUCP_SEC_ATTEST_INFO_TINEOUT_USEC); } + +int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type sub_opcode, + dma_addr_t buff, u32 *size) +{ + struct cpucp_packet pkt = {0}; + u64 result; + int rc = 0; + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_GENERIC_PASSTHROUGH << CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.addr = cpu_to_le64(buff); + pkt.data_max_size = cpu_to_le32(*size); + pkt.pkt_subidx = cpu_to_le32(sub_opcode); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)&pkt, sizeof(pkt), + HL_CPUCP_INFO_TIMEOUT_USEC, &result); + if (rc) + dev_err(hdev->dev, "failed to send CPUCP data of generic fw pkt\n"); + else + dev_dbg(hdev->dev, "generic pkt was successful, result: 0x%llx\n", result); + + *size = (u32)result; + + 
return rc; +} diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h similarity index 97% rename from drivers/misc/habanalabs/common/habanalabs.h rename to drivers/accel/habanalabs/common/habanalabs.h index e2527d976ee052..fa05e76d3d21aa 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/accel/habanalabs/common/habanalabs.h @@ -11,7 +11,7 @@ #include "../include/common/cpucp_if.h" #include "../include/common/qman_if.h" #include "../include/hw_ip/mmu/mmu_general.h" -#include <uapi/misc/habanalabs.h> +#include <uapi/drm/habanalabs_accel.h> #include #include @@ -29,6 +29,8 @@ #include #include +#include "security.h" + #define HL_NAME "habanalabs" struct hl_device; @@ -375,7 +377,8 @@ enum hl_cs_type { CS_TYPE_COLLECTIVE_WAIT, CS_RESERVE_SIGNALS, CS_UNRESERVE_SIGNALS, - CS_TYPE_ENGINE_CORE + CS_TYPE_ENGINE_CORE, + CS_TYPE_FLUSH_PCI_HBW_WRITES, }; /* @@ -545,6 +548,8 @@ struct hl_hints_range { /** * struct asic_fixed_properties - ASIC specific immutable properties. * @hw_queues_props: H/W queues properties. + * @special_blocks: points to an array containing special blocks info. + * @skip_special_blocks_cfg: special blocks skip configs. * @cpucp_info: received various information from CPU-CP regarding the H/W, e.g. * available sensors. * @uboot_ver: F/W U-boot version. @@ -644,6 +649,10 @@ struct hl_hints_range { * (i.e. the DRAM supports multiple page sizes), otherwise * it will shall be equal to dram_page_size. * @num_engine_cores: number of engine cpu cores + * @num_of_special_blocks: special_blocks array size. + * @glbl_err_cause_num: global err cause number. + * @hbw_flush_reg: register to read to generate HBW flush. A value of 0 means HBW flush is + * not supported. * @collective_first_sob: first sync object available for collective use * @collective_first_mon: first monitor available for collective use * @sync_stream_first_sob: first sync object available for sync stream use @@ -692,6 +701,8 @@ struct hl_hints_range { */ struct asic_fixed_properties { struct hw_queue_properties *hw_queues_props; + struct hl_special_block_info *special_blocks; + struct hl_skip_blocks_cfg skip_special_blocks_cfg; struct cpucp_info cpucp_info; char uboot_ver[VERSION_MAX_LEN]; char preboot_ver[VERSION_MAX_LEN]; @@ -764,6 +775,9 @@ struct asic_fixed_properties { u32 xbar_edge_enabled_mask; u32 device_mem_alloc_default_page_size; u32 num_engine_cores; + u32 num_of_special_blocks; + u32 glbl_err_cause_num; + u32 hbw_flush_reg; u16 collective_first_sob; u16 collective_first_mon; u16 sync_stream_first_sob; @@ -935,6 +949,7 @@ struct hl_mmap_mem_buf { * @size: holds the CB's size. * @roundup_size: holds the cb size after roundup to page size. * @cs_cnt: holds number of CS that this CB participates in. + * @is_handle_destroyed: atomic boolean indicating whether or not the CB handle was destroyed. * @is_pool: true if CB was acquired from the pool, false otherwise. * @is_internal: internally allocated * @is_mmu_mapped: true if the CB is mapped to the device's MMU. 
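The @is_handle_destroyed field documented above backs the atomic_cmpxchg() guard added to hl_cb_destroy() earlier in this patch: the first caller to flip the flag from 0 to 1 proceeds with the teardown, and every later caller for the same handle gets -EINVAL instead of a double destroy. A condensed sketch of that destroy-once idiom, with hypothetical names (the real code additionally takes a reference on the CB around the exchange and resets the flag when a pooled CB is recycled):

#include <linux/atomic.h>
#include <linux/errno.h>

struct obj {
	atomic_t destroyed;	/* 0 = live, 1 = already torn down */
};

static int obj_destroy_once(struct obj *o)
{
	/* atomic_cmpxchg() returns the old value, so only the caller
	 * that wins the 0 -> 1 exchange sees 0 and may free the object;
	 * every other racer observes 1 and bails out. */
	if (atomic_cmpxchg(&o->destroyed, 0, 1))
		return -EINVAL;

	/* actual teardown runs here, exactly once */
	return 0;
}
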
@@ -951,6 +966,7 @@ struct hl_cb { u32 size; u32 roundup_size; atomic_t cs_cnt; + atomic_t is_handle_destroyed; u8 is_pool; u8 is_internal; u8 is_mmu_mapped; @@ -1077,20 +1093,25 @@ struct hl_cq { atomic_t free_slots_cnt; }; +enum hl_user_interrupt_type { + HL_USR_INTERRUPT_CQ = 0, + HL_USR_INTERRUPT_DECODER, +}; + /** * struct hl_user_interrupt - holds user interrupt information * @hdev: pointer to the device structure + * @type: user interrupt type * @wait_list_head: head to the list of user threads pending on this interrupt * @wait_list_lock: protects wait_list_head * @interrupt_id: msix interrupt id - * @is_decoder: whether this entry represents a decoder interrupt */ struct hl_user_interrupt { - struct hl_device *hdev; - struct list_head wait_list_head; - spinlock_t wait_list_lock; - u32 interrupt_id; - bool is_decoder; + struct hl_device *hdev; + enum hl_user_interrupt_type type; + struct list_head wait_list_head; + spinlock_t wait_list_lock; + u32 interrupt_id; }; /** @@ -1540,8 +1561,10 @@ struct engines_data { * @check_if_razwi_happened: check if there was a razwi due to RR violation. * @access_dev_mem: access device memory * @set_dram_bar_base: set the base of the DRAM BAR - * @set_engine_cores: set a config command to enigne cores + * @set_engine_cores: set a config command to engine cores * @send_device_activity: indication to FW about device availability + * @set_dram_properties: set DRAM related properties. + * @set_binning_masks: set binning/enable masks for all relevant components. */ struct hl_asic_funcs { int (*early_init)(struct hl_device *hdev); @@ -1679,6 +1702,8 @@ struct hl_asic_funcs { int (*set_engine_cores)(struct hl_device *hdev, u32 *core_ids, u32 num_cores, u32 core_command); int (*send_device_activity)(struct hl_device *hdev, bool open); + int (*set_dram_properties)(struct hl_device *hdev); + int (*set_binning_masks)(struct hl_device *hdev); }; @@ -1739,8 +1764,9 @@ struct hl_cs_counters_atomic { * struct hl_dmabuf_priv - a dma-buf private object. * @dmabuf: pointer to dma-buf object. * @ctx: pointer to the dma-buf owner's context. - * @phys_pg_pack: pointer to physical page pack if the dma-buf was exported for - * memory allocation handle. + * @phys_pg_pack: pointer to physical page pack if the dma-buf was exported + * where virtual memory is supported. + * @memhash_hnode: pointer to the memhash node. this object holds the export count. * @device_address: physical address of the device's memory. Relevant only * if phys_pg_pack is NULL (dma-buf was exported from address). * The total size can be taken from the dmabuf object. @@ -1749,6 +1775,7 @@ struct hl_dmabuf_priv { struct dma_buf *dmabuf; struct hl_ctx *ctx; struct hl_vm_phys_pg_pack *phys_pg_pack; + struct hl_vm_hash_node *memhash_hnode; uint64_t device_address; }; @@ -1923,6 +1950,7 @@ struct hl_userptr { * @type: CS_TYPE_*. * @jobs_cnt: counter of submitted jobs on all queues. * @encaps_sig_hdl_id: encaps signals handle id, set for the first staged cs. + * @completion_timestamp: timestamp of the last completed cs job. * @sob_addr_offset: sob offset from the configuration base address. * @initial_sob_count: count of completed signals in SOB before current submission of signal or * cs with encaps signals. 
@@ -1955,6 +1983,7 @@ struct hl_cs { struct list_head staged_cs_node; struct list_head debugfs_list; struct hl_cs_encaps_sig_handle *encaps_sig_hdl; + ktime_t completion_timestamp; u64 sequence; u64 staged_sequence; u64 timeout_jiffies; @@ -1990,6 +2019,7 @@ struct hl_cs { * @debugfs_list: node in debugfs list of command submission jobs. * @refcount: reference counter for usage of the CS job. * @queue_type: the type of the H/W queue this job is submitted to. + * @timestamp: timestamp upon job completion * @id: the id of this job inside a CS. * @hw_queue_id: the id of the H/W queue this job is submitted to. * @user_cb_size: the actual size of the CB we got from the user. @@ -2016,6 +2046,7 @@ struct hl_cs_job { struct list_head debugfs_list; struct kref refcount; enum hl_queue_type queue_type; + ktime_t timestamp; u32 id; u32 hw_queue_id; u32 user_cb_size; @@ -2076,12 +2107,16 @@ struct hl_cs_parser { * hl_userptr). * @node: node to hang on the hash table in context object. * @vaddr: key virtual address. + * @handle: memory handle for device memory allocation. * @ptr: value pointer (hl_vm_phys_pg_list or hl_userptr). + * @export_cnt: number of exports from within the VA block. */ struct hl_vm_hash_node { struct hlist_node node; u64 vaddr; + u64 handle; void *ptr; + int export_cnt; }; /** @@ -2109,10 +2144,10 @@ struct hl_vm_hw_block_list_node { * @pages: the physical page array. * @npages: num physical pages in the pack. * @total_size: total size of all the pages in this list. + * @exported_size: buffer exported size. * @node: used to attach to deletion list that is used when all the allocations are cleared * at the teardown of the context. * @mapping_cnt: number of shared mappings. - * @exporting_cnt: number of dma-buf exporting. * @asid: the context related to this list. * @page_size: size of each page in the pack. * @flags: HL_MEM_* flags related to this list. @@ -2126,9 +2161,9 @@ struct hl_vm_phys_pg_pack { u64 *pages; u64 npages; u64 total_size; + u64 exported_size; struct list_head node; atomic_t mapping_cnt; - u32 exporting_cnt; u32 asid; u32 page_size; u32 flags; @@ -2675,11 +2710,11 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val); p->size = sz; \ }) -#define HL_USR_INTR_STRUCT_INIT(usr_intr, hdev, intr_id, decoder) \ +#define HL_USR_INTR_STRUCT_INIT(usr_intr, hdev, intr_id, intr_type) \ ({ \ usr_intr.hdev = hdev; \ usr_intr.interrupt_id = intr_id; \ - usr_intr.is_decoder = decoder; \ + usr_intr.type = intr_type; \ INIT_LIST_HEAD(&usr_intr.wait_list_head); \ spin_lock_init(&usr_intr.wait_list_lock); \ }) @@ -2961,37 +2996,53 @@ struct undefined_opcode_info { }; /** - * struct page_fault_info - info about page fault - * @pgf_info: page fault information. + * struct page_fault_info - page fault information. + * @page_fault: holds information collected during a page fault. * @user_mappings: buffer containing user mappings. * @num_of_user_mappings: number of user mappings. + * @page_fault_detected: if set to 1, a page fault was discovered for the + * first time after the driver has finished booting-up. + * Since we're looking for the page fault's root cause, + * we don't care about the others that might follow it, + * so once changed to 1, it will remain that way. + * @page_fault_info_available: indicates that page-fault info is now available. 
*/ struct page_fault_info { - struct hl_page_fault_info pgf; + struct hl_page_fault_info page_fault; struct hl_user_mapping *user_mappings; u64 num_of_user_mappings; + atomic_t page_fault_detected; + bool page_fault_info_available; +}; + +/** + * struct razwi_info - RAZWI information. + * @razwi: holds information collected during a RAZWI. + * @razwi_detected: if set to 1, a RAZWI was discovered for the + * first time after the driver has finished booting-up. + * Since we're looking for the RAZWI's root cause, + * we don't care about the others that might follow it, + * so once changed to 1, it will remain that way. + * @razwi_info_available: indicates that RAZWI info is now available. + */ +struct razwi_info { + struct hl_info_razwi_event razwi; + atomic_t razwi_detected; + bool razwi_info_available; }; /** * struct hl_error_info - holds information collected during an error. * @cs_timeout: CS timeout error information. - * @razwi: razwi information. - * @razwi_info_recorded: if set writing to razwi information is enabled. - * otherwise - disabled, so the first (root cause) razwi will not be - * overwritten. - * @undef_opcode: undefined opcode information - * @pgf_info: page fault information. - * @pgf_info_recorded: if set writing to page fault information is enabled. - * otherwise - disabled, so the first (root cause) page fault will not be - * overwritten. + * @razwi_info: RAZWI information. + * @undef_opcode: undefined opcode information. + * @page_fault_info: page fault information. */ struct hl_error_info { struct cs_timeout_info cs_timeout; - struct hl_info_razwi_event razwi; - atomic_t razwi_info_recorded; + struct razwi_info razwi_info; struct undefined_opcode_info undef_opcode; - struct page_fault_info pgf_info; - atomic_t pgf_info_recorded; + struct page_fault_info page_fault_info; }; /** @@ -3157,6 +3208,8 @@ struct hl_reset_info { * @edma_binning: contains mask of edma engines that is received from the f/w which * indicates which edma engines are binned-out * @device_release_watchdog_timeout_sec: device release watchdog timeout value in seconds. + * @rotator_binning: contains mask of rotator engines that is received from the f/w + * which indicates which rotator engines are binned-out (Gaudi3 and above). * @id: device minor. * @id_control: minor of the control device. * @cdev_idx: char device index. Used for setting its name. @@ -3214,6 +3267,7 @@ struct hl_reset_info { * @heartbeat: Controls if we want to enable the heartbeat mechanism vs. the f/w, which verifies * that the f/w is always alive. Used only for testing. * @supports_ctx_switch: true if a ctx switch is required upon first submission. + * @support_preboot_binning: true if we support reading binning info from preboot. 
*/ struct hl_device { struct pci_dev *pdev; @@ -3322,6 +3376,7 @@ struct hl_device { u32 decoder_binning; u32 edma_binning; u32 device_release_watchdog_timeout_sec; + u32 rotator_binning; u16 id; u16 id_control; u16 cdev_idx; @@ -3355,6 +3410,7 @@ struct hl_device { u8 supports_mmu_prefetch; u8 reset_upon_device_release; u8 supports_ctx_switch; + u8 support_preboot_binning; /* Parameters for bring-up */ u64 nic_ports_mask; @@ -3729,6 +3785,7 @@ int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power); void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev); void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev); int hl_fw_init_cpu(struct hl_device *hdev); +int hl_fw_wait_preboot_ready(struct hl_device *hdev); int hl_fw_read_preboot_status(struct hl_device *hdev); int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev, struct fw_load_mgr *fw_loader, @@ -3772,6 +3829,8 @@ int hl_fw_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk); void hl_fw_set_pll_profile(struct hl_device *hdev); void hl_sysfs_add_dev_clk_attr(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp); void hl_sysfs_add_dev_vrm_attr(struct hl_device *hdev, struct attribute_group *dev_vrm_attr_grp); +int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type sub_opcode, + dma_addr_t buff, u32 *size); void hw_sob_get(struct hl_hw_sob *hw_sob); void hw_sob_put(struct hl_hw_sob *hw_sob); @@ -3786,6 +3845,7 @@ void hl_dec_fini(struct hl_device *hdev); void hl_dec_ctx_fini(struct hl_ctx *ctx); void hl_release_pending_user_interrupts(struct hl_device *hdev); +void hl_abort_waitings_for_completion(struct hl_device *hdev); int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx, struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig); diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c similarity index 98% rename from drivers/misc/habanalabs/common/habanalabs_drv.c rename to drivers/accel/habanalabs/common/habanalabs_drv.c index 7815c60df54e23..03dae57dc83863 100644 --- a/drivers/misc/habanalabs/common/habanalabs_drv.c +++ b/drivers/accel/habanalabs/common/habanalabs_drv.c @@ -222,9 +222,11 @@ int hl_device_open(struct inode *inode, struct file *filp) hl_debugfs_add_file(hpriv); atomic_set(&hdev->captured_err_info.cs_timeout.write_enable, 1); - atomic_set(&hdev->captured_err_info.razwi_info_recorded, 0); - atomic_set(&hdev->captured_err_info.pgf_info_recorded, 0); + atomic_set(&hdev->captured_err_info.razwi_info.razwi_detected, 0); + atomic_set(&hdev->captured_err_info.page_fault_info.page_fault_detected, 0); hdev->captured_err_info.undef_opcode.write_enable = true; + hdev->captured_err_info.razwi_info.razwi_info_available = false; + hdev->captured_err_info.page_fault_info.page_fault_info_available = false; hdev->open_counter++; hdev->last_successful_open_jif = jiffies; diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c similarity index 93% rename from drivers/misc/habanalabs/common/habanalabs_ioctl.c rename to drivers/accel/habanalabs/common/habanalabs_ioctl.c index b6abfa7761a70e..5005e6fca69123 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c @@ -7,7 +7,7 @@ #define pr_fmt(fmt) "habanalabs: " fmt -#include <uapi/misc/habanalabs.h> +#include <uapi/drm/habanalabs_accel.h> #include "habanalabs.h" #include @@ -607,16 +607,20 @@ static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 
static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args) { + void __user *out = (void __user *) (uintptr_t) args->return_pointer; struct hl_device *hdev = hpriv->hdev; u32 max_size = args->return_size; - struct hl_info_razwi_event *info = &hdev->captured_err_info.razwi; - void __user *out = (void __user *) (uintptr_t) args->return_pointer; + struct razwi_info *razwi_info; if ((!max_size) || (!out)) return -EINVAL; - return copy_to_user(out, info, min_t(size_t, max_size, sizeof(struct hl_info_razwi_event))) - ? -EFAULT : 0; + razwi_info = &hdev->captured_err_info.razwi_info; + if (!razwi_info->razwi_info_available) + return 0; + + return copy_to_user(out, &razwi_info->razwi, + min_t(size_t, max_size, sizeof(struct hl_info_razwi_event))) ? -EFAULT : 0; } static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args) @@ -786,16 +790,20 @@ static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args) static int page_fault_info(struct hl_fpriv *hpriv, struct hl_info_args *args) { + void __user *out = (void __user *) (uintptr_t) args->return_pointer; struct hl_device *hdev = hpriv->hdev; u32 max_size = args->return_size; - struct hl_page_fault_info *info = &hdev->captured_err_info.pgf_info.pgf; - void __user *out = (void __user *) (uintptr_t) args->return_pointer; + struct page_fault_info *pgf_info; if ((!max_size) || (!out)) return -EINVAL; - return copy_to_user(out, info, min_t(size_t, max_size, sizeof(struct hl_page_fault_info))) - ? -EFAULT : 0; + pgf_info = &hdev->captured_err_info.page_fault_info; + if (!pgf_info->page_fault_info_available) + return 0; + + return copy_to_user(out, &pgf_info->page_fault, + min_t(size_t, max_size, sizeof(struct hl_page_fault_info))) ? -EFAULT : 0; } static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args) @@ -806,18 +814,68 @@ static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args) struct page_fault_info *pgf_info; u64 actual_size; - pgf_info = &hdev->captured_err_info.pgf_info; - args->array_size = pgf_info->num_of_user_mappings; - if (!out) return -EINVAL; + pgf_info = &hdev->captured_err_info.page_fault_info; + if (!pgf_info->page_fault_info_available) + return 0; + + args->array_size = pgf_info->num_of_user_mappings; + actual_size = pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping); if (user_buf_size < actual_size) return -ENOMEM; - return copy_to_user(out, pgf_info->user_mappings, min_t(size_t, user_buf_size, actual_size)) - ? -EFAULT : 0; + return copy_to_user(out, pgf_info->user_mappings, actual_size) ? 
-EFAULT : 0; +} + +static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *info_args) +{ + void __user *buff = (void __user *) (uintptr_t) info_args->return_pointer; + u32 size = info_args->return_size; + dma_addr_t dma_handle; + bool need_input_buff; + void *fw_buff; + int rc = 0; + + switch (info_args->fw_sub_opcode) { + case HL_PASSTHROUGH_VERSIONS: + need_input_buff = false; + break; + default: + return -EINVAL; + } + + if (size > SZ_1M) { + dev_err(hdev->dev, "buffer size cannot exceed 1MB\n"); + return -EINVAL; + } + + fw_buff = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_handle); + if (!fw_buff) + return -ENOMEM; + + + if (need_input_buff && copy_from_user(fw_buff, buff, size)) { + dev_dbg(hdev->dev, "Failed to copy from user FW buff\n"); + rc = -EFAULT; + goto free_buff; + } + + rc = hl_fw_send_generic_request(hdev, info_args->fw_sub_opcode, dma_handle, &size); + if (rc) + goto free_buff; + + if (copy_to_user(buff, fw_buff, min(size, info_args->return_size))) { + dev_dbg(hdev->dev, "Failed to copy to user FW generic req output\n"); + rc = -EFAULT; + } + +free_buff: + hl_cpu_accessible_dma_pool_free(hdev, info_args->return_size, fw_buff); + + return rc; } static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, @@ -826,9 +884,13 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, enum hl_device_status status; struct hl_info_args *args = data; struct hl_device *hdev = hpriv->hdev; - int rc; + if (args->pad) { + dev_dbg(hdev->dev, "Padding bytes must be 0\n"); + return -EINVAL; + } + /* * Information is returned for the following opcodes even if the device * is disabled or in reset. @@ -893,7 +955,7 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, } if (!hl_device_operational(hdev, &status)) { - dev_warn_ratelimited(dev, + dev_dbg_ratelimited(dev, "Device is %s. Can't execute INFO IOCTL\n", hdev->status[status]); return -EBUSY; @@ -947,6 +1009,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, case HL_INFO_ENGINE_STATUS: return engine_status_info(hpriv, args); + case HL_INFO_FW_GENERIC_REQ: + return send_fw_generic_request(hdev, args); + default: dev_err(dev, "Invalid request %d\n", args->op); rc = -EINVAL; @@ -975,7 +1040,7 @@ static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data) int rc = 0; if (!hl_device_operational(hdev, &status)) { - dev_warn_ratelimited(hdev->dev, + dev_dbg_ratelimited(hdev->dev, "Device is %s. 
Can't execute DEBUG IOCTL\n", hdev->status[status]); return -EBUSY; @@ -1072,8 +1137,6 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg, retcode = -EFAULT; goto out_err; } - } else if (cmd & IOC_OUT) { - memset(kdata, 0, usize); } retcode = func(hpriv, kdata); diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/accel/habanalabs/common/hw_queue.c similarity index 100% rename from drivers/misc/habanalabs/common/hw_queue.c rename to drivers/accel/habanalabs/common/hw_queue.c diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/accel/habanalabs/common/hwmon.c similarity index 100% rename from drivers/misc/habanalabs/common/hwmon.c rename to drivers/accel/habanalabs/common/hwmon.c diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/accel/habanalabs/common/irq.c similarity index 94% rename from drivers/misc/habanalabs/common/irq.c rename to drivers/accel/habanalabs/common/irq.c index 94d537fd4fde52..04844e843a7b08 100644 --- a/drivers/misc/habanalabs/common/irq.c +++ b/drivers/accel/habanalabs/common/irq.c @@ -72,15 +72,17 @@ static void irq_handle_eqe(struct work_struct *work) * @hdev: pointer to device structure * @cs_seq: command submission sequence * @cq: completion queue + * @timestamp: interrupt timestamp * */ -static void job_finish(struct hl_device *hdev, u32 cs_seq, struct hl_cq *cq) +static void job_finish(struct hl_device *hdev, u32 cs_seq, struct hl_cq *cq, ktime_t timestamp) { struct hl_hw_queue *queue; struct hl_cs_job *job; queue = &hdev->kernel_queues[cq->hw_queue_id]; job = queue->shadow_queue[hl_pi_2_offset(cs_seq)]; + job->timestamp = timestamp; queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work); atomic_inc(&queue->ci); @@ -91,9 +93,10 @@ static void job_finish(struct hl_device *hdev, u32 cs_seq, struct hl_cq *cq) * * @hdev: pointer to device structure * @cs_seq: command submission sequence + * @timestamp: interrupt timestamp * */ -static void cs_finish(struct hl_device *hdev, u16 cs_seq) +static void cs_finish(struct hl_device *hdev, u16 cs_seq, ktime_t timestamp) { struct asic_fixed_properties *prop = &hdev->asic_prop; struct hl_hw_queue *queue; @@ -113,6 +116,7 @@ static void cs_finish(struct hl_device *hdev, u16 cs_seq) atomic_inc(&queue->ci); } + cs->completion_timestamp = timestamp; queue_work(hdev->cs_cmplt_wq, &cs->finish_work); } @@ -130,6 +134,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg) bool shadow_index_valid, entry_ready; u16 shadow_index; struct hl_cq_entry *cq_entry, *cq_base; + ktime_t timestamp = ktime_get(); if (hdev->disabled) { dev_dbg(hdev->dev, @@ -171,9 +176,9 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg) if (shadow_index_valid && !hdev->disabled) { if (hdev->asic_prop.completion_mode == HL_COMPLETION_MODE_CS) - cs_finish(hdev, shadow_index); + cs_finish(hdev, shadow_index, timestamp); else - job_finish(hdev, shadow_index, cq); + job_finish(hdev, shadow_index, cq, timestamp); } /* Clear CQ entry ready bit */ @@ -228,7 +233,7 @@ static void hl_ts_free_objects(struct work_struct *work) * list to a dedicated workqueue to do the actual put. 
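 * (The deferral matters because this path is reached from hard-IRQ context:
 * dropping the last reference on a timestamp buffer may sleep while its
 * backing memory is released, so the nodes are only collected here and the
 * actual puts happen later in process context.)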
*/ static int handle_registration_node(struct hl_device *hdev, struct hl_user_pending_interrupt *pend, - struct list_head **free_list) + struct list_head **free_list, ktime_t now) { struct timestamp_reg_free_node *free_node; u64 timestamp; @@ -246,7 +251,7 @@ static int handle_registration_node(struct hl_device *hdev, struct hl_user_pendi if (!free_node) return -ENOMEM; - timestamp = ktime_get_ns(); + timestamp = ktime_to_ns(now); *pend->ts_reg_info.timestamp_kernel_addr = timestamp; @@ -298,7 +303,7 @@ static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interru if (pend->ts_reg_info.buf) { if (!reg_node_handle_fail) { rc = handle_registration_node(hdev, pend, - &ts_reg_free_list_head); + &ts_reg_free_list_head, now); if (rc) reg_node_handle_fail = true; } @@ -333,13 +338,22 @@ irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg) struct hl_user_interrupt *user_int = arg; struct hl_device *hdev = user_int->hdev; - if (user_int->is_decoder) - handle_user_interrupt(hdev, &hdev->common_decoder_interrupt); - else + switch (user_int->type) { + case HL_USR_INTERRUPT_CQ: handle_user_interrupt(hdev, &hdev->common_user_cq_interrupt); - /* Handle user cq or decoder interrupts registered on this specific irq */ - handle_user_interrupt(hdev, user_int); + /* Handle user cq interrupt registered on this specific irq */ + handle_user_interrupt(hdev, user_int); + break; + case HL_USR_INTERRUPT_DECODER: + handle_user_interrupt(hdev, &hdev->common_decoder_interrupt); + + /* Handle decoder interrupt registered on this specific irq */ + handle_user_interrupt(hdev, user_int); + break; + default: + break; + } return IRQ_HANDLED; } diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c similarity index 92% rename from drivers/misc/habanalabs/common/memory.c rename to drivers/accel/habanalabs/common/memory.c index 5e9ae7600d75ee..e6474d38afc491 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/accel/habanalabs/common/memory.c @@ -5,7 +5,7 @@ * All Rights Reserved. */ -#include +#include #include "habanalabs.h" #include "../include/hw_ip/mmu/mmu_general.h" @@ -19,7 +19,9 @@ MODULE_IMPORT_NS(DMA_BUF); #define HL_MMU_DEBUG 0 /* use small pages for supporting non-pow2 (32M/40M/48M) DRAM phys page sizes */ -#define DRAM_POOL_PAGE_SIZE SZ_8M +#define DRAM_POOL_PAGE_SIZE SZ_8M + +#define MEM_HANDLE_INVALID ULONG_MAX static int allocate_timestamps_buffers(struct hl_fpriv *hpriv, struct hl_mem_in *args, u64 *handle); @@ -371,12 +373,6 @@ static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args) return -EINVAL; } - if (phys_pg_pack->exporting_cnt) { - spin_unlock(&vm->idr_lock); - dev_dbg(hdev->dev, "handle %u is exported, cannot free\n", handle); - return -EINVAL; - } - /* must remove from idr before the freeing of the physical pages as the refcount of the pool * is also the trigger of the idr destroy */ @@ -1240,6 +1236,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device hnode->ptr = vm_type; hnode->vaddr = ret_vaddr; + hnode->handle = is_userptr ? 
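+	/* Host (userptr) mappings have no device-memory IDR handle, so a
+	 * sentinel is stored instead; the dma-buf export path later rejects
+	 * such nodes via upper_32_bits(hnode->handle), as valid IDR handles
+	 * fit in 32 bits.
+	 */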
MEM_HANDLE_INVALID : handle;
 
 	mutex_lock(&ctx->mem_hash_lock);
 	hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
@@ -1313,6 +1310,12 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
 		return -EINVAL;
 	}
 
+	if (hnode->export_cnt) {
+		mutex_unlock(&ctx->mem_hash_lock);
+		dev_err(hdev->dev, "failed to unmap %#llx, memory is exported\n", vaddr);
+		return -EINVAL;
+	}
+
 	hash_del(&hnode->node);
 	mutex_unlock(&ctx->mem_hash_lock);
@@ -1545,10 +1548,10 @@ static int set_dma_sg(struct scatterlist *sg, u64 bar_address, u64 chunk_size,
 }
 
 static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64 *pages, u64 npages,
-						u64 page_size, struct device *dev,
-						enum dma_data_direction dir)
+						u64 page_size, u64 exported_size,
+						struct device *dev, enum dma_data_direction dir)
 {
-	u64 chunk_size, bar_address, dma_max_seg_size;
+	u64 chunk_size, bar_address, dma_max_seg_size, cur_size_to_export, cur_npages;
 	struct asic_fixed_properties *prop;
 	int rc, i, j, nents, cur_page;
 	struct scatterlist *sg;
@@ -1574,16 +1577,23 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
 	if (!sgt)
 		return ERR_PTR(-ENOMEM);
 
+	/* remove export size restrictions in case not explicitly defined */
+	cur_size_to_export = exported_size ? exported_size : (npages * page_size);
+
 	/* If the size of each page is larger than the dma max segment size,
 	 * then we can't combine pages and the number of entries in the SGL
 	 * will just be the
 	 * <number of pages> * <number of chunks per page>
 	 */
-	if (page_size > dma_max_seg_size)
-		nents = npages * DIV_ROUND_UP_ULL(page_size, dma_max_seg_size);
-	else
+	if (page_size > dma_max_seg_size) {
+		/* we should limit number of pages according to the exported size */
+		cur_npages = DIV_ROUND_UP_SECTOR_T(cur_size_to_export, page_size);
+		nents = cur_npages * DIV_ROUND_UP_SECTOR_T(page_size, dma_max_seg_size);
+	} else {
+		cur_npages = npages;
+
 		/* Get number of non-contiguous chunks */
-		for (i = 1, nents = 1, chunk_size = page_size ; i < npages ; i++) {
+		for (i = 1, nents = 1, chunk_size = page_size ; i < cur_npages ; i++) {
 			if (pages[i - 1] + page_size != pages[i] ||
 					chunk_size + page_size > dma_max_seg_size) {
 				nents++;
@@ -1593,6 +1603,7 @@
 
 			chunk_size += page_size;
 		}
+	}
 
 	rc = sg_alloc_table(sgt, nents, GFP_KERNEL | __GFP_ZERO);
 	if (rc)
@@ -1615,7 +1626,8 @@
 			else
 				cur_device_address += dma_max_seg_size;
 
-			chunk_size = min(size_left, dma_max_seg_size);
+			/* make sure not to export over exported size */
+			chunk_size = min3(size_left, dma_max_seg_size, cur_size_to_export);
 
 			bar_address = hdev->dram_pci_bar_start + cur_device_address;
 
@@ -1623,6 +1635,8 @@
 			if (rc)
 				goto error_unmap;
 
+			cur_size_to_export -= chunk_size;
+
 			if (size_left > dma_max_seg_size) {
 				size_left -= dma_max_seg_size;
 			} else {
@@ -1634,7 +1648,7 @@
 		/* Merge pages and put them into the scatterlist */
 		for_each_sgtable_dma_sg(sgt, sg, i) {
 			chunk_size = page_size;
-			for (j = cur_page + 1 ; j < npages ; j++) {
+			for (j = cur_page + 1 ; j < cur_npages ; j++) {
 				if (pages[j - 1] + page_size != pages[j] ||
 						chunk_size + page_size > dma_max_seg_size)
 					break;
@@ -1645,10 +1659,13 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
 
 			bar_address = hdev->dram_pci_bar_start +
 					(pages[cur_page] -
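+					/* Worked example of the clamping above
+					 * (hypothetical numbers): exporting 20M
+					 * from contiguous 8M device pages with
+					 * a 16M max segment yields segments of
+					 * 16M and then 4M, as the second one is
+					 * clamped to the remaining
+					 * cur_size_to_export.
+					 */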
prop->dram_base_address); + /* make sure not to export over exported size */ + chunk_size = min(chunk_size, cur_size_to_export); rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir); if (rc) goto error_unmap; + cur_size_to_export -= chunk_size; cur_page = j; } } @@ -1719,6 +1736,7 @@ static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment, phys_pg_pack->pages, phys_pg_pack->npages, phys_pg_pack->page_size, + phys_pg_pack->exported_size, attachment->dev, dir); else @@ -1726,6 +1744,7 @@ static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment, &hl_dmabuf->device_address, 1, hl_dmabuf->dmabuf->size, + 0, attachment->dev, dir); @@ -1763,18 +1782,20 @@ static void hl_unmap_dmabuf(struct dma_buf_attachment *attachment, static void hl_release_dmabuf(struct dma_buf *dmabuf) { struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv; - struct hl_ctx *ctx = hl_dmabuf->ctx; - struct hl_device *hdev = ctx->hdev; - struct hl_vm *vm = &hdev->vm; + struct hl_ctx *ctx; - if (hl_dmabuf->phys_pg_pack) { - spin_lock(&vm->idr_lock); - hl_dmabuf->phys_pg_pack->exporting_cnt--; - spin_unlock(&vm->idr_lock); - } + if (!hl_dmabuf) + return; - hl_ctx_put(hl_dmabuf->ctx); + ctx = hl_dmabuf->ctx; + if (hl_dmabuf->memhash_hnode) { + mutex_lock(&ctx->mem_hash_lock); + hl_dmabuf->memhash_hnode->export_cnt--; + mutex_unlock(&ctx->mem_hash_lock); + } + + hl_ctx_put(ctx); kfree(hl_dmabuf); } @@ -1785,7 +1806,7 @@ static const struct dma_buf_ops habanalabs_dmabuf_ops = { .release = hl_release_dmabuf, }; -static int export_dmabuf_common(struct hl_ctx *ctx, +static int export_dmabuf(struct hl_ctx *ctx, struct hl_dmabuf_priv *hl_dmabuf, u64 total_size, int flags, int *dmabuf_fd) { @@ -1806,7 +1827,7 @@ static int export_dmabuf_common(struct hl_ctx *ctx, fd = dma_buf_fd(hl_dmabuf->dmabuf, flags); if (fd < 0) { - dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf\n"); + dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd); rc = fd; goto err_dma_buf_put; } @@ -1819,36 +1840,13 @@ static int export_dmabuf_common(struct hl_ctx *ctx, return 0; err_dma_buf_put: + hl_dmabuf->dmabuf->priv = NULL; dma_buf_put(hl_dmabuf->dmabuf); return rc; } -/** - * export_dmabuf_from_addr() - export a dma-buf object for the given memory - * address and size. - * @ctx: pointer to the context structure. - * @device_addr: device memory physical address. - * @size: size of device memory. - * @flags: DMA-BUF file/FD flags. - * @dmabuf_fd: pointer to result FD that represents the dma-buf object. - * - * Create and export a dma-buf object for an existing memory allocation inside - * the device memory, and return a FD which is associated with the dma-buf - * object. - * - * Return: 0 on success, non-zero for failure. 
- */ -static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 device_addr, - u64 size, int flags, int *dmabuf_fd) +static int validate_export_params_common(struct hl_device *hdev, u64 device_addr, u64 size) { - struct hl_dmabuf_priv *hl_dmabuf; - struct hl_device *hdev = ctx->hdev; - struct asic_fixed_properties *prop; - u64 bar_address; - int rc; - - prop = &hdev->asic_prop; - if (!IS_ALIGNED(device_addr, PAGE_SIZE)) { dev_dbg(hdev->dev, "exported device memory address 0x%llx should be aligned to 0x%lx\n", @@ -1863,49 +1861,150 @@ static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 device_addr, return -EINVAL; } + return 0; +} + +static int validate_export_params_no_mmu(struct hl_device *hdev, u64 device_addr, u64 size) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 bar_address; + int rc; + + rc = validate_export_params_common(hdev, device_addr, size); + if (rc) + return rc; + if (device_addr < prop->dram_user_base_address || - device_addr + size > prop->dram_end_address || - device_addr + size < device_addr) { + (device_addr + size) > prop->dram_end_address || + (device_addr + size) < device_addr) { dev_dbg(hdev->dev, "DRAM memory range 0x%llx (+0x%llx) is outside of DRAM boundaries\n", device_addr, size); return -EINVAL; } - bar_address = hdev->dram_pci_bar_start + - (device_addr - prop->dram_base_address); + bar_address = hdev->dram_pci_bar_start + (device_addr - prop->dram_base_address); - if (bar_address + size > - hdev->dram_pci_bar_start + prop->dram_pci_bar_size || - bar_address + size < bar_address) { + if ((bar_address + size) > (hdev->dram_pci_bar_start + prop->dram_pci_bar_size) || + (bar_address + size) < bar_address) { dev_dbg(hdev->dev, "DRAM memory range 0x%llx (+0x%llx) is outside of PCI BAR boundaries\n", device_addr, size); return -EINVAL; } - hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL); - if (!hl_dmabuf) - return -ENOMEM; + return 0; +} - hl_dmabuf->device_address = device_addr; +static int validate_export_params(struct hl_device *hdev, u64 device_addr, u64 size, u64 offset, + struct hl_vm_phys_pg_pack *phys_pg_pack) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 bar_address; + int i, rc; - rc = export_dmabuf_common(ctx, hl_dmabuf, size, flags, dmabuf_fd); + rc = validate_export_params_common(hdev, device_addr, size); if (rc) - goto err_free_dmabuf_wrapper; + return rc; + + if ((offset + size) > phys_pg_pack->total_size) { + dev_dbg(hdev->dev, "offset %#llx and size %#llx exceed total map size %#llx\n", + offset, size, phys_pg_pack->total_size); + return -EINVAL; + } + + for (i = 0 ; i < phys_pg_pack->npages ; i++) { + + bar_address = hdev->dram_pci_bar_start + + (phys_pg_pack->pages[i] - prop->dram_base_address); + + if ((bar_address + phys_pg_pack->page_size) > + (hdev->dram_pci_bar_start + prop->dram_pci_bar_size) || + (bar_address + phys_pg_pack->page_size) < bar_address) { + dev_dbg(hdev->dev, + "DRAM memory range 0x%llx (+0x%x) is outside of PCI BAR boundaries\n", + phys_pg_pack->pages[i], + phys_pg_pack->page_size); + + return -EINVAL; + } + } return 0; +} -err_free_dmabuf_wrapper: - kfree(hl_dmabuf); - return rc; +static struct hl_vm_hash_node *memhash_node_export_get(struct hl_ctx *ctx, u64 addr) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm_hash_node *hnode; + + /* get the memory handle */ + mutex_lock(&ctx->mem_hash_lock); + hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)addr) + if (addr == hnode->vaddr) + break; + + if (!hnode) { + mutex_unlock(&ctx->mem_hash_lock); + 
dev_dbg(hdev->dev, "map address %#llx not found\n", addr); + return ERR_PTR(-EINVAL); + } + + if (upper_32_bits(hnode->handle)) { + mutex_unlock(&ctx->mem_hash_lock); + dev_dbg(hdev->dev, "invalid handle %#llx for map address %#llx\n", + hnode->handle, addr); + return ERR_PTR(-EINVAL); + } + + /* + * node found, increase export count so this memory cannot be unmapped + * and the hash node cannot be deleted. + */ + hnode->export_cnt++; + mutex_unlock(&ctx->mem_hash_lock); + + return hnode; +} + +static void memhash_node_export_put(struct hl_ctx *ctx, struct hl_vm_hash_node *hnode) +{ + mutex_lock(&ctx->mem_hash_lock); + hnode->export_cnt--; + mutex_unlock(&ctx->mem_hash_lock); +} + +static struct hl_vm_phys_pg_pack *get_phys_pg_pack_from_hash_node(struct hl_device *hdev, + struct hl_vm_hash_node *hnode) +{ + struct hl_vm_phys_pg_pack *phys_pg_pack; + struct hl_vm *vm = &hdev->vm; + + spin_lock(&vm->idr_lock); + phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, (u32) hnode->handle); + if (!phys_pg_pack) { + spin_unlock(&vm->idr_lock); + dev_dbg(hdev->dev, "no match for handle 0x%x\n", (u32) hnode->handle); + return ERR_PTR(-EINVAL); + } + + spin_unlock(&vm->idr_lock); + + if (phys_pg_pack->vm_type != VM_TYPE_PHYS_PACK) { + dev_dbg(hdev->dev, "handle 0x%llx does not represent DRAM memory\n", hnode->handle); + return ERR_PTR(-EINVAL); + } + + return phys_pg_pack; } /** - * export_dmabuf_from_handle() - export a dma-buf object for the given memory - * handle. + * export_dmabuf_from_addr() - export a dma-buf object for the given memory + * address and size. * @ctx: pointer to the context structure. - * @handle: device memory allocation handle. + * @addr: device address. + * @size: size of device memory to export. + * @offset: the offset into the buffer from which to start exporting * @flags: DMA-BUF file/FD flags. * @dmabuf_fd: pointer to result FD that represents the dma-buf object. * @@ -1915,87 +2014,69 @@ static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 device_addr, * * Return: 0 on success, non-zero for failure. 
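 * For example (illustrative values): exporting 1 GB that starts 256 MB into
 * an existing device-VA mapping at 0x2000000000 would pass
 * addr = 0x2000000000, offset = 0x10000000 and size = 0x40000000. On devices
 * without virtual DRAM memory, offset must be 0 and addr is the physical
 * DRAM address.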
*/ -static int export_dmabuf_from_handle(struct hl_ctx *ctx, u64 handle, int flags, - int *dmabuf_fd) +static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 addr, u64 size, u64 offset, + int flags, int *dmabuf_fd) { - struct hl_vm_phys_pg_pack *phys_pg_pack; - struct hl_dmabuf_priv *hl_dmabuf; - struct hl_device *hdev = ctx->hdev; + struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; + struct hl_vm_hash_node *hnode = NULL; struct asic_fixed_properties *prop; - struct hl_vm *vm = &hdev->vm; - u64 bar_address; - int rc, i; + struct hl_dmabuf_priv *hl_dmabuf; + struct hl_device *hdev; + u64 export_addr; + int rc; + hdev = ctx->hdev; prop = &hdev->asic_prop; - if (upper_32_bits(handle)) { - dev_dbg(hdev->dev, "no match for handle 0x%llx\n", handle); + /* offset must be 0 in devices without virtual memory support */ + if (!prop->dram_supports_virtual_memory && offset) { + dev_dbg(hdev->dev, "offset is not allowed in device without virtual memory\n"); return -EINVAL; } - spin_lock(&vm->idr_lock); + export_addr = addr + offset; - phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, (u32) handle); - if (!phys_pg_pack) { - spin_unlock(&vm->idr_lock); - dev_dbg(hdev->dev, "no match for handle 0x%x\n", (u32) handle); - return -EINVAL; - } - - /* increment now to avoid freeing device memory while exporting */ - phys_pg_pack->exporting_cnt++; - - spin_unlock(&vm->idr_lock); - - if (phys_pg_pack->vm_type != VM_TYPE_PHYS_PACK) { - dev_dbg(hdev->dev, "handle 0x%llx does not represent DRAM memory\n", handle); - rc = -EINVAL; - goto err_dec_exporting_cnt; - } - - for (i = 0 ; i < phys_pg_pack->npages ; i++) { - - bar_address = hdev->dram_pci_bar_start + - (phys_pg_pack->pages[i] - - prop->dram_base_address); - - if (bar_address + phys_pg_pack->page_size > - hdev->dram_pci_bar_start + prop->dram_pci_bar_size || - bar_address + phys_pg_pack->page_size < bar_address) { - - dev_dbg(hdev->dev, - "DRAM memory range 0x%llx (+0x%x) is outside of PCI BAR boundaries\n", - phys_pg_pack->pages[i], - phys_pg_pack->page_size); + hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL); + if (!hl_dmabuf) + return -ENOMEM; - rc = -EINVAL; - goto err_dec_exporting_cnt; + if (prop->dram_supports_virtual_memory) { + hnode = memhash_node_export_get(ctx, addr); + if (IS_ERR(hnode)) { + rc = PTR_ERR(hnode); + goto err_free_dmabuf_wrapper; } - } + phys_pg_pack = get_phys_pg_pack_from_hash_node(hdev, hnode); + if (IS_ERR(phys_pg_pack)) { + rc = PTR_ERR(phys_pg_pack); + goto dec_memhash_export_cnt; + } + rc = validate_export_params(hdev, export_addr, size, offset, phys_pg_pack); + if (rc) + goto dec_memhash_export_cnt; - hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL); - if (!hl_dmabuf) { - rc = -ENOMEM; - goto err_dec_exporting_cnt; + phys_pg_pack->exported_size = size; + hl_dmabuf->phys_pg_pack = phys_pg_pack; + hl_dmabuf->memhash_hnode = hnode; + } else { + rc = validate_export_params_no_mmu(hdev, export_addr, size); + if (rc) + goto err_free_dmabuf_wrapper; } - hl_dmabuf->phys_pg_pack = phys_pg_pack; + hl_dmabuf->device_address = export_addr; - rc = export_dmabuf_common(ctx, hl_dmabuf, phys_pg_pack->total_size, - flags, dmabuf_fd); + rc = export_dmabuf(ctx, hl_dmabuf, size, flags, dmabuf_fd); if (rc) - goto err_free_dmabuf_wrapper; + goto dec_memhash_export_cnt; return 0; +dec_memhash_export_cnt: + if (prop->dram_supports_virtual_memory) + memhash_node_export_put(ctx, hnode); err_free_dmabuf_wrapper: kfree(hl_dmabuf); - -err_dec_exporting_cnt: - spin_lock(&vm->idr_lock); - phys_pg_pack->exporting_cnt--; - 
spin_unlock(&vm->idr_lock); - return rc; } @@ -2089,12 +2170,13 @@ static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, v static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args) { struct hl_ts_buff *ts_buff = NULL; - u32 size, num_elements; + u32 num_elements; + size_t size; void *p; num_elements = *(u32 *)args; - ts_buff = kzalloc(sizeof(*ts_buff), GFP_KERNEL); + ts_buff = kzalloc(sizeof(*ts_buff), gfp); if (!ts_buff) return -ENOMEM; @@ -2180,7 +2262,7 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) int rc, dmabuf_fd = -EBADF; if (!hl_device_operational(hdev, &status)) { - dev_warn_ratelimited(hdev->dev, + dev_dbg_ratelimited(hdev->dev, "Device is %s. Can't execute MEMORY IOCTL\n", hdev->status[status]); return -EBUSY; @@ -2269,17 +2351,12 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) break; case HL_MEM_OP_EXPORT_DMABUF_FD: - if (hdev->asic_prop.dram_supports_virtual_memory) - rc = export_dmabuf_from_handle(ctx, - args->in.export_dmabuf_fd.handle, - args->in.flags, - &dmabuf_fd); - else - rc = export_dmabuf_from_addr(ctx, - args->in.export_dmabuf_fd.handle, - args->in.export_dmabuf_fd.mem_size, - args->in.flags, - &dmabuf_fd); + rc = export_dmabuf_from_addr(ctx, + args->in.export_dmabuf_fd.addr, + args->in.export_dmabuf_fd.mem_size, + args->in.export_dmabuf_fd.offset, + args->in.flags, + &dmabuf_fd); memset(args, 0, sizeof(*args)); args->out.fd = dmabuf_fd; break; diff --git a/drivers/misc/habanalabs/common/memory_mgr.c b/drivers/accel/habanalabs/common/memory_mgr.c similarity index 99% rename from drivers/misc/habanalabs/common/memory_mgr.c rename to drivers/accel/habanalabs/common/memory_mgr.c index 1936d653699ed0..0f2759e265477d 100644 --- a/drivers/misc/habanalabs/common/memory_mgr.c +++ b/drivers/accel/habanalabs/common/memory_mgr.c @@ -25,8 +25,7 @@ struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u64 handle) buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT)); if (!buf) { spin_unlock(&mmg->lock); - dev_warn(mmg->dev, - "Buff get failed, no match to handle %#llx\n", handle); + dev_dbg(mmg->dev, "Buff get failed, no match to handle %#llx\n", handle); return NULL; } kref_get(&buf->refcount); diff --git a/drivers/misc/habanalabs/common/mmu/Makefile b/drivers/accel/habanalabs/common/mmu/Makefile similarity index 100% rename from drivers/misc/habanalabs/common/mmu/Makefile rename to drivers/accel/habanalabs/common/mmu/Makefile diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/accel/habanalabs/common/mmu/mmu.c similarity index 99% rename from drivers/misc/habanalabs/common/mmu/mmu.c rename to drivers/accel/habanalabs/common/mmu/mmu.c index 2c1005f74cf440..a42ae8bc61e8d0 100644 --- a/drivers/misc/habanalabs/common/mmu/mmu.c +++ b/drivers/accel/habanalabs/common/mmu/mmu.c @@ -781,7 +781,7 @@ static void mmu_dma_mem_free_from_chunk(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data) { - struct hl_device *hdev = (struct hl_device *)data; + struct hl_device *hdev = data; hl_asic_dma_free_coherent(hdev, (chunk->end_addr - chunk->start_addr) + 1, (void *)chunk->start_addr, chunk->phys_addr); diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/accel/habanalabs/common/mmu/mmu_v1.c similarity index 99% rename from drivers/misc/habanalabs/common/mmu/mmu_v1.c rename to drivers/accel/habanalabs/common/mmu/mmu_v1.c index 8a40de4a47617c..d925dc4dd09725 100644 --- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c +++ b/drivers/accel/habanalabs/common/mmu/mmu_v1.c @@ 
-344,7 +344,6 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx) } } - hop2_pte_addr = hop2_addr; hop2_pte_addr = hop2_addr; for (i = 0 ; i < num_of_hop3 ; i++) { clear_pte(ctx, hop2_pte_addr); diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v2_hr.c b/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c similarity index 100% rename from drivers/misc/habanalabs/common/mmu/mmu_v2_hr.c rename to drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c diff --git a/drivers/misc/habanalabs/common/pci/Makefile b/drivers/accel/habanalabs/common/pci/Makefile similarity index 100% rename from drivers/misc/habanalabs/common/pci/Makefile rename to drivers/accel/habanalabs/common/pci/Makefile diff --git a/drivers/misc/habanalabs/common/pci/pci.c b/drivers/accel/habanalabs/common/pci/pci.c similarity index 97% rename from drivers/misc/habanalabs/common/pci/pci.c rename to drivers/accel/habanalabs/common/pci/pci.c index 5fe3da5fba306a..d1f4c695baf22d 100644 --- a/drivers/misc/habanalabs/common/pci/pci.c +++ b/drivers/accel/habanalabs/common/pci/pci.c @@ -10,6 +10,8 @@ #include +#include + #define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC (HL_PCI_ELBI_TIMEOUT_MSEC * 100) #define IATU_REGION_CTRL_REGION_EN_MASK BIT(31) @@ -120,6 +122,9 @@ int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data) if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) { pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data); + if (unlikely(trace_habanalabs_elbi_read_enabled())) + trace_habanalabs_elbi_read(hdev->dev, (u32) addr, val); + return 0; } @@ -179,8 +184,11 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data) usleep_range(300, 500); } - if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) + if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) { + if (unlikely(trace_habanalabs_elbi_write_enabled())) + trace_habanalabs_elbi_write(hdev->dev, (u32) addr, val); return 0; + } if (val & PCI_CONFIG_ELBI_STS_ERR) return -EIO; diff --git a/drivers/misc/habanalabs/common/security.c b/drivers/accel/habanalabs/common/security.c similarity index 76% rename from drivers/misc/habanalabs/common/security.c rename to drivers/accel/habanalabs/common/security.c index 6196c0487c8b0e..5f03ade07ead59 100644 --- a/drivers/misc/habanalabs/common/security.c +++ b/drivers/accel/habanalabs/common/security.c @@ -7,6 +7,19 @@ #include "habanalabs.h" +static const char * const hl_glbl_error_cause[HL_MAX_NUM_OF_GLBL_ERR_CAUSE] = { + "Error due to un-priv read", + "Error due to un-secure read", + "Error due to read from unmapped reg", + "Error due to un-priv write", + "Error due to un-secure write", + "Error due to write to unmapped reg", + "External I/F write sec violation", + "External I/F write to un-mapped reg", + "Read to write only", + "Write to read only" +}; + /** * hl_get_pb_block - return the relevant block within the block array * @@ -598,3 +611,164 @@ void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset, blocks_array_size); } + +static u32 hl_automated_get_block_base_addr(struct hl_device *hdev, + struct hl_special_block_info *block_info, + u32 major, u32 minor, u32 sub_minor) +{ + u32 fw_block_base_address = block_info->base_addr + + major * block_info->major_offset + + minor * block_info->minor_offset + + sub_minor * block_info->sub_minor_offset; + struct asic_fixed_properties *prop = &hdev->asic_prop; + + /* Calculation above returns an address for FW use, and therefore should + * be casted for driver use. 
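+	 *
+	 * E.g., with the NIC_UMR spacing documented in security.h
+	 * (major_offset 0x80000, minor_offset 0x20000, sub_minor_offset
+	 * 0x1000), instance (major 1, minor 0, sub_minor 2) resolves to
+	 * base_addr + 0x80000 + 0x2000 before the cfg_base_address
+	 * adjustment below.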
+ */ + return (fw_block_base_address - lower_32_bits(prop->cfg_base_address)); +} + +static bool hl_check_block_type_exclusion(struct hl_skip_blocks_cfg *skip_blocks_cfg, + int block_type) +{ + int i; + + /* Check if block type is listed in the exclusion list of block types */ + for (i = 0 ; i < skip_blocks_cfg->block_types_len ; i++) + if (block_type == skip_blocks_cfg->block_types[i]) + return true; + + return false; +} + +static bool hl_check_block_range_exclusion(struct hl_device *hdev, + struct hl_skip_blocks_cfg *skip_blocks_cfg, + struct hl_special_block_info *block_info, + u32 major, u32 minor, u32 sub_minor) +{ + u32 blocks_in_range, block_base_addr_in_range, block_base_addr; + int i, j; + + block_base_addr = hl_automated_get_block_base_addr(hdev, block_info, + major, minor, sub_minor); + + for (i = 0 ; i < skip_blocks_cfg->block_ranges_len ; i++) { + blocks_in_range = (skip_blocks_cfg->block_ranges[i].end - + skip_blocks_cfg->block_ranges[i].start) / + HL_BLOCK_SIZE + 1; + for (j = 0 ; j < blocks_in_range ; j++) { + block_base_addr_in_range = skip_blocks_cfg->block_ranges[i].start + + j * HL_BLOCK_SIZE; + if (block_base_addr == block_base_addr_in_range) + return true; + } + } + + return false; +} + +static int hl_read_glbl_errors(struct hl_device *hdev, + u32 blk_idx, u32 major, u32 minor, u32 sub_minor, void *data) +{ + struct hl_special_block_info *special_blocks = hdev->asic_prop.special_blocks; + struct hl_special_block_info *current_block = &special_blocks[blk_idx]; + u32 glbl_err_addr, glbl_err_cause, addr_val, cause_val, block_base, + base = current_block->base_addr - lower_32_bits(hdev->asic_prop.cfg_base_address); + int i; + + block_base = base + major * current_block->major_offset + + minor * current_block->minor_offset + + sub_minor * current_block->sub_minor_offset; + + glbl_err_cause = block_base + HL_GLBL_ERR_CAUSE_OFFSET; + cause_val = RREG32(glbl_err_cause); + if (!cause_val) + return 0; + + glbl_err_addr = block_base + HL_GLBL_ERR_ADDR_OFFSET; + addr_val = RREG32(glbl_err_addr); + + for (i = 0 ; i < hdev->asic_prop.glbl_err_cause_num ; i++) { + if (cause_val & BIT(i)) + dev_err_ratelimited(hdev->dev, + "%s, addr %#llx\n", + hl_glbl_error_cause[i], + hdev->asic_prop.cfg_base_address + block_base + + FIELD_GET(HL_GLBL_ERR_ADDRESS_MASK, addr_val)); + } + + WREG32(glbl_err_cause, cause_val); + + return 0; +} + +void hl_check_for_glbl_errors(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_special_blocks_cfg special_blocks_cfg; + struct iterate_special_ctx glbl_err_iter; + int rc; + + memset(&special_blocks_cfg, 0, sizeof(special_blocks_cfg)); + special_blocks_cfg.skip_blocks_cfg = &prop->skip_special_blocks_cfg; + + glbl_err_iter.fn = &hl_read_glbl_errors; + glbl_err_iter.data = &special_blocks_cfg; + + rc = hl_iterate_special_blocks(hdev, &glbl_err_iter); + if (rc) + dev_err_ratelimited(hdev->dev, + "Could not iterate special blocks, glbl error check failed\n"); +} + +int hl_iterate_special_blocks(struct hl_device *hdev, struct iterate_special_ctx *ctx) +{ + struct hl_special_blocks_cfg *special_blocks_cfg = + (struct hl_special_blocks_cfg *)ctx->data; + struct hl_skip_blocks_cfg *skip_blocks_cfg = + special_blocks_cfg->skip_blocks_cfg; + u32 major, minor, sub_minor, blk_idx, num_blocks; + struct hl_special_block_info *block_info_arr; + int rc; + + block_info_arr = hdev->asic_prop.special_blocks; + if (!block_info_arr) + return -EINVAL; + + num_blocks = hdev->asic_prop.num_of_special_blocks; + + for (blk_idx = 0 ; 
blk_idx < num_blocks ; blk_idx++, block_info_arr++) {
+		if (hl_check_block_type_exclusion(skip_blocks_cfg, block_info_arr->block_type))
+			continue;
+
+		for (major = 0 ; major < block_info_arr->major ; major++) {
+			minor = 0;
+			do {
+				sub_minor = 0;
+				do {
+					if ((hl_check_block_range_exclusion(hdev,
+							skip_blocks_cfg, block_info_arr,
+							major, minor, sub_minor)) ||
+						(skip_blocks_cfg->skip_block_hook &&
+						skip_blocks_cfg->skip_block_hook(hdev,
+							special_blocks_cfg,
+							blk_idx, major, minor, sub_minor))) {
+						sub_minor++;
+						continue;
+					}
+
+					rc = ctx->fn(hdev, blk_idx, major, minor,
+							sub_minor, ctx->data);
+					if (rc)
+						return rc;
+
+					sub_minor++;
+				} while (sub_minor < block_info_arr->sub_minor);
+
+				minor++;
+			} while (minor < block_info_arr->minor);
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/accel/habanalabs/common/security.h b/drivers/accel/habanalabs/common/security.h
new file mode 100644
index 00000000000000..234b4a6ed8bc1c
--- /dev/null
+++ b/drivers/accel/habanalabs/common/security.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef SECURITY_H_
+#define SECURITY_H_
+
+#include
+
+extern struct hl_device *hdev;
+
+/* special blocks */
+#define HL_MAX_NUM_OF_GLBL_ERR_CAUSE		10
+#define HL_GLBL_ERR_ADDRESS_MASK		GENMASK(11, 0)
+/* GLBL_ERR_ADDR register offset from the start of the block */
+#define HL_GLBL_ERR_ADDR_OFFSET		0xF44
+/* GLBL_ERR_CAUSE register offset from the start of the block */
+#define HL_GLBL_ERR_CAUSE_OFFSET		0xF48
+
+/*
+ * struct hl_special_block_info - stores address details of a particular type of
+ *	IP block which has a SPECIAL part.
+ *
+ * @block_type: block type as described in every ASIC's block_types enum.
+ * @base_addr: base address of the first block of particular type,
+ *	e.g., address of NIC0_UMR0_0 of 'NIC_UMR' block.
+ * @major: number of major blocks of particular type.
+ * @minor: number of minor blocks of particular type.
+ * @sub_minor: number of sub minor blocks of particular type.
+ * @major_offset: address gap between 2 consecutive major blocks of particular type,
+ *	e.g., offset between NIC0_UMR0_0 and NIC1_UMR0_0 is 0x80000.
+ * @minor_offset: address gap between 2 consecutive minor blocks of particular type,
+ *	e.g., offset between NIC0_UMR0_0 and NIC0_UMR1_0 is 0x20000.
+ * @sub_minor_offset: address gap between 2 consecutive sub_minor blocks of particular
+ *	type, e.g., offset between NIC0_UMR0_0 and NIC0_UMR0_1 is 0x1000.
+ *
+ * e.g., in Gaudi2, NIC_UMR blocks can be interpreted as:
+ * NIC<major>_UMR<minor>_<sub_minor> where major=12, minor=2, sub_minor=15.
+ * In other words, for each of 12 major numbers (i.e 0 to 11) there are
+ * 2 blocks with different minor numbers (i.e. 0 to 1). Again, for each minor
+ * number there are 15 blocks with different sub_minor numbers (i.e. 0 to 14).
+ * So different blocks are NIC0_UMR0_0, NIC0_UMR0_1, ..., NIC0_UMR1_0, ....,
+ * NIC11_UMR1_14.
+ *
+ * Struct's formatted data is located in the SOL-based auto-generated protbits headers.
+ */
+struct hl_special_block_info {
+	int block_type;
+	u32 base_addr;
+	u32 major;
+	u32 minor;
+	u32 sub_minor;
+	u32 major_offset;
+	u32 minor_offset;
+	u32 sub_minor_offset;
+};
+
+/*
+ * struct hl_automated_pb_cfg - represents configurations of a particular type
+ *	of IP block which has protection bits.
+ *
+ * @addr: address details as described in hl_automation_pb_addr struct.
+ * @prot_map: each bit corresponds to one among 32 protection configuration regs
+ *	(e.g., SPECIAL_GLBL_PRIV).
'1' means 0xffffffff and '0' means 0x0 + * to be written into the corresponding protection configuration reg. + * This bit is meaningful if same bit in data_map is 0, otherwise ignored. + * @data_map: each bit corresponds to one among 32 protection configuration regs + * (e.g., SPECIAL_GLBL_PRIV). '1' means corresponding protection + * configuration reg is to be written with a value in array pointed + * by 'data', otherwise the value is decided by 'prot_map'. + * @data: pointer to data array which stores the config value(s) to be written + * to corresponding protection configuration reg(s). + * @data_size: size of the data array. + * + * Each bit of 'data_map' and 'prot_map' fields corresponds to one among 32 + * protection configuration registers e.g., SPECIAL GLBL PRIV regs (starting at + * offset 0xE80). '1' in 'data_map' means protection configuration to be done + * using configuration in data array. '0' in 'data_map" means protection + * configuration to be done as per the value of corresponding bit in 'prot_map'. + * '1' in 'prot_map' means the register to be programmed with 0xFFFFFFFF + * (all non-protected). '0' in 'prot_map' means the register to be programmed + * with 0x0 (all protected). + * + * e.g., prot_map = 0x00000001, data_map = 0xC0000000 , data = {0xff, 0x12} + * SPECIAL_GLBL_PRIV[0] = 0xFFFFFFFF + * SPECIAL_GLBL_PRIV[1..29] = 0x0 + * SPECIAL_GLBL_PRIV[30] = 0xFF + * SPECIAL_GLBL_PRIV[31] = 0x12 + */ +struct hl_automated_pb_cfg { + struct hl_special_block_info addr; + u32 prot_map; + u32 data_map; + const u32 *data; + u8 data_size; +}; + +/* struct hl_special_blocks_cfg - holds special blocks cfg data. + * + * @priv_automated_pb_cfg: points to the main privileged PB array. + * @sec_automated_pb_cfg: points to the main secured PB array. + * @skip_blocks_cfg: holds arrays of block types & block ranges to be excluded. + * @priv_cfg_size: size of the main privileged PB array. + * @sec_cfg_size: size of the main secured PB array. + * @prot_lvl_priv: indication if it's a privileged/secured PB configurations. + */ +struct hl_special_blocks_cfg { + struct hl_automated_pb_cfg *priv_automated_pb_cfg; + struct hl_automated_pb_cfg *sec_automated_pb_cfg; + struct hl_skip_blocks_cfg *skip_blocks_cfg; + u32 priv_cfg_size; + u32 sec_cfg_size; + u8 prot_lvl_priv; +}; + +/* Automated security */ + +/* struct hl_skip_blocks_cfg - holds arrays of block types & block ranges to be + * excluded from special blocks configurations. + * + * @block_types: an array of block types NOT to be configured. + * @block_types_len: len of an array of block types not to be configured. + * @block_ranges: an array of block ranges not to be configured. + * @block_ranges_len: len of an array of block ranges not to be configured. + * @skip_block_hook: hook that will be called before initializing special blocks. 
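+ *		A hook returning true makes the iterator skip the current
+ *		block instance. A minimal sketch (hypothetical filter, same
+ *		signature as the member below):
+ *
+ *		static bool skip_odd_majors(struct hl_device *hdev,
+ *				struct hl_special_blocks_cfg *cfg, u32 blk_idx,
+ *				u32 major, u32 minor, u32 sub_minor)
+ *		{
+ *			return major & 1;
+ *		}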
+ */ +struct hl_skip_blocks_cfg { + int *block_types; + size_t block_types_len; + struct range *block_ranges; + size_t block_ranges_len; + bool (*skip_block_hook)(struct hl_device *hdev, + struct hl_special_blocks_cfg *special_blocks_cfg, + u32 blk_idx, u32 major, u32 minor, u32 sub_minor); +}; + +/** + * struct iterate_special_ctx - HW module special block iterator + * @fn: function to apply to each HW module special block instance + * @data: optional internal data to the function iterator + */ +struct iterate_special_ctx { + /* + * callback for the HW module special block iterator + * @hdev: pointer to the habanalabs device structure + * @block_id: block (ASIC specific definition can be dcore/hdcore) + * @major: major block index within block_id + * @minor: minor block index within the major block + * @sub_minor: sub_minor block index within the minor block + * @data: function specific data + */ + int (*fn)(struct hl_device *hdev, u32 block_id, u32 major, u32 minor, + u32 sub_minor, void *data); + void *data; +}; + +int hl_iterate_special_blocks(struct hl_device *hdev, struct iterate_special_ctx *ctx); +void hl_check_for_glbl_errors(struct hl_device *hdev); + +#endif /* SECURITY_H_ */ diff --git a/drivers/misc/habanalabs/common/state_dump.c b/drivers/accel/habanalabs/common/state_dump.c similarity index 99% rename from drivers/misc/habanalabs/common/state_dump.c rename to drivers/accel/habanalabs/common/state_dump.c index 74726907c95edf..3a9931f2425992 100644 --- a/drivers/misc/habanalabs/common/state_dump.c +++ b/drivers/accel/habanalabs/common/state_dump.c @@ -6,7 +6,7 @@ */ #include -#include +#include #include "habanalabs.h" /** diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/accel/habanalabs/common/sysfs.c similarity index 100% rename from drivers/misc/habanalabs/common/sysfs.c rename to drivers/accel/habanalabs/common/sysfs.c diff --git a/drivers/misc/habanalabs/gaudi/Makefile b/drivers/accel/habanalabs/gaudi/Makefile similarity index 100% rename from drivers/misc/habanalabs/gaudi/Makefile rename to drivers/accel/habanalabs/gaudi/Makefile diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/accel/habanalabs/gaudi/gaudi.c similarity index 99% rename from drivers/misc/habanalabs/gaudi/gaudi.c rename to drivers/accel/habanalabs/gaudi/gaudi.c index 9f5e208701bad1..71debe862c865f 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/accel/habanalabs/gaudi/gaudi.c @@ -701,6 +701,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev) prop->dma_mask = 48; + prop->hbw_flush_reg = mmPCIE_WRAP_RR_ELBI_RD_SEC_REG_CTRL; + return 0; } @@ -6432,12 +6434,6 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev, else timeout = HL_DEVICE_TIMEOUT_USEC; - if (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) { - dev_err_ratelimited(hdev->dev, - "Can't send driver job on QMAN0 because the device is not idle\n"); - return -EBUSY; - } - fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr); if (!fence_ptr) { dev_err(hdev->dev, @@ -7584,7 +7580,7 @@ static int tpc_krn_event_to_tpc_id(u16 tpc_dec_event_type) return (tpc_dec_event_type - GAUDI_EVENT_TPC0_KRN_ERR) / 6; } -static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type) +static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type, u64 *event_mask) { ktime_t zero_time = ktime_set(0, 0); @@ -7612,6 +7608,7 @@ static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type) hdev->clk_throttling.aggregated_reason |= 
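		/* (Taking the new event_mask parameter lets the thermal and
		 * power cases raise HL_NOTIFIER_EVENT_USER_ENGINE_ERR
		 * themselves for userspace notification, instead of the caller
		 * setting it unconditionally for the whole event range.)
		 */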
HL_CLK_THROTTLE_THERMAL; hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get(); hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time; + *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n"); break; @@ -7619,6 +7616,7 @@ static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type) case GAUDI_EVENT_FIX_THERMAL_ENV_E: hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL; hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get(); + *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; dev_info_ratelimited(hdev->dev, "Thermal envelop is safe, back to optimal clock\n"); break; @@ -7887,8 +7885,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr break; case GAUDI_EVENT_FIX_POWER_ENV_S ... GAUDI_EVENT_FIX_THERMAL_ENV_E: - event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; - gaudi_print_clk_change_info(hdev, event_type); + gaudi_print_clk_change_info(hdev, event_type, &event_mask); hl_fw_unmask_irq(hdev, event_type); break; @@ -9133,6 +9130,16 @@ static u32 *gaudi_get_stream_master_qid_arr(void) return gaudi_stream_master; } +static int gaudi_set_dram_properties(struct hl_device *hdev) +{ + return 0; +} + +static int gaudi_set_binning_masks(struct hl_device *hdev) +{ + return 0; +} + static void gaudi_check_if_razwi_happened(struct hl_device *hdev) { } @@ -9259,6 +9266,8 @@ static const struct hl_asic_funcs gaudi_funcs = { .access_dev_mem = hl_access_dev_mem, .set_dram_bar_base = gaudi_set_hbm_bar_base, .send_device_activity = gaudi_send_device_activity, + .set_dram_properties = gaudi_set_dram_properties, + .set_binning_masks = gaudi_set_binning_masks, }; /** diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/accel/habanalabs/gaudi/gaudiP.h similarity index 99% rename from drivers/misc/habanalabs/gaudi/gaudiP.h rename to drivers/accel/habanalabs/gaudi/gaudiP.h index 4fbcf3f0afe589..3d88d56c8eb3a3 100644 --- a/drivers/misc/habanalabs/gaudi/gaudiP.h +++ b/drivers/accel/habanalabs/gaudi/gaudiP.h @@ -8,7 +8,7 @@ #ifndef GAUDIP_H_ #define GAUDIP_H_ -#include +#include #include "../common/habanalabs.h" #include "../include/common/hl_boot_if.h" #include "../include/gaudi/gaudi_packets.h" diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/accel/habanalabs/gaudi/gaudi_coresight.c similarity index 99% rename from drivers/misc/habanalabs/gaudi/gaudi_coresight.c rename to drivers/accel/habanalabs/gaudi/gaudi_coresight.c index 08108f5fed6748..3455b14554c677 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c +++ b/drivers/accel/habanalabs/gaudi/gaudi_coresight.c @@ -11,7 +11,8 @@ #include "../include/gaudi/gaudi_masks.h" #include "../include/gaudi/gaudi_reg_map.h" -#include +#include + #define SPMU_SECTION_SIZE MME0_ACC_SPMU_MAX_OFFSET #define SPMU_EVENT_TYPES_OFFSET 0x400 #define SPMU_MAX_COUNTERS 6 diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/accel/habanalabs/gaudi/gaudi_security.c similarity index 100% rename from drivers/misc/habanalabs/gaudi/gaudi_security.c rename to drivers/accel/habanalabs/gaudi/gaudi_security.c diff --git a/drivers/misc/habanalabs/gaudi2/Makefile b/drivers/accel/habanalabs/gaudi2/Makefile similarity index 100% rename from drivers/misc/habanalabs/gaudi2/Makefile rename to drivers/accel/habanalabs/gaudi2/Makefile diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c similarity index 92% 
rename from drivers/misc/habanalabs/gaudi2/gaudi2.c rename to drivers/accel/habanalabs/gaudi2/gaudi2.c index e793fb2bdcbe6c..f1f2a58ee68c2a 100644 --- a/drivers/misc/habanalabs/gaudi2/gaudi2.c +++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c @@ -7,6 +7,7 @@ #include "gaudi2P.h" #include "gaudi2_masks.h" +#include "../include/gaudi2/gaudi2_special_blocks.h" #include "../include/hw_ip/mmu/mmu_general.h" #include "../include/hw_ip/mmu/mmu_v2_0.h" #include "../include/gaudi2/gaudi2_packets.h" @@ -53,6 +54,7 @@ #define GAUDI2_HIF_HMMU_FULL_MASK 0xFFFF #define GAUDI2_DECODER_FULL_MASK 0x3FF +#define GAUDI2_NA_EVENT_CAUSE 0xFF #define GAUDI2_NUM_OF_QM_ERR_CAUSE 18 #define GAUDI2_NUM_OF_QM_LCP_ERR_CAUSE 25 #define GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE 3 @@ -675,14 +677,13 @@ static const char * const gaudi2_kdma_core_interrupts_cause[GAUDI2_NUM_OF_DMA_CO struct gaudi2_sm_sei_cause_data { const char *cause_name; const char *log_name; - u32 log_mask; }; static const struct gaudi2_sm_sei_cause_data gaudi2_sm_sei_cause[GAUDI2_NUM_OF_SM_SEI_ERR_CAUSE] = { - {"calculated SO value overflow/underflow", "SOB group ID", 0x7FF}, - {"payload address of monitor is not aligned to 4B", "monitor addr", 0xFFFF}, - {"armed monitor write got BRESP (SLVERR or DECERR)", "AXI id", 0xFFFF}, + {"calculated SO value overflow/underflow", "SOB ID"}, + {"payload address of monitor is not aligned to 4B", "monitor addr"}, + {"armed monitor write got BRESP (SLVERR or DECERR)", "AXI id"}, }; static const char * const @@ -1568,7 +1569,7 @@ enum rtr_id { DCORE3_RTR7, }; -static const u32 gaudi2_tpc_initiator_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1] = { +static const u32 gaudi2_tpc_initiator_hbw_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1] = { DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR2, DCORE0_RTR2, DCORE0_RTR3, DCORE0_RTR3, DCORE1_RTR6, DCORE1_RTR6, DCORE1_RTR5, DCORE1_RTR5, DCORE1_RTR4, DCORE1_RTR4, DCORE2_RTR3, DCORE2_RTR3, DCORE2_RTR2, DCORE2_RTR2, DCORE2_RTR1, DCORE2_RTR1, @@ -1576,33 +1577,61 @@ static const u32 gaudi2_tpc_initiator_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORE DCORE0_RTR0 }; -static const u32 gaudi2_dec_initiator_rtr_id[NUMBER_OF_DEC] = { +static const u32 gaudi2_tpc_initiator_lbw_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1] = { + DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR2, DCORE0_RTR2, + DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR6, DCORE1_RTR6, DCORE1_RTR5, DCORE1_RTR5, + DCORE2_RTR2, DCORE2_RTR2, DCORE2_RTR1, DCORE2_RTR1, DCORE2_RTR0, DCORE2_RTR0, + DCORE3_RTR5, DCORE3_RTR5, DCORE3_RTR6, DCORE3_RTR6, DCORE3_RTR7, DCORE3_RTR7, + DCORE0_RTR0 +}; + +static const u32 gaudi2_dec_initiator_hbw_rtr_id[NUMBER_OF_DEC] = { DCORE0_RTR0, DCORE0_RTR0, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0, DCORE2_RTR0, DCORE3_RTR7, DCORE3_RTR7, DCORE0_RTR0, DCORE0_RTR0 }; -static const u32 gaudi2_nic_initiator_rtr_id[NIC_NUMBER_OF_MACROS] = { +static const u32 gaudi2_dec_initiator_lbw_rtr_id[NUMBER_OF_DEC] = { + DCORE0_RTR1, DCORE0_RTR1, DCORE1_RTR6, DCORE1_RTR6, DCORE2_RTR1, DCORE2_RTR1, + DCORE3_RTR6, DCORE3_RTR6, DCORE0_RTR0, DCORE0_RTR0 +}; + +static const u32 gaudi2_nic_initiator_hbw_rtr_id[NIC_NUMBER_OF_MACROS] = { DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0, DCORE2_RTR0, DCORE2_RTR0, DCORE2_RTR0, DCORE3_RTR7, DCORE3_RTR7, DCORE3_RTR7 }; -struct sft_info { - u8 interface_id; - u8 dcore_id; +static const u32 gaudi2_nic_initiator_lbw_rtr_id[NIC_NUMBER_OF_MACROS] = { + DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0, + DCORE2_RTR0, DCORE2_RTR0, DCORE2_RTR0, 
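+	/* (The HBW and LBW paths of one initiator cross different routers, so
+	 * the tables are presumably split per path to let error attribution
+	 * look up the correct router; assumption based on the table names.)
+	 */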
DCORE3_RTR7, DCORE3_RTR7, DCORE3_RTR7
+};
+
-static const struct sft_info gaudi2_edma_initiator_sft_id[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES] = {
-	{0, 0}, {1, 0}, {0, 1}, {1, 1}, {1, 2}, {1, 3}, {0, 2}, {0, 3},
+static const u32 gaudi2_edma_initiator_hbw_sft[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES] = {
+	mmSFT0_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE,
+	mmSFT0_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
+	mmSFT1_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE,
+	mmSFT1_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
+	mmSFT2_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
+	mmSFT2_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE,
+	mmSFT3_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
+	mmSFT3_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE
 };
 
-static const u32 gaudi2_pdma_initiator_rtr_id[NUM_OF_PDMA] = {
+static const u32 gaudi2_pdma_initiator_hbw_rtr_id[NUM_OF_PDMA] = {
 	DCORE0_RTR0, DCORE0_RTR0
 };
 
-static const u32 gaudi2_rot_initiator_rtr_id[NUM_OF_ROT] = {
+static const u32 gaudi2_pdma_initiator_lbw_rtr_id[NUM_OF_PDMA] = {
+	DCORE0_RTR2, DCORE0_RTR2
+};
+
+static const u32 gaudi2_rot_initiator_hbw_rtr_id[NUM_OF_ROT] = {
 	DCORE2_RTR0, DCORE3_RTR7
 };
 
+static const u32 gaudi2_rot_initiator_lbw_rtr_id[NUM_OF_ROT] = {
+	DCORE2_RTR2, DCORE3_RTR5
+};
+
 struct mme_initiators_rtr_id {
 	u32 wap0;
 	u32 wap1;
@@ -1655,6 +1684,30 @@ struct hbm_mc_error_causes {
 	char cause[50];
 };
 
+static struct hl_special_block_info gaudi2_special_blocks[] = GAUDI2_SPECIAL_BLOCKS;
+
+/* The special blocks iterator is currently used both to configure security
+ * protection bits and to read global errors. Most HW blocks are addressable;
+ * those that are not (N/A) must be skipped. The configurations below are
+ * shared by PB configuration and global error reading, since the two
+ * currently use the same settings. Once that changes, separate configurations
+ * must be used for each.
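+ *
+ * (The skip lists below encode that policy: whole block types with no
+ * addressable registers, and the PSOC/CPU register ranges that must be left
+ * untouched, are consulted by the iterator before it visits each instance.)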
+ */ +static int gaudi2_iterator_skip_block_types[] = { + GAUDI2_BLOCK_TYPE_PLL, + GAUDI2_BLOCK_TYPE_EU_BIST, + GAUDI2_BLOCK_TYPE_HBM, + GAUDI2_BLOCK_TYPE_XFT +}; + +static struct range gaudi2_iterator_skip_block_ranges[] = { + /* Skip all PSOC blocks except for PSOC_GLOBAL_CONF */ + {mmPSOC_I2C_M0_BASE, mmPSOC_EFUSE_BASE}, + {mmPSOC_BTL_BASE, mmPSOC_MSTR_IF_RR_SHRD_HBW_BASE}, + /* Skip all CPU blocks except for CPU_IF */ + {mmCPU_CA53_CFG_BASE, mmCPU_CA53_CFG_BASE}, + {mmCPU_TIMESTAMP_BASE, mmCPU_MSTR_IF_RR_SHRD_HBW_BASE} +}; + static struct hbm_mc_error_causes hbm_mc_spi[GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE] = { {HBM_MC_SPI_TEMP_PIN_CHG_MASK, "temperature pins changed"}, {HBM_MC_SPI_THR_ENG_MASK, "temperature-based throttling engaged"}, @@ -2070,6 +2123,8 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev) prop->dma_mask = 64; + prop->hbw_flush_reg = mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0; + return 0; } @@ -2434,6 +2489,25 @@ static int gaudi2_set_cluster_binning_masks(struct hl_device *hdev) return 0; } +static int gaudi2_set_binning_masks(struct hl_device *hdev) +{ + int rc; + + rc = gaudi2_set_cluster_binning_masks(hdev); + if (rc) + return rc; + + rc = gaudi2_set_tpc_binning_masks(hdev); + if (rc) + return rc; + + rc = gaudi2_set_dec_binning_masks(hdev); + if (rc) + return rc; + + return 0; +} + static int gaudi2_cpucp_info_get(struct hl_device *hdev) { struct gaudi2_device *gaudi2 = hdev->asic_specific; @@ -2485,19 +2559,11 @@ static int gaudi2_cpucp_info_get(struct hl_device *hdev) * at this point the DRAM parameters need to be updated according to data obtained * from the FW */ - rc = gaudi2_set_dram_properties(hdev); + rc = hdev->asic_funcs->set_dram_properties(hdev); if (rc) return rc; - rc = gaudi2_set_cluster_binning_masks(hdev); - if (rc) - return rc; - - rc = gaudi2_set_tpc_binning_masks(hdev); - if (rc) - return rc; - - rc = gaudi2_set_dec_binning_masks(hdev); + rc = hdev->asic_funcs->set_binning_masks(hdev); if (rc) return rc; @@ -2925,11 +2991,11 @@ static void gaudi2_user_interrupt_setup(struct hl_device *hdev) /* Initialize common user CQ interrupt */ HL_USR_INTR_STRUCT_INIT(hdev->common_user_cq_interrupt, hdev, - HL_COMMON_USER_CQ_INTERRUPT_ID, false); + HL_COMMON_USER_CQ_INTERRUPT_ID, HL_USR_INTERRUPT_CQ); /* Initialize common decoder interrupt */ HL_USR_INTR_STRUCT_INIT(hdev->common_decoder_interrupt, hdev, - HL_COMMON_DEC_INTERRUPT_ID, true); + HL_COMMON_DEC_INTERRUPT_ID, HL_USR_INTERRUPT_DECODER); /* User interrupts structure holds both decoder and user interrupts from various engines. * We first initialize the decoder interrupts and then we add the user interrupts. @@ -2942,10 +3008,11 @@ static void gaudi2_user_interrupt_setup(struct hl_device *hdev) */ for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM, j = 0 ; i <= GAUDI2_IRQ_NUM_SHARED_DEC1_NRM; i += 2, j++) - HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i, true); + HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i, + HL_USR_INTERRUPT_DECODER); for (i = GAUDI2_IRQ_NUM_USER_FIRST, k = 0 ; k < prop->user_interrupt_count; i++, j++, k++) - HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i, false); + HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i, HL_USR_INTERRUPT_CQ); } static inline int gaudi2_get_non_zero_random_int(void) @@ -2955,6 +3022,99 @@ static inline int gaudi2_get_non_zero_random_int(void) return rand ? 
rand : 1; } +static void gaudi2_special_blocks_free(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_skip_blocks_cfg *skip_special_blocks_cfg = + &prop->skip_special_blocks_cfg; + + kfree(prop->special_blocks); + kfree(skip_special_blocks_cfg->block_types); + kfree(skip_special_blocks_cfg->block_ranges); +} + +static void gaudi2_special_blocks_iterator_free(struct hl_device *hdev) +{ + gaudi2_special_blocks_free(hdev); +} + +static bool gaudi2_special_block_skip(struct hl_device *hdev, + struct hl_special_blocks_cfg *special_blocks_cfg, + u32 blk_idx, u32 major, u32 minor, u32 sub_minor) +{ + return false; +} + +static int gaudi2_special_blocks_config(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + int i, rc; + + /* Configure Special blocks */ + prop->glbl_err_cause_num = GAUDI2_NUM_OF_GLBL_ERR_CAUSE; + prop->num_of_special_blocks = ARRAY_SIZE(gaudi2_special_blocks); + prop->special_blocks = kmalloc_array(prop->num_of_special_blocks, + sizeof(*prop->special_blocks), GFP_KERNEL); + if (!prop->special_blocks) + return -ENOMEM; + + for (i = 0 ; i < prop->num_of_special_blocks ; i++) + memcpy(&prop->special_blocks[i], &gaudi2_special_blocks[i], + sizeof(*prop->special_blocks)); + + /* Configure when to skip Special blocks */ + memset(&prop->skip_special_blocks_cfg, 0, sizeof(prop->skip_special_blocks_cfg)); + prop->skip_special_blocks_cfg.skip_block_hook = gaudi2_special_block_skip; + + if (ARRAY_SIZE(gaudi2_iterator_skip_block_types)) { + prop->skip_special_blocks_cfg.block_types = + kmalloc_array(ARRAY_SIZE(gaudi2_iterator_skip_block_types), + sizeof(gaudi2_iterator_skip_block_types[0]), GFP_KERNEL); + if (!prop->skip_special_blocks_cfg.block_types) { + rc = -ENOMEM; + goto free_special_blocks; + } + + memcpy(prop->skip_special_blocks_cfg.block_types, gaudi2_iterator_skip_block_types, + sizeof(gaudi2_iterator_skip_block_types)); + + prop->skip_special_blocks_cfg.block_types_len = + ARRAY_SIZE(gaudi2_iterator_skip_block_types); + } + + if (ARRAY_SIZE(gaudi2_iterator_skip_block_ranges)) { + prop->skip_special_blocks_cfg.block_ranges = + kmalloc_array(ARRAY_SIZE(gaudi2_iterator_skip_block_ranges), + sizeof(gaudi2_iterator_skip_block_ranges[0]), GFP_KERNEL); + if (!prop->skip_special_blocks_cfg.block_ranges) { + rc = -ENOMEM; + goto free_skip_special_blocks_types; + } + + for (i = 0 ; i < ARRAY_SIZE(gaudi2_iterator_skip_block_ranges) ; i++) + memcpy(&prop->skip_special_blocks_cfg.block_ranges[i], + &gaudi2_iterator_skip_block_ranges[i], + sizeof(struct range)); + + prop->skip_special_blocks_cfg.block_ranges_len = + ARRAY_SIZE(gaudi2_iterator_skip_block_ranges); + } + + return 0; + +free_skip_special_blocks_types: + kfree(prop->skip_special_blocks_cfg.block_types); +free_special_blocks: + kfree(prop->special_blocks); + + return rc; +} + +static int gaudi2_special_blocks_iterator_config(struct hl_device *hdev) +{ + return gaudi2_special_blocks_config(hdev); +} + static int gaudi2_sw_init(struct hl_device *hdev) { struct asic_fixed_properties *prop = &hdev->asic_prop; @@ -3050,8 +3210,15 @@ static int gaudi2_sw_init(struct hl_device *hdev) hdev->asic_funcs->set_pci_memory_regions(hdev); + rc = gaudi2_special_blocks_iterator_config(hdev); + if (rc) + goto free_scratchpad_mem; + return 0; +free_scratchpad_mem: + hl_asic_dma_pool_free(hdev, gaudi2->scratchpad_kernel_address, + gaudi2->scratchpad_bus_address); free_virt_msix_db_mem: hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, 
 								gaudi2->virt_msix_db_cpu_addr);
 free_cpu_accessible_dma_pool:
@@ -3071,6 +3238,8 @@ static int gaudi2_sw_fini(struct hl_device *hdev)
 	struct asic_fixed_properties *prop = &hdev->asic_prop;
 	struct gaudi2_device *gaudi2 = hdev->asic_specific;
 
+	gaudi2_special_blocks_iterator_free(hdev);
+
 	hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr);
 
 	gen_pool_destroy(hdev->cpu_accessible_dma_pool);
@@ -5483,7 +5652,31 @@ static void gaudi2_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_rese
 
 skip_reset:
 	if (driver_performs_reset || hard_reset)
-		gaudi2_poll_btm_indication(hdev, reset_sleep_ms, poll_timeout_us);
+		/*
+		 * Instead of waiting for BTM indication we should wait for preboot ready:
+		 * Consider the below scenario:
+		 * 1. FW update is being triggered
+		 *        - setting the dirty bit
+		 * 2. hard reset will be triggered due to the dirty bit
+		 * 3. FW initiates the reset:
+		 *        - dirty bit cleared
+		 *        - BTM indication cleared
+		 *        - preboot ready indication cleared
+		 * 4. during hard reset:
+		 *        - BTM indication will be set
+		 *        - BIST test performed and another reset triggered
+		 * 5. only after this reset the preboot will set the preboot ready
+		 *
+		 * when polling on the BTM indication alone we can lose sync with FW while
+		 * trying to communicate with FW that is in reset.
+		 * to overcome this we will always wait for the preboot ready indication
+		 */
+		if ((hdev->fw_components & FW_TYPE_PREBOOT_CPU)) {
+			msleep(reset_sleep_ms);
+			hl_fw_wait_preboot_ready(hdev);
+		} else {
+			gaudi2_poll_btm_indication(hdev, reset_sleep_ms, poll_timeout_us);
+		}
 	else
 		gaudi2_get_soft_rst_done_indication(hdev, poll_timeout_us);
 
@@ -6803,38 +6996,37 @@ static inline bool is_info_event(u32 event)
 	switch (event) {
 	case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_CAUSE:
 	case GAUDI2_EVENT_CPU_FIX_POWER_ENV_S ... GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E:
+
+	/* return in case of NIC status event - these events are received periodically and not as
+	 * an indication of an error.
+	 */
+	case GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG0 ... GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1:
 		return true;
 	default:
 		return false;
 	}
 }
 
-static void gaudi2_print_irq_info(struct hl_device *hdev, u16 event_type)
+static void gaudi2_print_event(struct hl_device *hdev, u16 event_type,
+			bool ratelimited, const char *fmt, ...)
 {
-	char desc[64] = "";
-	bool event_valid = false;
+	struct va_format vaf;
+	va_list args;
 
-	/* return in case of NIC status event - these events are received periodically and not as
-	 * an indication to an error, thus not printed.
-	 */
-	if (event_type >= GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG0 &&
-			event_type <= GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1)
-		return;
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
 
-	if (gaudi2_irq_map_table[event_type].valid) {
-		snprintf(desc, sizeof(desc), gaudi2_irq_map_table[event_type].name);
-		event_valid = true;
-	}
-
-	if (!event_valid)
-		snprintf(desc, sizeof(desc), "N/A");
-
-	if (is_info_event(event_type))
-		dev_info_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
-			event_type, desc);
+	if (ratelimited)
+		dev_err_ratelimited(hdev->dev, "%s: %pV\n",
+			gaudi2_irq_map_table[event_type].valid ?
+			gaudi2_irq_map_table[event_type].name : "N/A Event", &vaf);
 	else
-		dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
-			event_type, desc);
+		dev_err(hdev->dev, "%s: %pV\n",
+			gaudi2_irq_map_table[event_type].valid ?
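/*
 * gaudi2_print_event() relies on the kernel's %pV extension: the caller's
 * format string and va_list are wrapped in a struct va_format and expanded
 * inside a single dev_err()/dev_err_ratelimited() call, prefixed with the
 * event name from gaudi2_irq_map_table. A typical call site, as used
 * throughout the rest of this patch:
 *
 *	gaudi2_print_event(hdev, event_type, true,
 *			"err cause: %s", gaudi2_qman_error_cause[j]);
 */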
+ gaudi2_irq_map_table[event_type].name : "N/A Event", &vaf); + + va_end(args); } static bool gaudi2_handle_ecc_event(struct hl_device *hdev, u16 event_type, @@ -6847,7 +7039,7 @@ static bool gaudi2_handle_ecc_event(struct hl_device *hdev, u16 event_type, ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom); memory_wrapper_idx = ecc_data->memory_wrapper_idx; - dev_err(hdev->dev, + gaudi2_print_event(hdev, event_type, !ecc_data->is_critical, "ECC error detected. address: %#llx. Syndrom: %#llx. block id %u. critical %u.\n", ecc_address, ecc_syndrom, memory_wrapper_idx, ecc_data->is_critical); @@ -6987,10 +7179,10 @@ static void print_qman_data_on_err(struct hl_device *hdev, u32 qid_base, u32 str gaudi2_print_last_pqes_on_err(hdev, qid_base, i, qman_base, false); } -static void gaudi2_handle_qman_err_generic(struct hl_device *hdev, const char *qm_name, - u64 qman_base, u32 qid_base) +static int gaudi2_handle_qman_err_generic(struct hl_device *hdev, u16 event_type, + u64 qman_base, u32 qid_base) { - u32 i, j, glbl_sts_val, arb_err_val, num_error_causes; + u32 i, j, glbl_sts_val, arb_err_val, num_error_causes, error_count = 0; u64 glbl_sts_addr, arb_err_addr; char reg_desc[32]; @@ -7013,12 +7205,14 @@ static void gaudi2_handle_qman_err_generic(struct hl_device *hdev, const char *q } for (j = 0 ; j < num_error_causes ; j++) - if (glbl_sts_val & BIT(j)) - dev_err_ratelimited(hdev->dev, "%s %s. err cause: %s\n", - qm_name, reg_desc, - i == QMAN_STREAMS ? - gaudi2_qman_lower_cp_error_cause[j] : - gaudi2_qman_error_cause[j]); + if (glbl_sts_val & BIT(j)) { + gaudi2_print_event(hdev, event_type, true, + "%s. err cause: %s", reg_desc, + i == QMAN_STREAMS ? + gaudi2_qman_lower_cp_error_cause[j] : + gaudi2_qman_error_cause[j]); + error_count++; + } print_qman_data_on_err(hdev, qid_base, i, qman_base); } @@ -7026,18 +7220,23 @@ static void gaudi2_handle_qman_err_generic(struct hl_device *hdev, const char *q arb_err_val = RREG32(arb_err_addr); if (!arb_err_val) - return; + goto out; for (j = 0 ; j < GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE ; j++) { - if (arb_err_val & BIT(j)) - dev_err_ratelimited(hdev->dev, "%s ARB_ERR. err cause: %s\n", - qm_name, gaudi2_qman_arb_error_cause[j]); + if (arb_err_val & BIT(j)) { + gaudi2_print_event(hdev, event_type, true, + "ARB_ERR. 
err cause: %s", + gaudi2_qman_arb_error_cause[j]); + error_count++; + } } + +out: + return error_count; } static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev, u64 rtr_mstr_if_base_addr, bool is_write, char *name, - bool read_razwi_regs, struct hl_eq_razwi_info *razwi_info, enum gaudi2_engine_id id, u64 *event_mask) { u32 razwi_hi, razwi_lo, razwi_xy; @@ -7045,26 +7244,14 @@ static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev, u8 rd_wr_flag; if (is_write) { - if (read_razwi_regs) { - razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HI); - razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_LO); - razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_XY); - } else { - razwi_hi = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_hi_reg); - razwi_lo = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_lo_reg); - razwi_xy = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_id_reg); - } + razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HI); + razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_LO); + razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_XY); rd_wr_flag = HL_RAZWI_WRITE; } else { - if (read_razwi_regs) { - razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HI); - razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_LO); - razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_XY); - } else { - razwi_hi = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_hi_reg); - razwi_lo = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_lo_reg); - razwi_xy = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_id_reg); - } + razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HI); + razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_LO); + razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_XY); rd_wr_flag = HL_RAZWI_READ; } @@ -7078,38 +7265,26 @@ static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev, static void gaudi2_razwi_rr_lbw_shared_printf_info(struct hl_device *hdev, u64 rtr_mstr_if_base_addr, bool is_write, char *name, - bool read_razwi_regs, struct hl_eq_razwi_info *razwi_info, enum gaudi2_engine_id id, u64 *event_mask) { - u32 razwi_addr, razwi_xy; + u64 razwi_addr = CFG_BASE; + u32 razwi_xy; u16 eng_id = id; u8 rd_wr_flag; if (is_write) { - if (read_razwi_regs) { - razwi_addr = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI); - razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_XY); - } else { - razwi_addr = le32_to_cpu(razwi_info->lbw.rr_aw_razwi_reg); - razwi_xy = le32_to_cpu(razwi_info->lbw.rr_aw_razwi_id_reg); - } - + razwi_addr += RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI); + razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_XY); rd_wr_flag = HL_RAZWI_WRITE; } else { - if (read_razwi_regs) { - razwi_addr = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI); - razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_XY); - } else { - razwi_addr = le32_to_cpu(razwi_info->lbw.rr_ar_razwi_reg); - razwi_xy = le32_to_cpu(razwi_info->lbw.rr_ar_razwi_id_reg); - } - + razwi_addr += RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI); + razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_XY); rd_wr_flag = HL_RAZWI_READ; } hl_handle_razwi(hdev, razwi_addr, &eng_id, 1, rd_wr_flag | HL_RAZWI_LBW, event_mask); dev_err_ratelimited(hdev->dev, - "%s-RAZWI SHARED RR LBW %s error, mstr_if 0x%llx, captured address 0x%x Initiator coordinates 0x%x\n", + "%s-RAZWI SHARED RR LBW %s error, mstr_if 0x%llx, 
captured address 0x%llX Initiator coordinates 0x%x\n", name, is_write ? "WR" : "RD", rtr_mstr_if_base_addr, razwi_addr, razwi_xy); } @@ -7164,183 +7339,148 @@ static enum gaudi2_engine_id gaudi2_razwi_calc_engine_id(struct hl_device *hdev, */ static void gaudi2_ack_module_razwi_event_handler(struct hl_device *hdev, enum razwi_event_sources module, u8 module_idx, - u8 module_sub_idx, struct hl_eq_razwi_info *razwi_info, - u64 *event_mask) + u8 module_sub_idx, u64 *event_mask) { - bool via_sft = false, read_razwi_regs = false; - u32 rtr_id, dcore_id, dcore_rtr_id, sft_id, eng_id; - u64 rtr_mstr_if_base_addr; + bool via_sft = false; + u32 hbw_rtr_id, lbw_rtr_id, dcore_id, dcore_rtr_id, eng_id; + u64 hbw_rtr_mstr_if_base_addr, lbw_rtr_mstr_if_base_addr; u32 hbw_shrd_aw = 0, hbw_shrd_ar = 0; u32 lbw_shrd_aw = 0, lbw_shrd_ar = 0; char initiator_name[64]; - if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX) || !razwi_info) - read_razwi_regs = true; - switch (module) { case RAZWI_TPC: - rtr_id = gaudi2_tpc_initiator_rtr_id[module_idx]; + hbw_rtr_id = gaudi2_tpc_initiator_hbw_rtr_id[module_idx]; + + /* TODO : remove this check and depend only on tpc routers table + * when SW-118828 is resolved + */ + if (!hdev->asic_prop.fw_security_enabled && + ((module_idx == 0) || (module_idx == 1))) + lbw_rtr_id = DCORE0_RTR0; + else + lbw_rtr_id = gaudi2_tpc_initiator_lbw_rtr_id[module_idx]; sprintf(initiator_name, "TPC_%u", module_idx); break; case RAZWI_MME: sprintf(initiator_name, "MME_%u", module_idx); switch (module_sub_idx) { case MME_WAP0: - rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap0; + hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap0; break; case MME_WAP1: - rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap1; + hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap1; break; case MME_WRITE: - rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].write; + hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].write; break; case MME_READ: - rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].read; + hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].read; break; case MME_SBTE0: - rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte0; + hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte0; break; case MME_SBTE1: - rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte1; + hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte1; break; case MME_SBTE2: - rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte2; + hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte2; break; case MME_SBTE3: - rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte3; + hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte3; break; case MME_SBTE4: - rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte4; + hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte4; break; default: return; } + lbw_rtr_id = hbw_rtr_id; break; case RAZWI_EDMA: - sft_id = gaudi2_edma_initiator_sft_id[module_idx].interface_id; - dcore_id = gaudi2_edma_initiator_sft_id[module_idx].dcore_id; + hbw_rtr_mstr_if_base_addr = gaudi2_edma_initiator_hbw_sft[module_idx]; + dcore_id = module_idx / NUM_OF_EDMA_PER_DCORE; + /* SFT has separate MSTR_IF for LBW, only there we can + * read the LBW razwi related registers + */ + lbw_rtr_mstr_if_base_addr = mmSFT0_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_BASE + + dcore_id * SFT_DCORE_OFFSET; via_sft = true; sprintf(initiator_name, "EDMA_%u", module_idx); break; case RAZWI_PDMA: - rtr_id = gaudi2_pdma_initiator_rtr_id[module_idx]; + hbw_rtr_id = gaudi2_pdma_initiator_hbw_rtr_id[module_idx]; 
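/*
 * Each initiator now resolves two routers - one for HBW and one for LBW
 * traffic - from per-module tables such as the PDMA pair here. Where the two
 * differ, the LBW mstr_if base is derived from the signed router-id delta;
 * a sketch of the computation used further below:
 *
 *	lbw_base = hbw_base + ((s32)lbw_rtr_id - hbw_rtr_id) * DCORE_RTR_OFFSET;
 */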
+ lbw_rtr_id = gaudi2_pdma_initiator_lbw_rtr_id[module_idx]; sprintf(initiator_name, "PDMA_%u", module_idx); break; case RAZWI_NIC: - rtr_id = gaudi2_nic_initiator_rtr_id[module_idx]; + hbw_rtr_id = gaudi2_nic_initiator_hbw_rtr_id[module_idx]; + lbw_rtr_id = gaudi2_nic_initiator_lbw_rtr_id[module_idx]; sprintf(initiator_name, "NIC_%u", module_idx); break; case RAZWI_DEC: - rtr_id = gaudi2_dec_initiator_rtr_id[module_idx]; + hbw_rtr_id = gaudi2_dec_initiator_hbw_rtr_id[module_idx]; + lbw_rtr_id = gaudi2_dec_initiator_lbw_rtr_id[module_idx]; sprintf(initiator_name, "DEC_%u", module_idx); break; case RAZWI_ROT: - rtr_id = gaudi2_rot_initiator_rtr_id[module_idx]; + hbw_rtr_id = gaudi2_rot_initiator_hbw_rtr_id[module_idx]; + lbw_rtr_id = gaudi2_rot_initiator_lbw_rtr_id[module_idx]; sprintf(initiator_name, "ROT_%u", module_idx); break; default: return; } - if (!read_razwi_regs) { - if (le32_to_cpu(razwi_info->razwi_happened_mask) & RAZWI_HAPPENED_HBW) { - hbw_shrd_aw = le32_to_cpu(razwi_info->razwi_happened_mask) & - RAZWI_HAPPENED_AW; - hbw_shrd_ar = le32_to_cpu(razwi_info->razwi_happened_mask) & - RAZWI_HAPPENED_AR; - } else if (le32_to_cpu(razwi_info->razwi_happened_mask) & RAZWI_HAPPENED_LBW) { - lbw_shrd_aw = le32_to_cpu(razwi_info->razwi_happened_mask) & - RAZWI_HAPPENED_AW; - lbw_shrd_ar = le32_to_cpu(razwi_info->razwi_happened_mask) & - RAZWI_HAPPENED_AR; - } - rtr_mstr_if_base_addr = 0; - - goto dump_info; - } - /* Find router mstr_if register base */ - if (via_sft) { - rtr_mstr_if_base_addr = mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE + - dcore_id * SFT_DCORE_OFFSET + - sft_id * SFT_IF_OFFSET + - RTR_MSTR_IF_OFFSET; - } else { - dcore_id = rtr_id / NUM_OF_RTR_PER_DCORE; - dcore_rtr_id = rtr_id % NUM_OF_RTR_PER_DCORE; - rtr_mstr_if_base_addr = mmDCORE0_RTR0_CTRL_BASE + + if (!via_sft) { + dcore_id = hbw_rtr_id / NUM_OF_RTR_PER_DCORE; + dcore_rtr_id = hbw_rtr_id % NUM_OF_RTR_PER_DCORE; + hbw_rtr_mstr_if_base_addr = mmDCORE0_RTR0_CTRL_BASE + dcore_id * DCORE_OFFSET + dcore_rtr_id * DCORE_RTR_OFFSET + RTR_MSTR_IF_OFFSET; + lbw_rtr_mstr_if_base_addr = hbw_rtr_mstr_if_base_addr + + (((s32)lbw_rtr_id - hbw_rtr_id) * DCORE_RTR_OFFSET); } /* Find out event cause by reading "RAZWI_HAPPENED" registers */ - hbw_shrd_aw = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED); - - hbw_shrd_ar = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED); - - if (via_sft) { - /* SFT has separate MSTR_IF for LBW, only there we can - * read the LBW razwi related registers - */ - u64 base; - - base = mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE + dcore_id * SFT_DCORE_OFFSET + - RTR_LBW_MSTR_IF_OFFSET; - - lbw_shrd_aw = RREG32(base + RR_SHRD_LBW_AW_RAZWI_HAPPENED); - - lbw_shrd_ar = RREG32(base + RR_SHRD_LBW_AR_RAZWI_HAPPENED); - } else { - lbw_shrd_aw = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED); - - lbw_shrd_ar = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED); - } - -dump_info: - /* check if there is no RR razwi indication at all */ - if (!hbw_shrd_aw && !hbw_shrd_ar && !lbw_shrd_aw && !lbw_shrd_ar) - return; + hbw_shrd_aw = RREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED); + hbw_shrd_ar = RREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED); + lbw_shrd_aw = RREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED); + lbw_shrd_ar = RREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED); eng_id = gaudi2_razwi_calc_engine_id(hdev, module, module_idx); if (hbw_shrd_aw) { - gaudi2_razwi_rr_hbw_shared_printf_info(hdev, 
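/*
 * With the FW-supplied razwi_info path dropped, the handler unconditionally
 * reads the four RR_SHRD_{HBW,LBW}_{AW,AR}_RAZWI_HAPPENED registers, prints
 * whichever are set, and clears each one by writing the read value back.
 */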
rtr_mstr_if_base_addr, true, - initiator_name, read_razwi_regs, razwi_info, - eng_id, event_mask); + gaudi2_razwi_rr_hbw_shared_printf_info(hdev, hbw_rtr_mstr_if_base_addr, true, + initiator_name, eng_id, event_mask); /* Clear event indication */ - if (read_razwi_regs) - WREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED, hbw_shrd_aw); + WREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED, hbw_shrd_aw); } if (hbw_shrd_ar) { - gaudi2_razwi_rr_hbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, false, - initiator_name, read_razwi_regs, razwi_info, - eng_id, event_mask); + gaudi2_razwi_rr_hbw_shared_printf_info(hdev, hbw_rtr_mstr_if_base_addr, false, + initiator_name, eng_id, event_mask); /* Clear event indication */ - if (read_razwi_regs) - WREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED, hbw_shrd_ar); + WREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED, hbw_shrd_ar); } if (lbw_shrd_aw) { - gaudi2_razwi_rr_lbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, true, - initiator_name, read_razwi_regs, razwi_info, - eng_id, event_mask); + gaudi2_razwi_rr_lbw_shared_printf_info(hdev, lbw_rtr_mstr_if_base_addr, true, + initiator_name, eng_id, event_mask); /* Clear event indication */ - if (read_razwi_regs) - WREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED, lbw_shrd_aw); + WREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED, lbw_shrd_aw); } if (lbw_shrd_ar) { - gaudi2_razwi_rr_lbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, false, - initiator_name, read_razwi_regs, razwi_info, - eng_id, event_mask); + gaudi2_razwi_rr_lbw_shared_printf_info(hdev, lbw_rtr_mstr_if_base_addr, false, + initiator_name, eng_id, event_mask); /* Clear event indication */ - if (read_razwi_regs) - WREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED, lbw_shrd_ar); + WREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED, lbw_shrd_ar); } } @@ -7352,42 +7492,38 @@ static void gaudi2_check_if_razwi_happened(struct hl_device *hdev) /* check all TPCs */ for (mod_idx = 0 ; mod_idx < (NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1) ; mod_idx++) { if (prop->tpc_enabled_mask & BIT(mod_idx)) - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, mod_idx, 0, NULL, - NULL); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, mod_idx, 0, NULL); } /* check all MMEs */ for (mod_idx = 0 ; mod_idx < (NUM_OF_MME_PER_DCORE * NUM_OF_DCORES) ; mod_idx++) for (sub_mod = MME_WAP0 ; sub_mod < MME_INITIATORS_MAX ; sub_mod++) gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mod_idx, - sub_mod, NULL, NULL); + sub_mod, NULL); /* check all EDMAs */ for (mod_idx = 0 ; mod_idx < (NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES) ; mod_idx++) if (prop->edma_enabled_mask & BIT(mod_idx)) - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, mod_idx, 0, NULL, - NULL); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, mod_idx, 0, NULL); /* check all PDMAs */ for (mod_idx = 0 ; mod_idx < NUM_OF_PDMA ; mod_idx++) - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_PDMA, mod_idx, 0, NULL, - NULL); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_PDMA, mod_idx, 0, NULL); /* check all NICs */ for (mod_idx = 0 ; mod_idx < NIC_NUMBER_OF_PORTS ; mod_idx++) if (hdev->nic_ports_mask & BIT(mod_idx)) gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_NIC, mod_idx >> 1, 0, - NULL, NULL); + NULL); /* check all DECs */ for (mod_idx = 0 ; mod_idx < NUMBER_OF_DEC ; mod_idx++) if (prop->decoder_enabled_mask & BIT(mod_idx)) - gaudi2_ack_module_razwi_event_handler(hdev, 
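/*
 * gaudi2_check_if_razwi_happened() sweeps every possible initiator (TPC, MME,
 * EDMA, PDMA, NIC, DEC, ROT) with the trimmed handler signature: the
 * razwi_info argument is gone, so each call site passes a single NULL
 * (no event mask) instead of the former NULL, NULL pair.
 */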
RAZWI_DEC, mod_idx, 0, NULL, - NULL); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, mod_idx, 0, NULL); /* check all ROTs */ for (mod_idx = 0 ; mod_idx < NUM_OF_ROT ; mod_idx++) - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, mod_idx, 0, NULL, NULL); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, mod_idx, 0, NULL); } static const char *gaudi2_get_initiators_name(u32 rtr_id) @@ -7645,19 +7781,19 @@ static void gaudi2_razwi_unmapped_addr_lbw_printf_info(struct hl_device *hdev, u u64 *event_mask) { u16 engines[HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR], num_of_eng; - u32 razwi_addr; + u64 razwi_addr = CFG_BASE; u8 rd_wr_flag; num_of_eng = gaudi2_get_razwi_initiators(rtr_id, &engines[0]); if (is_write) { - razwi_addr = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_ADDR); + razwi_addr += RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_ADDR); rd_wr_flag = HL_RAZWI_WRITE; /* Clear set indication */ WREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_SET, 0x1); } else { - razwi_addr = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_ADDR); + razwi_addr += RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_ADDR); rd_wr_flag = HL_RAZWI_READ; /* Clear set indication */ @@ -7667,7 +7803,7 @@ static void gaudi2_razwi_unmapped_addr_lbw_printf_info(struct hl_device *hdev, u hl_handle_razwi(hdev, razwi_addr, &engines[0], num_of_eng, rd_wr_flag | HL_RAZWI_LBW, event_mask); dev_err_ratelimited(hdev->dev, - "RAZWI PSOC unmapped LBW %s error, rtr id %u, address %#x\n", + "RAZWI PSOC unmapped LBW %s error, rtr id %u, address 0x%llX\n", is_write ? "WR" : "RD", rtr_id, razwi_addr); dev_err_ratelimited(hdev->dev, @@ -7675,17 +7811,17 @@ static void gaudi2_razwi_unmapped_addr_lbw_printf_info(struct hl_device *hdev, u } /* PSOC RAZWI interrupt occurs only when trying to access a bad address */ -static void gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev, u64 *event_mask) +static int gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev, u64 *event_mask) { u32 hbw_aw_set, hbw_ar_set, lbw_aw_set, lbw_ar_set, rtr_id, dcore_id, dcore_rtr_id, xy, - razwi_mask_info, razwi_intr = 0; + razwi_mask_info, razwi_intr = 0, error_count = 0; int rtr_map_arr_len = NUM_OF_RTR_PER_DCORE * NUM_OF_DCORES; u64 rtr_ctrl_base_addr; if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX)) { razwi_intr = RREG32(mmPSOC_GLOBAL_CONF_RAZWI_INTERRUPT); if (!razwi_intr) - return; + return 0; } razwi_mask_info = RREG32(mmPSOC_GLOBAL_CONF_RAZWI_MASK_INFO); @@ -7743,33 +7879,41 @@ static void gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev, u64 *eve gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_id, rtr_ctrl_base_addr, false, event_mask); + error_count++; + clear: /* Clear Interrupts only on pldm or if f/w doesn't handle interrupts */ if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX)) WREG32(mmPSOC_GLOBAL_CONF_RAZWI_INTERRUPT, razwi_intr); + + return error_count; } -static void _gaudi2_handle_qm_sei_err(struct hl_device *hdev, u64 qman_base) +static int _gaudi2_handle_qm_sei_err(struct hl_device *hdev, u64 qman_base, u16 event_type) { - u32 i, sts_val, sts_clr_val = 0; + u32 i, sts_val, sts_clr_val = 0, error_count = 0; sts_val = RREG32(qman_base + QM_SEI_STATUS_OFFSET); for (i = 0 ; i < GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE ; i++) { if (sts_val & BIT(i)) { - dev_err_ratelimited(hdev->dev, "QM SEI. 
err cause: %s\n", - gaudi2_qm_sei_error_cause[i]); + gaudi2_print_event(hdev, event_type, true, + "err cause: %s", gaudi2_qm_sei_error_cause[i]); sts_clr_val |= BIT(i); + error_count++; } } WREG32(qman_base + QM_SEI_STATUS_OFFSET, sts_clr_val); + + return error_count; } -static void gaudi2_handle_qm_sei_err(struct hl_device *hdev, u16 event_type, - struct hl_eq_razwi_info *razwi_info, u64 *event_mask) +static int gaudi2_handle_qm_sei_err(struct hl_device *hdev, u16 event_type, + bool extended_err_check, u64 *event_mask) { enum razwi_event_sources module; + u32 error_count = 0; u64 qman_base; u8 index; @@ -7808,26 +7952,30 @@ static void gaudi2_handle_qm_sei_err(struct hl_device *hdev, u16 event_type, module = RAZWI_ROT; break; default: - return; + return 0; } - _gaudi2_handle_qm_sei_err(hdev, qman_base); + error_count = _gaudi2_handle_qm_sei_err(hdev, qman_base, event_type); /* There is a single event per NIC macro, so should check its both QMAN blocks */ if (event_type >= GAUDI2_EVENT_NIC0_AXI_ERROR_RESPONSE && event_type <= GAUDI2_EVENT_NIC11_AXI_ERROR_RESPONSE) - _gaudi2_handle_qm_sei_err(hdev, qman_base + NIC_QM_OFFSET); + error_count += _gaudi2_handle_qm_sei_err(hdev, + qman_base + NIC_QM_OFFSET, event_type); - /* check if RAZWI happened */ - if (razwi_info) - gaudi2_ack_module_razwi_event_handler(hdev, module, 0, 0, razwi_info, event_mask); + if (extended_err_check) { + /* check if RAZWI happened */ + gaudi2_ack_module_razwi_event_handler(hdev, module, 0, 0, event_mask); + hl_check_for_glbl_errors(hdev); + } + + return error_count; } -static void gaudi2_handle_qman_err(struct hl_device *hdev, u16 event_type) +static int gaudi2_handle_qman_err(struct hl_device *hdev, u16 event_type, u64 *event_mask) { - u32 qid_base; + u32 qid_base, error_count = 0; u64 qman_base; - char desc[32]; u8 index; switch (event_type) { @@ -7835,194 +7983,207 @@ static void gaudi2_handle_qman_err(struct hl_device *hdev, u16 event_type) index = event_type - GAUDI2_EVENT_TPC0_QM; qid_base = GAUDI2_QUEUE_ID_DCORE0_TPC_0_0 + index * QMAN_STREAMS; qman_base = mmDCORE0_TPC0_QM_BASE + index * DCORE_TPC_OFFSET; - snprintf(desc, ARRAY_SIZE(desc), "DCORE0_TPC%d_QM", index); break; case GAUDI2_EVENT_TPC6_QM ... GAUDI2_EVENT_TPC11_QM: index = event_type - GAUDI2_EVENT_TPC6_QM; qid_base = GAUDI2_QUEUE_ID_DCORE1_TPC_0_0 + index * QMAN_STREAMS; qman_base = mmDCORE1_TPC0_QM_BASE + index * DCORE_TPC_OFFSET; - snprintf(desc, ARRAY_SIZE(desc), "DCORE1_TPC%d_QM", index); break; case GAUDI2_EVENT_TPC12_QM ... GAUDI2_EVENT_TPC17_QM: index = event_type - GAUDI2_EVENT_TPC12_QM; qid_base = GAUDI2_QUEUE_ID_DCORE2_TPC_0_0 + index * QMAN_STREAMS; qman_base = mmDCORE2_TPC0_QM_BASE + index * DCORE_TPC_OFFSET; - snprintf(desc, ARRAY_SIZE(desc), "DCORE2_TPC%d_QM", index); break; case GAUDI2_EVENT_TPC18_QM ... 
GAUDI2_EVENT_TPC23_QM: index = event_type - GAUDI2_EVENT_TPC18_QM; qid_base = GAUDI2_QUEUE_ID_DCORE3_TPC_0_0 + index * QMAN_STREAMS; qman_base = mmDCORE3_TPC0_QM_BASE + index * DCORE_TPC_OFFSET; - snprintf(desc, ARRAY_SIZE(desc), "DCORE3_TPC%d_QM", index); break; case GAUDI2_EVENT_TPC24_QM: qid_base = GAUDI2_QUEUE_ID_DCORE0_TPC_6_0; qman_base = mmDCORE0_TPC6_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "DCORE0_TPC6_QM"); break; case GAUDI2_EVENT_MME0_QM: qid_base = GAUDI2_QUEUE_ID_DCORE0_MME_0_0; qman_base = mmDCORE0_MME_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "DCORE0_MME_QM"); break; case GAUDI2_EVENT_MME1_QM: qid_base = GAUDI2_QUEUE_ID_DCORE1_MME_0_0; qman_base = mmDCORE1_MME_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "DCORE1_MME_QM"); break; case GAUDI2_EVENT_MME2_QM: qid_base = GAUDI2_QUEUE_ID_DCORE2_MME_0_0; qman_base = mmDCORE2_MME_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "DCORE2_MME_QM"); break; case GAUDI2_EVENT_MME3_QM: qid_base = GAUDI2_QUEUE_ID_DCORE3_MME_0_0; qman_base = mmDCORE3_MME_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "DCORE3_MME_QM"); break; case GAUDI2_EVENT_HDMA0_QM: + index = 0; qid_base = GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0; qman_base = mmDCORE0_EDMA0_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "DCORE0_EDMA0_QM"); break; case GAUDI2_EVENT_HDMA1_QM: + index = 1; qid_base = GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0; qman_base = mmDCORE0_EDMA1_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "DCORE0_EDMA1_QM"); break; case GAUDI2_EVENT_HDMA2_QM: + index = 2; qid_base = GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0; qman_base = mmDCORE1_EDMA0_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "DCORE1_EDMA0_QM"); break; case GAUDI2_EVENT_HDMA3_QM: + index = 3; qid_base = GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0; qman_base = mmDCORE1_EDMA1_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "DCORE1_EDMA1_QM"); break; case GAUDI2_EVENT_HDMA4_QM: + index = 4; qid_base = GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0; qman_base = mmDCORE2_EDMA0_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "DCORE2_EDMA0_QM"); break; case GAUDI2_EVENT_HDMA5_QM: + index = 5; qid_base = GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0; qman_base = mmDCORE2_EDMA1_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "DCORE2_EDMA1_QM"); break; case GAUDI2_EVENT_HDMA6_QM: + index = 6; qid_base = GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0; qman_base = mmDCORE3_EDMA0_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "DCORE3_EDMA0_QM"); break; case GAUDI2_EVENT_HDMA7_QM: + index = 7; qid_base = GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0; qman_base = mmDCORE3_EDMA1_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "DCORE3_EDMA1_QM"); break; case GAUDI2_EVENT_PDMA0_QM: qid_base = GAUDI2_QUEUE_ID_PDMA_0_0; qman_base = mmPDMA0_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "PDMA0_QM"); break; case GAUDI2_EVENT_PDMA1_QM: qid_base = GAUDI2_QUEUE_ID_PDMA_1_0; qman_base = mmPDMA1_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "PDMA1_QM"); break; case GAUDI2_EVENT_ROTATOR0_ROT0_QM: qid_base = GAUDI2_QUEUE_ID_ROT_0_0; qman_base = mmROT0_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "ROTATOR0_QM"); break; case GAUDI2_EVENT_ROTATOR1_ROT1_QM: qid_base = GAUDI2_QUEUE_ID_ROT_1_0; qman_base = mmROT1_QM_BASE; - snprintf(desc, ARRAY_SIZE(desc), "ROTATOR1_QM"); break; default: - return; + return 0; } - gaudi2_handle_qman_err_generic(hdev, desc, qman_base, qid_base); + error_count = gaudi2_handle_qman_err_generic(hdev, event_type, qman_base, qid_base); /* Handle EDMA QM SEI here because there is no AXI error response event for EDMA */ - if (event_type >= GAUDI2_EVENT_HDMA2_QM && event_type <= GAUDI2_EVENT_HDMA5_QM) - _gaudi2_handle_qm_sei_err(hdev, 
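/*
 * All of these handlers changed from void to int: each returns the number of
 * error causes it actually reported, and callers accumulate the counts
 * (error_count += ...), apparently so the top-level event handler can tell a
 * handled event apart from one that raised no recognizable cause at all.
 */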
qman_base); + if (event_type >= GAUDI2_EVENT_HDMA2_QM && event_type <= GAUDI2_EVENT_HDMA5_QM) { + error_count += _gaudi2_handle_qm_sei_err(hdev, qman_base, event_type); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, index, 0, event_mask); + } + + hl_check_for_glbl_errors(hdev); + + return error_count; } -static void gaudi2_handle_arc_farm_sei_err(struct hl_device *hdev) +static int gaudi2_handle_arc_farm_sei_err(struct hl_device *hdev, u16 event_type) { - u32 i, sts_val, sts_clr_val = 0; + u32 i, sts_val, sts_clr_val = 0, error_count = 0; sts_val = RREG32(mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_STS); for (i = 0 ; i < GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE ; i++) { if (sts_val & BIT(i)) { - dev_err_ratelimited(hdev->dev, "ARC SEI. err cause: %s\n", - gaudi2_arc_sei_error_cause[i]); + gaudi2_print_event(hdev, event_type, true, + "err cause: %s", gaudi2_arc_sei_error_cause[i]); sts_clr_val |= BIT(i); + error_count++; } } + hl_check_for_glbl_errors(hdev); + WREG32(mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_CLR, sts_clr_val); + + return error_count; } -static void gaudi2_handle_cpu_sei_err(struct hl_device *hdev) +static int gaudi2_handle_cpu_sei_err(struct hl_device *hdev, u16 event_type) { - u32 i, sts_val, sts_clr_val = 0; + u32 i, sts_val, sts_clr_val = 0, error_count = 0; sts_val = RREG32(mmCPU_IF_CPU_SEI_INTR_STS); for (i = 0 ; i < GAUDI2_NUM_OF_CPU_SEI_ERR_CAUSE ; i++) { if (sts_val & BIT(i)) { - dev_err_ratelimited(hdev->dev, "CPU SEI. err cause: %s\n", - gaudi2_cpu_sei_error_cause[i]); + gaudi2_print_event(hdev, event_type, true, + "err cause: %s", gaudi2_cpu_sei_error_cause[i]); sts_clr_val |= BIT(i); + error_count++; } } + hl_check_for_glbl_errors(hdev); + WREG32(mmCPU_IF_CPU_SEI_INTR_CLR, sts_clr_val); + + return error_count; } -static void gaudi2_handle_rot_err(struct hl_device *hdev, u8 rot_index, +static int gaudi2_handle_rot_err(struct hl_device *hdev, u8 rot_index, u16 event_type, struct hl_eq_razwi_with_intr_cause *razwi_with_intr_cause, u64 *event_mask) { u64 intr_cause_data = le64_to_cpu(razwi_with_intr_cause->intr_cause.intr_cause_data); + u32 error_count = 0; int i; for (i = 0 ; i < GAUDI2_NUM_OF_ROT_ERR_CAUSE ; i++) - if (intr_cause_data & BIT(i)) - dev_err_ratelimited(hdev->dev, "ROT%u. 
err cause: %s\n", - rot_index, guadi2_rot_error_cause[i]); + if (intr_cause_data & BIT(i)) { + gaudi2_print_event(hdev, event_type, true, + "err cause: %s", guadi2_rot_error_cause[i]); + error_count++; + } /* check if RAZWI happened */ - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, rot_index, 0, - &razwi_with_intr_cause->razwi_info, event_mask); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, rot_index, 0, event_mask); + hl_check_for_glbl_errors(hdev); + + return error_count; } -static void gaudi2_tpc_ack_interrupts(struct hl_device *hdev, u8 tpc_index, char *interrupt_name, +static int gaudi2_tpc_ack_interrupts(struct hl_device *hdev, u8 tpc_index, u16 event_type, struct hl_eq_razwi_with_intr_cause *razwi_with_intr_cause, u64 *event_mask) { u64 intr_cause_data = le64_to_cpu(razwi_with_intr_cause->intr_cause.intr_cause_data); + u32 error_count = 0; int i; for (i = 0 ; i < GAUDI2_NUM_OF_TPC_INTR_CAUSE ; i++) - if (intr_cause_data & BIT(i)) - dev_err_ratelimited(hdev->dev, "TPC%d_%s interrupt cause: %s\n", - tpc_index, interrupt_name, gaudi2_tpc_interrupts_cause[i]); + if (intr_cause_data & BIT(i)) { + gaudi2_print_event(hdev, event_type, true, + "interrupt cause: %s", gaudi2_tpc_interrupts_cause[i]); + error_count++; + } /* check if RAZWI happened */ - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, tpc_index, 0, - &razwi_with_intr_cause->razwi_info, event_mask); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, tpc_index, 0, event_mask); + hl_check_for_glbl_errors(hdev); + + return error_count; } -static void gaudi2_handle_dec_err(struct hl_device *hdev, u8 dec_index, const char *interrupt_name, - struct hl_eq_razwi_info *razwi_info, u64 *event_mask) +static int gaudi2_handle_dec_err(struct hl_device *hdev, u8 dec_index, u16 event_type, + u64 *event_mask) { - u32 sts_addr, sts_val, sts_clr_val = 0; + u32 sts_addr, sts_val, sts_clr_val = 0, error_count = 0; int i; if (dec_index < NUM_OF_VDEC_PER_DCORE * NUM_OF_DCORES) @@ -8039,24 +8200,27 @@ static void gaudi2_handle_dec_err(struct hl_device *hdev, u8 dec_index, const ch for (i = 0 ; i < GAUDI2_NUM_OF_DEC_ERR_CAUSE ; i++) { if (sts_val & BIT(i)) { - dev_err_ratelimited(hdev->dev, "DEC%u_%s err cause: %s\n", - dec_index, interrupt_name, gaudi2_dec_error_cause[i]); + gaudi2_print_event(hdev, event_type, true, + "err cause: %s", gaudi2_dec_error_cause[i]); sts_clr_val |= BIT(i); + error_count++; } } /* check if RAZWI happened */ - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, dec_index, 0, razwi_info, - event_mask); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, dec_index, 0, event_mask); + hl_check_for_glbl_errors(hdev); /* Write 1 clear errors */ WREG32(sts_addr, sts_clr_val); + + return error_count; } -static void gaudi2_handle_mme_err(struct hl_device *hdev, u8 mme_index, const char *interrupt_name, - struct hl_eq_razwi_info *razwi_info, u64 *event_mask) +static int gaudi2_handle_mme_err(struct hl_device *hdev, u8 mme_index, u16 event_type, + u64 *event_mask) { - u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0; + u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0, error_count = 0; int i; sts_addr = mmDCORE0_MME_CTRL_LO_INTR_CAUSE + DCORE_OFFSET * mme_index; @@ -8066,35 +8230,45 @@ static void gaudi2_handle_mme_err(struct hl_device *hdev, u8 mme_index, const ch for (i = 0 ; i < GAUDI2_NUM_OF_MME_ERR_CAUSE ; i++) { if (sts_val & BIT(i)) { - dev_err_ratelimited(hdev->dev, "MME%u_%s err cause: %s\n", - mme_index, interrupt_name, guadi2_mme_error_cause[i]); + gaudi2_print_event(hdev, 
event_type, true, + "err cause: %s", guadi2_mme_error_cause[i]); sts_clr_val |= BIT(i); + error_count++; } } /* check if RAZWI happened */ for (i = MME_WRITE ; i < MME_INITIATORS_MAX ; i++) - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, i, razwi_info, - event_mask); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, i, event_mask); + + hl_check_for_glbl_errors(hdev); WREG32(sts_clr_addr, sts_clr_val); + + return error_count; } -static void gaudi2_handle_mme_sbte_err(struct hl_device *hdev, u8 mme_index, u8 sbte_index, +static int gaudi2_handle_mme_sbte_err(struct hl_device *hdev, u16 event_type, u64 intr_cause_data) { - int i; + int i, error_count = 0; for (i = 0 ; i < GAUDI2_NUM_OF_MME_SBTE_ERR_CAUSE ; i++) - if (intr_cause_data & BIT(i)) - dev_err_ratelimited(hdev->dev, "MME%uSBTE%u_AXI_ERR_RSP err cause: %s\n", - mme_index, sbte_index, guadi2_mme_sbte_error_cause[i]); + if (intr_cause_data & BIT(i)) { + gaudi2_print_event(hdev, event_type, true, + "err cause: %s", guadi2_mme_sbte_error_cause[i]); + error_count++; + } + + hl_check_for_glbl_errors(hdev); + + return error_count; } -static void gaudi2_handle_mme_wap_err(struct hl_device *hdev, u8 mme_index, - struct hl_eq_razwi_info *razwi_info, u64 *event_mask) +static int gaudi2_handle_mme_wap_err(struct hl_device *hdev, u8 mme_index, u16 event_type, + u64 *event_mask) { - u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0; + u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0, error_count = 0; int i; sts_addr = mmDCORE0_MME_ACC_INTR_CAUSE + DCORE_OFFSET * mme_index; @@ -8104,24 +8278,27 @@ static void gaudi2_handle_mme_wap_err(struct hl_device *hdev, u8 mme_index, for (i = 0 ; i < GAUDI2_NUM_OF_MME_WAP_ERR_CAUSE ; i++) { if (sts_val & BIT(i)) { - dev_err_ratelimited(hdev->dev, - "MME%u_WAP_SOURCE_RESULT_INVALID err cause: %s\n", - mme_index, guadi2_mme_wap_error_cause[i]); + gaudi2_print_event(hdev, event_type, true, + "err cause: %s", guadi2_mme_wap_error_cause[i]); sts_clr_val |= BIT(i); + error_count++; } } /* check if RAZWI happened on WAP0/1 */ - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP0, razwi_info, - event_mask); - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP1, razwi_info, - event_mask); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP0, event_mask); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP1, event_mask); + hl_check_for_glbl_errors(hdev); WREG32(sts_clr_addr, sts_clr_val); + + return error_count; } -static void gaudi2_handle_kdma_core_event(struct hl_device *hdev, u64 intr_cause_data) +static int gaudi2_handle_kdma_core_event(struct hl_device *hdev, u16 event_type, + u64 intr_cause_data) { + u32 error_count = 0; int i; /* If an AXI read or write error is received, an error is reported and @@ -8130,19 +8307,33 @@ static void gaudi2_handle_kdma_core_event(struct hl_device *hdev, u64 intr_cause * the actual error caused by a LBW KDMA transaction. 
*/ for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++) - if (intr_cause_data & BIT(i)) - dev_err_ratelimited(hdev->dev, "kdma core err cause: %s\n", - gaudi2_kdma_core_interrupts_cause[i]); + if (intr_cause_data & BIT(i)) { + gaudi2_print_event(hdev, event_type, true, + "err cause: %s", gaudi2_kdma_core_interrupts_cause[i]); + error_count++; + } + + hl_check_for_glbl_errors(hdev); + + return error_count; } -static void gaudi2_handle_dma_core_event(struct hl_device *hdev, u64 intr_cause_data) +static int gaudi2_handle_dma_core_event(struct hl_device *hdev, u16 event_type, + u64 intr_cause_data) { + u32 error_count = 0; int i; for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++) - if (intr_cause_data & BIT(i)) - dev_err_ratelimited(hdev->dev, "dma core err cause: %s\n", - gaudi2_dma_core_interrupts_cause[i]); + if (intr_cause_data & BIT(i)) { + gaudi2_print_event(hdev, event_type, true, + "err cause: %s", gaudi2_dma_core_interrupts_cause[i]); + error_count++; + } + + hl_check_for_glbl_errors(hdev); + + return error_count; } static void gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(struct hl_device *hdev, u64 *event_mask) @@ -8151,86 +8342,98 @@ static void gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(struct hl_device *hdev, razwi_happened_addr = mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED; if (RREG32(razwi_happened_addr)) { - gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE", true, - NULL, GAUDI2_ENGINE_ID_PCIE, event_mask); + gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE", + GAUDI2_ENGINE_ID_PCIE, event_mask); WREG32(razwi_happened_addr, 0x1); } razwi_happened_addr = mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED; if (RREG32(razwi_happened_addr)) { - gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE", true, - NULL, GAUDI2_ENGINE_ID_PCIE, event_mask); + gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE", + GAUDI2_ENGINE_ID_PCIE, event_mask); WREG32(razwi_happened_addr, 0x1); } razwi_happened_addr = mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED; if (RREG32(razwi_happened_addr)) { - gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE", true, - NULL, GAUDI2_ENGINE_ID_PCIE, event_mask); + gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE", + GAUDI2_ENGINE_ID_PCIE, event_mask); WREG32(razwi_happened_addr, 0x1); } razwi_happened_addr = mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED; if (RREG32(razwi_happened_addr)) { - gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE", true, - NULL, GAUDI2_ENGINE_ID_PCIE, event_mask); + gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE", + GAUDI2_ENGINE_ID_PCIE, event_mask); WREG32(razwi_happened_addr, 0x1); } } -static void gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u64 intr_cause_data, - u64 *event_mask) +static int gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u16 event_type, + u64 intr_cause_data, u64 *event_mask) { + u32 error_count = 0; int i; for (i = 0 ; i < GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE ; i++) { if (!(intr_cause_data & BIT_ULL(i))) continue; - dev_err_ratelimited(hdev->dev, "PCIE ADDR DEC Error: %s\n", - gaudi2_pcie_addr_dec_error_cause[i]); + gaudi2_print_event(hdev, event_type, true, + "err cause: %s", gaudi2_pcie_addr_dec_error_cause[i]); + error_count++; switch (intr_cause_data & BIT_ULL(i)) { case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK: + 
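/* For LBW AXI errors the handler now defers to the driver-wide
 * hl_check_for_glbl_errors() helper, while the BAD_ACCESS case below keeps
 * the dedicated PCIE mstr_if RAZWI dump.
 */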
hl_check_for_glbl_errors(hdev); break; case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK: gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev, event_mask); break; } } + + return error_count; } -static void gaudi2_handle_pif_fatal(struct hl_device *hdev, u64 intr_cause_data) +static int gaudi2_handle_pif_fatal(struct hl_device *hdev, u16 event_type, + u64 intr_cause_data) { + u32 error_count = 0; int i; for (i = 0 ; i < GAUDI2_NUM_OF_PMMU_FATAL_ERR_CAUSE ; i++) { - if (intr_cause_data & BIT_ULL(i)) - dev_err_ratelimited(hdev->dev, "PMMU PIF err cause: %s\n", - gaudi2_pmmu_fatal_interrupts_cause[i]); + if (intr_cause_data & BIT_ULL(i)) { + gaudi2_print_event(hdev, event_type, true, + "err cause: %s", gaudi2_pmmu_fatal_interrupts_cause[i]); + error_count++; + } } + + return error_count; } -static void gaudi2_handle_hif_fatal(struct hl_device *hdev, u16 event_type, u64 intr_cause_data) +static int gaudi2_handle_hif_fatal(struct hl_device *hdev, u16 event_type, u64 intr_cause_data) { - u32 dcore_id, hif_id; + u32 error_count = 0; int i; - dcore_id = (event_type - GAUDI2_EVENT_HIF0_FATAL) / 4; - hif_id = (event_type - GAUDI2_EVENT_HIF0_FATAL) % 4; - for (i = 0 ; i < GAUDI2_NUM_OF_HIF_FATAL_ERR_CAUSE ; i++) { - if (intr_cause_data & BIT_ULL(i)) - dev_err_ratelimited(hdev->dev, "DCORE%u_HIF%u: %s\n", dcore_id, hif_id, - gaudi2_hif_fatal_interrupts_cause[i]); + if (intr_cause_data & BIT_ULL(i)) { + gaudi2_print_event(hdev, event_type, true, + "err cause: %s", gaudi2_hif_fatal_interrupts_cause[i]); + error_count++; + } } + + return error_count; } static void gaudi2_handle_page_error(struct hl_device *hdev, u64 mmu_base, bool is_pmmu, u64 *event_mask) { - u32 valid, val; + u32 valid, val, axid_l, axid_h; u64 addr; valid = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID)); @@ -8243,8 +8446,11 @@ static void gaudi2_handle_page_error(struct hl_device *hdev, u64 mmu_base, bool addr <<= 32; addr |= RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA)); - dev_err_ratelimited(hdev->dev, "%s page fault on va 0x%llx\n", - is_pmmu ? "PMMU" : "HMMU", addr); + axid_l = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_FAULT_ID_LSB)); + axid_h = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_FAULT_ID_MSB)); + + dev_err_ratelimited(hdev->dev, "%s page fault on va 0x%llx, transaction id 0x%llX\n", + is_pmmu ? "PMMU" : "HMMU", addr, ((u64)axid_h << 32) + axid_l); hl_handle_page_fault(hdev, addr, 0, is_pmmu, event_mask); WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE), 0); @@ -8270,18 +8476,18 @@ static void gaudi2_handle_access_error(struct hl_device *hdev, u64 mmu_base, boo WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE), 0); } -static void gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, const char *mmu_name, +static int gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, u16 event_type, u64 mmu_base, bool is_pmmu, u64 *event_mask) { - u32 spi_sei_cause, interrupt_clr = 0x0; + u32 spi_sei_cause, interrupt_clr = 0x0, error_count = 0; int i; spi_sei_cause = RREG32(mmu_base + MMU_SPI_SEI_CAUSE_OFFSET); for (i = 0 ; i < GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE ; i++) { if (spi_sei_cause & BIT(i)) { - dev_err_ratelimited(hdev->dev, "%s SPI_SEI ERR. 
err cause: %s\n", - mmu_name, gaudi2_mmu_spi_sei[i].cause); + gaudi2_print_event(hdev, event_type, true, + "err cause: %s", gaudi2_mmu_spi_sei[i].cause); if (i == 0) gaudi2_handle_page_error(hdev, mmu_base, is_pmmu, event_mask); @@ -8290,6 +8496,8 @@ static void gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, const char if (gaudi2_mmu_spi_sei[i].clear_bit >= 0) interrupt_clr |= BIT(gaudi2_mmu_spi_sei[i].clear_bit); + + error_count++; } } @@ -8298,12 +8506,14 @@ static void gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, const char /* Clear interrupt */ WREG32(mmu_base + MMU_INTERRUPT_CLR_OFFSET, interrupt_clr); + + return error_count; } -static void gaudi2_handle_sm_err(struct hl_device *hdev, u8 sm_index) +static int gaudi2_handle_sm_err(struct hl_device *hdev, u16 event_type, u8 sm_index) { - u32 sei_cause_addr, sei_cause_val, sei_cause_cause, sei_cause_log; - u32 cq_intr_addr, cq_intr_val, cq_intr_queue_index; + u32 sei_cause_addr, sei_cause_val, sei_cause_cause, sei_cause_log, + cq_intr_addr, cq_intr_val, cq_intr_queue_index, error_count = 0; int i; sei_cause_addr = mmDCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE + DCORE_OFFSET * sm_index; @@ -8323,11 +8533,12 @@ static void gaudi2_handle_sm_err(struct hl_device *hdev, u8 sm_index) if (!(sei_cause_cause & BIT(i))) continue; - dev_err_ratelimited(hdev->dev, "SM%u SEI ERR. err cause: %s. %s: 0x%X\n", - sm_index, - gaudi2_sm_sei_cause[i].cause_name, - gaudi2_sm_sei_cause[i].log_name, - sei_cause_log & gaudi2_sm_sei_cause[i].log_mask); + gaudi2_print_event(hdev, event_type, true, + "err cause: %s. %s: 0x%X\n", + gaudi2_sm_sei_cause[i].cause_name, + gaudi2_sm_sei_cause[i].log_name, + sei_cause_log); + error_count++; break; } @@ -8343,16 +8554,21 @@ static void gaudi2_handle_sm_err(struct hl_device *hdev, u8 sm_index) dev_err_ratelimited(hdev->dev, "SM%u err. err cause: CQ_INTR. queue index: %u\n", sm_index, cq_intr_queue_index); + error_count++; /* Clear CQ_INTR */ WREG32(cq_intr_addr, 0); } + + hl_check_for_glbl_errors(hdev); + + return error_count; } -static void gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type, u64 *event_mask) +static int gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type, u64 *event_mask) { bool is_pmmu = false; - char desc[32]; + u32 error_count = 0; u64 mmu_base; u8 index; @@ -8360,54 +8576,49 @@ static void gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type case GAUDI2_EVENT_HMMU0_PAGE_FAULT_OR_WR_PERM ... GAUDI2_EVENT_HMMU3_SECURITY_ERROR: index = (event_type - GAUDI2_EVENT_HMMU0_PAGE_FAULT_OR_WR_PERM) / 3; mmu_base = mmDCORE0_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET; - snprintf(desc, ARRAY_SIZE(desc), "DCORE0_HMMU%d", index); break; case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_3_AXI_ERR_RSP: index = (event_type - GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP); mmu_base = mmDCORE0_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET; - snprintf(desc, ARRAY_SIZE(desc), "DCORE0_HMMU%d", index); break; case GAUDI2_EVENT_HMMU8_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_HMMU11_SECURITY_ERROR: index = (event_type - GAUDI2_EVENT_HMMU8_PAGE_FAULT_WR_PERM) / 3; mmu_base = mmDCORE1_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET; - snprintf(desc, ARRAY_SIZE(desc), "DCORE1_HMMU%d", index); break; case GAUDI2_EVENT_HMMU_8_AXI_ERR_RSP ... 
 GAUDI2_EVENT_HMMU_11_AXI_ERR_RSP:
 		index = (event_type - GAUDI2_EVENT_HMMU_8_AXI_ERR_RSP);
 		mmu_base = mmDCORE1_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
-		snprintf(desc, ARRAY_SIZE(desc), "DCORE1_HMMU%d", index);
 		break;
 	case GAUDI2_EVENT_HMMU7_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_HMMU4_SECURITY_ERROR:
 		index = (event_type - GAUDI2_EVENT_HMMU7_PAGE_FAULT_WR_PERM) / 3;
 		mmu_base = mmDCORE2_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
-		snprintf(desc, ARRAY_SIZE(desc), "DCORE2_HMMU%d", index);
 		break;
 	case GAUDI2_EVENT_HMMU_7_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_4_AXI_ERR_RSP:
 		index = (event_type - GAUDI2_EVENT_HMMU_7_AXI_ERR_RSP);
 		mmu_base = mmDCORE2_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
-		snprintf(desc, ARRAY_SIZE(desc), "DCORE2_HMMU%d", index);
 		break;
 	case GAUDI2_EVENT_HMMU15_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_HMMU12_SECURITY_ERROR:
 		index = (event_type - GAUDI2_EVENT_HMMU15_PAGE_FAULT_WR_PERM) / 3;
 		mmu_base = mmDCORE3_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
-		snprintf(desc, ARRAY_SIZE(desc), "DCORE3_HMMU%d", index);
 		break;
 	case GAUDI2_EVENT_HMMU_15_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP:
 		index = (event_type - GAUDI2_EVENT_HMMU_15_AXI_ERR_RSP);
 		mmu_base = mmDCORE3_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
-		snprintf(desc, ARRAY_SIZE(desc), "DCORE3_HMMU%d", index);
 		break;
 	case GAUDI2_EVENT_PMMU0_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_PMMU0_SECURITY_ERROR:
 	case GAUDI2_EVENT_PMMU_AXI_ERR_RSP_0:
 		is_pmmu = true;
 		mmu_base = mmPMMU_HBW_MMU_BASE;
-		snprintf(desc, ARRAY_SIZE(desc), "PMMU");
 		break;
 	default:
-		return;
+		return 0;
 	}
 
-	gaudi2_handle_mmu_spi_sei_generic(hdev, desc, mmu_base, is_pmmu, event_mask);
+	error_count = gaudi2_handle_mmu_spi_sei_generic(hdev, event_type, mmu_base,
+							is_pmmu, event_mask);
+	hl_check_for_glbl_errors(hdev);
+
+	return error_count;
 }
@@ -8527,22 +8738,17 @@ static bool gaudi2_handle_hbm_mc_sei_err(struct hl_device *hdev, u16 event_type,
 	cause_idx = sei_data->hdr.sei_cause;
 	if (cause_idx > GAUDI2_NUM_OF_HBM_SEI_CAUSE - 1) {
-		dev_err_ratelimited(hdev->dev, "Invalid HBM SEI event cause (%d) provided by FW\n",
-					cause_idx);
+		gaudi2_print_event(hdev, event_type, true,
+			"Invalid HBM SEI event cause (%d) provided by FW",
+			cause_idx);
 		return true;
 	}
 
-	if (sei_data->hdr.is_critical)
-		dev_err(hdev->dev,
-			"System Critical Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s\n",
-			hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
-			hbm_mc_sei_cause[cause_idx]);
-
-	else
-		dev_err_ratelimited(hdev->dev,
-			"System Non-Critical Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s\n",
-			hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
-			hbm_mc_sei_cause[cause_idx]);
+	gaudi2_print_event(hdev, event_type, !sei_data->hdr.is_critical,
+		"System %s Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s\n",
+		sei_data->hdr.is_critical ?
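/*
 * The former duplicated dev_err()/dev_err_ratelimited() pair collapses into
 * one gaudi2_print_event() call whose "ratelimited" argument is simply
 * !is_critical: critical HBM SEI prints are never dropped, while the
 * non-critical flavor stays ratelimited.
 */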
"Critical" : "Non-critical", + hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel, + hbm_mc_sei_cause[cause_idx]); /* Print error-specific info */ switch (cause_idx) { @@ -8586,24 +8792,33 @@ static bool gaudi2_handle_hbm_mc_sei_err(struct hl_device *hdev, u16 event_type, return require_hard_reset; } -static void gaudi2_handle_hbm_cattrip(struct hl_device *hdev, u64 intr_cause_data) +static int gaudi2_handle_hbm_cattrip(struct hl_device *hdev, u16 event_type, + u64 intr_cause_data) { - dev_err(hdev->dev, - "HBM catastrophic temperature error (CATTRIP) cause %#llx\n", - intr_cause_data); + if (intr_cause_data) { + gaudi2_print_event(hdev, event_type, true, + "temperature error cause: %#llx", intr_cause_data); + return 1; + } + + return 0; } -static void gaudi2_handle_hbm_mc_spi(struct hl_device *hdev, u64 intr_cause_data) +static int gaudi2_handle_hbm_mc_spi(struct hl_device *hdev, u64 intr_cause_data) { - u32 i; + u32 i, error_count = 0; for (i = 0 ; i < GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE ; i++) - if (intr_cause_data & hbm_mc_spi[i].mask) + if (intr_cause_data & hbm_mc_spi[i].mask) { dev_dbg(hdev->dev, "HBM spi event: notification cause(%s)\n", hbm_mc_spi[i].cause); + error_count++; + } + + return error_count; } -static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type) +static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type, u64 *event_mask) { ktime_t zero_time = ktime_set(0, 0); @@ -8615,13 +8830,13 @@ static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type) hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER; hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get(); hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time; - dev_info_ratelimited(hdev->dev, "Clock throttling due to power consumption\n"); + dev_dbg_ratelimited(hdev->dev, "Clock throttling due to power consumption\n"); break; case GAUDI2_EVENT_CPU_FIX_POWER_ENV_E: hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER; hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get(); - dev_info_ratelimited(hdev->dev, "Power envelop is safe, back to optimal clock\n"); + dev_dbg_ratelimited(hdev->dev, "Power envelop is safe, back to optimal clock\n"); break; case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_S: @@ -8629,12 +8844,14 @@ static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type) hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL; hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get(); hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time; + *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n"); break; case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E: hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL; hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get(); + *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; dev_info_ratelimited(hdev->dev, "Thermal envelop is safe, back to optimal clock\n"); break; @@ -8646,43 +8863,49 @@ static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type) mutex_unlock(&hdev->clk_throttling.lock); } -static void gaudi2_print_out_of_sync_info(struct hl_device *hdev, +static void gaudi2_print_out_of_sync_info(struct hl_device *hdev, u16 event_type, struct cpucp_pkt_sync_err *sync_err) { struct hl_hw_queue *q = 
&hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ]; - dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n", - le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci)); + gaudi2_print_event(hdev, event_type, false, + "FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n", + le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), + q->pi, atomic_read(&q->ci)); } -static void gaudi2_handle_pcie_p2p_msix(struct hl_device *hdev) +static int gaudi2_handle_pcie_p2p_msix(struct hl_device *hdev, u16 event_type) { - u32 p2p_intr, msix_gw_intr; + u32 p2p_intr, msix_gw_intr, error_count = 0; p2p_intr = RREG32(mmPCIE_WRAP_P2P_INTR); msix_gw_intr = RREG32(mmPCIE_WRAP_MSIX_GW_INTR); if (p2p_intr) { - dev_err_ratelimited(hdev->dev, + gaudi2_print_event(hdev, event_type, true, "pcie p2p transaction terminated due to security, req_id(0x%x)\n", RREG32(mmPCIE_WRAP_P2P_REQ_ID)); WREG32(mmPCIE_WRAP_P2P_INTR, 0x1); + error_count++; } if (msix_gw_intr) { - dev_err_ratelimited(hdev->dev, + gaudi2_print_event(hdev, event_type, true, "pcie msi-x gen denied due to vector num check failure, vec(0x%X)\n", RREG32(mmPCIE_WRAP_MSIX_GW_VEC)); WREG32(mmPCIE_WRAP_MSIX_GW_INTR, 0x1); + error_count++; } + + return error_count; } -static void gaudi2_handle_pcie_drain(struct hl_device *hdev, +static int gaudi2_handle_pcie_drain(struct hl_device *hdev, struct hl_eq_pcie_drain_ind_data *drain_data) { - u64 lbw_rd, lbw_wr, hbw_rd, hbw_wr, cause; + u64 lbw_rd, lbw_wr, hbw_rd, hbw_wr, cause, error_count = 0; cause = le64_to_cpu(drain_data->intr_cause.intr_cause_data); lbw_rd = le64_to_cpu(drain_data->drain_rd_addr_lbw); @@ -8690,39 +8913,52 @@ static void gaudi2_handle_pcie_drain(struct hl_device *hdev, hbw_rd = le64_to_cpu(drain_data->drain_rd_addr_hbw); hbw_wr = le64_to_cpu(drain_data->drain_wr_addr_hbw); - if (cause & BIT_ULL(0)) + if (cause & BIT_ULL(0)) { dev_err_ratelimited(hdev->dev, "PCIE AXI drain LBW completed, read_err %u, write_err %u\n", !!lbw_rd, !!lbw_wr); + error_count++; + } - if (cause & BIT_ULL(1)) + if (cause & BIT_ULL(1)) { dev_err_ratelimited(hdev->dev, "PCIE AXI drain HBW completed, raddr %#llx, waddr %#llx\n", hbw_rd, hbw_wr); + error_count++; + } + + return error_count; } -static void gaudi2_handle_psoc_drain(struct hl_device *hdev, u64 intr_cause_data) +static int gaudi2_handle_psoc_drain(struct hl_device *hdev, u64 intr_cause_data) { + u32 error_count = 0; int i; for (i = 0 ; i < GAUDI2_NUM_OF_AXI_DRAIN_ERR_CAUSE ; i++) { - if (intr_cause_data & BIT_ULL(i)) + if (intr_cause_data & BIT_ULL(i)) { dev_err_ratelimited(hdev->dev, "PSOC %s completed\n", gaudi2_psoc_axi_drain_interrupts_cause[i]); + error_count++; + } } + + hl_check_for_glbl_errors(hdev); + + return error_count; } -static void gaudi2_print_cpu_pkt_failure_info(struct hl_device *hdev, +static void gaudi2_print_cpu_pkt_failure_info(struct hl_device *hdev, u16 event_type, struct cpucp_pkt_sync_err *sync_err) { struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ]; - dev_warn(hdev->dev, + gaudi2_print_event(hdev, event_type, false, "FW reported sanity check failure, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n", le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci)); } -static void hl_arc_event_handle(struct hl_device *hdev, +static int hl_arc_event_handle(struct hl_device *hdev, u16 event_type, struct hl_eq_engine_arc_intr_data *data) { struct hl_engine_arc_dccm_queue_full_irq *q; @@ -8737,12 +8973,13 @@ static void hl_arc_event_handle(struct hl_device *hdev, case 
ENGINE_ARC_DCCM_QUEUE_FULL_IRQ: q = (struct hl_engine_arc_dccm_queue_full_irq *) &payload; - dev_err_ratelimited(hdev->dev, + gaudi2_print_event(hdev, event_type, true, "ARC DCCM Full event: EngId: %u, Intr_type: %u, Qidx: %u\n", engine_id, intr_type, q->queue_index); - break; + return 1; default: - dev_err_ratelimited(hdev->dev, "Unknown ARC event type\n"); + gaudi2_print_event(hdev, event_type, true, "Unknown ARC event type\n"); + return 0; } } @@ -8750,8 +8987,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent { struct gaudi2_device *gaudi2 = hdev->asic_specific; bool reset_required = false, is_critical = false; - u32 ctl, reset_flags = HL_DRV_RESET_HARD; - int index, sbte_index; + u32 index, ctl, reset_flags = HL_DRV_RESET_HARD, error_count = 0; u64 event_mask = 0; u16 event_type; @@ -8767,8 +9003,6 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent gaudi2->events_stat[event_type]++; gaudi2->events_stat_aggregate[event_type]++; - gaudi2_print_irq_info(hdev, event_type); - switch (event_type) { case GAUDI2_EVENT_PCIE_CORE_SERR ... GAUDI2_EVENT_ARC0_ECC_DERR: fallthrough; @@ -8777,6 +9011,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; reset_required = gaudi2_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data); is_critical = eq_entry->ecc_data.is_critical; + error_count++; break; case GAUDI2_EVENT_TPC0_QM ... GAUDI2_EVENT_PDMA1_QM: @@ -8784,48 +9019,48 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent case GAUDI2_EVENT_ROTATOR0_ROT0_QM ... GAUDI2_EVENT_ROTATOR1_ROT1_QM: fallthrough; case GAUDI2_EVENT_NIC0_QM0 ... GAUDI2_EVENT_NIC11_QM1: - gaudi2_handle_qman_err(hdev, event_type); + error_count = gaudi2_handle_qman_err(hdev, event_type, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; case GAUDI2_EVENT_ARC_AXI_ERROR_RESPONSE_0: reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; - gaudi2_handle_arc_farm_sei_err(hdev); + error_count = gaudi2_handle_arc_farm_sei_err(hdev, event_type); event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_CPU_AXI_ERR_RSP: - gaudi2_handle_cpu_sei_err(hdev); + error_count = gaudi2_handle_cpu_sei_err(hdev, event_type); event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP: case GAUDI2_EVENT_PDMA_CH1_AXI_ERR_RSP: reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; - gaudi2_handle_qm_sei_err(hdev, event_type, &eq_entry->razwi_info, &event_mask); + error_count = gaudi2_handle_qm_sei_err(hdev, event_type, true, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; case GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE: case GAUDI2_EVENT_ROTATOR1_AXI_ERROR_RESPONSE: index = event_type - GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE; - gaudi2_handle_rot_err(hdev, index, &eq_entry->razwi_with_intr_cause, &event_mask); - gaudi2_handle_qm_sei_err(hdev, event_type, NULL, &event_mask); + error_count = gaudi2_handle_rot_err(hdev, index, event_type, + &eq_entry->razwi_with_intr_cause, &event_mask); + error_count += gaudi2_handle_qm_sei_err(hdev, event_type, false, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; case GAUDI2_EVENT_TPC0_AXI_ERR_RSP ... 
GAUDI2_EVENT_TPC24_AXI_ERR_RSP: index = event_type - GAUDI2_EVENT_TPC0_AXI_ERR_RSP; - gaudi2_tpc_ack_interrupts(hdev, index, "AXI_ERR_RSP", + error_count = gaudi2_tpc_ack_interrupts(hdev, index, event_type, &eq_entry->razwi_with_intr_cause, &event_mask); - gaudi2_handle_qm_sei_err(hdev, event_type, NULL, &event_mask); + error_count += gaudi2_handle_qm_sei_err(hdev, event_type, false, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; case GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE ... GAUDI2_EVENT_DEC9_AXI_ERR_RSPONSE: index = event_type - GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE; - gaudi2_handle_dec_err(hdev, index, "AXI_ERR_RESPONSE", &eq_entry->razwi_info, - &event_mask); + error_count = gaudi2_handle_dec_err(hdev, index, event_type, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -8856,8 +9091,8 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent case GAUDI2_EVENT_TPC24_KERNEL_ERR: index = (event_type - GAUDI2_EVENT_TPC0_KERNEL_ERR) / (GAUDI2_EVENT_TPC1_KERNEL_ERR - GAUDI2_EVENT_TPC0_KERNEL_ERR); - gaudi2_tpc_ack_interrupts(hdev, index, "KRN_ERR", &eq_entry->razwi_with_intr_cause, - &event_mask); + error_count = gaudi2_tpc_ack_interrupts(hdev, index, event_type, + &eq_entry->razwi_with_intr_cause, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -8873,7 +9108,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent case GAUDI2_EVENT_DEC9_SPI: index = (event_type - GAUDI2_EVENT_DEC0_SPI) / (GAUDI2_EVENT_DEC1_SPI - GAUDI2_EVENT_DEC0_SPI); - gaudi2_handle_dec_err(hdev, index, "SPI", &eq_entry->razwi_info, &event_mask); + error_count = gaudi2_handle_dec_err(hdev, index, event_type, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -8884,9 +9119,8 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent index = (event_type - GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE) / (GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE - GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE); - gaudi2_handle_mme_err(hdev, index, - "CTRL_AXI_ERROR_RESPONSE", &eq_entry->razwi_info, &event_mask); - gaudi2_handle_qm_sei_err(hdev, event_type, NULL, &event_mask); + error_count = gaudi2_handle_mme_err(hdev, index, event_type, &event_mask); + error_count += gaudi2_handle_qm_sei_err(hdev, event_type, false, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -8897,8 +9131,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent index = (event_type - GAUDI2_EVENT_MME0_QMAN_SW_ERROR) / (GAUDI2_EVENT_MME1_QMAN_SW_ERROR - GAUDI2_EVENT_MME0_QMAN_SW_ERROR); - gaudi2_handle_mme_err(hdev, index, "QMAN_SW_ERROR", &eq_entry->razwi_info, - &event_mask); + error_count = gaudi2_handle_mme_err(hdev, index, event_type, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -8909,25 +9142,25 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent index = (event_type - GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID) / (GAUDI2_EVENT_MME1_WAP_SOURCE_RESULT_INVALID - GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID); - gaudi2_handle_mme_wap_err(hdev, index, &eq_entry->razwi_info, &event_mask); + error_count = gaudi2_handle_mme_wap_err(hdev, index, event_type, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; case GAUDI2_EVENT_KDMA_CH0_AXI_ERR_RSP: case GAUDI2_EVENT_KDMA0_CORE: - gaudi2_handle_kdma_core_event(hdev, + error_count = gaudi2_handle_kdma_core_event(hdev, event_type, 
le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_HDMA2_CORE ... GAUDI2_EVENT_PDMA1_CORE: - gaudi2_handle_dma_core_event(hdev, + error_count = gaudi2_handle_dma_core_event(hdev, event_type, le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; case GAUDI2_EVENT_PCIE_ADDR_DEC_ERR: - gaudi2_print_pcie_addr_dec_info(hdev, + error_count = gaudi2_print_pcie_addr_dec_info(hdev, event_type, le64_to_cpu(eq_entry->intr_cause.intr_cause_data), &event_mask); reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; @@ -8937,27 +9170,27 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP: case GAUDI2_EVENT_PMMU0_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_PMMU0_SECURITY_ERROR: case GAUDI2_EVENT_PMMU_AXI_ERR_RSP_0: - gaudi2_handle_mmu_spi_sei_err(hdev, event_type, &event_mask); + error_count = gaudi2_handle_mmu_spi_sei_err(hdev, event_type, &event_mask); reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; case GAUDI2_EVENT_HIF0_FATAL ... GAUDI2_EVENT_HIF12_FATAL: - gaudi2_handle_hif_fatal(hdev, event_type, + error_count = gaudi2_handle_hif_fatal(hdev, event_type, le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_PMMU_FATAL_0: - gaudi2_handle_pif_fatal(hdev, + error_count = gaudi2_handle_pif_fatal(hdev, event_type, le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_PSOC63_RAZWI_OR_PID_MIN_MAX_INTERRUPT: - gaudi2_ack_psoc_razwi_event_handler(hdev, &event_mask); + error_count = gaudi2_ack_psoc_razwi_event_handler(hdev, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -8967,33 +9200,39 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; reset_required = true; } + error_count++; break; case GAUDI2_EVENT_HBM_CATTRIP_0 ... GAUDI2_EVENT_HBM_CATTRIP_5: - gaudi2_handle_hbm_cattrip(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); + error_count = gaudi2_handle_hbm_cattrip(hdev, event_type, + le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_HBM0_MC0_SPI ... 
GAUDI2_EVENT_HBM5_MC1_SPI: - gaudi2_handle_hbm_mc_spi(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); + error_count = gaudi2_handle_hbm_mc_spi(hdev, + le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_PCIE_DRAIN_COMPLETE: - gaudi2_handle_pcie_drain(hdev, &eq_entry->pcie_drain_ind_data); + error_count = gaudi2_handle_pcie_drain(hdev, &eq_entry->pcie_drain_ind_data); event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_PSOC59_RPM_ERROR_OR_DRAIN: - gaudi2_handle_psoc_drain(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); + error_count = gaudi2_handle_psoc_drain(hdev, + le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_CPU_AXI_ECC: + error_count = GAUDI2_NA_EVENT_CAUSE; reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_CPU_L2_RAM_ECC: + error_count = GAUDI2_NA_EVENT_CAUSE; reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; @@ -9001,31 +9240,30 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent case GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME1_SBTE4_AXI_ERR_RSP: case GAUDI2_EVENT_MME2_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME2_SBTE4_AXI_ERR_RSP: case GAUDI2_EVENT_MME3_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME3_SBTE4_AXI_ERR_RSP: - index = (event_type - GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP) / - (GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP - - GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP); - sbte_index = (event_type - GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP) % - (GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP - - GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP); - gaudi2_handle_mme_sbte_err(hdev, index, sbte_index, + error_count = gaudi2_handle_mme_sbte_err(hdev, event_type, le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; case GAUDI2_EVENT_VM0_ALARM_A ... GAUDI2_EVENT_VM3_ALARM_B: + error_count = GAUDI2_NA_EVENT_CAUSE; reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_PSOC_AXI_ERR_RSP: + error_count = GAUDI2_NA_EVENT_CAUSE; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_PSOC_PRSTN_FALL: + error_count = GAUDI2_NA_EVENT_CAUSE; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_PCIE_APB_TIMEOUT: + error_count = GAUDI2_NA_EVENT_CAUSE; reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_PCIE_FATAL_ERR: + error_count = GAUDI2_NA_EVENT_CAUSE; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_TPC0_BMON_SPMU: @@ -9078,6 +9316,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent case GAUDI2_EVENT_DEC8_BMON_SPMU: case GAUDI2_EVENT_DEC9_BMON_SPMU: case GAUDI2_EVENT_ROTATOR0_BMON_SPMU ... 
GAUDI2_EVENT_SM3_BMON_SPMU: + error_count = GAUDI2_NA_EVENT_CAUSE; event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -9085,67 +9324,87 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent case GAUDI2_EVENT_CPU_FIX_POWER_ENV_E: case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_S: case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E: - gaudi2_print_clk_change_info(hdev, event_type); - event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; + gaudi2_print_clk_change_info(hdev, event_type, &event_mask); + error_count = GAUDI2_NA_EVENT_CAUSE; break; case GAUDI2_EVENT_CPU_PKT_QUEUE_OUT_SYNC: - gaudi2_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err); + gaudi2_print_out_of_sync_info(hdev, event_type, &eq_entry->pkt_sync_err); + error_count = GAUDI2_NA_EVENT_CAUSE; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_PCIE_FLR_REQUESTED: event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; + error_count = GAUDI2_NA_EVENT_CAUSE; /* Do nothing- FW will handle it */ break; case GAUDI2_EVENT_PCIE_P2P_MSIX: - gaudi2_handle_pcie_p2p_msix(hdev); + error_count = gaudi2_handle_pcie_p2p_msix(hdev, event_type); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; case GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE ... GAUDI2_EVENT_SM3_AXI_ERROR_RESPONSE: index = event_type - GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE; - gaudi2_handle_sm_err(hdev, index); + error_count = gaudi2_handle_sm_err(hdev, event_type, index); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; case GAUDI2_EVENT_PSOC_MME_PLL_LOCK_ERR ... GAUDI2_EVENT_DCORE2_HBM_PLL_LOCK_ERR: + error_count = GAUDI2_NA_EVENT_CAUSE; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_CAUSE: dev_info(hdev->dev, "CPLD shutdown cause, reset reason: 0x%llx\n", le64_to_cpu(eq_entry->data[0])); + error_count = GAUDI2_NA_EVENT_CAUSE; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_EVENT: dev_err(hdev->dev, "CPLD shutdown event, reset reason: 0x%llx\n", le64_to_cpu(eq_entry->data[0])); + error_count = GAUDI2_NA_EVENT_CAUSE; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_CPU_PKT_SANITY_FAILED: - gaudi2_print_cpu_pkt_failure_info(hdev, &eq_entry->pkt_sync_err); + gaudi2_print_cpu_pkt_failure_info(hdev, event_type, &eq_entry->pkt_sync_err); + error_count = GAUDI2_NA_EVENT_CAUSE; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI2_EVENT_ARC_DCCM_FULL: - hl_arc_event_handle(hdev, &eq_entry->arc_data); + error_count = hl_arc_event_handle(hdev, event_type, &eq_entry->arc_data); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; case GAUDI2_EVENT_CPU_FP32_NOT_SUPPORTED: + case GAUDI2_EVENT_DEV_RESET_REQ: event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; + error_count = GAUDI2_NA_EVENT_CAUSE; is_critical = true; break; default: - if (gaudi2_irq_map_table[event_type].valid) + if (gaudi2_irq_map_table[event_type].valid) { dev_err_ratelimited(hdev->dev, "Cannot find handler for event %d\n", event_type); + error_count = GAUDI2_NA_EVENT_CAUSE; + } } + /* Make sure to dump an error in case no error cause was printed so far. + * Note that although we have counted the errors, we use this number as + * a boolean. 
+ */ + if (error_count == GAUDI2_NA_EVENT_CAUSE && !is_info_event(event_type)) + gaudi2_print_event(hdev, event_type, true, "%d", event_type); + else if (error_count == 0) + gaudi2_print_event(hdev, event_type, true, + "No error cause for H/W event %u\n", event_type); + if ((gaudi2_irq_map_table[event_type].reset || reset_required) && (hdev->hard_reset_on_fw_events || (hdev->asic_prop.fw_security_enabled && is_critical))) @@ -10466,6 +10725,8 @@ static const struct hl_asic_funcs gaudi2_funcs = { .set_dram_bar_base = gaudi2_set_hbm_bar_base, .set_engine_cores = gaudi2_set_engine_cores, .send_device_activity = gaudi2_send_device_activity, + .set_dram_properties = gaudi2_set_dram_properties, + .set_binning_masks = gaudi2_set_binning_masks, }; void gaudi2_set_asic_funcs(struct hl_device *hdev) diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2P.h b/drivers/accel/habanalabs/gaudi2/gaudi2P.h similarity index 95% rename from drivers/misc/habanalabs/gaudi2/gaudi2P.h rename to drivers/accel/habanalabs/gaudi2/gaudi2P.h index b4383c199bbbab..2687404d9d2132 100644 --- a/drivers/misc/habanalabs/gaudi2/gaudi2P.h +++ b/drivers/accel/habanalabs/gaudi2/gaudi2P.h @@ -8,7 +8,7 @@ #ifndef GAUDI2P_H_ #define GAUDI2P_H_ -#include +#include #include "../common/habanalabs.h" #include "../include/common/hl_boot_if.h" #include "../include/gaudi2/gaudi2.h" @@ -240,6 +240,8 @@ #define GAUDI2_SOB_INCREMENT_BY_ONE (FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1) | \ FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1)) +#define GAUDI2_NUM_OF_GLBL_ERR_CAUSE 8 + enum gaudi2_reserved_sob_id { GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST, GAUDI2_RESERVED_SOB_CS_COMPLETION_LAST = @@ -532,6 +534,41 @@ struct gaudi2_device { u32 num_of_valid_hw_events; }; +/* + * Types of the Gaudi2 IP blocks, used by special blocks iterator. + * Required for scenarios where only particular block types can be + * addressed (e.g., special PLDM images). + */ +enum gaudi2_block_types { + GAUDI2_BLOCK_TYPE_PLL, + GAUDI2_BLOCK_TYPE_RTR, + GAUDI2_BLOCK_TYPE_CPU, + GAUDI2_BLOCK_TYPE_HIF, + GAUDI2_BLOCK_TYPE_HBM, + GAUDI2_BLOCK_TYPE_NIC, + GAUDI2_BLOCK_TYPE_PCIE, + GAUDI2_BLOCK_TYPE_PCIE_PMA, + GAUDI2_BLOCK_TYPE_PDMA, + GAUDI2_BLOCK_TYPE_EDMA, + GAUDI2_BLOCK_TYPE_PMMU, + GAUDI2_BLOCK_TYPE_PSOC, + GAUDI2_BLOCK_TYPE_ROT, + GAUDI2_BLOCK_TYPE_ARC_FARM, + GAUDI2_BLOCK_TYPE_DEC, + GAUDI2_BLOCK_TYPE_MME, + GAUDI2_BLOCK_TYPE_EU_BIST, + GAUDI2_BLOCK_TYPE_SYNC_MNGR, + GAUDI2_BLOCK_TYPE_STLB, + GAUDI2_BLOCK_TYPE_TPC, + GAUDI2_BLOCK_TYPE_HMMU, + GAUDI2_BLOCK_TYPE_SRAM, + GAUDI2_BLOCK_TYPE_XBAR, + GAUDI2_BLOCK_TYPE_KDMA, + GAUDI2_BLOCK_TYPE_XDMA, + GAUDI2_BLOCK_TYPE_XFT, + GAUDI2_BLOCK_TYPE_MAX +}; + extern const u32 gaudi2_dma_core_blocks_bases[DMA_CORE_ID_SIZE]; extern const u32 gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_SIZE]; extern const u32 gaudi2_mme_acc_blocks_bases[MME_ID_SIZE]; diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_coresight.c b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c similarity index 99% rename from drivers/misc/habanalabs/gaudi2/gaudi2_coresight.c rename to drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c index 56c6ab692482f9..1dfbe293ececf9 100644 --- a/drivers/misc/habanalabs/gaudi2/gaudi2_coresight.c +++ b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c @@ -5,7 +5,7 @@ * All Rights Reserved. 
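/*
 * A minimal sketch of the error-count convention used by gaudi2_handle_eqe()
 * above, with hypothetical demo_* names: every handler returns how many
 * error causes it actually reported, GAUDI2_NA_EVENT_CAUSE (modelled here
 * as NA_EVENT_CAUSE) marks events that carry no per-cause data, and the
 * caller uses the count only as a boolean so that every H/W event ends up
 * with at least one log line.
 */
#include <stdio.h>

#define NA_EVENT_CAUSE 0xFFFFFFFFu	/* assumption: stands in for GAUDI2_NA_EVENT_CAUSE */

static unsigned int demo_handle_spi(unsigned long long cause,
				    const char * const names[], int n)
{
	unsigned int error_count = 0;
	int i;

	for (i = 0; i < n; i++)
		if (cause & (1ULL << i)) {
			printf("cause: %s\n", names[i]);
			error_count++;
		}

	return error_count;
}

static void demo_handle_event(int event_type, unsigned long long cause)
{
	static const char * const names[] = { "overrun", "parity", "timeout" };
	unsigned int error_count;

	switch (event_type) {
	case 0:
		error_count = demo_handle_spi(cause, names, 3);
		break;
	default:
		error_count = NA_EVENT_CAUSE;	/* no cause breakdown exists */
		break;
	}

	if (error_count == NA_EVENT_CAUSE)
		return;		/* the real driver prints these via the generic event line */
	if (error_count == 0)
		printf("No error cause for H/W event %d\n", event_type);
}

int main(void)
{
	demo_handle_event(0, 0x5);	/* two causes printed */
	demo_handle_event(0, 0x0);	/* fallback line printed */
	demo_handle_event(7, 0x0);	/* NA: silent in this sketch */
	return 0;
}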
*/ #include "gaudi2_coresight_regs.h" -#include +#include #define GAUDI2_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 2000) #define SPMU_MAX_COUNTERS 6 @@ -2376,10 +2376,10 @@ static int gaudi2_config_bmon(struct hl_device *hdev, struct hl_debug_params *pa WREG32(base_reg + mmBMON_ADDRH_S2_OFFSET, upper_32_bits(input->start_addr2)); WREG32(base_reg + mmBMON_ADDRL_E2_OFFSET, lower_32_bits(input->end_addr2)); WREG32(base_reg + mmBMON_ADDRH_E2_OFFSET, upper_32_bits(input->end_addr2)); - WREG32(base_reg + mmBMON_ADDRL_S3_OFFSET, lower_32_bits(input->start_addr2)); - WREG32(base_reg + mmBMON_ADDRH_S3_OFFSET, upper_32_bits(input->start_addr2)); - WREG32(base_reg + mmBMON_ADDRL_E3_OFFSET, lower_32_bits(input->end_addr2)); - WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, upper_32_bits(input->end_addr2)); + WREG32(base_reg + mmBMON_ADDRL_S3_OFFSET, lower_32_bits(input->start_addr3)); + WREG32(base_reg + mmBMON_ADDRH_S3_OFFSET, upper_32_bits(input->start_addr3)); + WREG32(base_reg + mmBMON_ADDRL_E3_OFFSET, lower_32_bits(input->end_addr3)); + WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, upper_32_bits(input->end_addr3)); WREG32(base_reg + mmBMON_IDL_OFFSET, 0x0); WREG32(base_reg + mmBMON_IDH_OFFSET, 0x0); diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_coresight_regs.h b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight_regs.h similarity index 100% rename from drivers/misc/habanalabs/gaudi2/gaudi2_coresight_regs.h rename to drivers/accel/habanalabs/gaudi2/gaudi2_coresight_regs.h diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_masks.h b/drivers/accel/habanalabs/gaudi2/gaudi2_masks.h similarity index 100% rename from drivers/misc/habanalabs/gaudi2/gaudi2_masks.h rename to drivers/accel/habanalabs/gaudi2/gaudi2_masks.h diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_security.c b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c similarity index 99% rename from drivers/misc/habanalabs/gaudi2/gaudi2_security.c rename to drivers/accel/habanalabs/gaudi2/gaudi2_security.c index 768c2f3dc90039..a212f82e660483 100644 --- a/drivers/misc/habanalabs/gaudi2/gaudi2_security.c +++ b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c @@ -1561,6 +1561,7 @@ static const u32 gaudi2_pb_dcr0_tpc0_unsecured_regs[] = { mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI, mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO, mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI, + mmDCORE0_TPC0_CFG_KERNEL_KERNEL_CONFIG, mmDCORE0_TPC0_CFG_KERNEL_SRF_0, mmDCORE0_TPC0_CFG_KERNEL_SRF_1, mmDCORE0_TPC0_CFG_KERNEL_SRF_2, @@ -1666,6 +1667,10 @@ static const u32 gaudi2_pb_dcr0_sm_glbl[] = { mmDCORE0_SYNC_MNGR_GLBL_BASE, }; +static const u32 gaudi2_pb_dcr1_sm_glbl[] = { + mmDCORE1_SYNC_MNGR_GLBL_BASE, +}; + static const struct range gaudi2_pb_dcr0_sm_glbl_unsecured_regs[] = { {mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_1, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63}, {mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_1, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63}, @@ -1678,14 +1683,14 @@ static const struct range gaudi2_pb_dcr0_sm_glbl_unsecured_regs[] = { }; static const struct range gaudi2_pb_dcr_x_sm_glbl_unsecured_regs[] = { - {mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63}, - {mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63}, - {mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_63}, - {mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_63}, - {mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0, mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_63}, - 
{mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0, mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_63}, - {mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_0, mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_63}, - {mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_63}, + {mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63}, + {mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63}, + {mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_63}, + {mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_63}, + {mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_0, mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_63}, + {mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_0, mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_63}, + {mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_0, mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_63}, + {mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_63}, }; static const u32 gaudi2_pb_arc_sched[] = { @@ -3358,14 +3363,6 @@ static int gaudi2_init_protection_bits(struct hl_device *hdev) /* Sync Manager GLBL */ - /* Unsecure all CQ registers */ - rc |= hl_init_pb_ranges(hdev, NUM_OF_DCORES, DCORE_OFFSET, - HL_PB_SINGLE_INSTANCE, HL_PB_NA, - gaudi2_pb_dcr0_sm_glbl, - ARRAY_SIZE(gaudi2_pb_dcr0_sm_glbl), - gaudi2_pb_dcr_x_sm_glbl_unsecured_regs, - ARRAY_SIZE(gaudi2_pb_dcr_x_sm_glbl_unsecured_regs)); - /* Secure Dcore0 CQ0 registers */ rc |= hl_init_pb_ranges(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA, @@ -3374,6 +3371,14 @@ static int gaudi2_init_protection_bits(struct hl_device *hdev) gaudi2_pb_dcr0_sm_glbl_unsecured_regs, ARRAY_SIZE(gaudi2_pb_dcr0_sm_glbl_unsecured_regs)); + /* Unsecure all other CQ registers */ + rc |= hl_init_pb_ranges(hdev, NUM_OF_DCORES - 1, DCORE_OFFSET, + HL_PB_SINGLE_INSTANCE, HL_PB_NA, + gaudi2_pb_dcr1_sm_glbl, + ARRAY_SIZE(gaudi2_pb_dcr1_sm_glbl), + gaudi2_pb_dcr_x_sm_glbl_unsecured_regs, + ARRAY_SIZE(gaudi2_pb_dcr_x_sm_glbl_unsecured_regs)); + /* PSOC. * Except for PSOC_GLOBAL_CONF, skip when security is enabled in F/W, because the blocks are * protected by privileged RR. 
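/*
 * A minimal sketch of the per-dcore unsecure pattern applied above
 * (demo_* names are hypothetical, not the hl_init_pb_ranges() API):
 * Dcore0 keeps its CQ0 registers secured through a dedicated table,
 * while one shared range table is replayed for the remaining dcores by
 * striding a base address, hence the NUM_OF_DCORES - 1 instances rooted
 * at the DCORE1 block in the hunk above.
 */
#include <stdio.h>

struct demo_range { unsigned int start, end; };	/* offsets within one instance */

static void demo_unsecure_ranges(unsigned int instance_base, unsigned int stride,
				 int num_instances,
				 const struct demo_range *ranges, int num_ranges)
{
	int i, r;

	for (i = 0; i < num_instances; i++)
		for (r = 0; r < num_ranges; r++)
			printf("unsecure [%#x..%#x]\n",
			       instance_base + i * stride + ranges[r].start,
			       instance_base + i * stride + ranges[r].end);
}

int main(void)
{
	/* Toy addresses; the real bases and strides come from the asic_reg headers. */
	static const struct demo_range cq_regs[] = { { 0x000, 0x0fc }, { 0x200, 0x2fc } };

	demo_unsecure_ranges(0x41000000, 0x02000000, 3 /* NUM_OF_DCORES - 1 */,
			     cq_regs, 2);
	return 0;
}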
diff --git a/drivers/misc/habanalabs/goya/Makefile b/drivers/accel/habanalabs/goya/Makefile similarity index 100% rename from drivers/misc/habanalabs/goya/Makefile rename to drivers/accel/habanalabs/goya/Makefile diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/accel/habanalabs/goya/goya.c similarity index 99% rename from drivers/misc/habanalabs/goya/goya.c rename to drivers/accel/habanalabs/goya/goya.c index 0f083fcf81a6b1..2b135e856607da 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/accel/habanalabs/goya/goya.c @@ -5420,6 +5420,16 @@ static int goya_scrub_device_dram(struct hl_device *hdev, u64 val) return -EOPNOTSUPP; } +static int goya_set_dram_properties(struct hl_device *hdev) +{ + return 0; +} + +static int goya_set_binning_masks(struct hl_device *hdev) +{ + return 0; +} + static int goya_send_device_activity(struct hl_device *hdev, bool open) { return 0; @@ -5518,6 +5528,8 @@ static const struct hl_asic_funcs goya_funcs = { .access_dev_mem = hl_access_dev_mem, .set_dram_bar_base = goya_set_ddr_bar_base, .send_device_activity = goya_send_device_activity, + .set_dram_properties = goya_set_dram_properties, + .set_binning_masks = goya_set_binning_masks, }; /* diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/accel/habanalabs/goya/goyaP.h similarity index 99% rename from drivers/misc/habanalabs/goya/goyaP.h rename to drivers/accel/habanalabs/goya/goyaP.h index d6ec43d6f6b0d4..5df3d30b91fdd1 100644 --- a/drivers/misc/habanalabs/goya/goyaP.h +++ b/drivers/accel/habanalabs/goya/goyaP.h @@ -8,7 +8,7 @@ #ifndef GOYAP_H_ #define GOYAP_H_ -#include +#include #include "../common/habanalabs.h" #include "../include/common/hl_boot_if.h" #include "../include/goya/goya_packets.h" diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/accel/habanalabs/goya/goya_coresight.c similarity index 99% rename from drivers/misc/habanalabs/goya/goya_coresight.c rename to drivers/accel/habanalabs/goya/goya_coresight.c index 2c5133cfae6597..e7ac3046cfaae1 100644 --- a/drivers/misc/habanalabs/goya/goya_coresight.c +++ b/drivers/accel/habanalabs/goya/goya_coresight.c @@ -10,7 +10,7 @@ #include "../include/goya/asic_reg/goya_regs.h" #include "../include/goya/asic_reg/goya_masks.h" -#include +#include #define GOYA_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 100) diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/accel/habanalabs/goya/goya_hwmgr.c similarity index 100% rename from drivers/misc/habanalabs/goya/goya_hwmgr.c rename to drivers/accel/habanalabs/goya/goya_hwmgr.c diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/accel/habanalabs/goya/goya_security.c similarity index 100% rename from drivers/misc/habanalabs/goya/goya_security.c rename to drivers/accel/habanalabs/goya/goya_security.c diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/accel/habanalabs/include/common/cpucp_if.h similarity index 94% rename from drivers/misc/habanalabs/include/common/cpucp_if.h rename to drivers/accel/habanalabs/include/common/cpucp_if.h index baa5aa43b6f490..d713252a4f1334 100644 --- a/drivers/misc/habanalabs/include/common/cpucp_if.h +++ b/drivers/accel/habanalabs/include/common/cpucp_if.h @@ -344,6 +344,16 @@ struct hl_eq_engine_arc_intr_data { __le64 pad[5]; }; +#define ADDR_DEC_ADDRESS_COUNT_MAX 4 + +/* Data structure specifies details of ADDR_DEC interrupt */ +struct hl_eq_addr_dec_intr_data { + struct hl_eq_intr_cause intr_cause; + __le64 addr[ADDR_DEC_ADDRESS_COUNT_MAX]; + __u8 addr_cnt; + __u8 pad[7]; +}; + 
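/*
 * A minimal sketch of the layout discipline behind the
 * hl_eq_addr_dec_intr_data definition just above (demo_* names are
 * hypothetical): event-queue payloads are fixed-size little-endian
 * records, so the one-byte address counter is followed by explicit
 * padding, and a compile-time check keeps the record a multiple of
 * 8 bytes so the ABI cannot drift silently.
 */
#include <stdint.h>

#define DEMO_ADDR_COUNT_MAX 4

struct demo_addr_dec_data {
	uint64_t intr_cause;			/* stands in for struct hl_eq_intr_cause */
	uint64_t addr[DEMO_ADDR_COUNT_MAX];	/* __le64 addresses on the wire */
	uint8_t addr_cnt;			/* number of valid entries in addr[] */
	uint8_t pad[7];				/* explicit padding, no compiler surprises */
};

/* Growing the struct without fixing pad[] breaks the build, not the ABI. */
_Static_assert(sizeof(struct demo_addr_dec_data) % 8 == 0,
	       "EQ payload must stay 64-bit aligned");

int main(void) { return 0; }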
struct hl_eq_entry { struct hl_eq_header hdr; union { @@ -358,6 +368,7 @@ struct hl_eq_entry { struct hl_eq_razwi_with_intr_cause razwi_with_intr_cause; struct hl_eq_hbm_sei_data sei_data; /* Gaudi2 HBM */ struct hl_eq_engine_arc_intr_data arc_data; + struct hl_eq_addr_dec_intr_data addr_dec; __le64 data[7]; }; }; @@ -643,6 +654,10 @@ enum pq_init_status { * data corruption in case of mismatched driver/FW versions. * Relevant only to Gaudi. * + * CPUCP_PACKET_GENERIC_PASSTHROUGH - + * Generic opcode for all firmware info that is only passed to host + * through the LKD, without getting parsed there. + * * CPUCP_PACKET_ACTIVE_STATUS_SET - * LKD sends FW indication whether device is free or in use, this indication is reported * also to the BMC. @@ -704,9 +719,12 @@ enum cpucp_packet_id { CPUCP_PACKET_RESERVED5, /* not used */ CPUCP_PACKET_RESERVED6, /* not used */ CPUCP_PACKET_RESERVED7, /* not used */ + CPUCP_PACKET_GENERIC_PASSTHROUGH, /* IOCTL */ CPUCP_PACKET_RESERVED8, /* not used */ - CPUCP_PACKET_RESERVED9, /* not used */ CPUCP_PACKET_ACTIVE_STATUS_SET, /* internal */ + CPUCP_PACKET_RESERVED9, /* not used */ + CPUCP_PACKET_RESERVED10, /* not used */ + CPUCP_PACKET_RESERVED11, /* not used */ CPUCP_PACKET_ID_MAX /* must be last */ }; @@ -727,6 +745,11 @@ enum cpucp_packet_id { #define CPUCP_PKT_RES_PLL_OUT3_SHIFT 48 #define CPUCP_PKT_RES_PLL_OUT3_MASK 0xFFFF000000000000ull +#define CPUCP_PKT_RES_EEPROM_OUT0_SHIFT 0 +#define CPUCP_PKT_RES_EEPROM_OUT0_MASK 0x000000000000FFFFull +#define CPUCP_PKT_RES_EEPROM_OUT1_SHIFT 16 +#define CPUCP_PKT_RES_EEPROM_OUT1_MASK 0x0000000000FF0000ull + #define CPUCP_PKT_VAL_PFC_IN1_SHIFT 0 #define CPUCP_PKT_VAL_PFC_IN1_MASK 0x0000000000000001ull #define CPUCP_PKT_VAL_PFC_IN2_SHIFT 1 @@ -805,8 +828,13 @@ struct cpucp_packet { __le32 nonce; }; - /* For NIC requests */ - __le32 port_index; + union { + /* For NIC requests */ + __le32 port_index; + + /* For Generic packet sub index */ + __le32 pkt_subidx; + }; }; struct cpucp_unmask_irq_arr_packet { @@ -881,7 +909,9 @@ enum cpucp_in_attributes { cpucp_in_max, cpucp_in_lowest = 6, cpucp_in_highest = 7, - cpucp_in_reset_history + cpucp_in_reset_history, + cpucp_in_intr_alarm_a, + cpucp_in_intr_alarm_b, }; enum cpucp_curr_attributes { @@ -976,6 +1006,11 @@ enum pll_index { IC_PLL = 16, MC_PLL = 17, EMMC_PLL = 18, + D2D_PLL = 19, + CS_PLL = 20, + C2C_PLL = 21, + NCH_PLL = 22, + C2M_PLL = 23, PLL_MAX }; @@ -1135,8 +1170,9 @@ enum cpucp_serdes_type { HLS1_SERDES_TYPE, HLS1H_SERDES_TYPE, HLS2_SERDES_TYPE, - UNKNOWN_SERDES_TYPE, - MAX_NUM_SERDES_TYPE = UNKNOWN_SERDES_TYPE + HLS2_TYPE_1_SERDES_TYPE, + MAX_NUM_SERDES_TYPE, /* number of types */ + UNKNOWN_SERDES_TYPE = 0xFFFF /* serdes_type is u16 */ }; struct cpucp_nic_info { @@ -1160,6 +1196,21 @@ struct page_discard_info { __le32 mmu_page_idx[PAGE_DISCARD_MAX]; }; +/* + * struct frac_val - fracture value represented by "integer.frac". + * @integer: the integer part of the fracture value; + * @frac: the fracture part of the fracture value. + */ +struct frac_val { + union { + struct { + __le16 integer; + __le16 frac; + }; + __le32 val; + }; +}; + /* * struct ser_val - the SER (symbol error rate) value is represented by "integer * 10 ^ -exp". * @integer: the integer part of the SER value; @@ -1183,8 +1234,12 @@ struct ser_val { * @pcs_link: has PCS link. * @phy_ready: is PHY ready. * @auto_neg: is Autoneg enabled. - * @timeout_retransmission_cnt: timeout retransmission events - * @high_ber_cnt: high ber events + * @timeout_retransmission_cnt: timeout retransmission events. 
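/*
 * A minimal sketch of the frac_val encoding documented above: the
 * "integer.frac" pair shares one 32-bit word, with the integer part in
 * the low half and the fraction in the high half on a little-endian
 * host (matching the member order of the union). demo_* names are
 * hypothetical, and the 1/65536 fraction step is an assumption here,
 * since the exact scale is defined by the firmware.
 */
#include <stdint.h>
#include <stdio.h>

static double demo_frac_val_to_double(uint32_t val)
{
	uint16_t integer = val & 0xffff;	/* first member: low 16 bits */
	uint16_t frac = val >> 16;		/* second member: high 16 bits */

	return integer + frac / 65536.0;	/* assumed fraction scale */
}

int main(void)
{
	/* 3 + 0x8000/65536 = 3.500 under the assumed scale */
	printf("%.3f\n", demo_frac_val_to_double(0x80000003u));
	return 0;
}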
+ * @high_ber_cnt: high ber events. + * @pre_fec_ser: pre FEC SER value. + * @post_fec_ser: post FEC SER value. + * @throughput: measured throughput. + * @latency: measured latency. */ struct cpucp_nic_status { __le32 port; @@ -1200,6 +1255,10 @@ struct cpucp_nic_status { __u8 auto_neg; __le32 timeout_retransmission_cnt; __le32 high_ber_cnt; + struct ser_val pre_fec_ser; + struct ser_val post_fec_ser; + struct frac_val bandwidth; + struct frac_val lat; }; enum cpucp_hbm_row_replace_cause { @@ -1292,6 +1351,7 @@ struct cpucp_dev_info_signed { __u8 certificate[SEC_CERTIFICATE_BUF_SZ]; }; +#define DCORE_MON_REGS_SZ 512 /* * struct dcore_monitor_regs_data - DCORE monitor regs data. * the structure follows sync manager block layout. relevant only to Gaudi. @@ -1302,11 +1362,11 @@ struct cpucp_dev_info_signed { * @mon_status: array of monitor status. */ struct dcore_monitor_regs_data { - __le32 mon_pay_addrl[512]; - __le32 mon_pay_addrh[512]; - __le32 mon_pay_data[512]; - __le32 mon_arm[512]; - __le32 mon_status[512]; + __le32 mon_pay_addrl[DCORE_MON_REGS_SZ]; + __le32 mon_pay_addrh[DCORE_MON_REGS_SZ]; + __le32 mon_pay_data[DCORE_MON_REGS_SZ]; + __le32 mon_arm[DCORE_MON_REGS_SZ]; + __le32 mon_status[DCORE_MON_REGS_SZ]; }; /* contains SM data for each SYNC_MNGR (relevant only to Gaudi) */ @@ -1317,4 +1377,14 @@ struct cpucp_monitor_dump { struct dcore_monitor_regs_data sync_mngr_e_n; }; +/* + * The Type of the generic request (and other input arguments) will be fetched from user by reading + * from "pkt_subidx" field in struct cpucp_packet. + * + * HL_PASSTHROUGHT_VERSIONS - Fetch all firmware versions. + */ +enum hl_passthrough_type { + HL_PASSTHROUGH_VERSIONS, +}; + #endif /* CPUCP_IF_H */ diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/accel/habanalabs/include/common/hl_boot_if.h similarity index 83% rename from drivers/misc/habanalabs/include/common/hl_boot_if.h rename to drivers/accel/habanalabs/include/common/hl_boot_if.h index e0ea51cc74756c..2256add057c514 100644 --- a/drivers/misc/habanalabs/include/common/hl_boot_if.h +++ b/drivers/accel/habanalabs/include/common/hl_boot_if.h @@ -40,6 +40,19 @@ enum cpu_boot_err { CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */ }; +/* + * Mask for fatal failures + * This mask contains all possible fatal failures, and a dynamic code + * will clear the non-relevant ones. 
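/*
 * A minimal sketch of the fatal-mask idea described in the comment
 * above (demo_* names are hypothetical): the header publishes one mask
 * with every potentially fatal boot error set, and per-ASIC code simply
 * clears the bits that cannot apply, e.g. DRAM failures on a device
 * without DRAM, before testing the boot error register against it.
 */
#include <stdio.h>

#define DEMO_ERR_DRAM_INIT_FAIL	(1u << 0)
#define DEMO_ERR_PLL_FAIL	(1u << 1)
#define DEMO_ERR_EEPROM_FAIL	(1u << 2)
#define DEMO_ERR_FATAL_MASK	(DEMO_ERR_DRAM_INIT_FAIL | DEMO_ERR_PLL_FAIL | \
				 DEMO_ERR_EEPROM_FAIL)

int main(void)
{
	unsigned int fatal_mask = DEMO_ERR_FATAL_MASK;
	unsigned int boot_err = DEMO_ERR_DRAM_INIT_FAIL;

	fatal_mask &= ~DEMO_ERR_DRAM_INIT_FAIL;	/* this ASIC has no DRAM: not fatal */

	if (boot_err & fatal_mask)
		printf("fatal boot error: %#x\n", boot_err & fatal_mask);
	else
		printf("boot errors %#x are survivable here\n", boot_err);
	return 0;
}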
+ */ +#define CPU_BOOT_ERR_FATAL_MASK \ + ((1 << CPU_BOOT_ERR_DRAM_INIT_FAIL) | \ + (1 << CPU_BOOT_ERR_PLL_FAIL) | \ + (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL) | \ + (1 << CPU_BOOT_ERR_BINNING_FAIL) | \ + (1 << CPU_BOOT_ERR_DRAM_SKIPPED) | \ + (1 << CPU_BOOT_ERR_EEPROM_FAIL)) + /* * CPU error bits in BOOT_ERROR registers * @@ -439,7 +452,7 @@ struct cpu_dyn_regs { /* TODO: remove the desc magic after the code is updated to use message */ /* HCDM - Habana Communications Descriptor Magic */ #define HL_COMMS_DESC_MAGIC 0x4843444D -#define HL_COMMS_DESC_VER 1 +#define HL_COMMS_DESC_VER 3 /* HCMv - Habana Communications Message + header version */ #define HL_COMMS_MSG_MAGIC_VALUE 0x48434D00 @@ -450,8 +463,10 @@ struct cpu_dyn_regs { ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK)) #define HL_COMMS_MSG_MAGIC_V0 HL_COMMS_DESC_MAGIC #define HL_COMMS_MSG_MAGIC_V1 HL_COMMS_MSG_MAGIC_VER(1) +#define HL_COMMS_MSG_MAGIC_V2 HL_COMMS_MSG_MAGIC_VER(2) +#define HL_COMMS_MSG_MAGIC_V3 HL_COMMS_MSG_MAGIC_VER(3) -#define HL_COMMS_MSG_MAGIC HL_COMMS_MSG_MAGIC_V1 +#define HL_COMMS_MSG_MAGIC HL_COMMS_MSG_MAGIC_V3 #define HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC(magic) \ (((magic) & HL_COMMS_MSG_MAGIC_MASK) == \ @@ -474,22 +489,31 @@ enum comms_msg_type { /* * Binning information shared between LKD and FW - * @tpc_mask - TPC binning information + * @tpc_mask_l - TPC binning information lower 64 bit * @dec_mask - Decoder binning information - * @hbm_mask - HBM binning information + * @dram_mask - DRAM binning information * @edma_mask - EDMA binning information * @mme_mask_l - MME binning information lower 32 * @mme_mask_h - MME binning information upper 32 - * @reserved - reserved field for 64 bit alignment + * @rot_mask - Rotator binning information + * @xbar_mask - xBAR binning information + * @reserved - reserved field for future binning info w/o ABI change + * @tpc_mask_h - TPC binning information upper 64 bit + * @nic_mask - NIC binning information */ struct lkd_fw_binning_info { - __le64 tpc_mask; + __le64 tpc_mask_l; __le32 dec_mask; - __le32 hbm_mask; + __le32 dram_mask; __le32 edma_mask; __le32 mme_mask_l; __le32 mme_mask_h; - __le32 reserved; + __le32 rot_mask; + __le32 xbar_mask; + __le32 reserved0; + __le64 tpc_mask_h; + __le64 nic_mask; + __le32 reserved1[8]; }; /* TODO: remove this struct after the code is updated to use message */ @@ -512,6 +536,23 @@ struct comms_msg_header { __u8 reserved[4]; /* pad to 64 bit */ }; +enum lkd_fw_ascii_msg_lvls { + LKD_FW_ASCII_MSG_ERR = 0, + LKD_FW_ASCII_MSG_WRN = 1, + LKD_FW_ASCII_MSG_INF = 2, + LKD_FW_ASCII_MSG_DBG = 3, +}; + +#define LKD_FW_ASCII_MSG_MAX_LEN 128 +#define LKD_FW_ASCII_MSG_MAX 4 /* consider ABI when changing */ + +struct lkd_fw_ascii_msg { + __u8 valid; + __u8 msg_lvl; + __u8 reserved[6]; + char msg[LKD_FW_ASCII_MSG_MAX_LEN]; +}; + /* this is the main FW descriptor - consider ABI when changing */ struct lkd_fw_comms_desc { struct comms_desc_header header; @@ -521,6 +562,8 @@ struct lkd_fw_comms_desc { /* can be used for 1 more version w/o ABI change */ char reserved0[VERSION_MAX_LEN]; __le64 img_addr; /* address for next FW component load */ + struct lkd_fw_binning_info binning_info; + struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX]; }; enum comms_reset_cause { @@ -545,6 +588,8 @@ struct lkd_fw_comms_msg { char reserved0[VERSION_MAX_LEN]; /* address for next FW component load */ __le64 img_addr; + struct lkd_fw_binning_info binning_info; + struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX]; }; struct { __u8 reset_cause; @@ -552,7 +597,7 @@ struct 
lkd_fw_comms_msg { struct { __u8 fw_cfg_skip; /* 1 - skip, 0 - don't skip */ }; - struct lkd_fw_binning_info binning_info; + struct lkd_fw_binning_info binning_conf; }; }; @@ -699,4 +744,92 @@ struct comms_status { }; }; +/** + * HL_MODULES_MAX_NUM is determined by the size of modules_mask in struct + * hl_component_versions + */ +enum hl_modules { + HL_MODULES_BOOT_INFO = 0, + HL_MODULES_EEPROM, + HL_MODULES_FDT, + HL_MODULES_I2C, + HL_MODULES_LZ4, + HL_MODULES_MBEDTLS, + HL_MODULES_MAX_NUM = 16 +}; + +/** + * HL_COMPONENTS_MAX_NUM is determined by the size of components_mask in + * struct cpucp_versions + */ +enum hl_components { + HL_COMPONENTS_PID = 0, + HL_COMPONENTS_MGMT, + HL_COMPONENTS_PREBOOT, + HL_COMPONENTS_PPBOOT, + HL_COMPONENTS_ARMCP, + HL_COMPONENTS_CPLD, + HL_COMPONENTS_UBOOT, + HL_COMPONENTS_MAX_NUM = 16 +}; + +/** + * struct hl_component_versions - versions associated with hl component. + * @struct_size: size of all the struct (including dynamic size of modules). + * @modules_offset: offset of the modules field in this struct. + * @component: version of the component itself. + * @fw_os: Firmware OS Version. + * @modules_mask: i'th bit (from LSB) is a flag - on if module i in enum + * hl_modules is used. + * @modules_counter: number of set bits in modules_mask. + * @reserved: reserved for future use. + * @modules: versions of the component's modules. Elborated explanation in + * struct cpucp_versions. + */ +struct hl_component_versions { + __le16 struct_size; + __le16 modules_offset; + __u8 component[VERSION_MAX_LEN]; + __u8 fw_os[VERSION_MAX_LEN]; + __le16 modules_mask; + __u8 modules_counter; + __u8 reserved[1]; + __u8 modules[][VERSION_MAX_LEN]; +}; + +/** + * struct hl_fw_versions - all versions (fuse, cpucp's components with their + * modules) + * @struct_size: size of all the struct (including dynamic size of components). + * @components_offset: offset of the components field in this struct. + * @fuse: silicon production FUSE information. + * @components_mask: i'th bit (from LSB) is a flag - on if component i in enum + * hl_components is used. + * @components_counter: number of set bits in components_mask. + * @reserved: reserved for future use. + * @components: versions of hl components. Index i corresponds to the i'th bit + * that is *on* in components_mask. For example, if + * components_mask=0b101, then *components represents arcpid and + * *(hl_component_versions*)((char*)components + 1') represents + * preboot, where 1' = components[0].struct_size. 
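/*
 * A minimal sketch of the traversal described in the comment above:
 * each element of a versions blob records its own struct_size, so the
 * i'th used component (per the components_mask example) is reached by
 * hopping struct_size bytes at a time. demo_* names are hypothetical
 * simplifications of struct hl_component_versions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_component {
	uint16_t struct_size;	/* total element size, dynamic tail included */
	char component[16];	/* stands in for the version strings */
	/* the real element continues with a variable modules[] tail */
};

static void demo_walk(const void *blob, unsigned int count)
{
	const char *p = blob;
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct demo_component c;

		memcpy(&c, p, sizeof(c));	/* safe for unaligned elements */
		printf("component %u: %s\n", i, c.component);
		p += c.struct_size;		/* hop over the dynamic tail */
	}
}

int main(void)
{
	unsigned char blob[64] = { 0 };
	struct demo_component a = { sizeof(a) + 8, "arcpid" };	/* 8-byte tail */
	struct demo_component b = { sizeof(b), "preboot" };	/* no tail */

	memcpy(blob, &a, sizeof(a));
	memcpy(blob + a.struct_size, &b, sizeof(b));
	demo_walk(blob, 2);
	return 0;
}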
+ */ +struct hl_fw_versions { + __le16 struct_size; + __le16 components_offset; + __u8 fuse[VERSION_MAX_LEN]; + __le16 components_mask; + __u8 components_counter; + __u8 reserved[1]; + struct hl_component_versions components[]; +}; + +/* Max size of struct hl_component_versions */ +#define HL_COMPONENT_VERSIONS_MAX_SIZE \ + (sizeof(struct hl_component_versions) + HL_MODULES_MAX_NUM * \ + VERSION_MAX_LEN) + +/* Max size of struct hl_fw_versions */ +#define HL_FW_VERSIONS_MAX_SIZE (sizeof(struct hl_fw_versions) + \ + HL_COMPONENTS_MAX_NUM * HL_COMPONENT_VERSIONS_MAX_SIZE) + #endif /* HL_BOOT_IF_H */ diff --git a/drivers/misc/habanalabs/include/common/qman_if.h b/drivers/accel/habanalabs/include/common/qman_if.h similarity index 100% rename from drivers/misc/habanalabs/include/common/qman_if.h rename to drivers/accel/habanalabs/include/common/qman_if.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h similarity index 100% rename from 
drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h rename to 
drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h diff --git 
a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_regs.h similarity index 99% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_regs.h index 1a657666679453..23ee8691db4615 100644 --- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h +++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_regs.h @@ -320,4 +320,6 @@ #define mmPSOC_TPC_PLL_NR 0xC73100 #define mmIF_W_PLL_NR 0x488100 +#define mmPCIE_WRAP_RR_ELBI_RD_SEC_REG_CTRL 0xC01208 + #endif /* ASIC_REG_GAUDI_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h 
b/drivers/accel/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h similarity index 100% rename from 
drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h diff --git 
a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h similarity index 100% 
rename from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/stlb_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/stlb_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h rename to 
drivers/accel/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h rename to drivers/accel/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi.h b/drivers/accel/habanalabs/include/gaudi/gaudi.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/gaudi.h rename to drivers/accel/habanalabs/include/gaudi/gaudi.h diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h b/drivers/accel/habanalabs/include/gaudi/gaudi_async_events.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h rename to drivers/accel/habanalabs/include/gaudi/gaudi_async_events.h diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h b/drivers/accel/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h rename to drivers/accel/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h 
b/drivers/accel/habanalabs/include/gaudi/gaudi_coresight.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h
rename to drivers/accel/habanalabs/include/gaudi/gaudi_coresight.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h b/drivers/accel/habanalabs/include/gaudi/gaudi_fw_if.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
rename to drivers/accel/habanalabs/include/gaudi/gaudi_fw_if.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/accel/habanalabs/include/gaudi/gaudi_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
rename to drivers/accel/habanalabs/include/gaudi/gaudi_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/accel/habanalabs/include/gaudi/gaudi_packets.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
rename to drivers/accel/habanalabs/include/gaudi/gaudi_packets.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h b/drivers/accel/habanalabs/include/gaudi/gaudi_reg_map.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
rename to drivers/accel/habanalabs/include/gaudi/gaudi_reg_map.h
diff --git a/drivers/accel/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h b/drivers/accel/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h
new file mode 100644
index 00000000000000..22a6ab9a7f47a9
--- /dev/null
+++ b/drivers/accel/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 HabanaLabs Ltd.
+ * All Rights Reserved.
+ */
+
+#ifndef __GAUDI2_ARC_COMMON_PACKETS_H__
+#define __GAUDI2_ARC_COMMON_PACKETS_H__
+
+enum {
+	CPU_ID_SCHED_ARC0 = 0,		/* FARM_ARC0 */
+	CPU_ID_SCHED_ARC1 = 1,		/* FARM_ARC1 */
+	CPU_ID_SCHED_ARC2 = 2,		/* FARM_ARC2 */
+	CPU_ID_SCHED_ARC3 = 3,		/* FARM_ARC3 */
+	/* Dcore1 MME Engine ARC instance used as scheduler */
+	CPU_ID_SCHED_ARC4 = 4,		/* DCORE1_MME0 */
+	/* Dcore3 MME Engine ARC instance used as scheduler */
+	CPU_ID_SCHED_ARC5 = 5,		/* DCORE3_MME0 */
+
+	CPU_ID_TPC_QMAN_ARC0 = 6,	/* DCORE0_TPC0 */
+	CPU_ID_TPC_QMAN_ARC1 = 7,	/* DCORE0_TPC1 */
+	CPU_ID_TPC_QMAN_ARC2 = 8,	/* DCORE0_TPC2 */
+	CPU_ID_TPC_QMAN_ARC3 = 9,	/* DCORE0_TPC3 */
+	CPU_ID_TPC_QMAN_ARC4 = 10,	/* DCORE0_TPC4 */
+	CPU_ID_TPC_QMAN_ARC5 = 11,	/* DCORE0_TPC5 */
+	CPU_ID_TPC_QMAN_ARC6 = 12,	/* DCORE1_TPC0 */
+	CPU_ID_TPC_QMAN_ARC7 = 13,	/* DCORE1_TPC1 */
+	CPU_ID_TPC_QMAN_ARC8 = 14,	/* DCORE1_TPC2 */
+	CPU_ID_TPC_QMAN_ARC9 = 15,	/* DCORE1_TPC3 */
+	CPU_ID_TPC_QMAN_ARC10 = 16,	/* DCORE1_TPC4 */
+	CPU_ID_TPC_QMAN_ARC11 = 17,	/* DCORE1_TPC5 */
+	CPU_ID_TPC_QMAN_ARC12 = 18,	/* DCORE2_TPC0 */
+	CPU_ID_TPC_QMAN_ARC13 = 19,	/* DCORE2_TPC1 */
+	CPU_ID_TPC_QMAN_ARC14 = 20,	/* DCORE2_TPC2 */
+	CPU_ID_TPC_QMAN_ARC15 = 21,	/* DCORE2_TPC3 */
+	CPU_ID_TPC_QMAN_ARC16 = 22,	/* DCORE2_TPC4 */
+	CPU_ID_TPC_QMAN_ARC17 = 23,	/* DCORE2_TPC5 */
+	CPU_ID_TPC_QMAN_ARC18 = 24,	/* DCORE3_TPC0 */
+	CPU_ID_TPC_QMAN_ARC19 = 25,	/* DCORE3_TPC1 */
+	CPU_ID_TPC_QMAN_ARC20 = 26,	/* DCORE3_TPC2 */
+	CPU_ID_TPC_QMAN_ARC21 = 27,	/* DCORE3_TPC3 */
+	CPU_ID_TPC_QMAN_ARC22 = 28,	/* DCORE3_TPC4 */
+	CPU_ID_TPC_QMAN_ARC23 = 29,	/* DCORE3_TPC5 */
+	CPU_ID_TPC_QMAN_ARC24 = 30,	/* DCORE0_TPC6 - Never present */
+
+	CPU_ID_MME_QMAN_ARC0 = 31,	/* DCORE0_MME0 */
+	CPU_ID_MME_QMAN_ARC1 = 32,	/* DCORE2_MME0 */
+
+	CPU_ID_EDMA_QMAN_ARC0 = 33,	/* DCORE0_EDMA0 */
+	CPU_ID_EDMA_QMAN_ARC1 = 34,	/* DCORE0_EDMA1 */
+	CPU_ID_EDMA_QMAN_ARC2 = 35,	/* DCORE1_EDMA0 */
+	CPU_ID_EDMA_QMAN_ARC3 = 36,	/* DCORE1_EDMA1 */
+	CPU_ID_EDMA_QMAN_ARC4 = 37,	/* DCORE2_EDMA0 */
+	CPU_ID_EDMA_QMAN_ARC5 = 38,	/* DCORE2_EDMA1 */
+	CPU_ID_EDMA_QMAN_ARC6 = 39,	/* DCORE3_EDMA0 */
+	CPU_ID_EDMA_QMAN_ARC7 = 40,	/* DCORE3_EDMA1 */
+
+	CPU_ID_PDMA_QMAN_ARC0 = 41,	/* DCORE0_PDMA0 */
+	CPU_ID_PDMA_QMAN_ARC1 = 42,	/* DCORE0_PDMA1 */
+
+	CPU_ID_ROT_QMAN_ARC0 = 43,	/* ROT0 */
+	CPU_ID_ROT_QMAN_ARC1 = 44,	/* ROT1 */
+
+	CPU_ID_NIC_QMAN_ARC0 = 45,	/* NIC0_0 */
+	CPU_ID_NIC_QMAN_ARC1 = 46,	/* NIC0_1 */
+	CPU_ID_NIC_QMAN_ARC2 = 47,	/* NIC1_0 */
+	CPU_ID_NIC_QMAN_ARC3 = 48,	/* NIC1_1 */
+	CPU_ID_NIC_QMAN_ARC4 = 49,	/* NIC2_0 */
+	CPU_ID_NIC_QMAN_ARC5 = 50,	/* NIC2_1 */
+	CPU_ID_NIC_QMAN_ARC6 = 51,	/* NIC3_0 */
+	CPU_ID_NIC_QMAN_ARC7 = 52,	/* NIC3_1 */
+	CPU_ID_NIC_QMAN_ARC8 = 53,	/* NIC4_0 */
+	CPU_ID_NIC_QMAN_ARC9 = 54,	/* NIC4_1 */
+	CPU_ID_NIC_QMAN_ARC10 = 55,	/* NIC5_0 */
+	CPU_ID_NIC_QMAN_ARC11 = 56,	/* NIC5_1 */
+	CPU_ID_NIC_QMAN_ARC12 = 57,	/* NIC6_0 */
+	CPU_ID_NIC_QMAN_ARC13 = 58,	/* NIC6_1 */
+	CPU_ID_NIC_QMAN_ARC14 = 59,	/* NIC7_0 */
+	CPU_ID_NIC_QMAN_ARC15 = 60,	/* NIC7_1 */
+	CPU_ID_NIC_QMAN_ARC16 = 61,	/* NIC8_0 */
+	CPU_ID_NIC_QMAN_ARC17 = 62,	/* NIC8_1 */
+	CPU_ID_NIC_QMAN_ARC18 = 63,	/* NIC9_0 */
+	CPU_ID_NIC_QMAN_ARC19 = 64,	/* NIC9_1 */
+	CPU_ID_NIC_QMAN_ARC20 = 65,	/* NIC10_0 */
+	CPU_ID_NIC_QMAN_ARC21 = 66,	/* NIC10_1 */
+	CPU_ID_NIC_QMAN_ARC22 = 67,	/* NIC11_0 */
+	CPU_ID_NIC_QMAN_ARC23 = 68,	/* NIC11_1 */
+
+	CPU_ID_MAX = 69,
+	CPU_ID_SCHED_MAX = 6,
+
+	CPU_ID_ALL = 0xFE,
+	CPU_ID_INVALID = 0xFF,
+};
+
+enum arc_regions_t {
+	ARC_REGION0_UNSED = 0,
+	/*
+	 * Extension registers
+	 * None
+	 */
+	ARC_REGION1_SRAM = 1,
+	/*
+	 * Extension registers
+	 * AUX_SRAM_LSB_ADDR
+	 * AUX_SRAM_MSB_ADDR
+	 * ARC Address: 0x1000_0000
+	 */
+	ARC_REGION2_CFG = 2,
+	/*
+	 * Extension registers
+	 * AUX_CFG_LSB_ADDR
+	 * AUX_CFG_MSB_ADDR
+	 * ARC Address: 0x2000_0000
+	 */
+	ARC_REGION3_GENERAL = 3,
+	/*
+	 * Extension registers
+	 * AUX_GENERAL_PURPOSE_LSB_ADDR_0
+	 * AUX_GENERAL_PURPOSE_MSB_ADDR_0
+	 * ARC Address: 0x3000_0000
+	 */
+	ARC_REGION4_HBM0_FW = 4,
+	/*
+	 * Extension registers
+	 * AUX_HBM0_LSB_ADDR
+	 * AUX_HBM0_MSB_ADDR
+	 * AUX_HBM0_OFFSET
+	 * ARC Address: 0x4000_0000
+	 */
+	ARC_REGION5_HBM1_GC_DATA = 5,
+	/*
+	 * Extension registers
+	 * AUX_HBM1_LSB_ADDR
+	 * AUX_HBM1_MSB_ADDR
+	 * AUX_HBM1_OFFSET
+	 * ARC Address: 0x5000_0000
+	 */
+	ARC_REGION6_HBM2_GC_DATA = 6,
+	/*
+	 * Extension registers
+	 * AUX_HBM2_LSB_ADDR
+	 * AUX_HBM2_MSB_ADDR
+	 * AUX_HBM2_OFFSET
+	 * ARC Address: 0x6000_0000
+	 */
+	ARC_REGION7_HBM3_GC_DATA = 7,
+	/*
+	 * Extension registers
+	 * AUX_HBM3_LSB_ADDR
+	 * AUX_HBM3_MSB_ADDR
+	 * AUX_HBM3_OFFSET
+	 * ARC Address: 0x7000_0000
+	 */
+	ARC_REGION8_DCCM = 8,
+	/*
+	 * Extension registers
+	 * None
+	 * ARC Address: 0x8000_0000
+	 */
+	ARC_REGION9_PCIE = 9,
+	/*
+	 * Extension registers
+	 * AUX_PCIE_LSB_ADDR
+	 * AUX_PCIE_MSB_ADDR
+	 * ARC Address: 0x9000_0000
+	 */
+	ARC_REGION10_GENERAL = 10,
+	/*
+	 * Extension registers
+	 * AUX_GENERAL_PURPOSE_LSB_ADDR_1
+	 * AUX_GENERAL_PURPOSE_MSB_ADDR_1
+	 * ARC Address: 0xA000_0000
+	 */
+	ARC_REGION11_GENERAL = 11,
+	/*
+	 * Extension registers
+	 * AUX_GENERAL_PURPOSE_LSB_ADDR_2
+	 * AUX_GENERAL_PURPOSE_MSB_ADDR_2
+	 * ARC Address: 0xB000_0000
+	 */
+	ARC_REGION12_GENERAL = 12,
+	/*
+	 * Extension registers
+	 * AUX_GENERAL_PURPOSE_LSB_ADDR_3
+	 * AUX_GENERAL_PURPOSE_MSB_ADDR_3
+	 * ARC Address: 0xC000_0000
+	 */
+	ARC_REGION13_GENERAL = 13,
+	/*
+	 * Extension registers
+	 * AUX_GENERAL_PURPOSE_LSB_ADDR_4
+	 * AUX_GENERAL_PURPOSE_MSB_ADDR_4
+	 * ARC Address: 0xD000_0000
+	 */
+	ARC_REGION14_GENERAL = 14,
+	/*
+	 * Extension registers
+	 * AUX_GENERAL_PURPOSE_LSB_ADDR_5
+	 * AUX_GENERAL_PURPOSE_MSB_ADDR_5
+	 * ARC Address: 0xE000_0000
+	 */
+	ARC_REGION15_LBU = 15
+	/*
+	 * Extension registers
+	 * None
+	 * ARC Address: 0xF000_0000
+	 */
+};
+
+#endif /* __GAUDI2_ARC_COMMON_PACKETS_H__ */
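The two enums above are consumed as plain integers: the scheduler ARCs occupy the IDs below CPU_ID_SCHED_MAX, and each arc_regions_t value selects one 256 MB window through ARC address bits [31:28], so region N starts at N << 28 (matching the "ARC Address" comments). A minimal sketch, assuming only the definitions above; both helpers are hypothetical and not part of the patch:

/* Illustrative only -- hypothetical helpers built on the enums above */
#include <linux/types.h>

static inline bool cpu_id_is_sched(u32 cpu_id)
{
	/* CPU_ID_SCHED_ARC0..5 are the first CPU_ID_SCHED_MAX IDs */
	return cpu_id < CPU_ID_SCHED_MAX;
}

static inline u32 arc_region_base(enum arc_regions_t region)
{
	/* one 256 MB window per region, e.g. ARC_REGION2_CFG -> 0x2000_0000 */
	return (u32)region << 28;
}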
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h
similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h similarity index 100% 
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h
similarity index 99%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h
index df51eac10dd735..2965b6a3b42327 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h
@@ -150,8 +150,7 @@
 #define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP1_PAGE_SIZE_SHIFT 16
 #define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP1_PAGE_SIZE_MASK 0xF0000
 #define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_SHIFT 20
-#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_MASK \
-0x100000
+#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_MASK 0x100000
 
 /* DCORE0_HMMU0_MMU_CORE_SEP_CACHE_RNG */
 #define DCORE0_HMMU0_MMU_CORE_SEP_CACHE_RNG_CORE_SET_MASK_SHIFT 0
@@ -235,23 +234,19 @@
 
 /* DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_63_32 */
 #define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_63_32_ILLEGAL_ADDR_63_32_SHIFT 0
-#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_63_32_ILLEGAL_ADDR_63_32_MASK \
-0xFFFFFFFF
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_63_32_ILLEGAL_ADDR_63_32_MASK 0xFFFFFFFF
 
 /* DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_31_0 */
 #define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_31_0_ILLEGAL_ADDR_31_0_SHIFT 0
-#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_31_0_ILLEGAL_ADDR_31_0_MASK \
-0xFFFFFFFF
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_31_0_ILLEGAL_ADDR_31_0_MASK 0xFFFFFFFF
 
 /* DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_63_32 */
 #define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_63_32_ILLEGAL_ADDR_63_32_SHIFT 0
-#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_63_32_ILLEGAL_ADDR_63_32_MASK \
-0xFFFFFFFF
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_63_32_ILLEGAL_ADDR_63_32_MASK 0xFFFFFFFF
 
 /* DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_31_0 */
 #define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_31_0_ILLEGAL_ADDR_31_0_SHIFT 0
-#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_31_0_ILLEGAL_ADDR_31_0_MASK \
-0xFFFFFFFF
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_31_0_ILLEGAL_ADDR_31_0_MASK 0xFFFFFFFF
 
 /* DCORE0_HMMU0_MMU_RAZWI_WRITE_VLD */
 #define DCORE0_HMMU0_MMU_RAZWI_WRITE_VLD_R_SHIFT 0
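The SHIFT/MASK pairs in these headers follow the usual mask-then-shift convention for decoding a register field. A minimal sketch, assuming only the two HOP1_PAGE_SIZE macros from the hunk above; hop1_page_size() is a hypothetical helper, and since the masks are compile-time constants the same read could also be written with FIELD_GET() from <linux/bitfield.h>:

/* Illustrative only -- not part of the patch */
#include <linux/types.h>

static inline u32 hop1_page_size(u32 multi_page_size)
{
	/* isolate bits [19:16] of the register, then shift them down to bit 0 */
	return (multi_page_size & DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP1_PAGE_SIZE_MASK) >>
			DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP1_PAGE_SIZE_SHIFT;
}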
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h
similarity index 97%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h
index 192eba5f07bb61..a311778b21e7a6 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h
@@ -92,8 +92,7 @@
 #define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_SHIFT 20
 #define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_MASK 0x100000
 #define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LARGE_PAGE_INDICATION_BIT_SHIFT 21
-#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LARGE_PAGE_INDICATION_BIT_MASK \
-0x7E00000
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LARGE_PAGE_INDICATION_BIT_MASK 0x7E00000
 
 /* DCORE0_HMMU0_STLB_LINK_LIST_LOOKUP_MASK_63_32 */
 #define DCORE0_HMMU0_STLB_LINK_LIST_LOOKUP_MASK_63_32_R_SHIFT 0
@@ -228,12 +227,8 @@
 #define DCORE0_HMMU0_STLB_MEM_READ_ARPROT_R_MASK 0x7
 
 /* DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION */
-#define \
-DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_SHIFT \
-0
-#define \
-DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK \
-0x1
+#define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_SHIFT 0
+#define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK 0x1
 #define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_EN_SHIFT 1
 #define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_EN_MASK 0x2
 #define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_SHIFT 2
@@ -261,53 +256,43 @@ DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK \
 
 /* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_0 */
 #define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_0_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_0_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_0_ASID_POLY_MATRIX_H3_MASK 0x1FF
 
 /* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_1 */
 #define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_1_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_1_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_1_ASID_POLY_MATRIX_H3_MASK 0x1FF
 
 /* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_2 */
 #define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_2_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_2_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_2_ASID_POLY_MATRIX_H3_MASK 0x1FF
 
 /* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_3 */
 #define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_3_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_3_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_3_ASID_POLY_MATRIX_H3_MASK 0x1FF
 
 /* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_4 */
 #define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_4_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_4_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_4_ASID_POLY_MATRIX_H3_MASK 0x1FF
 
 /* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_5 */
 #define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_5_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_5_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_5_ASID_POLY_MATRIX_H3_MASK 0x1FF
 
 /* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_6 */
 #define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_6_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_6_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_6_ASID_POLY_MATRIX_H3_MASK 0x1FF
 
 /* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_7 */
 #define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_7_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_7_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_7_ASID_POLY_MATRIX_H3_MASK 0x1FF
 
 /* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_8 */
 #define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_8_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_8_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_8_ASID_POLY_MATRIX_H3_MASK 0x1FF
 
 /* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_9 */
 #define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_9_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_9_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_9_ASID_POLY_MATRIX_H3_MASK 0x1FF
 
 /* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_10 */
 #define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_10_ASID_POLY_MATRIX_H3_SHIFT 0
diff
--git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h similarity index 
100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h similarity index 100% rename from 
drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h
similarity index 98%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h
index 7c22b9383f3cb0..fb53feb0a1a6ff 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h
@@ -20,8 +20,7 @@
 *****************************************
 */
 
-#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_KERNEL_SIZE_MINUS_1 \
-0x40CB280
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_KERNEL_SIZE_MINUS_1 0x40CB280
 
 #define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_LOW 0x40CB284
 
@@ -29,8 +28,7 @@
 
 #define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_OUTER_LOOP 0x40CB28C
 
-#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_NUM_ITERATIONS_MINUS_1 \
-0x40CB290
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_NUM_ITERATIONS_MINUS_1 0x40CB290
 
 #define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SB_REPEAT 0x40CB294
 
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h
similarity index 99%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h
index f699661d76aacb..da0c94075e64f0 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h
@@ -78,8 +78,7 @@
 #define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_MASTER_WAIT_SLAVE_FENCE_SHIFT 15
 #define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_MASTER_WAIT_SLAVE_FENCE_MASK 0x8000
 #define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SEND_FENCE2MASTER_SHIFT 16
-#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SEND_FENCE2MASTER_MASK \
-0x10000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SEND_FENCE2MASTER_MASK 0x10000
 #define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SIGNAL_EN_SHIFT 17
 #define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SIGNAL_EN_MASK 0x20000
 #define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE0_USE_SLV_ADR_SHIFT 18
@@ -87,11 +86,9 @@
 #define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_SLV_ADR_SHIFT 19
 #define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_SLV_ADR_MASK 0x80000
 #define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE0_USE_MSTR_ADR_PLUS4_SHIFT 20
-#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE0_USE_MSTR_ADR_PLUS4_MASK \
-0x100000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE0_USE_MSTR_ADR_PLUS4_MASK 0x100000
 #define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_MSTR_ADR_PLUS4_SHIFT 21
-#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_MSTR_ADR_PLUS4_MASK \
-0x200000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_MSTR_ADR_PLUS4_MASK 0x200000
 
 /* DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR0 */
 #define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR0_V_SHIFT 0
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h
rename to
drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h
similarity index 99%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h
index 68dd98459c86d4..1c02f3dfdb6e8b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h
@@ -106,8 +106,7 @@
 #define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWBURST_VIOL_SHIFT 2
 #define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWBURST_VIOL_MASK 0x4
 #define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWADDR_SIZE_ALIGN_VIOL_SHIFT 3
-#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWADDR_SIZE_ALIGN_VIOL_MASK \
-0x8
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWADDR_SIZE_ALIGN_VIOL_MASK 0x8
 #define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWSIZE_VIOL_SHIFT 4
 #define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWSIZE_VIOL_MASK 0x10
 #define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARLEN_GT_31_SHIFT 5
@@ -117,8 +116,7 @@
 #define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARBURST_VIOL_SHIFT 7
 #define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARBURST_VIOL_MASK 0x80
 #define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_SHIFT 8
-#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_MASK \
-0x100
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_MASK 0x100
 #define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARSIZE_VIOL_SHIFT 9
 #define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARSIZE_VIOL_MASK 0x200
 
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h
diff --git a/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_sync_mngr_glbl_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_sync_mngr_glbl_regs.h
new file mode 100644
index 00000000000000..3d38027558148a
--- /dev/null
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_sync_mngr_glbl_regs.h
@@ -0,0 +1,1203 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DCORE1_SYNC_MNGR_GLBL_REGS_H_ +#define ASIC_REG_DCORE1_SYNC_MNGR_GLBL_REGS_H_ + +/* + ***************************************** + * DCORE1_SYNC_MNGR_GLBL + * (Prototype: SOB_GLBL) + ***************************************** + */ + +#define mmDCORE1_SYNC_MNGR_GLBL_SM_SEI_MASK 0x431E000 + +#define mmDCORE1_SYNC_MNGR_GLBL_SM_SEI_CAUSE 0x431E004 + +#define mmDCORE1_SYNC_MNGR_GLBL_L2H_CPMR_L 0x431E008 + +#define mmDCORE1_SYNC_MNGR_GLBL_L2H_CPMR_H 0x431E00C + +#define mmDCORE1_SYNC_MNGR_GLBL_L2H_MASK_L 0x431E020 + +#define mmDCORE1_SYNC_MNGR_GLBL_L2H_MASK_H 0x431E024 + +#define mmDCORE1_SYNC_MNGR_GLBL_ASID_SEC 0x431E030 + +#define mmDCORE1_SYNC_MNGR_GLBL_ASID_PRIV_ONLY 0x431E034 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DELAY 0x431E038 + +#define mmDCORE1_SYNC_MNGR_GLBL_PI_SIZE 0x431E03C + +#define mmDCORE1_SYNC_MNGR_GLBL_SOB_ONLY 0x431E040 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INTR 0x431E044 + +#define mmDCORE1_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV 0x431E048 + +#define mmDCORE1_SYNC_MNGR_GLBL_PI_INC_MODE_SIZE 0x431E04C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0 0x431E050 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_1 0x431E054 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_2 0x431E058 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_3 0x431E05C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_4 0x431E060 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_5 0x431E064 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_6 0x431E068 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_7 0x431E06C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_8 0x431E070 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_9 0x431E074 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_10 0x431E078 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_11 0x431E07C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_12 0x431E080 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_13 0x431E084 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_14 0x431E088 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_15 0x431E08C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_16 0x431E090 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_17 0x431E094 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_18 0x431E098 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_19 0x431E09C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_20 0x431E0A0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_21 0x431E0A4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_22 0x431E0A8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_23 0x431E0AC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_24 0x431E0B0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_25 0x431E0B4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_26 0x431E0B8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_27 0x431E0BC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_28 0x431E0C0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_29 0x431E0C4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_30 0x431E0C8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_31 0x431E0CC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_32 0x431E0D0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_33 0x431E0D4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_34 0x431E0D8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_35 0x431E0DC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_36 0x431E0E0 + +#define 
mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_37 0x431E0E4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_38 0x431E0E8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_39 0x431E0EC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_40 0x431E0F0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_41 0x431E0F4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_42 0x431E0F8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_43 0x431E0FC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_44 0x431E100 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_45 0x431E104 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_46 0x431E108 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_47 0x431E10C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_48 0x431E110 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_49 0x431E114 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_50 0x431E118 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_51 0x431E11C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_52 0x431E120 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_53 0x431E124 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_54 0x431E128 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_55 0x431E12C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_56 0x431E130 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_57 0x431E134 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_58 0x431E138 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_59 0x431E13C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_60 0x431E140 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_61 0x431E144 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_62 0x431E148 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63 0x431E14C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0 0x431E150 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_1 0x431E154 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_2 0x431E158 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_3 0x431E15C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_4 0x431E160 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_5 0x431E164 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_6 0x431E168 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_7 0x431E16C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_8 0x431E170 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_9 0x431E174 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_10 0x431E178 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_11 0x431E17C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_12 0x431E180 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_13 0x431E184 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_14 0x431E188 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_15 0x431E18C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_16 0x431E190 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_17 0x431E194 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_18 0x431E198 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_19 0x431E19C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_20 0x431E1A0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_21 0x431E1A4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_22 0x431E1A8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_23 0x431E1AC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_24 0x431E1B0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_25 0x431E1B4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_26 0x431E1B8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_27 0x431E1BC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_28 0x431E1C0 + +#define 
mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_29 0x431E1C4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_30 0x431E1C8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_31 0x431E1CC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_32 0x431E1D0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_33 0x431E1D4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_34 0x431E1D8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_35 0x431E1DC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_36 0x431E1E0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_37 0x431E1E4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_38 0x431E1E8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_39 0x431E1EC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_40 0x431E1F0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_41 0x431E1F4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_42 0x431E1F8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_43 0x431E1FC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_44 0x431E200 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_45 0x431E204 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_46 0x431E208 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_47 0x431E20C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_48 0x431E210 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_49 0x431E214 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_50 0x431E218 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_51 0x431E21C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_52 0x431E220 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_53 0x431E224 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_54 0x431E228 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_55 0x431E22C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_56 0x431E230 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_57 0x431E234 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_58 0x431E238 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_59 0x431E23C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_60 0x431E240 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_61 0x431E244 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_62 0x431E248 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63 0x431E24C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0 0x431E250 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_1 0x431E254 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_2 0x431E258 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_3 0x431E25C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_4 0x431E260 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_5 0x431E264 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_6 0x431E268 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_7 0x431E26C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_8 0x431E270 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_9 0x431E274 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_10 0x431E278 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_11 0x431E27C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_12 0x431E280 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_13 0x431E284 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_14 0x431E288 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_15 0x431E28C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_16 0x431E290 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_17 0x431E294 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_18 0x431E298 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_19 0x431E29C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_20 0x431E2A0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_21 0x431E2A4 + +#define 
mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_22 0x431E2A8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_23 0x431E2AC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_24 0x431E2B0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_25 0x431E2B4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_26 0x431E2B8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_27 0x431E2BC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_28 0x431E2C0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_29 0x431E2C4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_30 0x431E2C8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_31 0x431E2CC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_32 0x431E2D0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_33 0x431E2D4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_34 0x431E2D8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_35 0x431E2DC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_36 0x431E2E0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_37 0x431E2E4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_38 0x431E2E8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_39 0x431E2EC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_40 0x431E2F0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_41 0x431E2F4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_42 0x431E2F8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_43 0x431E2FC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_44 0x431E300 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_45 0x431E304 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_46 0x431E308 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_47 0x431E30C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_48 0x431E310 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_49 0x431E314 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_50 0x431E318 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_51 0x431E31C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_52 0x431E320 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_53 0x431E324 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_54 0x431E328 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_55 0x431E32C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_56 0x431E330 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_57 0x431E334 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_58 0x431E338 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_59 0x431E33C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_60 0x431E340 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_61 0x431E344 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_62 0x431E348 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_63 0x431E34C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_0 0x431E350 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_1 0x431E354 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_2 0x431E358 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_3 0x431E35C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_4 0x431E360 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_5 0x431E364 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_6 0x431E368 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_7 0x431E36C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_8 0x431E370 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_9 0x431E374 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_10 0x431E378 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_11 0x431E37C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_12 0x431E380 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_13 0x431E384 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_14 0x431E388 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_15 0x431E38C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_16 0x431E390 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_17 0x431E394 + +#define 
mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_18 0x431E398 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_19 0x431E39C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_20 0x431E3A0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_21 0x431E3A4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_22 0x431E3A8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_23 0x431E3AC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_24 0x431E3B0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_25 0x431E3B4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_26 0x431E3B8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_27 0x431E3BC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_28 0x431E3C0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_29 0x431E3C4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_30 0x431E3C8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_31 0x431E3CC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_32 0x431E3D0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_33 0x431E3D4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_34 0x431E3D8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_35 0x431E3DC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_36 0x431E3E0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_37 0x431E3E4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_38 0x431E3E8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_39 0x431E3EC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_40 0x431E3F0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_41 0x431E3F4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_42 0x431E3F8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_43 0x431E3FC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_44 0x431E400 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_45 0x431E404 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_46 0x431E408 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_47 0x431E40C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_48 0x431E410 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_49 0x431E414 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_50 0x431E418 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_51 0x431E41C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_52 0x431E420 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_53 0x431E424 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_54 0x431E428 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_55 0x431E42C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_56 0x431E430 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_57 0x431E434 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_58 0x431E438 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_59 0x431E43C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_60 0x431E440 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_61 0x431E444 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_62 0x431E448 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_63 0x431E44C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_0 0x431E450 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_1 0x431E454 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_2 0x431E458 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_3 0x431E45C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_4 0x431E460 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_5 0x431E464 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_6 0x431E468 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_7 0x431E46C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_8 0x431E470 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_9 0x431E474 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_10 0x431E478 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_11 0x431E47C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_12 0x431E480 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_13 0x431E484 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_14 0x431E488 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_15 0x431E48C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_16 0x431E490 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_17 0x431E494 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_18 0x431E498 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_19 
0x431E49C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_20 0x431E4A0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_21 0x431E4A4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_22 0x431E4A8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_23 0x431E4AC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_24 0x431E4B0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_25 0x431E4B4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_26 0x431E4B8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_27 0x431E4BC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_28 0x431E4C0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_29 0x431E4C4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_30 0x431E4C8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_31 0x431E4CC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_32 0x431E4D0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_33 0x431E4D4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_34 0x431E4D8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_35 0x431E4DC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_36 0x431E4E0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_37 0x431E4E4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_38 0x431E4E8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_39 0x431E4EC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_40 0x431E4F0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_41 0x431E4F4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_42 0x431E4F8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_43 0x431E4FC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_44 0x431E500 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_45 0x431E504 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_46 0x431E508 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_47 0x431E50C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_48 0x431E510 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_49 0x431E514 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_50 0x431E518 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_51 0x431E51C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_52 0x431E520 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_53 0x431E524 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_54 0x431E528 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_55 0x431E52C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_56 0x431E530 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_57 0x431E534 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_58 0x431E538 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_59 0x431E53C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_60 0x431E540 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_61 0x431E544 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_62 0x431E548 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_63 0x431E54C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_0 0x431E550 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_1 0x431E554 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_2 0x431E558 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_3 0x431E55C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_4 0x431E560 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_5 0x431E564 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_6 0x431E568 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_7 0x431E56C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_8 0x431E570 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_9 0x431E574 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_10 0x431E578 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_11 0x431E57C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_12 0x431E580 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_13 0x431E584 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_14 0x431E588 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_15 0x431E58C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_16 0x431E590 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_17 0x431E594 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_18 0x431E598 + +#define 
mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_19 0x431E59C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_20 0x431E5A0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_21 0x431E5A4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_22 0x431E5A8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_23 0x431E5AC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_24 0x431E5B0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_25 0x431E5B4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_26 0x431E5B8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_27 0x431E5BC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_28 0x431E5C0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_29 0x431E5C4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_30 0x431E5C8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_31 0x431E5CC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_32 0x431E5D0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_33 0x431E5D4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_34 0x431E5D8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_35 0x431E5DC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_36 0x431E5E0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_37 0x431E5E4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_38 0x431E5E8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_39 0x431E5EC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_40 0x431E5F0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_41 0x431E5F4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_42 0x431E5F8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_43 0x431E5FC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_44 0x431E600 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_45 0x431E604 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_46 0x431E608 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_47 0x431E60C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_48 0x431E610 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_49 0x431E614 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_50 0x431E618 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_51 0x431E61C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_52 0x431E620 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_53 0x431E624 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_54 0x431E628 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_55 0x431E62C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_56 0x431E630 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_57 0x431E634 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_58 0x431E638 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_59 0x431E63C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_60 0x431E640 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_61 0x431E644 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_62 0x431E648 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_63 0x431E64C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_0 0x431E650 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_1 0x431E654 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_2 0x431E658 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_3 0x431E65C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_4 0x431E660 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_5 0x431E664 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_6 0x431E668 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_7 0x431E66C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_8 0x431E670 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_9 0x431E674 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_10 0x431E678 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_11 0x431E67C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_12 0x431E680 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_13 0x431E684 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_14 0x431E688 + +#define 
mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_15 0x431E68C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_16 0x431E690 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_17 0x431E694 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_18 0x431E698 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_19 0x431E69C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_20 0x431E6A0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_21 0x431E6A4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_22 0x431E6A8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_23 0x431E6AC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_24 0x431E6B0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_25 0x431E6B4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_26 0x431E6B8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_27 0x431E6BC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_28 0x431E6C0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_29 0x431E6C4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_30 0x431E6C8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_31 0x431E6CC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_32 0x431E6D0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_33 0x431E6D4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_34 0x431E6D8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_35 0x431E6DC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_36 0x431E6E0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_37 0x431E6E4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_38 0x431E6E8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_39 0x431E6EC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_40 0x431E6F0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_41 0x431E6F4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_42 0x431E6F8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_43 0x431E6FC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_44 0x431E700 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_45 0x431E704 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_46 0x431E708 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_47 0x431E70C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_48 0x431E710 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_49 0x431E714 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_50 0x431E718 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_51 0x431E71C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_52 0x431E720 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_53 0x431E724 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_54 0x431E728 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_55 0x431E72C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_56 0x431E730 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_57 0x431E734 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_58 0x431E738 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_59 0x431E73C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_60 0x431E740 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_61 0x431E744 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_62 0x431E748 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_63 0x431E74C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_0 0x431E750 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_1 0x431E754 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_2 0x431E758 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_3 0x431E75C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_4 0x431E760 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_5 0x431E764 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_6 0x431E768 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_7 0x431E76C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_8 0x431E770 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_9 0x431E774 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_10 0x431E778 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_11 0x431E77C 
+ +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_12 0x431E780 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_13 0x431E784 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_14 0x431E788 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_15 0x431E78C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_16 0x431E790 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_17 0x431E794 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_18 0x431E798 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_19 0x431E79C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_20 0x431E7A0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_21 0x431E7A4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_22 0x431E7A8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_23 0x431E7AC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_24 0x431E7B0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_25 0x431E7B4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_26 0x431E7B8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_27 0x431E7BC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_28 0x431E7C0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_29 0x431E7C4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_30 0x431E7C8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_31 0x431E7CC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_32 0x431E7D0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_33 0x431E7D4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_34 0x431E7D8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_35 0x431E7DC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_36 0x431E7E0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_37 0x431E7E4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_38 0x431E7E8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_39 0x431E7EC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_40 0x431E7F0 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_41 0x431E7F4 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_42 0x431E7F8 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_43 0x431E7FC + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_44 0x431E800 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_45 0x431E804 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_46 0x431E808 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_47 0x431E80C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_48 0x431E810 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_49 0x431E814 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_50 0x431E818 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_51 0x431E81C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_52 0x431E820 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_53 0x431E824 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_54 0x431E828 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_55 0x431E82C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_56 0x431E830 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_57 0x431E834 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_58 0x431E838 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_59 0x431E83C + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_60 0x431E840 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_61 0x431E844 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_62 0x431E848 + +#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_63 0x431E84C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_0 0x431E850 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_1 0x431E854 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_2 0x431E858 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_3 0x431E85C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_4 0x431E860 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_5 0x431E864 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_6 0x431E868 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_7 0x431E86C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_8 0x431E870 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_9 0x431E874 
+ +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_10 0x431E878 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_11 0x431E87C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_12 0x431E880 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_13 0x431E884 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_14 0x431E888 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_15 0x431E88C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_16 0x431E890 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_17 0x431E894 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_18 0x431E898 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_19 0x431E89C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_20 0x431E8A0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_21 0x431E8A4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_22 0x431E8A8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_23 0x431E8AC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_24 0x431E8B0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_25 0x431E8B4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_26 0x431E8B8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_27 0x431E8BC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_28 0x431E8C0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_29 0x431E8C4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_30 0x431E8C8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_31 0x431E8CC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_32 0x431E8D0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_33 0x431E8D4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_34 0x431E8D8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_35 0x431E8DC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_36 0x431E8E0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_37 0x431E8E4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_38 0x431E8E8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_39 0x431E8EC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_40 0x431E8F0 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_41 0x431E8F4 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_42 0x431E8F8 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_43 0x431E8FC + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_44 0x431E900 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_45 0x431E904 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_46 0x431E908 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_47 0x431E90C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_48 0x431E910 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_49 0x431E914 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_50 0x431E918 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_51 0x431E91C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_52 0x431E920 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_53 0x431E924 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_54 0x431E928 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_55 0x431E92C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_56 0x431E930 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_57 0x431E934 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_58 0x431E938 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_59 0x431E93C + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_60 0x431E940 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_61 0x431E944 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_62 0x431E948 + +#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_63 0x431E94C + +#endif /* ASIC_REG_DCORE1_SYNC_MNGR_GLBL_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h similarity index 100% rename from 
drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
similarity index 99%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
index 6aa1b14124629e..0bf3092bfeea32 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
@@ -31,6 +31,7 @@
 #include "dcore0_sync_mngr_objs_regs.h"
 #include "dcore0_sync_mngr_glbl_regs.h"
 #include "dcore0_sync_mngr_mstr_if_axuser_regs.h"
+#include "dcore1_sync_mngr_glbl_regs.h"
 #include "pdma0_qm_arc_aux_regs.h"
 #include "pdma0_core_ctx_regs.h"
 #include "pdma0_core_regs.h"
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h
similarity index 99%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h
index cc5842ec6ceb54..2ee79d8e62d09f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h
@@ -48,8 +48,7 @@
 
 #define mmPCIE_DBI_PCI_CAP_PTR_REG 0x4C02034
 
-#define mmPCIE_DBI_MAX_LATENCY_MIN_GRANT_INTERRUPT_PIN_INTERRUPT_LINE_REG \
-0x4C0203C
+#define mmPCIE_DBI_MAX_LATENCY_MIN_GRANT_INTERRUPT_PIN_INTERRUPT_LINE_REG 0x4C0203C
 
 #define mmPCIE_DBI_CAP_ID_NXT_PTR_REG 0x4C02040
 
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h
similarity index 99%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h
index d29837883216a0..7a96aebf08b33c 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h
@@ -116,8 +116,7 @@
 #define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARBURST_VIOL_SHIFT 7
 #define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARBURST_VIOL_MASK 0x80
 #define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_SHIFT 8
-#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_MASK \
-0x100
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_MASK 0x100
 #define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARSIZE_VIOL_SHIFT 9
 #define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARSIZE_VIOL_MASK 0x200
 
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h
similarity index 99%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h
index 0276506ea52399..b4f32632cd362d 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h
@@ -228,8 +228,7 @@
 /* PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION */
 
 #define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_SHIFT 0
-#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK \
-0x1
+#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK 0x1
 #define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_EN_SHIFT 1
 #define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_EN_MASK 0x2
 #define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_SHIFT 2
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h
similarity index 99%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h
index 9be3d656da3a8d..85a81e2cb54639 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h
@@ -1306,11 +1306,9 @@
 #define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC2_SHIFT 12
 #define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC2_MASK 0x3F000
#define
PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC3_SHIFT 18 -#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC3_MASK \ -0xFC0000 +#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC3_MASK 0xFC0000 #define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_HBM_LOC0_SHIFT 24 -#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_HBM_LOC0_MASK \ -0x3F000000 +#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_HBM_LOC0_MASK 0x3F000000 /* PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1 */ #define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_ADDR_EXTMEM_HBM_LOC1_SHIFT 0 @@ -1322,24 +1320,17 @@ #define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_CNT_EN_SHIFT 13 #define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_CNT_EN_MASK 0x2000 #define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_EN_SHIFT 14 -#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_EN_MASK \ -0x4000 -#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_MASK_SHIFT \ -16 -#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_MASK_MASK \ -0xFF0000 +#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_EN_MASK 0x4000 +#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_MASK_SHIFT 16 +#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_MASK_MASK 0xFF0000 #define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_HBM_NUM_SHIFT 24 #define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_HBM_NUM_MASK 0x7000000 /* PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2 */ -#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_HBM_CNT_MASK_SHIFT \ -0 -#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_HBM_CNT_MASK_MASK \ -0xFFFF -#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_EXTM_PC_MASK_SHIFT \ -16 -#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_EXTM_PC_MASK_MASK \ -0xFFFF0000 +#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_HBM_CNT_MASK_SHIFT 0 +#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_HBM_CNT_MASK_MASK 0xFFFF +#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_EXTM_PC_MASK_SHIFT 16 +#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_EXTM_PC_MASK_MASK 0xFFFF0000 /* PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3 */ #define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3_HBM_MAP0_SHIFT 0 diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h similarity index 100% rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h 
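The hunks above only re-join backslash-continued defines onto single lines; the paired *_SHIFT/*_MASK values themselves follow the usual register-field convention: MASK selects the field's bits within the 32-bit register and SHIFT is the index of its lowest bit. A minimal sketch of how such pairs are typically consumed; the helper names are hypothetical, not habanalabs driver API:

/* Hypothetical helpers, illustrative only. */
#include <stdint.h>

#define FIELD_SHIFT 18       /* e.g. ..._ADDR_EXTMEM_PC_LOC3_SHIFT */
#define FIELD_MASK  0xFC0000 /* e.g. ..._ADDR_EXTMEM_PC_LOC3_MASK  */

static inline uint32_t get_field(uint32_t reg)
{
	/* Mask out the field, then shift it down to bit 0. */
	return (reg & FIELD_MASK) >> FIELD_SHIFT;
}

static inline uint32_t set_field(uint32_t reg, uint32_t val)
{
	/* Clear the field, then OR in the new value at its offset. */
	return (reg & ~FIELD_MASK) | ((val << FIELD_SHIFT) & FIELD_MASK);
}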
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_masks.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h
rename to drivers/accel/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/gaudi2.h
rename to drivers/accel/habanalabs/include/gaudi2/gaudi2.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_events.h
similarity index 99%
rename from drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h
rename to drivers/accel/habanalabs/include/gaudi2/gaudi2_async_events.h
index 305b576222e6f3..50852cc803739c 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_events.h
@@ -958,6 +958,7 @@ enum gaudi2_async_event_id {
 	GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1 = 1318,
 	GAUDI2_EVENT_ARC_DCCM_FULL = 1319,
 	GAUDI2_EVENT_CPU_FP32_NOT_SUPPORTED = 1320,
+	GAUDI2_EVENT_DEV_RESET_REQ = 1321,
 	GAUDI2_EVENT_SIZE,
 };
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
similarity index 99%
rename from drivers/misc/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
rename to drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
index d510cb10c88313..82be01bea98e54 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
@@ -2665,6 +2665,8 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
 		.msg = 1, .reset = 0, .name = "ARC_DCCM_FULL" },
 	{ .fc_id = 1320, .cpu_id = 626, .valid = 1,
 		.msg = 1, .reset = 1, .name = "FP32_NOT_SUPPORTED" },
+	{ .fc_id = 1321, .cpu_id = 627, .valid = 1,
+		.msg = 1, .reset = 1, .name = "DEV_RESET_REQ" },
 };
 
 #endif /* __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_coresight.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_coresight.h
similarity index 100%
rename from drivers/misc/habanalabs/include/gaudi2/gaudi2_coresight.h
rename to drivers/accel/habanalabs/include/gaudi2/gaudi2_coresight.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_fw_if.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h
similarity index 82%
rename from drivers/misc/habanalabs/include/gaudi2/gaudi2_fw_if.h
rename to drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h
index e4a7d572509616..82f3ca2a3966e2 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_fw_if.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h
@@ -20,22 +20,25 @@
 
 #define GAUDI2_NUM_MME 4
 
+#define NUM_OF_GPIOS_PER_PORT 16
+#define GAUDI2_WD_GPIO (62 % NUM_OF_GPIOS_PER_PORT)
+
 #define GAUDI2_ARCPID_TX_MB_SIZE 0x1000
 #define GAUDI2_ARCPID_RX_MB_SIZE 0x400
 #define GAUDI2_ARM_TX_MB_SIZE 0x400
 #define GAUDI2_ARM_RX_MB_SIZE 0x1800
 
 #define GAUDI2_DCCM_BASE_ADDR 0x27020000
 
-#define GAUDI2_ARCPID_TX_MB_ADDR GAUDI2_DCCM_BASE_ADDR
-
-#define GAUDI2_ARCPID_RX_MB_ADDR (GAUDI2_ARCPID_TX_MB_ADDR + \
-				GAUDI2_ARCPID_TX_MB_SIZE)
 
 #define GAUDI2_ARM_TX_MB_ADDR GAUDI2_MAILBOX_BASE_ADDR
 
 #define GAUDI2_ARM_RX_MB_ADDR (GAUDI2_ARM_TX_MB_ADDR + \
 				GAUDI2_ARM_TX_MB_SIZE)
 
+#define GAUDI2_ARCPID_TX_MB_ADDR (GAUDI2_ARM_RX_MB_ADDR + GAUDI2_ARM_RX_MB_SIZE)
+
+#define GAUDI2_ARCPID_RX_MB_ADDR (GAUDI2_ARCPID_TX_MB_ADDR + GAUDI2_ARCPID_TX_MB_SIZE)
+
 #define GAUDI2_ARM_TX_MB_OFFSET (GAUDI2_ARM_TX_MB_ADDR - \
 				GAUDI2_SP_SRAM_BASE_ADDR)
 
@@ -58,7 +61,9 @@ struct gaudi2_cold_rst_data {
 		u32 spsram_init_done : 1;
 		u32 fake_security_enable : 1;
 		u32 fake_sig_validation_en : 1;
-		u32 reserved : 26;
+		u32 bist_skip_enable : 1;
+		u32 bist_need_iatu_config : 1;
+		u32 reserved : 24;
 	};
 	__le32 data;
 };
@@ -77,10 +82,10 @@ enum gaudi2_rst_src {
 };
 
 struct gaudi2_redundancy_ctx {
-	int redundant_hbm;
-	int redundant_edma;
-	int redundant_tpc;
-	int redundant_vdec;
+	__le32 redundant_hbm;
+	__le32 redundant_edma;
+	__le32 redundant_tpc;
+	__le32 redundant_vdec;
 	__le64 hbm_mask;
 	__le64 edma_mask;
 	__le64 tpc_mask;
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_packets.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_packets.h
similarity index 98%
rename from drivers/misc/habanalabs/include/gaudi2/gaudi2_packets.h
rename to drivers/accel/habanalabs/include/gaudi2/gaudi2_packets.h
index 8bf90fc18bf527..a812f8503f90d3 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_packets.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_packets.h
@@ -59,7 +59,7 @@ struct gaudi2_packet {
 	/* The rest of the packet data follows. Use the corresponding
 	 * packet_XXX struct to dereference the data, based on packet type
 	 */
-	u8 contents[0];
+	u8 contents[];
 };
 
 struct packet_nop {
@@ -80,7 +80,7 @@ struct packet_wreg32 {
 struct packet_wreg_bulk {
 	__le32 size64;
 	__le32 ctl;
-	__le64 values[0]; /* data starts here */
+	__le64 values[]; /* data starts here */
 };
 
 struct packet_msg_long {
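The contents[0] and values[0] changes above replace GNU-extension zero-length arrays with C99 flexible array members, the form the kernel has standardized on so the compiler can reason about the trailing array's bounds. A minimal, self-contained sketch of the idiom; the struct and names below are illustrative, not driver code:

/* Illustrative only: variable-length trailer via a flexible array member. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct pkt {
	uint32_t hdr;
	uint8_t payload[];	/* C99 flexible array member */
};

static struct pkt *pkt_alloc(const void *data, size_t len)
{
	/* sizeof(struct pkt) counts only the fixed part (hdr). */
	struct pkt *p = malloc(sizeof(*p) + len);

	if (!p)
		return NULL;
	p->hdr = (uint32_t)len;
	memcpy(p->payload, data, len);
	return p;
}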
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_reg_map.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_reg_map.h
similarity index 83%
rename from drivers/misc/habanalabs/include/gaudi2/gaudi2_reg_map.h
rename to drivers/accel/habanalabs/include/gaudi2/gaudi2_reg_map.h
index ae7feb388f638b..f3eaeb6d9b7ecb 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_reg_map.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_reg_map.h
@@ -24,14 +24,14 @@
 #define mmGIC_HOST_HALT_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
 #define mmGIC_HOST_INTS_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_11
 #define mmGIC_HOST_SOFT_RST_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_12
-#define mmEEPROM_COPY_LOCATION_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_13
 #define mmCPU_RST_STATUS_TO_HOST mmPSOC_GLOBAL_CONF_SCRATCHPAD_14
-#define mmENGINE_ARC_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_15
-#define mmPID_CFG_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_18
 /*
- * TODO: mmGIC_RAZWI_STATUS_REG is temporary
- * macro and to be removed after GAUDI2 PO
+ * A single scratchpad register used by all ARCs to notify the FW of a DCCM
+ * queue full event. A new event overwrites any unhandled previous one, so if
+ * several events arrive before being handled, only the last is considered.
 */
+#define mmENGINE_ARC_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_15
+#define mmPID_CFG_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_18
 #define mmGIC_RAZWI_STATUS_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_19
 #define mmCPU_BOOT_DEV_STS0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_20
 #define mmCPU_BOOT_DEV_STS1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_21
@@ -40,11 +40,10 @@
 #define mmCPU_BOOT_ERR1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_25
 #define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
 #define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
-#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
+#define mmPPBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28
 #define mmRDWR_TEST mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
 #define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
 #define mmRST_SRC mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_0
-#define mmPREBOOT_PCIE_EN mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_1
 #define mmCOLD_RST_DATA mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_2
 #define mmUPD_PENDING_STS mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_3
 #define mmPID_CMD_REQ_REG mmPSOC_PID_PID_CMD_0
@@ -55,5 +54,8 @@
 #define mmPID_CMD_TELEMETRY_REG_0_HI mmPSOC_PID_PID_CMD_5
 #define mmPID_CMD_TELEMETRY_REG_1 mmPSOC_PID_PID_CMD_6
 #define mmPID_CMD_TELEMETRY_REG_1_HI mmPSOC_PID_PID_CMD_7
+#define mmWD_GPIO_OUTSET_REG mmPSOC_GPIO3_OUTENSET
+#define mmWD_GPIO_DATAOUT_REG mmPSOC_GPIO3_DATAOUT
+#define mmSTM_PROFILER_SPE_REG mmPSOC_STM_STMSPER
 
 #endif	/* GAUDI2_REG_MAP_H_ */
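The watchdog-GPIO additions in gaudi2_fw_if.h and gaudi2_reg_map.h fit together: with 16 GPIOs per port, global GPIO 62 decomposes into a port index and a bit index. A worked sketch; the port derivation (62 / 16 == 3) is an inference from the mmWD_GPIO_* aliases above pointing at PSOC_GPIO3, and WREG32/RREG32 stand in for driver-style register accessors:

/* Worked example; port derivation is inferred, not stated in the patch. */
#define NUM_OF_GPIOS_PER_PORT 16
#define WD_GPIO_GLOBAL_NUM 62

#define WD_GPIO_PORT (WD_GPIO_GLOBAL_NUM / NUM_OF_GPIOS_PER_PORT)   /* 3  */
#define GAUDI2_WD_GPIO (WD_GPIO_GLOBAL_NUM % NUM_OF_GPIOS_PER_PORT) /* 14 */

/* Asserting the watchdog line would then be a read-modify-write of the
 * GPIO3 data-out register with bit 14 set, e.g.:
 *
 *	WREG32(mmWD_GPIO_OUTSET_REG, 1U << GAUDI2_WD_GPIO);
 *	WREG32(mmWD_GPIO_DATAOUT_REG,
 *	       RREG32(mmWD_GPIO_DATAOUT_REG) | (1U << GAUDI2_WD_GPIO));
 */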
+ */
+
+#ifndef GAUDI2_SPECIAL_BLOCKS_H
+#define GAUDI2_SPECIAL_BLOCKS_H
+
+#define GAUDI2_SPECIAL_BLOCKS { \
+	{ GAUDI2_BLOCK_TYPE_TPC, 0xfc008000, 4, 6, 0, 0x200000, 0x10000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_TPC, 0xfc00a000, 4, 6, 0, 0x200000, 0x10000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_TPC, 0xfc00b000, 4, 6, 0, 0x200000, 0x10000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_TPC, 0xfc00c000, 4, 6, 0, 0x200000, 0x10000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_HMMU, 0xfc080000, 4, 4, 0, 0x200000, 0x10000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_HMMU, 0xfc081000, 4, 4, 0, 0x200000, 0x10000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_HMMU, 0xfc083000, 4, 4, 0, 0x200000, 0x10000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_HMMU, 0xfc084000, 4, 4, 0, 0x200000, 0x10000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_MME, 0xfc0c8000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_MME, 0xfc0c9000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_MME, 0xfc0ca000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_MME, 0xfc0cb000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_MME, 0xfc0cc000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_EU_BIST, 0xfc0cd000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_MME, 0xfc0ce000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_MME, 0xfc0cf000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_MME, 0xfc0d0000, 4, 5, 0, 0x200000, 0x8000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_MME, 0xfc0d1000, 4, 5, 0, 0x200000, 0x8000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_MME, 0xfc0f8000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_MME, 0xfc0f9000, 4, 2, 0, 0x200000, 0x1000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_SYNC_MNGR, 0xfc11e000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_SYNC_MNGR, 0xfc11f000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_HIF, 0xfc120000, 4, 4, 0, 0x200000, 0x4000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_RTR, 0xfc140000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_RTR, 0xfc141000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_RTR, 0xfc142000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_RTR, 0xfc143000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_RTR, 0xfc144000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_RTR, 0xfc145000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_SRAM, 0xfc180000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_RTR, 0xfc181000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_SRAM, 0xfc182000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_EDMA, 0xfc1c8000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_EDMA, 0xfc1ca000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_EDMA, 0xfc1cb000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_EDMA, 0xfc1cc000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_DEC, 0xfc1e3000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_DEC, 0xfc1e4000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_DEC, 0xfc1e5000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PCIE, 0xfcc01000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PCIE, 0xfcc04000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PCIE, 0xfcc07000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PCIE, 0xfcc10000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PCIE, 0xfcc14000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PCIE, 0xfcc15000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PCIE, 0xfcc16000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc4a000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc4b000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc4e000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc4f000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc53000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc54000, 2, 0, 0, 0x1000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc58000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc59000, 2, 0, 0, 0x3000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc5a000, 2, 0, 0, 0x3000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc5b000, 2, 0, 0, 0x3000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc60000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc61000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc62000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc63000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc64000, 3, 0, 0, 0x1000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcc6c000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcc6d000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcc6e000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc74000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc76000, 3, 0, 0, 0x1000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc79000, 2, 0, 0, 0x1000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc7b000, 3, 0, 0, 0x1000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PSOC, 0xfcc7f000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PDMA, 0xfcc88000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PDMA, 0xfcc8a000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PDMA, 0xfcc8b000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PDMA, 0xfcc8c000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_CPU, 0xfccc0000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_CPU, 0xfccc1000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_CPU, 0xfccc3000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PMMU, 0xfcd00000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PMMU, 0xfcd01000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PMMU, 0xfcd02000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PMMU, 0xfcd03000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcd04000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcd05000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_XBAR, 0xfcd40000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcd41000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcd42000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcd43000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcd44000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_XBAR, 0xfcd48000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcd55000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcd64000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcd65000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcd74000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_ROT, 0xfce08000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_ROT, 0xfce0a000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_ROT, 0xfce0b000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_ROT, 0xfce0c000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_RTR, 0xfce40000, 4, 2, 0, 0x10000, 0x4000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_RTR, 0xfce41000, 4, 2, 0, 0x10000, 0x4000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_RTR, 0xfce42000, 4, 2, 0, 0x10000, 0x4000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_RTR, 0xfce43000, 4, 2, 0, 0x10000, 0x4000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_RTR, 0xfce48000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_RTR, 0xfce49000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_RTR, 0xfce4a000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_RTR, 0xfce4b000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_RTR, 0xfce4c000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce81000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce82000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce88000, 4, 0, 0, 0x20000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce89000, 4, 0, 0, 0x20000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce8b000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce8c000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce8f000, 4, 0, 0, 0x20000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_DEC, 0xfcf03000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_DEC, 0xfcf04000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_DEC, 0xfcf05000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_XFT, 0xfcf40000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcf41000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcf42000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcf43000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcf53000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_PLL, 0xfcf73000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_HBM, 0xfd000000, 6, 2, 0, 0x80000, 0x20000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_HBM, 0xfd001000, 6, 2, 8, 0x80000, 0x20000, 0x1000 }, \
+	{ GAUDI2_BLOCK_TYPE_HBM, 0xfd009000, 6, 2, 0, 0x80000, 0x20000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_NIC, 0xfd400000, 12, 2, 15, 0x80000, 0x20000, 0x1000 }, \
+	{ GAUDI2_BLOCK_TYPE_NIC, 0xfd418000, 12, 2, 0, 0x80000, 0x20000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_NIC, 0xfd41a000, 12, 2, 0, 0x80000, 0x20000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_NIC, 0xfd41f000, 12, 2, 0, 0x80000, 0x20000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_NIC, 0xfd448000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_NIC, 0xfd449000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_NIC, 0xfd44a000, 12, 2, 0, 0x80000, 0x1000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_NIC, 0xfd44c000, 12, 2, 0, 0x80000, 0x1000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_NIC, 0xfd450000, 12, 2, 0, 0x80000, 0x1000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_NIC, 0xfd452000, 12, 2, 0, 0x80000, 0x1000, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_NIC, 0xfd454000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_NIC, 0xfd455000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_NIC, 0xfd460000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_NIC, 0xfd468000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+	{ GAUDI2_BLOCK_TYPE_NIC, 0xfd469000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+}
+
+#endif
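Reading the table above, each entry appears to be (block_type, base_addr, up to three instance counts, three matching address strides); e.g. the first TPC row plausibly describes 4 instances 0x200000 apart, each with 6 sub-instances 0x10000 apart. A sketch under that assumption; the struct layout and names are illustrative, not the driver's:

/* Assumed entry layout; enumeration scheme is an inference from the data. */
#include <stdint.h>
#include <stdio.h>

struct special_block {
	int type;
	uint32_t base;
	uint32_t cnt0, cnt1, cnt2; /* instance counts per dimension */
	uint32_t off0, off1, off2; /* address stride per dimension  */
};

static void dump_instances(const struct special_block *b)
{
	/* Treat a count of 0 as "single instance" in that dimension. */
	uint32_t n0 = b->cnt0 ? b->cnt0 : 1;
	uint32_t n1 = b->cnt1 ? b->cnt1 : 1;
	uint32_t n2 = b->cnt2 ? b->cnt2 : 1;

	for (uint32_t i = 0; i < n0; i++)
		for (uint32_t j = 0; j < n1; j++)
			for (uint32_t k = 0; k < n2; k++)
				printf("0x%x\n", (unsigned)(b->base +
				       i * b->off0 + j * b->off1 + k * b->off2));
}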
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_if_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/cpu_if_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_macro_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/dma_macro_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_macro_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/dma_macro_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h b/drivers/accel/habanalabs/include/goya/asic_reg/goya_blocks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/goya_blocks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/goya_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/goya_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/goya_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/goya_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/ic_pll_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/ic_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mc_pll_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/mc_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/mme_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_qm_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/mme_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_qm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/mme_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/mme_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/mmu_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/mmu_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mmu_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/mmu_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/stlb_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/stlb_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/stlb_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/stlb_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
rename to drivers/accel/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/goya.h b/drivers/accel/habanalabs/include/goya/goya.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/goya.h
rename to drivers/accel/habanalabs/include/goya/goya.h
diff --git a/drivers/misc/habanalabs/include/goya/goya_async_events.h b/drivers/accel/habanalabs/include/goya/goya_async_events.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/goya_async_events.h
rename to drivers/accel/habanalabs/include/goya/goya_async_events.h
diff --git a/drivers/misc/habanalabs/include/goya/goya_coresight.h b/drivers/accel/habanalabs/include/goya/goya_coresight.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/goya_coresight.h
rename to drivers/accel/habanalabs/include/goya/goya_coresight.h
diff --git a/drivers/misc/habanalabs/include/goya/goya_fw_if.h b/drivers/accel/habanalabs/include/goya/goya_fw_if.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/goya_fw_if.h
rename to drivers/accel/habanalabs/include/goya/goya_fw_if.h
diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/accel/habanalabs/include/goya/goya_packets.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/goya_packets.h
rename to drivers/accel/habanalabs/include/goya/goya_packets.h
diff --git a/drivers/misc/habanalabs/include/goya/goya_reg_map.h b/drivers/accel/habanalabs/include/goya/goya_reg_map.h
similarity index 100%
rename from drivers/misc/habanalabs/include/goya/goya_reg_map.h
rename to drivers/accel/habanalabs/include/goya/goya_reg_map.h
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h
similarity index 100%
rename from drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
rename to
drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
similarity index 100%
rename from drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
rename to drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
similarity index 100%
rename from drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
rename to drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v2_0.h b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v2_0.h
similarity index 100%
rename from drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v2_0.h
rename to drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v2_0.h
diff --git a/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h b/drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h
similarity index 100%
rename from drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h
rename to drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h
diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig
new file mode 100644
index 00000000000000..9bdf168bf1d0e7
--- /dev/null
+++ b/drivers/accel/ivpu/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_IVPU
+	tristate "Intel VPU for Meteor Lake and newer"
+	depends on DRM_ACCEL
+	depends on X86_64 && !UML
+	depends on PCI && PCI_MSI
+	select FW_LOADER
+	select SHMEM
+	help
+	  Choose this option if you have a system that has a 14th generation Intel CPU
+	  or newer. VPU stands for Versatile Processing Unit and it's a CPU-integrated
+	  inference accelerator for Computer Vision and Deep Learning applications.
+
+	  If "M" is selected, the module will be called intel_vpu.
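For a sense of how the new driver is exercised from user space, a minimal, hypothetical usage sketch follows. It assumes the uapi header added by this series is installed as <drm/ivpu_accel.h> and defines DRM_IOCTL_IVPU_GET_PARAM together with struct drm_ivpu_param, matching ivpu_get_param_ioctl() in ivpu_drv.c below, and that the first device registers under the accel char-device convention as /dev/accel/accel0.

	/* Hypothetical example, not part of the patch itself. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <drm/ivpu_accel.h>	/* assumed uapi header name from this series */

	int main(void)
	{
		struct drm_ivpu_param args = { .param = DRM_IVPU_PARAM_DEVICE_ID };
		int fd = open("/dev/accel/accel0", O_RDWR);	/* accel char device */

		if (fd < 0)
			return 1;

		if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args) == 0)
			printf("VPU PCI device id: 0x%llx\n", (unsigned long long)args.value);

		close(fd);
		return 0;
	}

The same GET_PARAM path also reports context, PLL and firmware API information, as the switch in ivpu_get_param_ioctl() below shows.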
diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile
new file mode 100644
index 00000000000000..80f1fb3548ae82
--- /dev/null
+++ b/drivers/accel/ivpu/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2023 Intel Corporation
+
+intel_vpu-y := \
+	ivpu_drv.o \
+	ivpu_fw.o \
+	ivpu_gem.o \
+	ivpu_hw_mtl.o \
+	ivpu_ipc.o \
+	ivpu_job.o \
+	ivpu_jsm_msg.o \
+	ivpu_mmu.o \
+	ivpu_mmu_context.o \
+	ivpu_pm.o
+
+obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o
\ No newline at end of file
diff --git a/drivers/accel/ivpu/TODO b/drivers/accel/ivpu/TODO
new file mode 100644
index 00000000000000..9077217ae10f24
--- /dev/null
+++ b/drivers/accel/ivpu/TODO
@@ -0,0 +1,11 @@
+- Move to threaded_irqs to mitigate potential infinite loop in ivpu_ipc_irq_handler()
+- Implement support for BLOB IDs
+- Add debugfs support to improve debugging and testing
+- Add tracing events for performance debugging
+- Implement HW based scheduling support
+- Use syncobjs for submit/sync
+- Refactor IPC protocol to improve message latency
+- Implement BO cache and MADVISE IOCTL
+- Add support for user allocated buffers using prime import and dma-buf heaps
+- Refactor struct ivpu_bo to use struct drm_gem_shmem_object
+- Add driver/device documentation
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
new file mode 100644
index 00000000000000..231f29bb50257e
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -0,0 +1,655 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <drm/drm_accel.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_prime.h>
+
+#include "vpu_boot_api.h"
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_gem.h"
+#include "ivpu_hw.h"
+#include "ivpu_ipc.h"
+#include "ivpu_job.h"
+#include "ivpu_jsm_msg.h"
+#include "ivpu_mmu.h"
+#include "ivpu_mmu_context.h"
+#include "ivpu_pm.h"
+
+#ifndef DRIVER_VERSION_STR
+#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
+			   __stringify(DRM_IVPU_DRIVER_MINOR) "."
+#endif
+
+static const struct drm_driver driver;
+
+static struct lock_class_key submitted_jobs_xa_lock_class_key;
+
+int ivpu_dbg_mask;
+module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
+MODULE_PARM_DESC(dbg_mask, "Driver debug mask. 
See IVPU_DBG_* macros."); + +int ivpu_test_mode; +module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644); +MODULE_PARM_DESC(test_mode, "Test mode: 0 - normal operation, 1 - fw unit test, 2 - null hw"); + +u8 ivpu_pll_min_ratio; +module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644); +MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set VPU frequency"); + +u8 ivpu_pll_max_ratio = U8_MAX; +module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644); +MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency"); + +struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv) +{ + struct ivpu_device *vdev = file_priv->vdev; + + kref_get(&file_priv->ref); + + ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n", + file_priv->ctx.id, kref_read(&file_priv->ref)); + + return file_priv; +} + +struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id) +{ + struct ivpu_file_priv *file_priv; + + xa_lock_irq(&vdev->context_xa); + file_priv = xa_load(&vdev->context_xa, id); + /* file_priv may still be in context_xa during file_priv_release() */ + if (file_priv && !kref_get_unless_zero(&file_priv->ref)) + file_priv = NULL; + xa_unlock_irq(&vdev->context_xa); + + if (file_priv) + ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n", + file_priv->ctx.id, kref_read(&file_priv->ref)); + + return file_priv; +} + +static void file_priv_release(struct kref *ref) +{ + struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref); + struct ivpu_device *vdev = file_priv->vdev; + + ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id); + + ivpu_cmdq_release_all(file_priv); + ivpu_bo_remove_all_bos_from_context(&file_priv->ctx); + ivpu_jsm_context_release(vdev, file_priv->ctx.id); + ivpu_mmu_user_context_fini(vdev, &file_priv->ctx); + drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv); + mutex_destroy(&file_priv->lock); + kfree(file_priv); +} + +void ivpu_file_priv_put(struct ivpu_file_priv **link) +{ + struct ivpu_file_priv *file_priv = *link; + struct ivpu_device *vdev = file_priv->vdev; + + drm_WARN_ON(&vdev->drm, !file_priv); + + ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n", + file_priv->ctx.id, kref_read(&file_priv->ref)); + + *link = NULL; + kref_put(&file_priv->ref, file_priv_release); +} + +static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = file_priv->vdev; + struct pci_dev *pdev = to_pci_dev(vdev->drm.dev); + struct drm_ivpu_param *args = data; + int ret = 0; + + switch (args->param) { + case DRM_IVPU_PARAM_DEVICE_ID: + args->value = pdev->device; + break; + case DRM_IVPU_PARAM_DEVICE_REVISION: + args->value = pdev->revision; + break; + case DRM_IVPU_PARAM_PLATFORM_TYPE: + args->value = vdev->platform; + break; + case DRM_IVPU_PARAM_CORE_CLOCK_RATE: + args->value = ivpu_hw_reg_pll_freq_get(vdev); + break; + case DRM_IVPU_PARAM_NUM_CONTEXTS: + args->value = ivpu_get_context_count(vdev); + break; + case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS: + args->value = vdev->hw->ranges.user_low.start; + break; + case DRM_IVPU_PARAM_CONTEXT_PRIORITY: + args->value = file_priv->priority; + break; + case DRM_IVPU_PARAM_CONTEXT_ID: + args->value = file_priv->ctx.id; + break; + case DRM_IVPU_PARAM_FW_API_VERSION: + if (args->index < VPU_FW_API_VER_NUM) { + struct vpu_firmware_header *fw_hdr; + 
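+			/* vdev->fw->file stays loaded after probe, so API versions can be read straight from its header */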
+ fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data; + args->value = fw_hdr->api_version[args->index]; + } else { + ret = -EINVAL; + } + break; + case DRM_IVPU_PARAM_ENGINE_HEARTBEAT: + ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value); + break; + case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID: + args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter); + break; + case DRM_IVPU_PARAM_TILE_CONFIG: + args->value = vdev->hw->tile_fuse; + break; + case DRM_IVPU_PARAM_SKU: + args->value = vdev->hw->sku; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct ivpu_file_priv *file_priv = file->driver_priv; + struct drm_ivpu_param *args = data; + int ret = 0; + + switch (args->param) { + case DRM_IVPU_PARAM_CONTEXT_PRIORITY: + if (args->value <= DRM_IVPU_CONTEXT_PRIORITY_REALTIME) + file_priv->priority = args->value; + else + ret = -EINVAL; + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static int ivpu_open(struct drm_device *dev, struct drm_file *file) +{ + struct ivpu_device *vdev = to_ivpu_device(dev); + struct ivpu_file_priv *file_priv; + u32 ctx_id; + void *old; + int ret; + + ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL); + if (ret) { + ivpu_err(vdev, "Failed to allocate context id: %d\n", ret); + return ret; + } + + file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); + if (!file_priv) { + ret = -ENOMEM; + goto err_xa_erase; + } + + file_priv->vdev = vdev; + file_priv->priority = DRM_IVPU_CONTEXT_PRIORITY_NORMAL; + kref_init(&file_priv->ref); + mutex_init(&file_priv->lock); + + ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id); + if (ret) + goto err_mutex_destroy; + + old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL); + if (xa_is_err(old)) { + ret = xa_err(old); + ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret); + goto err_ctx_fini; + } + + ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n", + ctx_id, current->comm, task_pid_nr(current)); + + file->driver_priv = file_priv; + return 0; + +err_ctx_fini: + ivpu_mmu_user_context_fini(vdev, &file_priv->ctx); +err_mutex_destroy: + mutex_destroy(&file_priv->lock); + kfree(file_priv); +err_xa_erase: + xa_erase_irq(&vdev->context_xa, ctx_id); + return ret; +} + +static void ivpu_postclose(struct drm_device *dev, struct drm_file *file) +{ + struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = to_ivpu_device(dev); + + ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n", + file_priv->ctx.id, current->comm, task_pid_nr(current)); + + ivpu_file_priv_put(&file_priv); +} + +static const struct drm_ioctl_desc ivpu_drm_ioctls[] = { + DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0), + DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0), + DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0), + DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0), + DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0), + DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0), +}; + +static int ivpu_wait_for_ready(struct ivpu_device *vdev) +{ + struct ivpu_ipc_consumer cons; + struct ivpu_ipc_hdr ipc_hdr; + unsigned long timeout; + int ret; + + if (ivpu_test_mode == IVPU_TEST_MODE_FW_TEST) + return 0; + + ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG); + + timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot); + while (1) { + ret = 
ivpu_ipc_irq_handler(vdev); + if (ret) + break; + ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0); + if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout)) + break; + + cond_resched(); + } + + ivpu_ipc_consumer_del(vdev, &cons); + + if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) { + ivpu_err(vdev, "Invalid VPU ready message: 0x%x\n", + ipc_hdr.data_addr); + return -EIO; + } + + if (!ret) + ivpu_info(vdev, "VPU ready message received successfully\n"); + else + ivpu_hw_diagnose_failure(vdev); + + return ret; +} + +/** + * ivpu_boot() - Start VPU firmware + * @vdev: VPU device + * + * This function is paired with ivpu_shutdown() but it doesn't power up the + * VPU because power up has to be called very early in ivpu_probe(). + */ +int ivpu_boot(struct ivpu_device *vdev) +{ + int ret; + + /* Update boot params located at first 4KB of FW memory */ + ivpu_fw_boot_params_setup(vdev, vdev->fw->mem->kvaddr); + + ret = ivpu_hw_boot_fw(vdev); + if (ret) { + ivpu_err(vdev, "Failed to start the firmware: %d\n", ret); + return ret; + } + + ret = ivpu_wait_for_ready(vdev); + if (ret) { + ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret); + return ret; + } + + ivpu_hw_irq_clear(vdev); + enable_irq(vdev->irq); + ivpu_hw_irq_enable(vdev); + ivpu_ipc_enable(vdev); + return 0; +} + +int ivpu_shutdown(struct ivpu_device *vdev) +{ + int ret; + + ivpu_hw_irq_disable(vdev); + disable_irq(vdev->irq); + ivpu_ipc_disable(vdev); + ivpu_mmu_disable(vdev); + + ret = ivpu_hw_power_down(vdev); + if (ret) + ivpu_warn(vdev, "Failed to power down HW: %d\n", ret); + + return ret; +} + +static const struct file_operations ivpu_fops = { + .owner = THIS_MODULE, + DRM_ACCEL_FOPS, +}; + +static const struct drm_driver driver = { + .driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL, + + .open = ivpu_open, + .postclose = ivpu_postclose, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import = ivpu_gem_prime_import, + .gem_prime_mmap = drm_gem_prime_mmap, + + .ioctls = ivpu_drm_ioctls, + .num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls), + .fops = &ivpu_fops, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRM_IVPU_DRIVER_MAJOR, + .minor = DRM_IVPU_DRIVER_MINOR, +}; + +static int ivpu_irq_init(struct ivpu_device *vdev) +{ + struct pci_dev *pdev = to_pci_dev(vdev->drm.dev); + int ret; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (ret < 0) { + ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret); + return ret; + } + + vdev->irq = pci_irq_vector(pdev, 0); + + ret = devm_request_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler, + IRQF_NO_AUTOEN, DRIVER_NAME, vdev); + if (ret) + ivpu_err(vdev, "Failed to request an IRQ %d\n", ret); + + return ret; +} + +static int ivpu_pci_init(struct ivpu_device *vdev) +{ + struct pci_dev *pdev = to_pci_dev(vdev->drm.dev); + struct resource *bar0 = &pdev->resource[0]; + struct resource *bar4 = &pdev->resource[4]; + int ret; + + ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0); + vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0); + if (IS_ERR(vdev->regv)) { + ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv); + return PTR_ERR(vdev->regv); + } + + ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4); + vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4); + if (IS_ERR(vdev->regb)) { + ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb); + return PTR_ERR(vdev->regb); + } + + ret = 
dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(38)); + if (ret) { + ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret); + return ret; + } + dma_set_max_seg_size(vdev->drm.dev, UINT_MAX); + + /* Clear any pending errors */ + pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f); + + ret = pcim_enable_device(pdev); + if (ret) { + ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret); + return ret; + } + + pci_set_master(pdev); + + return 0; +} + +static int ivpu_dev_init(struct ivpu_device *vdev) +{ + int ret; + + vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL); + if (!vdev->hw) + return -ENOMEM; + + vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL); + if (!vdev->mmu) + return -ENOMEM; + + vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL); + if (!vdev->fw) + return -ENOMEM; + + vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL); + if (!vdev->ipc) + return -ENOMEM; + + vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL); + if (!vdev->pm) + return -ENOMEM; + + vdev->hw->ops = &ivpu_hw_mtl_ops; + vdev->platform = IVPU_PLATFORM_INVALID; + vdev->context_xa_limit.min = IVPU_GLOBAL_CONTEXT_MMU_SSID + 1; + vdev->context_xa_limit.max = IVPU_CONTEXT_LIMIT; + atomic64_set(&vdev->unique_id_counter, 0); + xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC); + xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1); + lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key); + + ret = ivpu_pci_init(vdev); + if (ret) { + ivpu_err(vdev, "Failed to initialize PCI device: %d\n", ret); + goto err_xa_destroy; + } + + ret = ivpu_irq_init(vdev); + if (ret) { + ivpu_err(vdev, "Failed to initialize IRQs: %d\n", ret); + goto err_xa_destroy; + } + + /* Init basic HW info based on buttress registers which are accessible before power up */ + ret = ivpu_hw_info_init(vdev); + if (ret) { + ivpu_err(vdev, "Failed to initialize HW info: %d\n", ret); + goto err_xa_destroy; + } + + /* Power up early so the rest of init code can access VPU registers */ + ret = ivpu_hw_power_up(vdev); + if (ret) { + ivpu_err(vdev, "Failed to power up HW: %d\n", ret); + goto err_xa_destroy; + } + + ret = ivpu_mmu_global_context_init(vdev); + if (ret) { + ivpu_err(vdev, "Failed to initialize global MMU context: %d\n", ret); + goto err_power_down; + } + + ret = ivpu_mmu_init(vdev); + if (ret) { + ivpu_err(vdev, "Failed to initialize MMU device: %d\n", ret); + goto err_mmu_gctx_fini; + } + + ret = ivpu_fw_init(vdev); + if (ret) { + ivpu_err(vdev, "Failed to initialize firmware: %d\n", ret); + goto err_mmu_gctx_fini; + } + + ret = ivpu_ipc_init(vdev); + if (ret) { + ivpu_err(vdev, "Failed to initialize IPC: %d\n", ret); + goto err_fw_fini; + } + + ret = ivpu_pm_init(vdev); + if (ret) { + ivpu_err(vdev, "Failed to initialize PM: %d\n", ret); + goto err_ipc_fini; + } + + ret = ivpu_job_done_thread_init(vdev); + if (ret) { + ivpu_err(vdev, "Failed to initialize job done thread: %d\n", ret); + goto err_ipc_fini; + } + + ret = ivpu_fw_load(vdev); + if (ret) { + ivpu_err(vdev, "Failed to load firmware: %d\n", ret); + goto err_job_done_thread_fini; + } + + ret = ivpu_boot(vdev); + if (ret) { + ivpu_err(vdev, "Failed to boot: %d\n", ret); + goto err_job_done_thread_fini; + } + + ivpu_pm_enable(vdev); + + return 0; + +err_job_done_thread_fini: + ivpu_job_done_thread_fini(vdev); +err_ipc_fini: + ivpu_ipc_fini(vdev); +err_fw_fini: + ivpu_fw_fini(vdev); +err_mmu_gctx_fini: + ivpu_mmu_global_context_fini(vdev); +err_power_down: + 
ivpu_hw_power_down(vdev); +err_xa_destroy: + xa_destroy(&vdev->submitted_jobs_xa); + xa_destroy(&vdev->context_xa); + return ret; +} + +static void ivpu_dev_fini(struct ivpu_device *vdev) +{ + ivpu_pm_disable(vdev); + ivpu_shutdown(vdev); + ivpu_job_done_thread_fini(vdev); + ivpu_ipc_fini(vdev); + ivpu_fw_fini(vdev); + ivpu_mmu_global_context_fini(vdev); + + drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa)); + xa_destroy(&vdev->submitted_jobs_xa); + drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa)); + xa_destroy(&vdev->context_xa); +} + +static struct pci_device_id ivpu_pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) }, + { } +}; +MODULE_DEVICE_TABLE(pci, ivpu_pci_ids); + +static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct ivpu_device *vdev; + int ret; + + vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm); + if (IS_ERR(vdev)) + return PTR_ERR(vdev); + + pci_set_drvdata(pdev, vdev); + + ret = ivpu_dev_init(vdev); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize VPU device: %d\n", ret); + return ret; + } + + ret = drm_dev_register(&vdev->drm, 0); + if (ret) { + dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret); + ivpu_dev_fini(vdev); + } + + return ret; +} + +static void ivpu_remove(struct pci_dev *pdev) +{ + struct ivpu_device *vdev = pci_get_drvdata(pdev); + + drm_dev_unregister(&vdev->drm); + ivpu_dev_fini(vdev); +} + +static const struct dev_pm_ops ivpu_drv_pci_pm = { + SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb) + SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL) +}; + +static const struct pci_error_handlers ivpu_drv_pci_err = { + .reset_prepare = ivpu_pm_reset_prepare_cb, + .reset_done = ivpu_pm_reset_done_cb, +}; + +static struct pci_driver ivpu_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = ivpu_pci_ids, + .probe = ivpu_probe, + .remove = ivpu_remove, + .driver = { + .pm = &ivpu_drv_pci_pm, + }, + .err_handler = &ivpu_drv_pci_err, +}; + +module_pci_driver(ivpu_pci_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); +MODULE_VERSION(DRIVER_VERSION_STR); diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h new file mode 100644 index 00000000000000..f47b4965db2e33 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#ifndef __IVPU_DRV_H__ +#define __IVPU_DRV_H__ + +#include +#include +#include +#include + +#include +#include +#include + +#include "ivpu_mmu_context.h" + +#define DRIVER_NAME "intel_vpu" +#define DRIVER_DESC "Driver for Intel Versatile Processing Unit (VPU)" +#define DRIVER_DATE "20230117" + +#define PCI_DEVICE_ID_MTL 0x7d1d + +#define IVPU_GLOBAL_CONTEXT_MMU_SSID 0 +#define IVPU_CONTEXT_LIMIT 64 +#define IVPU_NUM_ENGINES 2 + +#define IVPU_PLATFORM_SILICON 0 +#define IVPU_PLATFORM_SIMICS 2 +#define IVPU_PLATFORM_FPGA 3 +#define IVPU_PLATFORM_INVALID 8 + +#define IVPU_DBG_REG BIT(0) +#define IVPU_DBG_IRQ BIT(1) +#define IVPU_DBG_MMU BIT(2) +#define IVPU_DBG_FILE BIT(3) +#define IVPU_DBG_MISC BIT(4) +#define IVPU_DBG_FW_BOOT BIT(5) +#define IVPU_DBG_PM BIT(6) +#define IVPU_DBG_IPC BIT(7) +#define IVPU_DBG_BO BIT(8) +#define IVPU_DBG_JOB BIT(9) +#define IVPU_DBG_JSM BIT(10) +#define IVPU_DBG_KREF BIT(11) +#define IVPU_DBG_RPM BIT(12) + +#define ivpu_err(vdev, fmt, ...) 
\ + drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__) + +#define ivpu_err_ratelimited(vdev, fmt, ...) \ + drm_err_ratelimited(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__) + +#define ivpu_warn(vdev, fmt, ...) \ + drm_warn(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__) + +#define ivpu_warn_ratelimited(vdev, fmt, ...) \ + drm_err_ratelimited(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__) + +#define ivpu_info(vdev, fmt, ...) drm_info(&(vdev)->drm, fmt, ##__VA_ARGS__) + +#define ivpu_dbg(vdev, type, fmt, args...) do { \ + if (unlikely(IVPU_DBG_##type & ivpu_dbg_mask)) \ + dev_dbg((vdev)->drm.dev, "[%s] " fmt, #type, ##args); \ +} while (0) + +#define IVPU_WA(wa_name) (vdev->wa.wa_name) + +struct ivpu_wa_table { + bool punit_disabled; + bool clear_runtime_mem; +}; + +struct ivpu_hw_info; +struct ivpu_mmu_info; +struct ivpu_fw_info; +struct ivpu_ipc_info; +struct ivpu_pm_info; + +struct ivpu_device { + struct drm_device drm; + void __iomem *regb; + void __iomem *regv; + u32 platform; + u32 irq; + + struct ivpu_wa_table wa; + struct ivpu_hw_info *hw; + struct ivpu_mmu_info *mmu; + struct ivpu_fw_info *fw; + struct ivpu_ipc_info *ipc; + struct ivpu_pm_info *pm; + + struct ivpu_mmu_context gctx; + struct xarray context_xa; + struct xa_limit context_xa_limit; + + struct xarray submitted_jobs_xa; + struct task_struct *job_done_thread; + + atomic64_t unique_id_counter; + + struct { + int boot; + int jsm; + int tdr; + int reschedule_suspend; + } timeout; +}; + +/* + * file_priv has its own refcount (ref) that allows user space to close the fd + * without blocking even if VPU is still processing some jobs. + */ +struct ivpu_file_priv { + struct kref ref; + struct ivpu_device *vdev; + struct mutex lock; /* Protects cmdq */ + struct ivpu_cmdq *cmdq[IVPU_NUM_ENGINES]; + struct ivpu_mmu_context ctx; + u32 priority; + bool has_mmu_faults; +}; + +extern int ivpu_dbg_mask; +extern u8 ivpu_pll_min_ratio; +extern u8 ivpu_pll_max_ratio; + +#define IVPU_TEST_MODE_DISABLED 0 +#define IVPU_TEST_MODE_FW_TEST 1 +#define IVPU_TEST_MODE_NULL_HW 2 +extern int ivpu_test_mode; + +struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv); +struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id); +void ivpu_file_priv_put(struct ivpu_file_priv **link); + +int ivpu_boot(struct ivpu_device *vdev); +int ivpu_shutdown(struct ivpu_device *vdev); + +static inline bool ivpu_is_mtl(struct ivpu_device *vdev) +{ + return to_pci_dev(vdev->drm.dev)->device == PCI_DEVICE_ID_MTL; +} + +static inline u8 ivpu_revision(struct ivpu_device *vdev) +{ + return to_pci_dev(vdev->drm.dev)->revision; +} + +static inline u16 ivpu_device_id(struct ivpu_device *vdev) +{ + return to_pci_dev(vdev->drm.dev)->device; +} + +static inline struct ivpu_device *to_ivpu_device(struct drm_device *dev) +{ + return container_of(dev, struct ivpu_device, drm); +} + +static inline u32 ivpu_get_context_count(struct ivpu_device *vdev) +{ + struct xa_limit ctx_limit = vdev->context_xa_limit; + + return (ctx_limit.max - ctx_limit.min + 1); +} + +static inline u32 ivpu_get_platform(struct ivpu_device *vdev) +{ + WARN_ON_ONCE(vdev->platform == IVPU_PLATFORM_INVALID); + return vdev->platform; +} + +static inline bool ivpu_is_silicon(struct ivpu_device *vdev) +{ + return ivpu_get_platform(vdev) == IVPU_PLATFORM_SILICON; +} + +static inline bool ivpu_is_simics(struct ivpu_device *vdev) +{ + return ivpu_get_platform(vdev) == IVPU_PLATFORM_SIMICS; +} + +static inline bool ivpu_is_fpga(struct 
ivpu_device *vdev)
+{
+	return ivpu_get_platform(vdev) == IVPU_PLATFORM_FPGA;
+}
+
+#endif /* __IVPU_DRV_H__ */
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
new file mode 100644
index 00000000000000..f58951a0d81b10
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/firmware.h>
+#include <linux/highmem.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+
+#include "vpu_boot_api.h"
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_gem.h"
+#include "ivpu_hw.h"
+#include "ivpu_ipc.h"
+#include "ivpu_pm.h"
+
+#define FW_GLOBAL_MEM_START	(2ull * SZ_1G)
+#define FW_GLOBAL_MEM_END	(3ull * SZ_1G)
+#define FW_SHARED_MEM_SIZE	SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */
+#define FW_SHARED_MEM_ALIGNMENT	SZ_128K /* VPU MTRR limitation */
+#define FW_RUNTIME_MAX_SIZE	SZ_512M
+#define FW_SHAVE_NN_MAX_SIZE	SZ_2M
+#define FW_RUNTIME_MIN_ADDR	(FW_GLOBAL_MEM_START)
+#define FW_RUNTIME_MAX_ADDR	(FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
+#define FW_VERSION_HEADER_SIZE	SZ_4K
+#define FW_FILE_IMAGE_OFFSET	(VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)
+
+#define WATCHDOG_MSS_REDIRECT	32
+#define WATCHDOG_NCE_REDIRECT	33
+
+#define ADDR_TO_L2_CACHE_CFG(addr) ((addr) >> 31)
+
+#define IVPU_FW_CHECK_API(vdev, fw_hdr, name, min_major) \
+	ivpu_fw_check_api(vdev, fw_hdr, #name, \
+			  VPU_##name##_API_VER_INDEX, \
+			  VPU_##name##_API_VER_MAJOR, \
+			  VPU_##name##_API_VER_MINOR, min_major)
+
+static char *ivpu_firmware;
+module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
+MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");
+
+static int ivpu_fw_request(struct ivpu_device *vdev)
+{
+	static const char * const fw_names[] = {
+		"mtl_vpu.bin",
+		"intel/vpu/mtl_vpu_v0.0.bin"
+	};
+	int ret = -ENOENT;
+	int i;
+
+	if (ivpu_firmware)
+		return request_firmware(&vdev->fw->file, ivpu_firmware, vdev->drm.dev);
+
+	for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
+		ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i], vdev->drm.dev);
+		if (!ret)
+			return 0;
+	}
+
+	ivpu_err(vdev, "Failed to request firmware: %d\n", ret);
+	return ret;
+}
+
+static int
+ivpu_fw_check_api(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr,
+		  const char *str, int index, u16 expected_major, u16 expected_minor,
+		  u16 min_major)
+{
+	u16 major = (u16)(fw_hdr->api_version[index] >> 16);
+	u16 minor = (u16)(fw_hdr->api_version[index]);
+
+	if (major < min_major) {
+		ivpu_err(vdev, "Incompatible FW %s API version: %d.%d, required %d.0 or later\n",
+			 str, major, minor, min_major);
+		return -EINVAL;
+	}
+	if (major != expected_major) {
+		ivpu_warn(vdev, "Major FW %s API version different: %d.%d (expected %d.%d)\n",
+			  str, major, minor, expected_major, expected_minor);
+	}
+	ivpu_dbg(vdev, FW_BOOT, "FW %s API version: %d.%d (expected %d.%d)\n",
+		 str, major, minor, expected_major, expected_minor);
+
+	return 0;
+}
+
+static int ivpu_fw_parse(struct ivpu_device *vdev)
+{
+	struct ivpu_fw_info *fw = vdev->fw;
+	const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data;
+	u64 runtime_addr, image_load_addr, runtime_size, image_size;
+
+	if (fw->file->size <= FW_FILE_IMAGE_OFFSET) {
+		ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size);
+		return -EINVAL;
+	}
+
+	if (fw_hdr->header_version != VPU_FW_HEADER_VERSION) {
+		ivpu_err(vdev, "Invalid firmware header version: %u\n", fw_hdr->header_version);
+		return -EINVAL;
+	}
+
+	runtime_addr = fw_hdr->boot_params_load_address;
+	
runtime_size = fw_hdr->runtime_size; + image_load_addr = fw_hdr->image_load_address; + image_size = fw_hdr->image_size; + + if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) { + ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr); + return -EINVAL; + } + + if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) { + ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size); + return -EINVAL; + } + + if (FW_FILE_IMAGE_OFFSET + image_size > fw->file->size) { + ivpu_err(vdev, "Invalid image size: %llu\n", image_size); + return -EINVAL; + } + + if (image_load_addr < runtime_addr || + image_load_addr + image_size > runtime_addr + runtime_size) { + ivpu_err(vdev, "Invalid firmware load address size: 0x%llx and size %llu\n", + image_load_addr, image_size); + return -EINVAL; + } + + if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) { + ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size); + return -EINVAL; + } + + if (fw_hdr->entry_point < image_load_addr || + fw_hdr->entry_point >= image_load_addr + image_size) { + ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point); + return -EINVAL; + } + ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n", + fw_hdr->header_version, fw_hdr->image_format); + ivpu_dbg(vdev, FW_BOOT, "FW version: %s\n", (char *)fw_hdr + VPU_FW_HEADER_SIZE); + + if (IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT, 3)) + return -EINVAL; + if (IVPU_FW_CHECK_API(vdev, fw_hdr, JSM, 3)) + return -EINVAL; + + fw->runtime_addr = runtime_addr; + fw->runtime_size = runtime_size; + fw->image_load_offset = image_load_addr - runtime_addr; + fw->image_size = image_size; + fw->shave_nn_size = PAGE_ALIGN(fw_hdr->shave_nn_fw_size); + + fw->cold_boot_entry_point = fw_hdr->entry_point; + fw->entry_point = fw->cold_boot_entry_point; + + ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n", + fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size); + ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n", + fw->runtime_addr, image_load_addr, fw->entry_point); + + return 0; +} + +static void ivpu_fw_release(struct ivpu_device *vdev) +{ + release_firmware(vdev->fw->file); +} + +static int ivpu_fw_update_global_range(struct ivpu_device *vdev) +{ + struct ivpu_fw_info *fw = vdev->fw; + u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT); + u64 size = FW_SHARED_MEM_SIZE; + + if (start + size > FW_GLOBAL_MEM_END) { + ivpu_err(vdev, "No space for shared region, start %lld, size %lld\n", start, size); + return -EINVAL; + } + + ivpu_hw_init_range(&vdev->hw->ranges.global_low, start, size); + return 0; +} + +static int ivpu_fw_mem_init(struct ivpu_device *vdev) +{ + struct ivpu_fw_info *fw = vdev->fw; + int ret; + + ret = ivpu_fw_update_global_range(vdev); + if (ret) + return ret; + + fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC); + if (!fw->mem) { + ivpu_err(vdev, "Failed to allocate firmware runtime memory\n"); + return -ENOMEM; + } + + if (fw->shave_nn_size) { + fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.global_high.start, + fw->shave_nn_size, DRM_IVPU_BO_UNCACHED); + if (!fw->mem_shave_nn) { + ivpu_err(vdev, "Failed to allocate shavenn buffer\n"); + ivpu_bo_free_internal(fw->mem); + return -ENOMEM; + } + } + + return 0; +} + +static void ivpu_fw_mem_fini(struct ivpu_device *vdev) +{ + struct ivpu_fw_info *fw = vdev->fw; + + if 
(fw->mem_shave_nn) { + ivpu_bo_free_internal(fw->mem_shave_nn); + fw->mem_shave_nn = NULL; + } + + ivpu_bo_free_internal(fw->mem); + fw->mem = NULL; +} + +int ivpu_fw_init(struct ivpu_device *vdev) +{ + int ret; + + ret = ivpu_fw_request(vdev); + if (ret) + return ret; + + ret = ivpu_fw_parse(vdev); + if (ret) + goto err_fw_release; + + ret = ivpu_fw_mem_init(vdev); + if (ret) + goto err_fw_release; + + return 0; + +err_fw_release: + ivpu_fw_release(vdev); + return ret; +} + +void ivpu_fw_fini(struct ivpu_device *vdev) +{ + ivpu_fw_mem_fini(vdev); + ivpu_fw_release(vdev); +} + +int ivpu_fw_load(struct ivpu_device *vdev) +{ + struct ivpu_fw_info *fw = vdev->fw; + u64 image_end_offset = fw->image_load_offset + fw->image_size; + + memset(fw->mem->kvaddr, 0, fw->image_load_offset); + memcpy(fw->mem->kvaddr + fw->image_load_offset, + fw->file->data + FW_FILE_IMAGE_OFFSET, fw->image_size); + + if (IVPU_WA(clear_runtime_mem)) { + u8 *start = fw->mem->kvaddr + image_end_offset; + u64 size = fw->mem->base.size - image_end_offset; + + memset(start, 0, size); + } + + wmb(); /* Flush WC buffers after writing fw->mem */ + + return 0; +} + +static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_params *boot_params) +{ + ivpu_dbg(vdev, FW_BOOT, "boot_params.magic = 0x%x\n", + boot_params->magic); + ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_id = 0x%x\n", + boot_params->vpu_id); + ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_count = 0x%x\n", + boot_params->vpu_count); + ivpu_dbg(vdev, FW_BOOT, "boot_params.frequency = %u\n", + boot_params->frequency); + ivpu_dbg(vdev, FW_BOOT, "boot_params.perf_clk_frequency = %u\n", + boot_params->perf_clk_frequency); + + ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_header_area_start = 0x%llx\n", + boot_params->ipc_header_area_start); + ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_header_area_size = 0x%x\n", + boot_params->ipc_header_area_size); + ivpu_dbg(vdev, FW_BOOT, "boot_params.shared_region_base = 0x%llx\n", + boot_params->shared_region_base); + ivpu_dbg(vdev, FW_BOOT, "boot_params.shared_region_size = 0x%x\n", + boot_params->shared_region_size); + ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_payload_area_start = 0x%llx\n", + boot_params->ipc_payload_area_start); + ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_payload_area_size = 0x%x\n", + boot_params->ipc_payload_area_size); + ivpu_dbg(vdev, FW_BOOT, "boot_params.global_aliased_pio_base = 0x%llx\n", + boot_params->global_aliased_pio_base); + ivpu_dbg(vdev, FW_BOOT, "boot_params.global_aliased_pio_size = 0x%x\n", + boot_params->global_aliased_pio_size); + + ivpu_dbg(vdev, FW_BOOT, "boot_params.autoconfig = 0x%x\n", + boot_params->autoconfig); + + ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 0x%x\n", + boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use); + ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n", + boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg); + + ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n", + boot_params->global_memory_allocator_base); + ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n", + boot_params->global_memory_allocator_size); + + ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n", + boot_params->shave_nn_fw_base); + + ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_mss = 0x%x\n", + boot_params->watchdog_irq_mss); + ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n", + boot_params->watchdog_irq_nce); + 
ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n", + boot_params->host_to_vpu_irq); + ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n", + boot_params->job_done_irq); + + ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n", + boot_params->host_version_id); + ivpu_dbg(vdev, FW_BOOT, "boot_params.si_stepping = 0x%x\n", + boot_params->si_stepping); + ivpu_dbg(vdev, FW_BOOT, "boot_params.device_id = 0x%llx\n", + boot_params->device_id); + ivpu_dbg(vdev, FW_BOOT, "boot_params.feature_exclusion = 0x%llx\n", + boot_params->feature_exclusion); + ivpu_dbg(vdev, FW_BOOT, "boot_params.sku = 0x%llx\n", + boot_params->sku); + ivpu_dbg(vdev, FW_BOOT, "boot_params.min_freq_pll_ratio = 0x%x\n", + boot_params->min_freq_pll_ratio); + ivpu_dbg(vdev, FW_BOOT, "boot_params.pn_freq_pll_ratio = 0x%x\n", + boot_params->pn_freq_pll_ratio); + ivpu_dbg(vdev, FW_BOOT, "boot_params.max_freq_pll_ratio = 0x%x\n", + boot_params->max_freq_pll_ratio); + ivpu_dbg(vdev, FW_BOOT, "boot_params.default_trace_level = 0x%x\n", + boot_params->default_trace_level); + ivpu_dbg(vdev, FW_BOOT, "boot_params.tracing_buff_message_format_mask = 0x%llx\n", + boot_params->tracing_buff_message_format_mask); + ivpu_dbg(vdev, FW_BOOT, "boot_params.trace_destination_mask = 0x%x\n", + boot_params->trace_destination_mask); + ivpu_dbg(vdev, FW_BOOT, "boot_params.trace_hw_component_mask = 0x%llx\n", + boot_params->trace_hw_component_mask); + ivpu_dbg(vdev, FW_BOOT, "boot_params.boot_type = 0x%x\n", + boot_params->boot_type); + ivpu_dbg(vdev, FW_BOOT, "boot_params.punit_telemetry_sram_base = 0x%llx\n", + boot_params->punit_telemetry_sram_base); + ivpu_dbg(vdev, FW_BOOT, "boot_params.punit_telemetry_sram_size = 0x%llx\n", + boot_params->punit_telemetry_sram_size); + ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_telemetry_enable = 0x%x\n", + boot_params->vpu_telemetry_enable); +} + +void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params) +{ + struct ivpu_bo *ipc_mem_rx = vdev->ipc->mem_rx; + + /* In case of warm boot we only have to reset the entrypoint addr */ + if (!ivpu_fw_is_cold_boot(vdev)) { + boot_params->save_restore_ret_address = 0; + vdev->pm->is_warmboot = true; + return; + } + + vdev->pm->is_warmboot = false; + + boot_params->magic = VPU_BOOT_PARAMS_MAGIC; + boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number; + boot_params->frequency = ivpu_hw_reg_pll_freq_get(vdev); + + /* + * Uncached region of VPU address space, covers IPC buffers, job queues + * and log buffers, programmable to L2$ Uncached by VPU MTRR + */ + boot_params->shared_region_base = vdev->hw->ranges.global_low.start; + boot_params->shared_region_size = vdev->hw->ranges.global_low.end - + vdev->hw->ranges.global_low.start; + + boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr; + boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2; + + boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2; + boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2; + + boot_params->global_aliased_pio_base = + vdev->hw->ranges.global_aliased_pio.start; + boot_params->global_aliased_pio_size = + ivpu_hw_range_size(&vdev->hw->ranges.global_aliased_pio); + + /* Allow configuration for L2C_PAGE_TABLE with boot param value */ + boot_params->autoconfig = 1; + + /* Enable L2 cache for first 2GB of high memory */ + boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1; + boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = + 
ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.global_high.start); + + if (vdev->fw->mem_shave_nn) + boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr; + + boot_params->watchdog_irq_mss = WATCHDOG_MSS_REDIRECT; + boot_params->watchdog_irq_nce = WATCHDOG_NCE_REDIRECT; + boot_params->si_stepping = ivpu_revision(vdev); + boot_params->device_id = ivpu_device_id(vdev); + boot_params->feature_exclusion = vdev->hw->tile_fuse; + boot_params->sku = vdev->hw->sku; + + boot_params->min_freq_pll_ratio = vdev->hw->pll.min_ratio; + boot_params->pn_freq_pll_ratio = vdev->hw->pll.pn_ratio; + boot_params->max_freq_pll_ratio = vdev->hw->pll.max_ratio; + + boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev); + boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev); + boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev); + + wmb(); /* Flush WC buffers after writing bootparams */ + + ivpu_fw_boot_params_print(vdev, boot_params); +} diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h new file mode 100644 index 00000000000000..8d275c802d1c8e --- /dev/null +++ b/drivers/accel/ivpu/ivpu_fw.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#ifndef __IVPU_FW_H__ +#define __IVPU_FW_H__ + +struct ivpu_device; +struct ivpu_bo; +struct vpu_boot_params; + +struct ivpu_fw_info { + const struct firmware *file; + struct ivpu_bo *mem; + struct ivpu_bo *mem_shave_nn; + struct ivpu_bo *mem_log_crit; + struct ivpu_bo *mem_log_verb; + u64 runtime_addr; + u32 runtime_size; + u64 image_load_offset; + u32 image_size; + u32 shave_nn_size; + u64 entry_point; /* Cold or warm boot entry point for next boot */ + u64 cold_boot_entry_point; +}; + +int ivpu_fw_init(struct ivpu_device *vdev); +void ivpu_fw_fini(struct ivpu_device *vdev); +int ivpu_fw_load(struct ivpu_device *vdev); +void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *bp); + +static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev) +{ + return vdev->fw->entry_point == vdev->fw->cold_boot_entry_point; +} + +#endif /* __IVPU_FW_H__ */ diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c new file mode 100644 index 00000000000000..01d47d3bad5bbb --- /dev/null +++ b/drivers/accel/ivpu/ivpu_gem.c @@ -0,0 +1,749 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "ivpu_drv.h" +#include "ivpu_gem.h" +#include "ivpu_hw.h" +#include "ivpu_mmu.h" +#include "ivpu_mmu_context.h" + +MODULE_IMPORT_NS(DMA_BUF); + +static const struct drm_gem_object_funcs ivpu_gem_funcs; + +static struct lock_class_key prime_bo_lock_class_key; + +static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo) +{ + /* Pages are managed by the underlying dma-buf */ + return 0; +} + +static void prime_free_pages_locked(struct ivpu_bo *bo) +{ + /* Pages are managed by the underlying dma-buf */ +} + +static int prime_map_pages_locked(struct ivpu_bo *bo) +{ + struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); + struct sg_table *sgt; + + sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) { + ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt)); + return PTR_ERR(sgt); + } + + bo->sgt = sgt; + return 0; +} + +static void prime_unmap_pages_locked(struct ivpu_bo *bo) +{ + 
dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL); + bo->sgt = NULL; +} + +static const struct ivpu_bo_ops prime_ops = { + .type = IVPU_BO_TYPE_PRIME, + .name = "prime", + .alloc_pages = prime_alloc_pages_locked, + .free_pages = prime_free_pages_locked, + .map_pages = prime_map_pages_locked, + .unmap_pages = prime_unmap_pages_locked, +}; + +static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo) +{ + int npages = bo->base.size >> PAGE_SHIFT; + struct page **pages; + + pages = drm_gem_get_pages(&bo->base); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + if (bo->flags & DRM_IVPU_BO_WC) + set_pages_array_wc(pages, npages); + else if (bo->flags & DRM_IVPU_BO_UNCACHED) + set_pages_array_uc(pages, npages); + + bo->pages = pages; + return 0; +} + +static void shmem_free_pages_locked(struct ivpu_bo *bo) +{ + if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED) + set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT); + + drm_gem_put_pages(&bo->base, bo->pages, true, false); + bo->pages = NULL; +} + +static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo) +{ + int npages = bo->base.size >> PAGE_SHIFT; + struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); + struct sg_table *sgt; + int ret; + + sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages); + if (IS_ERR(sgt)) { + ivpu_err(vdev, "Failed to allocate sgtable\n"); + return PTR_ERR(sgt); + } + + ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0); + if (ret) { + ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret); + goto err_free_sgt; + } + + bo->sgt = sgt; + return 0; + +err_free_sgt: + kfree(sgt); + return ret; +} + +static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo) +{ + struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); + + dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0); + sg_free_table(bo->sgt); + kfree(bo->sgt); + bo->sgt = NULL; +} + +static const struct ivpu_bo_ops shmem_ops = { + .type = IVPU_BO_TYPE_SHMEM, + .name = "shmem", + .alloc_pages = shmem_alloc_pages_locked, + .free_pages = shmem_free_pages_locked, + .map_pages = ivpu_bo_map_pages_locked, + .unmap_pages = ivpu_bo_unmap_pages_locked, +}; + +static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo) +{ + unsigned int i, npages = bo->base.size >> PAGE_SHIFT; + struct page **pages; + int ret; + + pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL); + if (!pages) + return -ENOMEM; + + for (i = 0; i < npages; i++) { + pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); + if (!pages[i]) { + ret = -ENOMEM; + goto err_free_pages; + } + cond_resched(); + } + + bo->pages = pages; + return 0; + +err_free_pages: + while (i--) + put_page(pages[i]); + kvfree(pages); + return ret; +} + +static void internal_free_pages_locked(struct ivpu_bo *bo) +{ + unsigned int i, npages = bo->base.size >> PAGE_SHIFT; + + for (i = 0; i < npages; i++) + put_page(bo->pages[i]); + + kvfree(bo->pages); + bo->pages = NULL; +} + +static const struct ivpu_bo_ops internal_ops = { + .type = IVPU_BO_TYPE_INTERNAL, + .name = "internal", + .alloc_pages = internal_alloc_pages_locked, + .free_pages = internal_free_pages_locked, + .map_pages = ivpu_bo_map_pages_locked, + .unmap_pages = ivpu_bo_unmap_pages_locked, +}; + +static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo) +{ + struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); + int ret; + + lockdep_assert_held(&bo->lock); + drm_WARN_ON(&vdev->drm, bo->sgt); + + ret = bo->ops->alloc_pages(bo); + if (ret) { + ivpu_err(vdev, 
"Failed to allocate pages for BO: %d", ret); + return ret; + } + + ret = bo->ops->map_pages(bo); + if (ret) { + ivpu_err(vdev, "Failed to map pages for BO: %d", ret); + goto err_free_pages; + } + return ret; + +err_free_pages: + bo->ops->free_pages(bo); + return ret; +} + +static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo) +{ + mutex_lock(&bo->lock); + + WARN_ON(!bo->sgt); + bo->ops->unmap_pages(bo); + WARN_ON(bo->sgt); + bo->ops->free_pages(bo); + WARN_ON(bo->pages); + + mutex_unlock(&bo->lock); +} + +/* + * ivpu_bo_pin() - pin the backing physical pages and map them to VPU. + * + * This function pins physical memory pages, then maps the physical pages + * to IOMMU address space and finally updates the VPU MMU page tables + * to allow the VPU to translate VPU address to IOMMU address. + */ +int __must_check ivpu_bo_pin(struct ivpu_bo *bo) +{ + struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); + int ret = 0; + + mutex_lock(&bo->lock); + + if (!bo->vpu_addr) { + ivpu_err(vdev, "vpu_addr not set for BO ctx_id: %d handle: %d\n", + bo->ctx->id, bo->handle); + ret = -EINVAL; + goto unlock; + } + + if (!bo->sgt) { + ret = ivpu_bo_alloc_and_map_pages_locked(bo); + if (ret) + goto unlock; + } + + if (!bo->mmu_mapped) { + ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt, + ivpu_bo_is_snooped(bo)); + if (ret) { + ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret); + goto unlock; + } + bo->mmu_mapped = true; + } + +unlock: + mutex_unlock(&bo->lock); + + return ret; +} + +static int +ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx, + const struct ivpu_addr_range *range) +{ + struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); + int ret; + + if (!range) { + if (bo->flags & DRM_IVPU_BO_HIGH_MEM) + range = &vdev->hw->ranges.user_high; + else + range = &vdev->hw->ranges.user_low; + } + + mutex_lock(&ctx->lock); + ret = ivpu_mmu_context_insert_node_locked(ctx, range, bo->base.size, &bo->mm_node); + if (!ret) { + bo->ctx = ctx; + bo->vpu_addr = bo->mm_node.start; + list_add_tail(&bo->ctx_node, &ctx->bo_list); + } + mutex_unlock(&ctx->lock); + + return ret; +} + +static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo) +{ + struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); + struct ivpu_mmu_context *ctx = bo->ctx; + + ivpu_dbg(vdev, BO, "remove from ctx: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n", + ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped); + + mutex_lock(&bo->lock); + + if (bo->mmu_mapped) { + drm_WARN_ON(&vdev->drm, !bo->sgt); + ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt); + bo->mmu_mapped = false; + } + + mutex_lock(&ctx->lock); + list_del(&bo->ctx_node); + bo->vpu_addr = 0; + bo->ctx = NULL; + ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node); + mutex_unlock(&ctx->lock); + + mutex_unlock(&bo->lock); +} + +void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx) +{ + struct ivpu_bo *bo, *tmp; + + list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node) + ivpu_bo_free_vpu_addr(bo); +} + +static struct ivpu_bo * +ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context, + u64 size, u32 flags, const struct ivpu_bo_ops *ops, + const struct ivpu_addr_range *range, u64 user_ptr) +{ + struct ivpu_bo *bo; + int ret = 0; + + if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size))) + return ERR_PTR(-EINVAL); + + switch (flags & DRM_IVPU_BO_CACHE_MASK) { + case DRM_IVPU_BO_CACHED: + case DRM_IVPU_BO_UNCACHED: + case DRM_IVPU_BO_WC: + break; + default: + return ERR_PTR(-EINVAL); + } + + 
bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (!bo) + return ERR_PTR(-ENOMEM); + + mutex_init(&bo->lock); + bo->base.funcs = &ivpu_gem_funcs; + bo->flags = flags; + bo->ops = ops; + bo->user_ptr = user_ptr; + + if (ops->type == IVPU_BO_TYPE_SHMEM) + ret = drm_gem_object_init(&vdev->drm, &bo->base, size); + else + drm_gem_private_object_init(&vdev->drm, &bo->base, size); + + if (ret) { + ivpu_err(vdev, "Failed to initialize drm object\n"); + goto err_free; + } + + if (flags & DRM_IVPU_BO_MAPPABLE) { + ret = drm_gem_create_mmap_offset(&bo->base); + if (ret) { + ivpu_err(vdev, "Failed to allocate mmap offset\n"); + goto err_release; + } + } + + if (mmu_context) { + ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range); + if (ret) { + ivpu_err(vdev, "Failed to add BO to context: %d\n", ret); + goto err_release; + } + } + + return bo; + +err_release: + drm_gem_object_release(&bo->base); +err_free: + kfree(bo); + return ERR_PTR(ret); +} + +static void ivpu_bo_free(struct drm_gem_object *obj) +{ + struct ivpu_bo *bo = to_ivpu_bo(obj); + struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); + + if (bo->ctx) + ivpu_dbg(vdev, BO, "free: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n", + bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped); + else + ivpu_dbg(vdev, BO, "free: ctx (released) allocated %d mmu_mapped %d\n", + (bool)bo->sgt, bo->mmu_mapped); + + drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ)); + + vunmap(bo->kvaddr); + + if (bo->ctx) + ivpu_bo_free_vpu_addr(bo); + + if (bo->sgt) + ivpu_bo_unmap_and_free_pages(bo); + + if (bo->base.import_attach) + drm_prime_gem_destroy(&bo->base, bo->sgt); + + drm_gem_object_release(&bo->base); + + mutex_destroy(&bo->lock); + kfree(bo); +} + +static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + struct ivpu_bo *bo = to_ivpu_bo(obj); + struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); + + ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s", + bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size, bo->ops->name); + + if (obj->import_attach) { + /* Drop the reference drm_gem_mmap_obj() acquired.*/ + drm_gem_object_put(obj); + vma->vm_private_data = NULL; + return dma_buf_mmap(obj->dma_buf, vma, 0); + } + + vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND; + vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags)); + + return 0; +} + +static struct sg_table *ivpu_bo_get_sg_table(struct drm_gem_object *obj) +{ + struct ivpu_bo *bo = to_ivpu_bo(obj); + loff_t npages = obj->size >> PAGE_SHIFT; + int ret = 0; + + mutex_lock(&bo->lock); + + if (!bo->sgt) + ret = ivpu_bo_alloc_and_map_pages_locked(bo); + + mutex_unlock(&bo->lock); + + if (ret) + return ERR_PTR(ret); + + return drm_prime_pages_to_sg(obj->dev, bo->pages, npages); +} + +static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct drm_gem_object *obj = vma->vm_private_data; + struct ivpu_bo *bo = to_ivpu_bo(obj); + loff_t npages = obj->size >> PAGE_SHIFT; + pgoff_t page_offset; + struct page *page; + vm_fault_t ret; + int err; + + mutex_lock(&bo->lock); + + if (!bo->sgt) { + err = ivpu_bo_alloc_and_map_pages_locked(bo); + if (err) { + ret = vmf_error(err); + goto unlock; + } + } + + /* We don't use vmf->pgoff since that has the fake offset */ + page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; + if (page_offset >= npages) { + ret = VM_FAULT_SIGBUS; + } else { + page = bo->pages[page_offset]; + ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page)); 
+	}
+
+unlock:
+	mutex_unlock(&bo->lock);
+
+	return ret;
+}
+
+static const struct vm_operations_struct ivpu_vm_ops = {
+	.fault = ivpu_vm_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs ivpu_gem_funcs = {
+	.free = ivpu_bo_free,
+	.mmap = ivpu_bo_mmap,
+	.vm_ops = &ivpu_vm_ops,
+	.get_sg_table = ivpu_bo_get_sg_table,
+};
+
+int
+ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+	struct ivpu_file_priv *file_priv = file->driver_priv;
+	struct ivpu_device *vdev = file_priv->vdev;
+	struct drm_ivpu_bo_create *args = data;
+	u64 size = PAGE_ALIGN(args->size);
+	struct ivpu_bo *bo;
+	int ret;
+
+	if (args->flags & ~DRM_IVPU_BO_FLAGS)
+		return -EINVAL;
+
+	if (size == 0)
+		return -EINVAL;
+
+	bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0);
+	if (IS_ERR(bo)) {
+		ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
+			 bo, file_priv->ctx.id, args->size, args->flags);
+		return PTR_ERR(bo);
+	}
+
+	ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
+	if (!ret) {
+		args->vpu_addr = bo->vpu_addr;
+		args->handle = bo->handle;
+	}
+
+	ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n",
+		 file_priv->ctx.id, bo->vpu_addr, bo->base.size, bo->flags);
+
+	/* Print the debug info before dropping our reference: if handle
+	 * creation failed, the put above would have freed the BO.
+	 */
+	drm_gem_object_put(&bo->base);
+
+	return ret;
+}
+
+struct ivpu_bo *
+ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags)
+{
+	const struct ivpu_addr_range *range;
+	struct ivpu_addr_range fixed_range;
+	struct ivpu_bo *bo;
+	pgprot_t prot;
+	int ret;
+
+	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
+	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
+
+	if (vpu_addr) {
+		fixed_range.start = vpu_addr;
+		fixed_range.end = vpu_addr + size;
+		range = &fixed_range;
+	} else {
+		range = &vdev->hw->ranges.global_low;
+	}
+
+	bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
+	if (IS_ERR(bo)) {
+		ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
+			 bo, vpu_addr, size, flags);
+		return NULL;
+	}
+
+	ret = ivpu_bo_pin(bo);
+	if (ret)
+		goto err_put;
+
+	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
+		drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);
+
+	prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
+	bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
+	if (!bo->kvaddr) {
+		ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n");
+		goto err_put;
+	}
+
+	ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n",
+		 bo->vpu_addr, bo->base.size, flags);
+
+	return bo;
+
+err_put:
+	drm_gem_object_put(&bo->base);
+	return NULL;
+}
+
+void ivpu_bo_free_internal(struct ivpu_bo *bo)
+{
+	drm_gem_object_put(&bo->base);
+}
+
+struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
+{
+	struct ivpu_device *vdev = to_ivpu_device(dev);
+	struct dma_buf_attachment *attach;
+	struct ivpu_bo *bo;
+
+	attach = dma_buf_attach(buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_CAST(attach);
+
+	get_dma_buf(buf);
+
+	bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0);
+	if (IS_ERR(bo)) {
+		ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size);
+		goto err_detach;
+	}
+
+	lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);
+
+	bo->base.import_attach = attach;
+
+	return &bo->base;
+
+err_detach:
+	dma_buf_detach(buf, attach);
+	dma_buf_put(buf);
+	return ERR_CAST(bo);
+}
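+
+/*
+ * Illustrative userspace flow for the BO ioctls (editor's sketch, not part
+ * of this patch; the DRM_IOCTL_IVPU_BO_* wrapper names are assumed to come
+ * from the ivpu uAPI header and are not defined in this file):
+ *
+ *	struct drm_ivpu_bo_create create = {
+ *		.size = 2 * 4096,
+ *		.flags = DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE,
+ *	};
+ *
+ *	if (ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &create) == 0) {
+ *		struct drm_ivpu_bo_info info = { .handle = create.handle };
+ *
+ *		ioctl(fd, DRM_IOCTL_IVPU_BO_INFO, &info);
+ *		mmap(NULL, info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *		     fd, info.mmap_offset);
+ *	}
+ */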
+
+int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+	struct ivpu_file_priv *file_priv = file->driver_priv;
+	struct ivpu_device *vdev = to_ivpu_device(dev);
+	struct drm_ivpu_bo_info *args = data;
+	struct drm_gem_object *obj;
+	struct ivpu_bo *bo;
+	int ret = 0;
+
+	obj = drm_gem_object_lookup(file, args->handle);
+	if (!obj)
+		return -ENOENT;
+
+	bo = to_ivpu_bo(obj);
+
+	mutex_lock(&bo->lock);
+
+	if (!bo->ctx) {
+		ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL);
+		if (ret) {
+			ivpu_err(vdev, "Failed to allocate vpu_addr: %d\n", ret);
+			goto unlock;
+		}
+	}
+
+	args->flags = bo->flags;
+	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
+	args->vpu_addr = bo->vpu_addr;
+	args->size = obj->size;
+unlock:
+	mutex_unlock(&bo->lock);
+	drm_gem_object_put(obj);
+	return ret;
+}
+
+int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+	struct drm_ivpu_bo_wait *args = data;
+	struct drm_gem_object *obj;
+	unsigned long timeout;
+	long ret;
+
+	timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+
+	obj = drm_gem_object_lookup(file, args->handle);
+	if (!obj)
+		return -EINVAL;
+
+	ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, true, timeout);
+	if (ret == 0) {
+		ret = -ETIMEDOUT;
+	} else if (ret > 0) {
+		ret = 0;
+		args->job_status = to_ivpu_bo(obj)->job_status;
+	}
+
+	drm_gem_object_put(obj);
+
+	return ret;
+}
+
+static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
+{
+	unsigned long dma_refcount = 0;
+
+	if (bo->base.dma_buf && bo->base.dma_buf->file)
+		dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);
+
+	drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n",
+		   bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size,
+		   kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
+}
+
+void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
+{
+	struct ivpu_device *vdev = to_ivpu_device(dev);
+	struct ivpu_file_priv *file_priv;
+	unsigned long ctx_id;
+	struct ivpu_bo *bo;
+
+	drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n",
+		   "ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type");
+
+	mutex_lock(&vdev->gctx.lock);
+	list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node)
+		ivpu_bo_print_info(bo, p);
+	mutex_unlock(&vdev->gctx.lock);
+
+	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+		file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
+		if (!file_priv)
+			continue;
+
+		mutex_lock(&file_priv->ctx.lock);
+		list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node)
+			ivpu_bo_print_info(bo, p);
+		mutex_unlock(&file_priv->ctx.lock);
+
+		ivpu_file_priv_put(&file_priv);
+	}
+}
+
+void ivpu_bo_list_print(struct drm_device *dev)
+{
+	struct drm_printer p = drm_info_printer(dev->dev);
+
+	ivpu_bo_list(dev, &p);
+}
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
new file mode 100644
index 00000000000000..6b0ceda5f25374
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+#ifndef __IVPU_GEM_H__
+#define __IVPU_GEM_H__
+
+#include <drm/drm_gem.h>
+#include <drm/drm_mm.h>
+
+struct dma_buf;
+struct ivpu_bo_ops;
+struct ivpu_file_priv;
+
+struct ivpu_bo {
+	struct drm_gem_object base;
+	const struct ivpu_bo_ops *ops;
+
+	struct ivpu_mmu_context *ctx;
+	struct list_head ctx_node;
+	struct drm_mm_node mm_node;
+
+	struct mutex lock; /* Protects: pages, sgt, mmu_mapped */
+	struct sg_table *sgt;
+	struct page **pages;
+	
bool mmu_mapped;
+
+	void *kvaddr;
+	u64 vpu_addr;
+	u32 handle;
+	u32 flags;
+	uintptr_t user_ptr;
+	u32 job_status;
+};
+
+enum ivpu_bo_type {
+	IVPU_BO_TYPE_SHMEM = 1,
+	IVPU_BO_TYPE_INTERNAL,
+	IVPU_BO_TYPE_PRIME,
+};
+
+struct ivpu_bo_ops {
+	enum ivpu_bo_type type;
+	const char *name;
+	int (*alloc_pages)(struct ivpu_bo *bo);
+	void (*free_pages)(struct ivpu_bo *bo);
+	int (*map_pages)(struct ivpu_bo *bo);
+	void (*unmap_pages)(struct ivpu_bo *bo);
+};
+
+int ivpu_bo_pin(struct ivpu_bo *bo);
+void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx);
+void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
+void ivpu_bo_list_print(struct drm_device *dev);
+
+struct ivpu_bo *
+ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
+void ivpu_bo_free_internal(struct ivpu_bo *bo);
+struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
+void ivpu_bo_unmap_sgt_and_remove_from_context(struct ivpu_bo *bo);
+
+int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+
+static inline struct ivpu_bo *to_ivpu_bo(struct drm_gem_object *obj)
+{
+	return container_of(obj, struct ivpu_bo, base);
+}
+
+static inline struct page *ivpu_bo_get_page(struct ivpu_bo *bo, u64 offset)
+{
+	if (offset >= bo->base.size || !bo->pages)
+		return NULL;
+
+	return bo->pages[offset / PAGE_SIZE];
+}
+
+static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
+{
+	return bo->flags & DRM_IVPU_BO_CACHE_MASK;
+}
+
+static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
+{
+	return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
+}
+
+static inline pgprot_t ivpu_bo_pgprot(struct ivpu_bo *bo, pgprot_t prot)
+{
+	if (bo->flags & DRM_IVPU_BO_WC)
+		return pgprot_writecombine(prot);
+
+	if (bo->flags & DRM_IVPU_BO_UNCACHED)
+		return pgprot_noncached(prot);
+
+	return prot;
+}
+
+static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo)
+{
+	return to_ivpu_device(bo->base.dev);
+}
+
+static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
+{
+	if (vpu_addr < bo->vpu_addr)
+		return NULL;
+
+	if (vpu_addr >= (bo->vpu_addr + bo->base.size))
+		return NULL;
+
+	return bo->kvaddr + (vpu_addr - bo->vpu_addr);
+}
+
+static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr)
+{
+	if (cpu_addr < bo->kvaddr)
+		return 0;
+
+	if (cpu_addr >= (bo->kvaddr + bo->base.size))
+		return 0;
+
+	return bo->vpu_addr + (cpu_addr - bo->kvaddr);
+}
+
+#endif /* __IVPU_GEM_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
new file mode 100644
index 00000000000000..50a9304ab09cf2
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_H__
+#define __IVPU_HW_H__
+
+#include "ivpu_drv.h"
+
+struct ivpu_hw_ops {
+	int (*info_init)(struct ivpu_device *vdev);
+	int (*power_up)(struct ivpu_device *vdev);
+	int (*boot_fw)(struct ivpu_device *vdev);
+	int (*power_down)(struct ivpu_device *vdev);
+	bool (*is_idle)(struct ivpu_device *vdev);
+	void (*wdt_disable)(struct ivpu_device *vdev);
+	void (*diagnose_failure)(struct ivpu_device *vdev);
+	u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
+	u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
+	u32 
(*reg_telemetry_size_get)(struct ivpu_device *vdev); + u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev); + void (*reg_db_set)(struct ivpu_device *vdev, u32 db_id); + u32 (*reg_ipc_rx_addr_get)(struct ivpu_device *vdev); + u32 (*reg_ipc_rx_count_get)(struct ivpu_device *vdev); + void (*reg_ipc_tx_set)(struct ivpu_device *vdev, u32 vpu_addr); + void (*irq_clear)(struct ivpu_device *vdev); + void (*irq_enable)(struct ivpu_device *vdev); + void (*irq_disable)(struct ivpu_device *vdev); + irqreturn_t (*irq_handler)(int irq, void *ptr); +}; + +struct ivpu_addr_range { + resource_size_t start; + resource_size_t end; +}; + +struct ivpu_hw_info { + const struct ivpu_hw_ops *ops; + struct { + struct ivpu_addr_range global_low; + struct ivpu_addr_range global_high; + struct ivpu_addr_range user_low; + struct ivpu_addr_range user_high; + struct ivpu_addr_range global_aliased_pio; + } ranges; + struct { + u8 min_ratio; + u8 max_ratio; + /* + * Pll ratio for the efficiency frequency. The VPU has optimum + * performance to power ratio at this frequency. + */ + u8 pn_ratio; + u32 profiling_freq; + } pll; + u32 tile_fuse; + u32 sku; + u16 config; +}; + +extern const struct ivpu_hw_ops ivpu_hw_mtl_ops; + +static inline int ivpu_hw_info_init(struct ivpu_device *vdev) +{ + return vdev->hw->ops->info_init(vdev); +}; + +static inline int ivpu_hw_power_up(struct ivpu_device *vdev) +{ + ivpu_dbg(vdev, PM, "HW power up\n"); + + return vdev->hw->ops->power_up(vdev); +}; + +static inline int ivpu_hw_boot_fw(struct ivpu_device *vdev) +{ + return vdev->hw->ops->boot_fw(vdev); +}; + +static inline bool ivpu_hw_is_idle(struct ivpu_device *vdev) +{ + return vdev->hw->ops->is_idle(vdev); +}; + +static inline int ivpu_hw_power_down(struct ivpu_device *vdev) +{ + ivpu_dbg(vdev, PM, "HW power down\n"); + + return vdev->hw->ops->power_down(vdev); +}; + +static inline void ivpu_hw_wdt_disable(struct ivpu_device *vdev) +{ + vdev->hw->ops->wdt_disable(vdev); +}; + +/* Register indirect accesses */ +static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev) +{ + return vdev->hw->ops->reg_pll_freq_get(vdev); +}; + +static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev) +{ + return vdev->hw->ops->reg_telemetry_offset_get(vdev); +}; + +static inline u32 ivpu_hw_reg_telemetry_size_get(struct ivpu_device *vdev) +{ + return vdev->hw->ops->reg_telemetry_size_get(vdev); +}; + +static inline u32 ivpu_hw_reg_telemetry_enable_get(struct ivpu_device *vdev) +{ + return vdev->hw->ops->reg_telemetry_enable_get(vdev); +}; + +static inline void ivpu_hw_reg_db_set(struct ivpu_device *vdev, u32 db_id) +{ + vdev->hw->ops->reg_db_set(vdev, db_id); +}; + +static inline u32 ivpu_hw_reg_ipc_rx_addr_get(struct ivpu_device *vdev) +{ + return vdev->hw->ops->reg_ipc_rx_addr_get(vdev); +}; + +static inline u32 ivpu_hw_reg_ipc_rx_count_get(struct ivpu_device *vdev) +{ + return vdev->hw->ops->reg_ipc_rx_count_get(vdev); +}; + +static inline void ivpu_hw_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr) +{ + vdev->hw->ops->reg_ipc_tx_set(vdev, vpu_addr); +}; + +static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev) +{ + vdev->hw->ops->irq_clear(vdev); +}; + +static inline void ivpu_hw_irq_enable(struct ivpu_device *vdev) +{ + vdev->hw->ops->irq_enable(vdev); +}; + +static inline void ivpu_hw_irq_disable(struct ivpu_device *vdev) +{ + vdev->hw->ops->irq_disable(vdev); +}; + +static inline void ivpu_hw_init_range(struct ivpu_addr_range *range, u64 start, u64 size) +{ + range->start = start; + range->end = 
start + size; +} + +static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range) +{ + return range->end - range->start; +} + +static inline void ivpu_hw_diagnose_failure(struct ivpu_device *vdev) +{ + vdev->hw->ops->diagnose_failure(vdev); +} + +#endif /* __IVPU_HW_H__ */ diff --git a/drivers/accel/ivpu/ivpu_hw_mtl.c b/drivers/accel/ivpu/ivpu_hw_mtl.c new file mode 100644 index 00000000000000..62bfaa9081c4f8 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_hw_mtl.c @@ -0,0 +1,1084 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#include "ivpu_drv.h" +#include "ivpu_fw.h" +#include "ivpu_hw_mtl_reg.h" +#include "ivpu_hw_reg_io.h" +#include "ivpu_hw.h" +#include "ivpu_ipc.h" +#include "ivpu_mmu.h" +#include "ivpu_pm.h" + +#define TILE_FUSE_ENABLE_BOTH 0x0 +#define TILE_FUSE_ENABLE_UPPER 0x1 +#define TILE_FUSE_ENABLE_LOWER 0x2 + +#define TILE_SKU_BOTH_MTL 0x3630 +#define TILE_SKU_LOWER_MTL 0x3631 +#define TILE_SKU_UPPER_MTL 0x3632 + +/* Work point configuration values */ +#define WP_CONFIG_1_TILE_5_3_RATIO 0x0101 +#define WP_CONFIG_1_TILE_4_3_RATIO 0x0102 +#define WP_CONFIG_2_TILE_5_3_RATIO 0x0201 +#define WP_CONFIG_2_TILE_4_3_RATIO 0x0202 +#define WP_CONFIG_0_TILE_PLL_OFF 0x0000 + +#define PLL_REF_CLK_FREQ (50 * 1000000) +#define PLL_SIMULATION_FREQ (10 * 1000000) +#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ) +#define PLL_DEFAULT_EPP_VALUE 0x80 + +#define TIM_SAFE_ENABLE 0xf1d0dead +#define TIM_WATCHDOG_RESET_VALUE 0xffffffff + +#define TIMEOUT_US (150 * USEC_PER_MSEC) +#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC) +#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC) +#define IDLE_TIMEOUT_US (500 * USEC_PER_MSEC) + +#define ICB_0_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \ + (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \ + (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \ + (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \ + (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \ + (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \ + (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT))) + +#define ICB_1_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \ + (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \ + (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT))) + +#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK) + +#define BUTTRESS_IRQ_MASK ((REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \ + (REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \ + (REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR))) + +#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK) +#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1) + +#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \ + (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \ + (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \ + (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \ + (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \ + (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \ + (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX))) + +static char *ivpu_platform_to_str(u32 platform) +{ + switch (platform) { + case IVPU_PLATFORM_SILICON: + return "IVPU_PLATFORM_SILICON"; + case IVPU_PLATFORM_SIMICS: + return "IVPU_PLATFORM_SIMICS"; + case IVPU_PLATFORM_FPGA: + return "IVPU_PLATFORM_FPGA"; + default: + return "Invalid platform"; + } +} + +static void 
ivpu_hw_read_platform(struct ivpu_device *vdev) +{ + u32 gen_ctrl = REGV_RD32(MTL_VPU_HOST_SS_GEN_CTRL); + u32 platform = REG_GET_FLD(MTL_VPU_HOST_SS_GEN_CTRL, PS, gen_ctrl); + + if (platform == IVPU_PLATFORM_SIMICS || platform == IVPU_PLATFORM_FPGA) + vdev->platform = platform; + else + vdev->platform = IVPU_PLATFORM_SILICON; + + ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n", + ivpu_platform_to_str(vdev->platform), vdev->platform); +} + +static void ivpu_hw_wa_init(struct ivpu_device *vdev) +{ + vdev->wa.punit_disabled = ivpu_is_fpga(vdev); + vdev->wa.clear_runtime_mem = false; +} + +static void ivpu_hw_timeouts_init(struct ivpu_device *vdev) +{ + if (ivpu_is_simics(vdev) || ivpu_is_fpga(vdev)) { + vdev->timeout.boot = 100000; + vdev->timeout.jsm = 50000; + vdev->timeout.tdr = 2000000; + vdev->timeout.reschedule_suspend = 1000; + } else { + vdev->timeout.boot = 1000; + vdev->timeout.jsm = 500; + vdev->timeout.tdr = 2000; + vdev->timeout.reschedule_suspend = 10; + } +} + +static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev) +{ + return REGB_POLL_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US); +} + +/* Send KMD initiated workpoint change */ +static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio, + u16 target_ratio, u16 config) +{ + int ret; + u32 val; + + ret = ivpu_pll_wait_for_cmd_send(vdev); + if (ret) { + ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret); + return ret; + } + + val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD0); + val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val); + val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val); + REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD0, val); + + val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD1); + val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val); + val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val); + REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD1, val); + + val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD2); + val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val); + REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD2, val); + + val = REGB_RD32(MTL_BUTTRESS_WP_REQ_CMD); + val = REG_SET_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, val); + REGB_WR32(MTL_BUTTRESS_WP_REQ_CMD, val); + + ret = ivpu_pll_wait_for_cmd_send(vdev); + if (ret) + ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret); + + return ret; +} + +static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable) +{ + u32 exp_val = enable ? 
0x1 : 0x0; + + if (IVPU_WA(punit_disabled)) + return 0; + + return REGB_POLL_FLD(MTL_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US); +} + +static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev) +{ + if (IVPU_WA(punit_disabled)) + return 0; + + return REGB_POLL_FLD(MTL_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US); +} + +static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev) +{ + struct ivpu_hw_info *hw = vdev->hw; + u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio; + u32 fmin_fuse, fmax_fuse; + + fmin_fuse = REGB_RD32(MTL_BUTTRESS_FMIN_FUSE); + fuse_min_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse); + fuse_pn_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse); + + fmax_fuse = REGB_RD32(MTL_BUTTRESS_FMAX_FUSE); + fuse_max_ratio = REG_GET_FLD(MTL_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse); + + hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio); + hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio); + hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio); +} + +static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable) +{ + struct ivpu_hw_info *hw = vdev->hw; + u16 target_ratio; + u16 config; + int ret; + + if (IVPU_WA(punit_disabled)) { + ivpu_dbg(vdev, PM, "Skipping PLL request on %s\n", + ivpu_platform_to_str(vdev->platform)); + return 0; + } + + if (enable) { + target_ratio = hw->pll.pn_ratio; + config = hw->config; + } else { + target_ratio = 0; + config = 0; + } + + ivpu_dbg(vdev, PM, "PLL workpoint request: %d Hz\n", PLL_RATIO_TO_FREQ(target_ratio)); + + ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, target_ratio, config); + if (ret) { + ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret); + return ret; + } + + ret = ivpu_pll_wait_for_lock(vdev, enable); + if (ret) { + ivpu_err(vdev, "Timed out waiting for PLL lock\n"); + return ret; + } + + if (enable) { + ret = ivpu_pll_wait_for_status_ready(vdev); + if (ret) { + ivpu_err(vdev, "Timed out waiting for PLL ready status\n"); + return ret; + } + } + + return 0; +} + +static int ivpu_pll_enable(struct ivpu_device *vdev) +{ + return ivpu_pll_drive(vdev, true); +} + +static int ivpu_pll_disable(struct ivpu_device *vdev) +{ + return ivpu_pll_drive(vdev, false); +} + +static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev) +{ + u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_CLR); + + val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, TOP_NOC, val); + val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, DSS_MAS, val); + val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, MSS_MAS, val); + + REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_CLR, val); +} + +static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable) +{ + u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_SET); + + if (enable) { + val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val); + val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val); + val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val); + } else { + val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val); + val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val); + val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val); + } + + REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_SET, val); +} + +static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable) +{ + u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_CLK_SET); + + if (enable) { + val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val); 
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val); + val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val); + } else { + val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val); + val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val); + val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val); + } + + REGV_WR32(MTL_VPU_HOST_SS_CPR_CLK_SET, val); +} + +static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val) +{ + u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN); + + if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val)) + return -EIO; + + return 0; +} + +static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) +{ + u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QACCEPTN); + + if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val)) + return -EIO; + + return 0; +} + +static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) +{ + u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QDENY); + + if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val)) + return -EIO; + + return 0; +} + +static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val) +{ + u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN); + + if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) || + !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val)) + return -EIO; + + return 0; +} + +static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) +{ + u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QACCEPTN); + + if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) || + !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val)) + return -EIO; + + return 0; +} + +static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) +{ + u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QDENY); + + if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) || + !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val)) + return -EIO; + + return 0; +} + +static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev) +{ + ivpu_boot_host_ss_rst_clr_assert(vdev); + + return ivpu_boot_noc_qreqn_check(vdev, 0x0); +} + +static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev) +{ + REGV_WR32(MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN, 0x0); +} + +static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable) +{ + int ret; + u32 val; + + val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN); + if (enable) + val = REG_SET_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val); + else + val = REG_CLR_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val); + REGV_WR32(MTL_VPU_HOST_SS_NOC_QREQN, val); + + ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 
0x1 : 0x0);
+	if (ret) {
+		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+		return ret;
+	}
+
+	ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
+	if (ret)
+		ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+	return ret;
+}
+
+static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
+{
+	return ivpu_boot_host_ss_axi_drive(vdev, true);
+}
+
+static int ivpu_boot_host_ss_axi_disable(struct ivpu_device *vdev)
+{
+	return ivpu_boot_host_ss_axi_drive(vdev, false);
+}
+
+static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
+{
+	int ret;
+	u32 val;
+
+	val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);
+	if (enable) {
+		val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
+		val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+	} else {
+		val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
+		val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+	}
+	REGV_WR32(MTL_VPU_TOP_NOC_QREQN, val);
+
+	ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+	if (ret) {
+		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+		return ret;
+	}
+
+	ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
+	if (ret)
+		ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+	return ret;
+}
+
+static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
+{
+	return ivpu_boot_host_ss_top_noc_drive(vdev, true);
+}
+
+static int ivpu_boot_host_ss_top_noc_disable(struct ivpu_device *vdev)
+{
+	return ivpu_boot_host_ss_top_noc_drive(vdev, false);
+}
+
+static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
+{
+	u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
+
+	if (enable)
+		val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
+	else
+		val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
+
+	REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
+}
+
+static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
+{
+	u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0);
+
+	if (enable)
+		val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
+	else
+		val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
+
+	REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, val);
+}
+
+static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
+{
+	/* The FPGA model (UPF) is not power aware, so Power Island polling is skipped */
+	if (ivpu_is_fpga(vdev))
+		return 0;
+
+	return REGV_POLL_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
+			     exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
+}
+
+static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
+{
+	u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0);
+
+	if (enable)
+		val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
+	else
+		val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
+
+	REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, val);
+}
+
+static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
+{
+	u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE);
+
+	if (enable)
+		val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
+	else
+		val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
+
+	REGV_WR32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, val);
+}
+
+static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
+{
+	ivpu_boot_dpu_active_drive(vdev, false);
+	ivpu_boot_pwr_island_isolation_drive(vdev, true);
+	ivpu_boot_pwr_island_trickle_drive(vdev, false);
+	
ivpu_boot_pwr_island_drive(vdev, false); + + return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0); +} + +static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev) +{ + int ret; + + ivpu_boot_pwr_island_trickle_drive(vdev, true); + ivpu_boot_pwr_island_drive(vdev, true); + + ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1); + if (ret) { + ivpu_err(vdev, "Timed out waiting for power island status\n"); + return ret; + } + + ret = ivpu_boot_top_noc_qrenqn_check(vdev, 0x0); + if (ret) { + ivpu_err(vdev, "Failed qrenqn check %d\n", ret); + return ret; + } + + ivpu_boot_host_ss_clk_drive(vdev, true); + ivpu_boot_pwr_island_isolation_drive(vdev, false); + ivpu_boot_host_ss_rst_drive(vdev, true); + ivpu_boot_dpu_active_drive(vdev, true); + + return ret; +} + +static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev) +{ + u32 val = REGV_RD32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES); + + val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val); + val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val); + val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val); + + REGV_WR32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, val); +} + +static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev) +{ + u32 val = REGV_RD32(MTL_VPU_HOST_IF_TBU_MMUSSIDV); + + if (ivpu_is_fpga(vdev)) { + val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val); + val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val); + val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val); + val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val); + } else { + val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val); + val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val); + val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val); + val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val); + val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val); + val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val); + val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU3_AWMMUSSIDV, val); + val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU3_ARMMUSSIDV, val); + } + + REGV_WR32(MTL_VPU_HOST_IF_TBU_MMUSSIDV, val); +} + +static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev) +{ + u32 val; + + val = REGV_RD32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC); + val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val); + + val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val); + REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val); + + val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val); + REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val); + + val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val); + REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val); + + val = vdev->fw->entry_point >> 9; + REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val); + + val = REG_SET_FLD(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, DONE, val); + REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val); + + ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n", + vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? 
"cold boot" : "resume"); +} + +static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable) +{ + int ret; + u32 val; + + ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); + if (ret) { + ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret); + return ret; + } + + val = REGB_RD32(MTL_BUTTRESS_VPU_D0I3_CONTROL); + if (enable) + val = REG_SET_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val); + else + val = REG_CLR_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val); + REGB_WR32(MTL_BUTTRESS_VPU_D0I3_CONTROL, val); + + ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); + if (ret) + ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret); + + return ret; +} + +static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev) +{ + struct ivpu_hw_info *hw = vdev->hw; + u32 tile_fuse; + + tile_fuse = REGB_RD32(MTL_BUTTRESS_TILE_FUSE); + if (!REG_TEST_FLD(MTL_BUTTRESS_TILE_FUSE, VALID, tile_fuse)) + ivpu_warn(vdev, "Tile Fuse: Invalid (0x%x)\n", tile_fuse); + + hw->tile_fuse = REG_GET_FLD(MTL_BUTTRESS_TILE_FUSE, SKU, tile_fuse); + switch (hw->tile_fuse) { + case TILE_FUSE_ENABLE_LOWER: + hw->sku = TILE_SKU_LOWER_MTL; + hw->config = WP_CONFIG_1_TILE_5_3_RATIO; + ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Lower\n"); + break; + case TILE_FUSE_ENABLE_UPPER: + hw->sku = TILE_SKU_UPPER_MTL; + hw->config = WP_CONFIG_1_TILE_4_3_RATIO; + ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Upper\n"); + break; + case TILE_FUSE_ENABLE_BOTH: + hw->sku = TILE_SKU_BOTH_MTL; + hw->config = WP_CONFIG_2_TILE_5_3_RATIO; + ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Both\n"); + break; + default: + hw->config = WP_CONFIG_0_TILE_PLL_OFF; + ivpu_dbg(vdev, MISC, "Tile Fuse: Disable\n"); + break; + } + + ivpu_pll_init_frequency_ratios(vdev); + + ivpu_hw_init_range(&hw->ranges.global_low, 0x80000000, SZ_512M); + ivpu_hw_init_range(&hw->ranges.global_high, 0x180000000, SZ_2M); + ivpu_hw_init_range(&hw->ranges.user_low, 0xc0000000, 255 * SZ_1M); + ivpu_hw_init_range(&hw->ranges.user_high, 0x180000000, SZ_2G); + hw->ranges.global_aliased_pio = hw->ranges.user_low; + + return 0; +} + +static int ivpu_hw_mtl_reset(struct ivpu_device *vdev) +{ + int ret; + u32 val; + + if (IVPU_WA(punit_disabled)) + return 0; + + ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US); + if (ret) { + ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n"); + return ret; + } + + val = REGB_RD32(MTL_BUTTRESS_VPU_IP_RESET); + val = REG_SET_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, val); + REGB_WR32(MTL_BUTTRESS_VPU_IP_RESET, val); + + ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US); + if (ret) + ivpu_err(vdev, "Timed out waiting for RESET completion\n"); + + return ret; +} + +static int ivpu_hw_mtl_d0i3_enable(struct ivpu_device *vdev) +{ + int ret; + + ret = ivpu_boot_d0i3_drive(vdev, true); + if (ret) + ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret); + + udelay(5); /* VPU requires 5 us to complete the transition */ + + return ret; +} + +static int ivpu_hw_mtl_d0i3_disable(struct ivpu_device *vdev) +{ + int ret; + + ret = ivpu_boot_d0i3_drive(vdev, false); + if (ret) + ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret); + + return ret; +} + +static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev) +{ + int ret; + + ivpu_hw_read_platform(vdev); + ivpu_hw_wa_init(vdev); + ivpu_hw_timeouts_init(vdev); + + ret = ivpu_hw_mtl_reset(vdev); + if (ret) + ivpu_warn(vdev, "Failed to reset HW: %d\n", ret); + + ret = ivpu_hw_mtl_d0i3_disable(vdev); + if (ret) + 
ivpu_warn(vdev, "Failed to disable D0i3: %d\n", ret);
+
+	ret = ivpu_pll_enable(vdev);
+	if (ret) {
+		ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
+		return ret;
+	}
+
+	ret = ivpu_boot_host_ss_configure(vdev);
+	if (ret) {
+		ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * The control circuitry for vpu_idle indication logic powers up active.
+	 * To avoid an unnecessary low power mode signal from LRT during bring up,
+	 * KMD disables the circuitry prior to bringing up the Main Power island.
+	 */
+	ivpu_boot_vpu_idle_gen_disable(vdev);
+
+	ret = ivpu_boot_pwr_domain_enable(vdev);
+	if (ret) {
+		ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
+		return ret;
+	}
+
+	ret = ivpu_boot_host_ss_axi_enable(vdev);
+	if (ret) {
+		ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
+		return ret;
+	}
+
+	ret = ivpu_boot_host_ss_top_noc_enable(vdev);
+	if (ret)
+		ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);
+
+	return ret;
+}
+
+static int ivpu_hw_mtl_boot_fw(struct ivpu_device *vdev)
+{
+	ivpu_boot_no_snoop_enable(vdev);
+	ivpu_boot_tbu_mmu_enable(vdev);
+	ivpu_boot_soc_cpu_boot(vdev);
+
+	return 0;
+}
+
+static bool ivpu_hw_mtl_is_idle(struct ivpu_device *vdev)
+{
+	u32 val;
+
+	if (IVPU_WA(punit_disabled))
+		return true;
+
+	val = REGB_RD32(MTL_BUTTRESS_VPU_STATUS);
+	return REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, READY, val) &&
+	       REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, IDLE, val);
+}
+
+static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
+{
+	int ret = 0;
+
+	/* FPGA requires manual clearing of IP_Reset bit by enabling quiescent state */
+	if (ivpu_is_fpga(vdev)) {
+		if (ivpu_boot_host_ss_top_noc_disable(vdev)) {
+			ivpu_err(vdev, "Failed to disable TOP NOC\n");
+			ret = -EIO;
+		}
+
+		if (ivpu_boot_host_ss_axi_disable(vdev)) {
+			ivpu_err(vdev, "Failed to disable AXI\n");
+			ret = -EIO;
+		}
+	}
+
+	if (ivpu_boot_pwr_domain_disable(vdev)) {
+		ivpu_err(vdev, "Failed to disable power domain\n");
+		ret = -EIO;
+	}
+
+	if (ivpu_pll_disable(vdev)) {
+		ivpu_err(vdev, "Failed to disable PLL\n");
+		ret = -EIO;
+	}
+
+	if (ivpu_hw_mtl_d0i3_enable(vdev))
+		ivpu_warn(vdev, "Failed to enable D0i3\n");
+
+	return ret;
+}
+
+static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
+{
+	u32 val;
+
+	/* Enable writing and set non-zero WDT value */
+	REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+	REGV_WR32(MTL_VPU_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
+
+	/* Enable writing and disable watchdog timer */
+	REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+	REGV_WR32(MTL_VPU_CPU_SS_TIM_WDOG_EN, 0);
+
+	/* Now clear the timeout interrupt */
+	val = REGV_RD32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG);
+	val = REG_CLR_FLD(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
+	REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
+}
+
+/* Register indirect accesses */
+static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev)
+{
+	u32 pll_curr_ratio;
+
+	pll_curr_ratio = REGB_RD32(MTL_BUTTRESS_CURRENT_PLL);
+	pll_curr_ratio &= MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK;
+
+	if (!ivpu_is_silicon(vdev))
+		return PLL_SIMULATION_FREQ;
+
+	return PLL_RATIO_TO_FREQ(pll_curr_ratio);
+}
+
+static u32 ivpu_hw_mtl_reg_telemetry_offset_get(struct ivpu_device *vdev)
+{
+	return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_OFFSET);
+}
+
+static u32 ivpu_hw_mtl_reg_telemetry_size_get(struct ivpu_device *vdev)
+{
+	return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_SIZE);
+}
+
+static u32 ivpu_hw_mtl_reg_telemetry_enable_get(struct ivpu_device *vdev)
+{
+	return 
REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_ENABLE); +} + +static void ivpu_hw_mtl_reg_db_set(struct ivpu_device *vdev, u32 db_id) +{ + u32 reg_stride = MTL_VPU_CPU_SS_DOORBELL_1 - MTL_VPU_CPU_SS_DOORBELL_0; + u32 val = REG_FLD(MTL_VPU_CPU_SS_DOORBELL_0, SET); + + REGV_WR32I(MTL_VPU_CPU_SS_DOORBELL_0, reg_stride, db_id, val); +} + +static u32 ivpu_hw_mtl_reg_ipc_rx_addr_get(struct ivpu_device *vdev) +{ + return REGV_RD32(MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM); +} + +static u32 ivpu_hw_mtl_reg_ipc_rx_count_get(struct ivpu_device *vdev) +{ + u32 count = REGV_RD32_SILENT(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT); + + return REG_GET_FLD(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count); +} + +static void ivpu_hw_mtl_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr) +{ + REGV_WR32(MTL_VPU_CPU_SS_TIM_IPC_FIFO, vpu_addr); +} + +static void ivpu_hw_mtl_irq_clear(struct ivpu_device *vdev) +{ + REGV_WR64(MTL_VPU_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK); +} + +static void ivpu_hw_mtl_irq_enable(struct ivpu_device *vdev) +{ + REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK); + REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK); + REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK); + REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0); +} + +static void ivpu_hw_mtl_irq_disable(struct ivpu_device *vdev) +{ + REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1); + REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK); + REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, 0x0ull); + REGB_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0); +} + +static void ivpu_hw_mtl_irq_wdt_nce_handler(struct ivpu_device *vdev) +{ + ivpu_err_ratelimited(vdev, "WDT NCE irq\n"); + + ivpu_pm_schedule_recovery(vdev); +} + +static void ivpu_hw_mtl_irq_wdt_mss_handler(struct ivpu_device *vdev) +{ + ivpu_err_ratelimited(vdev, "WDT MSS irq\n"); + + ivpu_hw_wdt_disable(vdev); + ivpu_pm_schedule_recovery(vdev); +} + +static void ivpu_hw_mtl_irq_noc_firewall_handler(struct ivpu_device *vdev) +{ + ivpu_err_ratelimited(vdev, "NOC Firewall irq\n"); + + ivpu_pm_schedule_recovery(vdev); +} + +/* Handler for IRQs from VPU core (irqV) */ +static u32 ivpu_hw_mtl_irqv_handler(struct ivpu_device *vdev, int irq) +{ + u32 status = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK; + + REGV_WR32(MTL_VPU_HOST_SS_ICB_CLEAR_0, status); + + if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status)) + ivpu_mmu_irq_evtq_handler(vdev); + + if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status)) + ivpu_ipc_irq_handler(vdev); + + if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status)) + ivpu_dbg(vdev, IRQ, "MMU sync complete\n"); + + if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status)) + ivpu_mmu_irq_gerr_handler(vdev); + + if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status)) + ivpu_hw_mtl_irq_wdt_mss_handler(vdev); + + if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status)) + ivpu_hw_mtl_irq_wdt_nce_handler(vdev); + + if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status)) + ivpu_hw_mtl_irq_noc_firewall_handler(vdev); + + return status; +} + +/* Handler for IRQs from Buttress core (irqB) */ +static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq) +{ + u32 status = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK; + bool schedule_recovery = false; + + if (status == 0) + return 0; + + /* Disable global interrupt before handling local buttress interrupts */ + 
REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+
+	if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
+		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(MTL_BUTTRESS_CURRENT_PLL));
+
+	if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
+		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
+		REGB_WR32(MTL_BUTTRESS_ATS_ERR_CLEAR, 0x1);
+		schedule_recovery = true;
+	}
+
+	if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
+		u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
+
+		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
+			 ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
+			 REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
+			 REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
+		REGB_WR32(MTL_BUTTRESS_UFI_ERR_CLEAR, 0x1);
+		schedule_recovery = true;
+	}
+
+	/*
+	 * Clear local interrupt status by writing 0 to all bits.
+	 * This must be done after interrupts are cleared at the source.
+	 * Writing 1 triggers an interrupt, so we can't perform a read-modify-write.
+	 */
+	REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
+
+	/* Re-enable global interrupt */
+	REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+
+	if (schedule_recovery)
+		ivpu_pm_schedule_recovery(vdev);
+
+	return status;
+}
+
+static irqreturn_t ivpu_hw_mtl_irq_handler(int irq, void *ptr)
+{
+	struct ivpu_device *vdev = ptr;
+	u32 ret_irqv, ret_irqb;
+
+	ret_irqv = ivpu_hw_mtl_irqv_handler(vdev, irq);
+	ret_irqb = ivpu_hw_mtl_irqb_handler(vdev, irq);
+
+	return IRQ_RETVAL(ret_irqb | ret_irqv);
+}
+
+static void ivpu_hw_mtl_diagnose_failure(struct ivpu_device *vdev)
+{
+	u32 irqv = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+	u32 irqb = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+
+	if (ivpu_hw_mtl_reg_ipc_rx_count_get(vdev))
+		ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
+
+	if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
+		ivpu_err(vdev, "WDT MSS timeout detected\n");
+
+	if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
+		ivpu_err(vdev, "WDT NCE timeout detected\n");
+
+	if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
+		ivpu_err(vdev, "NOC Firewall irq detected\n");
+
+	if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
+		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
+
+	if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
+		u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
+
+		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
+			 ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
+			 REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
+			 REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
+	}
+}
+
+const struct ivpu_hw_ops ivpu_hw_mtl_ops = {
+	.info_init = ivpu_hw_mtl_info_init,
+	.power_up = ivpu_hw_mtl_power_up,
+	.is_idle = ivpu_hw_mtl_is_idle,
+	.power_down = ivpu_hw_mtl_power_down,
+	.boot_fw = ivpu_hw_mtl_boot_fw,
+	.wdt_disable = ivpu_hw_mtl_wdt_disable,
+	.diagnose_failure = ivpu_hw_mtl_diagnose_failure,
+	.reg_pll_freq_get = ivpu_hw_mtl_reg_pll_freq_get,
+	.reg_telemetry_offset_get = ivpu_hw_mtl_reg_telemetry_offset_get,
+	.reg_telemetry_size_get = ivpu_hw_mtl_reg_telemetry_size_get,
+	.reg_telemetry_enable_get = ivpu_hw_mtl_reg_telemetry_enable_get,
+	.reg_db_set = ivpu_hw_mtl_reg_db_set,
+	.reg_ipc_rx_addr_get = ivpu_hw_mtl_reg_ipc_rx_addr_get,
+	
.reg_ipc_rx_count_get = ivpu_hw_mtl_reg_ipc_rx_count_get,
+	.reg_ipc_tx_set = ivpu_hw_mtl_reg_ipc_tx_set,
+	.irq_clear = ivpu_hw_mtl_irq_clear,
+	.irq_enable = ivpu_hw_mtl_irq_enable,
+	.irq_disable = ivpu_hw_mtl_irq_disable,
+	.irq_handler = ivpu_hw_mtl_irq_handler,
+};
diff --git a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
new file mode 100644
index 00000000000000..d83ccfd9a871b6
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_MTL_REG_H__
+#define __IVPU_HW_MTL_REG_H__
+
+#include <linux/bits.h>
+
+#define MTL_BUTTRESS_INTERRUPT_TYPE 0x00000000u
+
+#define MTL_BUTTRESS_INTERRUPT_STAT 0x00000004u
+#define MTL_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
+#define MTL_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
+#define MTL_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)
+
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
+
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
+
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
+
+#define MTL_BUTTRESS_WP_REQ_CMD 0x00000014u
+#define MTL_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
+
+#define MTL_BUTTRESS_WP_DOWNLOAD 0x00000018u
+#define MTL_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)
+
+#define MTL_BUTTRESS_CURRENT_PLL 0x0000001cu
+#define MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
+
+#define MTL_BUTTRESS_PLL_ENABLE 0x00000020u
+
+#define MTL_BUTTRESS_FMIN_FUSE 0x00000024u
+#define MTL_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
+#define MTL_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
+
+#define MTL_BUTTRESS_FMAX_FUSE 0x00000028u
+#define MTL_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
+
+#define MTL_BUTTRESS_TILE_FUSE 0x0000002cu
+#define MTL_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
+#define MTL_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2)
+
+#define MTL_BUTTRESS_LOCAL_INT_MASK 0x00000030u
+#define MTL_BUTTRESS_GLOBAL_INT_MASK 0x00000034u
+
+#define MTL_BUTTRESS_PLL_STATUS 0x00000040u
+#define MTL_BUTTRESS_PLL_STATUS_LOCK_MASK BIT_MASK(1)
+
+#define MTL_BUTTRESS_VPU_STATUS 0x00000044u
+#define MTL_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
+#define MTL_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
+
+#define MTL_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u
+#define MTL_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
+#define MTL_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
+
+#define MTL_BUTTRESS_VPU_IP_RESET 0x00000050u
+#define MTL_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
+
+#define MTL_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000080u
+#define MTL_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u
+#define MTL_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000088u
+
+#define MTL_BUTTRESS_ATS_ERR_LOG_0 0x000000a0u
+#define MTL_BUTTRESS_ATS_ERR_LOG_1 0x000000a4u
+#define MTL_BUTTRESS_ATS_ERR_CLEAR 0x000000a8u
+
+#define MTL_BUTTRESS_UFI_ERR_LOG 0x000000b0u
+#define MTL_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
+#define MTL_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
+#define MTL_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
+
+#define MTL_BUTTRESS_UFI_ERR_CLEAR 0x000000b4u
+
+#define MTL_VPU_HOST_SS_CPR_CLK_SET 0x00000084u
+#define MTL_VPU_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1) +#define MTL_VPU_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10) +#define MTL_VPU_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11) + +#define MTL_VPU_HOST_SS_CPR_RST_SET 0x00000094u +#define MTL_VPU_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1) +#define MTL_VPU_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10) +#define MTL_VPU_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11) + +#define MTL_VPU_HOST_SS_CPR_RST_CLR 0x00000098u +#define MTL_VPU_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1) +#define MTL_VPU_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10) +#define MTL_VPU_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11) + +#define MTL_VPU_HOST_SS_HW_VERSION 0x00000108u +#define MTL_VPU_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0) +#define MTL_VPU_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8) +#define MTL_VPU_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16) + +#define MTL_VPU_HOST_SS_GEN_CTRL 0x00000118u +#define MTL_VPU_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29) + +#define MTL_VPU_HOST_SS_NOC_QREQN 0x00000154u +#define MTL_VPU_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0) + +#define MTL_VPU_HOST_SS_NOC_QACCEPTN 0x00000158u +#define MTL_VPU_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0) + +#define MTL_VPU_HOST_SS_NOC_QDENY 0x0000015cu +#define MTL_VPU_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0) + +#define MTL_VPU_TOP_NOC_QREQN 0x00000160u +#define MTL_VPU_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0) +#define MTL_VPU_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(1) + +#define MTL_VPU_TOP_NOC_QACCEPTN 0x00000164u +#define MTL_VPU_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0) +#define MTL_VPU_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(1) + +#define MTL_VPU_TOP_NOC_QDENY 0x00000168u +#define MTL_VPU_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0) +#define MTL_VPU_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(1) + +#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN 0x00000170u +#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0) +#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1) +#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2) +#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3) +#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4) +#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5) +#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6) + +#define MTL_VPU_HOST_SS_ICB_STATUS_0 0x00010210u +#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0) +#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1) +#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2) +#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3) +#define MTL_VPU_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4) +#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5) +#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6) +#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7) +#define MTL_VPU_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8) +#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30) +#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31) + +#define MTL_VPU_HOST_SS_ICB_STATUS_1 0x00010214u +#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0) +#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1) +#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2) + +#define MTL_VPU_HOST_SS_ICB_CLEAR_0 0x00010220u 
+#define MTL_VPU_HOST_SS_ICB_CLEAR_1 0x00010224u +#define MTL_VPU_HOST_SS_ICB_ENABLE_0 0x00010240u + +#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u + +#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu +#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_READ_POINTER_MASK GENMASK(7, 0) +#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_WRITE_POINTER_MASK GENMASK(15, 8) +#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16) +#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_RSVD0_MASK GENMASK(31, 24) + +#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0 0x00030020u +#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0_MSS_CPU_MASK BIT_MASK(3) + +#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u +#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK BIT_MASK(3) + +#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u +#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK BIT_MASK(3) + +#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu +#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0_MSS_CPU_MASK BIT_MASK(3) + +#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN 0x00030200u +#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN_EN_MASK BIT_MASK(0) + +#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE 0x00030204u +#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0) + +#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO 0x00041040u +#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_DONE_MASK BIT_MASK(0) +#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1) +#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3) + +#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u +#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0) +#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16) + +#define MTL_VPU_HOST_MMU_IDR0 0x00200000u +#define MTL_VPU_HOST_MMU_IDR1 0x00200004u +#define MTL_VPU_HOST_MMU_IDR3 0x0020000cu +#define MTL_VPU_HOST_MMU_IDR5 0x00200014u +#define MTL_VPU_HOST_MMU_CR0 0x00200020u +#define MTL_VPU_HOST_MMU_CR0ACK 0x00200024u +#define MTL_VPU_HOST_MMU_CR1 0x00200028u +#define MTL_VPU_HOST_MMU_CR2 0x0020002cu +#define MTL_VPU_HOST_MMU_IRQ_CTRL 0x00200050u +#define MTL_VPU_HOST_MMU_IRQ_CTRLACK 0x00200054u + +#define MTL_VPU_HOST_MMU_GERROR 0x00200060u +#define MTL_VPU_HOST_MMU_GERROR_CMDQ_MASK BIT_MASK(0) +#define MTL_VPU_HOST_MMU_GERROR_EVTQ_ABT_MASK BIT_MASK(2) +#define MTL_VPU_HOST_MMU_GERROR_PRIQ_ABT_MASK BIT_MASK(3) +#define MTL_VPU_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4) +#define MTL_VPU_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5) +#define MTL_VPU_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6) +#define MTL_VPU_HOST_MMU_GERROR_MSI_ABT_MASK BIT_MASK(7) + +#define MTL_VPU_HOST_MMU_GERRORN 0x00200064u + +#define MTL_VPU_HOST_MMU_STRTAB_BASE 0x00200080u +#define MTL_VPU_HOST_MMU_STRTAB_BASE_CFG 0x00200088u +#define MTL_VPU_HOST_MMU_CMDQ_BASE 0x00200090u +#define MTL_VPU_HOST_MMU_CMDQ_PROD 0x00200098u +#define MTL_VPU_HOST_MMU_CMDQ_CONS 0x0020009cu +#define MTL_VPU_HOST_MMU_EVTQ_BASE 0x002000a0u +#define MTL_VPU_HOST_MMU_EVTQ_PROD 0x002000a8u +#define MTL_VPU_HOST_MMU_EVTQ_CONS 0x002000acu +#define MTL_VPU_HOST_MMU_EVTQ_PROD_SEC (0x002000a8u + SZ_64K) +#define MTL_VPU_HOST_MMU_EVTQ_CONS_SEC (0x002000acu + SZ_64K) + +#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u +#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0) +#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1) +#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2) +#define 
MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_NOSNOOP_OVERRIDE_EN_MASK BIT_MASK(3) +#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AW_NOSNOOP_OVERRIDE_MASK BIT_MASK(4) +#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AR_NOSNOOP_OVERRIDE_MASK BIT_MASK(5) +#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6) +#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11) + +#define MTL_VPU_HOST_IF_TBU_MMUSSIDV 0x00360004u +#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0) +#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1) +#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2) +#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3) +#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4) +#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5) +#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6) +#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7) +#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8) +#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9) + +#define MTL_VPU_CPU_SS_DSU_LEON_RT_BASE 0x04000000u +#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u +#define MTL_VPU_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u +#define MTL_VPU_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u +#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u + +#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET 0x06010004u +#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK BIT_MASK(1) + +#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR 0x06010018u +#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK BIT_MASK(1) + +#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC 0x06010040u +#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK BIT_MASK(0) +#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK BIT_MASK(1) +#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK BIT_MASK(2) +#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK BIT_MASK(3) +#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK GENMASK(31, 4) + +#define MTL_VPU_CPU_SS_TIM_WATCHDOG 0x0602009cu +#define MTL_VPU_CPU_SS_TIM_WDOG_EN 0x060200a4u +#define MTL_VPU_CPU_SS_TIM_SAFE 0x060200a8u +#define MTL_VPU_CPU_SS_TIM_IPC_FIFO 0x060200f0u + +#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG 0x06021008u +#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9) + +#define MTL_VPU_CPU_SS_DOORBELL_0 0x06300000u +#define MTL_VPU_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0) + +#define MTL_VPU_CPU_SS_DOORBELL_1 0x06301000u + +#endif /* __IVPU_HW_MTL_REG_H__ */ diff --git a/drivers/accel/ivpu/ivpu_hw_reg_io.h b/drivers/accel/ivpu/ivpu_hw_reg_io.h new file mode 100644 index 00000000000000..43c2c0c2d05072 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_hw_reg_io.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#ifndef __IVPU_HW_REG_IO_H__ +#define __IVPU_HW_REG_IO_H__ + +#include +#include +#include + +#include "ivpu_drv.h" + +#define REG_POLL_SLEEP_US 50 +#define REG_IO_ERROR 0xffffffff + +#define REGB_RD32(reg) ivpu_hw_reg_rd32(vdev, vdev->regb, (reg), #reg, __func__) +#define REGB_RD32_SILENT(reg) readl(vdev->regb + (reg)) +#define REGB_RD64(reg) ivpu_hw_reg_rd64(vdev, vdev->regb, (reg), #reg, __func__) +#define REGB_WR32(reg, val) ivpu_hw_reg_wr32(vdev, vdev->regb, (reg), (val), #reg, __func__) +#define REGB_WR64(reg, val) ivpu_hw_reg_wr64(vdev, vdev->regb, (reg), (val), 
#reg, __func__) + +#define REGV_RD32(reg) ivpu_hw_reg_rd32(vdev, vdev->regv, (reg), #reg, __func__) +#define REGV_RD32_SILENT(reg) readl(vdev->regv + (reg)) +#define REGV_RD64(reg) ivpu_hw_reg_rd64(vdev, vdev->regv, (reg), #reg, __func__) +#define REGV_WR32(reg, val) ivpu_hw_reg_wr32(vdev, vdev->regv, (reg), (val), #reg, __func__) +#define REGV_WR64(reg, val) ivpu_hw_reg_wr64(vdev, vdev->regv, (reg), (val), #reg, __func__) + +#define REGV_WR32I(reg, stride, index, val) \ + ivpu_hw_reg_wr32_index(vdev, vdev->regv, (reg), (stride), (index), (val), #reg, __func__) + +#define REG_FLD(REG, FLD) \ + (REG##_##FLD##_MASK) +#define REG_FLD_NUM(REG, FLD, num) \ + FIELD_PREP(REG##_##FLD##_MASK, num) +#define REG_GET_FLD(REG, FLD, val) \ + FIELD_GET(REG##_##FLD##_MASK, val) +#define REG_CLR_FLD(REG, FLD, val) \ + ((val) & ~(REG##_##FLD##_MASK)) +#define REG_SET_FLD(REG, FLD, val) \ + ((val) | (REG##_##FLD##_MASK)) +#define REG_SET_FLD_NUM(REG, FLD, num, val) \ + (((val) & ~(REG##_##FLD##_MASK)) | FIELD_PREP(REG##_##FLD##_MASK, num)) +#define REG_TEST_FLD(REG, FLD, val) \ + ((REG##_##FLD##_MASK) == ((val) & (REG##_##FLD##_MASK))) +#define REG_TEST_FLD_NUM(REG, FLD, num, val) \ + ((num) == FIELD_GET(REG##_##FLD##_MASK, val)) + +#define REGB_POLL(reg, var, cond, timeout_us) \ + read_poll_timeout(REGB_RD32_SILENT, var, cond, REG_POLL_SLEEP_US, timeout_us, false, reg) + +#define REGV_POLL(reg, var, cond, timeout_us) \ + read_poll_timeout(REGV_RD32_SILENT, var, cond, REG_POLL_SLEEP_US, timeout_us, false, reg) + +#define REGB_POLL_FLD(reg, fld, val, timeout_us) \ +({ \ + u32 var; \ + REGB_POLL(reg, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)), timeout_us); \ +}) + +#define REGV_POLL_FLD(reg, fld, val, timeout_us) \ +({ \ + u32 var; \ + REGV_POLL(reg, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)), timeout_us); \ +}) + +static inline u32 +ivpu_hw_reg_rd32(struct ivpu_device *vdev, void __iomem *base, u32 reg, + const char *name, const char *func) +{ + u32 val = readl(base + reg); + + ivpu_dbg(vdev, REG, "%s RD: %s (0x%08x) => 0x%08x\n", func, name, reg, val); + return val; +} + +static inline u64 +ivpu_hw_reg_rd64(struct ivpu_device *vdev, void __iomem *base, u32 reg, + const char *name, const char *func) +{ + u64 val = readq(base + reg); + + ivpu_dbg(vdev, REG, "%s RD: %s (0x%08x) => 0x%016llx\n", func, name, reg, val); + return val; +} + +static inline void +ivpu_hw_reg_wr32(struct ivpu_device *vdev, void __iomem *base, u32 reg, u32 val, + const char *name, const char *func) +{ + ivpu_dbg(vdev, REG, "%s WR: %s (0x%08x) <= 0x%08x\n", func, name, reg, val); + writel(val, base + reg); +} + +static inline void +ivpu_hw_reg_wr64(struct ivpu_device *vdev, void __iomem *base, u32 reg, u64 val, + const char *name, const char *func) +{ + ivpu_dbg(vdev, REG, "%s WR: %s (0x%08x) <= 0x%016llx\n", func, name, reg, val); + writeq(val, base + reg); +} + +static inline void +ivpu_hw_reg_wr32_index(struct ivpu_device *vdev, void __iomem *base, u32 reg, + u32 stride, u32 index, u32 val, const char *name, + const char *func) +{ + reg += index * stride; + + ivpu_dbg(vdev, REG, "%s WR: %s_%d (0x%08x) <= 0x%08x\n", func, name, index, reg, val); + writel(val, base + reg); +} + +#endif /* __IVPU_HW_REG_IO_H__ */ diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c new file mode 100644 index 00000000000000..3adcfa80fc0e55 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -0,0 +1,510 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#include 
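+
+/*
+ * The IPC transport: TX messages are carved out of a write-combined buffer
+ * using a gen_pool allocator and handed to the VPU via the IPC TX register
+ * (ivpu_hw_reg_ipc_tx_set()), while RX messages are drained from the HW FIFO
+ * in IRQ context and dispatched to per-channel consumers. Every status update
+ * written to a WC buffer is followed by wmb() to flush the write-combining
+ * buffers.
+ */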
+#include +#include +#include + +#include "ivpu_drv.h" +#include "ivpu_gem.h" +#include "ivpu_hw.h" +#include "ivpu_hw_reg_io.h" +#include "ivpu_ipc.h" +#include "ivpu_jsm_msg.h" +#include "ivpu_pm.h" + +#define IPC_MAX_RX_MSG 128 +#define IS_KTHREAD() (get_current()->flags & PF_KTHREAD) + +struct ivpu_ipc_tx_buf { + struct ivpu_ipc_hdr ipc; + struct vpu_jsm_msg jsm; +}; + +struct ivpu_ipc_rx_msg { + struct list_head link; + struct ivpu_ipc_hdr *ipc_hdr; + struct vpu_jsm_msg *jsm_msg; +}; + +static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c, + struct ivpu_ipc_hdr *ipc_hdr, u32 vpu_addr) +{ + ivpu_dbg(vdev, IPC, + "%s: vpu:0x%x (data_addr:0x%08x, data_size:0x%x, channel:0x%x, src_node:0x%x, dst_node:0x%x, status:0x%x)", + c, vpu_addr, ipc_hdr->data_addr, ipc_hdr->data_size, ipc_hdr->channel, + ipc_hdr->src_node, ipc_hdr->dst_node, ipc_hdr->status); +} + +static void ivpu_jsm_msg_dump(struct ivpu_device *vdev, char *c, + struct vpu_jsm_msg *jsm_msg, u32 vpu_addr) +{ + u32 *payload = (u32 *)&jsm_msg->payload; + + ivpu_dbg(vdev, JSM, + "%s: vpu:0x%08x (type:0x%x, status:0x%x, id: 0x%x, result: 0x%x, payload:0x%x 0x%x 0x%x 0x%x 0x%x)\n", + c, vpu_addr, jsm_msg->type, jsm_msg->status, jsm_msg->request_id, jsm_msg->result, + payload[0], payload[1], payload[2], payload[3], payload[4]); +} + +static void +ivpu_ipc_rx_mark_free(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr, + struct vpu_jsm_msg *jsm_msg) +{ + ipc_hdr->status = IVPU_IPC_HDR_FREE; + if (jsm_msg) + jsm_msg->status = VPU_JSM_MSG_FREE; + wmb(); /* Flush WC buffers for message statuses */ +} + +static void ivpu_ipc_mem_fini(struct ivpu_device *vdev) +{ + struct ivpu_ipc_info *ipc = vdev->ipc; + + ivpu_bo_free_internal(ipc->mem_rx); + ivpu_bo_free_internal(ipc->mem_tx); +} + +static int +ivpu_ipc_tx_prepare(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, + struct vpu_jsm_msg *req) +{ + struct ivpu_ipc_info *ipc = vdev->ipc; + struct ivpu_ipc_tx_buf *tx_buf; + u32 tx_buf_vpu_addr; + u32 jsm_vpu_addr; + + tx_buf_vpu_addr = gen_pool_alloc(ipc->mm_tx, sizeof(*tx_buf)); + if (!tx_buf_vpu_addr) { + ivpu_err(vdev, "Failed to reserve IPC buffer, size %ld\n", + sizeof(*tx_buf)); + return -ENOMEM; + } + + tx_buf = ivpu_to_cpu_addr(ipc->mem_tx, tx_buf_vpu_addr); + if (drm_WARN_ON(&vdev->drm, !tx_buf)) { + gen_pool_free(ipc->mm_tx, tx_buf_vpu_addr, sizeof(*tx_buf)); + return -EIO; + } + + jsm_vpu_addr = tx_buf_vpu_addr + offsetof(struct ivpu_ipc_tx_buf, jsm); + + if (tx_buf->ipc.status != IVPU_IPC_HDR_FREE) + ivpu_warn(vdev, "IPC message vpu:0x%x not released by firmware\n", + tx_buf_vpu_addr); + + if (tx_buf->jsm.status != VPU_JSM_MSG_FREE) + ivpu_warn(vdev, "JSM message vpu:0x%x not released by firmware\n", + jsm_vpu_addr); + + memset(tx_buf, 0, sizeof(*tx_buf)); + tx_buf->ipc.data_addr = jsm_vpu_addr; + /* TODO: Set data_size to actual JSM message size, not union of all messages */ + tx_buf->ipc.data_size = sizeof(*req); + tx_buf->ipc.channel = cons->channel; + tx_buf->ipc.src_node = 0; + tx_buf->ipc.dst_node = 1; + tx_buf->ipc.status = IVPU_IPC_HDR_ALLOCATED; + tx_buf->jsm.type = req->type; + tx_buf->jsm.status = VPU_JSM_MSG_ALLOCATED; + tx_buf->jsm.payload = req->payload; + + req->request_id = atomic_inc_return(&ipc->request_id); + tx_buf->jsm.request_id = req->request_id; + cons->request_id = req->request_id; + wmb(); /* Flush WC buffers for IPC, JSM msgs */ + + cons->tx_vpu_addr = tx_buf_vpu_addr; + + ivpu_jsm_msg_dump(vdev, "TX", &tx_buf->jsm, jsm_vpu_addr); + ivpu_ipc_msg_dump(vdev, "TX", &tx_buf->ipc, 
tx_buf_vpu_addr); + + return 0; +} + +static void ivpu_ipc_tx_release(struct ivpu_device *vdev, u32 vpu_addr) +{ + struct ivpu_ipc_info *ipc = vdev->ipc; + + if (vpu_addr) + gen_pool_free(ipc->mm_tx, vpu_addr, sizeof(struct ivpu_ipc_tx_buf)); +} + +static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr) +{ + ivpu_hw_reg_ipc_tx_set(vdev, vpu_addr); +} + +void +ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, u32 channel) +{ + struct ivpu_ipc_info *ipc = vdev->ipc; + + INIT_LIST_HEAD(&cons->link); + cons->channel = channel; + cons->tx_vpu_addr = 0; + cons->request_id = 0; + spin_lock_init(&cons->rx_msg_lock); + INIT_LIST_HEAD(&cons->rx_msg_list); + init_waitqueue_head(&cons->rx_msg_wq); + + spin_lock_irq(&ipc->cons_list_lock); + list_add_tail(&cons->link, &ipc->cons_list); + spin_unlock_irq(&ipc->cons_list_lock); +} + +void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons) +{ + struct ivpu_ipc_info *ipc = vdev->ipc; + struct ivpu_ipc_rx_msg *rx_msg, *r; + + spin_lock_irq(&ipc->cons_list_lock); + list_del(&cons->link); + spin_unlock_irq(&ipc->cons_list_lock); + + spin_lock_irq(&cons->rx_msg_lock); + list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link) { + list_del(&rx_msg->link); + ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg); + atomic_dec(&ipc->rx_msg_count); + kfree(rx_msg); + } + spin_unlock_irq(&cons->rx_msg_lock); + + ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr); +} + +static int +ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req) +{ + struct ivpu_ipc_info *ipc = vdev->ipc; + int ret; + + ret = mutex_lock_interruptible(&ipc->lock); + if (ret) + return ret; + + if (!ipc->on) { + ret = -EAGAIN; + goto unlock; + } + + ret = ivpu_ipc_tx_prepare(vdev, cons, req); + if (ret) + goto unlock; + + ivpu_ipc_tx(vdev, cons->tx_vpu_addr); + +unlock: + mutex_unlock(&ipc->lock); + return ret; +} + +int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, + struct ivpu_ipc_hdr *ipc_buf, + struct vpu_jsm_msg *ipc_payload, unsigned long timeout_ms) +{ + struct ivpu_ipc_info *ipc = vdev->ipc; + struct ivpu_ipc_rx_msg *rx_msg; + int wait_ret, ret = 0; + + wait_ret = wait_event_interruptible_timeout(cons->rx_msg_wq, + (IS_KTHREAD() && kthread_should_stop()) || + !list_empty(&cons->rx_msg_list), + msecs_to_jiffies(timeout_ms)); + + if (IS_KTHREAD() && kthread_should_stop()) + return -EINTR; + + if (wait_ret == 0) + return -ETIMEDOUT; + + if (wait_ret < 0) + return -ERESTARTSYS; + + spin_lock_irq(&cons->rx_msg_lock); + rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link); + if (!rx_msg) { + spin_unlock_irq(&cons->rx_msg_lock); + return -EAGAIN; + } + list_del(&rx_msg->link); + spin_unlock_irq(&cons->rx_msg_lock); + + if (ipc_buf) + memcpy(ipc_buf, rx_msg->ipc_hdr, sizeof(*ipc_buf)); + if (rx_msg->jsm_msg) { + u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*ipc_payload)); + + if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) { + ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result); + ret = -EBADMSG; + } + + if (ipc_payload) + memcpy(ipc_payload, rx_msg->jsm_msg, size); + } + + ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg); + atomic_dec(&ipc->rx_msg_count); + kfree(rx_msg); + + return ret; +} + +static int +ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req, + enum vpu_ipc_msg_type expected_resp_type, + struct vpu_jsm_msg *resp, u32 
channel, + unsigned long timeout_ms) +{ + struct ivpu_ipc_consumer cons; + int ret; + + ivpu_ipc_consumer_add(vdev, &cons, channel); + + ret = ivpu_ipc_send(vdev, &cons, req); + if (ret) { + ivpu_warn(vdev, "IPC send failed: %d\n", ret); + goto consumer_del; + } + + ret = ivpu_ipc_receive(vdev, &cons, NULL, resp, timeout_ms); + if (ret) { + ivpu_warn(vdev, "IPC receive failed: type 0x%x, ret %d\n", req->type, ret); + goto consumer_del; + } + + if (resp->type != expected_resp_type) { + ivpu_warn(vdev, "Invalid JSM response type: 0x%x\n", resp->type); + ret = -EBADE; + } + +consumer_del: + ivpu_ipc_consumer_del(vdev, &cons); + return ret; +} + +int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req, + enum vpu_ipc_msg_type expected_resp_type, + struct vpu_jsm_msg *resp, u32 channel, + unsigned long timeout_ms) +{ + struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB }; + struct vpu_jsm_msg hb_resp; + int ret, hb_ret; + + ret = ivpu_rpm_get(vdev); + if (ret < 0) + return ret; + + ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp_type, resp, + channel, timeout_ms); + if (ret != -ETIMEDOUT) + goto rpm_put; + + hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, + &hb_resp, VPU_IPC_CHAN_ASYNC_CMD, + vdev->timeout.jsm); + if (hb_ret == -ETIMEDOUT) { + ivpu_hw_diagnose_failure(vdev); + ivpu_pm_schedule_recovery(vdev); + } + +rpm_put: + ivpu_rpm_put(vdev); + return ret; +} + +static bool +ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, + struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg) +{ + if (cons->channel != ipc_hdr->channel) + return false; + + if (!jsm_msg || jsm_msg->request_id == cons->request_id) + return true; + + return false; +} + +static void +ivpu_ipc_dispatch(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, + struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg) +{ + struct ivpu_ipc_info *ipc = vdev->ipc; + struct ivpu_ipc_rx_msg *rx_msg; + unsigned long flags; + + lockdep_assert_held(&ipc->cons_list_lock); + + rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC); + if (!rx_msg) { + ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg); + return; + } + + atomic_inc(&ipc->rx_msg_count); + + rx_msg->ipc_hdr = ipc_hdr; + rx_msg->jsm_msg = jsm_msg; + + spin_lock_irqsave(&cons->rx_msg_lock, flags); + list_add_tail(&rx_msg->link, &cons->rx_msg_list); + spin_unlock_irqrestore(&cons->rx_msg_lock, flags); + + wake_up(&cons->rx_msg_wq); +} + +int ivpu_ipc_irq_handler(struct ivpu_device *vdev) +{ + struct ivpu_ipc_info *ipc = vdev->ipc; + struct ivpu_ipc_consumer *cons; + struct ivpu_ipc_hdr *ipc_hdr; + struct vpu_jsm_msg *jsm_msg; + unsigned long flags; + bool dispatched; + u32 vpu_addr; + + /* + * Driver needs to purge all messages from IPC FIFO to clear IPC interrupt. + * Without purge IPC FIFO to 0 next IPC interrupts won't be generated. 
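+ * In other words: unless the FIFO is drained down to zero entries, the
+ * device will not generate further IPC interrupts. The loop below therefore
+ * keeps consuming entries while ivpu_hw_reg_ipc_rx_count_get() reports a
+ * non-zero fill level.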
+ */ + while (ivpu_hw_reg_ipc_rx_count_get(vdev)) { + vpu_addr = ivpu_hw_reg_ipc_rx_addr_get(vdev); + if (vpu_addr == REG_IO_ERROR) { + ivpu_err(vdev, "Failed to read IPC rx addr register\n"); + return -EIO; + } + + ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr); + if (!ipc_hdr) { + ivpu_warn(vdev, "IPC msg 0x%x out of range\n", vpu_addr); + continue; + } + ivpu_ipc_msg_dump(vdev, "RX", ipc_hdr, vpu_addr); + + jsm_msg = NULL; + if (ipc_hdr->channel != IVPU_IPC_CHAN_BOOT_MSG) { + jsm_msg = ivpu_to_cpu_addr(ipc->mem_rx, ipc_hdr->data_addr); + if (!jsm_msg) { + ivpu_warn(vdev, "JSM msg 0x%x out of range\n", ipc_hdr->data_addr); + ivpu_ipc_rx_mark_free(vdev, ipc_hdr, NULL); + continue; + } + ivpu_jsm_msg_dump(vdev, "RX", jsm_msg, ipc_hdr->data_addr); + } + + if (atomic_read(&ipc->rx_msg_count) > IPC_MAX_RX_MSG) { + ivpu_warn(vdev, "IPC RX msg dropped, msg count %d\n", IPC_MAX_RX_MSG); + ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg); + continue; + } + + dispatched = false; + spin_lock_irqsave(&ipc->cons_list_lock, flags); + list_for_each_entry(cons, &ipc->cons_list, link) { + if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) { + ivpu_ipc_dispatch(vdev, cons, ipc_hdr, jsm_msg); + dispatched = true; + break; + } + } + spin_unlock_irqrestore(&ipc->cons_list_lock, flags); + + if (!dispatched) { + ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr); + ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg); + } + } + + return 0; +} + +int ivpu_ipc_init(struct ivpu_device *vdev) +{ + struct ivpu_ipc_info *ipc = vdev->ipc; + int ret = -ENOMEM; + + ipc->mem_tx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC); + if (!ipc->mem_tx) + return ret; + + ipc->mem_rx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC); + if (!ipc->mem_rx) + goto err_free_tx; + + ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT), + -1, "TX_IPC_JSM"); + if (IS_ERR(ipc->mm_tx)) { + ret = PTR_ERR(ipc->mm_tx); + ivpu_err(vdev, "Failed to create gen pool, %pe\n", ipc->mm_tx); + goto err_free_rx; + } + + ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ipc->mem_tx->base.size, -1); + if (ret) { + ivpu_err(vdev, "gen_pool_add failed, ret %d\n", ret); + goto err_free_rx; + } + + INIT_LIST_HEAD(&ipc->cons_list); + spin_lock_init(&ipc->cons_list_lock); + drmm_mutex_init(&vdev->drm, &ipc->lock); + + ivpu_ipc_reset(vdev); + return 0; + +err_free_rx: + ivpu_bo_free_internal(ipc->mem_rx); +err_free_tx: + ivpu_bo_free_internal(ipc->mem_tx); + return ret; +} + +void ivpu_ipc_fini(struct ivpu_device *vdev) +{ + ivpu_ipc_mem_fini(vdev); +} + +void ivpu_ipc_enable(struct ivpu_device *vdev) +{ + struct ivpu_ipc_info *ipc = vdev->ipc; + + mutex_lock(&ipc->lock); + ipc->on = true; + mutex_unlock(&ipc->lock); +} + +void ivpu_ipc_disable(struct ivpu_device *vdev) +{ + struct ivpu_ipc_info *ipc = vdev->ipc; + struct ivpu_ipc_consumer *cons, *c; + unsigned long flags; + + mutex_lock(&ipc->lock); + ipc->on = false; + mutex_unlock(&ipc->lock); + + spin_lock_irqsave(&ipc->cons_list_lock, flags); + list_for_each_entry_safe(cons, c, &ipc->cons_list, link) + wake_up(&cons->rx_msg_wq); + spin_unlock_irqrestore(&ipc->cons_list_lock, flags); +} + +void ivpu_ipc_reset(struct ivpu_device *vdev) +{ + struct ivpu_ipc_info *ipc = vdev->ipc; + + mutex_lock(&ipc->lock); + + memset(ipc->mem_tx->kvaddr, 0, ipc->mem_tx->base.size); + memset(ipc->mem_rx->kvaddr, 0, ipc->mem_rx->base.size); + wmb(); /* Flush WC buffers for TX and RX rings */ + + mutex_unlock(&ipc->lock); +} diff --git 
a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h new file mode 100644 index 00000000000000..9838202ecfadf2 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_ipc.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#ifndef __IVPU_IPC_H__ +#define __IVPU_IPC_H__ + +#include +#include + +#include "vpu_jsm_api.h" + +struct ivpu_bo; + +/* VPU FW boot notification */ +#define IVPU_IPC_CHAN_BOOT_MSG 0x3ff +#define IVPU_IPC_BOOT_MSG_DATA_ADDR 0x424f4f54 + +/* The alignment to be used for IPC Buffers and IPC Data. */ +#define IVPU_IPC_ALIGNMENT 64 + +#define IVPU_IPC_HDR_FREE 0 +#define IVPU_IPC_HDR_ALLOCATED 0 + +/** + * struct ivpu_ipc_hdr - The IPC message header structure, exchanged + * with the VPU device firmware. + * @data_addr: The VPU address of the payload (JSM message) + * @data_size: The size of the payload. + * @channel: The channel used. + * @src_node: The Node ID of the sender. + * @dst_node: The Node ID of the intended receiver. + * @status: IPC buffer usage status + */ +struct ivpu_ipc_hdr { + u32 data_addr; + u32 data_size; + u16 channel; + u8 src_node; + u8 dst_node; + u8 status; +} __packed __aligned(IVPU_IPC_ALIGNMENT); + +struct ivpu_ipc_consumer { + struct list_head link; + u32 channel; + u32 tx_vpu_addr; + u32 request_id; + + spinlock_t rx_msg_lock; /* Protects rx_msg_list */ + struct list_head rx_msg_list; + wait_queue_head_t rx_msg_wq; +}; + +struct ivpu_ipc_info { + struct gen_pool *mm_tx; + struct ivpu_bo *mem_tx; + struct ivpu_bo *mem_rx; + + atomic_t rx_msg_count; + + spinlock_t cons_list_lock; /* Protects cons_list */ + struct list_head cons_list; + + atomic_t request_id; + struct mutex lock; /* Lock on status */ + bool on; +}; + +int ivpu_ipc_init(struct ivpu_device *vdev); +void ivpu_ipc_fini(struct ivpu_device *vdev); + +void ivpu_ipc_enable(struct ivpu_device *vdev); +void ivpu_ipc_disable(struct ivpu_device *vdev); +void ivpu_ipc_reset(struct ivpu_device *vdev); + +int ivpu_ipc_irq_handler(struct ivpu_device *vdev); + +void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, + u32 channel); +void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons); + +int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, + struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *ipc_payload, + unsigned long timeout_ms); + +int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req, + enum vpu_ipc_msg_type expected_resp_type, + struct vpu_jsm_msg *resp, u32 channel, + unsigned long timeout_ms); + +#endif /* __IVPU_IPC_H__ */ diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c new file mode 100644 index 00000000000000..94068aedf97cfe --- /dev/null +++ b/drivers/accel/ivpu/ivpu_job.c @@ -0,0 +1,615 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include "ivpu_drv.h" +#include "ivpu_hw.h" +#include "ivpu_ipc.h" +#include "ivpu_job.h" +#include "ivpu_jsm_msg.h" +#include "ivpu_pm.h" + +#define CMD_BUF_IDX 0 +#define JOB_ID_JOB_MASK GENMASK(7, 0) +#define JOB_ID_CONTEXT_MASK GENMASK(31, 8) +#define JOB_MAX_BUFFER_COUNT 65535 + +static unsigned int ivpu_tdr_timeout_ms; +module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, uint, 0644); +MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default"); + +static void 
ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
+{
+	ivpu_hw_reg_db_set(vdev, cmdq->db_id);
+}
+
+static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 engine)
+{
+	struct ivpu_device *vdev = file_priv->vdev;
+	struct vpu_job_queue_header *jobq_header;
+	struct ivpu_cmdq *cmdq;
+
+	cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
+	if (!cmdq)
+		return NULL;
+
+	cmdq->mem = ivpu_bo_alloc_internal(vdev, 0, SZ_4K, DRM_IVPU_BO_WC);
+	if (!cmdq->mem)
+		goto cmdq_free;
+
+	cmdq->db_id = file_priv->ctx.id + engine * ivpu_get_context_count(vdev);
+	cmdq->entry_count = (u32)((cmdq->mem->base.size - sizeof(struct vpu_job_queue_header)) /
+				  sizeof(struct vpu_job_queue_entry));
+
+	cmdq->jobq = (struct vpu_job_queue *)cmdq->mem->kvaddr;
+	jobq_header = &cmdq->jobq->header;
+	jobq_header->engine_idx = engine;
+	jobq_header->head = 0;
+	jobq_header->tail = 0;
+	wmb(); /* Flush WC buffer for jobq->header */
+
+	return cmdq;
+
+cmdq_free:
+	kfree(cmdq);
+	return NULL;
+}
+
+static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+{
+	if (!cmdq)
+		return;
+
+	ivpu_bo_free_internal(cmdq->mem);
+	kfree(cmdq);
+}
+
+static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine)
+{
+	struct ivpu_device *vdev = file_priv->vdev;
+	struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
+	int ret;
+
+	lockdep_assert_held(&file_priv->lock);
+
+	if (!cmdq) {
+		cmdq = ivpu_cmdq_alloc(file_priv, engine);
+		if (!cmdq)
+			return NULL;
+		file_priv->cmdq[engine] = cmdq;
+	}
+
+	if (cmdq->db_registered)
+		return cmdq;
+
+	ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
+				   cmdq->mem->vpu_addr, cmdq->mem->base.size);
+	if (ret)
+		return NULL;
+
+	cmdq->db_registered = true;
+
+	return cmdq;
+}
+
+static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine)
+{
+	struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
+
+	lockdep_assert_held(&file_priv->lock);
+
+	if (cmdq) {
+		file_priv->cmdq[engine] = NULL;
+		if (cmdq->db_registered)
+			ivpu_jsm_unregister_db(file_priv->vdev, cmdq->db_id);
+
+		ivpu_cmdq_free(file_priv, cmdq);
+	}
+}
+
+void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv)
+{
+	int i;
+
+	mutex_lock(&file_priv->lock);
+
+	for (i = 0; i < IVPU_NUM_ENGINES; i++)
+		ivpu_cmdq_release_locked(file_priv, i);
+
+	mutex_unlock(&file_priv->lock);
+}
+
+/*
+ * Mark the doorbell as unregistered and reset job queue pointers.
+ * This function needs to be called when the VPU hardware is restarted
+ * and the FW loses job queue state. The next time the job queue is used it
+ * will be registered again.
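+ * (Re-registration happens lazily: ivpu_cmdq_acquire() re-issues
+ * ivpu_jsm_register_db() whenever db_registered is false.)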
+ */ +static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine) +{ + struct ivpu_cmdq *cmdq = file_priv->cmdq[engine]; + + lockdep_assert_held(&file_priv->lock); + + if (cmdq) { + cmdq->db_registered = false; + cmdq->jobq->header.head = 0; + cmdq->jobq->header.tail = 0; + wmb(); /* Flush WC buffer for jobq header */ + } +} + +static void ivpu_cmdq_reset_all(struct ivpu_file_priv *file_priv) +{ + int i; + + mutex_lock(&file_priv->lock); + + for (i = 0; i < IVPU_NUM_ENGINES; i++) + ivpu_cmdq_reset_locked(file_priv, i); + + mutex_unlock(&file_priv->lock); +} + +void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev) +{ + struct ivpu_file_priv *file_priv; + unsigned long ctx_id; + + xa_for_each(&vdev->context_xa, ctx_id, file_priv) { + file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id); + if (!file_priv) + continue; + + ivpu_cmdq_reset_all(file_priv); + + ivpu_file_priv_put(&file_priv); + } +} + +static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job) +{ + struct ivpu_device *vdev = job->vdev; + struct vpu_job_queue_header *header = &cmdq->jobq->header; + struct vpu_job_queue_entry *entry; + u32 tail = READ_ONCE(header->tail); + u32 next_entry = (tail + 1) % cmdq->entry_count; + + /* Check if there is space left in job queue */ + if (next_entry == header->head) { + ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n", + job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail); + return -EBUSY; + } + + entry = &cmdq->jobq->job[tail]; + entry->batch_buf_addr = job->cmd_buf_vpu_addr; + entry->job_id = job->job_id; + entry->flags = 0; + wmb(); /* Ensure that tail is updated after filling entry */ + header->tail = next_entry; + wmb(); /* Flush WC buffer for jobq header */ + + return 0; +} + +struct ivpu_fence { + struct dma_fence base; + spinlock_t lock; /* protects base */ + struct ivpu_device *vdev; +}; + +static inline struct ivpu_fence *to_vpu_fence(struct dma_fence *fence) +{ + return container_of(fence, struct ivpu_fence, base); +} + +static const char *ivpu_fence_get_driver_name(struct dma_fence *fence) +{ + return DRIVER_NAME; +} + +static const char *ivpu_fence_get_timeline_name(struct dma_fence *fence) +{ + struct ivpu_fence *ivpu_fence = to_vpu_fence(fence); + + return dev_name(ivpu_fence->vdev->drm.dev); +} + +static const struct dma_fence_ops ivpu_fence_ops = { + .get_driver_name = ivpu_fence_get_driver_name, + .get_timeline_name = ivpu_fence_get_timeline_name, +}; + +static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev) +{ + struct ivpu_fence *fence; + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (!fence) + return NULL; + + fence->vdev = vdev; + spin_lock_init(&fence->lock); + dma_fence_init(&fence->base, &ivpu_fence_ops, &fence->lock, dma_fence_context_alloc(1), 1); + + return &fence->base; +} + +static void job_get(struct ivpu_job *job, struct ivpu_job **link) +{ + struct ivpu_device *vdev = job->vdev; + + kref_get(&job->ref); + *link = job; + + ivpu_dbg(vdev, KREF, "Job get: id %u refcount %u\n", job->job_id, kref_read(&job->ref)); +} + +static void job_release(struct kref *ref) +{ + struct ivpu_job *job = container_of(ref, struct ivpu_job, ref); + struct ivpu_device *vdev = job->vdev; + u32 i; + + for (i = 0; i < job->bo_count; i++) + if (job->bos[i]) + drm_gem_object_put(&job->bos[i]->base); + + dma_fence_put(job->done_fence); + ivpu_file_priv_put(&job->file_priv); + + ivpu_dbg(vdev, KREF, "Job released: id %u\n", job->job_id); + kfree(job); + + /* Allow the VPU to 
get suspended, must be called after ivpu_file_priv_put() */ + ivpu_rpm_put(vdev); +} + +static void job_put(struct ivpu_job *job) +{ + struct ivpu_device *vdev = job->vdev; + + ivpu_dbg(vdev, KREF, "Job put: id %u refcount %u\n", job->job_id, kref_read(&job->ref)); + kref_put(&job->ref, job_release); +} + +static struct ivpu_job * +ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count) +{ + struct ivpu_device *vdev = file_priv->vdev; + struct ivpu_job *job; + size_t buf_size; + int ret; + + ret = ivpu_rpm_get(vdev); + if (ret < 0) + return NULL; + + buf_size = sizeof(*job) + bo_count * sizeof(struct ivpu_bo *); + job = kzalloc(buf_size, GFP_KERNEL); + if (!job) + goto err_rpm_put; + + kref_init(&job->ref); + + job->vdev = vdev; + job->engine_idx = engine_idx; + job->bo_count = bo_count; + job->done_fence = ivpu_fence_create(vdev); + if (!job->done_fence) { + ivpu_warn_ratelimited(vdev, "Failed to create a fence\n"); + goto err_free_job; + } + + job->file_priv = ivpu_file_priv_get(file_priv); + + ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx); + + return job; + +err_free_job: + kfree(job); +err_rpm_put: + ivpu_rpm_put(vdev); + return NULL; +} + +static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status) +{ + struct ivpu_job *job; + + job = xa_erase(&vdev->submitted_jobs_xa, job_id); + if (!job) + return -ENOENT; + + if (job->file_priv->has_mmu_faults) + job_status = VPU_JSM_STATUS_ABORTED; + + job->bos[CMD_BUF_IDX]->job_status = job_status; + dma_fence_signal(job->done_fence); + + ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n", + job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status); + + job_put(job); + return 0; +} + +static void ivpu_job_done_message(struct ivpu_device *vdev, void *msg) +{ + struct vpu_ipc_msg_payload_job_done *payload; + struct vpu_jsm_msg *job_ret_msg = msg; + int ret; + + payload = (struct vpu_ipc_msg_payload_job_done *)&job_ret_msg->payload; + + ret = ivpu_job_done(vdev, payload->job_id, payload->job_status); + if (ret) + ivpu_err(vdev, "Failed to finish job %d: %d\n", payload->job_id, ret); +} + +void ivpu_jobs_abort_all(struct ivpu_device *vdev) +{ + struct ivpu_job *job; + unsigned long id; + + xa_for_each(&vdev->submitted_jobs_xa, id, job) + ivpu_job_done(vdev, id, VPU_JSM_STATUS_ABORTED); +} + +static int ivpu_direct_job_submission(struct ivpu_job *job) +{ + struct ivpu_file_priv *file_priv = job->file_priv; + struct ivpu_device *vdev = job->vdev; + struct xa_limit job_id_range; + struct ivpu_cmdq *cmdq; + int ret; + + mutex_lock(&file_priv->lock); + + cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx); + if (!cmdq) { + ivpu_warn(vdev, "Failed get job queue, ctx %d engine %d\n", + file_priv->ctx.id, job->engine_idx); + ret = -EINVAL; + goto err_unlock; + } + + job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1)); + job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK; + + job_get(job, &job); + ret = xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL); + if (ret) { + ivpu_warn_ratelimited(vdev, "Failed to allocate job id: %d\n", ret); + goto err_job_put; + } + + ret = ivpu_cmdq_push_job(cmdq, job); + if (ret) + goto err_xa_erase; + + ivpu_dbg(vdev, JOB, "Job submitted: id %3u addr 0x%llx ctx %2d engine %d next %d\n", + job->job_id, job->cmd_buf_vpu_addr, file_priv->ctx.id, + job->engine_idx, cmdq->jobq->header.tail); + + if (ivpu_test_mode == IVPU_TEST_MODE_NULL_HW) { + 
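+		/*
+		 * NULL_HW test mode: no hardware is involved, so complete the
+		 * job immediately and advance the queue head past the new
+		 * entry instead of ringing the doorbell.
+		 */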
ivpu_job_done(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS); + cmdq->jobq->header.head = cmdq->jobq->header.tail; + wmb(); /* Flush WC buffer for jobq header */ + } else { + ivpu_cmdq_ring_db(vdev, cmdq); + } + + mutex_unlock(&file_priv->lock); + return 0; + +err_xa_erase: + xa_erase(&vdev->submitted_jobs_xa, job->job_id); +err_job_put: + job_put(job); +err_unlock: + mutex_unlock(&file_priv->lock); + return ret; +} + +static int +ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles, + u32 buf_count, u32 commands_offset) +{ + struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = file_priv->vdev; + struct ww_acquire_ctx acquire_ctx; + struct ivpu_bo *bo; + int ret; + u32 i; + + for (i = 0; i < buf_count; i++) { + struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]); + + if (!obj) + return -ENOENT; + + job->bos[i] = to_ivpu_bo(obj); + + ret = ivpu_bo_pin(job->bos[i]); + if (ret) + return ret; + } + + bo = job->bos[CMD_BUF_IDX]; + if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ)) { + ivpu_warn(vdev, "Buffer is already in use\n"); + return -EBUSY; + } + + if (commands_offset >= bo->base.size) { + ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset); + return -EINVAL; + } + + job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset; + + ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count, + &acquire_ctx); + if (ret) { + ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret); + return ret; + } + + for (i = 0; i < buf_count; i++) { + ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1); + if (ret) { + ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret); + goto unlock_reservations; + } + } + + for (i = 0; i < buf_count; i++) + dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE); + +unlock_reservations: + drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx); + + wmb(); /* Flush write combining buffers */ + + return ret; +} + +int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +{ + int ret = 0; + struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = file_priv->vdev; + struct drm_ivpu_submit *params = data; + struct ivpu_job *job; + u32 *buf_handles; + + if (params->engine > DRM_IVPU_ENGINE_COPY) + return -EINVAL; + + if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT) + return -EINVAL; + + if (!IS_ALIGNED(params->commands_offset, 8)) + return -EINVAL; + + if (!file_priv->ctx.id) + return -EINVAL; + + if (file_priv->has_mmu_faults) + return -EBADFD; + + buf_handles = kcalloc(params->buffer_count, sizeof(u32), GFP_KERNEL); + if (!buf_handles) + return -ENOMEM; + + ret = copy_from_user(buf_handles, + (void __user *)params->buffers_ptr, + params->buffer_count * sizeof(u32)); + if (ret) { + ret = -EFAULT; + goto free_handles; + } + + ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n", + file_priv->ctx.id, params->buffer_count); + + job = ivpu_create_job(file_priv, params->engine, params->buffer_count); + if (!job) { + ivpu_err(vdev, "Failed to create job\n"); + ret = -ENOMEM; + goto free_handles; + } + + ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count, + params->commands_offset); + if (ret) { + ivpu_err(vdev, "Failed to prepare job, ret %d\n", ret); + goto job_put; + } + + ret = ivpu_direct_job_submission(job); + if (ret) { + dma_fence_signal(job->done_fence); + 
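+		/*
+		 * Submission failed before the job reached the queue: signal
+		 * the fence so that any waiters are released; the job
+		 * reference is dropped in job_put() below.
+		 */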
+		ivpu_err(vdev, "Failed to submit job to the HW, ret %d\n", ret);
+	}
+
+job_put:
+	job_put(job);
+free_handles:
+	kfree(buf_handles);
+
+	return ret;
+}
+
+static int ivpu_job_done_thread(void *arg)
+{
+	struct ivpu_device *vdev = (struct ivpu_device *)arg;
+	struct ivpu_ipc_consumer cons;
+	struct vpu_jsm_msg jsm_msg;
+	bool jobs_submitted;
+	unsigned int timeout;
+	int ret;
+
+	ivpu_dbg(vdev, JOB, "Started %s\n", __func__);
+
+	ivpu_ipc_consumer_add(vdev, &cons, VPU_IPC_CHAN_JOB_RET);
+
+	while (!kthread_should_stop()) {
+		timeout = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
+		jobs_submitted = !xa_empty(&vdev->submitted_jobs_xa);
+		ret = ivpu_ipc_receive(vdev, &cons, NULL, &jsm_msg, timeout);
+		if (!ret) {
+			ivpu_job_done_message(vdev, &jsm_msg);
+		} else if (ret == -ETIMEDOUT) {
+			if (jobs_submitted && !xa_empty(&vdev->submitted_jobs_xa)) {
+				ivpu_err(vdev, "TDR detected, timeout %d ms", timeout);
+				ivpu_hw_diagnose_failure(vdev);
+				ivpu_pm_schedule_recovery(vdev);
+			}
+		}
+	}
+
+	ivpu_ipc_consumer_del(vdev, &cons);
+
+	ivpu_jobs_abort_all(vdev);
+
+	ivpu_dbg(vdev, JOB, "Stopped %s\n", __func__);
+	return 0;
+}
+
+int ivpu_job_done_thread_init(struct ivpu_device *vdev)
+{
+	struct task_struct *thread;
+
+	thread = kthread_run(&ivpu_job_done_thread, (void *)vdev, "ivpu_job_done_thread");
+	if (IS_ERR(thread)) {
+		ivpu_err(vdev, "Failed to start job completion thread\n");
+		return -EIO;
+	}
+
+	get_task_struct(thread);
+	wake_up_process(thread);
+
+	vdev->job_done_thread = thread;
+
+	return 0;
+}
+
+void ivpu_job_done_thread_fini(struct ivpu_device *vdev)
+{
+	kthread_stop(vdev->job_done_thread);
+	put_task_struct(vdev->job_done_thread);
+}
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
new file mode 100644
index 00000000000000..aa1f0b9479b0b2
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_JOB_H__
+#define __IVPU_JOB_H__
+
+#include
+#include
+
+#include "ivpu_gem.h"
+
+struct ivpu_device;
+struct ivpu_file_priv;
+
+/**
+ * struct ivpu_cmdq - Object representing device queue used to send jobs.
+ * @jobq: Pointer to job queue memory shared with the device
+ * @mem: Memory allocated for the job queue, shared with device
+ * @entry_count: Number of job entries in the queue
+ * @db_id: Doorbell assigned to this job queue
+ * @db_registered: True if doorbell is registered in device
+ */
+struct ivpu_cmdq {
+	struct vpu_job_queue *jobq;
+	struct ivpu_bo *mem;
+	u32 entry_count;
+	u32 db_id;
+	bool db_registered;
+};
+
+/**
+ * struct ivpu_job - KMD object that represents batchbuffer / DMA buffer.
+ * Each batch / DMA buffer is a job to be submitted and executed by the VPU FW.
+ * This is a unit of execution, and is tracked by the job_id for
+ * any status reporting from VPU FW through IPC JOB RET/DONE message.
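+ *
+ * Jobs are reference counted: submission takes an extra reference that is
+ * kept in submitted_jobs_xa until the VPU reports completion (or the job is
+ * aborted), at which point done_fence is signaled and the reference dropped.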
+ * @ref: Reference count; the job is freed when it drops to zero
+ * @vdev: Device this job was submitted to
+ * @file_priv: The client that submitted this job
+ * @done_fence: Fence signaled when the VPU reports job completion (or abort)
+ * @cmd_buf_vpu_addr: VPU address of the command (batch) buffer
+ * @job_id: Job ID for KMD tracking and job status reporting from VPU FW
+ * @engine_idx: Index of the engine this job was submitted to
+ * @bo_count: Number of buffer objects referenced by this job
+ * @bos: Buffer objects referenced by this job; @bos[0] is the command buffer
+ */
+struct ivpu_job {
+	struct kref ref;
+	struct ivpu_device *vdev;
+	struct ivpu_file_priv *file_priv;
+	struct dma_fence *done_fence;
+	u64 cmd_buf_vpu_addr;
+	u32 job_id;
+	u32 engine_idx;
+	size_t bo_count;
+	struct ivpu_bo *bos[];
+};
+
+int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+
+void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv);
+void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
+
+int ivpu_job_done_thread_init(struct ivpu_device *vdev);
+void ivpu_job_done_thread_fini(struct ivpu_device *vdev);
+
+void ivpu_jobs_abort_all(struct ivpu_device *vdev);
+
+#endif /* __IVPU_JOB_H__ */
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
new file mode 100644
index 00000000000000..831bfd2b2d39d4
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include "ivpu_drv.h"
+#include "ivpu_ipc.h"
+#include "ivpu_jsm_msg.h"
+
+int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
+			 u64 jobq_base, u32 jobq_size)
+{
+	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
+	struct vpu_jsm_msg resp;
+	int ret = 0;
+
+	req.payload.register_db.db_idx = db_id;
+	req.payload.register_db.jobq_base = jobq_base;
+	req.payload.register_db.jobq_size = jobq_size;
+	req.payload.register_db.host_ssid = ctx_id;
+
+	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
+				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+	if (ret) {
+		ivpu_err(vdev, "Failed to register doorbell %d: %d\n", db_id, ret);
+		return ret;
+	}
+
+	ivpu_dbg(vdev, JSM, "Doorbell %d registered to context %d\n", db_id, ctx_id);
+
+	return 0;
+}
+
+int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
+{
+	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
+	struct vpu_jsm_msg resp;
+	int ret = 0;
+
+	req.payload.unregister_db.db_idx = db_id;
+
+	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
+				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+	if (ret) {
+		ivpu_warn(vdev, "Failed to unregister doorbell %d: %d\n", db_id, ret);
+		return ret;
+	}
+
+	ivpu_dbg(vdev, JSM, "Doorbell %d unregistered\n", db_id);
+
+	return 0;
+}
+
+int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
+{
+	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
+	struct vpu_jsm_msg resp;
+	int ret;
+
+	if (engine > VPU_ENGINE_COPY)
+		return -EINVAL;
+
+	req.payload.query_engine_hb.engine_idx = engine;
+
+	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
+				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+	if (ret) {
+		ivpu_err(vdev, "Failed to get heartbeat from engine %d: %d\n", engine, ret);
+		return ret;
+	}
+
+	*heartbeat = resp.payload.query_engine_hb_done.heartbeat;
+	return ret;
+}
+
+int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
+{
+	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
+	struct vpu_jsm_msg resp;
+	int ret;
+
+	if (engine > VPU_ENGINE_COPY)
+		return -EINVAL;
+
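+	/*
+	 * As with every request in this file: fill in the payload, then block
+	 * on the ASYNC_CMD channel until the matching *_DONE response arrives
+	 * or vdev->timeout.jsm expires.
+	 */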
req.payload.engine_reset.engine_idx = engine; + + ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp, + VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); + if (ret) + ivpu_err(vdev, "Failed to reset engine %d: %d\n", engine, ret); + + return ret; +} + +int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id) +{ + struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT }; + struct vpu_jsm_msg resp; + int ret; + + if (engine > VPU_ENGINE_COPY) + return -EINVAL; + + req.payload.engine_preempt.engine_idx = engine; + req.payload.engine_preempt.preempt_id = preempt_id; + + ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp, + VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); + if (ret) + ivpu_err(vdev, "Failed to preempt engine %d: %d\n", engine, ret); + + return ret; +} + +int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size) +{ + struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL }; + struct vpu_jsm_msg resp; + int ret; + + if (!strncpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN - 1)) + return -ENOMEM; + + ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp, + VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); + if (ret) + ivpu_warn(vdev, "Failed to send command \"%s\": ret %d\n", command, ret); + + return ret; +} + +int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask, + u64 *trace_hw_component_mask) +{ + struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY }; + struct vpu_jsm_msg resp; + int ret; + + ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp, + VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); + if (ret) { + ivpu_warn(vdev, "Failed to get trace capability: %d\n", ret); + return ret; + } + + *trace_destination_mask = resp.payload.trace_capability.trace_destination_mask; + *trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask; + + return ret; +} + +int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask, + u64 trace_hw_component_mask) +{ + struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG }; + struct vpu_jsm_msg resp; + int ret; + + req.payload.trace_config.trace_level = trace_level; + req.payload.trace_config.trace_destination_mask = trace_destination_mask; + req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask; + + ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp, + VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); + if (ret) + ivpu_warn(vdev, "Failed to set config: %d\n", ret); + + return ret; +} + +int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid) +{ + struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE }; + struct vpu_jsm_msg resp; + + req.payload.ssid_release.host_ssid = host_ssid; + + return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp, + VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); +} diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.h b/drivers/accel/ivpu/ivpu_jsm_msg.h new file mode 100644 index 00000000000000..ab50d7b017c1b2 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_jsm_msg.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#ifndef __IVPU_JSM_MSG_H__ +#define __IVPU_JSM_MSG_H__ + +#include "vpu_jsm_api.h" + +int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id, + u64 
jobq_base, u32 jobq_size); +int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id); +int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat); +int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine); +int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id); +int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size); +int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask, + u64 *trace_hw_component_mask); +int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask, + u64 trace_hw_component_mask); +int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid); +#endif diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c new file mode 100644 index 00000000000000..694e978aba6637 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_mmu.c @@ -0,0 +1,883 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#include +#include + +#include "ivpu_drv.h" +#include "ivpu_hw_mtl_reg.h" +#include "ivpu_hw_reg_io.h" +#include "ivpu_mmu.h" +#include "ivpu_mmu_context.h" +#include "ivpu_pm.h" + +#define IVPU_MMU_IDR0_REF 0x080f3e0f +#define IVPU_MMU_IDR0_REF_SIMICS 0x080f3e1f +#define IVPU_MMU_IDR1_REF 0x0e739d18 +#define IVPU_MMU_IDR3_REF 0x0000003c +#define IVPU_MMU_IDR5_REF 0x00040070 +#define IVPU_MMU_IDR5_REF_SIMICS 0x00000075 +#define IVPU_MMU_IDR5_REF_FPGA 0x00800075 + +#define IVPU_MMU_CDTAB_ENT_SIZE 64 +#define IVPU_MMU_CDTAB_ENT_COUNT_LOG2 8 /* 256 entries */ +#define IVPU_MMU_CDTAB_ENT_COUNT ((u32)1 << IVPU_MMU_CDTAB_ENT_COUNT_LOG2) + +#define IVPU_MMU_STREAM_ID0 0 +#define IVPU_MMU_STREAM_ID3 3 + +#define IVPU_MMU_STRTAB_ENT_SIZE 64 +#define IVPU_MMU_STRTAB_ENT_COUNT 4 +#define IVPU_MMU_STRTAB_CFG_LOG2SIZE 2 +#define IVPU_MMU_STRTAB_CFG IVPU_MMU_STRTAB_CFG_LOG2SIZE + +#define IVPU_MMU_Q_COUNT_LOG2 4 /* 16 entries */ +#define IVPU_MMU_Q_COUNT ((u32)1 << IVPU_MMU_Q_COUNT_LOG2) +#define IVPU_MMU_Q_WRAP_BIT (IVPU_MMU_Q_COUNT << 1) +#define IVPU_MMU_Q_WRAP_MASK (IVPU_MMU_Q_WRAP_BIT - 1) +#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1) +#define IVPU_MMU_Q_IDX(val) ((val) & IVPU_MMU_Q_IDX_MASK) + +#define IVPU_MMU_CMDQ_CMD_SIZE 16 +#define IVPU_MMU_CMDQ_SIZE (IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE) + +#define IVPU_MMU_EVTQ_CMD_SIZE 32 +#define IVPU_MMU_EVTQ_SIZE (IVPU_MMU_Q_COUNT * IVPU_MMU_EVTQ_CMD_SIZE) + +#define IVPU_MMU_CMD_OPCODE GENMASK(7, 0) + +#define IVPU_MMU_CMD_SYNC_0_CS GENMASK(13, 12) +#define IVPU_MMU_CMD_SYNC_0_MSH GENMASK(23, 22) +#define IVPU_MMU_CMD_SYNC_0_MSI_ATTR GENMASK(27, 24) +#define IVPU_MMU_CMD_SYNC_0_MSI_ATTR GENMASK(27, 24) +#define IVPU_MMU_CMD_SYNC_0_MSI_DATA GENMASK(63, 32) + +#define IVPU_MMU_CMD_CFGI_0_SSEC BIT(10) +#define IVPU_MMU_CMD_CFGI_0_SSV BIT(11) +#define IVPU_MMU_CMD_CFGI_0_SSID GENMASK(31, 12) +#define IVPU_MMU_CMD_CFGI_0_SID GENMASK(63, 32) +#define IVPU_MMU_CMD_CFGI_1_RANGE GENMASK(4, 0) + +#define IVPU_MMU_CMD_TLBI_0_ASID GENMASK(63, 48) +#define IVPU_MMU_CMD_TLBI_0_VMID GENMASK(47, 32) + +#define CMD_PREFETCH_CFG 0x1 +#define CMD_CFGI_STE 0x3 +#define CMD_CFGI_ALL 0x4 +#define CMD_CFGI_CD 0x5 +#define CMD_CFGI_CD_ALL 0x6 +#define CMD_TLBI_NH_ASID 0x11 +#define CMD_TLBI_EL2_ALL 0x20 +#define CMD_TLBI_NSNH_ALL 0x30 +#define CMD_SYNC 0x46 + +#define IVPU_MMU_EVT_F_UUT 0x01 +#define IVPU_MMU_EVT_C_BAD_STREAMID 0x02 +#define IVPU_MMU_EVT_F_STE_FETCH 0x03 +#define IVPU_MMU_EVT_C_BAD_STE 0x04 +#define 
IVPU_MMU_EVT_F_BAD_ATS_TREQ 0x05 +#define IVPU_MMU_EVT_F_STREAM_DISABLED 0x06 +#define IVPU_MMU_EVT_F_TRANSL_FORBIDDEN 0x07 +#define IVPU_MMU_EVT_C_BAD_SUBSTREAMID 0x08 +#define IVPU_MMU_EVT_F_CD_FETCH 0x09 +#define IVPU_MMU_EVT_C_BAD_CD 0x0a +#define IVPU_MMU_EVT_F_WALK_EABT 0x0b +#define IVPU_MMU_EVT_F_TRANSLATION 0x10 +#define IVPU_MMU_EVT_F_ADDR_SIZE 0x11 +#define IVPU_MMU_EVT_F_ACCESS 0x12 +#define IVPU_MMU_EVT_F_PERMISSION 0x13 +#define IVPU_MMU_EVT_F_TLB_CONFLICT 0x20 +#define IVPU_MMU_EVT_F_CFG_CONFLICT 0x21 +#define IVPU_MMU_EVT_E_PAGE_REQUEST 0x24 +#define IVPU_MMU_EVT_F_VMS_FETCH 0x25 + +#define IVPU_MMU_EVT_OP_MASK GENMASK_ULL(7, 0) +#define IVPU_MMU_EVT_SSID_MASK GENMASK_ULL(31, 12) + +#define IVPU_MMU_Q_BASE_RWA BIT(62) +#define IVPU_MMU_Q_BASE_ADDR_MASK GENMASK_ULL(51, 5) +#define IVPU_MMU_STRTAB_BASE_RA BIT(62) +#define IVPU_MMU_STRTAB_BASE_ADDR_MASK GENMASK_ULL(51, 6) + +#define IVPU_MMU_IRQ_EVTQ_EN BIT(2) +#define IVPU_MMU_IRQ_GERROR_EN BIT(0) + +#define IVPU_MMU_CR0_ATSCHK BIT(4) +#define IVPU_MMU_CR0_CMDQEN BIT(3) +#define IVPU_MMU_CR0_EVTQEN BIT(2) +#define IVPU_MMU_CR0_PRIQEN BIT(1) +#define IVPU_MMU_CR0_SMMUEN BIT(0) + +#define IVPU_MMU_CR1_TABLE_SH GENMASK(11, 10) +#define IVPU_MMU_CR1_TABLE_OC GENMASK(9, 8) +#define IVPU_MMU_CR1_TABLE_IC GENMASK(7, 6) +#define IVPU_MMU_CR1_QUEUE_SH GENMASK(5, 4) +#define IVPU_MMU_CR1_QUEUE_OC GENMASK(3, 2) +#define IVPU_MMU_CR1_QUEUE_IC GENMASK(1, 0) +#define IVPU_MMU_CACHE_NC 0 +#define IVPU_MMU_CACHE_WB 1 +#define IVPU_MMU_CACHE_WT 2 +#define IVPU_MMU_SH_NSH 0 +#define IVPU_MMU_SH_OSH 2 +#define IVPU_MMU_SH_ISH 3 + +#define IVPU_MMU_CMDQ_OP GENMASK_ULL(7, 0) + +#define IVPU_MMU_CD_0_TCR_T0SZ GENMASK_ULL(5, 0) +#define IVPU_MMU_CD_0_TCR_TG0 GENMASK_ULL(7, 6) +#define IVPU_MMU_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8) +#define IVPU_MMU_CD_0_TCR_ORGN0 GENMASK_ULL(11, 10) +#define IVPU_MMU_CD_0_TCR_SH0 GENMASK_ULL(13, 12) +#define IVPU_MMU_CD_0_TCR_EPD0 BIT_ULL(14) +#define IVPU_MMU_CD_0_TCR_EPD1 BIT_ULL(30) +#define IVPU_MMU_CD_0_ENDI BIT(15) +#define IVPU_MMU_CD_0_V BIT(31) +#define IVPU_MMU_CD_0_TCR_IPS GENMASK_ULL(34, 32) +#define IVPU_MMU_CD_0_TCR_TBI0 BIT_ULL(38) +#define IVPU_MMU_CD_0_AA64 BIT(41) +#define IVPU_MMU_CD_0_S BIT(44) +#define IVPU_MMU_CD_0_R BIT(45) +#define IVPU_MMU_CD_0_A BIT(46) +#define IVPU_MMU_CD_0_ASET BIT(47) +#define IVPU_MMU_CD_0_ASID GENMASK_ULL(63, 48) + +#define IVPU_MMU_CD_1_TTB0_MASK GENMASK_ULL(51, 4) + +#define IVPU_MMU_STE_0_S1CDMAX GENMASK_ULL(63, 59) +#define IVPU_MMU_STE_0_S1FMT GENMASK_ULL(5, 4) +#define IVPU_MMU_STE_0_S1FMT_LINEAR 0 +#define IVPU_MMU_STE_DWORDS 8 +#define IVPU_MMU_STE_0_CFG_S1_TRANS 5 +#define IVPU_MMU_STE_0_CFG GENMASK_ULL(3, 1) +#define IVPU_MMU_STE_0_S1CTXPTR_MASK GENMASK_ULL(51, 6) +#define IVPU_MMU_STE_0_V BIT(0) + +#define IVPU_MMU_STE_1_STRW_NSEL1 0ul +#define IVPU_MMU_STE_1_CONT GENMASK_ULL(16, 13) +#define IVPU_MMU_STE_1_STRW GENMASK_ULL(31, 30) +#define IVPU_MMU_STE_1_PRIVCFG GENMASK_ULL(49, 48) +#define IVPU_MMU_STE_1_PRIVCFG_UNPRIV 2ul +#define IVPU_MMU_STE_1_INSTCFG GENMASK_ULL(51, 50) +#define IVPU_MMU_STE_1_INSTCFG_DATA 2ul +#define IVPU_MMU_STE_1_MEV BIT(19) +#define IVPU_MMU_STE_1_S1STALLD BIT(27) +#define IVPU_MMU_STE_1_S1C_CACHE_NC 0ul +#define IVPU_MMU_STE_1_S1C_CACHE_WBRA 1ul +#define IVPU_MMU_STE_1_S1C_CACHE_WT 2ul +#define IVPU_MMU_STE_1_S1C_CACHE_WB 3ul +#define IVPU_MMU_STE_1_S1CIR GENMASK_ULL(3, 2) +#define IVPU_MMU_STE_1_S1COR GENMASK_ULL(5, 4) +#define IVPU_MMU_STE_1_S1CSH GENMASK_ULL(7, 6) +#define IVPU_MMU_STE_1_S1DSS GENMASK_ULL(1, 0) +#define 
IVPU_MMU_STE_1_S1DSS_TERMINATE 0x0 + +#define IVPU_MMU_REG_TIMEOUT_US (10 * USEC_PER_MSEC) +#define IVPU_MMU_QUEUE_TIMEOUT_US (100 * USEC_PER_MSEC) + +#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ)) | \ + (REG_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT)) | \ + (REG_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT)) | \ + (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \ + (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \ + (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \ + (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT))) + +static char *ivpu_mmu_event_to_str(u32 cmd) +{ + switch (cmd) { + case IVPU_MMU_EVT_F_UUT: + return "Unsupported Upstream Transaction"; + case IVPU_MMU_EVT_C_BAD_STREAMID: + return "Transaction StreamID out of range"; + case IVPU_MMU_EVT_F_STE_FETCH: + return "Fetch of STE caused external abort"; + case IVPU_MMU_EVT_C_BAD_STE: + return "Used STE invalid"; + case IVPU_MMU_EVT_F_BAD_ATS_TREQ: + return "Address Request disallowed for a StreamID"; + case IVPU_MMU_EVT_F_STREAM_DISABLED: + return "Transaction marks non-substream disabled"; + case IVPU_MMU_EVT_F_TRANSL_FORBIDDEN: + return "MMU bypass is disallowed for this StreamID"; + case IVPU_MMU_EVT_C_BAD_SUBSTREAMID: + return "Invalid SubstreamID"; + case IVPU_MMU_EVT_F_CD_FETCH: + return "Fetch of CD caused external abort"; + case IVPU_MMU_EVT_C_BAD_CD: + return "Fetched CD invalid"; + case IVPU_MMU_EVT_F_WALK_EABT: + return "An external abort occurred fetching a TLB"; + case IVPU_MMU_EVT_F_TRANSLATION: + return "Translation fault"; + case IVPU_MMU_EVT_F_ADDR_SIZE: + return "Output address caused address size fault"; + case IVPU_MMU_EVT_F_ACCESS: + return "Access flag fault"; + case IVPU_MMU_EVT_F_PERMISSION: + return "Permission fault occurred on page access"; + case IVPU_MMU_EVT_F_TLB_CONFLICT: + return "A TLB conflict"; + case IVPU_MMU_EVT_F_CFG_CONFLICT: + return "A configuration cache conflict"; + case IVPU_MMU_EVT_E_PAGE_REQUEST: + return "Page request hint from a client device"; + case IVPU_MMU_EVT_F_VMS_FETCH: + return "Fetch of VMS caused external abort"; + default: + return "Unknown event"; + } +} + +static void ivpu_mmu_config_check(struct ivpu_device *vdev) +{ + u32 val_ref; + u32 val; + + if (ivpu_is_simics(vdev)) + val_ref = IVPU_MMU_IDR0_REF_SIMICS; + else + val_ref = IVPU_MMU_IDR0_REF; + + val = REGV_RD32(MTL_VPU_HOST_MMU_IDR0); + if (val != val_ref) + ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref); + + val = REGV_RD32(MTL_VPU_HOST_MMU_IDR1); + if (val != IVPU_MMU_IDR1_REF) + ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF); + + val = REGV_RD32(MTL_VPU_HOST_MMU_IDR3); + if (val != IVPU_MMU_IDR3_REF) + ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF); + + if (ivpu_is_simics(vdev)) + val_ref = IVPU_MMU_IDR5_REF_SIMICS; + else if (ivpu_is_fpga(vdev)) + val_ref = IVPU_MMU_IDR5_REF_FPGA; + else + val_ref = IVPU_MMU_IDR5_REF; + + val = REGV_RD32(MTL_VPU_HOST_MMU_IDR5); + if (val != val_ref) + ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref); +} + +static int ivpu_mmu_cdtab_alloc(struct ivpu_device *vdev) +{ + struct ivpu_mmu_info *mmu = vdev->mmu; + struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; + size_t size = IVPU_MMU_CDTAB_ENT_COUNT * IVPU_MMU_CDTAB_ENT_SIZE; + + cdtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &cdtab->dma, GFP_KERNEL); + if (!cdtab->base) + return -ENOMEM; + + ivpu_dbg(vdev, MMU, "CDTAB alloc: dma=%pad size=%zu\n", &cdtab->dma, size); + + return 0; +} + +static int 
ivpu_mmu_strtab_alloc(struct ivpu_device *vdev) +{ + struct ivpu_mmu_info *mmu = vdev->mmu; + struct ivpu_mmu_strtab *strtab = &mmu->strtab; + size_t size = IVPU_MMU_STRTAB_ENT_COUNT * IVPU_MMU_STRTAB_ENT_SIZE; + + strtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &strtab->dma, GFP_KERNEL); + if (!strtab->base) + return -ENOMEM; + + strtab->base_cfg = IVPU_MMU_STRTAB_CFG; + strtab->dma_q = IVPU_MMU_STRTAB_BASE_RA; + strtab->dma_q |= strtab->dma & IVPU_MMU_STRTAB_BASE_ADDR_MASK; + + ivpu_dbg(vdev, MMU, "STRTAB alloc: dma=%pad dma_q=%pad size=%zu\n", + &strtab->dma, &strtab->dma_q, size); + + return 0; +} + +static int ivpu_mmu_cmdq_alloc(struct ivpu_device *vdev) +{ + struct ivpu_mmu_info *mmu = vdev->mmu; + struct ivpu_mmu_queue *q = &mmu->cmdq; + + q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_CMDQ_SIZE, &q->dma, GFP_KERNEL); + if (!q->base) + return -ENOMEM; + + q->dma_q = IVPU_MMU_Q_BASE_RWA; + q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK; + q->dma_q |= IVPU_MMU_Q_COUNT_LOG2; + + ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n", + &q->dma, &q->dma_q, IVPU_MMU_CMDQ_SIZE); + + return 0; +} + +static int ivpu_mmu_evtq_alloc(struct ivpu_device *vdev) +{ + struct ivpu_mmu_info *mmu = vdev->mmu; + struct ivpu_mmu_queue *q = &mmu->evtq; + + q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_EVTQ_SIZE, &q->dma, GFP_KERNEL); + if (!q->base) + return -ENOMEM; + + q->dma_q = IVPU_MMU_Q_BASE_RWA; + q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK; + q->dma_q |= IVPU_MMU_Q_COUNT_LOG2; + + ivpu_dbg(vdev, MMU, "EVTQ alloc: dma=%pad dma_q=%pad size=%u\n", + &q->dma, &q->dma_q, IVPU_MMU_EVTQ_SIZE); + + return 0; +} + +static int ivpu_mmu_structs_alloc(struct ivpu_device *vdev) +{ + int ret; + + ret = ivpu_mmu_cdtab_alloc(vdev); + if (ret) { + ivpu_err(vdev, "Failed to allocate cdtab: %d\n", ret); + return ret; + } + + ret = ivpu_mmu_strtab_alloc(vdev); + if (ret) { + ivpu_err(vdev, "Failed to allocate strtab: %d\n", ret); + return ret; + } + + ret = ivpu_mmu_cmdq_alloc(vdev); + if (ret) { + ivpu_err(vdev, "Failed to allocate cmdq: %d\n", ret); + return ret; + } + + ret = ivpu_mmu_evtq_alloc(vdev); + if (ret) + ivpu_err(vdev, "Failed to allocate evtq: %d\n", ret); + + return ret; +} + +static int ivpu_mmu_reg_write(struct ivpu_device *vdev, u32 reg, u32 val) +{ + u32 reg_ack = reg + 4; /* ACK register is 4B after base register */ + u32 val_ack; + int ret; + + REGV_WR32(reg, val); + + ret = REGV_POLL(reg_ack, val_ack, (val == val_ack), IVPU_MMU_REG_TIMEOUT_US); + if (ret) + ivpu_err(vdev, "Failed to write register 0x%x\n", reg); + + return ret; +} + +static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev) +{ + u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN; + int ret; + + ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, 0); + if (ret) + return ret; + + return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, irq_ctrl); +} + +static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev) +{ + struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; + + return REGV_POLL(MTL_VPU_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons), + IVPU_MMU_QUEUE_TIMEOUT_US); +} + +static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1) +{ + struct ivpu_mmu_queue *q = &vdev->mmu->cmdq; + u64 *queue_buffer = q->base; + int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer)); + + if (!CIRC_SPACE(IVPU_MMU_Q_IDX(q->prod), IVPU_MMU_Q_IDX(q->cons), IVPU_MMU_Q_COUNT)) { + ivpu_err(vdev, "Failed to 
write MMU CMD %s\n", name); + return -EBUSY; + } + + queue_buffer[idx] = data0; + queue_buffer[idx + 1] = data1; + q->prod = (q->prod + 1) & IVPU_MMU_Q_WRAP_MASK; + + ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1); + + return 0; +} + +static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev) +{ + struct ivpu_mmu_queue *q = &vdev->mmu->cmdq; + u64 val; + int ret; + + val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_SYNC) | + FIELD_PREP(IVPU_MMU_CMD_SYNC_0_CS, 0x2) | + FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSH, 0x3) | + FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSI_ATTR, 0xf); + + ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0); + if (ret) + return ret; + + clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE); + REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, q->prod); + + ret = ivpu_mmu_cmdq_wait_for_cons(vdev); + if (ret) + ivpu_err(vdev, "Timed out waiting for consumer: %d\n", ret); + + return ret; +} + +static int ivpu_mmu_cmdq_write_cfgi_all(struct ivpu_device *vdev) +{ + u64 data0 = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_CFGI_ALL); + u64 data1 = FIELD_PREP(IVPU_MMU_CMD_CFGI_1_RANGE, 0x1f); + + return ivpu_mmu_cmdq_cmd_write(vdev, "CFGI_ALL", data0, data1); +} + +static int ivpu_mmu_cmdq_write_tlbi_nh_asid(struct ivpu_device *vdev, u16 ssid) +{ + u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NH_ASID) | + FIELD_PREP(IVPU_MMU_CMD_TLBI_0_ASID, ssid); + + return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NH_ASID", val, 0); +} + +static int ivpu_mmu_cmdq_write_tlbi_nsnh_all(struct ivpu_device *vdev) +{ + u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NSNH_ALL); + + return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NSNH_ALL", val, 0); +} + +static int ivpu_mmu_reset(struct ivpu_device *vdev) +{ + struct ivpu_mmu_info *mmu = vdev->mmu; + u32 val; + int ret; + + memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE); + clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE); + mmu->cmdq.prod = 0; + mmu->cmdq.cons = 0; + + memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE); + clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE); + mmu->evtq.prod = 0; + mmu->evtq.cons = 0; + + ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, 0); + if (ret) + return ret; + + val = FIELD_PREP(IVPU_MMU_CR1_TABLE_SH, IVPU_MMU_SH_ISH) | + FIELD_PREP(IVPU_MMU_CR1_TABLE_OC, IVPU_MMU_CACHE_WB) | + FIELD_PREP(IVPU_MMU_CR1_TABLE_IC, IVPU_MMU_CACHE_WB) | + FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) | + FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) | + FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB); + REGV_WR32(MTL_VPU_HOST_MMU_CR1, val); + + REGV_WR64(MTL_VPU_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q); + REGV_WR32(MTL_VPU_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg); + + REGV_WR64(MTL_VPU_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q); + REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, 0); + REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_CONS, 0); + + val = IVPU_MMU_CR0_CMDQEN; + ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val); + if (ret) + return ret; + + ret = ivpu_mmu_cmdq_write_cfgi_all(vdev); + if (ret) + return ret; + + ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev); + if (ret) + return ret; + + ret = ivpu_mmu_cmdq_sync(vdev); + if (ret) + return ret; + + REGV_WR64(MTL_VPU_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q); + REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC, 0); + REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, 0); + + val |= IVPU_MMU_CR0_EVTQEN; + ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val); + if (ret) + return ret; + + val |= IVPU_MMU_CR0_ATSCHK; + ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val); + if (ret) + return ret; + + 
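/* Queues are programmed and ATS checking is on - unmask the MMU interrupts here so that the final SMMUEN write below does not race with a lost event */ 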
ret = ivpu_mmu_irqs_setup(vdev); + if (ret) + return ret; + + val |= IVPU_MMU_CR0_SMMUEN; + return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val); +} + +static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid) +{ + struct ivpu_mmu_info *mmu = vdev->mmu; + struct ivpu_mmu_strtab *strtab = &mmu->strtab; + struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; + u64 *entry = strtab->base + (sid * IVPU_MMU_STRTAB_ENT_SIZE); + u64 str[2]; + + str[0] = FIELD_PREP(IVPU_MMU_STE_0_CFG, IVPU_MMU_STE_0_CFG_S1_TRANS) | + FIELD_PREP(IVPU_MMU_STE_0_S1CDMAX, IVPU_MMU_CDTAB_ENT_COUNT_LOG2) | + FIELD_PREP(IVPU_MMU_STE_0_S1FMT, IVPU_MMU_STE_0_S1FMT_LINEAR) | + IVPU_MMU_STE_0_V | + (cdtab->dma & IVPU_MMU_STE_0_S1CTXPTR_MASK); + + str[1] = FIELD_PREP(IVPU_MMU_STE_1_S1DSS, IVPU_MMU_STE_1_S1DSS_TERMINATE) | + FIELD_PREP(IVPU_MMU_STE_1_S1CIR, IVPU_MMU_STE_1_S1C_CACHE_NC) | + FIELD_PREP(IVPU_MMU_STE_1_S1COR, IVPU_MMU_STE_1_S1C_CACHE_NC) | + FIELD_PREP(IVPU_MMU_STE_1_S1CSH, IVPU_MMU_SH_NSH) | + FIELD_PREP(IVPU_MMU_STE_1_PRIVCFG, IVPU_MMU_STE_1_PRIVCFG_UNPRIV) | + FIELD_PREP(IVPU_MMU_STE_1_INSTCFG, IVPU_MMU_STE_1_INSTCFG_DATA) | + FIELD_PREP(IVPU_MMU_STE_1_STRW, IVPU_MMU_STE_1_STRW_NSEL1) | + FIELD_PREP(IVPU_MMU_STE_1_CONT, IVPU_MMU_STRTAB_CFG_LOG2SIZE) | + IVPU_MMU_STE_1_MEV | + IVPU_MMU_STE_1_S1STALLD; + + WRITE_ONCE(entry[1], str[1]); + WRITE_ONCE(entry[0], str[0]); + + clflush_cache_range(entry, IVPU_MMU_STRTAB_ENT_SIZE); + + ivpu_dbg(vdev, MMU, "STRTAB write entry (SSID=%u): 0x%llx, 0x%llx\n", sid, str[0], str[1]); +} + +static int ivpu_mmu_strtab_init(struct ivpu_device *vdev) +{ + ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID0); + ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID3); + + return 0; +} + +int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid) +{ + struct ivpu_mmu_info *mmu = vdev->mmu; + int ret; + + ret = mutex_lock_interruptible(&mmu->lock); + if (ret) + return ret; + + if (!mmu->on) { + ret = 0; + goto unlock; + } + + ret = ivpu_mmu_cmdq_write_tlbi_nh_asid(vdev, ssid); + if (ret) + goto unlock; + + ret = ivpu_mmu_cmdq_sync(vdev); +unlock: + mutex_unlock(&mmu->lock); + return ret; +} + +static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma) +{ + struct ivpu_mmu_info *mmu = vdev->mmu; + struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; + u64 *entry; + u64 cd[4]; + int ret; + + if (ssid >= IVPU_MMU_CDTAB_ENT_COUNT) + return -EINVAL; + + entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE); + + if (cd_dma != 0) { + cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, 26) | + FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) | + FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) | + FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) | + FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) | + FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, 3) | + FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) | + IVPU_MMU_CD_0_TCR_EPD1 | + IVPU_MMU_CD_0_AA64 | + IVPU_MMU_CD_0_R | + IVPU_MMU_CD_0_ASET | + IVPU_MMU_CD_0_V; + cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK; + cd[2] = 0; + cd[3] = 0x0000000000007444; + + /* For global context generate memory fault on VPU */ + if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) + cd[0] |= IVPU_MMU_CD_0_A; + } else { + memset(cd, 0, sizeof(cd)); + } + + WRITE_ONCE(entry[1], cd[1]); + WRITE_ONCE(entry[2], cd[2]); + WRITE_ONCE(entry[3], cd[3]); + WRITE_ONCE(entry[0], cd[0]); + + clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE); + + ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n", + cd_dma ? 
"write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]); + + ret = mutex_lock_interruptible(&mmu->lock); + if (ret) + return ret; + + if (!mmu->on) { + ret = 0; + goto unlock; + } + + ret = ivpu_mmu_cmdq_write_cfgi_all(vdev); + if (ret) + goto unlock; + + ret = ivpu_mmu_cmdq_sync(vdev); +unlock: + mutex_unlock(&mmu->lock); + return ret; +} + +static int ivpu_mmu_cd_add_gbl(struct ivpu_device *vdev) +{ + int ret; + + ret = ivpu_mmu_cd_add(vdev, 0, vdev->gctx.pgtable.pgd_dma); + if (ret) + ivpu_err(vdev, "Failed to add global CD entry: %d\n", ret); + + return ret; +} + +static int ivpu_mmu_cd_add_user(struct ivpu_device *vdev, u32 ssid, dma_addr_t cd_dma) +{ + int ret; + + if (ssid == 0) { + ivpu_err(vdev, "Invalid SSID: %u\n", ssid); + return -EINVAL; + } + + ret = ivpu_mmu_cd_add(vdev, ssid, cd_dma); + if (ret) + ivpu_err(vdev, "Failed to add CD entry SSID=%u: %d\n", ssid, ret); + + return ret; +} + +int ivpu_mmu_init(struct ivpu_device *vdev) +{ + struct ivpu_mmu_info *mmu = vdev->mmu; + int ret; + + ivpu_dbg(vdev, MMU, "Init..\n"); + + drmm_mutex_init(&vdev->drm, &mmu->lock); + ivpu_mmu_config_check(vdev); + + ret = ivpu_mmu_structs_alloc(vdev); + if (ret) + return ret; + + ret = ivpu_mmu_strtab_init(vdev); + if (ret) { + ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret); + return ret; + } + + ret = ivpu_mmu_cd_add_gbl(vdev); + if (ret) { + ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret); + return ret; + } + + ret = ivpu_mmu_enable(vdev); + if (ret) { + ivpu_err(vdev, "Failed to resume MMU: %d\n", ret); + return ret; + } + + ivpu_dbg(vdev, MMU, "Init done\n"); + + return 0; +} + +int ivpu_mmu_enable(struct ivpu_device *vdev) +{ + struct ivpu_mmu_info *mmu = vdev->mmu; + int ret; + + mutex_lock(&mmu->lock); + + mmu->on = true; + + ret = ivpu_mmu_reset(vdev); + if (ret) { + ivpu_err(vdev, "Failed to reset MMU: %d\n", ret); + goto err; + } + + ret = ivpu_mmu_cmdq_write_cfgi_all(vdev); + if (ret) + goto err; + + ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev); + if (ret) + goto err; + + ret = ivpu_mmu_cmdq_sync(vdev); + if (ret) + goto err; + + mutex_unlock(&mmu->lock); + + return 0; +err: + mmu->on = false; + mutex_unlock(&mmu->lock); + return ret; +} + +void ivpu_mmu_disable(struct ivpu_device *vdev) +{ + struct ivpu_mmu_info *mmu = vdev->mmu; + + mutex_lock(&mmu->lock); + mmu->on = false; + mutex_unlock(&mmu->lock); +} + +static void ivpu_mmu_dump_event(struct ivpu_device *vdev, u32 *event) +{ + u32 ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]); + u32 op = FIELD_GET(IVPU_MMU_EVT_OP_MASK, event[0]); + u64 fetch_addr = ((u64)event[7]) << 32 | event[6]; + u64 in_addr = ((u64)event[5]) << 32 | event[4]; + u32 sid = event[1]; + + ivpu_err(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n", + op, ivpu_mmu_event_to_str(op), ssid, sid, event[2], event[3], in_addr, fetch_addr); +} + +static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev) +{ + struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq; + u32 idx = IVPU_MMU_Q_IDX(evtq->cons); + u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE); + + evtq->prod = REGV_RD32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC); + if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT)) + return NULL; + + clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE); + + evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK; + REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, evtq->cons); + + return evt; +} + +void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev) +{ + bool 
schedule_recovery = false; + u32 *event; + u32 ssid; + + ivpu_dbg(vdev, IRQ, "MMU event queue\n"); + + while ((event = ivpu_mmu_get_event(vdev)) != NULL) { + ivpu_mmu_dump_event(vdev, event); + + ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]); + if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) + schedule_recovery = true; + else + ivpu_mmu_user_context_mark_invalid(vdev, ssid); + } + + if (schedule_recovery) + ivpu_pm_schedule_recovery(vdev); +} + +void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev) +{ + u32 gerror_val, gerrorn_val, active; + + ivpu_dbg(vdev, IRQ, "MMU error\n"); + + gerror_val = REGV_RD32(MTL_VPU_HOST_MMU_GERROR); + gerrorn_val = REGV_RD32(MTL_VPU_HOST_MMU_GERRORN); + + active = gerror_val ^ gerrorn_val; + if (!(active & IVPU_MMU_GERROR_ERR_MASK)) + return; + + if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT, active)) + ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n"); + + if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT, active)) + ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n"); + + if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT, active)) + ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n"); + + if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT, active)) + ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n"); + + if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT, active)) + ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n"); + + if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT, active)) + ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n"); + + if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ, active)) + ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n"); + + REGV_WR32(MTL_VPU_HOST_MMU_GERRORN, gerror_val); +} + +int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable) +{ + return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma); +} + +void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid) +{ + ivpu_mmu_cd_add_user(vdev, ssid, 0); /* 0 will clear CD entry */ +} diff --git a/drivers/accel/ivpu/ivpu_mmu.h b/drivers/accel/ivpu/ivpu_mmu.h new file mode 100644 index 00000000000000..cb551126806baa --- /dev/null +++ b/drivers/accel/ivpu/ivpu_mmu.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#ifndef __IVPU_MMU_H__ +#define __IVPU_MMU_H__ + +struct ivpu_device; + +struct ivpu_mmu_cdtab { + void *base; + dma_addr_t dma; +}; + +struct ivpu_mmu_strtab { + void *base; + dma_addr_t dma; + u64 dma_q; + u32 base_cfg; +}; + +struct ivpu_mmu_queue { + void *base; + dma_addr_t dma; + u64 dma_q; + u32 prod; + u32 cons; +}; + +struct ivpu_mmu_info { + struct mutex lock; /* Protects cdtab, strtab, cmdq, on */ + struct ivpu_mmu_cdtab cdtab; + struct ivpu_mmu_strtab strtab; + struct ivpu_mmu_queue cmdq; + struct ivpu_mmu_queue evtq; + bool on; +}; + +int ivpu_mmu_init(struct ivpu_device *vdev); +void ivpu_mmu_disable(struct ivpu_device *vdev); +int ivpu_mmu_enable(struct ivpu_device *vdev); +int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable); +void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid); +int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid); + +void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev); +void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev); + +#endif /* __IVPU_MMU_H__ */ diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c new file mode 100644 index 
00000000000000..8ce9b12ac35668 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_mmu_context.c @@ -0,0 +1,398 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#include <linux/bitfield.h> +#include <linux/highmem.h> + +#include "ivpu_drv.h" +#include "ivpu_hw.h" +#include "ivpu_mmu.h" +#include "ivpu_mmu_context.h" + +#define IVPU_MMU_PGD_INDEX_MASK GENMASK(38, 30) +#define IVPU_MMU_PMD_INDEX_MASK GENMASK(29, 21) +#define IVPU_MMU_PTE_INDEX_MASK GENMASK(20, 12) +#define IVPU_MMU_ENTRY_FLAGS_MASK GENMASK(11, 0) +#define IVPU_MMU_ENTRY_FLAG_NG BIT(11) +#define IVPU_MMU_ENTRY_FLAG_AF BIT(10) +#define IVPU_MMU_ENTRY_FLAG_USER BIT(6) +#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2) +#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE BIT(1) +#define IVPU_MMU_ENTRY_FLAG_VALID BIT(0) + +#define IVPU_MMU_PAGE_SIZE SZ_4K +#define IVPU_MMU_PTE_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE) +#define IVPU_MMU_PMD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE) +#define IVPU_MMU_PGTABLE_SIZE (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64)) + +#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000 +#define IVPU_MMU_ENTRY_VALID (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID) +#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK) +#define IVPU_MMU_ENTRY_MAPPED (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \ + IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID) + +static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable) +{ + dma_addr_t pgd_dma; + u64 *pgd; + + pgd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma, GFP_KERNEL); + if (!pgd) + return -ENOMEM; + + pgtable->pgd = pgd; + pgtable->pgd_dma = pgd_dma; + + return 0; +} + +static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable) +{ + int pgd_index, pmd_index; + + for (pgd_index = 0; pgd_index < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_index) { + u64 **pmd_entries = pgtable->pgd_cpu_entries[pgd_index]; + u64 *pmd = pgtable->pgd_entries[pgd_index]; + + if (!pmd_entries) + continue; + + for (pmd_index = 0; pmd_index < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_index) { + if (pmd_entries[pmd_index]) + dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, + pmd_entries[pmd_index], + pmd[pmd_index] & ~IVPU_MMU_ENTRY_FLAGS_MASK); + } + + kfree(pmd_entries); + dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd_entries[pgd_index], + pgtable->pgd[pgd_index] & ~IVPU_MMU_ENTRY_FLAGS_MASK); + } + + dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd, + pgtable->pgd_dma & ~IVPU_MMU_ENTRY_FLAGS_MASK); +} + +static u64* +ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, u64 pgd_index) +{ + u64 **pmd_entries; + dma_addr_t pmd_dma; + u64 *pmd; + + if (pgtable->pgd_entries[pgd_index]) + return pgtable->pgd_entries[pgd_index]; + + pmd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL); + if (!pmd) + return NULL; + + pmd_entries = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL); + if (!pmd_entries) + goto err_free_pmd; + + pgtable->pgd_entries[pgd_index] = pmd; + pgtable->pgd_cpu_entries[pgd_index] = pmd_entries; + pgtable->pgd[pgd_index] = pmd_dma | IVPU_MMU_ENTRY_VALID; + + return pmd; + +err_free_pmd: + dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pmd, pmd_dma); + return NULL; +} + +static u64* +ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, + int pgd_index, int pmd_index) +{ + dma_addr_t pte_dma; + u64 *pte; + + if 
(pgtable->pgd_cpu_entries[pgd_index][pmd_index]) + return pgtable->pgd_cpu_entries[pgd_index][pmd_index]; + + pte = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL); + if (!pte) + return NULL; + + pgtable->pgd_cpu_entries[pgd_index][pmd_index] = pte; + pgtable->pgd_entries[pgd_index][pmd_index] = pte_dma | IVPU_MMU_ENTRY_VALID; + + return pte; +} + +static int +ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, + u64 vpu_addr, dma_addr_t dma_addr, int prot) +{ + u64 *pte; + int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); + int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); + int pte_index = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); + + /* Allocate PMD - second level page table if needed */ + if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_index)) + return -ENOMEM; + + /* Allocate PTE - third level page table if needed */ + pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_index, pmd_index); + if (!pte) + return -ENOMEM; + + /* Update PTE - third level page table with DMA address */ + pte[pte_index] = dma_addr | prot; + + return 0; +} + +static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr) +{ + int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); + int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); + int pte_index = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); + + /* Update PTE with dummy physical address and clear flags */ + ctx->pgtable.pgd_cpu_entries[pgd_index][pmd_index][pte_index] = IVPU_MMU_ENTRY_INVALID; +} + +static void +ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size) +{ + u64 end_addr = vpu_addr + size; + u64 *pgd = ctx->pgtable.pgd; + + /* Align to PMD entry (2 MB) */ + vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1); + + while (vpu_addr < end_addr) { + int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); + u64 pmd_end = (pgd_index + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE; + u64 *pmd = ctx->pgtable.pgd_entries[pgd_index]; + + while (vpu_addr < end_addr && vpu_addr < pmd_end) { + int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); + u64 *pte = ctx->pgtable.pgd_cpu_entries[pgd_index][pmd_index]; + + clflush_cache_range(pte, IVPU_MMU_PGTABLE_SIZE); + vpu_addr += IVPU_MMU_PTE_MAP_SIZE; + } + clflush_cache_range(pmd, IVPU_MMU_PGTABLE_SIZE); + } + clflush_cache_range(pgd, IVPU_MMU_PGTABLE_SIZE); +} + +static int +ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, + u64 vpu_addr, dma_addr_t dma_addr, size_t size, int prot) +{ + while (size) { + int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot); + + if (ret) + return ret; + + vpu_addr += IVPU_MMU_PAGE_SIZE; + dma_addr += IVPU_MMU_PAGE_SIZE; + size -= IVPU_MMU_PAGE_SIZE; + } + + return 0; +} + +static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size) +{ + while (size) { + ivpu_mmu_context_unmap_page(ctx, vpu_addr); + vpu_addr += IVPU_MMU_PAGE_SIZE; + size -= IVPU_MMU_PAGE_SIZE; + } +} + +int +ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, + u64 vpu_addr, struct sg_table *sgt, bool llc_coherent) +{ + struct scatterlist *sg; + int prot; + int ret; + u64 i; + + if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE)) + return -EINVAL; + /* + * VPU is only 32 bit, but DMA engine is 38 bit + * Ranges < 2 GB are reserved for VPU internal registers + * Limit range to 8 GB + */ + if (vpu_addr < SZ_2G || vpu_addr > SZ_8G) + return -EINVAL; + + 
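/* Base protection for a mapped page: accessed (AF), user-accessible, non-global and valid; LLC coherency is ORed in per-mapping below */ 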
prot = IVPU_MMU_ENTRY_MAPPED; + if (llc_coherent) + prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT; + + mutex_lock(&ctx->lock); + + for_each_sgtable_dma_sg(sgt, sg, i) { + u64 dma_addr = sg_dma_address(sg) - sg->offset; + size_t size = sg_dma_len(sg) + sg->offset; + + ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot); + if (ret) { + ivpu_err(vdev, "Failed to map context pages\n"); + mutex_unlock(&ctx->lock); + return ret; + } + ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size); + vpu_addr += size; + } + + mutex_unlock(&ctx->lock); + + ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id); + if (ret) + ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret); + return ret; +} + +void +ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, + u64 vpu_addr, struct sg_table *sgt) +{ + struct scatterlist *sg; + int ret; + u64 i; + + if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE)) + ivpu_warn(vdev, "Unaligned vpu_addr: 0x%llx\n", vpu_addr); + + mutex_lock(&ctx->lock); + + for_each_sgtable_dma_sg(sgt, sg, i) { + size_t size = sg_dma_len(sg) + sg->offset; + + ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size); + ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size); + vpu_addr += size; + } + + mutex_unlock(&ctx->lock); + + ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id); + if (ret) + ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret); +} + +int +ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx, + const struct ivpu_addr_range *range, + u64 size, struct drm_mm_node *node) +{ + lockdep_assert_held(&ctx->lock); + + return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, + 0, range->start, range->end, DRM_MM_INSERT_BEST); +} + +void +ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx, struct drm_mm_node *node) +{ + lockdep_assert_held(&ctx->lock); + + drm_mm_remove_node(node); +} + +static int +ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id) +{ + u64 start, end; + int ret; + + mutex_init(&ctx->lock); + INIT_LIST_HEAD(&ctx->bo_list); + + ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable); + if (ret) + return ret; + + if (!context_id) { + start = vdev->hw->ranges.global_low.start; + end = vdev->hw->ranges.global_high.end; + } else { + start = vdev->hw->ranges.user_low.start; + end = vdev->hw->ranges.user_high.end; + } + + drm_mm_init(&ctx->mm, start, end - start); + ctx->id = context_id; + + return 0; +} + +static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx) +{ + drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd); + + mutex_destroy(&ctx->lock); + ivpu_mmu_pgtable_free(vdev, &ctx->pgtable); + drm_mm_takedown(&ctx->mm); +} + +int ivpu_mmu_global_context_init(struct ivpu_device *vdev) +{ + return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID); +} + +void ivpu_mmu_global_context_fini(struct ivpu_device *vdev) +{ + return ivpu_mmu_context_fini(vdev, &vdev->gctx); +} + +void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid) +{ + struct ivpu_file_priv *file_priv; + + xa_lock(&vdev->context_xa); + + file_priv = xa_load(&vdev->context_xa, ssid); + if (file_priv) + file_priv->has_mmu_faults = true; + + xa_unlock(&vdev->context_xa); +} + +int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id) +{ + int ret; + + drm_WARN_ON(&vdev->drm, !ctx_id); + + ret = ivpu_mmu_context_init(vdev, ctx, ctx_id); + if (ret) { + ivpu_err(vdev, 
"Failed to initialize context: %d\n", ret); + return ret; + } + + ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable); + if (ret) { + ivpu_err(vdev, "Failed to set page table: %d\n", ret); + goto err_context_fini; + } + + return 0; + +err_context_fini: + ivpu_mmu_context_fini(vdev, ctx); + return ret; +} + +void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx) +{ + drm_WARN_ON(&vdev->drm, !ctx->id); + + ivpu_mmu_clear_pgtable(vdev, ctx->id); + ivpu_mmu_context_fini(vdev, ctx); +} diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h new file mode 100644 index 00000000000000..ddf11b95023a0e --- /dev/null +++ b/drivers/accel/ivpu/ivpu_mmu_context.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#ifndef __IVPU_MMU_CONTEXT_H__ +#define __IVPU_MMU_CONTEXT_H__ + +#include + +struct ivpu_device; +struct ivpu_file_priv; +struct ivpu_addr_range; + +#define IVPU_MMU_PGTABLE_ENTRIES 512 + +struct ivpu_mmu_pgtable { + u64 **pgd_cpu_entries[IVPU_MMU_PGTABLE_ENTRIES]; + u64 *pgd_entries[IVPU_MMU_PGTABLE_ENTRIES]; + u64 *pgd; + dma_addr_t pgd_dma; +}; + +struct ivpu_mmu_context { + struct mutex lock; /* protects: mm, pgtable, bo_list */ + struct drm_mm mm; + struct ivpu_mmu_pgtable pgtable; + struct list_head bo_list; + u32 id; +}; + +int ivpu_mmu_global_context_init(struct ivpu_device *vdev); +void ivpu_mmu_global_context_fini(struct ivpu_device *vdev); + +int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id); +void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx); +void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid); + +int ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx, + const struct ivpu_addr_range *range, + u64 size, struct drm_mm_node *node); +void ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx, + struct drm_mm_node *node); + +int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, + u64 vpu_addr, struct sg_table *sgt, bool llc_coherent); +void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, + u64 vpu_addr, struct sg_table *sgt); + +#endif /* __IVPU_MMU_CONTEXT_H__ */ diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c new file mode 100644 index 00000000000000..553bcbd787b3c8 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include "vpu_boot_api.h" +#include "ivpu_drv.h" +#include "ivpu_hw.h" +#include "ivpu_fw.h" +#include "ivpu_ipc.h" +#include "ivpu_job.h" +#include "ivpu_mmu.h" +#include "ivpu_pm.h" + +static bool ivpu_disable_recovery; +module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644); +MODULE_PARM_DESC(disable_recovery, "Disables recovery when VPU hang is detected"); + +#define PM_RESCHEDULE_LIMIT 5 + +static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev) +{ + struct ivpu_fw_info *fw = vdev->fw; + + ivpu_cmdq_reset_all_contexts(vdev); + ivpu_ipc_reset(vdev); + ivpu_fw_load(vdev); + fw->entry_point = fw->cold_boot_entry_point; +} + +static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev) +{ + struct ivpu_fw_info *fw = vdev->fw; + struct vpu_boot_params *bp = fw->mem->kvaddr; + + if (!bp->save_restore_ret_address) 
{ + ivpu_pm_prepare_cold_boot(vdev); + return; + } + + ivpu_dbg(vdev, FW_BOOT, "Save/restore entry point %llx", bp->save_restore_ret_address); + fw->entry_point = bp->save_restore_ret_address; +} + +static int ivpu_suspend(struct ivpu_device *vdev) +{ + int ret; + + ret = ivpu_shutdown(vdev); + if (ret) { + ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret); + return ret; + } + + return ret; +} + +static int ivpu_resume(struct ivpu_device *vdev) +{ + int ret; + +retry: + ret = ivpu_hw_power_up(vdev); + if (ret) { + ivpu_err(vdev, "Failed to power up HW: %d\n", ret); + return ret; + } + + ret = ivpu_mmu_enable(vdev); + if (ret) { + ivpu_err(vdev, "Failed to resume MMU: %d\n", ret); + ivpu_hw_power_down(vdev); + return ret; + } + + ret = ivpu_boot(vdev); + if (ret) { + ivpu_mmu_disable(vdev); + ivpu_hw_power_down(vdev); + if (!ivpu_fw_is_cold_boot(vdev)) { + ivpu_warn(vdev, "Failed to resume the FW: %d. Retrying cold boot..\n", ret); + ivpu_pm_prepare_cold_boot(vdev); + goto retry; + } else { + ivpu_err(vdev, "Failed to resume the FW: %d\n", ret); + } + } + + return ret; +} + +static void ivpu_pm_recovery_work(struct work_struct *work) +{ + struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work); + struct ivpu_device *vdev = pm->vdev; + char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL}; + int ret; + + ret = pci_reset_function(to_pci_dev(vdev->drm.dev)); + if (ret) + ivpu_err(vdev, "Failed to reset VPU: %d\n", ret); + + kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt); +} + +void ivpu_pm_schedule_recovery(struct ivpu_device *vdev) +{ + struct ivpu_pm_info *pm = vdev->pm; + + if (ivpu_disable_recovery) { + ivpu_err(vdev, "Recovery not available when disable_recovery param is set\n"); + return; + } + + if (ivpu_is_fpga(vdev)) { + ivpu_err(vdev, "Recovery not available on FPGA\n"); + return; + } + + /* Schedule recovery if it's not in progress */ + if (atomic_cmpxchg(&pm->in_reset, 0, 1) == 0) { + ivpu_hw_irq_disable(vdev); + queue_work(system_long_wq, &pm->recovery_work); + } +} + +int ivpu_pm_suspend_cb(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct ivpu_device *vdev = to_ivpu_device(drm); + int ret; + + ivpu_dbg(vdev, PM, "Suspend..\n"); + + ret = ivpu_suspend(vdev); + if (ret && vdev->pm->suspend_reschedule_counter) { + ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n", + vdev->pm->suspend_reschedule_counter); + pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend); + vdev->pm->suspend_reschedule_counter--; + return -EBUSY; + } else if (!vdev->pm->suspend_reschedule_counter) { + ivpu_warn(vdev, "Failed to enter idle, force suspend\n"); + ivpu_pm_prepare_cold_boot(vdev); + } else { + ivpu_pm_prepare_warm_boot(vdev); + } + + vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT; + + pci_save_state(to_pci_dev(dev)); + pci_set_power_state(to_pci_dev(dev), PCI_D3hot); + + ivpu_dbg(vdev, PM, "Suspend done.\n"); + + return ret; +} + +int ivpu_pm_resume_cb(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct ivpu_device *vdev = to_ivpu_device(drm); + int ret; + + ivpu_dbg(vdev, PM, "Resume..\n"); + + pci_set_power_state(to_pci_dev(dev), PCI_D0); + pci_restore_state(to_pci_dev(dev)); + + ret = ivpu_resume(vdev); + if (ret) + ivpu_err(vdev, "Failed to resume: %d\n", ret); + + ivpu_dbg(vdev, PM, "Resume done.\n"); + + return ret; +} + +int ivpu_pm_runtime_suspend_cb(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct ivpu_device 
*vdev = to_ivpu_device(drm); + int ret; + + ivpu_dbg(vdev, PM, "Runtime suspend..\n"); + + if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) { + ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n", + vdev->pm->suspend_reschedule_counter); + pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend); + vdev->pm->suspend_reschedule_counter--; + return -EAGAIN; + } + + ret = ivpu_suspend(vdev); + if (ret) + ivpu_err(vdev, "Failed to suspend VPU: %d\n", ret); + + if (!vdev->pm->suspend_reschedule_counter) { + ivpu_warn(vdev, "VPU failed to enter idle, force suspended.\n"); + ivpu_pm_prepare_cold_boot(vdev); + } else { + ivpu_pm_prepare_warm_boot(vdev); + } + + vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT; + + ivpu_dbg(vdev, PM, "Runtime suspend done.\n"); + + return 0; +} + +int ivpu_pm_runtime_resume_cb(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct ivpu_device *vdev = to_ivpu_device(drm); + int ret; + + ivpu_dbg(vdev, PM, "Runtime resume..\n"); + + ret = ivpu_resume(vdev); + if (ret) + ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret); + + ivpu_dbg(vdev, PM, "Runtime resume done.\n"); + + return ret; +} + +int ivpu_rpm_get(struct ivpu_device *vdev) +{ + int ret; + + ivpu_dbg(vdev, RPM, "rpm_get count %d\n", atomic_read(&vdev->drm.dev->power.usage_count)); + + ret = pm_runtime_resume_and_get(vdev->drm.dev); + if (!drm_WARN_ON(&vdev->drm, ret < 0)) + vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT; + + return ret; +} + +void ivpu_rpm_put(struct ivpu_device *vdev) +{ + ivpu_dbg(vdev, RPM, "rpm_put count %d\n", atomic_read(&vdev->drm.dev->power.usage_count)); + + pm_runtime_mark_last_busy(vdev->drm.dev); + pm_runtime_put_autosuspend(vdev->drm.dev); +} + +void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev) +{ + struct ivpu_device *vdev = pci_get_drvdata(pdev); + + pm_runtime_get_sync(vdev->drm.dev); + + ivpu_dbg(vdev, PM, "Pre-reset..\n"); + atomic_set(&vdev->pm->in_reset, 1); + ivpu_shutdown(vdev); + ivpu_pm_prepare_cold_boot(vdev); + ivpu_jobs_abort_all(vdev); + ivpu_dbg(vdev, PM, "Pre-reset done.\n"); +} + +void ivpu_pm_reset_done_cb(struct pci_dev *pdev) +{ + struct ivpu_device *vdev = pci_get_drvdata(pdev); + int ret; + + ivpu_dbg(vdev, PM, "Post-reset..\n"); + ret = ivpu_resume(vdev); + if (ret) + ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret); + atomic_set(&vdev->pm->in_reset, 0); + ivpu_dbg(vdev, PM, "Post-reset done.\n"); + + pm_runtime_put_autosuspend(vdev->drm.dev); +} + +int ivpu_pm_init(struct ivpu_device *vdev) +{ + struct device *dev = vdev->drm.dev; + struct ivpu_pm_info *pm = vdev->pm; + + pm->vdev = vdev; + pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT; + + atomic_set(&pm->in_reset, 0); + INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work); + + pm_runtime_use_autosuspend(dev); + + if (ivpu_disable_recovery) + pm_runtime_set_autosuspend_delay(dev, -1); + else if (ivpu_is_silicon(vdev)) + pm_runtime_set_autosuspend_delay(dev, 100); + else + pm_runtime_set_autosuspend_delay(dev, 60000); + + return 0; +} + +void ivpu_pm_enable(struct ivpu_device *vdev) +{ + struct device *dev = vdev->drm.dev; + + pm_runtime_set_active(dev); + pm_runtime_allow(dev); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + ivpu_dbg(vdev, RPM, "Enable RPM count %d\n", atomic_read(&dev->power.usage_count)); +} + +void ivpu_pm_disable(struct ivpu_device *vdev) +{ + struct device *dev = vdev->drm.dev; + + ivpu_dbg(vdev, RPM, "Disable RPM count %d\n", 
atomic_read(&dev->power.usage_count)); + + pm_runtime_get_noresume(vdev->drm.dev); + pm_runtime_forbid(vdev->drm.dev); +} diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h new file mode 100644 index 00000000000000..dc1b3758e13f4a --- /dev/null +++ b/drivers/accel/ivpu/ivpu_pm.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#ifndef __IVPU_PM_H__ +#define __IVPU_PM_H__ + +#include <linux/types.h> + +struct ivpu_device; + +struct ivpu_pm_info { + struct ivpu_device *vdev; + struct work_struct recovery_work; + atomic_t in_reset; + bool is_warmboot; + u32 suspend_reschedule_counter; +}; + +int ivpu_pm_init(struct ivpu_device *vdev); +void ivpu_pm_enable(struct ivpu_device *vdev); +void ivpu_pm_disable(struct ivpu_device *vdev); + +int ivpu_pm_suspend_cb(struct device *dev); +int ivpu_pm_resume_cb(struct device *dev); +int ivpu_pm_runtime_suspend_cb(struct device *dev); +int ivpu_pm_runtime_resume_cb(struct device *dev); + +void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev); +void ivpu_pm_reset_done_cb(struct pci_dev *pdev); + +int __must_check ivpu_rpm_get(struct ivpu_device *vdev); +void ivpu_rpm_put(struct ivpu_device *vdev); + +void ivpu_pm_schedule_recovery(struct ivpu_device *vdev); + +#endif /* __IVPU_PM_H__ */ diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h new file mode 100644 index 00000000000000..6b71be92ba6538 --- /dev/null +++ b/drivers/accel/ivpu/vpu_boot_api.h @@ -0,0 +1,349 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#ifndef VPU_BOOT_API_H +#define VPU_BOOT_API_H + +/* + * =========== FW API version information beginning ================ + * The below values will be used to construct the version info this way: + * fw_bin_header->api_version[VPU_BOOT_API_VER_ID] = (VPU_BOOT_API_VER_MAJOR << 16) | + * VPU_BOOT_API_VER_MINOR; + * VPU_BOOT_API_VER_PATCH will be ignored. KMD compatibility is not affected if this changes. + */ + +/* + * Major version changes that break backward compatibility. + * Major version must start from 1 and can only be incremented. + */ +#define VPU_BOOT_API_VER_MAJOR 3 + +/* + * Minor version changes when API backward compatibility is preserved. + * Resets to 0 if Major version is incremented. 
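+ * For example, with the current MAJOR and MINOR values the packed version + * word is (3 << 16) | 12 = 0x0003000c. 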
+ */ +#define VPU_BOOT_API_VER_MINOR 12 + +/* + * API header changed (field names, documentation, formatting) but API itself has not been changed + */ +#define VPU_BOOT_API_VER_PATCH 2 + +/* + * Index in the API version table + * Must be unique for each API + */ +#define VPU_BOOT_API_VER_INDEX 0 +/* ------------ FW API version information end ---------------------*/ + +#pragma pack(push, 1) + +/* + * Firmware image header format + */ +#define VPU_FW_HEADER_SIZE 4096 +#define VPU_FW_HEADER_VERSION 0x1 +#define VPU_FW_VERSION_SIZE 32 +#define VPU_FW_API_VER_NUM 16 + +struct vpu_firmware_header { + u32 header_version; + u32 image_format; + u64 image_load_address; + u32 image_size; + u64 entry_point; + u8 vpu_version[VPU_FW_VERSION_SIZE]; + u32 compression_type; + u64 firmware_version_load_address; + u32 firmware_version_size; + u64 boot_params_load_address; + u32 api_version[VPU_FW_API_VER_NUM]; + /* Size of memory required for firmware execution */ + u32 runtime_size; + u32 shave_nn_fw_size; +}; + +/* + * Firmware boot parameters format + */ + +#define VPU_BOOT_PLL_COUNT 3 +#define VPU_BOOT_PLL_OUT_COUNT 4 + +/** Values for boot_type field */ +#define VPU_BOOT_TYPE_COLDBOOT 0 +#define VPU_BOOT_TYPE_WARMBOOT 1 + +/** Value for magic field */ +#define VPU_BOOT_PARAMS_MAGIC 0x10000 + +/** VPU scheduling mode. By default, OS scheduling is used. */ +#define VPU_SCHEDULING_MODE_OS 0 +#define VPU_SCHEDULING_MODE_HW 1 + +enum VPU_BOOT_L2_CACHE_CFG_TYPE { + VPU_BOOT_L2_CACHE_CFG_UPA = 0, + VPU_BOOT_L2_CACHE_CFG_NN = 1, + VPU_BOOT_L2_CACHE_CFG_NUM = 2 +}; + +/** + * Logging destinations. + * + * Logging output can be directed to different logging destinations. This enum + * defines the list of logging destinations supported by the VPU firmware (NOTE: + * a specific VPU FW binary may support only a subset of such output + * destinations, depending on the target platform and compile options). + */ +enum vpu_trace_destination { + VPU_TRACE_DESTINATION_PIPEPRINT = 0x1, + VPU_TRACE_DESTINATION_VERBOSE_TRACING = 0x2, + VPU_TRACE_DESTINATION_NORTH_PEAK = 0x4, +}; + +/* + * Processor bit shifts (for loggable HW components). + */ +#define VPU_TRACE_PROC_BIT_ARM 0 +#define VPU_TRACE_PROC_BIT_LRT 1 +#define VPU_TRACE_PROC_BIT_LNN 2 +#define VPU_TRACE_PROC_BIT_SHV_0 3 +#define VPU_TRACE_PROC_BIT_SHV_1 4 +#define VPU_TRACE_PROC_BIT_SHV_2 5 +#define VPU_TRACE_PROC_BIT_SHV_3 6 +#define VPU_TRACE_PROC_BIT_SHV_4 7 +#define VPU_TRACE_PROC_BIT_SHV_5 8 +#define VPU_TRACE_PROC_BIT_SHV_6 9 +#define VPU_TRACE_PROC_BIT_SHV_7 10 +#define VPU_TRACE_PROC_BIT_SHV_8 11 +#define VPU_TRACE_PROC_BIT_SHV_9 12 +#define VPU_TRACE_PROC_BIT_SHV_10 13 +#define VPU_TRACE_PROC_BIT_SHV_11 14 +#define VPU_TRACE_PROC_BIT_SHV_12 15 +#define VPU_TRACE_PROC_BIT_SHV_13 16 +#define VPU_TRACE_PROC_BIT_SHV_14 17 +#define VPU_TRACE_PROC_BIT_SHV_15 18 +#define VPU_TRACE_PROC_BIT_ACT_SHV_0 19 +#define VPU_TRACE_PROC_BIT_ACT_SHV_1 20 +#define VPU_TRACE_PROC_BIT_ACT_SHV_2 21 +#define VPU_TRACE_PROC_BIT_ACT_SHV_3 22 +#define VPU_TRACE_PROC_NO_OF_HW_DEVS 23 + +/* KMB HW component IDs are sequential, so define first and last IDs. 
*/ +#define VPU_TRACE_PROC_BIT_KMB_FIRST VPU_TRACE_PROC_BIT_LRT +#define VPU_TRACE_PROC_BIT_KMB_LAST VPU_TRACE_PROC_BIT_SHV_15 + +struct vpu_boot_l2_cache_config { + u8 use; + u8 cfg; +}; + +struct vpu_warm_boot_section { + u32 src; + u32 dst; + u32 size; + u32 core_id; + u32 is_clear_op; +}; + +struct vpu_boot_params { + u32 magic; + u32 vpu_id; + u32 vpu_count; + u32 pad0[5]; + /* Clock frequencies: 0x20 - 0xFF */ + u32 frequency; + u32 pll[VPU_BOOT_PLL_COUNT][VPU_BOOT_PLL_OUT_COUNT]; + u32 perf_clk_frequency; + u32 pad1[42]; + /* Memory regions: 0x100 - 0x1FF */ + u64 ipc_header_area_start; + u32 ipc_header_area_size; + u64 shared_region_base; + u32 shared_region_size; + u64 ipc_payload_area_start; + u32 ipc_payload_area_size; + u64 global_aliased_pio_base; + u32 global_aliased_pio_size; + u32 autoconfig; + struct vpu_boot_l2_cache_config cache_defaults[VPU_BOOT_L2_CACHE_CFG_NUM]; + u64 global_memory_allocator_base; + u32 global_memory_allocator_size; + /** + * ShaveNN FW section VPU base address + * On VPU2.7 HW this address must be within 2GB range starting from L2C_PAGE_TABLE base + */ + u64 shave_nn_fw_base; + u64 save_restore_ret_address; /* stores the address of FW's restore entry point */ + u32 pad2[43]; + /* IRQ re-direct numbers: 0x200 - 0x2FF */ + s32 watchdog_irq_mss; + s32 watchdog_irq_nce; + /* ARM -> VPU doorbell interrupt. ARM is notifying VPU of async command or compute job. */ + u32 host_to_vpu_irq; + /* VPU -> ARM job done interrupt. VPU is notifying ARM of compute job completion. */ + u32 job_done_irq; + /* VPU -> ARM IRQ line to use to request MMU update. */ + u32 mmu_update_request_irq; + /* ARM -> VPU IRQ line to use to notify of MMU update completion. */ + u32 mmu_update_done_irq; + /* ARM -> VPU IRQ line to use to request power level change. */ + u32 set_power_level_irq; + /* VPU -> ARM IRQ line to use to notify of power level change completion. */ + u32 set_power_level_done_irq; + /* VPU -> ARM IRQ line to use to notify of VPU idle state change */ + u32 set_vpu_idle_update_irq; + /* VPU -> ARM IRQ line to use to request counter reset. */ + u32 metric_query_event_irq; + /* ARM -> VPU IRQ line to use to notify of counter reset completion. */ + u32 metric_query_event_done_irq; + /* VPU -> ARM IRQ line to use to notify of preemption completion. */ + u32 preemption_done_irq; + /* Padding. */ + u32 pad3[52]; + /* Silicon information: 0x300 - 0x3FF */ + u32 host_version_id; + u32 si_stepping; + u64 device_id; + u64 feature_exclusion; + u64 sku; + /** PLL ratio for minimum clock frequency */ + u32 min_freq_pll_ratio; + /** PLL ratio for maximum clock frequency */ + u32 max_freq_pll_ratio; + /** + * Initial log level threshold (messages with log level severity less than + * the threshold will not be logged); applies to every enabled logging + * destination and loggable HW component. See 'mvLog_t' enum for acceptable + * values. + */ + u32 default_trace_level; + u32 boot_type; + u64 punit_telemetry_sram_base; + u64 punit_telemetry_sram_size; + u32 vpu_telemetry_enable; + u64 crit_tracing_buff_addr; + u32 crit_tracing_buff_size; + u64 verbose_tracing_buff_addr; + u32 verbose_tracing_buff_size; + u64 verbose_tracing_sw_component_mask; /* TO BE REMOVED */ + /** + * Mask of destinations to which logging messages are delivered; bitwise OR + * of values defined in vpu_trace_destination enum. + */ + u32 trace_destination_mask; + /** + * Mask of hardware components for which logging is enabled; bitwise OR of + * bits defined by the VPU_TRACE_PROC_BIT_* macros. 
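+ * For example, (1ull << VPU_TRACE_PROC_BIT_LRT) | (1ull << VPU_TRACE_PROC_BIT_LNN) + * enables logging only for the LRT and LNN processors. 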
+ */ + u64 trace_hw_component_mask; + /** Mask of trace message formats supported by the driver */ + u64 tracing_buff_message_format_mask; + u64 trace_reserved_1[2]; + /** + * Period at which the VPU reads the temp sensor values into MMIO, on + * platforms where that is necessary (in ms). 0 to disable reads. + */ + u32 temp_sensor_period_ms; + /** PLL ratio for efficient clock frequency */ + u32 pn_freq_pll_ratio; + u32 pad4[28]; + /* Warm boot information: 0x400 - 0x43F */ + u32 warm_boot_sections_count; + u32 warm_boot_start_address_reference; + u32 warm_boot_section_info_address_offset; + u32 pad5[13]; + /* Power States transitions timestamps: 0x440 - 0x46F*/ + struct { + /* VPU_IDLE -> VPU_ACTIVE transition initiated timestamp */ + u64 vpu_active_state_requested; + /* VPU_IDLE -> VPU_ACTIVE transition completed timestamp */ + u64 vpu_active_state_achieved; + /* VPU_ACTIVE -> VPU_IDLE transition initiated timestamp */ + u64 vpu_idle_state_requested; + /* VPU_ACTIVE -> VPU_IDLE transition completed timestamp */ + u64 vpu_idle_state_achieved; + /* VPU_IDLE -> VPU_STANDBY transition initiated timestamp */ + u64 vpu_standby_state_requested; + /* VPU_IDLE -> VPU_STANDBY transition completed timestamp */ + u64 vpu_standby_state_achieved; + } power_states_timestamps; + /* VPU scheduling mode. Values defined by VPU_SCHEDULING_MODE_* macros. */ + u32 vpu_scheduling_mode; + /* Present call period in milliseconds. */ + u32 vpu_focus_present_timer_ms; + /* Unused/reserved: 0x478 - 0xFFF */ + u32 pad6[738]; +}; + +/* + * Magic numbers set between host and vpu to detect corruption of tracing init + */ + +#define VPU_TRACING_BUFFER_CANARY (0xCAFECAFE) + +/* Tracing buffer message format definitions */ +#define VPU_TRACING_FORMAT_STRING 0 +#define VPU_TRACING_FORMAT_MIPI 2 +/* + * Header of the tracing buffer. + * The below defined header will be stored at the beginning of + * each allocated tracing buffer, followed by a series of 256-byte + * ASCII trace message entries. + */ +struct vpu_tracing_buffer_header { + /** + * Magic number set by host to detect corruption + * @see VPU_TRACING_BUFFER_CANARY + */ + u32 host_canary_start; + /* offset from start of buffer for trace entries */ + u32 read_index; + u32 pad_to_cache_line_size_0[14]; + /* End of first cache line */ + + /** + * Magic number set by host to detect corruption + * @see VPU_TRACING_BUFFER_CANARY + */ + u32 vpu_canary_start; + /* offset from start of buffer for the next write */ + u32 write_index; + /* counter for buffer wrapping */ + u32 wrap_count; + /* legacy field - do not use */ + u32 reserved_0; + /** + * Size of the log buffer including this header (@header_size) and space + * reserved for all messages. If @alignment is greater than 0, @size + * must be a multiple of @alignment. 
+ */ + u32 size; + /* Header version */ + u16 header_version; + /* Header size */ + u16 header_size; + /* + * Format of the messages in the trace buffer + * 0 - null terminated string + * 1 - size + null terminated string + * 2 - MIPI-SysT encoding + */ + u32 format; + /* + * Message alignment + * 0 - messages are placed one after another + * n - every message starts at an offset that is a multiple of n + */ + u32 alignment; /* 64, 128, 256 */ + /* Name of the logging entity, e.g. "LRT", "LNN", "SHV0", etc */ + char name[16]; + u32 pad_to_cache_line_size_1[4]; + /* End of second cache line */ +}; + +#pragma pack(pop) + +#endif diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h new file mode 100644 index 00000000000000..2949ec8365bd54 --- /dev/null +++ b/drivers/accel/ivpu/vpu_jsm_api.h @@ -0,0 +1,1008 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +/** + * @file + * @brief JSM shared definitions + * + * @ingroup Jsm + * @brief JSM shared definitions + * @{ + */ +#ifndef VPU_JSM_API_H +#define VPU_JSM_API_H + +/* + * Major version changes that break backward compatibility + */ +#define VPU_JSM_API_VER_MAJOR 3 + +/* + * Minor version changes when API backward compatibility is preserved. + */ +#define VPU_JSM_API_VER_MINOR 0 + +/* + * API header changed (field names, documentation, formatting) but API itself has not been changed + */ +#define VPU_JSM_API_VER_PATCH 1 + +/* + * Index in the API version table + */ +#define VPU_JSM_API_VER_INDEX 4 + +/* + * Number of Priority Bands for Hardware Scheduling + * Bands: RealTime, Focus, Normal, Idle + */ +#define VPU_HWS_NUM_PRIORITY_BANDS 4 + +/* Max number of impacted contexts that can be dealt with by the engine reset command */ +#define VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS 3 + +/** Pack the API structures for now, once alignment issues are fixed this can be removed */ +#pragma pack(push, 1) + +/* + * Engine indexes. + */ +#define VPU_ENGINE_COMPUTE 0 +#define VPU_ENGINE_COPY 1 +#define VPU_ENGINE_NB 2 + +/* + * VPU status values. + */ +#define VPU_JSM_STATUS_SUCCESS 0x0U +#define VPU_JSM_STATUS_PARSING_ERR 0x1U +#define VPU_JSM_STATUS_PROCESSING_ERR 0x2U +#define VPU_JSM_STATUS_PREEMPTED 0x3U +#define VPU_JSM_STATUS_ABORTED 0x4U +#define VPU_JSM_STATUS_USER_CTX_VIOL_ERR 0x5U +#define VPU_JSM_STATUS_GLOBAL_CTX_VIOL_ERR 0x6U +#define VPU_JSM_STATUS_MVNCI_WRONG_INPUT_FORMAT 0x7U +#define VPU_JSM_STATUS_MVNCI_UNSUPPORTED_NETWORK_ELEMENT 0x8U +#define VPU_JSM_STATUS_MVNCI_INVALID_HANDLE 0x9U +#define VPU_JSM_STATUS_MVNCI_OUT_OF_RESOURCES 0xAU +#define VPU_JSM_STATUS_MVNCI_NOT_IMPLEMENTED 0xBU +#define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU +/* Job status returned when the job was preempted mid-inference */ +#define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU + +/* + * Host <-> VPU IPC channels. + * ASYNC commands use a high priority channel, other messages use low-priority ones. + */ +#define VPU_IPC_CHAN_ASYNC_CMD 0 +#define VPU_IPC_CHAN_GEN_CMD 10 +#define VPU_IPC_CHAN_JOB_RET 11 + +/* + * Job flags bit masks. + */ +#define VPU_JOB_FLAGS_NULL_SUBMISSION_MASK 0x00000001 + +/* + * Sizes of the reserved areas in jobs, in bytes. + */ +#define VPU_JOB_RESERVED_BYTES 16 +/* + * Sizes of the reserved areas in job queues, in bytes. + */ +#define VPU_JOB_QUEUE_RESERVED_BYTES 52 + +/* + * Max length (including trailing NULL char) of trace entity name (e.g., the + * name of a logging destination or a loggable HW component). 
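+ * The actual name strings are defined by the firmware and can be queried + * at runtime with VPU_JSM_MSG_TRACE_GET_NAME. 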
+ */
+#define VPU_TRACE_ENTITY_NAME_MAX_LEN 32
+
+/*
+ * Max length (including trailing NULL char) of a dyndbg command.
+ *
+ * NOTE: 96 is used so that the size of 'struct vpu_ipc_msg' in the JSM API is
+ * 128 bytes (multiple of 64 bytes, the cache line size).
+ */
+#define VPU_DYNDBG_CMD_MAX_LEN 96
+
+/*
+ * Job format.
+ */
+struct vpu_job_queue_entry {
+ u64 batch_buf_addr; /**< Address of VPU commands batch buffer */
+ u32 job_id; /**< Job ID */
+ u32 flags; /**< Flags bit field, see VPU_JOB_FLAGS_* above */
+ u64 root_page_table_addr; /**< Address of root page table to use for this job */
+ u64 root_page_table_update_counter; /**< Page tables update events counter */
+ u64 preemption_buffer_address; /**< Address of the preemption buffer to use for this job */
+ u64 preemption_buffer_size; /**< Size of the preemption buffer to use for this job */
+ u8 reserved_0[VPU_JOB_RESERVED_BYTES];
+};
+
+/*
+ * Job queue control registers.
+ */
+struct vpu_job_queue_header {
+ u32 engine_idx;
+ u32 head;
+ u32 tail;
+ u8 reserved_0[VPU_JOB_QUEUE_RESERVED_BYTES];
+};
+
+/*
+ * Job queue format.
+ */
+struct vpu_job_queue {
+ struct vpu_job_queue_header header;
+ struct vpu_job_queue_entry job[];
+};
+
+/**
+ * Logging entity types.
+ *
+ * This enum defines the different types of entities involved in logging.
+ */
+enum vpu_trace_entity_type {
+ /** Logging destination (entity where logs can be stored / printed). */
+ VPU_TRACE_ENTITY_TYPE_DESTINATION = 1,
+ /** Loggable HW component (HW entity that can be logged). */
+ VPU_TRACE_ENTITY_TYPE_HW_COMPONENT = 2,
+};
+
+/*
+ * Host <-> VPU IPC messages types.
+ */
+enum vpu_ipc_msg_type {
+ VPU_JSM_MSG_UNKNOWN = 0xFFFFFFFF,
+ /* IPC Host -> Device, Async commands */
+ VPU_JSM_MSG_ASYNC_CMD = 0x1100,
+ VPU_JSM_MSG_ENGINE_RESET = VPU_JSM_MSG_ASYNC_CMD,
+ VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101,
+ VPU_JSM_MSG_REGISTER_DB = 0x1102,
+ VPU_JSM_MSG_UNREGISTER_DB = 0x1103,
+ VPU_JSM_MSG_QUERY_ENGINE_HB = 0x1104,
+ VPU_JSM_MSG_GET_POWER_LEVEL_COUNT = 0x1105,
+ VPU_JSM_MSG_GET_POWER_LEVEL = 0x1106,
+ VPU_JSM_MSG_SET_POWER_LEVEL = 0x1107,
+ /* @deprecated */
+ VPU_JSM_MSG_METRIC_STREAMER_OPEN = 0x1108,
+ /* @deprecated */
+ VPU_JSM_MSG_METRIC_STREAMER_CLOSE = 0x1109,
+ /** Configure logging (used to modify configuration passed in boot params). */
+ VPU_JSM_MSG_TRACE_SET_CONFIG = 0x110a,
+ /** Return current logging configuration. */
+ VPU_JSM_MSG_TRACE_GET_CONFIG = 0x110b,
+ /**
+ * Get masks of destinations and HW components supported by the firmware
+ * (may vary between HW generations and FW compile time configurations).
+ */
+ VPU_JSM_MSG_TRACE_GET_CAPABILITY = 0x110c,
+ /** Get the name of a destination or HW component. */
+ VPU_JSM_MSG_TRACE_GET_NAME = 0x110d,
+ /**
+ * Release resources associated with a host ssid. All jobs that belong to
+ * the host_ssid are aborted and removed from internal scheduling queues.
+ * All doorbells assigned to the host_ssid are unregistered and any
+ * internal FW resources belonging to the host_ssid are released.
+ */
+ VPU_JSM_MSG_SSID_RELEASE = 0x110e,
+ /**
+ * Start collecting metric data.
+ * @see vpu_jsm_metric_streamer_start
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_START = 0x110f,
+ /**
+ * Stop collecting metric data. This command will return success if it is called
+ * for a metric stream that has already been stopped or was never started.
+ * @see vpu_jsm_metric_streamer_stop
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_STOP = 0x1110,
+ /**
+ * Update current and next buffer for metric data collection.
This command can + * also be used to request information about the number of collected samples + * and the amount of data written to the buffer. + * @see vpu_jsm_metric_streamer_update + */ + VPU_JSM_MSG_METRIC_STREAMER_UPDATE = 0x1111, + /** + * Request description of selected metric groups and metric counters within + * each group. The VPU will write the description of groups and counters to + * the buffer specified in the command structure. + * @see vpu_jsm_metric_streamer_start + */ + VPU_JSM_MSG_METRIC_STREAMER_INFO = 0x1112, + /** Control command: Priority band setup */ + VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP = 0x1113, + /** Control command: Create command queue */ + VPU_JSM_MSG_CREATE_CMD_QUEUE = 0x1114, + /** Control command: Destroy command queue */ + VPU_JSM_MSG_DESTROY_CMD_QUEUE = 0x1115, + /** Control command: Set context scheduling properties */ + VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES = 0x1116, + /* + * Register a doorbell to notify VPU of new work. The doorbell may later be + * deallocated or reassigned to another context. + */ + VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117, + /* IPC Host -> Device, General commands */ + VPU_JSM_MSG_GENERAL_CMD = 0x1200, + VPU_JSM_MSG_BLOB_DEINIT = VPU_JSM_MSG_GENERAL_CMD, + /** + * Control dyndbg behavior by executing a dyndbg command; equivalent to + * Linux command: `echo '' > /dynamic_debug/control`. + */ + VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201, + /* IPC Device -> Host, Job completion */ + VPU_JSM_MSG_JOB_DONE = 0x2100, + /* IPC Device -> Host, Async command completion */ + VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200, + VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE, + VPU_JSM_MSG_ENGINE_PREEMPT_DONE = 0x2201, + VPU_JSM_MSG_REGISTER_DB_DONE = 0x2202, + VPU_JSM_MSG_UNREGISTER_DB_DONE = 0x2203, + VPU_JSM_MSG_QUERY_ENGINE_HB_DONE = 0x2204, + VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE = 0x2205, + VPU_JSM_MSG_GET_POWER_LEVEL_DONE = 0x2206, + VPU_JSM_MSG_SET_POWER_LEVEL_DONE = 0x2207, + /* @deprecated */ + VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE = 0x2208, + /* @deprecated */ + VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE = 0x2209, + /** Response to VPU_JSM_MSG_TRACE_SET_CONFIG. */ + VPU_JSM_MSG_TRACE_SET_CONFIG_RSP = 0x220a, + /** Response to VPU_JSM_MSG_TRACE_GET_CONFIG. */ + VPU_JSM_MSG_TRACE_GET_CONFIG_RSP = 0x220b, + /** Response to VPU_JSM_MSG_TRACE_GET_CAPABILITY. */ + VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP = 0x220c, + /** Response to VPU_JSM_MSG_TRACE_GET_NAME. */ + VPU_JSM_MSG_TRACE_GET_NAME_RSP = 0x220d, + /** Response to VPU_JSM_MSG_SSID_RELEASE. */ + VPU_JSM_MSG_SSID_RELEASE_DONE = 0x220e, + /** + * Response to VPU_JSM_MSG_METRIC_STREAMER_START. + * VPU will return an error result if metric collection cannot be started, + * e.g. when the specified metric mask is invalid. + * @see vpu_jsm_metric_streamer_done + */ + VPU_JSM_MSG_METRIC_STREAMER_START_DONE = 0x220f, + /** + * Response to VPU_JSM_MSG_METRIC_STREAMER_STOP. + * Returns information about collected metric data. + * @see vpu_jsm_metric_streamer_done + */ + VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE = 0x2210, + /** + * Response to VPU_JSM_MSG_METRIC_STREAMER_UPDATE. + * Returns information about collected metric data. + * @see vpu_jsm_metric_streamer_done + */ + VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE = 0x2211, + /** + * Response to VPU_JSM_MSG_METRIC_STREAMER_INFO. + * Returns a description of the metric groups and metric counters. 
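
The head/tail pair in struct vpu_job_queue_header above behaves like a conventional ring buffer. A minimal enqueue sketch follows; it is illustrative, not part of the patch, and the keep-one-slot-free full test and the slot_count parameter are assumptions of the sketch:

    static int enqueue_job(struct vpu_job_queue *q, u32 slot_count,
                           const struct vpu_job_queue_entry *job)
    {
        u32 next = (q->header.tail + 1) % slot_count;

        if (next == q->header.head)
            return -EBUSY; /* ring is full */

        q->job[q->header.tail] = *job;
        q->header.tail = next; /* publish; the VPU consumes at head */
        return 0;
    }

After publishing the new tail, the host rings the doorbell it registered via VPU_JSM_MSG_REGISTER_DB so the firmware notices the new work.
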
+ * @see vpu_jsm_metric_streamer_done + */ + VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE = 0x2212, + /** + * Asynchronous event sent from the VPU to the host either when the current + * metric buffer is full or when the VPU has collected a multiple of + * @notify_sample_count samples as indicated through the start command + * (VPU_JSM_MSG_METRIC_STREAMER_START). Returns information about collected + * metric data. + * @see vpu_jsm_metric_streamer_done + */ + VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION = 0x2213, + /** Response to control command: Priority band setup */ + VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP = 0x2214, + /** Response to control command: Create command queue */ + VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP = 0x2215, + /** Response to control command: Destroy command queue */ + VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216, + /** Response to control command: Set context scheduling properties */ + VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217, + /* IPC Device -> Host, General command completion */ + VPU_JSM_MSG_GENERAL_CMD_DONE = 0x2300, + VPU_JSM_MSG_BLOB_DEINIT_DONE = VPU_JSM_MSG_GENERAL_CMD_DONE, + /** Response to VPU_JSM_MSG_DYNDBG_CONTROL. */ + VPU_JSM_MSG_DYNDBG_CONTROL_RSP = 0x2301, +}; + +enum vpu_ipc_msg_status { VPU_JSM_MSG_FREE, VPU_JSM_MSG_ALLOCATED }; + +/* + * Host <-> LRT IPC message payload definitions + */ +struct vpu_ipc_msg_payload_engine_reset { + /* Engine to be reset. */ + u32 engine_idx; + /* Reserved */ + u32 reserved_0; +}; + +struct vpu_ipc_msg_payload_engine_preempt { + /* Engine to be preempted. */ + u32 engine_idx; + /* ID of the preemption request. */ + u32 preempt_id; +}; + +/* + * @brief Register doorbell command structure. + * This structure supports doorbell registration for only OS scheduling. + * @see VPU_JSM_MSG_REGISTER_DB + */ +struct vpu_ipc_msg_payload_register_db { + /* Index of the doorbell to register. */ + u32 db_idx; + /* Reserved */ + u32 reserved_0; + /* Virtual address in Global GTT pointing to the start of job queue. */ + u64 jobq_base; + /* Size of the job queue in bytes. */ + u32 jobq_size; + /* Host sub-stream ID for the context assigned to the doorbell. */ + u32 host_ssid; +}; + +/** + * @brief Unregister doorbell command structure. + * Request structure to unregister a doorbell for both HW and OS scheduling. + * @see VPU_JSM_MSG_UNREGISTER_DB + */ +struct vpu_ipc_msg_payload_unregister_db { + /* Index of the doorbell to unregister. */ + u32 db_idx; + /* Reserved */ + u32 reserved_0; +}; + +struct vpu_ipc_msg_payload_query_engine_hb { + /* Engine to return heartbeat value. */ + u32 engine_idx; + /* Reserved */ + u32 reserved_0; +}; + +struct vpu_ipc_msg_payload_power_level { + /** + * Requested power level. The power level value is in the + * range [0, power_level_count-1] where power_level_count + * is the number of available power levels as returned by + * the get power level count command. A power level of 0 + * corresponds to the maximum possible power level, while + * power_level_count-1 corresponds to the minimum possible + * power level. Values outside of this range are not + * considered to be valid. + */ + u32 power_level; + /* Reserved */ + u32 reserved_0; +}; + +struct vpu_ipc_msg_payload_ssid_release { + /* Host sub-stream ID for the context to be released. */ + u32 host_ssid; + /* Reserved */ + u32 reserved_0; +}; + +/** + * @brief Metric streamer start command structure. + * This structure is also used with VPU_JSM_MSG_METRIC_STREAMER_INFO to request metric + * groups and metric counters description from the firmware. 
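
For reference, filling the OS-scheduling doorbell registration payload defined earlier might look as follows. This is a sketch only, not part of the patch; struct vpu_jsm_msg is the message wrapper defined later in this header:

    static void fill_register_db(struct vpu_jsm_msg *msg, u32 db_idx, u32 ssid,
                                 u64 jobq_base, u32 jobq_size)
    {
        msg->type = VPU_JSM_MSG_REGISTER_DB;
        msg->payload.register_db.db_idx = db_idx;
        msg->payload.register_db.jobq_base = jobq_base; /* GGTT VA of the queue */
        msg->payload.register_db.jobq_size = jobq_size;
        msg->payload.register_db.host_ssid = ssid;
    }

The request would then be sent on the high-priority VPU_IPC_CHAN_ASYNC_CMD channel.
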
+ * @see VPU_JSM_MSG_METRIC_STREAMER_START
+ * @see VPU_JSM_MSG_METRIC_STREAMER_INFO
+ */
+struct vpu_jsm_metric_streamer_start {
+ /**
+ * Bitmask to select the desired metric groups.
+ * A metric group can belong only to one metric streamer instance at a time.
+ * Since each metric streamer instance has a unique set of metric groups, it
+ * can also identify a metric streamer instance if more than one instance was
+ * started. If the VPU device does not support multiple metric streamer instances,
+ * then VPU_JSM_MSG_METRIC_STREAMER_START will return an error even if the second
+ * instance has different groups from the first.
+ */
+ u64 metric_group_mask;
+ /** Sampling rate in nanoseconds. */
+ u64 sampling_rate;
+ /**
+ * If > 0 the VPU will send a VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION message
+ * each time @notify_sample_count samples are collected or dropped by the VPU.
+ * If set to UINT_MAX the VPU will only generate a notification when the metric
+ * buffer is full. If set to 0 the VPU will never generate a notification.
+ */
+ u32 notify_sample_count;
+ u32 reserved_0;
+ /**
+ * Address and size of the buffer where the VPU will write metric data. The
+ * VPU writes all counters from enabled metric groups one after another. If
+ * there is no space left to write data at the next sample period the VPU
+ * will switch to the next buffer (@see next_buffer_addr) and will optionally
+ * send a notification to the host driver if @notify_sample_count is non-zero.
+ * If @next_buffer_addr is NULL the VPU will stop collecting metric data.
+ */
+ u64 buffer_addr;
+ u64 buffer_size;
+ /**
+ * Address and size of the next buffer to write metric data to after the initial
+ * buffer is full. If the address is NULL the VPU will stop collecting metric
+ * data.
+ */
+ u64 next_buffer_addr;
+ u64 next_buffer_size;
+};
+
+/**
+ * @brief Metric streamer stop command structure.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_STOP
+ */
+struct vpu_jsm_metric_streamer_stop {
+ /** Bitmask to select the desired metric groups. */
+ u64 metric_group_mask;
+};
+
+/**
+ * Provide VPU FW with buffers to write metric data.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_UPDATE
+ */
+struct vpu_jsm_metric_streamer_update {
+ /** Metric group mask that identifies metric streamer instance. */
+ u64 metric_group_mask;
+ /**
+ * Address and size of the buffer where the VPU will write metric data. If
+ * the buffer address is 0 or same as the currently used buffer the VPU will
+ * continue writing metric data to the current buffer. In this case the
+ * buffer size is ignored and the size of the current buffer is unchanged.
+ * If the address is non-zero and differs from the current buffer address the
+ * VPU will immediately switch data collection to the new buffer.
+ */
+ u64 buffer_addr;
+ u64 buffer_size;
+ /**
+ * Address and size of the next buffer to write metric data to after the initial
+ * buffer is full. If the address is NULL the VPU will stop collecting metric
+ * data but will continue to record dropped samples.
+ *
+ * Note that a hazard is possible if both buffer_addr and next_buffer_addr
+ * are non-zero in the same update request. It is the host's responsibility
+ * to ensure that both addresses make sense even if the VPU just switched to
+ * writing samples from the current to the next buffer.
+ */
+ u64 next_buffer_addr;
+ u64 next_buffer_size;
+};
+
+struct vpu_ipc_msg_payload_blob_deinit {
+ /* 64-bit unique ID for the blob to be de-initialized.
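
A double-buffered start request per the semantics above could be filled like this. Sketch only; the group mask, rate and counts are illustrative values, not defaults from the patch:

    static void fill_streamer_start(struct vpu_jsm_metric_streamer_start *s,
                                    u64 buf_a, u64 buf_b, u64 buf_bytes)
    {
        s->metric_group_mask = 1ULL << 0; /* enable metric group 0 only */
        s->sampling_rate = 1000000;       /* one sample per millisecond */
        s->notify_sample_count = 64;      /* notification every 64 samples */
        s->buffer_addr = buf_a;
        s->buffer_size = buf_bytes;
        s->next_buffer_addr = buf_b;      /* VPU switches here when A fills */
        s->next_buffer_size = buf_bytes;
    }

Once running, the host keeps the stream fed by re-arming next_buffer_addr through VPU_JSM_MSG_METRIC_STREAMER_UPDATE.
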
*/ + u64 blob_id; +}; + +struct vpu_ipc_msg_payload_job_done { + /* Engine to which the job was submitted. */ + u32 engine_idx; + /* Index of the doorbell to which the job was submitted */ + u32 db_idx; + /* ID of the completed job */ + u32 job_id; + /* Status of the completed job */ + u32 job_status; + /* Host SSID */ + u32 host_ssid; + /* Zero Padding */ + u32 reserved_0; + /* Command queue id */ + u64 cmdq_id; +}; + +struct vpu_jsm_engine_reset_context { + /* Host SSID */ + u32 host_ssid; + /* Zero Padding */ + u32 reserved_0; + /* Command queue id */ + u64 cmdq_id; + /* Flags: 0: cause of hang; 1: collateral damage of reset */ + u64 flags; +}; + +struct vpu_ipc_msg_payload_engine_reset_done { + /* Engine ordinal */ + u32 engine_idx; + /* Number of impacted contexts */ + u32 num_impacted_contexts; + /* Array of impacted command queue ids and their flags */ + struct vpu_jsm_engine_reset_context + impacted_contexts[VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS]; +}; + +struct vpu_ipc_msg_payload_engine_preempt_done { + /* Engine preempted. */ + u32 engine_idx; + /* ID of the preemption request. */ + u32 preempt_id; +}; + +/** + * Response structure for register doorbell command for both OS + * and HW scheduling. + * @see VPU_JSM_MSG_REGISTER_DB + * @see VPU_JSM_MSG_HWS_REGISTER_DB + */ +struct vpu_ipc_msg_payload_register_db_done { + /* Index of the registered doorbell. */ + u32 db_idx; + /* Reserved */ + u32 reserved_0; +}; + +/** + * Response structure for unregister doorbell command for both OS + * and HW scheduling. + * @see VPU_JSM_MSG_UNREGISTER_DB + */ +struct vpu_ipc_msg_payload_unregister_db_done { + /* Index of the unregistered doorbell. */ + u32 db_idx; + /* Reserved */ + u32 reserved_0; +}; + +struct vpu_ipc_msg_payload_query_engine_hb_done { + /* Engine returning heartbeat value. */ + u32 engine_idx; + /* Reserved */ + u32 reserved_0; + /* Heartbeat value. */ + u64 heartbeat; +}; + +struct vpu_ipc_msg_payload_get_power_level_count_done { + /** + * Number of supported power levels. The maximum possible + * value of power_level_count is 16 but this may vary across + * implementations. + */ + u32 power_level_count; + /* Reserved */ + u32 reserved_0; + /** + * Power consumption limit for each supported power level in + * [0-100%] range relative to power level 0. + */ + u8 power_limit[16]; +}; + +struct vpu_ipc_msg_payload_blob_deinit_done { + /* 64-bit unique ID for the blob de-initialized. */ + u64 blob_id; +}; + +/* HWS priority band setup request / response */ +struct vpu_ipc_msg_payload_hws_priority_band_setup { + /* + * Grace period in 100ns units when preempting another priority band for + * this priority band + */ + u32 grace_period[VPU_HWS_NUM_PRIORITY_BANDS]; + /* + * Default quantum in 100ns units for scheduling across processes + * within a priority band + */ + u64 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS]; + /* + * Default grace period in 100ns units for processes that preempt each + * other within a priority band + */ + u64 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS]; + /* + * For normal priority band, specifies the target VPU percentage + * in situations when it's starved by the focus band. 
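
The band-setup payload (whose last field, normal_band_percentage, is declared just below) could be populated as in this sketch. All numeric values are illustrative; only the 100 ns unit comes from the field comments above:

    static void fill_band_setup(struct vpu_ipc_msg_payload_hws_priority_band_setup *p)
    {
        int band;

        for (band = 0; band < VPU_HWS_NUM_PRIORITY_BANDS; band++) {
            p->grace_period[band] = 10000;      /* 1 ms in 100 ns units */
            p->process_quantum[band] = 1000000; /* 100 ms */
            p->process_grace_period[band] = 10000;
        }
        p->normal_band_percentage = 10; /* VPU share guaranteed when starved */
    }
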
+ */
+ u32 normal_band_percentage;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+/* HWS create command queue request */
+struct vpu_ipc_msg_payload_hws_create_cmdq {
+ /* Process id */
+ u64 process_id;
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved;
+ /* Command queue id */
+ u64 cmdq_id;
+ /* Command queue base */
+ u64 cmdq_base;
+ /* Command queue size */
+ u32 cmdq_size;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+/* HWS create command queue response */
+struct vpu_ipc_msg_payload_hws_create_cmdq_rsp {
+ /* Process id */
+ u64 process_id;
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved;
+ /* Command queue id */
+ u64 cmdq_id;
+};
+
+/* HWS destroy command queue request / response */
+struct vpu_ipc_msg_payload_hws_destroy_cmdq {
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved;
+ /* Command queue id */
+ u64 cmdq_id;
+};
+
+/* HWS set context scheduling properties request / response */
+struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved_0;
+ /* Command queue id */
+ u64 cmdq_id;
+ /* Priority band to assign to work of this context */
+ u32 priority_band;
+ /* Within the realtime band, assigns a further priority */
+ u32 realtime_priority_level;
+ /* Priority relative to other contexts in the same process */
+ u32 in_process_priority;
+ /* Zero padding / Reserved */
+ u32 reserved_1;
+ /* Context quantum relative to other contexts of same priority in the same process */
+ u64 context_quantum;
+ /* Grace period when preempting context of the same priority within the same process */
+ u64 grace_period_same_priority;
+ /* Grace period when preempting context of a lower priority within the same process */
+ u64 grace_period_lower_priority;
+};
+
+/*
+ * @brief Register doorbell command structure.
+ * This structure supports doorbell registration for both HW and OS scheduling.
+ * Note: Queue base and size are added here so that the same structure can be used for
+ * OS scheduling and HW scheduling. For OS scheduling, cmdq_id will be ignored
+ * and cmdq_base and cmdq_size will be used. For HW scheduling, cmdq_base and cmdq_size will be
+ * ignored and cmdq_id is used.
+ * @see VPU_JSM_MSG_HWS_REGISTER_DB
+ */
+struct vpu_jsm_hws_register_db {
+ /* Index of the doorbell to register. */
+ u32 db_id;
+ /* Host sub-stream ID for the context assigned to the doorbell. */
+ u32 host_ssid;
+ /* ID of the command queue associated with the doorbell. */
+ u64 cmdq_id;
+ /* Virtual address pointing to the start of command queue. */
+ u64 cmdq_base;
+ /* Size of the command queue in bytes. */
+ u64 cmdq_size;
+};
+
+/**
+ * Payload for VPU_JSM_MSG_TRACE_SET_CONFIG[_RSP] and
+ * VPU_JSM_MSG_TRACE_GET_CONFIG_RSP messages.
+ *
+ * The payload is interpreted differently depending on the type of message:
+ *
+ * - For VPU_JSM_MSG_TRACE_SET_CONFIG, the payload specifies the desired
+ * logging configuration to be set.
+ *
+ * - For VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, the payload reports the logging
+ * configuration that was set after a VPU_JSM_MSG_TRACE_SET_CONFIG request.
+ * The host can compare this payload with the one it sent in the
+ * VPU_JSM_MSG_TRACE_SET_CONFIG request to check whether or not the
+ * configuration was set as desired.
+ *
+ * - For VPU_JSM_MSG_TRACE_GET_CONFIG_RSP, the payload reports the current
+ * logging configuration.
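
Given the request/response symmetry described above, a host can verify that the firmware accepted its configuration with a field-by-field comparison of the payload struct defined just below. A sketch, not part of the patch:

    static bool trace_config_accepted(const struct vpu_ipc_msg_payload_trace_config *req,
                                      const struct vpu_ipc_msg_payload_trace_config *rsp)
    {
        return req->trace_level == rsp->trace_level &&
               req->trace_destination_mask == rsp->trace_destination_mask &&
               req->trace_hw_component_mask == rsp->trace_hw_component_mask;
    }
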
+ */
+struct vpu_ipc_msg_payload_trace_config {
+ /**
+ * Logging level (currently set or to be set); see 'mvLog_t' enum for
+ * acceptable values. The specified logging level applies to all
+ * destinations and HW components.
+ */
+ u32 trace_level;
+ /**
+ * Bitmask of logging destinations (currently enabled or to be enabled);
+ * bitwise OR of values defined in logging_destination enum.
+ */
+ u32 trace_destination_mask;
+ /**
+ * Bitmask of loggable HW components (currently enabled or to be enabled);
+ * bitwise OR of values defined in loggable_hw_component enum.
+ */
+ u64 trace_hw_component_mask;
+ u64 reserved_0; /**< Reserved for future extensions. */
+};
+
+/**
+ * Payload for VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP messages.
+ */
+struct vpu_ipc_msg_payload_trace_capability_rsp {
+ u32 trace_destination_mask; /**< Bitmask of supported logging destinations. */
+ u32 reserved_0;
+ u64 trace_hw_component_mask; /**< Bitmask of supported loggable HW components. */
+ u64 reserved_1; /**< Reserved for future extensions. */
+};
+
+/**
+ * Payload for VPU_JSM_MSG_TRACE_GET_NAME requests.
+ */
+struct vpu_ipc_msg_payload_trace_get_name {
+ /**
+ * The type of the entity to query name for; see logging_entity_type for
+ * possible values.
+ */
+ u32 entity_type;
+ u32 reserved_0;
+ /**
+ * The ID of the entity to query name for; possible values depend on the
+ * entity type.
+ */
+ u64 entity_id;
+};
+
+/**
+ * Payload for VPU_JSM_MSG_TRACE_GET_NAME_RSP responses.
+ */
+struct vpu_ipc_msg_payload_trace_get_name_rsp {
+ /**
+ * The type of the entity whose name was queried; see logging_entity_type
+ * for possible values.
+ */
+ u32 entity_type;
+ u32 reserved_0;
+ /**
+ * The ID of the entity whose name was queried; possible values depend on
+ * the entity type.
+ */
+ u64 entity_id;
+ /** Reserved for future extensions. */
+ u64 reserved_1;
+ /** The name of the entity. */
+ char entity_name[VPU_TRACE_ENTITY_NAME_MAX_LEN];
+};
+
+/**
+ * Data sent from the VPU to the host in all metric streamer response messages
+ * and in asynchronous notification.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_START_DONE
+ * @see VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE
+ * @see VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE
+ * @see VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE
+ * @see VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION
+ */
+struct vpu_jsm_metric_streamer_done {
+ /** Metric group mask that identifies metric streamer instance. */
+ u64 metric_group_mask;
+ /**
+ * Size in bytes of single sample - total size of all enabled counters.
+ * Some VPU implementations may align sample_size to more than 8 bytes.
+ */
+ u32 sample_size;
+ u32 reserved_0;
+ /**
+ * Number of samples collected since the metric streamer was started.
+ * This will be 0 if the metric streamer was not started.
+ */
+ u32 samples_collected;
+ /**
+ * Number of samples dropped since the metric streamer was started. This
+ * is incremented every time the metric streamer is not able to write
+ * collected samples because the current buffer is full and there is no
+ * next buffer to switch to.
+ */
+ u32 samples_dropped;
+ /** Address of the buffer that contains the latest metric data. */
+ u64 buffer_addr;
+ /**
+ * Number of bytes written into the metric data buffer. In response to the
+ * VPU_JSM_MSG_METRIC_STREAMER_INFO request this field contains the size of
+ * all group and counter descriptors.
+ * The size is updated even if the buffer in the request was NULL or too
+ * small to hold descriptors of all counters.
+ */
+ u64 bytes_written;
+};
+
+/**
+ * Metric group description placed in the metric buffer after successful completion
+ * of the VPU_JSM_MSG_METRIC_STREAMER_INFO command. This is followed by one or more
+ * @vpu_jsm_metric_counter_descriptor records.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_INFO
+ */
+struct vpu_jsm_metric_group_descriptor {
+ /**
+ * Offset to the next metric group (8-byte aligned). If this offset is 0 this
+ * is the last descriptor. The value of metric_info_size must be greater than
+ * or equal to sizeof(struct vpu_jsm_metric_group_descriptor) + name_string_size
+ * + description_string_size and must be 8-byte aligned.
+ */
+ u32 next_metric_group_info_offset;
+ /**
+ * Offset to the first metric counter description record (8-byte aligned).
+ * @see vpu_jsm_metric_counter_descriptor
+ */
+ u32 next_metric_counter_info_offset;
+ /** Index of the group. This corresponds to bit index in metric_group_mask. */
+ u32 group_id;
+ /** Number of counters in the metric group. */
+ u32 num_counters;
+ /** Data size for all counters, must be a multiple of 8 bytes. */
+ u32 metric_group_data_size;
+ /**
+ * Metric group domain number. Multiple simultaneous metric groups from the
+ * same domain cannot be used.
+ */
+ u32 domain;
+ /**
+ * Counter name string size. The string must include a null termination character.
+ * The FW may use a fixed size name or send a different name for each counter.
+ * If the VPU uses fixed size strings, all characters from the end of the name
+ * to the end of the fixed size character array must be zeroed.
+ */
+ u32 name_string_size;
+ /** Counter description string size, @see name_string_size */
+ u32 description_string_size;
+ u64 reserved_0;
+ /**
+ * Right after this structure, the VPU writes the name and description of
+ * the metric group.
+ */
+};
+
+/**
+ * Metric counter description, placed in the buffer after vpu_jsm_metric_group_descriptor.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_INFO
+ */
+struct vpu_jsm_metric_counter_descriptor {
+ /**
+ * Offset to the next counter in a group (8-byte aligned). If this offset is
+ * 0 this is the last counter in the group.
+ */
+ u32 next_metric_counter_info_offset;
+ /**
+ * Offset to the counter data from the start of samples in this metric group.
+ * Note that metric_data_offset % metric_data_size must be 0.
+ */
+ u32 metric_data_offset;
+ /** Size of the metric counter data in bytes. */
+ u32 metric_data_size;
+ /** Metric tier, see Level Zero API for definitions. */
+ u32 tier;
+ /** Metric type, see set_metric_type_t for definitions. */
+ u32 metric_type;
+ /** Metric value type, see set_value_type_t for definitions. */
+ u32 metric_value_type;
+ /**
+ * Counter name string size. The string must include a null termination character.
+ * The FW may use a fixed size name or send a different name for each counter.
+ * If the VPU uses fixed size strings, all characters from the end of the name
+ * to the end of the fixed size character array must be zeroed.
+ */
+ u32 name_string_size;
+ /** Counter description string size, @see name_string_size */
+ u32 description_string_size;
+ /** Counter component name string size, @see name_string_size */
+ u32 component_string_size;
+ /** Counter units string size, @see name_string_size */
+ u32 units_string_size;
+ u64 reserved_0;
+ /**
+ * Right after this structure, the VPU writes the name, description,
+ * component and unit strings.
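
Walking the descriptor chain returned by VPU_JSM_MSG_METRIC_STREAMER_INFO might look like the sketch below. It assumes the next-offsets are relative to the current descriptor (the comments above do not state the base explicitly), and visit() is a hypothetical callback:

    static void walk_metric_groups(const void *info_buf,
                                   void (*visit)(const struct vpu_jsm_metric_group_descriptor *g))
    {
        const struct vpu_jsm_metric_group_descriptor *g = info_buf;

        for (;;) {
            visit(g);
            if (!g->next_metric_group_info_offset)
                break; /* offset 0 terminates the chain */
            g = (const void *)((const char *)g +
                               g->next_metric_group_info_offset);
        }
    }
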
+ */
+};
+
+/**
+ * Payload for VPU_JSM_MSG_DYNDBG_CONTROL requests.
+ *
+ * VPU_JSM_MSG_DYNDBG_CONTROL messages are used to control the VPU FW Dynamic Debug
+ * feature, which allows developers to selectively enable / disable MVLOG_DEBUG
+ * messages. This is equivalent to the Dynamic Debug functionality provided by
+ * Linux
+ * (https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html).
+ * The host can control Dynamic Debug behavior by sending dyndbg commands, which
+ * have the same syntax as Linux dyndbg commands.
+ *
+ * NOTE: in order for MVLOG_DEBUG messages to be actually printed, the host
+ * still has to set the logging level to MVLOG_DEBUG, using the
+ * VPU_JSM_MSG_TRACE_SET_CONFIG command.
+ *
+ * The host can see the current dynamic debug configuration by executing a
+ * special 'show' command. The dyndbg configuration will be printed to the
+ * configured logging destination using the MVLOG_INFO logging level.
+ */
+struct vpu_ipc_msg_payload_dyndbg_control {
+ /**
+ * Dyndbg command (same format as Linux dyndbg); must be a NULL-terminated
+ * string.
+ */
+ char dyndbg_cmd[VPU_DYNDBG_CMD_MAX_LEN];
+};
+
+/*
+ * Payloads union, used to define complete message format.
+ */
+union vpu_ipc_msg_payload {
+ struct vpu_ipc_msg_payload_engine_reset engine_reset;
+ struct vpu_ipc_msg_payload_engine_preempt engine_preempt;
+ struct vpu_ipc_msg_payload_register_db register_db;
+ struct vpu_ipc_msg_payload_unregister_db unregister_db;
+ struct vpu_ipc_msg_payload_query_engine_hb query_engine_hb;
+ struct vpu_ipc_msg_payload_power_level power_level;
+ struct vpu_jsm_metric_streamer_start metric_streamer_start;
+ struct vpu_jsm_metric_streamer_stop metric_streamer_stop;
+ struct vpu_jsm_metric_streamer_update metric_streamer_update;
+ struct vpu_ipc_msg_payload_blob_deinit blob_deinit;
+ struct vpu_ipc_msg_payload_ssid_release ssid_release;
+ struct vpu_jsm_hws_register_db hws_register_db;
+ struct vpu_ipc_msg_payload_job_done job_done;
+ struct vpu_ipc_msg_payload_engine_reset_done engine_reset_done;
+ struct vpu_ipc_msg_payload_engine_preempt_done engine_preempt_done;
+ struct vpu_ipc_msg_payload_register_db_done register_db_done;
+ struct vpu_ipc_msg_payload_unregister_db_done unregister_db_done;
+ struct vpu_ipc_msg_payload_query_engine_hb_done query_engine_hb_done;
+ struct vpu_ipc_msg_payload_get_power_level_count_done get_power_level_count_done;
+ struct vpu_jsm_metric_streamer_done metric_streamer_done;
+ struct vpu_ipc_msg_payload_blob_deinit_done blob_deinit_done;
+ struct vpu_ipc_msg_payload_trace_config trace_config;
+ struct vpu_ipc_msg_payload_trace_capability_rsp trace_capability;
+ struct vpu_ipc_msg_payload_trace_get_name trace_get_name;
+ struct vpu_ipc_msg_payload_trace_get_name_rsp trace_get_name_rsp;
+ struct vpu_ipc_msg_payload_dyndbg_control dyndbg_control;
+ struct vpu_ipc_msg_payload_hws_priority_band_setup hws_priority_band_setup;
+ struct vpu_ipc_msg_payload_hws_create_cmdq hws_create_cmdq;
+ struct vpu_ipc_msg_payload_hws_create_cmdq_rsp hws_create_cmdq_rsp;
+ struct vpu_ipc_msg_payload_hws_destroy_cmdq hws_destroy_cmdq;
+ struct vpu_ipc_msg_payload_hws_set_context_sched_properties
+ hws_set_context_sched_properties;
+};
+
+/*
+ * Host <-> LRT IPC message base structure.
+ *
+ * NOTE: All instances of this object must be aligned on a 64B boundary
+ * to allow proper handling of VPU cache operations.
+ */
+struct vpu_jsm_msg {
+ /* Reserved */
+ u64 reserved_0;
+ /* Message type, see vpu_ipc_msg_type enum.
*/ + u32 type; + /* Buffer status, see vpu_ipc_msg_status enum. */ + u32 status; + /* + * Request ID, provided by the host in a request message and passed + * back by VPU in the response message. + */ + u32 request_id; + /* Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */ + u32 result; + u64 reserved_1; + /* Message payload depending on message type, see vpu_ipc_msg_payload union. */ + union vpu_ipc_msg_payload payload; +}; + +#pragma pack(pop) + +#endif + +///@} diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c index fbf725fae7c1c3..6cfbbf0720bdc6 100644 --- a/drivers/dma-buf/dma-buf-sysfs-stats.c +++ b/drivers/dma-buf/dma-buf-sysfs-stats.c @@ -112,7 +112,7 @@ static void dma_buf_sysfs_release(struct kobject *kobj) kfree(sysfs_entry); } -static struct kobj_type dma_buf_ktype = { +static const struct kobj_type dma_buf_ktype = { .sysfs_ops = &dma_buf_stats_sysfs_ops, .release = dma_buf_sysfs_release, .default_groups = dma_buf_stats_default_groups, diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index e6528767efc7c3..757c0fb77a6cb3 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -1257,7 +1257,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF); * * @dmabuf: [in] buffer which is moving * - * Informs all attachmenst that they need to destroy and recreated all their + * Informs all attachments that they need to destroy and recreate all their * mappings. */ void dma_buf_move_notify(struct dma_buf *dmabuf) @@ -1275,11 +1275,11 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF); /** * DOC: cpu access * - * There are mutliple reasons for supporting CPU access to a dma buffer object: + * There are multiple reasons for supporting CPU access to a dma buffer object: * * - Fallback operations in the kernel, for example when a device is connected * over USB and the kernel needs to shuffle the data around first before - * sending it away. Cache coherency is handled by braketing any transactions + * sending it away. Cache coherency is handled by bracketing any transactions * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access() * access. * @@ -1306,7 +1306,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF); * replace ION buffers mmap support was needed. * * There is no special interfaces, userspace simply calls mmap on the dma-buf - * fd. But like for CPU access there's a need to braket the actual access, + * fd. But like for CPU access there's a need to bracket the actual access, * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must * be restarted. @@ -1380,10 +1380,10 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf, * preparations. Coherency is only guaranteed in the specified range for the * specified access direction. * @dmabuf: [in] buffer to prepare cpu access for. - * @direction: [in] length of range for cpu access. + * @direction: [in] direction of access. * * After the cpu access is complete the caller should call - * dma_buf_end_cpu_access(). Only when cpu access is braketed by both calls is + * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is * it guaranteed to be coherent with other DMA access. * * This function will also wait for any DMA transactions tracked through @@ -1423,7 +1423,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF); * actions. 
Coherency is only guaranteed in the specified range for the * specified access direction. * @dmabuf: [in] buffer to complete cpu access for. - * @direction: [in] length of range for cpu access. + * @direction: [in] direction of access. * * This terminates CPU access started with dma_buf_begin_cpu_access(). * diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index 283816fbd72fc3..740d6e426ee952 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -13,6 +13,8 @@ #include #include #include +#include +#include static int list_limit = 1024; module_param(list_limit, int, 0644); @@ -60,6 +62,30 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma) return 0; } +static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map) +{ + struct udmabuf *ubuf = buf->priv; + void *vaddr; + + dma_resv_assert_held(buf->resv); + + vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1); + if (!vaddr) + return -EINVAL; + + iosys_map_set_vaddr(map, vaddr); + return 0; +} + +static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map) +{ + struct udmabuf *ubuf = buf->priv; + + dma_resv_assert_held(buf->resv); + + vm_unmap_ram(map->vaddr, ubuf->pagecount); +} + static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf, enum dma_data_direction direction) { @@ -162,6 +188,8 @@ static const struct dma_buf_ops udmabuf_ops = { .unmap_dma_buf = unmap_udmabuf, .release = release_udmabuf, .mmap = mmap_udmabuf, + .vmap = vmap_udmabuf, + .vunmap = vunmap_udmabuf, .begin_cpu_access = begin_cpu_udmabuf, .end_cpu_access = end_cpu_udmabuf, }; diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c index a353e27f83f540..ce9c007ed66ff7 100644 --- a/drivers/firmware/sysfb_simplefb.c +++ b/drivers/firmware/sysfb_simplefb.c @@ -27,25 +27,56 @@ static const struct simplefb_format formats[] = SIMPLEFB_FORMATS; __init bool sysfb_parse_mode(const struct screen_info *si, struct simplefb_platform_data *mode) { - const struct simplefb_format *f; __u8 type; + u32 bits_per_pixel; unsigned int i; type = si->orig_video_isVGA; if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI) return false; + /* + * The meaning of depth and bpp for direct-color formats is + * inconsistent: + * + * - DRM format info specifies depth as the number of color + * bits; including alpha, but not including filler bits. + * - Linux' EFI platform code computes lfb_depth from the + * individual color channels, including the reserved bits. + * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later + * versions use 15. + * - On the kernel command line, 'bpp' of 32 is usually + * XRGB8888 including the filler bits, but 15 is XRGB1555 + * not including the filler bit. + * + * It's not easily possible to fix this in struct screen_info, + * as this could break UAPI. The best solution is to compute + * bits_per_pixel here and ignore lfb_depth. In the loop below, + * ignore simplefb formats with alpha bits, as EFI and VESA + * don't specify alpha channels. 
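
A worked example of the computation above (editorial illustration, not part of the patch): a typical UEFI GOP XRGB8888 mode reports red 8 bits at offset 16, green 8 at offset 8, blue 8 at offset 0 and 8 reserved bits at offset 24, so

    bits_per_pixel = max(max3(8 + 16, 8 + 8, 8 + 0), 8 + 24) = 32

which matches the 32-bit x8r8g8b8 simplefb entry even when the firmware reports lfb_depth as 24; the old lfb_depth comparison would have rejected such a mode.
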
+ */ + if (si->lfb_depth > 8) { + bits_per_pixel = max(max3(si->red_size + si->red_pos, + si->green_size + si->green_pos, + si->blue_size + si->blue_pos), + si->rsvd_size + si->rsvd_pos); + } else { + bits_per_pixel = si->lfb_depth; + } + for (i = 0; i < ARRAY_SIZE(formats); ++i) { - f = &formats[i]; - if (si->lfb_depth == f->bits_per_pixel && + const struct simplefb_format *f = &formats[i]; + + if (f->transp.length) + continue; /* transparent formats are unsupported by VESA/EFI */ + + if (bits_per_pixel == f->bits_per_pixel && si->red_size == f->red.length && si->red_pos == f->red.offset && si->green_size == f->green.length && si->green_pos == f->green.offset && si->blue_size == f->blue.length && - si->blue_pos == f->blue.offset && - si->rsvd_size == f->transp.length && - si->rsvd_pos == f->transp.offset) { + si->blue_pos == f->blue.offset) { mode->format = f->name; mode->width = si->lfb_width; mode->height = si->lfb_height; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 9abfb482b615e7..dc0f94f02a82ed 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -12,7 +12,6 @@ menuconfig DRM select HDMI select FB_CMDLINE select I2C - select I2C_ALGOBIT select DMA_SHARED_BUFFER select SYNC_FILE # gallium uses SYS_kcmp for os_same_file_description() to de-duplicate @@ -64,6 +63,12 @@ config DRM_USE_DYNAMIC_DEBUG bytes per callsite, the .data costs can be substantial, and are therefore configurable. +config DRM_KUNIT_TEST_HELPERS + tristate + depends on DRM && KUNIT + help + KUnit Helpers for KMS drivers. + config DRM_KUNIT_TEST tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS depends on DRM && KUNIT @@ -74,6 +79,7 @@ config DRM_KUNIT_TEST select DRM_KMS_HELPER select DRM_BUDDY select DRM_EXPORT_FOR_TESTS if m + select DRM_KUNIT_TEST_HELPERS default KUNIT_ALL_TESTS help This builds unit tests for DRM. This option is not useful for @@ -392,64 +398,7 @@ menuconfig DRM_LEGACY Unless you have strong reasons to go rogue, say "N". if DRM_LEGACY - -config DRM_TDFX - tristate "3dfx Banshee/Voodoo3+" - depends on DRM && PCI - help - Choose this option if you have a 3dfx Banshee or Voodoo3 (or later), - graphics card. If M is selected, the module will be called tdfx. - -config DRM_R128 - tristate "ATI Rage 128" - depends on DRM && PCI - select FW_LOADER - help - Choose this option if you have an ATI Rage 128 graphics card. If M - is selected, the module will be called r128. AGP support for - this card is strongly suggested (unless you have a PCI version). - -config DRM_I810 - tristate "Intel I810" - # !PREEMPTION because of missing ioctl locking - depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN) - help - Choose this option if you have an Intel I810 graphics card. If M is - selected, the module will be called i810. AGP support is required - for this driver to work. - -config DRM_MGA - tristate "Matrox g200/g400" - depends on DRM && PCI - select FW_LOADER - help - Choose this option if you have a Matrox G200, G400 or G450 graphics - card. If M is selected, the module will be called mga. AGP - support is required for this driver to work. - -config DRM_SIS - tristate "SiS video cards" - depends on DRM && AGP - depends on FB_SIS || FB_SIS=n - help - Choose this option if you have a SiS 630 or compatible video - chipset. If M is selected the module will be called sis. AGP - support is required for this driver to work. 
- -config DRM_VIA - tristate "Via unichrome video cards" - depends on DRM && PCI - help - Choose this option if you have a Via unichrome or compatible video - chipset. If M is selected the module will be called via. - -config DRM_SAVAGE - tristate "Savage video cards" - depends on DRM && PCI - help - Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister - chipset. If M is selected the module will be called savage. - +# leave here to list legacy drivers endif # DRM_LEGACY config DRM_EXPORT_FOR_TESTS diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index cc637343d87b09..ab4460fcd63f6b 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -126,7 +126,7 @@ obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o # Drivers and the rest # -obj-$(CONFIG_DRM_KUNIT_TEST) += tests/ +obj-y += tests/ obj-$(CONFIG_DRM_MIPI_DBI) += drm_mipi_dbi.o obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o @@ -134,21 +134,14 @@ obj-y += arm/ obj-y += display/ obj-$(CONFIG_DRM_TTM) += ttm/ obj-$(CONFIG_DRM_SCHED) += scheduler/ -obj-$(CONFIG_DRM_TDFX) += tdfx/ -obj-$(CONFIG_DRM_R128) += r128/ obj-$(CONFIG_DRM_RADEON)+= radeon/ obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/ -obj-$(CONFIG_DRM_MGA) += mga/ -obj-$(CONFIG_DRM_I810) += i810/ obj-$(CONFIG_DRM_I915) += i915/ obj-$(CONFIG_DRM_KMB_DISPLAY) += kmb/ obj-$(CONFIG_DRM_MGAG200) += mgag200/ obj-$(CONFIG_DRM_V3D) += v3d/ obj-$(CONFIG_DRM_VC4) += vc4/ -obj-$(CONFIG_DRM_SIS) += sis/ -obj-$(CONFIG_DRM_SAVAGE)+= savage/ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/ -obj-$(CONFIG_DRM_VIA) +=via/ obj-$(CONFIG_DRM_VGEM) += vgem/ obj-$(CONFIG_DRM_VKMS) += vkms/ obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/ diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index 5fcd510f1abba3..5341b6b242c3bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -13,6 +13,8 @@ config DRM_AMDGPU select DRM_TTM_HELPER select POWER_SUPPLY select HWMON + select I2C + select I2C_ALGOBIT select BACKLIGHT_CLASS_DEVICE select INTERVAL_TREE select DRM_BUDDY diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 798d0e9a60b7dc..1d72cbc853480a 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -34,6 +34,7 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \ -I$(FULL_AMD_PATH)/acp/include \ -I$(FULL_AMD_DISPLAY_PATH) \ -I$(FULL_AMD_DISPLAY_PATH)/include \ + -I$(FULL_AMD_DISPLAY_PATH)/modules/inc \ -I$(FULL_AMD_DISPLAY_PATH)/dc \ -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \ -I$(FULL_AMD_PATH)/amdkfd @@ -76,12 +77,13 @@ amdgpu-y += \ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \ vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \ nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \ - sienna_cichlid.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o + sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o # add DF block amdgpu-y += \ df_v1_7.o \ - df_v3_6.o + df_v3_6.o \ + df_v4_3.o # add GMC block amdgpu-y += \ @@ -136,6 +138,7 @@ amdgpu-y += \ gfx_v10_0.o \ imu_v11_0.o \ gfx_v11_0.o \ + gfx_v11_0_3.o \ imu_v11_0_3.o # add async DMA block diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d148a1bd85e67a..164141bc8b4ad1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -52,8 +52,7 @@ #include #include -#include -#include +#include 
#include #include @@ -150,7 +149,7 @@ struct amdgpu_watchdog_timer * Modules parameters. */ extern int amdgpu_modeset; -extern int amdgpu_vram_limit; +extern unsigned int amdgpu_vram_limit; extern int amdgpu_vis_vram_limit; extern int amdgpu_gart_size; extern int amdgpu_gtt_size; @@ -610,7 +609,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); /* VRAM scratch page for HDP bug, default vram page */ -struct amdgpu_vram_scratch { +struct amdgpu_mem_scratch { struct amdgpu_bo *robj; volatile uint32_t *ptr; u64 gpu_addr; @@ -757,6 +756,11 @@ struct amdgpu_mqd { #define AMDGPU_PRODUCT_NAME_LEN 64 struct amdgpu_reset_domain; +/* + * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise. + */ +#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size) + struct amdgpu_device { struct device *dev; struct pci_dev *pdev; @@ -850,7 +854,7 @@ struct amdgpu_device { /* memory management */ struct amdgpu_mman mman; - struct amdgpu_vram_scratch vram_scratch; + struct amdgpu_mem_scratch mem_scratch; struct amdgpu_wb wb; atomic64_t num_bytes_moved; atomic64_t num_evictions; @@ -872,7 +876,7 @@ struct amdgpu_device { struct amdgpu_vkms_output *amdgpu_vkms_output; struct amdgpu_mode_info mode_info; /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */ - struct work_struct hotplug_work; + struct delayed_work hotplug_work; struct amdgpu_irq_src crtc_irq; struct amdgpu_irq_src vline0_irq; struct amdgpu_irq_src vupdate_irq; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 57b5e11446c65a..458362e4ea0112 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -31,7 +32,6 @@ #include #include -#include #include "amdgpu.h" #include "amdgpu_pm.h" #include "amdgpu_display.h" @@ -1079,20 +1079,16 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) * S0ix even though the system is suspending to idle, so return false * in that case. 
*/ - if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) { + if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) dev_warn_once(adev->dev, "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n" "To use suspend-to-idle change the sleep mode in BIOS setup.\n"); - return false; - } #if !IS_ENABLED(CONFIG_AMD_PMC) dev_warn_once(adev->dev, "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n"); - return false; -#else - return true; #endif /* CONFIG_AMD_PMC */ + return true; } #endif /* CONFIG_SUSPEND */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 0040deaf8a83a3..333780491867cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -97,7 +97,7 @@ struct amdgpu_amdkfd_fence { struct amdgpu_kfd_dev { struct kfd_dev *dev; - uint64_t vram_used; + int64_t vram_used; uint64_t vram_used_aligned; bool init_complete; struct work_struct reset_work; @@ -271,9 +271,9 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_ ((struct drm_file *)(drm_priv))->driver_priv)->vm) int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev, - struct file *filp, u32 pasid); + struct amdgpu_vm *avm, u32 pasid); int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, - struct file *filp, + struct amdgpu_vm *avm, void **process_info, struct dma_fence **ef); void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 3b5c53712d319c..d6320c8362514c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "amdgpu_object.h" #include "amdgpu_gem.h" @@ -1430,18 +1431,11 @@ static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo) } int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev, - struct file *filp, u32 pasid) + struct amdgpu_vm *avm, u32 pasid) { - struct amdgpu_fpriv *drv_priv; - struct amdgpu_vm *avm; int ret; - ret = amdgpu_file_to_fpriv(filp, &drv_priv); - if (ret) - return ret; - avm = &drv_priv->vm; - /* Free the original amdgpu allocated pasid, * will be replaced with kfd allocated pasid. */ @@ -1458,19 +1452,12 @@ int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev, } int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, - struct file *filp, + struct amdgpu_vm *avm, void **process_info, struct dma_fence **ef) { - struct amdgpu_fpriv *drv_priv; - struct amdgpu_vm *avm; int ret; - ret = amdgpu_file_to_fpriv(filp, &drv_priv); - if (ret) - return ret; - avm = &drv_priv->vm; - /* Already a compute VM? */ if (avm->process_info) return -EINVAL; @@ -1612,6 +1599,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct amdgpu_bo *bo; struct drm_gem_object *gobj = NULL; u32 domain, alloc_domain; + uint64_t aligned_size; u64 alloc_flags; int ret; @@ -1667,22 +1655,23 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( * the memory. 
*/ if ((*mem)->aql_queue) - size = size >> 1; + size >>= 1; + aligned_size = PAGE_ALIGN(size); (*mem)->alloc_flags = flags; amdgpu_sync_create(&(*mem)->sync); - ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags); + ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags); if (ret) { pr_debug("Insufficient memory\n"); goto err_reserve_limit; } pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n", - va, size, domain_string(alloc_domain)); + va, (*mem)->aql_queue ? size << 1 : size, domain_string(alloc_domain)); - ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags, + ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags, bo_type, NULL, &gobj); if (ret) { pr_debug("Failed to create BO on domain %s. ret %d\n", @@ -1739,7 +1728,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( /* Don't unreserve system mem limit twice */ goto err_reserve_limit; err_bo_create: - amdgpu_amdkfd_unreserve_mem_limit(adev, size, flags); + amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags); err_reserve_limit: mutex_destroy(&(*mem)->lock); if (gobj) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index e4d78491bcc7e7..ededdc01ca282b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -28,6 +28,8 @@ struct hmm_range; +struct drm_file; + struct amdgpu_device; struct amdgpu_bo; struct amdgpu_bo_va; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index f1a05037919058..456e385333b6b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -411,17 +411,10 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, return -EINVAL; } - err = request_firmware(&adev->pm.fw, fw_name, adev->dev); - if (err) { - DRM_ERROR("Failed to request firmware\n"); - return err; - } - - err = amdgpu_ucode_validate(adev->pm.fw); + err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name); if (err) { DRM_ERROR("Failed to load firmware \"%s\"", fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; + amdgpu_ucode_release(&adev->pm.fw); return err; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 2ebbc6382a0613..6be30dcb029d57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -25,7 +25,9 @@ */ #include +#include #include +#include #include #include #include "amdgpu.h" @@ -996,13 +998,33 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) } } + if (amdgpu_connector->detected_hpd_without_ddc) { + force = true; + amdgpu_connector->detected_hpd_without_ddc = false; + } + if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { ret = connector->status; goto exit; } - if (amdgpu_connector->ddc_bus) + if (amdgpu_connector->ddc_bus) { dret = amdgpu_display_ddc_probe(amdgpu_connector, false); + + /* Sometimes the pins required for the DDC probe on DVI + * connectors don't make contact at the same time that the ones + * for HPD do. 
If the DDC probe fails even though we had an HPD + * signal, try again later + */ + if (!dret && !force && + amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { + DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n"); + amdgpu_connector->detected_hpd_without_ddc = true; + schedule_delayed_work(&adev->hotplug_work, + msecs_to_jiffies(1000)); + goto exit; + } + } if (dret) { amdgpu_connector->detected_by_load = false; amdgpu_connector_free_edid(connector); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 7af3041ccd0e82..08eced097bd8e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -32,6 +32,8 @@ #include #include +#include + #include "amdgpu_cs.h" #include "amdgpu.h" #include "amdgpu_trace.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h index 113f39510a7274..fb3e3d56d427da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h @@ -23,6 +23,8 @@ #ifndef __AMDGPU_CS_H__ #define __AMDGPU_CS_H__ +#include + #include "amdgpu_job.h" #include "amdgpu_bo_list.h" #include "amdgpu_ring.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 0f16d3c093091d..f60753f97ac598 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1717,7 +1717,7 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring) static int amdgpu_debugfs_ib_preempt(void *data, u64 val) { - int r, resched, length; + int r, length; struct amdgpu_ring *ring; struct dma_fence **fences = NULL; struct amdgpu_device *adev = (struct amdgpu_device *)data; @@ -1747,8 +1747,6 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) /* stop the scheduler */ kthread_park(ring->sched.thread); - resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); - /* preempt the IB */ r = amdgpu_ring_preempt_ib(ring); if (r) { @@ -1785,8 +1783,6 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) up_read(&adev->reset_domain->sem); - ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); - pro_end: kfree(fences); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index fbf2f24169eb5f..c4a4e2fe66814c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -38,6 +38,7 @@ #include #include +#include #include #include #include @@ -163,7 +164,7 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev); * * The amdgpu driver provides a sysfs API for reporting the product name * for the device - * The file serial_number is used for this and returns the product name + * The file product_name is used for this and returns the product name * as returned from the FRU. * NOTE: This is only available for certain server cards */ @@ -185,7 +186,7 @@ static DEVICE_ATTR(product_name, S_IRUGO, * * The amdgpu driver provides a sysfs API for reporting the part number * for the device - * The file serial_number is used for this and returns the part number + * The file product_number is used for this and returns the part number * as returned from the FRU. 
* NOTE: This is only available for certain server cards */ @@ -927,32 +928,33 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev) } /** - * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page + * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page * * @adev: amdgpu_device pointer * * Allocates a scratch page of VRAM for use by various things in the * driver. */ -static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev) +static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev) { - return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, - &adev->vram_scratch.robj, - &adev->vram_scratch.gpu_addr, - (void **)&adev->vram_scratch.ptr); + return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, + &adev->mem_scratch.robj, + &adev->mem_scratch.gpu_addr, + (void **)&adev->mem_scratch.ptr); } /** - * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page + * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page * * @adev: amdgpu_device pointer * * Frees the VRAM scratch page. */ -static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev) +static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev) { - amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL); } /** @@ -1984,17 +1986,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); - err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev); + err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name); if (err) { dev_err(adev->dev, - "Failed to load gpu_info firmware \"%s\"\n", - fw_name); - goto out; - } - err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw); - if (err) { - dev_err(adev->dev, - "Failed to validate gpu_info firmware \"%s\"\n", + "Failed to get gpu_info firmware \"%s\"\n", fw_name); goto out; } @@ -2081,6 +2076,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) struct drm_device *dev = adev_to_drm(adev); struct pci_dev *parent; int i, r; + bool total; amdgpu_device_enable_virtual_display(adev); @@ -2164,6 +2160,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK; + total = true; for (i = 0; i < adev->num_ip_blocks; i++) { if ((amdgpu_ip_block_mask & (1 << i)) == 0) { DRM_ERROR("disabled ip block: %d <%s>\n", @@ -2177,7 +2174,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) } else if (r) { DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); - return r; + total = false; } else { adev->ip_blocks[i].status.valid = true; } @@ -2208,6 +2205,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) } } + if (!total) + return -ENODEV; adev->cg_flags &= amdgpu_cg_mask; adev->pg_flags &= amdgpu_pg_mask; @@ -2393,9 +2392,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) amdgpu_virt_exchange_data(adev); - r = amdgpu_device_vram_scratch_init(adev); + r = amdgpu_device_mem_scratch_init(adev); if (r) { - DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); + DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r); goto init_failed; } r = adev->ip_blocks[i].version->funcs->hw_init((void 
*)adev); @@ -2413,8 +2412,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) /* right after GMC hw init, we create CSA */ if (amdgpu_mcbp) { r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_CSA_SIZE); + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_CSA_SIZE); if (r) { DRM_ERROR("allocate CSA failed %d\n", r); goto init_failed; @@ -2584,9 +2584,10 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev, i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; if (!adev->ip_blocks[i].status.late_initialized) continue; - /* skip CG for GFX on S0ix */ + /* skip CG for GFX, SDMA on S0ix */ if (adev->in_s0ix && - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) continue; /* skip CG for VCE/UVD, it's handled specially */ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && @@ -2620,9 +2621,10 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev, i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; if (!adev->ip_blocks[i].status.late_initialized) continue; - /* skip PG for GFX on S0ix */ + /* skip PG for GFX, SDMA on S0ix */ if (adev->in_s0ix && - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) continue; /* skip CG for VCE/UVD, it's handled specially */ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && @@ -2874,7 +2876,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) amdgpu_ucode_free_bo(adev); amdgpu_free_static_csa(&adev->virt.csa_obj); amdgpu_device_wb_fini(adev); - amdgpu_device_vram_scratch_fini(adev); + amdgpu_device_mem_scratch_fini(adev); amdgpu_ib_pool_fini(adev); } @@ -3030,6 +3032,24 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES)) continue; + /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */ + if (adev->in_s0ix && + (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) && + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) + continue; + + /* Once swPSP provides the IMU, RLC FW binaries to TOS during cold-boot. + * These are in TMR, hence are expected to be reused by PSP-TOS to reload + * from this location and RLC Autoload automatically also gets loaded + * from here based on PMFW -> PSP message during re-init sequence. + * Therefore, the psp suspend & resume should be skipped to avoid destroy + * the TMR and reload FWs again for IMU enabled APU ASICs. + */ + if (amdgpu_in_reset(adev) && + (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs && + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) + continue; + /* XXX handle errors */ r = adev->ip_blocks[i].version->funcs->suspend(adev); /* XXX handle errors */ @@ -3230,15 +3250,6 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) return r; } adev->ip_blocks[i].status.hw = true; - - if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { - /* disable gfxoff for IP resume. The gfxoff will be re-enabled in - * amdgpu_device_resume() after IP resume. 
- */ - amdgpu_gfx_off_ctrl(adev, false); - DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n"); - } - } return 0; @@ -3997,10 +4008,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) } amdgpu_fence_driver_hw_fini(adev); - if (adev->mman.initialized) { - flush_delayed_work(&adev->mman.bdev.wq); - ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); - } + if (adev->mman.initialized) + drain_workqueue(adev->mman.bdev.wq); if (adev->pm_sysfs_en) amdgpu_pm_sysfs_fini(adev); @@ -4022,7 +4031,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) amdgpu_gart_dummy_page_fini(adev); - amdgpu_device_unmap_mmio(adev); + if (drm_dev_is_unplugged(adev_to_drm(adev))) + amdgpu_device_unmap_mmio(adev); } @@ -4032,8 +4042,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) amdgpu_fence_driver_sw_fini(adev); amdgpu_device_ip_fini(adev); - release_firmware(adev->firmware.gpu_info_fw); - adev->firmware.gpu_info_fw = NULL; + amdgpu_ucode_release(&adev->firmware.gpu_info_fw); adev->accel_working = false; dma_fence_put(rcu_dereference_protected(adev->gang_submit, true)); @@ -4231,13 +4240,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) /* Make sure IB tests flushed */ flush_delayed_work(&adev->delayed_init_work); - if (adev->in_s0ix) { - /* re-enable gfxoff after IP resume. This re-enables gfxoff after - * it was disabled for IP resume in amdgpu_device_ip_resume_phase2(). - */ - amdgpu_gfx_off_ctrl(adev, true); - DRM_DEBUG("will enable gfxoff for the mission mode\n"); - } if (fbcon) drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false); @@ -4621,11 +4623,6 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) if (!amdgpu_ras_is_poison_mode_supported(adev)) return true; - if (!amdgpu_device_ip_check_soft_reset(adev)) { - dev_info(adev->dev,"Timeout, but no hardware hang detected.\n"); - return false; - } - if (amdgpu_sriov_vf(adev)) return true; @@ -4750,7 +4747,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, if (!need_full_reset) need_full_reset = amdgpu_device_ip_need_full_reset(adev); - if (!need_full_reset && amdgpu_gpu_recovery) { + if (!need_full_reset && amdgpu_gpu_recovery && + amdgpu_device_ip_check_soft_reset(adev)) { amdgpu_device_ip_pre_soft_reset(adev); r = amdgpu_device_ip_soft_reset(adev); amdgpu_device_ip_post_soft_reset(adev); @@ -5876,8 +5874,8 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, int amdgpu_in_reset(struct amdgpu_device *adev) { return atomic_read(&adev->reset_domain->in_gpu_reset); - } - +} + /** * amdgpu_device_halt() - bring hardware to some kind of halt state * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 1bbd56029a4f90..b719852daa071a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -33,6 +33,7 @@ #include "gmc_v9_0.h" #include "df_v1_7.h" #include "df_v3_6.h" +#include "df_v4_3.h" #include "nbio_v6_1.h" #include "nbio_v7_0.h" #include "nbio_v7_4.h" @@ -2329,6 +2330,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(3, 5, 2): adev->df.funcs = &df_v1_7_funcs; break; + case IP_VERSION(4, 3, 0): + adev->df.funcs = &df_v4_3_funcs; + break; default: break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index b22471b3bd63f4..503f89a766c377 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ 
-42,6 +42,7 @@ #include #include #include +#include #include /** @@ -63,7 +64,7 @@ void amdgpu_display_hotplug_work_func(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, - hotplug_work); + hotplug_work.work); struct drm_device *dev = adev_to_drm(adev); struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 271e30e34d9321..0c001bb8fc2b03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -37,6 +37,7 @@ #include "amdgpu_dma_buf.h" #include "amdgpu_xgmi.h" #include +#include #include #include #include diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 3fe277bc233f4f..86fbb413828540 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include "amdgpu.h" @@ -104,13 +103,16 @@ * - 3.46.0 - To enable hot plug amdgpu tests in libdrm * - 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags * - 3.48.0 - Add IP discovery version info to HW INFO - * 3.49.0 - Add gang submit into CS IOCTL + * - 3.49.0 - Add gang submit into CS IOCTL + * - 3.50.0 - Update AMDGPU_INFO_DEV_INFO IOCTL for minimum engine and memory clock + * Update AMDGPU_INFO_SENSOR IOCTL for PEAK_PSTATE engine and memory clock + * 3.51.0 - Return the PCIe gen and lanes from the INFO ioctl */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 49 +#define KMS_DRIVER_MINOR 51 #define KMS_DRIVER_PATCHLEVEL 0 -int amdgpu_vram_limit; +unsigned int amdgpu_vram_limit = UINT_MAX; int amdgpu_vis_vram_limit; int amdgpu_gart_size = -1; /* auto */ int amdgpu_gtt_size = -1; /* auto */ @@ -2236,6 +2238,8 @@ amdgpu_pci_remove(struct pci_dev *pdev) struct drm_device *dev = pci_get_drvdata(pdev); struct amdgpu_device *adev = drm_to_adev(dev); + drm_dev_unplug(dev); + if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) { pm_runtime_get_sync(dev->dev); pm_runtime_forbid(dev->dev); @@ -2275,8 +2279,6 @@ amdgpu_pci_remove(struct pci_dev *pdev) amdgpu_driver_unload_kms(dev); - drm_dev_unplug(dev); - /* * Flush any in flight DMA operations from device. 
 * Clear the Bus Master Enable bit and then wait on the PCIe Device
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index c96e458ed0889e..27a782a9dc7281 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -24,7 +24,6 @@
  * Alex Deucher
  */
 
-#include 
 #include 
 #include "amdgpu.h"
 #include "amdgpu_connectors.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
index 41a4c705672978..e86834bfea1d5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
@@ -30,7 +30,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 
 #include "amdgpu_sync.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index bb7350ea1d7594..ed1164a87fced0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -34,6 +34,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "amdgpu.h"
 #include "amdgpu_display.h"
@@ -61,10 +62,10 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
 			goto unlock;
 		}
 
-		 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
-						TTM_BO_VM_NUM_PREFAULT);
+		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+					       TTM_BO_VM_NUM_PREFAULT);
 
-		 drm_dev_exit(idx);
+		drm_dev_exit(idx);
 	} else {
 		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 3380daf42da8af..35ed46b9249c13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -375,8 +375,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
 	 * KIQ MQD no matter SRIOV or Bare-metal
 	 */
 	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
-				    AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
-				    &ring->mqd_gpu_addr, &ring->mqd_ptr);
+				    AMDGPU_GEM_DOMAIN_VRAM |
+				    AMDGPU_GEM_DOMAIN_GTT,
+				    &ring->mqd_obj,
+				    &ring->mqd_gpu_addr,
+				    &ring->mqd_ptr);
 	if (r) {
 		dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
 		return r;
@@ -696,6 +699,50 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
 	return r;
 }
 
+int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
+{
+	int err = 0;
+	struct amdgpu_gfx_ras *ras = NULL;
+
+	/* adev->gfx.ras is NULL, which means gfx does not
+	 * support the ras function, so do nothing here.
+	 */
+	if (!adev->gfx.ras)
+		return 0;
+
+	ras = adev->gfx.ras;
+
+	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+	if (err) {
+		dev_err(adev->dev, "Failed to register gfx ras block!\n");
+		return err;
+	}
+
+	strcpy(ras->ras_block.ras_comm.name, "gfx");
+	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
+	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+	adev->gfx.ras_if = &ras->ras_block.ras_comm;
+
+	/* If no special ras_late_init function is defined, use the default gfx ras_late_init */
+	if (!ras->ras_block.ras_late_init)
+		ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+
+	/* If no special ras_cb function is defined, use the default ras_cb */
+	if (!ras->ras_block.ras_cb)
+		ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
+
+	return 0;
+}
+
+int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
+					  struct amdgpu_iv_entry *entry)
+{
+	if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
+		return adev->gfx.ras->poison_consumption_handler(adev, entry);
+
+	return 0;
+}
+
 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
 		void *err_data,
 		struct amdgpu_iv_entry *entry)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index b3df4787877e7c..86ec9d0d12c8ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -210,6 +210,11 @@ struct amdgpu_gfx_ras {
 	struct amdgpu_ras_block_object  ras_block;
 	void (*enable_watchdog_timer)(struct amdgpu_device *adev);
 	bool (*query_utcl2_poison_status)(struct amdgpu_device *adev);
+	int (*rlc_gc_fed_irq)(struct amdgpu_device *adev,
+			      struct amdgpu_irq_src *source,
+			      struct amdgpu_iv_entry *entry);
+	int (*poison_consumption_handler)(struct amdgpu_device *adev,
+					  struct amdgpu_iv_entry *entry);
 };
 
 struct amdgpu_gfx_funcs {
@@ -323,6 +328,7 @@ struct amdgpu_gfx {
 	struct amdgpu_irq_src		priv_inst_irq;
 	struct amdgpu_irq_src		cp_ecc_error_irq;
 	struct amdgpu_irq_src		sq_irq;
+	struct amdgpu_irq_src		rlc_gc_fed_irq;
 	struct sq_work			sq_work;
 
 	/* gfx status */
@@ -432,4 +438,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
 void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
+int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
+		struct amdgpu_iv_entry *entry);
 #endif
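The new amdgpu_gfx_ras_sw_init() (and the matching amdgpu_sdma_ras_sw_init() later in this patch) centralizes the register-and-fill-defaults boilerplate that each IP version previously open-coded. A minimal sketch of the calling side; gfx_vN_0_sw_init() is a hypothetical name standing in for a real GFX IP version's sw_init callback:

    /* Hypothetical caller, for illustration only; a real GFX IP version
     * assigns adev->gfx.ras during early_init, before sw_init runs. */
    static int gfx_vN_0_sw_init(void *handle)
    {
            struct amdgpu_device *adev = (struct amdgpu_device *)handle;
            int r;

            /* A no-op returning 0 when adev->gfx.ras is NULL, so IP
             * versions without RAS support can call this unconditionally. */
            r = amdgpu_gfx_ras_sw_init(adev);
            if (r)
                    return r;

            return 0;
    }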
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 02a4c93673ce26..94f10ac0eef743 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -35,6 +35,7 @@
 #include "amdgpu_xgmi.h"
 
 #include 
+#include 
 
 /**
  * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
@@ -201,13 +202,20 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
 void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
 			      u64 base)
 {
+	uint64_t vis_limit = (uint64_t)amdgpu_vis_vram_limit << 20;
 	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
 
 	mc->vram_start = base;
 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
-	if (limit && limit < mc->real_vram_size)
+	if (limit < mc->real_vram_size)
 		mc->real_vram_size = limit;
 
+	if (vis_limit && vis_limit < mc->visible_vram_size)
+		mc->visible_vram_size = vis_limit;
+
+	if (mc->real_vram_size < mc->visible_vram_size)
+		mc->visible_vram_size = mc->real_vram_size;
+
 	if (mc->xgmi.num_physical_nodes == 0) {
 		mc->fb_start = mc->vram_start;
 		mc->fb_end = mc->vram_end;
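This works together with the amdgpu_drv.c change earlier in the series that turns amdgpu_vram_limit into an unsigned int defaulting to UINT_MAX: the MB-denominated module parameter, shifted left by 20 to get bytes, only clamps when the user actually lowered it, which is why the old `limit &&` guard can go. A self-contained sketch of the same clamping order (the function and parameter names here are illustrative, not driver API):

    #include <stdint.h>

    /* Sketch of the clamping done by amdgpu_gmc_vram_location(); the two
     * parameters mirror the amdgpu_vram_limit/amdgpu_vis_vram_limit module
     * options (in MB), but this helper itself is invented for illustration. */
    static void clamp_vram_sizes(uint64_t *real_vram_size,
                                 uint64_t *visible_vram_size,
                                 unsigned int vram_limit_mb,  /* UINT_MAX = unlimited */
                                 int vis_vram_limit_mb)       /* 0 = unlimited */
    {
            uint64_t limit = (uint64_t)vram_limit_mb << 20;
            uint64_t vis_limit = (uint64_t)vis_vram_limit_mb << 20;

            /* UINT_MAX << 20 is ~4 PB, far above any real VRAM size,
             * so the default never clamps. */
            if (limit < *real_vram_size)
                    *real_vram_size = limit;

            if (vis_limit && vis_limit < *visible_vram_size)
                    *visible_vram_size = vis_limit;

            /* The CPU-visible window can never exceed the usable VRAM size. */
            if (*real_vram_size < *visible_vram_size)
                    *visible_vram_size = *real_vram_size;
    }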
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index a6aef488a8228f..d0a1cc88832cc4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -45,7 +45,6 @@
 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 7aa7e52ca78446..ca945055e68365 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -43,6 +43,7 @@
 #include "amdgpu_gem.h"
 #include "amdgpu_display.h"
 #include "amdgpu_ras.h"
+#include "amd_pcie.h"
 
 void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
 {
@@ -767,6 +768,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	case AMDGPU_INFO_DEV_INFO: {
 		struct drm_amdgpu_info_device *dev_info;
 		uint64_t vm_size;
+		uint32_t pcie_gen_mask;
 		int ret;
 
 		dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
@@ -785,15 +787,20 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		if (adev->pm.dpm_enabled) {
 			dev_info->max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
 			dev_info->max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
+			dev_info->min_engine_clock = amdgpu_dpm_get_sclk(adev, true) * 10;
+			dev_info->min_memory_clock = amdgpu_dpm_get_mclk(adev, true) * 10;
 		} else {
-			dev_info->max_engine_clock = adev->clock.default_sclk * 10;
-			dev_info->max_memory_clock = adev->clock.default_mclk * 10;
+			dev_info->max_engine_clock =
+			dev_info->min_engine_clock =
+				adev->clock.default_sclk * 10;
+			dev_info->max_memory_clock =
+			dev_info->min_memory_clock =
+				adev->clock.default_mclk * 10;
 		}
 		dev_info->enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
 		dev_info->num_rb_pipes = adev->gfx.config.max_backends_per_se *
 			adev->gfx.config.max_shader_engines;
 		dev_info->num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
-		dev_info->_pad = 0;
 		dev_info->ids_flags = 0;
 		if (adev->flags & AMD_IS_APU)
 			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
@@ -847,6 +854,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 		dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
 
+		/* Combine the chip gen mask with the platform (CPU/mobo) mask. */
+		pcie_gen_mask = adev->pm.pcie_gen_mask & (adev->pm.pcie_gen_mask >> 16);
+		dev_info->pcie_gen = fls(pcie_gen_mask);
+		dev_info->pcie_num_lanes =
+			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
+			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
+			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
+			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
+			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
+			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
+
 		ret = copy_to_user(out, dev_info,
 				   min((size_t)size, sizeof(*dev_info))) ?
-EFAULT : 0; kfree(dev_info); @@ -1014,6 +1032,24 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) } ui32 /= 100; break; + case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK: + /* get peak pstate sclk in Mhz */ + if (amdgpu_dpm_read_sensor(adev, + AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK, + (void *)&ui32, &ui32_size)) { + return -EINVAL; + } + ui32 /= 100; + break; + case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK: + /* get peak pstate mclk in Mhz */ + if (amdgpu_dpm_read_sensor(adev, + AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK, + (void *)&ui32, &ui32_size)) { + return -EINVAL; + } + ui32 /= 100; + break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->sensor_info.type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 0c546245793b9c..82e27bd4f03836 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -21,6 +21,8 @@ * */ +#include + #include "amdgpu_mes.h" #include "amdgpu.h" #include "soc15_common.h" @@ -1423,3 +1425,60 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev) kfree(vm); return 0; } + +int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe) +{ + const struct mes_firmware_header_v1_0 *mes_hdr; + struct amdgpu_firmware_info *info; + char ucode_prefix[30]; + char fw_name[40]; + int r; + + amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin", + ucode_prefix, + pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1"); + r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name); + if (r) + goto out; + + mes_hdr = (const struct mes_firmware_header_v1_0 *) + adev->mes.fw[pipe]->data; + adev->mes.uc_start_addr[pipe] = + le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) | + ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32); + adev->mes.data_start_addr[pipe] = + le32_to_cpu(mes_hdr->mes_data_start_addr_lo) | + ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + int ucode, ucode_data; + + if (pipe == AMDGPU_MES_SCHED_PIPE) { + ucode = AMDGPU_UCODE_ID_CP_MES; + ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA; + } else { + ucode = AMDGPU_UCODE_ID_CP_MES1; + ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA; + } + + info = &adev->firmware.ucode[ucode]; + info->ucode_id = ucode; + info->fw = adev->mes.fw[pipe]; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes), + PAGE_SIZE); + + info = &adev->firmware.ucode[ucode_data]; + info->ucode_id = ucode_data; + info->fw = adev->mes.fw[pipe]; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes), + PAGE_SIZE); + } + + return 0; +out: + amdgpu_ucode_release(&adev->mes.fw[pipe]); + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 97c05d08a551a3..547ec35691fac6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -306,6 +306,7 @@ struct amdgpu_mes_funcs { int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs); +int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe); int amdgpu_mes_init(struct amdgpu_device *adev); void amdgpu_mes_fini(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 8a39300b1a845c..32fe05c810c6fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -35,7 +35,6 @@ #include #include #include -#include #include #include #include @@ -534,6 +533,7 @@ struct amdgpu_connector { void *con_priv; bool dac_load_detect; bool detected_by_load; /* if the connection status was determined by load */ + bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */ uint16_t connector_object_id; struct amdgpu_hpd hpd; struct amdgpu_router router; @@ -549,8 +549,8 @@ struct amdgpu_mst_connector { struct drm_dp_mst_topology_mgr mst_mgr; struct amdgpu_dm_dp_aux dm_dp_aux; - struct drm_dp_mst_port *port; - struct amdgpu_connector *mst_port; + struct drm_dp_mst_port *mst_output_port; + struct amdgpu_connector *mst_root; bool is_mst_connector; struct amdgpu_encoder *mst_encoder; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 25a68d8888e0d5..981010de0a2829 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1574,9 +1574,9 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) attachment = READ_ONCE(bo->tbo.base.import_attach); if (attachment) - seq_printf(m, " imported from %p", dma_buf); + seq_printf(m, " imported from ino:%lu", file_inode(dma_buf->file)->i_ino); else if (dma_buf) - seq_printf(m, " exported as %p", dma_buf); + seq_printf(m, " exported as ino:%lu", file_inode(dma_buf->file)->i_ino); amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED); amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 7a2fc920739bb0..15e601f0964860 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -66,7 +66,8 @@ static int psp_ring_init(struct psp_context *psp, /* allocate 4k Page of Local Frame Buffer memory for ring */ ring->ring_size = 0x1000; ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->firmware.rbuf, &ring->ring_mem_mc_addr, (void **)&ring->ring_mem); @@ -122,6 +123,38 @@ static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp } } +static int psp_init_sriov_microcode(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + char ucode_prefix[30]; + int ret = 0; + + amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); + + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(9, 0, 0): + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 9): + adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; + ret = psp_init_cap_microcode(psp, ucode_prefix); + break; + case IP_VERSION(13, 0, 2): + adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; + ret = psp_init_cap_microcode(psp, ucode_prefix); + ret &= psp_init_ta_microcode(psp, ucode_prefix); + break; + case IP_VERSION(13, 0, 0): + adev->virt.autoload_ucode_id = 0; + break; + case IP_VERSION(13, 0, 10): + adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA; + break; + default: + return -EINVAL; + } + return ret; +} + static int psp_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -192,7 +225,10 @@ static int psp_early_init(void *handle) psp_check_pmfw_centralized_cstate_management(psp); - return 0; + if (amdgpu_sriov_vf(adev)) + return psp_init_sriov_microcode(psp); + else + return psp_init_microcode(psp); } void 
psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx) @@ -300,7 +336,7 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) { /* runtime db doesn't exist, exit */ - dev_warn(adev->dev, "PSP runtime database doesn't exist\n"); + dev_dbg(adev->dev, "PSP runtime database doesn't exist\n"); return false; } @@ -350,42 +386,6 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, return ret; } -static int psp_init_sriov_microcode(struct psp_context *psp) -{ - struct amdgpu_device *adev = psp->adev; - int ret = 0; - - switch (adev->ip_versions[MP0_HWIP][0]) { - case IP_VERSION(9, 0, 0): - adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; - ret = psp_init_cap_microcode(psp, "vega10"); - break; - case IP_VERSION(11, 0, 9): - adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; - ret = psp_init_cap_microcode(psp, "navi12"); - break; - case IP_VERSION(11, 0, 7): - adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; - ret = psp_init_cap_microcode(psp, "sienna_cichlid"); - break; - case IP_VERSION(13, 0, 2): - adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; - ret = psp_init_cap_microcode(psp, "aldebaran"); - ret &= psp_init_ta_microcode(psp, "aldebaran"); - break; - case IP_VERSION(13, 0, 0): - adev->virt.autoload_ucode_id = 0; - break; - case IP_VERSION(13, 0, 10): - adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA; - break; - default: - BUG(); - break; - } - return ret; -} - static int psp_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -401,15 +401,6 @@ static int psp_sw_init(void *handle) ret = -ENOMEM; } - if (amdgpu_sriov_vf(adev)) - ret = psp_init_sriov_microcode(psp); - else - ret = psp_init_microcode(psp); - if (ret) { - DRM_ERROR("Failed to load psp firmware!\n"); - return ret; - } - adev->psp.xgmi_context.supports_extended_data = !adev->gmc.xgmi.connected_to_cpu && adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2); @@ -514,20 +505,11 @@ static int psp_sw_fini(void *handle) psp_memory_training_fini(psp); - release_firmware(psp->sos_fw); - psp->sos_fw = NULL; - - release_firmware(psp->asd_fw); - psp->asd_fw = NULL; - - release_firmware(psp->ta_fw); - psp->ta_fw = NULL; - - release_firmware(psp->cap_fw); - psp->cap_fw = NULL; - - release_firmware(psp->toc_fw); - psp->toc_fw = NULL; + amdgpu_ucode_release(&psp->sos_fw); + amdgpu_ucode_release(&psp->asd_fw); + amdgpu_ucode_release(&psp->ta_fw); + amdgpu_ucode_release(&psp->cap_fw); + amdgpu_ucode_release(&psp->toc_fw); if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) || adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) @@ -624,12 +606,22 @@ psp_cmd_submit_buf(struct psp_context *psp, int timeout = 20000; bool ras_intr = false; bool skip_unsupport = false; + bool dev_entered; if (psp->adev->no_hw_access) return 0; - if (!drm_dev_enter(adev_to_drm(psp->adev), &idx)) - return 0; + dev_entered = drm_dev_enter(adev_to_drm(psp->adev), &idx); + /* + * We allow sending PSP messages LOAD_ASD and UNLOAD_TA without acquiring + * a lock in drm_dev_enter during driver unload because we must call + * drm_dev_unplug as the beginning of unload driver sequence . It is very + * crucial that userspace can't access device instances anymore. 
+ */ + if (!dev_entered) + WARN_ON(psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_LOAD_ASD && + psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_UNLOAD_TA && + psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_INVOKE_CMD); memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); @@ -694,7 +686,8 @@ psp_cmd_submit_buf(struct psp_context *psp, } exit: - drm_dev_exit(idx); + if (dev_entered) + drm_dev_exit(idx); return ret; } @@ -797,9 +790,13 @@ static int psp_tmr_init(struct psp_context *psp) if (!psp->tmr_bo) { pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; - ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT, - AMDGPU_GEM_DOMAIN_VRAM, - &psp->tmr_bo, &psp->tmr_mc_addr, pptr); + ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, + PSP_TMR_ALIGNMENT, + AMDGPU_HAS_VRAM(psp->adev) ? + AMDGPU_GEM_DOMAIN_VRAM : + AMDGPU_GEM_DOMAIN_GTT, + &psp->tmr_bo, &psp->tmr_mc_addr, + pptr); } return ret; @@ -1092,7 +1089,8 @@ int psp_ta_init_shared_buf(struct psp_context *psp, * physical) for ta to host memory */ return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &mem_ctx->shared_bo, &mem_ctx->shared_mc_addr, &mem_ctx->shared_buf); @@ -1901,7 +1899,7 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_stat static int psp_securedisplay_initialize(struct psp_context *psp) { int ret; - struct securedisplay_cmd *securedisplay_cmd; + struct ta_securedisplay_cmd *securedisplay_cmd; /* * TODO: bypass the initialize in sriov for now @@ -2908,25 +2906,15 @@ int psp_ring_cmd_submit(struct psp_context *psp, return 0; } -int psp_init_asd_microcode(struct psp_context *psp, - const char *chip_name) +int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name) { struct amdgpu_device *adev = psp->adev; char fw_name[PSP_FW_NAME_LEN]; const struct psp_firmware_header_v1_0 *asd_hdr; int err = 0; - if (!chip_name) { - dev_err(adev->dev, "invalid chip name for asd microcode\n"); - return -EINVAL; - } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); - err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); - if (err) - goto out; - - err = amdgpu_ucode_validate(adev->psp.asd_fw); + err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name); if (err) goto out; @@ -2938,31 +2926,19 @@ int psp_init_asd_microcode(struct psp_context *psp, le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); return 0; out: - dev_err(adev->dev, "fail to initialize asd microcode\n"); - release_firmware(adev->psp.asd_fw); - adev->psp.asd_fw = NULL; + amdgpu_ucode_release(&adev->psp.asd_fw); return err; } -int psp_init_toc_microcode(struct psp_context *psp, - const char *chip_name) +int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name) { struct amdgpu_device *adev = psp->adev; char fw_name[PSP_FW_NAME_LEN]; const struct psp_firmware_header_v1_0 *toc_hdr; int err = 0; - if (!chip_name) { - dev_err(adev->dev, "invalid chip name for toc microcode\n"); - return -EINVAL; - } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name); - err = request_firmware(&adev->psp.toc_fw, fw_name, adev->dev); - if (err) - goto out; - - err = amdgpu_ucode_validate(adev->psp.toc_fw); + err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name); if (err) goto out; @@ -2974,9 +2950,7 @@ int psp_init_toc_microcode(struct psp_context *psp, le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); return 0; out: - dev_err(adev->dev, "fail to request/validate toc 
microcode\n"); - release_firmware(adev->psp.toc_fw); - adev->psp.toc_fw = NULL; + amdgpu_ucode_release(&adev->psp.toc_fw); return err; } @@ -3107,8 +3081,7 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev) return 0; } -int psp_init_sos_microcode(struct psp_context *psp, - const char *chip_name) +int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) { struct amdgpu_device *adev = psp->adev; char fw_name[PSP_FW_NAME_LEN]; @@ -3121,17 +3094,8 @@ int psp_init_sos_microcode(struct psp_context *psp, uint8_t *ucode_array_start_addr; int fw_index = 0; - if (!chip_name) { - dev_err(adev->dev, "invalid chip name for sos microcode\n"); - return -EINVAL; - } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name); - err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev); - if (err) - goto out; - - err = amdgpu_ucode_validate(adev->psp.sos_fw); + err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name); if (err) goto out; @@ -3203,10 +3167,7 @@ int psp_init_sos_microcode(struct psp_context *psp, return 0; out: - dev_err(adev->dev, - "failed to init sos firmware\n"); - release_firmware(adev->psp.sos_fw); - adev->psp.sos_fw = NULL; + amdgpu_ucode_release(&adev->psp.sos_fw); return err; } @@ -3272,41 +3233,76 @@ static int parse_ta_bin_descriptor(struct psp_context *psp, return 0; } -int psp_init_ta_microcode(struct psp_context *psp, - const char *chip_name) +static int parse_ta_v1_microcode(struct psp_context *psp) { + const struct ta_firmware_header_v1_0 *ta_hdr; struct amdgpu_device *adev = psp->adev; - char fw_name[PSP_FW_NAME_LEN]; - const struct ta_firmware_header_v2_0 *ta_hdr; - int err = 0; - int ta_index = 0; - if (!chip_name) { - dev_err(adev->dev, "invalid chip name for ta microcode\n"); + ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; + + if (le16_to_cpu(ta_hdr->header.header_version_major) != 1) return -EINVAL; - } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); - err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); - if (err) - goto out; + adev->psp.xgmi_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->xgmi.fw_version); + adev->psp.xgmi_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->xgmi.size_bytes); + adev->psp.xgmi_context.context.bin_desc.start_addr = + (uint8_t *)ta_hdr + + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + + adev->psp.ras_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->ras.fw_version); + adev->psp.ras_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->ras.size_bytes); + adev->psp.ras_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr + + le32_to_cpu(ta_hdr->ras.offset_bytes); + + adev->psp.hdcp_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->hdcp.fw_version); + adev->psp.hdcp_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->hdcp.size_bytes); + adev->psp.hdcp_context.context.bin_desc.start_addr = + (uint8_t *)ta_hdr + + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + + adev->psp.dtm_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->dtm.fw_version); + adev->psp.dtm_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->dtm.size_bytes); + adev->psp.dtm_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + + le32_to_cpu(ta_hdr->dtm.offset_bytes); + + adev->psp.securedisplay_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->securedisplay.fw_version); + 
adev->psp.securedisplay_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->securedisplay.size_bytes); + adev->psp.securedisplay_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + + le32_to_cpu(ta_hdr->securedisplay.offset_bytes); + + adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - err = amdgpu_ucode_validate(adev->psp.ta_fw); - if (err) - goto out; + return 0; +} + +static int parse_ta_v2_microcode(struct psp_context *psp) +{ + const struct ta_firmware_header_v2_0 *ta_hdr; + struct amdgpu_device *adev = psp->adev; + int err = 0; + int ta_index = 0; ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data; - if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) { - dev_err(adev->dev, "unsupported TA header version\n"); - err = -EINVAL; - goto out; - } + if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) + return -EINVAL; if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { dev_err(adev->dev, "packed TA count exceeds maximum limit\n"); - err = -EINVAL; - goto out; + return -EINVAL; } for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) { @@ -3314,19 +3310,44 @@ int psp_init_ta_microcode(struct psp_context *psp, &ta_hdr->ta_fw_bin[ta_index], ta_hdr); if (err) - goto out; + return err; } return 0; -out: - dev_err(adev->dev, "fail to initialize ta microcode\n"); - release_firmware(adev->psp.ta_fw); - adev->psp.ta_fw = NULL; +} + +int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name) +{ + const struct common_firmware_header *hdr; + struct amdgpu_device *adev = psp->adev; + char fw_name[PSP_FW_NAME_LEN]; + int err; + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); + err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name); + if (err) + return err; + + hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data; + switch (le16_to_cpu(hdr->header_version_major)) { + case 1: + err = parse_ta_v1_microcode(psp); + break; + case 2: + err = parse_ta_v2_microcode(psp); + break; + default: + dev_err(adev->dev, "unsupported TA header version\n"); + err = -EINVAL; + } + + if (err) + amdgpu_ucode_release(&adev->psp.ta_fw); + return err; } -int psp_init_cap_microcode(struct psp_context *psp, - const char *chip_name) +int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name) { struct amdgpu_device *adev = psp->adev; char fw_name[PSP_FW_NAME_LEN]; @@ -3334,28 +3355,20 @@ int psp_init_cap_microcode(struct psp_context *psp, struct amdgpu_firmware_info *info = NULL; int err = 0; - if (!chip_name) { - dev_err(adev->dev, "invalid chip name for cap microcode\n"); - return -EINVAL; - } - if (!amdgpu_sriov_vf(adev)) { dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n"); return -EINVAL; } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name); - err = request_firmware(&adev->psp.cap_fw, fw_name, adev->dev); - if (err) { - dev_warn(adev->dev, "cap microcode does not exist, skip\n"); - err = 0; - goto out; - } - - err = amdgpu_ucode_validate(adev->psp.cap_fw); + err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name); if (err) { + if (err == -ENODEV) { + dev_warn(adev->dev, "cap microcode does not exist, skip\n"); + err = 0; + goto out; + } dev_err(adev->dev, "fail to initialize cap microcode\n"); - goto out; } info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP]; @@ -3372,8 +3385,7 @@ int psp_init_cap_microcode(struct psp_context *psp, return 0; out: - 
release_firmware(adev->psp.cap_fw);
-	adev->psp.cap_fw = NULL;
+	amdgpu_ucode_release(&adev->psp.cap_fw);
 	return err;
 }
 
@@ -3444,10 +3456,10 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
 
 	/* LFB address which is aligned to 1MB boundary per PSP request */
 	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
-				      AMDGPU_GEM_DOMAIN_VRAM,
-				      &fw_buf_bo,
-				      &fw_pri_mc_addr,
-				      &fw_pri_cpu_addr);
+				      AMDGPU_GEM_DOMAIN_VRAM |
+				      AMDGPU_GEM_DOMAIN_GTT,
+				      &fw_buf_bo, &fw_pri_mc_addr,
+				      &fw_pri_cpu_addr);
 	if (ret)
 		goto rel_buf;
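The firmware conversions above all follow one shape: amdgpu_ucode_request() rolls request_firmware() and the old amdgpu_ucode_validate() step into a single call, and amdgpu_ucode_release() replaces the release_firmware()-plus-NULL pair. A hedged before/after sketch, using the ASD firmware as the running example (the asd_fw_get_* wrappers are invented for illustration):

    /* Before (sketch): request, validate and clean up by hand. Note that
     * amdgpu_ucode_validate() becomes static in this patch, so this form
     * no longer builds outside amdgpu_ucode.c -- which is the point. */
    static int asd_fw_get_old(struct amdgpu_device *adev, const char *fw_name)
    {
            int err;

            err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
            if (err)
                    return err;

            err = amdgpu_ucode_validate(adev->psp.asd_fw);
            if (err) {
                    release_firmware(adev->psp.asd_fw);
                    adev->psp.asd_fw = NULL;
            }
            return err;
    }

    /* After (sketch): one request that validates internally; the matching
     * teardown is amdgpu_ucode_release(&adev->psp.asd_fw), which frees the
     * firmware and clears the pointer. */
    static int asd_fw_get_new(struct amdgpu_device *adev, const char *fw_name)
    {
            return amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name);
    }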
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ad490c1e2f579b..6e543558386da3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -706,13 +706,23 @@ static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
 	return 0;
 }
 
+static int amdgpu_ras_check_feature_allowed(struct amdgpu_device *adev,
+					    struct ras_common_if *head)
+{
+	if (amdgpu_ras_is_feature_allowed(adev, head) ||
+	    amdgpu_ras_is_poison_mode_supported(adev))
+		return 1;
+	else
+		return 0;
+}
+
 /* wrapper of psp_ras_enable_features */
 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
 		struct ras_common_if *head, bool enable)
 {
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 	union ta_ras_cmd_input *info;
-	int ret;
+	int ret = 0;
 
 	if (!con)
 		return -EINVAL;
@@ -736,7 +746,8 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
 	}
 
 	/* Do not enable if it is not allowed. */
-	WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
+	if (enable && !amdgpu_ras_check_feature_allowed(adev, head))
+		goto out;
 
 	/* Only enable ras feature operation handle on host side */
 	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
@@ -754,7 +765,6 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
 
 	/* setup the obj */
 	__amdgpu_ras_feature_enable(adev, head, enable);
-	ret = 0;
 out:
 	if (head->block == AMDGPU_RAS_BLOCK__GFX)
 		kfree(info);
@@ -910,9 +920,6 @@ static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_de
 	if (block >= AMDGPU_RAS_BLOCK__LAST)
 		return NULL;
 
-	if (!amdgpu_ras_is_supported(adev, block))
-		return NULL;
-
 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
 		if (!node->ras_obj) {
 			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
@@ -1087,6 +1094,10 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 					      info->head.block,
 					      info->head.sub_block_index);
 
+	/* injection on a guest isn't allowed, return success directly */
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
 	if (!obj)
 		return -EINVAL;
 
@@ -1122,11 +1133,54 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_ras_query_error_count -- Get error counts of all IPs
+ * amdgpu_ras_query_error_count_helper -- Get error counter for a specific IP
+ * @adev: pointer to AMD GPU device
+ * @ce_count: pointer to an integer to be set to the count of correctable errors.
+ * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
+ * @query_info: pointer to ras_query_if
+ *
+ * Return 0 on query success (or when there is nothing to do), otherwise
+ * return an error on failure
+ */
+static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
+					       unsigned long *ce_count,
+					       unsigned long *ue_count,
+					       struct ras_query_if *query_info)
+{
+	int ret;
+
+	if (!query_info)
+		/* do nothing if query_info is not specified */
+		return 0;
+
+	ret = amdgpu_ras_query_error_status(adev, query_info);
+	if (ret)
+		return ret;
+
+	*ce_count += query_info->ce_count;
+	*ue_count += query_info->ue_count;
+
+	/* some hardware/IP supports read to clear, so there is no need to
+	 * explicitly reset the error status after the query call */
+	if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+	    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
+			dev_warn(adev->dev,
+				 "Failed to reset error counter and error status\n");
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_ras_query_error_count -- Get error counts of all IPs or a specific IP
  * @adev: pointer to AMD GPU device
  * @ce_count: pointer to an integer to be set to the count of correctable errors.
  * @ue_count: pointer to an integer to be set to the count of uncorrectable
  *            errors.
+ * @query_info: pointer to ras_query_if if the query request is only for a
+ *              specific IP block; if it is NULL, then the query request is for
+ *              all the IP blocks that support querying RAS error counters/status
  *
  * If set, @ce_count or @ue_count, count and return the corresponding
  * error counts in those integer pointers. Return 0 if the device
@@ -1134,11 +1188,13 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
  */
 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 				 unsigned long *ce_count,
-				 unsigned long *ue_count)
+				 unsigned long *ue_count,
+				 struct ras_query_if *query_info)
 {
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 	struct ras_manager *obj;
 	unsigned long ce, ue;
+	int ret;
 
 	if (!adev->ras_enabled || !con)
 		return -EOPNOTSUPP;
@@ -1150,26 +1206,23 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 	ce = 0;
 	ue = 0;
 
-	list_for_each_entry(obj, &con->head, node) {
-		struct ras_query_if info = {
-			.head = obj->head,
-		};
-		int res;
-
-		res = amdgpu_ras_query_error_status(adev, &info);
-		if (res)
-			return res;
+	if (!query_info) {
+		/* query all the ip blocks that support the ras query interface */
+		list_for_each_entry(obj, &con->head, node) {
+			struct ras_query_if info = {
+				.head = obj->head,
+			};
 
-		if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
-		    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
-			if (amdgpu_ras_reset_error_status(adev, info.head.block))
-				dev_warn(adev->dev, "Failed to reset error counter and error status");
+			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
 		}
-
-		ce += info.ce_count;
-		ue += info.ue_count;
+	} else {
+		/* query a specific ip block */
+		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
 	}
+
+	if (ret)
+		return ret;
+
 	if (ce_count)
 		*ce_count = ce;
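Callers that only care about one IP can now pass a ras_query_if and skip the walk over every registered block; passing NULL keeps the old count-everything behaviour. A sketch of both call styles (assuming a device whose GFX block registered a ras_if, as amdgpu_gfx_ras_sw_init() above arranges):

    unsigned long ce = 0, ue = 0;
    struct ras_query_if info = {};

    /* All blocks, matching the pre-change behaviour: */
    if (amdgpu_ras_query_error_count(adev, &ce, &ue, NULL) == 0)
            dev_info(adev->dev, "total ce=%lu ue=%lu\n", ce, ue);

    /* One specific block, here GFX; head identifies the block to query: */
    memcpy(&info.head, adev->gfx.ras_if, sizeof(info.head));
    if (amdgpu_ras_query_error_count(adev, &ce, &ue, &info) == 0)
            dev_info(adev->dev, "gfx ce=%lu ue=%lu\n", ce, ue);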
@@ -1564,14 +1617,14 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
 	struct amdgpu_ras_block_object *block_obj =
 		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
 
-	if (!block_obj || !block_obj->hw_ops)
+	if (!block_obj)
 		return;
 
 	/* both query_poison_status and handle_poison_consumption are optional,
 	 * but at least one of them should be implemented if we need poison
 	 * consumption handler
 	 */
-	if (block_obj->hw_ops->query_poison_status) {
+	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
 		poison_stat = block_obj->hw_ops->query_poison_status(adev);
 		if (!poison_stat) {
 			/* Not poison consumption interrupt, no need to handle it */
@@ -1585,7 +1638,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
 	if (!adev->gmc.xgmi.connected_to_cpu)
 		amdgpu_umc_poison_handler(adev, false);
 
-	if (block_obj->hw_ops->handle_poison_consumption)
+	if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
 		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
 
 	/* gpu reset is fallback for failed and default cases */
@@ -1593,6 +1646,8 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
 		dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
 				block_obj->ras_comm.name);
 		amdgpu_ras_reset_gpu(adev);
+	} else {
+		amdgpu_gfx_poison_consumption_handler(adev, entry);
 	}
 }
 
@@ -2344,22 +2399,24 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
 		if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
 			dev_info(adev->dev, "SRAM ECC is active.\n");
-			if (!amdgpu_sriov_vf(adev)) {
+			if (!amdgpu_sriov_vf(adev))
 				adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
 							  1 << AMDGPU_RAS_BLOCK__DF);
-
-				if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
-				    adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
-					adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
-							1 << AMDGPU_RAS_BLOCK__JPEG);
-				else
-					adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
-							1 << AMDGPU_RAS_BLOCK__JPEG);
-			} else {
+			else
 				adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
 							 1 << AMDGPU_RAS_BLOCK__SDMA |
 							 1 << AMDGPU_RAS_BLOCK__GFX);
-			}
+
+			/* VCN/JPEG RAS can be supported in both bare-metal and
+			 * SRIOV environments
+			 */
+			if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
+			    adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
+				adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
+							 1 << AMDGPU_RAS_BLOCK__JPEG);
+			else
+				adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
+							  1 << AMDGPU_RAS_BLOCK__JPEG);
 		} else {
 			dev_info(adev->dev, "SRAM ECC is not presented.\n");
 		}
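The VCN/JPEG handling moves out of the bare-metal-only branch because poison-mode RAS for those blocks also works under SRIOV. The surrounding arithmetic is plain per-block flag manipulation on a mask indexed by the AMDGPU_RAS_BLOCK__* enumerators; a worked sketch of the three forms used above (ras_hw_enabled here is a local stand-in for adev->ras_hw_enabled):

    uint32_t ras_hw_enabled = 0;

    /* |= ~(...) enables every block except the named ones (UMC and DF): */
    ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
                        1 << AMDGPU_RAS_BLOCK__DF);

    /* |= (...) adds specific blocks: */
    ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
                       1 << AMDGPU_RAS_BLOCK__JPEG);

    /* &= ~(...) strips them again: */
    ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
                        1 << AMDGPU_RAS_BLOCK__JPEG);

    /* Testing a single block, as amdgpu_ras_is_supported() does: */
    bool vcn_on = ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__VCN);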
@@ -2395,7 +2452,7 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
 
 	/* Cache new values.
 	 */
-	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
 		atomic_set(&con->ras_ce_count, ce_count);
 		atomic_set(&con->ras_ue_count, ue_count);
 	}
@@ -2405,11 +2462,42 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
 	pm_runtime_put_autosuspend(dev->dev);
 }
 
+static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
+{
+	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+	bool df_poison, umc_poison;
+
+	/* the poison setting is useless on a SRIOV guest */
+	if (amdgpu_sriov_vf(adev) || !con)
+		return;
+
+	/* Init the poison supported flag, the default value is false */
+	if (adev->gmc.xgmi.connected_to_cpu) {
+		/* enabled by default when GPU is connected to CPU */
+		con->poison_supported = true;
+	} else if (adev->df.funcs &&
+		   adev->df.funcs->query_ras_poison_mode &&
+		   adev->umc.ras &&
+		   adev->umc.ras->query_ras_poison_mode) {
+		df_poison =
+			adev->df.funcs->query_ras_poison_mode(adev);
+		umc_poison =
+			adev->umc.ras->query_ras_poison_mode(adev);
+
+		/* Only when poison is set in both DF and UMC can we support it */
+		if (df_poison && umc_poison)
+			con->poison_supported = true;
+		else if (df_poison != umc_poison)
+			dev_warn(adev->dev,
+				 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
+				 df_poison, umc_poison);
+	}
+}
+
 int amdgpu_ras_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 	int r;
-	bool df_poison, umc_poison;
 
 	if (con)
 		return 0;
@@ -2484,26 +2572,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
 		goto release_con;
 	}
 
-	/* Init poison supported flag, the default value is false */
-	if (adev->gmc.xgmi.connected_to_cpu) {
-		/* enabled by default when GPU is connected to CPU */
-		con->poison_supported = true;
-	}
-	else if (adev->df.funcs &&
-	    adev->df.funcs->query_ras_poison_mode &&
-	    adev->umc.ras &&
-	    adev->umc.ras->query_ras_poison_mode) {
-		df_poison =
-			adev->df.funcs->query_ras_poison_mode(adev);
-		umc_poison =
-			adev->umc.ras->query_ras_poison_mode(adev);
-		/* Only poison is set in both DF and UMC, we can support it */
-		if (df_poison && umc_poison)
-			con->poison_supported = true;
-		else if (df_poison != umc_poison)
-			dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
-					df_poison, umc_poison);
-	}
+	amdgpu_ras_query_poison_mode(adev);
 
 	if (amdgpu_ras_fs_init(adev)) {
 		r = -EINVAL;
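amdgpu_ras_query_poison_mode() preserves the decision table of the inline code it replaces: connected-to-CPU implies poison support, otherwise DF and UMC must agree. The same logic condensed to a truth table, with the two query callbacks reduced to booleans (illustrative only, not driver API):

    /* Condensed form of the decision in amdgpu_ras_query_poison_mode(). */
    static bool poison_supported(bool connected_to_cpu,
                                 bool df_poison, bool umc_poison)
    {
            if (connected_to_cpu)
                    return true;    /* enabled by default on CPU-connected GPUs */
            if (df_poison && umc_poison)
                    return true;    /* both blocks agree: supported */
            /* df_poison != umc_poison is the inconsistent case the
             * driver warns about; either way, not supported. */
            return false;
    }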
@@ -2564,6 +2633,7 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
 {
 	struct amdgpu_ras_block_object *ras_obj = NULL;
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+	struct ras_query_if *query_info;
 	unsigned long ue_count, ce_count;
 	int r;
 
@@ -2605,11 +2675,17 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
 	/* Those are the cached values at init.
 	 */
-	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+	query_info = kzalloc(sizeof(struct ras_query_if), GFP_KERNEL);
+	if (!query_info)
+		return -ENOMEM;
+
+	memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
+
+	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
 		atomic_set(&con->ras_ce_count, ce_count);
 		atomic_set(&con->ras_ue_count, ue_count);
 	}
 
+	kfree(query_info);
 	return 0;
 
 interrupt:
@@ -2946,11 +3022,26 @@ int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_co
 
 int amdgpu_ras_is_supported(struct amdgpu_device *adev, unsigned int block)
 {
+	int ret = 0;
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
 	if (block >= AMDGPU_RAS_BLOCK_COUNT)
 		return 0;
-	return ras && (adev->ras_enabled & (1 << block));
+
+	ret = ras && (adev->ras_enabled & (1 << block));
+
+	/* For the special case of an asic with mem ecc enabled but sram ecc
+	 * not enabled: even if the ras block is not marked supported in
+	 * .ras_enabled, the block can be considered to support the ras
+	 * function as long as the asic supports poison mode and the block
+	 * has a ras configuration.
+	 */
+	if (!ret &&
+	    amdgpu_ras_is_poison_mode_supported(adev) &&
+	    amdgpu_ras_get_ras_block(adev, block, 0))
+		ret = 1;
+
+	return ret;
 }
 
 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index bf5a95104ec115..f2ad999993f66c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -540,7 +540,8 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev);
 
 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 				 unsigned long *ce_count,
-				 unsigned long *ue_count);
+				 unsigned long *ue_count,
+				 struct ras_query_if *query_info);
 
 /* error handling functions */
 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index f778466bb9dbdf..6437ead87e5fb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -24,6 +24,7 @@
 #include "amdgpu_reset.h"
 #include "aldebaran.h"
 #include "sienna_cichlid.h"
+#include "smu_v13_0_10.h"
 
 int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
 			     struct amdgpu_reset_handler *handler)
@@ -44,6 +45,9 @@ int amdgpu_reset_init(struct amdgpu_device *adev)
 	case IP_VERSION(11, 0, 7):
 		ret = sienna_cichlid_reset_init(adev);
 		break;
+	case IP_VERSION(13, 0, 10):
+		ret = smu_v13_0_10_reset_init(adev);
+		break;
 	default:
 		break;
 	}
@@ -62,6 +66,9 @@ int amdgpu_reset_fini(struct amdgpu_device *adev)
 	case IP_VERSION(11, 0, 7):
 		ret = sienna_cichlid_reset_fini(adev);
 		break;
+	case IP_VERSION(13, 0, 10):
+		ret = smu_v13_0_10_reset_fini(adev);
+		break;
 	default:
 		break;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index 012b72d00e0400..85fb730d9fc84f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -93,7 +93,8 @@ int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
 	/* allocate save restore block */
 	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_VRAM,
+				      AMDGPU_GEM_DOMAIN_VRAM |
+				      AMDGPU_GEM_DOMAIN_GTT,
 				      &adev->gfx.rlc.save_restore_obj,
 				      &adev->gfx.rlc.save_restore_gpu_addr,
 				      (void **)&adev->gfx.rlc.sr_ptr);
@@ -130,7 +131,8 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
 	/* allocate clear state block */
adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev); r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->gfx.rlc.clear_state_obj, &adev->gfx.rlc.clear_state_gpu_addr, (void **)&adev->gfx.rlc.cs_ptr); @@ -156,7 +158,8 @@ int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev) int r; r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->gfx.rlc.cp_table_obj, &adev->gfx.rlc.cp_table_gpu_addr, (void **)&adev->gfx.rlc.cp_table_ptr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index ea5278f094c081..231ca06bc9c78a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -154,16 +154,11 @@ int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev, static int amdgpu_sdma_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst) { - int err = 0; uint16_t version_major; const struct common_firmware_header *header = NULL; const struct sdma_firmware_header_v1_0 *hdr; const struct sdma_firmware_header_v2_0 *hdr_v2; - err = amdgpu_ucode_validate(sdma_inst->fw); - if (err) - return err; - header = (const struct common_firmware_header *) sdma_inst->fw->data; version_major = le16_to_cpu(header->header_version_major); @@ -195,7 +190,7 @@ void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev, int i; for (i = 0; i < adev->sdma.num_instances; i++) { - release_firmware(adev->sdma.instance[i].fw); + amdgpu_ucode_release(&adev->sdma.instance[i].fw); if (duplicate) break; } @@ -205,16 +200,22 @@ void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev, } int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, - char *fw_name, u32 instance, - bool duplicate) + u32 instance, bool duplicate) { struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; - int err = 0, i; + int err, i; const struct sdma_firmware_header_v2_0 *sdma_hdr; uint16_t version_major; - - err = request_firmware(&adev->sdma.instance[instance].fw, fw_name, adev->dev); + char ucode_prefix[30]; + char fw_name[40]; + + amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix)); + if (instance == 0) + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); + else + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s%d.bin", ucode_prefix, instance); + err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw, fw_name); if (err) goto out; @@ -279,10 +280,8 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, } out: - if (err) { - DRM_ERROR("SDMA: Failed to init firmware \"%s\"\n", fw_name); + if (err) amdgpu_sdma_destroy_inst_ctx(adev, duplicate); - } return err; } @@ -306,3 +305,38 @@ void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev) } } } + +int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev) +{ + int err = 0; + struct amdgpu_sdma_ras *ras = NULL; + + /* adev->sdma.ras is NULL, which means sdma does not + * support ras function, then do nothing here. 
+ */ + if (!adev->sdma.ras) + return 0; + + ras = adev->sdma.ras; + + err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); + if (err) { + dev_err(adev->dev, "Failed to register sdma ras block!\n"); + return err; + } + + strcpy(ras->ras_block.ras_comm.name, "sdma"); + ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA; + ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->sdma.ras_if = &ras->ras_block.ras_comm; + + /* If the IP does not define its own ras_late_init, use the default */ + if (!ras->ras_block.ras_late_init) + ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init; + + /* If the IP does not define its own ras_cb, use the default */ + if (!ras->ras_block.ras_cb) + ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 7d99205c2e018b..fc8528812598b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -124,10 +124,11 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev, int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); -int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, - char *fw_name, u32 instance, bool duplicate); +int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance, + bool duplicate); void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev, bool duplicate); void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev); +int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c index 2c1d82fc4c3456..8ed0e073656f88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c @@ -77,11 +77,11 @@ void psp_securedisplay_parse_resp_status(struct psp_context *psp, } } -void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd, +void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct ta_securedisplay_cmd **cmd, enum ta_securedisplay_command command_id) { - *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf; - memset(*cmd, 0, sizeof(struct securedisplay_cmd)); + *cmd = (struct ta_securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf; + memset(*cmd, 0, sizeof(struct ta_securedisplay_cmd)); (*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE; (*cmd)->cmd_id = command_id; } @@ -93,7 +93,7 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u { struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; struct psp_context *psp = &adev->psp; - struct securedisplay_cmd *securedisplay_cmd; + struct ta_securedisplay_cmd *securedisplay_cmd; struct drm_device *dev = adev_to_drm(adev); uint32_t phy_id; uint32_t op; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h index fe98574748f440..456ad68ed4b2f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h @@ -30,7 +30,7 @@ void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev); void psp_securedisplay_parse_resp_status(struct psp_context *psp, enum ta_securedisplay_status status); -void psp_prep_securedisplay_cmd_buf(struct 
psp_context *psp, struct securedisplay_cmd **cmd, +void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct ta_securedisplay_cmd **cmd, enum ta_securedisplay_command command_id); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 677ad2016976db..98d91ebf5c26bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -153,10 +153,10 @@ TRACE_EVENT(amdgpu_cs, TP_fast_assign( __entry->bo_list = p->bo_list; - __entry->ring = to_amdgpu_ring(job->base.sched)->idx; + __entry->ring = to_amdgpu_ring(job->base.entity->rq->sched)->idx; __entry->dw = ib->length_dw; __entry->fences = amdgpu_fence_count_emitted( - to_amdgpu_ring(job->base.sched)); + to_amdgpu_ring(job->base.entity->rq->sched)); ), TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u", __entry->bo_list, __entry->ring, __entry->dw, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 55e0284b2bdddc..c5ef7f7bdc15cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -44,10 +44,10 @@ #include #include -#include -#include +#include #include #include +#include #include #include @@ -1679,10 +1679,10 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) /* reserve vram for mem train according to TMR location */ amdgpu_ttm_training_data_block_init(adev); ret = amdgpu_bo_create_kernel_at(adev, - ctx->c2p_train_data_offset, - ctx->train_data_size, - &ctx->c2p_bo, - NULL); + ctx->c2p_train_data_offset, + ctx->train_data_size, + &ctx->c2p_bo, + NULL); if (ret) { DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); amdgpu_ttm_training_reserve_vram_fini(adev); @@ -1692,10 +1692,10 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) } ret = amdgpu_bo_create_kernel_at(adev, - adev->gmc.real_vram_size - adev->mman.discovery_tmr_size, - adev->mman.discovery_tmr_size, - &adev->mman.discovery_memory, - NULL); + adev->gmc.real_vram_size - adev->mman.discovery_tmr_size, + adev->mman.discovery_tmr_size, + &adev->mman.discovery_memory, + NULL); if (ret) { DRM_ERROR("alloc tmr failed(%d)!\n", ret); amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL); @@ -1718,7 +1718,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) { uint64_t gtt_size; int r; - u64 vis_vram_limit; mutex_init(&adev->mman.gtt_window_lock); @@ -1741,12 +1740,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; } - /* Reduce size of CPU-visible VRAM if requested */ - vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024; - if (amdgpu_vis_vram_limit > 0 && - vis_vram_limit <= adev->gmc.visible_vram_size) - adev->gmc.visible_vram_size = vis_vram_limit; - /* Change the size here instead of the init above so only lpfn is affected */ amdgpu_ttm_set_buffer_funcs_status(adev, false); #ifdef CONFIG_64BIT diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 5cb62e6249c231..380b89114341d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -504,7 +504,7 @@ void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr) } } -int amdgpu_ucode_validate(const struct firmware *fw) +static int amdgpu_ucode_validate(const struct firmware *fw) { const struct common_firmware_header *hdr = (const struct common_firmware_header *)fw->data; @@ -1059,12 +1059,229 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) return 0; } +static const 
char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int block_type) +{ + if (block_type == MP0_HWIP) { + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(9, 0, 0): + switch (adev->asic_type) { + case CHIP_VEGA10: + return "vega10"; + case CHIP_VEGA12: + return "vega12"; + default: + return NULL; + } + case IP_VERSION(10, 0, 0): + case IP_VERSION(10, 0, 1): + if (adev->asic_type == CHIP_RAVEN) { + if (adev->apu_flags & AMD_APU_IS_RAVEN2) + return "raven2"; + else if (adev->apu_flags & AMD_APU_IS_PICASSO) + return "picasso"; + return "raven"; + } + break; + case IP_VERSION(11, 0, 0): + return "navi10"; + case IP_VERSION(11, 0, 2): + return "vega20"; + case IP_VERSION(11, 0, 3): + return "renoir"; + case IP_VERSION(11, 0, 4): + return "arcturus"; + case IP_VERSION(11, 0, 5): + return "navi14"; + case IP_VERSION(11, 0, 7): + return "sienna_cichlid"; + case IP_VERSION(11, 0, 9): + return "navi12"; + case IP_VERSION(11, 0, 11): + return "navy_flounder"; + case IP_VERSION(11, 0, 12): + return "dimgrey_cavefish"; + case IP_VERSION(11, 0, 13): + return "beige_goby"; + case IP_VERSION(11, 5, 0): + return "vangogh"; + case IP_VERSION(12, 0, 1): + return "green_sardine"; + case IP_VERSION(13, 0, 2): + return "aldebaran"; + case IP_VERSION(13, 0, 1): + case IP_VERSION(13, 0, 3): + return "yellow_carp"; + } + } else if (block_type == MP1_HWIP) { + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(9, 0, 0): + case IP_VERSION(10, 0, 0): + case IP_VERSION(10, 0, 1): + case IP_VERSION(11, 0, 2): + if (adev->asic_type == CHIP_ARCTURUS) + return "arcturus_smc"; + return NULL; + case IP_VERSION(11, 0, 0): + return "navi10_smc"; + case IP_VERSION(11, 0, 5): + return "navi14_smc"; + case IP_VERSION(11, 0, 9): + return "navi12_smc"; + case IP_VERSION(11, 0, 7): + return "sienna_cichlid_smc"; + case IP_VERSION(11, 0, 11): + return "navy_flounder_smc"; + case IP_VERSION(11, 0, 12): + return "dimgrey_cavefish_smc"; + case IP_VERSION(11, 0, 13): + return "beige_goby_smc"; + case IP_VERSION(13, 0, 2): + return "aldebaran_smc"; + } + } else if (block_type == SDMA0_HWIP) { + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 0, 0): + return "vega10_sdma"; + case IP_VERSION(4, 0, 1): + return "vega12_sdma"; + case IP_VERSION(4, 1, 0): + case IP_VERSION(4, 1, 1): + if (adev->apu_flags & AMD_APU_IS_RAVEN2) + return "raven2_sdma"; + else if (adev->apu_flags & AMD_APU_IS_PICASSO) + return "picasso_sdma"; + return "raven_sdma"; + case IP_VERSION(4, 1, 2): + if (adev->apu_flags & AMD_APU_IS_RENOIR) + return "renoir_sdma"; + return "green_sardine_sdma"; + case IP_VERSION(4, 2, 0): + return "vega20_sdma"; + case IP_VERSION(4, 2, 2): + return "arcturus_sdma"; + case IP_VERSION(4, 4, 0): + return "aldebaran_sdma"; + case IP_VERSION(5, 0, 0): + return "navi10_sdma"; + case IP_VERSION(5, 0, 1): + return "cyan_skillfish2_sdma"; + case IP_VERSION(5, 0, 2): + return "navi14_sdma"; + case IP_VERSION(5, 0, 5): + return "navi12_sdma"; + case IP_VERSION(5, 2, 0): + return "sienna_cichlid_sdma"; + case IP_VERSION(5, 2, 2): + return "navy_flounder_sdma"; + case IP_VERSION(5, 2, 4): + return "dimgrey_cavefish_sdma"; + case IP_VERSION(5, 2, 5): + return "beige_goby_sdma"; + case IP_VERSION(5, 2, 3): + return "yellow_carp_sdma"; + case IP_VERSION(5, 2, 1): + return "vangogh_sdma"; + } + } else if (block_type == UVD_HWIP) { + switch (adev->ip_versions[UVD_HWIP][0]) { + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): + if (adev->apu_flags & AMD_APU_IS_RAVEN2) + return "raven2_vcn"; + else if 
(adev->apu_flags & AMD_APU_IS_PICASSO) + return "picasso_vcn"; + return "raven_vcn"; + case IP_VERSION(2, 5, 0): + return "arcturus_vcn"; + case IP_VERSION(2, 2, 0): + if (adev->apu_flags & AMD_APU_IS_RENOIR) + return "renoir_vcn"; + return "green_sardine_vcn"; + case IP_VERSION(2, 6, 0): + return "aldebaran_vcn"; + case IP_VERSION(2, 0, 0): + return "navi10_vcn"; + case IP_VERSION(2, 0, 2): + if (adev->asic_type == CHIP_NAVI12) + return "navi12_vcn"; + return "navi14_vcn"; + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 64): + case IP_VERSION(3, 0, 192): + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) + return "sienna_cichlid_vcn"; + return "navy_flounder_vcn"; + case IP_VERSION(3, 0, 2): + return "vangogh_vcn"; + case IP_VERSION(3, 0, 16): + return "dimgrey_cavefish_vcn"; + case IP_VERSION(3, 0, 33): + return "beige_goby_vcn"; + case IP_VERSION(3, 1, 1): + return "yellow_carp_vcn"; + } + } else if (block_type == GC_HWIP) { + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + return "vega10"; + case IP_VERSION(9, 2, 1): + return "vega12"; + case IP_VERSION(9, 4, 0): + return "vega20"; + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): + if (adev->apu_flags & AMD_APU_IS_RAVEN2) + return "raven2"; + else if (adev->apu_flags & AMD_APU_IS_PICASSO) + return "picasso"; + return "raven"; + case IP_VERSION(9, 4, 1): + return "arcturus"; + case IP_VERSION(9, 3, 0): + if (adev->apu_flags & AMD_APU_IS_RENOIR) + return "renoir"; + return "green_sardine"; + case IP_VERSION(9, 4, 2): + return "aldebaran"; + case IP_VERSION(10, 1, 10): + return "navi10"; + case IP_VERSION(10, 1, 1): + return "navi14"; + case IP_VERSION(10, 1, 2): + return "navi12"; + case IP_VERSION(10, 3, 0): + return "sienna_cichlid"; + case IP_VERSION(10, 3, 2): + return "navy_flounder"; + case IP_VERSION(10, 3, 1): + return "vangogh"; + case IP_VERSION(10, 3, 4): + return "dimgrey_cavefish"; + case IP_VERSION(10, 3, 5): + return "beige_goby"; + case IP_VERSION(10, 3, 3): + return "yellow_carp"; + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 1, 4): + return "cyan_skillfish2"; + } + } + return NULL; +} + void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len) { int maj, min, rev; char *ip_name; + const char *legacy; uint32_t version = adev->ip_versions[block_type][0]; + legacy = amdgpu_ucode_legacy_naming(adev, block_type); + if (legacy) { + snprintf(ucode_prefix, len, "%s", legacy); + return; + } + switch (block_type) { case GC_HWIP: ip_name = "gc"; @@ -1091,3 +1308,39 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, snprintf(ucode_prefix, len, "%s_%d_%d_%d", ip_name, maj, min, rev); } + +/* + * amdgpu_ucode_request - Fetch and validate amdgpu microcode + * + * @adev: amdgpu device + * @fw: pointer to load firmware to + * @fw_name: firmware to load + * + * This is a helper that will use request_firmware and amdgpu_ucode_validate + * to load and run basic validation on firmware. If the load fails, remap + * the error code to -ENODEV, so that early_init functions will fail to load. 
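+ *
+ * Returns 0 on success, -ENODEV when the binary cannot be loaded, or the
+ * validation error otherwise. A typical call site, sketched from the
+ * callers converted in this series, pairs it with amdgpu_ucode_release():
+ *
+ *   r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name);
+ *   if (r)
+ *       amdgpu_ucode_release(&adev->vcn.fw);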
+ */ +int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw, + const char *fw_name) +{ + int err = request_firmware(fw, fw_name, adev->dev); + + if (err) + return -ENODEV; + err = amdgpu_ucode_validate(*fw); + if (err) + dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name); + + return err; +} + +/* + * amdgpu_ucode_release - Release firmware microcode + * + * @fw: pointer to firmware to release + */ +void amdgpu_ucode_release(const struct firmware **fw) +{ + release_firmware(*fw); + *fw = NULL; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 552e06929229c9..b03321e7d2d893 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -538,12 +538,15 @@ struct amdgpu_firmware { void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr); void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr); +void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr); void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr); void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr); void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr); void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr); void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr); -int amdgpu_ucode_validate(const struct firmware *fw); +int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw, + const char *fw_name); +void amdgpu_ucode_release(const struct firmware **fw); bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr, uint16_t hdr_major, uint16_t hdr_minor); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index f76c19fc03926f..1c7fcb4f238089 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -169,25 +169,33 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset) { int ret = AMDGPU_RAS_SUCCESS; - if (!adev->gmc.xgmi.connected_to_cpu) { - struct ras_err_data err_data = {0, 0, 0, NULL}; - struct ras_common_if head = { - .block = AMDGPU_RAS_BLOCK__UMC, - }; - struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head); - - ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset); - - if (ret == AMDGPU_RAS_SUCCESS && obj) { - obj->err_data.ue_count += err_data.ue_count; - obj->err_data.ce_count += err_data.ce_count; + if (!amdgpu_sriov_vf(adev)) { + if (!adev->gmc.xgmi.connected_to_cpu) { + struct ras_err_data err_data = {0, 0, 0, NULL}; + struct ras_common_if head = { + .block = AMDGPU_RAS_BLOCK__UMC, + }; + struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head); + + ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset); + + if (ret == AMDGPU_RAS_SUCCESS && obj) { + obj->err_data.ue_count += err_data.ue_count; + obj->err_data.ce_count += err_data.ce_count; + } + } else if (reset) { + /* MCA poison handler is only responsible for GPU reset, + * let MCA notifier do page retirement. + */ + kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + amdgpu_ras_reset_gpu(adev); } - } else if (reset) { - /* MCA poison handler is only responsible for GPU reset, - * let MCA notifier do page retirement. 
- */ - kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - amdgpu_ras_reset_gpu(adev); + } else { + if (adev->virt.ops && adev->virt.ops->ras_poison_handler) + adev->virt.ops->ras_poison_handler(adev); + else + dev_warn(adev->dev, + "No ras_poison_handler interface in SRIOV!\n"); } return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index e00bb654e24b0c..632a6ded57355e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -260,19 +260,11 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) return -EINVAL; } - r = request_firmware(&adev->uvd.fw, fw_name, adev->dev); - if (r) { - dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n", - fw_name); - return r; - } - - r = amdgpu_ucode_validate(adev->uvd.fw); + r = amdgpu_ucode_request(adev, &adev->uvd.fw, fw_name); if (r) { dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n", fw_name); - release_firmware(adev->uvd.fw); - adev->uvd.fw = NULL; + amdgpu_ucode_release(&adev->uvd.fw); return r; } @@ -331,8 +323,11 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) if (adev->uvd.harvest_config & (1 << j)) continue; r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo, - &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr); + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, + &adev->uvd.inst[j].vcpu_bo, + &adev->uvd.inst[j].gpu_addr, + &adev->uvd.inst[j].cpu_addr); if (r) { dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); return r; @@ -394,7 +389,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); } amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr); - release_firmware(adev->uvd.fw); + amdgpu_ucode_release(&adev->uvd.fw); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index b239e874f2d546..2fb61410b1c02e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -158,19 +158,11 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) return -EINVAL; } - r = request_firmware(&adev->vce.fw, fw_name, adev->dev); - if (r) { - dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n", - fw_name); - return r; - } - - r = amdgpu_ucode_validate(adev->vce.fw); + r = amdgpu_ucode_request(adev, &adev->vce.fw, fw_name); if (r) { dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n", fw_name); - release_firmware(adev->vce.fw); - adev->vce.fw = NULL; + amdgpu_ucode_release(&adev->vce.fw); return r; } @@ -186,7 +178,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) (binary_id << 8)); r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, + &adev->vce.vcpu_bo, &adev->vce.gpu_addr, &adev->vce.cpu_addr); if (r) { dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); @@ -226,7 +220,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) for (i = 0; i < adev->vce.num_rings; i++) amdgpu_ring_fini(&adev->vce.ring[i]); - release_firmware(adev->vce.fw); + amdgpu_ucode_release(&adev->vce.fw); mutex_destroy(&adev->vce.idle_mutex); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index b1622ac9949ffc..25217b05c0ea8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -36,26 
+36,26 @@ #include "soc15d.h" /* Firmware Names */ -#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin" -#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin" -#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin" -#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin" -#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin" -#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin" -#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin" -#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin" -#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin" -#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin" -#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin" -#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin" +#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin" +#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin" +#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin" +#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin" +#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin" +#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin" +#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin" +#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin" +#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin" +#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin" +#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin" +#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin" #define FIRMWARE_DIMGREY_CAVEFISH "amdgpu/dimgrey_cavefish_vcn.bin" -#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin" -#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin" -#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin" -#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin" -#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin" -#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin" -#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin" +#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin" +#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin" +#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin" +#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin" +#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin" +#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin" +#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN); MODULE_FIRMWARE(FIRMWARE_PICASSO); @@ -80,10 +80,24 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_4); static void amdgpu_vcn_idle_work_handler(struct work_struct *work); +int amdgpu_vcn_early_init(struct amdgpu_device *adev) +{ + char ucode_prefix[30]; + char fw_name[40]; + int r; + + amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix)); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); + r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name); + if (r) + amdgpu_ucode_release(&adev->vcn.fw); + + return r; +} + int amdgpu_vcn_sw_init(struct amdgpu_device *adev) { unsigned long bo_size; - const char *fw_name; const struct common_firmware_header *hdr; unsigned char fw_check; unsigned int fw_shared_size, log_offset; @@ -96,131 +110,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; i++) atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); - switch (adev->ip_versions[UVD_HWIP][0]) { - case IP_VERSION(1, 0, 0): - case IP_VERSION(1, 0, 1): - if (adev->apu_flags & AMD_APU_IS_RAVEN2) - fw_name = FIRMWARE_RAVEN2; - else if (adev->apu_flags & AMD_APU_IS_PICASSO) - fw_name = FIRMWARE_PICASSO; - else - fw_name = FIRMWARE_RAVEN; - break; - case IP_VERSION(2, 5, 0): - fw_name = FIRMWARE_ARCTURUS; - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & 
AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(2, 2, 0): - if (adev->apu_flags & AMD_APU_IS_RENOIR) - fw_name = FIRMWARE_RENOIR; - else - fw_name = FIRMWARE_GREEN_SARDINE; - - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(2, 6, 0): - fw_name = FIRMWARE_ALDEBARAN; - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(2, 0, 0): - fw_name = FIRMWARE_NAVI10; - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(2, 0, 2): - if (adev->asic_type == CHIP_NAVI12) - fw_name = FIRMWARE_NAVI12; - else - fw_name = FIRMWARE_NAVI14; - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(3, 0, 0): - case IP_VERSION(3, 0, 64): - case IP_VERSION(3, 0, 192): - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) - fw_name = FIRMWARE_SIENNA_CICHLID; - else - fw_name = FIRMWARE_NAVY_FLOUNDER; - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(3, 0, 2): - fw_name = FIRMWARE_VANGOGH; - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(3, 0, 16): - fw_name = FIRMWARE_DIMGREY_CAVEFISH; - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(3, 0, 33): - fw_name = FIRMWARE_BEIGE_GOBY; - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(3, 1, 1): - fw_name = FIRMWARE_YELLOW_CARP; - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(3, 1, 2): - fw_name = FIRMWARE_VCN_3_1_2; - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(4, 0, 0): - fw_name = FIRMWARE_VCN4_0_0; - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(4, 0, 2): - fw_name = FIRMWARE_VCN4_0_2; - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(4, 0, 4): - fw_name = FIRMWARE_VCN4_0_4; - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - default: - return -EINVAL; - } - - r = request_firmware(&adev->vcn.fw, fw_name, adev->dev); - if (r) { - dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n", - fw_name); - return r; - } - - r = amdgpu_ucode_validate(adev->vcn.fw); - if (r) { - dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n", - fw_name); - release_firmware(adev->vcn.fw); - adev->vcn.fw = NULL; - return r; - } + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + 
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; hdr = (const struct common_firmware_header *)adev->vcn.fw->data; adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); @@ -274,8 +166,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) continue; r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo, - &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr); + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, + &adev->vcn.inst[i].vcpu_bo, + &adev->vcn.inst[i].gpu_addr, + &adev->vcn.inst[i].cpu_addr); if (r) { dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r); return r; @@ -296,8 +191,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) if (adev->vcn.indirect_sram) { r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo, - &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr); + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, + &adev->vcn.inst[i].dpg_sram_bo, + &adev->vcn.inst[i].dpg_sram_gpu_addr, + &adev->vcn.inst[i].dpg_sram_cpu_addr); if (r) { dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r); return r; @@ -333,7 +231,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]); } - release_firmware(adev->vcn.fw); + amdgpu_ucode_release(&adev->vcn.fw); mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround); mutex_destroy(&adev->vcn.vcn_pg_lock); @@ -1250,8 +1148,16 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev, if (!ras_if) return 0; - ih_data.head = *ras_if; - amdgpu_ras_interrupt_dispatch(adev, &ih_data); + if (!amdgpu_sriov_vf(adev)) { + ih_data.head = *ras_if; + amdgpu_ras_interrupt_dispatch(adev, &ih_data); + } else { + if (adev->virt.ops && adev->virt.ops->ras_poison_handler) + adev->virt.ops->ras_poison_handler(adev); + else + dev_warn(adev->dev, + "No ras_poison_handler interface in SRIOV for VCN!\n"); + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index dbb8d68a30c610..d3e2af90290782 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -369,6 +369,7 @@ enum vcn_ring_type { VCN_UNIFIED_RING, }; +int amdgpu_vcn_early_init(struct amdgpu_device *adev); int amdgpu_vcn_sw_init(struct amdgpu_device *adev); int amdgpu_vcn_sw_fini(struct amdgpu_device *adev); int amdgpu_vcn_suspend(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 2994b9db196ffd..f2e2cbaa7fde07 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -232,7 +232,8 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev) return 0; r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->virt.mm_table.bo, &adev->virt.mm_table.gpu_addr, (void *)&adev->virt.mm_table.cpu_addr); @@ -982,11 +983,13 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v if (offset == reg_access_ctrl->grbm_cntl) { /* if the target reg offset is grbm_cntl, write to scratch_reg2 */ writel(v, scratch_reg2); - writel(v, ((void __iomem *)adev->rmmio) + (offset * 4)); + if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY) + writel(v, ((void __iomem *)adev->rmmio) + (offset * 4)); } else if (offset == reg_access_ctrl->grbm_idx) { /* if the target reg offset 
is grbm_idx, write to scratch_reg3 */ writel(v, scratch_reg3); - writel(v, ((void __iomem *)adev->rmmio) + (offset * 4)); + if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY) + writel(v, ((void __iomem *)adev->rmmio) + (offset * 4)); } else { /* * SCRATCH_REG0 = read/write value diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 2b9d806e23afb4..b9e9480448afe9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -88,6 +88,7 @@ struct amdgpu_virt_ops { int (*wait_reset)(struct amdgpu_device *adev); void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req, u32 data1, u32 data2, u32 data3); + void (*ras_poison_handler)(struct amdgpu_device *adev); }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index dc379dc22c77b3..b9441ab457ea7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -33,6 +33,7 @@ #include #include +#include #include "amdgpu.h" #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 094bb48073031e..856a64bc7a89fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include "amdgpu_sync.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 4b9e7b050ccd25..4340d08f760737 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -29,13 +29,16 @@ #include "df/df_3_6_offset.h" #include "xgmi/xgmi_4_0_0_smn.h" #include "xgmi/xgmi_4_0_0_sh_mask.h" +#include "xgmi/xgmi_6_1_0_sh_mask.h" #include "wafl/wafl2_4_0_0_smn.h" #include "wafl/wafl2_4_0_0_sh_mask.h" #include "amdgpu_reset.h" #define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c +#define smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK 0x11a00218 #define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210 +#define smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK 0x12200218 static DEFINE_MUTEX(xgmi_mutex); @@ -79,11 +82,27 @@ static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = { smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x700000 }; +static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[] = { + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK, + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000, + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x200000, + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x300000, + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x400000, + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x500000, + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x600000, + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x700000 +}; + static const int walf_pcs_err_status_reg_aldebaran[] = { smnPCS_GOPX1_PCS_ERROR_STATUS, smnPCS_GOPX1_PCS_ERROR_STATUS + 0x100000 }; +static const int walf_pcs_err_noncorrectable_mask_reg_aldebaran[] = { + smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK, + smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000 +}; + static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = { {"XGMI PCS DataLossErr", SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)}, @@ -162,6 +181,67 @@ static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = { SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)}, }; +static const struct amdgpu_pcs_ras_field 
xgmi3x16_pcs_ras_fields[] = { + {"XGMI3X16 PCS DataLossErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataLossErr)}, + {"XGMI3X16 PCS TrainingErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TrainingErr)}, + {"XGMI3X16 PCS FlowCtrlAckErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlAckErr)}, + {"XGMI3X16 PCS RxFifoUnderflowErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoUnderflowErr)}, + {"XGMI3X16 PCS RxFifoOverflowErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoOverflowErr)}, + {"XGMI3X16 PCS CRCErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, CRCErr)}, + {"XGMI3X16 PCS BERExceededErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, BERExceededErr)}, + {"XGMI3X16 PCS TxVcidDataErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxVcidDataErr)}, + {"XGMI3X16 PCS ReplayBufParityErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayBufParityErr)}, + {"XGMI3X16 PCS DataParityErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataParityErr)}, + {"XGMI3X16 PCS ReplayFifoOverflowErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)}, + {"XGMI3X16 PCS ReplayFifoUnderflowErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)}, + {"XGMI3X16 PCS ElasticFifoOverflowErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)}, + {"XGMI3X16 PCS DeskewErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DeskewErr)}, + {"XGMI3X16 PCS FlowCtrlCRCErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlCRCErr)}, + {"XGMI3X16 PCS DataStartupLimitErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataStartupLimitErr)}, + {"XGMI3X16 PCS FCInitTimeoutErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FCInitTimeoutErr)}, + {"XGMI3X16 PCS RecoveryTimeoutErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryTimeoutErr)}, + {"XGMI3X16 PCS ReadySerialTimeoutErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)}, + {"XGMI3X16 PCS ReadySerialAttemptErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialAttemptErr)}, + {"XGMI3X16 PCS RecoveryAttemptErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryAttemptErr)}, + {"XGMI3X16 PCS RecoveryRelockAttemptErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)}, + {"XGMI3X16 PCS ReplayAttemptErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayAttemptErr)}, + {"XGMI3X16 PCS SyncHdrErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, SyncHdrErr)}, + {"XGMI3X16 PCS TxReplayTimeoutErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxReplayTimeoutErr)}, + {"XGMI3X16 PCS RxReplayTimeoutErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxReplayTimeoutErr)}, + {"XGMI3X16 PCS LinkSubTxTimeoutErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubTxTimeoutErr)}, + {"XGMI3X16 PCS LinkSubRxTimeoutErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubRxTimeoutErr)}, + {"XGMI3X16 PCS RxCMDPktErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)}, +}; + /** * DOC: AMDGPU XGMI Support * @@ -809,39 +889,47 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev) static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev, uint32_t value, + uint32_t mask_value, uint32_t *ue_count, uint32_t *ce_count, - bool is_xgmi_pcs) + bool is_xgmi_pcs, + bool check_mask) { int i; - int ue_cnt; + int ue_cnt = 0; + const struct amdgpu_pcs_ras_field *pcs_ras_fields = NULL; + uint32_t 
field_array_size = 0; if (is_xgmi_pcs) { - /* query xgmi pcs error status, - * only ue is supported */ - for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i ++) { - ue_cnt = (value & - xgmi_pcs_ras_fields[i].pcs_err_mask) >> - xgmi_pcs_ras_fields[i].pcs_err_shift; - if (ue_cnt) { - dev_info(adev->dev, "%s detected\n", - xgmi_pcs_ras_fields[i].err_name); - *ue_count += ue_cnt; - } + if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) { + pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0]; + field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields); + } else { + pcs_ras_fields = &xgmi_pcs_ras_fields[0]; + field_array_size = ARRAY_SIZE(xgmi_pcs_ras_fields); } } else { - /* query wafl pcs error status, - * only ue is supported */ - for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) { - ue_cnt = (value & - wafl_pcs_ras_fields[i].pcs_err_mask) >> - wafl_pcs_ras_fields[i].pcs_err_shift; - if (ue_cnt) { - dev_info(adev->dev, "%s detected\n", - wafl_pcs_ras_fields[i].err_name); - *ue_count += ue_cnt; - } + pcs_ras_fields = &wafl_pcs_ras_fields[0]; + field_array_size = ARRAY_SIZE(wafl_pcs_ras_fields); + } + + if (check_mask) + value = value & ~mask_value; + + /* query xgmi/walf pcs error status, + * only ue is supported */ + for (i = 0; value && i < field_array_size; i++) { + ue_cnt = (value & + pcs_ras_fields[i].pcs_err_mask) >> + pcs_ras_fields[i].pcs_err_shift; + if (ue_cnt) { + dev_info(adev->dev, "%s detected\n", + pcs_ras_fields[i].err_name); + *ue_count += ue_cnt; } + + /* reset bit value if the bit is checked */ + value &= ~(pcs_ras_fields[i].pcs_err_mask); } return 0; @@ -852,7 +940,7 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; int i; - uint32_t data; + uint32_t data, mask_data = 0; uint32_t ue_cnt = 0, ce_cnt = 0; if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL)) @@ -867,15 +955,15 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) { data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]); if (data) - amdgpu_xgmi_query_pcs_error_status(adev, - data, &ue_cnt, &ce_cnt, true); + amdgpu_xgmi_query_pcs_error_status(adev, data, + mask_data, &ue_cnt, &ce_cnt, true, false); } /* check wafl pcs error */ for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) { data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]); if (data) - amdgpu_xgmi_query_pcs_error_status(adev, - data, &ue_cnt, &ce_cnt, false); + amdgpu_xgmi_query_pcs_error_status(adev, data, + mask_data, &ue_cnt, &ce_cnt, false, false); } break; case CHIP_VEGA20: @@ -883,31 +971,35 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) { data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]); if (data) - amdgpu_xgmi_query_pcs_error_status(adev, - data, &ue_cnt, &ce_cnt, true); + amdgpu_xgmi_query_pcs_error_status(adev, data, + mask_data, &ue_cnt, &ce_cnt, true, false); } /* check wafl pcs error */ for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) { data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]); if (data) - amdgpu_xgmi_query_pcs_error_status(adev, - data, &ue_cnt, &ce_cnt, false); + amdgpu_xgmi_query_pcs_error_status(adev, data, + mask_data, &ue_cnt, &ce_cnt, false, false); } break; case CHIP_ALDEBARAN: /* check xgmi3x16 pcs error */ for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) { data = 
RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]); + mask_data = + RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[i]); if (data) - amdgpu_xgmi_query_pcs_error_status(adev, - data, &ue_cnt, &ce_cnt, true); + amdgpu_xgmi_query_pcs_error_status(adev, data, + mask_data, &ue_cnt, &ce_cnt, true, true); } /* check wafl pcs error */ for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) { data = RREG32_PCIE(walf_pcs_err_status_reg_aldebaran[i]); + mask_data = + RREG32_PCIE(walf_pcs_err_noncorrectable_mask_reg_aldebaran[i]); if (data) - amdgpu_xgmi_query_pcs_error_status(adev, - data, &ue_cnt, &ce_cnt, false); + amdgpu_xgmi_query_pcs_error_status(adev, data, + mask_data, &ue_cnt, &ce_cnt, false, true); } break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index afad094f84c2d8..10098fdd33fc47 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c @@ -24,7 +24,6 @@ * Alex Deucher */ -#include #include #include #include "amdgpu.h" diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 18ae9433e463d2..d95b2dc7806341 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -28,7 +28,6 @@ #include -#include #include #include "amdgpu.h" #include "amdgpu_connectors.h" diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index cbca9866645c5d..67d16236b2168d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -73,10 +73,9 @@ u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); static void cik_sdma_free_microcode(struct amdgpu_device *adev) { int i; - for (i = 0; i < adev->sdma.num_instances; i++) { - release_firmware(adev->sdma.instance[i].fw); - adev->sdma.instance[i].fw = NULL; - } + + for (i = 0; i < adev->sdma.num_instances; i++) + amdgpu_ucode_release(&adev->sdma.instance[i].fw); } /* @@ -137,18 +136,15 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev) snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); else snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name); - err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev); + err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name); if (err) goto out; - err = amdgpu_ucode_validate(adev->sdma.instance[i].fw); } out: if (err) { pr_err("cik_sdma: Failed to load firmware \"%s\"\n", fw_name); - for (i = 0; i < adev->sdma.num_instances; i++) { - release_firmware(adev->sdma.instance[i].fw); - adev->sdma.instance[i].fw = NULL; - } + for (i = 0; i < adev->sdma.num_instances; i++) + amdgpu_ucode_release(&adev->sdma.instance[i].fw); } return err; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 248f1a4e915f7c..9a24ed463abdf4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -21,8 +21,9 @@ * */ -#include #include +#include +#include #include #include "amdgpu.h" @@ -2837,7 +2838,7 @@ static int dce_v10_0_sw_init(void *handle) if (r) return r; - INIT_WORK(&adev->hotplug_work, + INIT_DELAYED_WORK(&adev->hotplug_work, amdgpu_display_hotplug_work_func); drm_kms_helper_poll_init(adev_to_drm(adev)); @@ -2902,7 +2903,7 @@ static int dce_v10_0_hw_fini(void *handle) dce_v10_0_pageflip_interrupt_fini(adev); - flush_work(&adev->hotplug_work); + 
flush_delayed_work(&adev->hotplug_work); return 0; } @@ -3302,7 +3303,7 @@ static int dce_v10_0_hpd_irq(struct amdgpu_device *adev, if (disp_int & mask) { dce_v10_0_hpd_int_ack(adev, hpd); - schedule_work(&adev->hotplug_work); + schedule_delayed_work(&adev->hotplug_work, 0); DRM_DEBUG("IH: HPD%d\n", hpd + 1); } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index cd9c19060d8988..c14b70350a51ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -21,8 +21,9 @@ * */ -#include #include +#include +#include #include #include "amdgpu.h" @@ -2956,7 +2957,7 @@ static int dce_v11_0_sw_init(void *handle) if (r) return r; - INIT_WORK(&adev->hotplug_work, + INIT_DELAYED_WORK(&adev->hotplug_work, amdgpu_display_hotplug_work_func); drm_kms_helper_poll_init(adev_to_drm(adev)); @@ -3032,7 +3033,7 @@ static int dce_v11_0_hw_fini(void *handle) dce_v11_0_pageflip_interrupt_fini(adev); - flush_work(&adev->hotplug_work); + flush_delayed_work(&adev->hotplug_work); return 0; } @@ -3426,7 +3427,7 @@ static int dce_v11_0_hpd_irq(struct amdgpu_device *adev, if (disp_int & mask) { dce_v11_0_hpd_int_ack(adev, hpd); - schedule_work(&adev->hotplug_work); + schedule_delayed_work(&adev->hotplug_work, 0); DRM_DEBUG("IH: HPD%d\n", hpd + 1); } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 76323deecc589e..7f85ba5b726f68 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -23,8 +23,9 @@ #include -#include #include +#include +#include #include #include "amdgpu.h" @@ -2715,7 +2716,7 @@ static int dce_v6_0_sw_init(void *handle) return r; /* Pre-DCE11 */ - INIT_WORK(&adev->hotplug_work, + INIT_DELAYED_WORK(&adev->hotplug_work, amdgpu_display_hotplug_work_func); drm_kms_helper_poll_init(adev_to_drm(adev)); @@ -2776,7 +2777,7 @@ static int dce_v6_0_hw_fini(void *handle) dce_v6_0_pageflip_interrupt_fini(adev); - flush_work(&adev->hotplug_work); + flush_delayed_work(&adev->hotplug_work); return 0; } @@ -3103,7 +3104,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev, tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); - schedule_work(&adev->hotplug_work); + schedule_delayed_work(&adev->hotplug_work, 0); DRM_DEBUG("IH: HPD%d\n", hpd + 1); } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 01cf3ab111cbef..d421a268c9ffeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -21,8 +21,9 @@ * */ -#include #include +#include +#include #include #include "amdgpu.h" @@ -2739,7 +2740,7 @@ static int dce_v8_0_sw_init(void *handle) return r; /* Pre-DCE11 */ - INIT_WORK(&adev->hotplug_work, + INIT_DELAYED_WORK(&adev->hotplug_work, amdgpu_display_hotplug_work_func); drm_kms_helper_poll_init(adev_to_drm(adev)); @@ -2802,7 +2803,7 @@ static int dce_v8_0_hw_fini(void *handle) dce_v8_0_pageflip_interrupt_fini(adev); - flush_work(&adev->hotplug_work); + flush_delayed_work(&adev->hotplug_work); return 0; } @@ -3195,7 +3196,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev, tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); - schedule_work(&adev->hotplug_work); + schedule_delayed_work(&adev->hotplug_work, 0); DRM_DEBUG("IH: HPD%d\n", hpd + 1); 
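+ /* A delay of 0 queues the work immediately, so hotplug timing is
+ * unchanged by this conversion; the delayed_work type presumably
+ * allows the handler to be re-queued with a real delay elsewhere. */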
} diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c index b991609f46c108..5dfab80ffff213 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c @@ -94,7 +94,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev, WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); } - /* Exit boradcast mode */ + /* Exit broadcast mode */ adev->df.funcs->enable_broadcast_mode(adev, false); } diff --git a/drivers/gpu/drm/amd/amdgpu/df_v4_3.c b/drivers/gpu/drm/amd/amdgpu/df_v4_3.c new file mode 100644 index 00000000000000..e8b9e19ede2e11 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/df_v4_3.c @@ -0,0 +1,61 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "df_v4_3.h" + +#include "df/df_4_3_offset.h" +#include "df/df_4_3_sh_mask.h" + +static bool df_v4_3_query_ras_poison_mode(struct amdgpu_device *adev) +{ + uint32_t hw_assert_msklo, hw_assert_mskhi; + uint32_t v0, v1, v28, v31; + + hw_assert_msklo = RREG32_SOC15(DF, 0, + regDF_CS_UMC_AON0_HardwareAssertMaskLow); + hw_assert_mskhi = RREG32_SOC15(DF, 0, + regDF_NCS_PG0_HardwareAssertMaskHigh); + + v0 = REG_GET_FIELD(hw_assert_msklo, + DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk0); + v1 = REG_GET_FIELD(hw_assert_msklo, + DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk1); + v28 = REG_GET_FIELD(hw_assert_mskhi, + DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk28); + v31 = REG_GET_FIELD(hw_assert_mskhi, + DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk31); + + if (v0 && v1 && v28 && v31) + return true; + else if (!v0 && !v1 && !v28 && !v31) + return false; + else { + dev_warn(adev->dev, "DF poison setting is inconsistent(%d:%d:%d:%d)!\n", + v0, v1, v28, v31); + return false; + } +} + +const struct amdgpu_df_funcs df_v4_3_funcs = { + .query_ras_poison_mode = df_v4_3_query_ras_poison_mode, +}; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h b/drivers/gpu/drm/amd/amdgpu/df_v4_3.h similarity index 79% rename from drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h rename to drivers/gpu/drm/amd/amdgpu/df_v4_3.h index ea8d9760132fd7..06ef0724edd3d7 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h +++ b/drivers/gpu/drm/amd/amdgpu/df_v4_3.h @@ -19,16 +19,13 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* - * Authors: AMD - * */ -#ifndef __LINK_HWSS_HPO_FRL_H__ -#define __LINK_HWSS_HPO_FRL_H__ -#include "link_hwss.h" +#ifndef __DF_V4_3_H__ +#define __DF_V4_3_H__ + +#include "soc15_common.h" -bool can_use_hpo_frl_link_hwss(const struct dc_link *link, - const struct link_resource *link_res); -const struct link_hwss *get_hpo_frl_link_hwss(void); +extern const struct amdgpu_df_funcs df_v4_3_funcs; -#endif /* __LINK_HWSS_HPO_FRL_H__ */ +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 49d34c7bbf20f0..6983acc456b28f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3891,18 +3891,12 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) static void gfx_v10_0_free_microcode(struct amdgpu_device *adev) { - release_firmware(adev->gfx.pfp_fw); - adev->gfx.pfp_fw = NULL; - release_firmware(adev->gfx.me_fw); - adev->gfx.me_fw = NULL; - release_firmware(adev->gfx.ce_fw); - adev->gfx.ce_fw = NULL; - release_firmware(adev->gfx.rlc_fw); - adev->gfx.rlc_fw = NULL; - release_firmware(adev->gfx.mec_fw); - adev->gfx.mec_fw = NULL; - release_firmware(adev->gfx.mec2_fw); - adev->gfx.mec2_fw = NULL; + amdgpu_ucode_release(&adev->gfx.pfp_fw); + amdgpu_ucode_release(&adev->gfx.me_fw); + amdgpu_ucode_release(&adev->gfx.ce_fw); + amdgpu_ucode_release(&adev->gfx.rlc_fw); + amdgpu_ucode_release(&adev->gfx.mec_fw); + amdgpu_ucode_release(&adev->gfx.mec2_fw); kfree(adev->gfx.rlc.register_list_format); } @@ -3974,9 +3968,9 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev) static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) { - const char *chip_name; char fw_name[40]; - char *wks = ""; + char ucode_prefix[30]; + const char *wks = ""; int err; const struct rlc_firmware_header_v2_0 *rlc_hdr; uint16_t version_major; @@ -3984,90 +3978,40 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); - switch (adev->ip_versions[GC_HWIP][0]) { - case IP_VERSION(10, 1, 10): - chip_name = "navi10"; - break; - case IP_VERSION(10, 1, 1): - chip_name = "navi14"; - if (!(adev->pdev->device == 0x7340 && - adev->pdev->revision != 0x00)) - wks = "_wks"; - break; - case IP_VERSION(10, 1, 2): - chip_name = "navi12"; - break; - case IP_VERSION(10, 3, 0): - chip_name = "sienna_cichlid"; - break; - case IP_VERSION(10, 3, 2): - chip_name = "navy_flounder"; - break; - case IP_VERSION(10, 3, 1): - chip_name = "vangogh"; - break; - case IP_VERSION(10, 3, 4): - chip_name = "dimgrey_cavefish"; - break; - case IP_VERSION(10, 3, 5): - chip_name = "beige_goby"; - break; - case IP_VERSION(10, 3, 3): - chip_name = "yellow_carp"; - break; - case IP_VERSION(10, 3, 6): - chip_name = "gc_10_3_6"; - break; - case IP_VERSION(10, 1, 3): - case IP_VERSION(10, 1, 4): - chip_name = "cyan_skillfish2"; - break; - case IP_VERSION(10, 3, 7): - chip_name = "gc_10_3_7"; - break; - default: - BUG(); - } + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1) && + (!(adev->pdev->device == 0x7340 && adev->pdev->revision != 0x00))) + wks = "_wks"; + amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", chip_name, wks); - err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.pfp_fw); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", ucode_prefix, wks); + err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name); if 
(err) goto out; amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", chip_name, wks); - err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.me_fw); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", ucode_prefix, wks); + err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name); if (err) goto out; amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", chip_name, wks); - err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.ce_fw); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", ucode_prefix, wks); + err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name); if (err) goto out; amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE); if (!amdgpu_sriov_vf(adev)) { - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); - err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); - if (err) - goto out; + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix); + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name); /* don't check this. There are apparently firmwares in the wild with * incorrect size in the header */ + if (err == -ENODEV) + goto out; if (err) dev_dbg(adev->dev, - "gfx10: amdgpu_ucode_validate() failed \"%s\"\n", + "gfx10: amdgpu_ucode_request() failed \"%s\"\n", fw_name); rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; version_major = le16_to_cpu(rlc_hdr->header.header_version_major); @@ -4077,47 +4021,34 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) goto out; } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks); - err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.mec_fw); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", ucode_prefix, wks); + err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name); if (err) goto out; amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1); amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", chip_name, wks); - err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", ucode_prefix, wks); + err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name); if (!err) { - err = amdgpu_ucode_validate(adev->gfx.mec2_fw); - if (err) - goto out; amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2); amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT); } else { err = 0; adev->gfx.mec2_fw = NULL; } gfx_v10_0_check_fw_write_wait(adev); out: if (err) { - dev_err(adev->dev, - "gfx10: Failed to init firmware \"%s\"\n", - fw_name); - release_firmware(adev->gfx.pfp_fw); - adev->gfx.pfp_fw = NULL; - release_firmware(adev->gfx.me_fw); - adev->gfx.me_fw = NULL; - release_firmware(adev->gfx.ce_fw); - adev->gfx.ce_fw = NULL; - release_firmware(adev->gfx.rlc_fw); - adev->gfx.rlc_fw = NULL; - release_firmware(adev->gfx.mec_fw); - adev->gfx.mec_fw = NULL; - release_firmware(adev->gfx.mec2_fw); - adev->gfx.mec2_fw = NULL; + 
amdgpu_ucode_release(&adev->gfx.pfp_fw); + amdgpu_ucode_release(&adev->gfx.me_fw); + amdgpu_ucode_release(&adev->gfx.ce_fw); + amdgpu_ucode_release(&adev->gfx.rlc_fw); + amdgpu_ucode_release(&adev->gfx.mec_fw); + amdgpu_ucode_release(&adev->gfx.mec2_fw); } gfx_v10_0_check_gfxoff_flag(adev); @@ -4270,19 +4201,11 @@ static void gfx_v10_0_mec_fini(struct amdgpu_device *adev) amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); } -static int gfx_v10_0_me_init(struct amdgpu_device *adev) +static void gfx_v10_0_me_init(struct amdgpu_device *adev) { - int r; - bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES); amdgpu_gfx_graphics_queue_acquire(adev); - - r = gfx_v10_0_init_microcode(adev); - if (r) - DRM_ERROR("Failed to load gfx firmware!\n"); - - return r; } static int gfx_v10_0_mec_init(struct amdgpu_device *adev) @@ -4650,9 +4573,7 @@ static int gfx_v10_0_sw_init(void *handle) adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; - r = gfx_v10_0_me_init(adev); - if (r) - return r; + gfx_v10_0_me_init(adev); if (adev->gfx.rlc.funcs) { if (adev->gfx.rlc.funcs->init) { @@ -7630,7 +7551,7 @@ static int gfx_v10_0_early_init(void *handle) /* init rlcg reg access ctrl */ gfx_v10_0_init_rlcg_reg_access_ctrl(adev); - return 0; + return gfx_v10_0_init_microcode(adev); } static int gfx_v10_0_late_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 66eb102cd88fbd..8ad8a0bffcacbf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -46,6 +46,7 @@ #include "clearstate_gfx11.h" #include "v11_structs.h" #include "gfx_v11_0.h" +#include "gfx_v11_0_3.h" #include "nbio_v4_3.h" #include "mes_v11_0.h" @@ -431,18 +432,37 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) static void gfx_v11_0_free_microcode(struct amdgpu_device *adev) { - release_firmware(adev->gfx.pfp_fw); - adev->gfx.pfp_fw = NULL; - release_firmware(adev->gfx.me_fw); - adev->gfx.me_fw = NULL; - release_firmware(adev->gfx.rlc_fw); - adev->gfx.rlc_fw = NULL; - release_firmware(adev->gfx.mec_fw); - adev->gfx.mec_fw = NULL; + amdgpu_ucode_release(&adev->gfx.pfp_fw); + amdgpu_ucode_release(&adev->gfx.me_fw); + amdgpu_ucode_release(&adev->gfx.rlc_fw); + amdgpu_ucode_release(&adev->gfx.mec_fw); kfree(adev->gfx.rlc.register_list_format); } +static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix) +{ + const struct psp_firmware_header_v1_0 *toc_hdr; + int err = 0; + char fw_name[40]; + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix); + err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name); + if (err) + goto out; + + toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; + adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); + adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); + adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); + adev->psp.toc.start_addr = (uint8_t *)toc_hdr + + le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); + return 0; +out: + amdgpu_ucode_release(&adev->psp.toc_fw); + return err; +} + static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) { char fw_name[40]; @@ -457,10 +477,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix); - err = 
request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.pfp_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name); if (err) goto out; /* check pfp fw hdr version to decide if enable rs64 for gfx11.*/ @@ -477,10 +494,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix); - err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.me_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name); if (err) goto out; if (adev->gfx.rs64_enable) { @@ -493,10 +507,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) if (!amdgpu_sriov_vf(adev)) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix); - err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.rlc_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name); if (err) goto out; rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; @@ -508,10 +519,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix); - err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.mec_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name); if (err) goto out; if (adev->gfx.rs64_enable) { @@ -525,59 +533,23 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT); } + if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) + err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix); + /* only one MEC for gfx 11.0.0. 
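+ No mec2 firmware is requested for it, so the pointer is simply left NULL. 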
*/ adev->gfx.mec2_fw = NULL; out: if (err) { - dev_err(adev->dev, - "gfx11: Failed to init firmware \"%s\"\n", - fw_name); - release_firmware(adev->gfx.pfp_fw); - adev->gfx.pfp_fw = NULL; - release_firmware(adev->gfx.me_fw); - adev->gfx.me_fw = NULL; - release_firmware(adev->gfx.rlc_fw); - adev->gfx.rlc_fw = NULL; - release_firmware(adev->gfx.mec_fw); - adev->gfx.mec_fw = NULL; + amdgpu_ucode_release(&adev->gfx.pfp_fw); + amdgpu_ucode_release(&adev->gfx.me_fw); + amdgpu_ucode_release(&adev->gfx.rlc_fw); + amdgpu_ucode_release(&adev->gfx.mec_fw); } return err; } -static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev) -{ - const struct psp_firmware_header_v1_0 *toc_hdr; - int err = 0; - char fw_name[40]; - char ucode_prefix[30]; - - amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix); - err = request_firmware(&adev->psp.toc_fw, fw_name, adev->dev); - if (err) - goto out; - - err = amdgpu_ucode_validate(adev->psp.toc_fw); - if (err) - goto out; - - toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; - adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); - adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); - adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); - adev->psp.toc.start_addr = (uint8_t *)toc_hdr + - le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); - return 0; -out: - dev_err(adev->dev, "Failed to load TOC microcode\n"); - release_firmware(adev->psp.toc_fw); - adev->psp.toc_fw = NULL; - return err; -} - static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev) { u32 count = 0; @@ -714,19 +686,11 @@ static void gfx_v11_0_mec_fini(struct amdgpu_device *adev) amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL); } -static int gfx_v11_0_me_init(struct amdgpu_device *adev) +static void gfx_v11_0_me_init(struct amdgpu_device *adev) { - int r; - bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES); amdgpu_gfx_graphics_queue_acquire(adev); - - r = gfx_v11_0_init_microcode(adev); - if (r) - DRM_ERROR("Failed to load gfx firmware!\n"); - - return r; } static int gfx_v11_0_mec_init(struct amdgpu_device *adev) @@ -852,7 +816,14 @@ static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev) switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 2): + adev->gfx.config.max_hw_contexts = 8; + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; + break; case IP_VERSION(11, 0, 3): + adev->gfx.ras = &gfx_v11_0_3_ras; adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -987,10 +958,11 @@ static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev) total_size = gfx_v11_0_calc_toc_total_size(adev); r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024, - AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.rlc_autoload_bo, - &adev->gfx.rlc.rlc_autoload_gpu_addr, - (void **)&adev->gfx.rlc.rlc_autoload_ptr); + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.rlc.rlc_autoload_bo, + &adev->gfx.rlc.rlc_autoload_gpu_addr, + (void **)&adev->gfx.rlc.rlc_autoload_ptr); if (r) { dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r); @@ -1336,6 +1308,20 @@ static int 
gfx_v11_0_sw_init(void *handle) if (r) return r; + /* ECC error */ + r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, + GFX_11_0_0__SRCID__CP_ECC_ERROR, + &adev->gfx.cp_ecc_error_irq); + if (r) + return r; + + /* FED error */ + r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, + GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT, + &adev->gfx.rlc_gc_fed_irq); + if (r) + return r; + adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; if (adev->gfx.imu.funcs) { @@ -1346,9 +1332,7 @@ static int gfx_v11_0_sw_init(void *handle) } } - r = gfx_v11_0_me_init(adev); - if (r) - return r; + gfx_v11_0_me_init(adev); r = gfx_v11_0_rlc_init(adev); if (r) { @@ -1416,9 +1400,6 @@ static int gfx_v11_0_sw_init(void *handle) /* allocate visible FB for rlc auto-loading fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { - r = gfx_v11_0_init_toc_microcode(adev); - if (r) - dev_err(adev->dev, "Failed to load toc firmware!\n"); r = gfx_v11_0_rlc_autoload_buffer_init(adev); if (r) return r; @@ -1428,6 +1409,11 @@ static int gfx_v11_0_sw_init(void *handle) if (r) return r; + if (amdgpu_gfx_ras_sw_init(adev)) { + dev_err(adev->dev, "Failed to initialize gfx ras block!\n"); + return -EINVAL; + } + return 0; } @@ -2656,7 +2642,9 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev) /* 64kb align */ r = amdgpu_bo_create_reserved(adev, fw_ucode_size, - 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, + 64 * 1024, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->gfx.pfp.pfp_fw_obj, &adev->gfx.pfp.pfp_fw_gpu_addr, (void **)&adev->gfx.pfp.pfp_fw_ptr); @@ -2667,7 +2655,9 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev) } r = amdgpu_bo_create_reserved(adev, fw_data_size, - 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, + 64 * 1024, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->gfx.pfp.pfp_fw_data_obj, &adev->gfx.pfp.pfp_fw_data_gpu_addr, (void **)&adev->gfx.pfp.pfp_fw_data_ptr); @@ -2870,7 +2860,9 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev) /* 64kb align*/ r = amdgpu_bo_create_reserved(adev, fw_ucode_size, - 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, + 64 * 1024, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->gfx.me.me_fw_obj, &adev->gfx.me.me_fw_gpu_addr, (void **)&adev->gfx.me.me_fw_ptr); @@ -2881,7 +2873,9 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev) } r = amdgpu_bo_create_reserved(adev, fw_data_size, - 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, + 64 * 1024, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->gfx.me.me_fw_data_obj, &adev->gfx.me.me_fw_data_gpu_addr, (void **)&adev->gfx.me.me_fw_data_ptr); @@ -3387,7 +3381,9 @@ static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev) fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes); r = amdgpu_bo_create_reserved(adev, fw_ucode_size, - 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, + 64 * 1024, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->gfx.mec.mec_fw_obj, &adev->gfx.mec.mec_fw_gpu_addr, (void **)&fw_ucode_ptr); @@ -3398,7 +3394,9 @@ static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev) } r = amdgpu_bo_create_reserved(adev, fw_data_size, - 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, + 64 * 1024, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->gfx.mec.mec_fw_data_obj, &adev->gfx.mec.mec_fw_data_gpu_addr, (void **)&fw_data_ptr); @@ -4408,6 +4406,7 @@ static int gfx_v11_0_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; + 
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -4687,7 +4686,7 @@ static int gfx_v11_0_early_init(void *handle) gfx_v11_0_init_rlcg_reg_access_ctrl(adev); - return 0; + return gfx_v11_0_init_microcode(adev); } static int gfx_v11_0_ras_late_init(void *handle) @@ -5839,6 +5838,36 @@ static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev } } +#define CP_ME1_PIPE_INST_ADDR_INTERVAL 0x1 +#define SET_ECC_ME_PIPE_STATE(reg_addr, state) \ + do { \ + uint32_t tmp = RREG32_SOC15_IP(GC, reg_addr); \ + tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, state); \ + WREG32_SOC15_IP(GC, reg_addr, tmp); \ + } while (0) + +static int gfx_v11_0_set_cp_ecc_error_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + uint32_t ecc_irq_state = 0; + uint32_t pipe0_int_cntl_addr = 0; + int i = 0; + + ecc_irq_state = (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0; + + pipe0_int_cntl_addr = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); + + WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, ecc_irq_state); + + for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) + SET_ECC_ME_PIPE_STATE(pipe0_int_cntl_addr + i * CP_ME1_PIPE_INST_ADDR_INTERVAL, + ecc_irq_state); + + return 0; +} + static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -6015,6 +6044,16 @@ static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev, return 0; } +static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq) + return adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry); + + return 0; +} + #if 0 static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, @@ -6245,6 +6284,15 @@ static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = { .process = gfx_v11_0_priv_inst_irq, }; +static const struct amdgpu_irq_src_funcs gfx_v11_0_cp_ecc_error_irq_funcs = { + .set = gfx_v11_0_set_cp_ecc_error_state, + .process = amdgpu_gfx_cp_ecc_error_irq, +}; + +static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = { + .process = gfx_v11_0_rlc_gc_fed_irq, +}; + static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev) { adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; @@ -6255,6 +6303,13 @@ static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev) adev->gfx.priv_inst_irq.num_types = 1; adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs; + + adev->gfx.cp_ecc_error_irq.num_types = 1; /* CP ECC error */ + adev->gfx.cp_ecc_error_irq.funcs = &gfx_v11_0_cp_ecc_error_irq_funcs; + + adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */ + adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs; + } static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c new file mode 100644 index 00000000000000..b07a72ca25d9e7 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c @@ -0,0 +1,88 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "soc21.h" +#include "gc/gc_11_0_3_offset.h" +#include "gc/gc_11_0_3_sh_mask.h" +#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h" +#include "soc15.h" +#include "soc15d.h" +#include "gfx_v11_0.h" + + +static int gfx_v11_0_3_rlc_gc_fed_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t rlc_status0 = 0, rlc_status1 = 0; + struct ras_common_if *ras_if = NULL; + struct ras_dispatch_if ih_data = { + .entry = entry, + }; + + rlc_status0 = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_RLCS_FED_STATUS_0)); + rlc_status1 = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_RLCS_FED_STATUS_1)); + + if (!rlc_status0 && !rlc_status1) { + dev_warn(adev->dev, "RLC_GC_FED irq is generated, but rlc_status0 and rlc_status1 are empty!\n"); + return 0; + } + + /* Use RLC_RLCS_FED_STATUS_0/1 to distinguish FED error block. */ + if (REG_GET_FIELD(rlc_status0, RLC_RLCS_FED_STATUS_0, SDMA0_FED_ERR) || + REG_GET_FIELD(rlc_status0, RLC_RLCS_FED_STATUS_0, SDMA1_FED_ERR)) + ras_if = adev->sdma.ras_if; + else + ras_if = adev->gfx.ras_if; + + if (!ras_if) { + dev_err(adev->dev, "Gfx or sdma ras block not initialized, rlc_status0:0x%x.\n", + rlc_status0); + return -EINVAL; + } + + ih_data.head = *ras_if; + + dev_warn(adev->dev, "RLC %s FED IRQ\n", ras_if->name); + amdgpu_ras_interrupt_dispatch(adev, &ih_data); + + return 0; +} + +static int gfx_v11_0_3_poison_consumption_handler(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry) +{ + /* Workaround: when vmid and pasid are both zero, trigger gpu reset in KGD. */ + if (entry && (entry->client_id == SOC21_IH_CLIENTID_GFX) && + (entry->src_id == GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT) && + !entry->vmid && !entry->pasid) + amdgpu_ras_reset_gpu(adev); + + return 0; +} + +struct amdgpu_gfx_ras gfx_v11_0_3_ras = { + .rlc_gc_fed_irq = gfx_v11_0_3_rlc_gc_fed_irq, + .poison_consumption_handler = gfx_v11_0_3_poison_consumption_handler, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.h b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.h new file mode 100644 index 00000000000000..672c7920b3d09c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.h @@ -0,0 +1,29 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __GFX_V11_0_3_H__ +#define __GFX_V11_0_3_H__ + +extern struct amdgpu_gfx_ras gfx_v11_0_3_ras; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 204b246f0e3f94..c41219e23151c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -338,10 +338,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev) } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); - err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.pfp_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name); if (err) goto out; cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; @@ -349,10 +346,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev) adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); - err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.me_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name); if (err) goto out; cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; @@ -360,10 +354,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev) adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); - err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.ce_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name); if (err) goto out; cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; @@ -371,10 +362,9 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev) adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); - err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name); if (err) goto out; - err = amdgpu_ucode_validate(adev->gfx.rlc_fw); rlc_hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version); adev->gfx.rlc_feature_version = 
le32_to_cpu(rlc_hdr->ucode_feature_version); @@ -382,14 +372,10 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev) out: if (err) { pr_err("gfx6: Failed to load firmware \"%s\"\n", fw_name); - release_firmware(adev->gfx.pfp_fw); - adev->gfx.pfp_fw = NULL; - release_firmware(adev->gfx.me_fw); - adev->gfx.me_fw = NULL; - release_firmware(adev->gfx.ce_fw); - adev->gfx.ce_fw = NULL; - release_firmware(adev->gfx.rlc_fw); - adev->gfx.rlc_fw = NULL; + amdgpu_ucode_release(&adev->gfx.pfp_fw); + amdgpu_ucode_release(&adev->gfx.me_fw); + amdgpu_ucode_release(&adev->gfx.ce_fw); + amdgpu_ucode_release(&adev->gfx.rlc_fw); } return err; } @@ -2375,7 +2361,8 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) dws = adev->gfx.rlc.clear_state_size + (256 / 4); r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->gfx.rlc.clear_state_obj, &adev->gfx.rlc.clear_state_gpu_addr, (void **)&adev->gfx.rlc.cs_ptr); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 0f2976507e484d..9d5c1e29b4a358 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -887,6 +887,16 @@ static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *bu static void gfx_v7_0_init_pg(struct amdgpu_device *adev); static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev); +static void gfx_v7_0_free_microcode(struct amdgpu_device *adev) +{ + amdgpu_ucode_release(&adev->gfx.pfp_fw); + amdgpu_ucode_release(&adev->gfx.me_fw); + amdgpu_ucode_release(&adev->gfx.ce_fw); + amdgpu_ucode_release(&adev->gfx.mec_fw); + amdgpu_ucode_release(&adev->gfx.mec2_fw); + amdgpu_ucode_release(&adev->gfx.rlc_fw); +} + /* * Core functions */ @@ -927,88 +937,44 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev) } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); - err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.pfp_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name); if (err) goto out; snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); - err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.me_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name); if (err) goto out; snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); - err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.ce_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name); if (err) goto out; snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); - err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.mec_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name); if (err) goto out; if (adev->asic_type == CHIP_KAVERI) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); - err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.mec2_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name); if (err) goto out; } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); - err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); + err = amdgpu_ucode_request(adev, 
&adev->gfx.rlc_fw, fw_name); if (err) goto out; - err = amdgpu_ucode_validate(adev->gfx.rlc_fw); - out: if (err) { pr_err("gfx7: Failed to load firmware \"%s\"\n", fw_name); - release_firmware(adev->gfx.pfp_fw); - adev->gfx.pfp_fw = NULL; - release_firmware(adev->gfx.me_fw); - adev->gfx.me_fw = NULL; - release_firmware(adev->gfx.ce_fw); - adev->gfx.ce_fw = NULL; - release_firmware(adev->gfx.mec_fw); - adev->gfx.mec_fw = NULL; - release_firmware(adev->gfx.mec2_fw); - adev->gfx.mec2_fw = NULL; - release_firmware(adev->gfx.rlc_fw); - adev->gfx.rlc_fw = NULL; + gfx_v7_0_free_microcode(adev); } return err; } -static void gfx_v7_0_free_microcode(struct amdgpu_device *adev) -{ - release_firmware(adev->gfx.pfp_fw); - adev->gfx.pfp_fw = NULL; - release_firmware(adev->gfx.me_fw); - adev->gfx.me_fw = NULL; - release_firmware(adev->gfx.ce_fw); - adev->gfx.ce_fw = NULL; - release_firmware(adev->gfx.mec_fw); - adev->gfx.mec_fw = NULL; - release_firmware(adev->gfx.mec2_fw); - adev->gfx.mec2_fw = NULL; - release_firmware(adev->gfx.rlc_fw); - adev->gfx.rlc_fw = NULL; -} - /** * gfx_v7_0_tiling_mode_table_init - init the hw tiling table * @@ -2772,7 +2738,8 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev) * GFX7_MEC_HPD_SIZE * 2; r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->gfx.mec.hpd_eop_obj, &adev->gfx.mec.hpd_eop_gpu_addr, (void **)&hpd); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index d47135606e3ef7..b1f2684d854ad3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -924,20 +924,14 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) { - release_firmware(adev->gfx.pfp_fw); - adev->gfx.pfp_fw = NULL; - release_firmware(adev->gfx.me_fw); - adev->gfx.me_fw = NULL; - release_firmware(adev->gfx.ce_fw); - adev->gfx.ce_fw = NULL; - release_firmware(adev->gfx.rlc_fw); - adev->gfx.rlc_fw = NULL; - release_firmware(adev->gfx.mec_fw); - adev->gfx.mec_fw = NULL; + amdgpu_ucode_release(&adev->gfx.pfp_fw); + amdgpu_ucode_release(&adev->gfx.me_fw); + amdgpu_ucode_release(&adev->gfx.ce_fw); + amdgpu_ucode_release(&adev->gfx.rlc_fw); + amdgpu_ucode_release(&adev->gfx.mec_fw); if ((adev->asic_type != CHIP_STONEY) && (adev->asic_type != CHIP_TOPAZ)) - release_firmware(adev->gfx.mec2_fw); - adev->gfx.mec2_fw = NULL; + amdgpu_ucode_release(&adev->gfx.mec2_fw); kfree(adev->gfx.rlc.register_list_format); } @@ -989,18 +983,15 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name); - err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); - if (err == -ENOENT) { + err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name); + if (err == -ENODEV) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); - err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); + err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name); } } else { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); - err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); + err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name); } - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.pfp_fw); if (err) goto out; cp_hdr = (const struct 
gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; @@ -1009,18 +1000,15 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name); - err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); - if (err == -ENOENT) { + err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name); + if (err == -ENODEV) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); - err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); + err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name); } } else { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); - err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); + err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name); } - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.me_fw); if (err) goto out; cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; @@ -1030,18 +1018,15 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name); - err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); - if (err == -ENOENT) { + err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name); + if (err == -ENODEV) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); - err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); + err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name); } } else { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); - err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); + err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name); } - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.ce_fw); if (err) goto out; cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; @@ -1060,10 +1045,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) adev->virt.chained_ib_support = false; snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); - err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name); if (err) goto out; - err = amdgpu_ucode_validate(adev->gfx.rlc_fw); rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version); adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version); @@ -1110,18 +1094,15 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name); - err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); - if (err == -ENOENT) { + err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name); + if (err == -ENODEV) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); - err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); + err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name); } } else { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); - err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); + err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name); } - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.mec_fw); if (err) goto out; cp_hdr = (const struct 
gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; @@ -1132,19 +1113,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) (adev->asic_type != CHIP_TOPAZ)) { if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name); - err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); - if (err == -ENOENT) { + err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name); + if (err == -ENODEV) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); - err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); + err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name); } } else { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); - err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); + err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name); } if (!err) { - err = amdgpu_ucode_validate(adev->gfx.mec2_fw); - if (err) - goto out; cp_hdr = (const struct gfx_firmware_header_v1_0 *) adev->gfx.mec2_fw->data; adev->gfx.mec2_fw_version = @@ -1219,18 +1197,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) dev_err(adev->dev, "gfx8: Failed to load firmware \"%s\"\n", fw_name); - release_firmware(adev->gfx.pfp_fw); - adev->gfx.pfp_fw = NULL; - release_firmware(adev->gfx.me_fw); - adev->gfx.me_fw = NULL; - release_firmware(adev->gfx.ce_fw); - adev->gfx.ce_fw = NULL; - release_firmware(adev->gfx.rlc_fw); - adev->gfx.rlc_fw = NULL; - release_firmware(adev->gfx.mec_fw); - adev->gfx.mec_fw = NULL; - release_firmware(adev->gfx.mec2_fw); - adev->gfx.mec2_fw = NULL; + amdgpu_ucode_release(&adev->gfx.pfp_fw); + amdgpu_ucode_release(&adev->gfx.me_fw); + amdgpu_ucode_release(&adev->gfx.ce_fw); + amdgpu_ucode_release(&adev->gfx.rlc_fw); + amdgpu_ucode_release(&adev->gfx.mec_fw); + amdgpu_ucode_release(&adev->gfx.mec2_fw); } return err; } @@ -1340,7 +1312,8 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE; if (mec_hpd_size) { r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->gfx.mec.hpd_eop_obj, &adev->gfx.mec.hpd_eop_gpu_addr, (void **)&hpd); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 5dde6f82a1ca09..ae09fc1cfe6b7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1078,18 +1078,12 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) static void gfx_v9_0_free_microcode(struct amdgpu_device *adev) { - release_firmware(adev->gfx.pfp_fw); - adev->gfx.pfp_fw = NULL; - release_firmware(adev->gfx.me_fw); - adev->gfx.me_fw = NULL; - release_firmware(adev->gfx.ce_fw); - adev->gfx.ce_fw = NULL; - release_firmware(adev->gfx.rlc_fw); - adev->gfx.rlc_fw = NULL; - release_firmware(adev->gfx.mec_fw); - adev->gfx.mec_fw = NULL; - release_firmware(adev->gfx.mec2_fw); - adev->gfx.mec2_fw = NULL; + amdgpu_ucode_release(&adev->gfx.pfp_fw); + amdgpu_ucode_release(&adev->gfx.me_fw); + amdgpu_ucode_release(&adev->gfx.ce_fw); + amdgpu_ucode_release(&adev->gfx.rlc_fw); + amdgpu_ucode_release(&adev->gfx.mec_fw); + amdgpu_ucode_release(&adev->gfx.mec2_fw); kfree(adev->gfx.rlc.register_list_format); } @@ -1251,55 +1245,40 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) } static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev, - const 
char *chip_name) + char *chip_name) { char fw_name[30]; int err; snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); - err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.pfp_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name); if (err) goto out; amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); - err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.me_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name); if (err) goto out; amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); - err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.ce_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name); if (err) goto out; amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE); out: if (err) { - dev_err(adev->dev, - "gfx9: Failed to init firmware \"%s\"\n", - fw_name); - release_firmware(adev->gfx.pfp_fw); - adev->gfx.pfp_fw = NULL; - release_firmware(adev->gfx.me_fw); - adev->gfx.me_fw = NULL; - release_firmware(adev->gfx.ce_fw); - adev->gfx.ce_fw = NULL; + amdgpu_ucode_release(&adev->gfx.pfp_fw); + amdgpu_ucode_release(&adev->gfx.me_fw); + amdgpu_ucode_release(&adev->gfx.ce_fw); } return err; } static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev, - const char *chip_name) + char *chip_name) { char fw_name[30]; int err; @@ -1328,10 +1307,7 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev, snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name); else snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); - err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.rlc_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name); if (err) goto out; rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; @@ -1340,13 +1316,9 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev, version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor); out: - if (err) { - dev_err(adev->dev, - "gfx9: Failed to init firmware \"%s\"\n", - fw_name); - release_firmware(adev->gfx.rlc_fw); - adev->gfx.rlc_fw = NULL; - } + if (err) + amdgpu_ucode_release(&adev->gfx.rlc_fw); + return err; } @@ -1361,7 +1333,7 @@ static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev) } static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev, - const char *chip_name) + char *chip_name) { char fw_name[30]; int err; @@ -1371,10 +1343,7 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev, else snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); - err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.mec_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name); if (err) goto out; amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1); @@ -1386,91 +1355,49 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev, else snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); - err = 
request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); + /* ignore failures to load */ + err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name); if (!err) { - err = amdgpu_ucode_validate(adev->gfx.mec2_fw); - if (err) - goto out; amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2); amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT); } else { err = 0; - adev->gfx.mec2_fw = NULL; + amdgpu_ucode_release(&adev->gfx.mec2_fw); } } else { adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version; adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version; } -out: gfx_v9_0_check_if_need_gfxoff(adev); gfx_v9_0_check_fw_write_wait(adev); - if (err) { - dev_err(adev->dev, - "gfx9: Failed to init firmware \"%s\"\n", - fw_name); - release_firmware(adev->gfx.mec_fw); - adev->gfx.mec_fw = NULL; - release_firmware(adev->gfx.mec2_fw); - adev->gfx.mec2_fw = NULL; - } + +out: + if (err) + amdgpu_ucode_release(&adev->gfx.mec_fw); return err; } static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) { - const char *chip_name; + char ucode_prefix[30]; int r; DRM_DEBUG("\n"); - - switch (adev->ip_versions[GC_HWIP][0]) { - case IP_VERSION(9, 0, 1): - chip_name = "vega10"; - break; - case IP_VERSION(9, 2, 1): - chip_name = "vega12"; - break; - case IP_VERSION(9, 4, 0): - chip_name = "vega20"; - break; - case IP_VERSION(9, 2, 2): - case IP_VERSION(9, 1, 0): - if (adev->apu_flags & AMD_APU_IS_RAVEN2) - chip_name = "raven2"; - else if (adev->apu_flags & AMD_APU_IS_PICASSO) - chip_name = "picasso"; - else - chip_name = "raven"; - break; - case IP_VERSION(9, 4, 1): - chip_name = "arcturus"; - break; - case IP_VERSION(9, 3, 0): - if (adev->apu_flags & AMD_APU_IS_RENOIR) - chip_name = "renoir"; - else - chip_name = "green_sardine"; - break; - case IP_VERSION(9, 4, 2): - chip_name = "aldebaran"; - break; - default: - BUG(); - } + amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); /* No CPG in Arcturus */ if (adev->gfx.num_gfx_rings) { - r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name); + r = gfx_v9_0_init_cp_gfx_microcode(adev, ucode_prefix); if (r) return r; } - r = gfx_v9_0_init_rlc_microcode(adev, chip_name); + r = gfx_v9_0_init_rlc_microcode(adev, ucode_prefix); if (r) return r; - r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name); + r = gfx_v9_0_init_cp_compute_microcode(adev, ucode_prefix); if (r) return r; @@ -1783,7 +1710,8 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE; if (mec_hpd_size) { r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->gfx.mec.hpd_eop_obj, &adev->gfx.mec.hpd_eop_gpu_addr, (void **)&hpd); @@ -2008,27 +1936,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) break; } - if (adev->gfx.ras) { - err = amdgpu_ras_register_ras_block(adev, &adev->gfx.ras->ras_block); - if (err) { - DRM_ERROR("Failed to register gfx ras block!\n"); - return err; - } - - strcpy(adev->gfx.ras->ras_block.ras_comm.name, "gfx"); - adev->gfx.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX; - adev->gfx.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; - adev->gfx.ras_if = &adev->gfx.ras->ras_block.ras_comm; - - /* If not define special ras_late_init function, use gfx default ras_late_init */ - if (!adev->gfx.ras->ras_block.ras_late_init) - adev->gfx.ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init; - - /* If not defined special ras_cb 
function, use default ras_cb */ - if (!adev->gfx.ras->ras_block.ras_cb) - adev->gfx.ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb; - } - adev->gfx.config.gb_addr_config = gb_addr_config; adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << @@ -2158,12 +2065,6 @@ static int gfx_v9_0_sw_init(void *handle) adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; - r = gfx_v9_0_init_microcode(adev); - if (r) { - DRM_ERROR("Failed to load gfx firmware!\n"); - return r; - } - if (adev->gfx.rlc.funcs) { if (adev->gfx.rlc.funcs->init) { r = adev->gfx.rlc.funcs->init(adev); @@ -2276,6 +2177,11 @@ static int gfx_v9_0_sw_init(void *handle) if (r) return r; + if (amdgpu_gfx_ras_sw_init(adev)) { + dev_err(adev->dev, "Failed to initialize gfx ras block!\n"); + return -EINVAL; + } + return 0; } @@ -4605,7 +4511,7 @@ static int gfx_v9_0_early_init(void *handle) /* init rlcg reg access ctrl */ gfx_v9_0_init_rlcg_reg_access_ctrl(adev); - return 0; + return gfx_v9_0_init_microcode(adev); } static int gfx_v9_0_ecc_late_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index ec4d5e15b766a3..ab2325f6c7ac5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -120,7 +120,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); /* Set default page address. */ - value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index 34513e8e151916..9b3a0252731818 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -165,7 +165,7 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev) max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); /* Set default page address. */ - value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c index 3f8676d23a5ed8..4aacbbec31e28a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c @@ -167,7 +167,7 @@ static void gfxhub_v2_1_init_system_aperture_regs(struct amdgpu_device *adev) max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); /* Set default page address. */ - value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c index 0e13370c205724..be0d0f47415e85 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c @@ -151,19 +151,20 @@ static void gfxhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev) { uint64_t value; - /* Disable AGP. 
*/ + /* Program the AGP BAR */ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BASE, 0); - WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, 0); - WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, 0x00FFFFFF); + WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + /* Program the system aperture low logical page number. */ WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, - adev->gmc.vram_start >> 18); + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - adev->gmc.vram_end >> 18); + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); /* Set default page address. */ - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset; WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c index 080ff11ca305e7..6e0bd628c8895a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c @@ -159,17 +159,17 @@ static void gfxhub_v3_0_3_init_system_aperture_regs(struct amdgpu_device *adev) /* Disable AGP. */ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BASE, 0); - WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, 0); - WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, 0x00FFFFFF); + WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); /* Program the system aperture low logical page number. */ WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, - adev->gmc.vram_start >> 18); + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - adev->gmc.vram_end >> 18); + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); /* Set default page address. */ - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset; WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 21e46817d82d98..7db1f1a7e33c30 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -78,13 +78,25 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, /* MM HUB */ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false); /* GFX HUB */ - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); + /* This works because this interrupt is only + * enabled at init/resume and disabled in + * fini/suspend, so the overall state doesn't + * change over the course of suspend/resume. + */ + if (!adev->in_s0ix) + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); break; case AMDGPU_IRQ_STATE_ENABLE: /* MM HUB */ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true); /* GFX HUB */ - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); + /* This works because this interrupt is only + * enabled at init/resume and disabled in + * fini/suspend, so the overall state doesn't + * change over the course of suspend/resume. 
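+ * While in S0ix the GFX IP stays powered down, so the GFXHUB + * mask is deliberately left untouched here. 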
+ */ + if (!adev->in_s0ix) + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); break; default: break; @@ -835,10 +847,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev) } #endif - /* In case the PCI BAR is larger than the actual amount of vram */ adev->gmc.visible_vram_size = adev->gmc.aper_size; - if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) - adev->gmc.visible_vram_size = adev->gmc.real_vram_size; /* set the gart size */ if (amdgpu_gart_size == -1) { @@ -1061,9 +1070,12 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) } amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); - r = adev->gfxhub.funcs->gart_enable(adev); - if (r) - return r; + + if (!adev->in_s0ix) { + r = adev->gfxhub.funcs->gart_enable(adev); + if (r) + return r; + } r = adev->mmhub.funcs->gart_enable(adev); if (r) @@ -1077,10 +1089,12 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? false : true; - adev->gfxhub.funcs->set_fault_enable_default(adev, value); + if (!adev->in_s0ix) + adev->gfxhub.funcs->set_fault_enable_default(adev, value); adev->mmhub.funcs->set_fault_enable_default(adev, value); gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0); - gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); + if (!adev->in_s0ix) + gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), @@ -1101,7 +1115,7 @@ static int gmc_v10_0_hw_init(void *handle) * harvestable groups in gc_utcl2 need to be programmed before any GFX block * register setup within GMC, or else system hang when harvesting SA. */ - if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest) + if (!adev->in_s0ix && adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest) adev->gfxhub.funcs->utcl2_harvest(adev); r = gmc_v10_0_gart_enable(adev); @@ -1129,7 +1143,8 @@ static int gmc_v10_0_hw_init(void *handle) */ static void gmc_v10_0_gart_disable(struct amdgpu_device *adev) { - adev->gfxhub.funcs->gart_disable(adev); + if (!adev->in_s0ix) + adev->gfxhub.funcs->gart_disable(adev); adev->mmhub.funcs->gart_disable(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 4326078689cd69..0a31a341aa43b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -64,13 +64,25 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev, /* MM HUB */ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false); /* GFX HUB */ - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); + /* This works because this interrupt is only + * enabled at init/resume and disabled in + * fini/suspend, so the overall state doesn't + * change over the course of suspend/resume. + */ + if (!adev->in_s0ix) + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); break; case AMDGPU_IRQ_STATE_ENABLE: /* MM HUB */ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true); /* GFX HUB */ - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); + /* This works because this interrupt is only + * enabled at init/resume and disabled in + * fini/suspend, so the overall state doesn't + * change over the course of suspend/resume. 
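+ * The GFXHUB side is likewise skipped under S0ix, since the + * GFX IP is power-gated in that state. 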
+ */ + if (!adev->in_s0ix) + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); break; default: break; @@ -661,6 +673,7 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev, amdgpu_gmc_vram_location(adev, &adev->gmc, base); amdgpu_gmc_gart_location(adev, mc); + amdgpu_gmc_agp_location(adev, mc); /* base offset of vram pages */ if (amdgpu_sriov_vf(adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index ec291d28edffd8..b7dad4e6781354 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -131,19 +131,12 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin"); else snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name); - err = request_firmware(&adev->gmc.fw, fw_name, adev->dev); - if (err) - goto out; - - err = amdgpu_ucode_validate(adev->gmc.fw); - -out: + err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name); if (err) { dev_err(adev->dev, "si_mc: Failed to load firmware \"%s\"\n", fw_name); - release_firmware(adev->gmc.fw); - adev->gmc.fw = NULL; + amdgpu_ucode_release(&adev->gmc.fw); } return err; } @@ -258,7 +251,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->gmc.vram_end >> 12); WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, - adev->vram_scratch.gpu_addr >> 12); + adev->mem_scratch.gpu_addr >> 12); WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); @@ -894,8 +887,7 @@ static int gmc_v6_0_sw_fini(void *handle) amdgpu_vm_manager_fini(adev); amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); - release_firmware(adev->gmc.fw); - adev->gmc.fw = NULL; + amdgpu_ucode_release(&adev->gmc.fw); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 979da6f510e886..402960b0174e2b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -156,16 +156,10 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name); - err = request_firmware(&adev->gmc.fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gmc.fw); - -out: + err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name); if (err) { pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name); - release_firmware(adev->gmc.fw); - adev->gmc.fw = NULL; + amdgpu_ucode_release(&adev->gmc.fw); } return err; } @@ -292,7 +286,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->gmc.vram_end >> 12); WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, - adev->vram_scratch.gpu_addr >> 12); + adev->mem_scratch.gpu_addr >> 12); WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); @@ -389,10 +383,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) } #endif - /* In case the PCI BAR is larger than the actual amount of vram */ adev->gmc.visible_vram_size = adev->gmc.aper_size; - if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) - adev->gmc.visible_vram_size = adev->gmc.real_vram_size; /* set the gart size */ if (amdgpu_gart_size == -1) { @@ -1081,8 +1072,7 @@ static int gmc_v7_0_sw_fini(void *handle) kfree(adev->gmc.vm_fault_info); amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); - release_firmware(adev->gmc.fw); - adev->gmc.fw = 
NULL; + amdgpu_ucode_release(&adev->gmc.fw); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 382dde1ce74c07..504c1b34dab7de 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -264,16 +264,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name); - err = request_firmware(&adev->gmc.fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gmc.fw); - -out: + err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name); if (err) { pr_err("mc: Failed to load firmware \"%s\"\n", fw_name); - release_firmware(adev->gmc.fw); - adev->gmc.fw = NULL; + amdgpu_ucode_release(&adev->gmc.fw); } return err; } @@ -474,7 +468,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->gmc.vram_end >> 12); WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, - adev->vram_scratch.gpu_addr >> 12); + adev->mem_scratch.gpu_addr >> 12); if (amdgpu_sriov_vf(adev)) { tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16; @@ -587,10 +581,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) } #endif - /* In case the PCI BAR is larger than the actual amount of vram */ adev->gmc.visible_vram_size = adev->gmc.aper_size; - if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) - adev->gmc.visible_vram_size = adev->gmc.real_vram_size; /* set the gart size */ if (amdgpu_gart_size == -1) { @@ -1203,8 +1194,7 @@ static int gmc_v8_0_sw_fini(void *handle) kfree(adev->gmc.vm_fault_info); amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); - release_firmware(adev->gmc.fw); - adev->gmc.fw = NULL; + amdgpu_ucode_release(&adev->gmc.fw); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 08d6cf79fb15df..d65c6cea344515 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -484,6 +484,14 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + i; + /* This works because this interrupt is only + * enabled at init/resume and disabled in + * fini/suspend, so the overall state doesn't + * change over the course of suspend/resume. + */ + if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0)) + continue; + if (j == AMDGPU_GFXHUB_0) tmp = RREG32_SOC15_IP(GC, reg); else @@ -504,6 +512,14 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + i; + /* This works because this interrupt is only + * enabled at init/resume and disabled in + * fini/suspend, so the overall state doesn't + * change over the course of suspend/resume. 
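
The gmc_v6/v7/v8 hunks above all collapse the same open-coded sequence — request_firmware(), amdgpu_ucode_validate(), and a hand-rolled release_firmware() plus NULL assignment on every error path — into one amdgpu_ucode_request()/amdgpu_ucode_release() pair. The release helper takes the address of the pointer so it can clear the caller's handle, which is what makes the shortened error paths safe. A minimal user-space sketch of that contract (the struct and helpers are stand-ins, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct firmware { void *data; size_t size; };

/* Model of amdgpu_ucode_request(): load then validate, and on failure
 * leave *fw NULL so the caller needs no cleanup path of its own. */
static int ucode_request(struct firmware **fw, const char *name)
{
    struct firmware *f = calloc(1, sizeof(*f));

    if (!f)
        return -1;
    /* a real implementation would read and validate the image here */
    printf("loaded %s\n", name);
    *fw = f;
    return 0;
}

/* Model of amdgpu_ucode_release(): takes &fw so the caller's pointer is
 * cleared, making a second call a harmless no-op. */
static void ucode_release(struct firmware **fw)
{
    if (!fw || !*fw)
        return;
    free(*fw);
    *fw = NULL;
}

int main(void)
{
    struct firmware *fw = NULL;

    if (!ucode_request(&fw, "amdgpu/si58_mc.bin"))
        ucode_release(&fw);
    ucode_release(&fw); /* safe double release */
    return 0;
}
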
+ */ + if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0)) + continue; + if (j == AMDGPU_GFXHUB_0) tmp = RREG32_SOC15_IP(GC, reg); else @@ -1536,10 +1552,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) } #endif - /* In case the PCI BAR is larger than the actual amount of vram */ adev->gmc.visible_vram_size = adev->gmc.aper_size; - if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) - adev->gmc.visible_vram_size = adev->gmc.real_vram_size; /* set the gart size */ if (amdgpu_gart_size == -1) { @@ -1862,9 +1875,12 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) } amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); - r = adev->gfxhub.funcs->gart_enable(adev); - if (r) - return r; + + if (!adev->in_s0ix) { + r = adev->gfxhub.funcs->gart_enable(adev); + if (r) + return r; + } r = adev->mmhub.funcs->gart_enable(adev); if (r) @@ -1911,11 +1927,15 @@ static int gmc_v9_0_hw_init(void *handle) value = true; if (!amdgpu_sriov_vf(adev)) { - adev->gfxhub.funcs->set_fault_enable_default(adev, value); + if (!adev->in_s0ix) + adev->gfxhub.funcs->set_fault_enable_default(adev, value); adev->mmhub.funcs->set_fault_enable_default(adev, value); } - for (i = 0; i < adev->num_vmhubs; ++i) + for (i = 0; i < adev->num_vmhubs; ++i) { + if (adev->in_s0ix && (i == AMDGPU_GFXHUB_0)) + continue; gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); + } if (adev->umc.funcs && adev->umc.funcs->init_registers) adev->umc.funcs->init_registers(adev); @@ -1939,7 +1959,8 @@ static int gmc_v9_0_hw_init(void *handle) */ static void gmc_v9_0_gart_disable(struct amdgpu_device *adev) { - adev->gfxhub.funcs->gart_disable(adev); + if (!adev->in_s0ix) + adev->gfxhub.funcs->gart_disable(adev); adev->mmhub.funcs->gart_disable(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c index 077c53c6cc08b0..4ab90c7852c3ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c @@ -50,10 +50,7 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev) amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_imu.bin", ucode_prefix); - err = request_firmware(&adev->gfx.imu_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.imu_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, fw_name); if (err) goto out; imu_hdr = (const struct imu_firmware_header_v1_0 *)adev->gfx.imu_fw->data; @@ -78,7 +75,7 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev) dev_err(adev->dev, "gfx11: Failed to load firmware \"%s\"\n", fw_name); - release_firmware(adev->gfx.imu_fw); + amdgpu_ucode_release(&adev->gfx.imu_fw); } return err; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c index 614394118a53e2..2e2062636d5f68 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c @@ -379,89 +379,6 @@ static const struct amdgpu_mes_funcs mes_v10_1_funcs = { .resume_gang = mes_v10_1_resume_gang, }; -static int mes_v10_1_init_microcode(struct amdgpu_device *adev, - enum admgpu_mes_pipe pipe) -{ - const char *chip_name; - char fw_name[30]; - int err; - const struct mes_firmware_header_v1_0 *mes_hdr; - struct amdgpu_firmware_info *info; - - switch (adev->ip_versions[GC_HWIP][0]) { - case IP_VERSION(10, 1, 10): - chip_name = "navi10"; - break; - case IP_VERSION(10, 3, 0): - chip_name = "sienna_cichlid"; - break; - default: - BUG(); - } - - if (pipe == 
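
The gmc_v9_0 changes here and below gate every GFXHUB access behind !adev->in_s0ix: the interrupt-state programming, gart enable/disable, the fault-default registers, and the per-hub TLB flush. The rationale is that GFX keeps its state across S0ix, so reprogramming (or tearing down) the GFXHUB would be wrong; the MMHUB is still handled normally. A compact model of the per-hub loop with the skip (hub ids are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum { GFXHUB_0, MMHUB_0, NUM_VMHUBS }; /* illustrative hub ids */

static void flush_gpu_tlb(int hub) { printf("flush hub %d\n", hub); }

static void flush_all_hubs(bool in_s0ix)
{
    for (int i = 0; i < NUM_VMHUBS; ++i) {
        /* GFX is power-gated but retains state across S0ix,
         * so leave the GFXHUB untouched */
        if (in_s0ix && i == GFXHUB_0)
            continue;
        flush_gpu_tlb(i);
    }
}

int main(void)
{
    flush_all_hubs(true);  /* only the MMHUB is flushed */
    flush_all_hubs(false); /* both hubs are flushed */
    return 0;
}
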
AMDGPU_MES_SCHED_PIPE) - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin", - chip_name); - else - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes1.bin", - chip_name); - - err = request_firmware(&adev->mes.fw[pipe], fw_name, adev->dev); - if (err) - return err; - - err = amdgpu_ucode_validate(adev->mes.fw[pipe]); - if (err) { - release_firmware(adev->mes.fw[pipe]); - adev->mes.fw[pipe] = NULL; - return err; - } - - mes_hdr = (const struct mes_firmware_header_v1_0 *) - adev->mes.fw[pipe]->data; - adev->mes.uc_start_addr[pipe] = - le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) | - ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32); - adev->mes.data_start_addr[pipe] = - le32_to_cpu(mes_hdr->mes_data_start_addr_lo) | - ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32); - - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - int ucode, ucode_data; - - if (pipe == AMDGPU_MES_SCHED_PIPE) { - ucode = AMDGPU_UCODE_ID_CP_MES; - ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA; - } else { - ucode = AMDGPU_UCODE_ID_CP_MES1; - ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA; - } - - info = &adev->firmware.ucode[ucode]; - info->ucode_id = ucode; - info->fw = adev->mes.fw[pipe]; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes), - PAGE_SIZE); - - info = &adev->firmware.ucode[ucode_data]; - info->ucode_id = ucode_data; - info->fw = adev->mes.fw[pipe]; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes), - PAGE_SIZE); - } - - return 0; -} - -static void mes_v10_1_free_microcode(struct amdgpu_device *adev, - enum admgpu_mes_pipe pipe) -{ - release_firmware(adev->mes.fw[pipe]); - adev->mes.fw[pipe] = NULL; -} - static int mes_v10_1_allocate_ucode_buffer(struct amdgpu_device *adev, enum admgpu_mes_pipe pipe) { @@ -1007,7 +924,6 @@ static int mes_v10_1_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int pipe, r; - adev->mes.adev = adev; adev->mes.funcs = &mes_v10_1_funcs; adev->mes.kiq_hw_init = &mes_v10_1_kiq_hw_init; @@ -1019,10 +935,6 @@ static int mes_v10_1_sw_init(void *handle) if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE) continue; - r = mes_v10_1_init_microcode(adev, pipe); - if (r) - return r; - r = mes_v10_1_allocate_eop_buf(adev, pipe); if (r) return r; @@ -1059,8 +971,7 @@ static int mes_v10_1_sw_fini(void *handle) amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe], &adev->mes.eop_gpu_addr[pipe], NULL); - - mes_v10_1_free_microcode(adev, pipe); + amdgpu_ucode_release(&adev->mes.fw[pipe]); } amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj, @@ -1229,6 +1140,22 @@ static int mes_v10_1_resume(void *handle) return amdgpu_mes_resume(adev); } +static int mes_v10_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int pipe, r; + + for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { + if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE) + continue; + r = amdgpu_mes_init_microcode(adev, pipe); + if (r) + return r; + } + + return 0; +} + static int mes_v10_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1241,6 +1168,7 @@ static int mes_v10_0_late_init(void *handle) static const struct amd_ip_funcs mes_v10_1_ip_funcs = { .name = "mes_v10_1", + .early_init = mes_v10_0_early_init, .late_init = mes_v10_0_late_init, .sw_init = mes_v10_1_sw_init, .sw_fini = mes_v10_1_sw_fini, diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 
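
The MES loader removed above assembled each pipe's 64-bit ucode and data start addresses from paired little-endian dwords in the firmware header; the shared amdgpu_mes_init_microcode() that replaces it presumably keeps the same arithmetic. The pattern, sketched with glibc's le32toh() standing in for the kernel's le32_to_cpu():

#include <endian.h>
#include <inttypes.h>
#include <stdio.h>

/* Combine two little-endian dwords from a firmware header into a
 * 64-bit GPU address; the cast keeps the shift from truncating. */
static uint64_t fw_addr(uint32_t le_lo, uint32_t le_hi)
{
    return le32toh(le_lo) | ((uint64_t)le32toh(le_hi) << 32);
}

int main(void)
{
    printf("0x%" PRIx64 "\n", fw_addr(0x00001000, 0x00000001));
    return 0;
}
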
1c4787000a5f39..5826eac270d795 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -460,80 +460,6 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = { .misc_op = mes_v11_0_misc_op, }; -static int mes_v11_0_init_microcode(struct amdgpu_device *adev, - enum admgpu_mes_pipe pipe) -{ - char fw_name[30]; - char ucode_prefix[30]; - int err; - const struct mes_firmware_header_v1_0 *mes_hdr; - struct amdgpu_firmware_info *info; - - amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - - if (pipe == AMDGPU_MES_SCHED_PIPE) - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin", - ucode_prefix); - else - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes1.bin", - ucode_prefix); - - err = request_firmware(&adev->mes.fw[pipe], fw_name, adev->dev); - if (err) - return err; - - err = amdgpu_ucode_validate(adev->mes.fw[pipe]); - if (err) { - release_firmware(adev->mes.fw[pipe]); - adev->mes.fw[pipe] = NULL; - return err; - } - - mes_hdr = (const struct mes_firmware_header_v1_0 *) - adev->mes.fw[pipe]->data; - adev->mes.uc_start_addr[pipe] = - le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) | - ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32); - adev->mes.data_start_addr[pipe] = - le32_to_cpu(mes_hdr->mes_data_start_addr_lo) | - ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32); - - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - int ucode, ucode_data; - - if (pipe == AMDGPU_MES_SCHED_PIPE) { - ucode = AMDGPU_UCODE_ID_CP_MES; - ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA; - } else { - ucode = AMDGPU_UCODE_ID_CP_MES1; - ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA; - } - - info = &adev->firmware.ucode[ucode]; - info->ucode_id = ucode; - info->fw = adev->mes.fw[pipe]; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes), - PAGE_SIZE); - - info = &adev->firmware.ucode[ucode_data]; - info->ucode_id = ucode_data; - info->fw = adev->mes.fw[pipe]; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes), - PAGE_SIZE); - } - - return 0; -} - -static void mes_v11_0_free_microcode(struct amdgpu_device *adev, - enum admgpu_mes_pipe pipe) -{ - release_firmware(adev->mes.fw[pipe]); - adev->mes.fw[pipe] = NULL; -} - static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev, enum admgpu_mes_pipe pipe) { @@ -550,7 +476,9 @@ static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev, fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); r = amdgpu_bo_create_reserved(adev, fw_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->mes.ucode_fw_obj[pipe], &adev->mes.ucode_fw_gpu_addr[pipe], (void **)&adev->mes.ucode_fw_ptr[pipe]); @@ -583,7 +511,9 @@ static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev, fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); r = amdgpu_bo_create_reserved(adev, fw_size, - 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, + 64 * 1024, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->mes.data_fw_obj[pipe], &adev->mes.data_fw_gpu_addr[pipe], (void **)&adev->mes.data_fw_ptr[pipe]); @@ -1088,7 +1018,6 @@ static int mes_v11_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int pipe, r; - adev->mes.adev = adev; adev->mes.funcs = &mes_v11_0_funcs; adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init; adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini; @@ -1101,10 +1030,6 @@ static int mes_v11_0_sw_init(void 
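
Besides dropping its private loader, mes_v11_0 widens the placement of the MES ucode and data BOs from VRAM-only to VRAM | GTT, so the allocation can fall back to system memory when VRAM placement is not possible. A toy model of mask-driven placement with fallback (the domain bits and placement helpers are made up for the sketch):

#include <stdbool.h>
#include <stdio.h>

#define DOMAIN_VRAM 0x1
#define DOMAIN_GTT  0x2

static bool place_in_vram(void) { return false; } /* pretend VRAM is full */
static bool place_in_gtt(void)  { return true; }

/* Try the allowed domains in preference order; a single-domain mask
 * has no fallback, VRAM|GTT does. */
static bool bo_place(unsigned int domains)
{
    if ((domains & DOMAIN_VRAM) && place_in_vram())
        return true;
    if ((domains & DOMAIN_GTT) && place_in_gtt())
        return true;
    return false;
}

int main(void)
{
    printf("VRAM only: %d\n", bo_place(DOMAIN_VRAM));
    printf("VRAM|GTT:  %d\n", bo_place(DOMAIN_VRAM | DOMAIN_GTT));
    return 0;
}
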
*handle) if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE) continue; - r = mes_v11_0_init_microcode(adev, pipe); - if (r) - return r; - r = mes_v11_0_allocate_eop_buf(adev, pipe); if (r) return r; @@ -1141,8 +1066,7 @@ static int mes_v11_0_sw_fini(void *handle) amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe], &adev->mes.eop_gpu_addr[pipe], NULL); - - mes_v11_0_free_microcode(adev, pipe); + amdgpu_ucode_release(&adev->mes.fw[pipe]); } amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj, @@ -1339,6 +1263,22 @@ static int mes_v11_0_resume(void *handle) return amdgpu_mes_resume(adev); } +static int mes_v11_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int pipe, r; + + for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { + if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE) + continue; + r = amdgpu_mes_init_microcode(adev, pipe); + if (r) + return r; + } + + return 0; +} + static int mes_v11_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1353,6 +1293,7 @@ static int mes_v11_0_late_init(void *handle) static const struct amd_ip_funcs mes_v11_0_ip_funcs = { .name = "mes_v11_0", + .early_init = mes_v11_0_early_init, .late_init = mes_v11_0_late_init, .sw_init = mes_v11_0_sw_init, .sw_fini = mes_v11_0_sw_fini, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 3e51e773f92be0..15e7cbeae75b81 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -114,7 +114,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) return; /* Set default page address. */ - value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c index 6fa7090bc6cbe3..73afbf2facc9e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c @@ -134,7 +134,7 @@ static void mmhub_v1_7_init_system_aperture_regs(struct amdgpu_device *adev) } /* Set default page address. */ - value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 0e664d0cc8d51f..278e32db878d7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -234,7 +234,7 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev) } /* Set default page address. 
*/ - value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c index 4638ea7c2eec5c..fcf2813e70db80 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c @@ -164,7 +164,7 @@ static void mmhub_v2_3_init_system_aperture_regs(struct amdgpu_device *adev) max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); /* Set default page address. */ - value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c index 16cc82215e2e16..164948c50ac331 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c @@ -169,26 +169,27 @@ static void mmhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev) uint64_t value; uint32_t tmp; - if (!amdgpu_sriov_vf(adev)) { - /* - * the new L1 policy will block SRIOV guest from writing - * these regs, and they will be programed at host. - * so skip programing these regs. - */ - /* Disable AGP. */ - WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0); - WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0); - WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF); - - /* Program the system aperture low logical page number. */ - WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, - adev->gmc.vram_start >> 18); - WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - adev->gmc.vram_end >> 18); - } + if (amdgpu_sriov_vf(adev)) + return; + + /* + * the new L1 policy will block SRIOV guest from writing + * these regs, and they will be programmed at host. + * so skip programming these regs. + */ + /* Program the AGP BAR */ + WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0); + WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + + /* Program the system aperture low logical page number. */ + WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); /* Set default page address. */ - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + + value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset; WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c index 6bdf2ef0298d6e..26509b6b8c2402 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c @@ -183,12 +183,12 @@ static void mmhub_v3_0_1_init_system_aperture_regs(struct amdgpu_device *adev) */ /* Program the system aperture low logical page number.
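
The mmhub hunks above switch from "disable AGP" to programming a real AGP window, and the system aperture is then widened to cover both the framebuffer and the AGP range, hence the min()/max() pairs. The AGP BOT/TOP registers take 16 MiB units (>> 24) while the aperture registers take 256 KiB units (>> 18). A worked sketch of the resulting register values (the addresses are invented):

#include <inttypes.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    uint64_t fb_start  = 0x0000008000000000ULL, fb_end  = 0x00000080ffffffffULL;
    uint64_t agp_start = 0x0000008100000000ULL, agp_end = 0x00000081ffffffffULL;

    /* AGP window bounds, 16 MiB granularity */
    printf("AGP_BOT   = 0x%" PRIx64 "\n", agp_start >> 24);
    printf("AGP_TOP   = 0x%" PRIx64 "\n", agp_end >> 24);

    /* the system aperture must span both FB and AGP, 256 KiB granularity */
    printf("APER_LOW  = 0x%" PRIx64 "\n", MIN(fb_start, agp_start) >> 18);
    printf("APER_HIGH = 0x%" PRIx64 "\n", MAX(fb_end, agp_end) >> 18);
    return 0;
}
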
*/ WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, - adev->gmc.vram_start >> 18); + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - adev->gmc.vram_end >> 18); + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); /* Set default page address. */ - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + + value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset; WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c index 45465acaa943af..26abbc6a47ab28 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c @@ -162,10 +162,10 @@ static void mmhub_v3_0_2_init_system_aperture_regs(struct amdgpu_device *adev) uint64_t value; uint32_t tmp; - /* Disable AGP. */ + /* Program the AGP BAR */ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0); - WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0); - WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF); + WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); if (!amdgpu_sriov_vf(adev)) { /* @@ -175,13 +175,13 @@ static void mmhub_v3_0_2_init_system_aperture_regs(struct amdgpu_device *adev) */ /* Program the system aperture low logical page number. */ WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, - adev->gmc.vram_start >> 18); + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - adev->gmc.vram_end >> 18); + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); } /* Set default page address. */ - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + + value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset; WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 445cb06b9d264b..72083e96222f03 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -136,7 +136,7 @@ static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev, max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); /* Set default page address. 
*/ - value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); WREG32_SOC15_OFFSET( MMHUB, 0, mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 12906ba74462fb..63725b2ebc0373 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -404,6 +404,11 @@ static int xgpu_ai_request_init_data(struct amdgpu_device *adev) return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA); } +static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev) +{ + xgpu_ai_send_access_requests(adev, IDH_RAS_POISON); +} + const struct amdgpu_virt_ops xgpu_ai_virt_ops = { .req_full_gpu = xgpu_ai_request_full_gpu_access, .rel_full_gpu = xgpu_ai_release_full_gpu_access, @@ -411,4 +416,5 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = { .wait_reset = NULL, .trans_msg = xgpu_ai_mailbox_trans_msg, .req_init_data = xgpu_ai_request_init_data, + .ras_poison_handler = xgpu_ai_ras_poison_handler, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h index fa7e13e0459ee3..af1a784696bd25 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h @@ -39,6 +39,7 @@ enum idh_request { IDH_LOG_VF_ERROR = 200, IDH_READY_TO_RESET = 201, + IDH_RAS_POISON = 202, }; enum idh_event { diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index e07757eea7adf9..cae1aaa4ddb68c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -426,6 +426,11 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); } +static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev) +{ + xgpu_nv_send_access_requests(adev, IDH_RAS_POISON); +} + const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .req_full_gpu = xgpu_nv_request_full_gpu_access, .rel_full_gpu = xgpu_nv_release_full_gpu_access, @@ -433,4 +438,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .reset_gpu = xgpu_nv_request_reset, .wait_reset = NULL, .trans_msg = xgpu_nv_mailbox_trans_msg, + .ras_poison_handler = xgpu_nv_ras_poison_handler, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h index 73887b0aa1d6e6..d0221ce087690e 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h @@ -39,6 +39,7 @@ enum idh_request { IDH_LOG_VF_ERROR = 200, IDH_READY_TO_RESET = 201, + IDH_RAS_POISON = 202, }; enum idh_event { diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 6853b93ac82e76..d972025f0d20f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -98,7 +98,7 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode = }; /* Sienna Cichlid */ -static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] = +static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, @@ -110,10 +110,27 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] = {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; -static const struct amdgpu_video_codecs sc_video_codecs_decode = +static const struct 
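
The mxgpu_ai and mxgpu_nv hunks above add a ras_poison_handler virt op that simply forwards a RAS poison notification to the host through the existing mailbox request path, using the new IDH_RAS_POISON id (202) appended to both request enums. A compact user-space model of the wiring (the types and the mailbox stub are stand-ins):

#include <stdio.h>

enum idh_request {
    IDH_READY_TO_RESET = 201,
    IDH_RAS_POISON     = 202, /* new request id */
};

struct adev; /* opaque device handle for the sketch */

static void send_access_request(struct adev *adev, enum idh_request req)
{
    (void)adev;
    printf("mailbox request %d\n", req); /* stand-in for the HW mailbox */
}

struct virt_ops {
    void (*ras_poison_handler)(struct adev *adev);
};

static void ras_poison_handler(struct adev *adev)
{
    send_access_request(adev, IDH_RAS_POISON);
}

static const struct virt_ops ops = {
    .ras_poison_handler = ras_poison_handler,
};

int main(void)
{
    ops.ras_poison_handler(NULL);
    return 0;
}
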
amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] = { - .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array), - .codec_array = sc_video_codecs_decode_array, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, +}; + +static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn0 = +{ + .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array_vcn0), + .codec_array = sc_video_codecs_decode_array_vcn0, +}; + +static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 = +{ + .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array_vcn1), + .codec_array = sc_video_codecs_decode_array_vcn1, }; /* SRIOV Sienna Cichlid, not const since data is controlled by host */ @@ -123,7 +140,7 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, }; -static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] = +static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, @@ -135,16 +152,33 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] = {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; +static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] = +{ + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, +}; + static struct amdgpu_video_codecs sriov_sc_video_codecs_encode = { .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array), .codec_array = sriov_sc_video_codecs_encode_array, }; -static struct amdgpu_video_codecs sriov_sc_video_codecs_decode = +static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn0 = { - .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array), - .codec_array = sriov_sc_video_codecs_decode_array, + .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0), + .codec_array = sriov_sc_video_codecs_decode_array_vcn0, +}; + +static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn1 = +{ + .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1), + .codec_array = sriov_sc_video_codecs_decode_array_vcn1, }; /* Beige Goby*/ @@ -181,20 +215,37 @@ static const struct amdgpu_video_codecs yc_video_codecs_decode = { static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, const struct amdgpu_video_codecs **codecs) { + if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config)) + return 
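
nv_query_video_codecs() (continued below) now starts by rejecting the query when every VCN instance is harvested: harvest_config carries one bit per harvested instance, so its population count equaling num_vcn_inst means there is nothing left to report. A sketch using GCC/Clang's __builtin_popcount() as a stand-in for the kernel's hweight8():

#include <stdio.h>

#define VCN_HARVEST_VCN0 0x1
#define VCN_HARVEST_VCN1 0x2

static int hweight8(unsigned char w) { return __builtin_popcount(w); }

static int query_codecs(int num_vcn_inst, unsigned char harvest_config)
{
    /* every instance harvested: no codec capabilities to report */
    if (num_vcn_inst == hweight8(harvest_config))
        return -1; /* -EINVAL in the driver */
    return 0;
}

int main(void)
{
    printf("%d\n", query_codecs(2, VCN_HARVEST_VCN0));                       /* 0 */
    printf("%d\n", query_codecs(2, VCN_HARVEST_VCN0 | VCN_HARVEST_VCN1));    /* -1 */
    return 0;
}
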
-EINVAL; + switch (adev->ip_versions[UVD_HWIP][0]) { case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 64): case IP_VERSION(3, 0, 192): if (amdgpu_sriov_vf(adev)) { - if (encode) - *codecs = &sriov_sc_video_codecs_encode; - else - *codecs = &sriov_sc_video_codecs_decode; + if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) { + if (encode) + *codecs = &sriov_sc_video_codecs_encode; + else + *codecs = &sriov_sc_video_codecs_decode_vcn1; + } else { + if (encode) + *codecs = &sriov_sc_video_codecs_encode; + else + *codecs = &sriov_sc_video_codecs_decode_vcn0; + } } else { - if (encode) - *codecs = &nv_video_codecs_encode; - else - *codecs = &sc_video_codecs_decode; + if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) { + if (encode) + *codecs = &nv_video_codecs_encode; + else + *codecs = &sc_video_codecs_decode_vcn1; + } else { + if (encode) + *codecs = &nv_video_codecs_encode; + else + *codecs = &sc_video_codecs_decode_vcn0; + } } return 0; case IP_VERSION(3, 0, 16): @@ -202,7 +253,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, if (encode) *codecs = &nv_video_codecs_encode; else - *codecs = &sc_video_codecs_decode; + *codecs = &sc_video_codecs_decode_vcn0; return 0; case IP_VERSION(3, 1, 1): case IP_VERSION(3, 1, 2): @@ -993,9 +1044,19 @@ static int nv_common_late_init(void *handle) if (amdgpu_sriov_vf(adev)) { xgpu_nv_mailbox_get_irq(adev); - amdgpu_virt_update_sriov_video_codec(adev, - sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array), - sriov_sc_video_codecs_decode_array, ARRAY_SIZE(sriov_sc_video_codecs_decode_array)); + if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) { + amdgpu_virt_update_sriov_video_codec(adev, + sriov_sc_video_codecs_encode_array, + ARRAY_SIZE(sriov_sc_video_codecs_encode_array), + sriov_sc_video_codecs_decode_array_vcn1, + ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1)); + } else { + amdgpu_virt_update_sriov_video_codec(adev, + sriov_sc_video_codecs_encode_array, + ARRAY_SIZE(sriov_sc_video_codecs_encode_array), + sriov_sc_video_codecs_decode_array_vcn0, + ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0)); + } } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 9de46fa8f46ce9..e1b7fca096660a 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -47,83 +47,17 @@ MODULE_FIRMWARE("amdgpu/raven_ta.bin"); static int psp_v10_0_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; - const char *chip_name; - char fw_name[30]; + char ucode_prefix[30]; int err = 0; - const struct ta_firmware_header_v1_0 *ta_hdr; DRM_DEBUG("\n"); - switch (adev->asic_type) { - case CHIP_RAVEN: - if (adev->apu_flags & AMD_APU_IS_RAVEN2) - chip_name = "raven2"; - else if (adev->apu_flags & AMD_APU_IS_PICASSO) - chip_name = "picasso"; - else - chip_name = "raven"; - break; - default: BUG(); - } - - err = psp_init_asd_microcode(psp, chip_name); + amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); + + err = psp_init_asd_microcode(psp, ucode_prefix); if (err) - goto out; - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); - err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); - if (err) { - release_firmware(adev->psp.ta_fw); - adev->psp.ta_fw = NULL; - dev_info(adev->dev, - "psp v10.0: Failed to load firmware \"%s\"\n", - fw_name); - } else { - err = amdgpu_ucode_validate(adev->psp.ta_fw); - if (err) - goto out2; - -
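
With decode capabilities now differing per VCN instance on Sienna Cichlid (the VCN1 table above omits AV1), the query picks the decode table for whichever instance survives harvesting, while the encode table stays shared; nv_common_late_init() applies the same selection when refreshing the SR-IOV codec info. A reduced model of the selection, with the tables trimmed to names:

#include <stdio.h>

#define VCN_HARVEST_VCN0 0x1

struct codecs { const char *name; };

static const struct codecs decode_vcn0 = { "vcn0 decode (incl. AV1)" };
static const struct codecs decode_vcn1 = { "vcn1 decode (no AV1)" };
static const struct codecs encode      = { "shared encode" };

static const struct codecs *pick_decode(unsigned int harvest)
{
    /* if VCN0 is harvested, only VCN1's capabilities remain */
    return (harvest & VCN_HARVEST_VCN0) ? &decode_vcn1 : &decode_vcn0;
}

int main(void)
{
    printf("%s / %s\n", pick_decode(0)->name, encode.name);
    printf("%s / %s\n", pick_decode(VCN_HARVEST_VCN0)->name, encode.name);
    return 0;
}
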
ta_hdr = (const struct ta_firmware_header_v1_0 *) - adev->psp.ta_fw->data; - adev->psp.hdcp_context.context.bin_desc.fw_version = - le32_to_cpu(ta_hdr->hdcp.fw_version); - adev->psp.hdcp_context.context.bin_desc.size_bytes = - le32_to_cpu(ta_hdr->hdcp.size_bytes); - adev->psp.hdcp_context.context.bin_desc.start_addr = - (uint8_t *)ta_hdr + - le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); - - adev->psp.dtm_context.context.bin_desc.fw_version = - le32_to_cpu(ta_hdr->dtm.fw_version); - adev->psp.dtm_context.context.bin_desc.size_bytes = - le32_to_cpu(ta_hdr->dtm.size_bytes); - adev->psp.dtm_context.context.bin_desc.start_addr = - (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + - le32_to_cpu(ta_hdr->dtm.offset_bytes); - - adev->psp.securedisplay_context.context.bin_desc.fw_version = - le32_to_cpu(ta_hdr->securedisplay.fw_version); - adev->psp.securedisplay_context.context.bin_desc.size_bytes = - le32_to_cpu(ta_hdr->securedisplay.size_bytes); - adev->psp.securedisplay_context.context.bin_desc.start_addr = - (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + - le32_to_cpu(ta_hdr->securedisplay.offset_bytes); - - adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - } - - return 0; - -out2: - release_firmware(adev->psp.ta_fw); - adev->psp.ta_fw = NULL; -out: - if (err) { - dev_err(adev->dev, - "psp v10.0: Failed to load firmware \"%s\"\n", - fw_name); - } - - return err; + return err; + + return psp_init_ta_microcode(psp, ucode_prefix); } static int psp_v10_0_ring_create(struct psp_context *psp, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index bd3e3e23a93981..8f84fe40abbbbf 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -88,159 +88,56 @@ MODULE_FIRMWARE("amdgpu/beige_goby_ta.bin"); static int psp_v11_0_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; - const char *chip_name; - char fw_name[PSP_FW_NAME_LEN]; + char ucode_prefix[30]; int err = 0; - const struct ta_firmware_header_v1_0 *ta_hdr; DRM_DEBUG("\n"); - switch (adev->ip_versions[MP0_HWIP][0]) { - case IP_VERSION(11, 0, 2): - chip_name = "vega20"; - break; - case IP_VERSION(11, 0, 0): - chip_name = "navi10"; - break; - case IP_VERSION(11, 0, 5): - chip_name = "navi14"; - break; - case IP_VERSION(11, 0, 9): - chip_name = "navi12"; - break; - case IP_VERSION(11, 0, 4): - chip_name = "arcturus"; - break; - case IP_VERSION(11, 0, 7): - chip_name = "sienna_cichlid"; - break; - case IP_VERSION(11, 0, 11): - chip_name = "navy_flounder"; - break; - case IP_VERSION(11, 5, 0): - chip_name = "vangogh"; - break; - case IP_VERSION(11, 0, 12): - chip_name = "dimgrey_cavefish"; - break; - case IP_VERSION(11, 0, 13): - chip_name = "beige_goby"; - break; - default: - BUG(); - } - + amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); switch (adev->ip_versions[MP0_HWIP][0]) { case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 4): - err = psp_init_sos_microcode(psp, chip_name); + err = psp_init_sos_microcode(psp, ucode_prefix); if (err) return err; - err = psp_init_asd_microcode(psp, chip_name); + err = psp_init_asd_microcode(psp, ucode_prefix); if (err) return err; - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); - err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); - if (err) { - release_firmware(adev->psp.ta_fw); - adev->psp.ta_fw = NULL; - dev_info(adev->dev, - "psp v11.0: Failed to load firmware \"%s\"\n", 
fw_name); - } else { - err = amdgpu_ucode_validate(adev->psp.ta_fw); - if (err) - goto out2; - - ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; - adev->psp.xgmi_context.context.bin_desc.fw_version = - le32_to_cpu(ta_hdr->xgmi.fw_version); - adev->psp.xgmi_context.context.bin_desc.size_bytes = - le32_to_cpu(ta_hdr->xgmi.size_bytes); - adev->psp.xgmi_context.context.bin_desc.start_addr = - (uint8_t *)ta_hdr + - le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); - adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - adev->psp.ras_context.context.bin_desc.fw_version = - le32_to_cpu(ta_hdr->ras.fw_version); - adev->psp.ras_context.context.bin_desc.size_bytes = - le32_to_cpu(ta_hdr->ras.size_bytes); - adev->psp.ras_context.context.bin_desc.start_addr = - (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr + - le32_to_cpu(ta_hdr->ras.offset_bytes); - } + err = psp_init_ta_microcode(psp, ucode_prefix); + adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0; break; case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 5): case IP_VERSION(11, 0, 9): - err = psp_init_sos_microcode(psp, chip_name); + err = psp_init_sos_microcode(psp, ucode_prefix); if (err) return err; - err = psp_init_asd_microcode(psp, chip_name); + err = psp_init_asd_microcode(psp, ucode_prefix); if (err) return err; - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); - err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); - if (err) { - release_firmware(adev->psp.ta_fw); - adev->psp.ta_fw = NULL; - dev_info(adev->dev, - "psp v11.0: Failed to load firmware \"%s\"\n", fw_name); - } else { - err = amdgpu_ucode_validate(adev->psp.ta_fw); - if (err) - goto out2; - - ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; - adev->psp.hdcp_context.context.bin_desc.fw_version = - le32_to_cpu(ta_hdr->hdcp.fw_version); - adev->psp.hdcp_context.context.bin_desc.size_bytes = - le32_to_cpu(ta_hdr->hdcp.size_bytes); - adev->psp.hdcp_context.context.bin_desc.start_addr = - (uint8_t *)ta_hdr + - le32_to_cpu( - ta_hdr->header.ucode_array_offset_bytes); - - adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - - adev->psp.dtm_context.context.bin_desc.fw_version = - le32_to_cpu(ta_hdr->dtm.fw_version); - adev->psp.dtm_context.context.bin_desc.size_bytes = - le32_to_cpu(ta_hdr->dtm.size_bytes); - adev->psp.dtm_context.context.bin_desc.start_addr = - (uint8_t *)adev->psp.hdcp_context.context - .bin_desc.start_addr + - le32_to_cpu(ta_hdr->dtm.offset_bytes); - } + err = psp_init_ta_microcode(psp, ucode_prefix); + adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0; break; case IP_VERSION(11, 0, 7): case IP_VERSION(11, 0, 11): case IP_VERSION(11, 0, 12): case IP_VERSION(11, 0, 13): - err = psp_init_sos_microcode(psp, chip_name); - if (err) - return err; - err = psp_init_ta_microcode(psp, chip_name); + err = psp_init_sos_microcode(psp, ucode_prefix); if (err) return err; + err = psp_init_ta_microcode(psp, ucode_prefix); break; case IP_VERSION(11, 5, 0): - err = psp_init_asd_microcode(psp, chip_name); - if (err) - return err; - err = psp_init_toc_microcode(psp, chip_name); + err = psp_init_asd_microcode(psp, ucode_prefix); if (err) return err; + err = psp_init_toc_microcode(psp, ucode_prefix); break; default: BUG(); } - return 0; - -out2: - release_firmware(adev->psp.ta_fw); - adev->psp.ta_fw = NULL; return err; } diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index 
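
All of the PSP init_microcode() conversions in this series replace per-ASIC chip-name switches with amdgpu_ucode_ip_version_decode(), which derives the firmware prefix from the MP0 IP version. The toy below only illustrates the shape of such a lookup — the legacy table, the packed version encoding, and the generic fallback format are all invented for the sketch, not the real helper's data:

#include <stdio.h>

struct legacy { unsigned int ver; const char *name; };

/* illustrative subset; the real helper carries a full per-IP table */
static const struct legacy legacy_map[] = {
    { 0x0b0002, "vega20" },  /* MP0 11.0.2 */
    { 0x0b0500, "vangogh" }, /* MP0 11.5.0 */
};

static void ucode_prefix(unsigned int ip_ver, char *buf, size_t len)
{
    for (size_t i = 0; i < sizeof(legacy_map) / sizeof(legacy_map[0]); i++) {
        if (legacy_map[i].ver == ip_ver) {
            snprintf(buf, len, "%s", legacy_map[i].name);
            return;
        }
    }
    /* newer parts fall back to a generic ip_major_minor_rev prefix */
    snprintf(buf, len, "psp_%u_%u_%u",
             (ip_ver >> 16) & 0xff, (ip_ver >> 8) & 0xff, ip_ver & 0xff);
}

int main(void)
{
    char buf[30];

    ucode_prefix(0x0b0002, buf, sizeof(buf));
    printf("%s_sos.bin\n", buf);
    ucode_prefix(0x0d0004, buf, sizeof(buf)); /* MP0 13.0.4 */
    printf("%s_ta.bin\n", buf);
    return 0;
}
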
8ed2281b6557c3..fcd708eae75cce 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -48,83 +48,25 @@ MODULE_FIRMWARE("amdgpu/green_sardine_ta.bin"); static int psp_v12_0_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; - const char *chip_name; - char fw_name[30]; + char ucode_prefix[30]; int err = 0; - const struct ta_firmware_header_v1_0 *ta_hdr; DRM_DEBUG("\n"); - switch (adev->asic_type) { - case CHIP_RENOIR: - if (adev->apu_flags & AMD_APU_IS_RENOIR) - chip_name = "renoir"; - else - chip_name = "green_sardine"; - break; - default: - BUG(); - } + amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = psp_init_asd_microcode(psp, chip_name); + err = psp_init_asd_microcode(psp, ucode_prefix); if (err) return err; - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); - err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); - if (err) { - release_firmware(adev->psp.ta_fw); - adev->psp.ta_fw = NULL; - dev_info(adev->dev, - "psp v12.0: Failed to load firmware \"%s\"\n", - fw_name); - } else { - err = amdgpu_ucode_validate(adev->psp.ta_fw); - if (err) - goto out; - - ta_hdr = (const struct ta_firmware_header_v1_0 *) - adev->psp.ta_fw->data; - adev->psp.hdcp_context.context.bin_desc.fw_version = - le32_to_cpu(ta_hdr->hdcp.fw_version); - adev->psp.hdcp_context.context.bin_desc.size_bytes = - le32_to_cpu(ta_hdr->hdcp.size_bytes); - adev->psp.hdcp_context.context.bin_desc.start_addr = - (uint8_t *)ta_hdr + - le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); - - adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - - adev->psp.dtm_context.context.bin_desc.fw_version = - le32_to_cpu(ta_hdr->dtm.fw_version); - adev->psp.dtm_context.context.bin_desc.size_bytes = - le32_to_cpu(ta_hdr->dtm.size_bytes); - adev->psp.dtm_context.context.bin_desc.start_addr = - (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + - le32_to_cpu(ta_hdr->dtm.offset_bytes); - - if (adev->apu_flags & AMD_APU_IS_RENOIR) { - adev->psp.securedisplay_context.context.bin_desc.fw_version = - le32_to_cpu(ta_hdr->securedisplay.fw_version); - adev->psp.securedisplay_context.context.bin_desc.size_bytes = - le32_to_cpu(ta_hdr->securedisplay.size_bytes); - adev->psp.securedisplay_context.context.bin_desc.start_addr = - (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + - le32_to_cpu(ta_hdr->securedisplay.offset_bytes); - } - } - - return 0; + err = psp_init_ta_microcode(psp, ucode_prefix); + if (err) + return err; -out: - release_firmware(adev->psp.ta_fw); - adev->psp.ta_fw = NULL; - if (err) { - dev_err(adev->dev, - "psp v12.0: Failed to load firmware \"%s\"\n", - fw_name); - } + /* only supported on renoir */ + if (!(adev->apu_flags & AMD_APU_IS_RENOIR)) + adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0; - return err; + return 0; } static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index e6a26a7e5e5ef5..d62fcc77af9584 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -70,32 +70,19 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin"); static int psp_v13_0_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; - const char *chip_name; char ucode_prefix[30]; int err = 0; - switch (adev->ip_versions[MP0_HWIP][0]) { - case IP_VERSION(13, 0, 2): - chip_name = 
"aldebaran"; - break; - case IP_VERSION(13, 0, 1): - case IP_VERSION(13, 0, 3): - chip_name = "yellow_carp"; - break; - default: - amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); - chip_name = ucode_prefix; - break; - } + amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); switch (adev->ip_versions[MP0_HWIP][0]) { case IP_VERSION(13, 0, 2): - err = psp_init_sos_microcode(psp, chip_name); + err = psp_init_sos_microcode(psp, ucode_prefix); if (err) return err; /* It's not necessary to load ras ta on Guest side */ if (!amdgpu_sriov_vf(adev)) { - err = psp_init_ta_microcode(&adev->psp, chip_name); + err = psp_init_ta_microcode(psp, ucode_prefix); if (err) return err; } @@ -105,21 +92,21 @@ static int psp_v13_0_init_microcode(struct psp_context *psp) case IP_VERSION(13, 0, 5): case IP_VERSION(13, 0, 8): case IP_VERSION(13, 0, 11): - err = psp_init_toc_microcode(psp, chip_name); + err = psp_init_toc_microcode(psp, ucode_prefix); if (err) return err; - err = psp_init_ta_microcode(psp, chip_name); + err = psp_init_ta_microcode(psp, ucode_prefix); if (err) return err; break; case IP_VERSION(13, 0, 0): case IP_VERSION(13, 0, 7): case IP_VERSION(13, 0, 10): - err = psp_init_sos_microcode(psp, chip_name); + err = psp_init_sos_microcode(psp, ucode_prefix); if (err) return err; /* It's not necessary to load ras ta on Guest side */ - err = psp_init_ta_microcode(psp, chip_name); + err = psp_init_ta_microcode(psp, ucode_prefix); if (err) return err; break; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c index 9d4e24e518e875..d5ba58eba3e2b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c @@ -35,25 +35,17 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_4_ta.bin"); static int psp_v13_0_4_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; - const char *chip_name; char ucode_prefix[30]; int err = 0; - switch (adev->ip_versions[MP0_HWIP][0]) { - case IP_VERSION(13, 0, 4): - amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); - chip_name = ucode_prefix; - break; - default: - BUG(); - } + amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); switch (adev->ip_versions[MP0_HWIP][0]) { case IP_VERSION(13, 0, 4): - err = psp_init_toc_microcode(psp, chip_name); + err = psp_init_toc_microcode(psp, ucode_prefix); if (err) return err; - err = psp_init_ta_microcode(psp, chip_name); + err = psp_init_ta_microcode(psp, ucode_prefix); if (err) return err; break; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 157147c6c94e1d..f6b75e3e47ffb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -57,26 +57,18 @@ static int psp_v3_1_ring_stop(struct psp_context *psp, static int psp_v3_1_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; - const char *chip_name; + char ucode_prefix[30]; int err = 0; DRM_DEBUG("\n"); - switch (adev->asic_type) { - case CHIP_VEGA10: - chip_name = "vega10"; - break; - case CHIP_VEGA12: - chip_name = "vega12"; - break; - default: BUG(); - } + amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = psp_init_sos_microcode(psp, chip_name); + err = psp_init_sos_microcode(psp, ucode_prefix); if (err) return err; - err = psp_init_asd_microcode(psp, chip_name); + err = psp_init_asd_microcode(psp, 
ucode_prefix); if (err) return err; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index c52d246a1d965c..fd2a7b66ac5621 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -113,10 +113,9 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev) static void sdma_v2_4_free_microcode(struct amdgpu_device *adev) { int i; - for (i = 0; i < adev->sdma.num_instances; i++) { - release_firmware(adev->sdma.instance[i].fw); - adev->sdma.instance[i].fw = NULL; - } + + for (i = 0; i < adev->sdma.num_instances; i++) + amdgpu_ucode_release(&adev->sdma.instance[i].fw); } /** @@ -151,10 +150,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); else snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name); - err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->sdma.instance[i].fw); + err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name); if (err) goto out; hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; @@ -176,10 +172,8 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) out: if (err) { pr_err("sdma_v2_4: Failed to load firmware \"%s\"\n", fw_name); - for (i = 0; i < adev->sdma.num_instances; i++) { - release_firmware(adev->sdma.instance[i].fw); - adev->sdma.instance[i].fw = NULL; - } + for (i = 0; i < adev->sdma.num_instances; i++) + amdgpu_ucode_release(&adev->sdma.instance[i].fw); } return err; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 486d9b5c1b9e75..e572389089d249 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -250,10 +250,9 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) static void sdma_v3_0_free_microcode(struct amdgpu_device *adev) { int i; - for (i = 0; i < adev->sdma.num_instances; i++) { - release_firmware(adev->sdma.instance[i].fw); - adev->sdma.instance[i].fw = NULL; - } + + for (i = 0; i < adev->sdma.num_instances; i++) + amdgpu_ucode_release(&adev->sdma.instance[i].fw); } /** @@ -309,10 +308,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); else snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name); - err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->sdma.instance[i].fw); + err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name); if (err) goto out; hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; @@ -332,10 +328,8 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) out: if (err) { pr_err("sdma_v3_0: Failed to load firmware \"%s\"\n", fw_name); - for (i = 0; i < adev->sdma.num_instances; i++) { - release_firmware(adev->sdma.instance[i].fw); - adev->sdma.instance[i].fw = NULL; - } + for (i = 0; i < adev->sdma.num_instances; i++) + amdgpu_ucode_release(&adev->sdma.instance[i].fw); } return err; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 4d780e4430e782..b5affba2215696 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -575,60 +575,17 @@ static void 
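
sdma_v2_4 and sdma_v3_0 still build their per-instance firmware names by hand even after switching to amdgpu_ucode_request(): instance 0 loads %s_sdma.bin and the second instance %s_sdma1.bin. A sketch of that naming rule, generalized to a numbered suffix as the sdma_v4_0 hunk below uses %s_sdma%d.bin:

#include <stdio.h>

static void sdma_fw_name(char *buf, size_t len, const char *chip, int inst)
{
    /* instance 0 has no suffix; later instances are numbered */
    if (inst == 0)
        snprintf(buf, len, "amdgpu/%s_sdma.bin", chip);
    else
        snprintf(buf, len, "amdgpu/%s_sdma%d.bin", chip, inst);
}

int main(void)
{
    char name[40];

    for (int i = 0; i < 2; i++) {
        sdma_fw_name(name, sizeof(name), "tonga", i);
        puts(name);
    }
    return 0;
}
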
sdma_v4_0_setup_ulv(struct amdgpu_device *adev) // vega10 real chip need to use PSP to load firmware static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) { - const char *chip_name; - char fw_name[30]; int ret, i; - DRM_DEBUG("\n"); - - switch (adev->ip_versions[SDMA0_HWIP][0]) { - case IP_VERSION(4, 0, 0): - chip_name = "vega10"; - break; - case IP_VERSION(4, 0, 1): - chip_name = "vega12"; - break; - case IP_VERSION(4, 2, 0): - chip_name = "vega20"; - break; - case IP_VERSION(4, 1, 0): - case IP_VERSION(4, 1, 1): - if (adev->apu_flags & AMD_APU_IS_RAVEN2) - chip_name = "raven2"; - else if (adev->apu_flags & AMD_APU_IS_PICASSO) - chip_name = "picasso"; - else - chip_name = "raven"; - break; - case IP_VERSION(4, 2, 2): - chip_name = "arcturus"; - break; - case IP_VERSION(4, 1, 2): - if (adev->apu_flags & AMD_APU_IS_RENOIR) - chip_name = "renoir"; - else - chip_name = "green_sardine"; - break; - case IP_VERSION(4, 4, 0): - chip_name = "aldebaran"; - break; - default: - BUG(); - } - for (i = 0; i < adev->sdma.num_instances; i++) { - if (i == 0) - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); - else - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i); if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) || adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) { /* Acturus & Aldebaran will leverage the same FW memory for every SDMA instance */ - ret = amdgpu_sdma_init_microcode(adev, fw_name, 0, true); + ret = amdgpu_sdma_init_microcode(adev, 0, true); break; } else { - ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false); + ret = amdgpu_sdma_init_microcode(adev, i, false); if (ret) return ret; } @@ -1894,6 +1851,11 @@ static int sdma_v4_0_sw_init(void *handle) } } + if (amdgpu_sdma_ras_sw_init(adev)) { + dev_err(adev->dev, "Failed to initialize sdma ras block!\n"); + return -EINVAL; + } + return r; } @@ -2731,22 +2693,6 @@ static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev) break; } - if (adev->sdma.ras) { - amdgpu_ras_register_ras_block(adev, &adev->sdma.ras->ras_block); - - strcpy(adev->sdma.ras->ras_block.ras_comm.name, "sdma"); - adev->sdma.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA; - adev->sdma.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; - adev->sdma.ras_if = &adev->sdma.ras->ras_block.ras_comm; - - /* If don't define special ras_late_init function, use default ras_late_init */ - if (!adev->sdma.ras->ras_block.ras_late_init) - adev->sdma.ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init; - - /* If not defined special ras_cb function, use default ras_cb */ - if (!adev->sdma.ras->ras_block.ras_cb) - adev->sdma.ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb; - } } const struct amdgpu_ip_block_version sdma_v4_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index d4d9f196db834e..1941b3b7c5d985 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -237,39 +237,13 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev) // emulation only, won't work on real chip // navi10 real chip need to use PSP to load firmware static int sdma_v5_0_init_microcode(struct amdgpu_device *adev) -{ - const char *chip_name; - char fw_name[40]; - int ret, i; +{ int ret, i; if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5))) return 0; - DRM_DEBUG("\n"); - - switch (adev->ip_versions[SDMA0_HWIP][0]) { - case IP_VERSION(5, 0, 
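
In the sdma_v4_0 loop above, Arcturus and Aldebaran (SDMA IP 4.2.2 / 4.4.0) share one firmware image across all SDMA instances, so the loader is called once for instance 0 with the duplicate flag set and the loop breaks; every other part loads one image per instance. A model of that control flow (the flag name is illustrative):

#include <stdbool.h>
#include <stdio.h>

static int init_microcode(int inst, bool duplicate)
{
    printf("load fw for instance %d%s\n", inst,
           duplicate ? " (shared by all instances)" : "");
    return 0;
}

static int load_all(int num_instances, bool shared_fw)
{
    for (int i = 0; i < num_instances; i++) {
        if (shared_fw) {
            /* one image serves every instance, so load once and stop */
            return init_microcode(0, true);
        }
        int r = init_microcode(i, false);
        if (r)
            return r;
    }
    return 0;
}

int main(void)
{
    load_all(8, true);  /* Arcturus/Aldebaran-style shared image */
    load_all(2, false); /* per-instance images */
    return 0;
}
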
0): - chip_name = "navi10"; - break; - case IP_VERSION(5, 0, 2): - chip_name = "navi14"; - break; - case IP_VERSION(5, 0, 5): - chip_name = "navi12"; - break; - case IP_VERSION(5, 0, 1): - chip_name = "cyan_skillfish2"; - break; - default: - BUG(); - } - for (i = 0; i < adev->sdma.num_instances; i++) { - if (i == 0) - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); - else - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name); - ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false); + ret = amdgpu_sdma_init_microcode(adev, i, false); if (ret) return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 809eca54fc617f..8e445eb9dd4916 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -89,59 +89,6 @@ static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3 return base + internal_offset; } -/** - * sdma_v5_2_init_microcode - load ucode images from disk - * - * @adev: amdgpu_device pointer - * - * Use the firmware interface to load the ucode images into - * the driver (not loaded into hw). - * Returns 0 on success, error on failure. - */ - -// emulation only, won't work on real chip -// navi10 real chip need to use PSP to load firmware -static int sdma_v5_2_init_microcode(struct amdgpu_device *adev) -{ - const char *chip_name; - char fw_name[40]; - - DRM_DEBUG("\n"); - - switch (adev->ip_versions[SDMA0_HWIP][0]) { - case IP_VERSION(5, 2, 0): - chip_name = "sienna_cichlid_sdma"; - break; - case IP_VERSION(5, 2, 2): - chip_name = "navy_flounder_sdma"; - break; - case IP_VERSION(5, 2, 1): - chip_name = "vangogh_sdma"; - break; - case IP_VERSION(5, 2, 4): - chip_name = "dimgrey_cavefish_sdma"; - break; - case IP_VERSION(5, 2, 5): - chip_name = "beige_goby_sdma"; - break; - case IP_VERSION(5, 2, 3): - chip_name = "yellow_carp_sdma"; - break; - case IP_VERSION(5, 2, 6): - chip_name = "sdma_5_2_6"; - break; - case IP_VERSION(5, 2, 7): - chip_name = "sdma_5_2_7"; - break; - default: - BUG(); - } - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", chip_name); - - return amdgpu_sdma_init_microcode(adev, fw_name, 0, true); -} - static unsigned sdma_v5_2_ring_init_cond_exec(struct amdgpu_ring *ring) { unsigned ret; @@ -809,12 +756,6 @@ static int sdma_v5_2_start(struct amdgpu_device *adev) msleep(1000); } - /* TODO: check whether can submit a doorbell request to raise - * a doorbell fence to exit gfxoff. 
- */ - if (adev->in_s0ix) - amdgpu_gfx_off_ctrl(adev, false); - sdma_v5_2_soft_reset(adev); /* unhalt the MEs */ sdma_v5_2_enable(adev, true); @@ -823,8 +764,6 @@ static int sdma_v5_2_start(struct amdgpu_device *adev) /* start the gfx rings and rlc compute queues */ r = sdma_v5_2_gfx_resume(adev); - if (adev->in_s0ix) - amdgpu_gfx_off_ctrl(adev, true); if (r) return r; r = sdma_v5_2_rlc_resume(adev); @@ -1296,7 +1235,7 @@ static int sdma_v5_2_sw_init(void *handle) return r; } - r = sdma_v5_2_init_microcode(adev); + r = amdgpu_sdma_init_microcode(adev, 0, true); if (r) { DRM_ERROR("Failed to load sdma firmware!\n"); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 049c26a45d8511..40e6b22daa226e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -78,29 +78,6 @@ static u32 sdma_v6_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3 return base + internal_offset; } -/** - * sdma_v6_0_init_microcode - load ucode images from disk - * - * @adev: amdgpu_device pointer - * - * Use the firmware interface to load the ucode images into - * the driver (not loaded into hw). - * Returns 0 on success, error on failure. - */ -static int sdma_v6_0_init_microcode(struct amdgpu_device *adev) -{ - char fw_name[30]; - char ucode_prefix[30]; - - DRM_DEBUG("\n"); - - amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix)); - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); - - return amdgpu_sdma_init_microcode(adev, fw_name, 0, true); -} - static unsigned sdma_v6_0_ring_init_cond_exec(struct amdgpu_ring *ring) { unsigned ret; @@ -296,8 +273,6 @@ static void sdma_v6_0_ring_emit_ib(struct amdgpu_ring *ring, * sdma_v6_0_ring_emit_mem_sync - flush the IB by graphics cache rinse * * @ring: amdgpu ring pointer - * @job: job to retrieve vmid from - * @ib: IB object to schedule * * flush the IB by graphics cache rinse. */ @@ -349,7 +324,9 @@ static void sdma_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) * sdma_v6_0_ring_emit_fence - emit a fence on the DMA ring * * @ring: amdgpu ring pointer - * @fence: amdgpu fence object + * @addr: address + * @seq: fence seq number + * @flags: fence flags * * Add a DMA fence packet to the ring to write * the fence seq number and DMA trap packet to generate @@ -1083,10 +1060,9 @@ static void sdma_v6_0_vm_copy_pte(struct amdgpu_ib *ib, * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry - * @addr: dst addr to write into pe + * @value: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes - * @flags: access flags * * Update PTEs by writing them manually using sDMA. */ @@ -1190,7 +1166,6 @@ static void sdma_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * sdma_v6_0_ring_emit_vm_flush - vm flush using sDMA * * @ring: amdgpu_ring pointer - * @vm: amdgpu_vm pointer * * Update the page table base and flush the VM TLB * using sDMA. 
@@ -1234,6 +1209,24 @@ static void sdma_v6_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); } +static struct amdgpu_sdma_ras sdma_v6_0_3_ras = { + .ras_block = { + .ras_late_init = amdgpu_ras_block_late_init, + }, +}; + +static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(6, 0, 3): + adev->sdma.ras = &sdma_v6_0_3_ras; + break; + default: + break; + } + +} + static int sdma_v6_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1243,6 +1236,7 @@ static int sdma_v6_0_early_init(void *handle) sdma_v6_0_set_vm_pte_funcs(adev); sdma_v6_0_set_irq_funcs(adev); sdma_v6_0_set_mqd_funcs(adev); + sdma_v6_0_set_ras_funcs(adev); return 0; } @@ -1260,7 +1254,7 @@ static int sdma_v6_0_sw_init(void *handle) if (r) return r; - r = sdma_v6_0_init_microcode(adev); + r = amdgpu_sdma_init_microcode(adev, 0, true); if (r) { DRM_ERROR("Failed to load sdma firmware!\n"); return r; @@ -1287,6 +1281,11 @@ static int sdma_v6_0_sw_init(void *handle) return r; } + if (amdgpu_sdma_ras_sw_init(adev)) { + dev_err(adev->dev, "Failed to initialize sdma ras block!\n"); + return -EINVAL; + } + return r; } @@ -1426,10 +1425,12 @@ static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev, u32 reg_offset = sdma_v6_0_get_reg_offset(adev, type, regSDMA0_CNTL); - sdma_cntl = RREG32(reg_offset); - sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, - state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); - WREG32(reg_offset, sdma_cntl); + if (!amdgpu_sriov_vf(adev)) { + sdma_cntl = RREG32(reg_offset); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32(reg_offset, sdma_cntl); + } return 0; } @@ -1588,10 +1589,11 @@ static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev) /** * sdma_v6_0_emit_copy_buffer - copy buffer using the sDMA engine * - * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to fill with commands * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer + * @tmz: if a secure copy should be used * * Copy GPU buffers using the DMA engine. * Used by the amdgpu ttm implementation to move pages if @@ -1617,7 +1619,7 @@ static void sdma_v6_0_emit_copy_buffer(struct amdgpu_ib *ib, /** * sdma_v6_0_emit_fill_buffer - fill buffer using the sDMA engine * - * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to fill * @src_data: value to write to buffer * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c new file mode 100644 index 00000000000000..ae29620b1ea405 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c @@ -0,0 +1,303 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "smu_v13_0_10.h" +#include "amdgpu_reset.h" +#include "amdgpu_dpm.h" +#include "amdgpu_job.h" +#include "amdgpu_ring.h" +#include "amdgpu_ras.h" +#include "amdgpu_psp.h" + +static bool smu_v13_0_10_is_mode2_default(struct amdgpu_reset_control *reset_ctl) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; + if (adev->pm.fw_version >= 0x00502005 && !amdgpu_sriov_vf(adev)) + return true; + + return false; +} + +static struct amdgpu_reset_handler * +smu_v13_0_10_get_reset_handler(struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + struct amdgpu_reset_handler *handler; + struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; + + if (reset_context->method != AMD_RESET_METHOD_NONE) { + list_for_each_entry(handler, &reset_ctl->reset_handlers, + handler_list) { + if (handler->reset_method == reset_context->method) + return handler; + } + } + + if (smu_v13_0_10_is_mode2_default(reset_ctl) && + amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_MODE2) { + list_for_each_entry (handler, &reset_ctl->reset_handlers, + handler_list) { + if (handler->reset_method == AMD_RESET_METHOD_MODE2) + return handler; + } + } + + return NULL; +} + +static int smu_v13_0_10_mode2_suspend_ip(struct amdgpu_device *adev) +{ + int r, i; + + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); + + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { + if (!(adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_GFX || + adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_SDMA || + adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_MES)) + continue; + + r = adev->ip_blocks[i].version->funcs->suspend(adev); + + if (r) { + dev_err(adev->dev, + "suspend of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + return r; + } + adev->ip_blocks[i].status.hw = false; + } + + return r; +} + +static int +smu_v13_0_10_mode2_prepare_hwcontext(struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + int r = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; + + if (!amdgpu_sriov_vf(adev)) + r = smu_v13_0_10_mode2_suspend_ip(adev); + + return r; +} + +static int smu_v13_0_10_mode2_reset(struct amdgpu_device *adev) +{ + return amdgpu_dpm_mode2_reset(adev); +} + +static void smu_v13_0_10_async_reset(struct work_struct 
*work) +{ + struct amdgpu_reset_handler *handler; + struct amdgpu_reset_control *reset_ctl = + container_of(work, struct amdgpu_reset_control, reset_work); + struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; + + list_for_each_entry(handler, &reset_ctl->reset_handlers, + handler_list) { + if (handler->reset_method == reset_ctl->active_reset) { + dev_dbg(adev->dev, "Resetting device\n"); + handler->do_reset(adev); + break; + } + } +} +static int +smu_v13_0_10_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; + int r; + + r = smu_v13_0_10_mode2_reset(adev); + if (r) { + dev_err(adev->dev, + "ASIC reset failed with error, %d ", r); + } + return r; +} + +static int smu_v13_0_10_mode2_restore_ip(struct amdgpu_device *adev) +{ + int i, r; + struct psp_context *psp = &adev->psp; + struct amdgpu_firmware_info *ucode; + struct amdgpu_firmware_info *ucode_list[2]; + int ucode_count = 0; + + for (i = 0; i < adev->firmware.max_ucodes; i++) { + ucode = &adev->firmware.ucode[i]; + + switch (ucode->ucode_id) { + case AMDGPU_UCODE_ID_IMU_I: + case AMDGPU_UCODE_ID_IMU_D: + ucode_list[ucode_count++] = ucode; + break; + default: + break; + } + } + + r = psp_load_fw_list(psp, ucode_list, ucode_count); + if (r) { + dev_err(adev->dev, "IMU ucode load failed after mode2 reset\n"); + return r; + } + + r = psp_rlc_autoload_start(psp); + if (r) { + DRM_ERROR("Failed to start rlc autoload after mode2 reset\n"); + return r; + } + + amdgpu_dpm_enable_gfx_features(adev); + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!(adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_GFX || + adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_MES || + adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_SDMA)) + continue; + r = adev->ip_blocks[i].version->funcs->resume(adev); + if (r) { + dev_err(adev->dev, + "resume of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + return r; + } + + adev->ip_blocks[i].status.hw = true; + } + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!(adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_GFX || + adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_MES || + adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_SDMA)) + continue; + + if (adev->ip_blocks[i].version->funcs->late_init) { + r = adev->ip_blocks[i].version->funcs->late_init( + (void *)adev); + if (r) { + dev_err(adev->dev, + "late_init of IP block <%s> failed %d after reset\n", + adev->ip_blocks[i].version->funcs->name, + r); + return r; + } + } + adev->ip_blocks[i].status.late_initialized = true; + } + + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE); + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE); + + return r; +} + +static int +smu_v13_0_10_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + int r; + struct amdgpu_device *tmp_adev = (struct amdgpu_device *)reset_ctl->handle; + + dev_info(tmp_adev->dev, + "GPU reset succeeded, trying to resume\n"); + r = smu_v13_0_10_mode2_restore_ip(tmp_adev); + if (r) + goto end; + + amdgpu_register_gpu_instance(tmp_adev); + + /* Resume RAS */ + amdgpu_ras_resume(tmp_adev); + + amdgpu_irq_gpu_reset_resume_helper(tmp_adev); + + r = amdgpu_ib_ring_tests(tmp_adev); + if (r) { + dev_err(tmp_adev->dev, + "ib ring test failed (%d).\n", r); + r = -EAGAIN; + goto end; + } + +end: + if (r) + return -EAGAIN; + else + 
return r; +} + +static struct amdgpu_reset_handler smu_v13_0_10_mode2_handler = { + .reset_method = AMD_RESET_METHOD_MODE2, + .prepare_env = NULL, + .prepare_hwcontext = smu_v13_0_10_mode2_prepare_hwcontext, + .perform_reset = smu_v13_0_10_mode2_perform_reset, + .restore_hwcontext = smu_v13_0_10_mode2_restore_hwcontext, + .restore_env = NULL, + .do_reset = smu_v13_0_10_mode2_reset, +}; + +int smu_v13_0_10_reset_init(struct amdgpu_device *adev) +{ + struct amdgpu_reset_control *reset_ctl; + + reset_ctl = kzalloc(sizeof(*reset_ctl), GFP_KERNEL); + if (!reset_ctl) + return -ENOMEM; + + reset_ctl->handle = adev; + reset_ctl->async_reset = smu_v13_0_10_async_reset; + reset_ctl->active_reset = AMD_RESET_METHOD_NONE; + reset_ctl->get_reset_handler = smu_v13_0_10_get_reset_handler; + + INIT_LIST_HEAD(&reset_ctl->reset_handlers); + INIT_WORK(&reset_ctl->reset_work, reset_ctl->async_reset); + /* Only mode2 is handled through reset control now */ + amdgpu_reset_add_handler(reset_ctl, &smu_v13_0_10_mode2_handler); + + adev->reset_cntl = reset_ctl; + + return 0; +} + +int smu_v13_0_10_reset_fini(struct amdgpu_device *adev) +{ + kfree(adev->reset_cntl); + adev->reset_cntl = NULL; + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.h b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.h new file mode 100644 index 00000000000000..e0cb72a0eec606 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.h @@ -0,0 +1,32 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __SMU_V13_0_10_H__ +#define __SMU_V13_0_10_H__ + +#include "amdgpu.h" + +int smu_v13_0_10_reset_init(struct amdgpu_device *adev); +int smu_v13_0_10_reset_fini(struct amdgpu_device *adev); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 7050238c4c4899..620f7409825dfe 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -48,19 +48,32 @@ static const struct amd_ip_funcs soc21_common_ip_funcs; /* SOC21 */ -static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array[] = +static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = +{ + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, +}; + +static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, }; -static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode = +static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 = { - .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array), - .codec_array = vcn_4_0_0_video_codecs_encode_array, + .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn0), + .codec_array = vcn_4_0_0_video_codecs_encode_array_vcn0, }; -static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[] = +static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 = +{ + .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn1), + .codec_array = vcn_4_0_0_video_codecs_encode_array_vcn1, +}; + +static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, @@ -69,23 +82,46 @@ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; -static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode = +static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] = { - .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array), - .codec_array = vcn_4_0_0_video_codecs_decode_array, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, +}; + +static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 = +{ + .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn0), + .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn0, +}; + +static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 = +{ + .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn1), + .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1, }; static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode, const struct amdgpu_video_codecs **codecs) { - switch (adev->ip_versions[UVD_HWIP][0]) { + if (adev->vcn.num_vcn_inst == 
hweight8(adev->vcn.harvest_config)) + return -EINVAL; + switch (adev->ip_versions[UVD_HWIP][0]) { case IP_VERSION(4, 0, 0): case IP_VERSION(4, 0, 2): - if (encode) - *codecs = &vcn_4_0_0_video_codecs_encode; - else - *codecs = &vcn_4_0_0_video_codecs_decode; + if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) { + if (encode) + *codecs = &vcn_4_0_0_video_codecs_encode_vcn1; + else + *codecs = &vcn_4_0_0_video_codecs_decode_vcn1; + } else { + if (encode) + *codecs = &vcn_4_0_0_video_codecs_encode_vcn0; + else + *codecs = &vcn_4_0_0_video_codecs_decode_vcn0; + } return 0; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h index cf8ff064dc72e9..00d8bdb8254fb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h @@ -55,10 +55,10 @@ enum ta_securedisplay_status { TA_SECUREDISPLAY_STATUS__MAX = 0x7FFFFFFF,/* Maximum Value for status*/ }; -/** @enum ta_securedisplay_max_phy +/** @enum ta_securedisplay_phy_ID * Physical ID number to use for reading corresponding DIO Scratch register for ROI */ -enum ta_securedisplay_max_phy { +enum ta_securedisplay_phy_ID { TA_SECUREDISPLAY_PHY0 = 0, TA_SECUREDISPLAY_PHY1 = 1, TA_SECUREDISPLAY_PHY2 = 2, @@ -139,16 +139,16 @@ union ta_securedisplay_cmd_output { uint32_t reserved[4]; }; -/** @struct securedisplay_cmd - * Secure Display Command which is shared buffer memory - */ -struct securedisplay_cmd { - uint32_t cmd_id; /* +0 Bytes Command ID */ - enum ta_securedisplay_status status; /* +4 Bytes Status of Secure Display TA */ - uint32_t reserved[2]; /* +8 Bytes Reserved */ - union ta_securedisplay_cmd_input securedisplay_in_message; /* +16 Bytes Input Buffer */ - union ta_securedisplay_cmd_output securedisplay_out_message;/* +32 Bytes Output Buffer */ - /**@note Total 48 Bytes */ +/** @struct ta_securedisplay_cmd +* Secure display command which is shared buffer memory +*/ +struct ta_securedisplay_cmd { + uint32_t cmd_id; /**< +0 Bytes Command ID */ + enum ta_securedisplay_status status; /**< +4 Bytes Status code returned by the secure display TA */ + uint32_t reserved[2]; /**< +8 Bytes Reserved */ + union ta_securedisplay_cmd_input securedisplay_in_message; /**< +16 Bytes Command input buffer */ + union ta_securedisplay_cmd_output securedisplay_out_message; /**< +32 Bytes Command output buffer */ + /**@note Total 48 Bytes */ }; #endif //_TA_SECUREDISPLAY_IF_H diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c index 72fd963f178bc4..e08e25a3a1a916 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c @@ -57,13 +57,6 @@ static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev, return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst; } -static inline uint32_t get_umc_v6_7_channel_index(struct amdgpu_device *adev, - uint32_t umc_inst, - uint32_t ch_inst) -{ - return adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; -} - static void umc_v6_7_query_error_status_helper(struct amdgpu_device *adev, uint64_t mc_umc_status, uint32_t umc_reg_offset) { diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c index b7da4528cf0a48..da394bc06bbaaf 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c @@ -340,29 +340,13 @@ static void umc_v8_10_err_cnt_init(struct amdgpu_device *adev) } } -static uint32_t 
umc_v8_10_query_ras_poison_mode_per_channel( - struct amdgpu_device *adev, - uint32_t umc_reg_offset) -{ - uint32_t ecc_ctrl_addr, ecc_ctrl; - - ecc_ctrl_addr = - SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccCtrl); - ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr + - umc_reg_offset) * 4); - - return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_GeccCtrl, UCFatalEn); -} - static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev) { - uint32_t umc_reg_offset = 0; - - /* Enabling fatal error in umc node0 instance0 channel0 will be - * considered as fatal error mode + /* + * Force return true, because UMCCH0_0_GeccCtrl + * is not accessible from host side */ - umc_reg_offset = get_umc_v8_10_reg_offset(adev, 0, 0, 0); - return !umc_v8_10_query_ras_poison_mode_per_channel(adev, umc_reg_offset); + return true; } const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index f0fbcda76f5e39..c305b2cb8490ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -57,11 +57,12 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work); static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring); /** - * vcn_v1_0_early_init - set function pointers + * vcn_v1_0_early_init - set function pointers and load microcode * * @handle: amdgpu_device pointer * * Set ring and irq function pointers + * Load microcode from filesystem */ static int vcn_v1_0_early_init(void *handle) { @@ -75,7 +76,7 @@ static int vcn_v1_0_early_init(void *handle) jpeg_v1_0_early_init(handle); - return 0; + return amdgpu_vcn_early_init(adev); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 08871bad9994a2..4b4cd88414e092 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -62,11 +62,12 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx, struct dpg_pause_state *new_state); static int vcn_v2_0_start_sriov(struct amdgpu_device *adev); /** - * vcn_v2_0_early_init - set function pointers + * vcn_v2_0_early_init - set function pointers and load microcode * * @handle: amdgpu_device pointer * * Set ring and irq function pointers + * Load microcode from filesystem */ static int vcn_v2_0_early_init(void *handle) { @@ -81,7 +82,7 @@ static int vcn_v2_0_early_init(void *handle) vcn_v2_0_set_enc_ring_funcs(adev); vcn_v2_0_set_irq_funcs(adev); - return 0; + return amdgpu_vcn_early_init(adev); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index ec87b00f2e052b..b0b0e69c6a9438 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -71,11 +71,12 @@ static int amdgpu_ih_clientid_vcns[] = { }; /** - * vcn_v2_5_early_init - set function pointers + * vcn_v2_5_early_init - set function pointers and load microcode * * @handle: amdgpu_device pointer * * Set ring and irq function pointers + * Load microcode from filesystem */ static int vcn_v2_5_early_init(void *handle) { @@ -107,7 +108,7 @@ static int vcn_v2_5_early_init(void *handle) vcn_v2_5_set_irq_funcs(adev); vcn_v2_5_set_ras_funcs(adev); - return 0; + return amdgpu_vcn_early_init(adev); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 9c8b5fd9903720..66439388faee64 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -78,11 +78,12 @@ static void 
vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring); static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring); /** - * vcn_v3_0_early_init - set function pointers + * vcn_v3_0_early_init - set function pointers and load microcode * * @handle: amdgpu_device pointer * * Set ring and irq function pointers + * Load microcode from filesystem */ static int vcn_v3_0_early_init(void *handle) { @@ -109,7 +110,7 @@ static int vcn_v3_0_early_init(void *handle) vcn_v3_0_set_enc_ring_funcs(adev); vcn_v3_0_set_irq_funcs(adev); - return 0; + return amdgpu_vcn_early_init(adev); } /** @@ -1770,6 +1771,10 @@ static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p, if (atomic_read(&job->base.entity->fence_seq)) return -EINVAL; + /* if VCN0 is harvested, we can't support AV1 */ + if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) + return -EINVAL; + scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC] [AMDGPU_RING_PRIO_DEFAULT].sched; drm_sched_entity_modify_sched(job->base.entity, scheds, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 1e2b22299975ff..22a41766a8c71c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -68,11 +68,12 @@ static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring); static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev); /** - * vcn_v4_0_early_init - set function pointers + * vcn_v4_0_early_init - set function pointers and load microcode * * @handle: amdgpu_device pointer * * Set ring and irq function pointers + * Load microcode from filesystem */ static int vcn_v4_0_early_init(void *handle) { @@ -88,7 +89,7 @@ static int vcn_v4_0_early_init(void *handle) vcn_v4_0_set_irq_funcs(adev); vcn_v4_0_set_ras_funcs(adev); - return 0; + return amdgpu_vcn_early_init(adev); } /** @@ -1631,6 +1632,10 @@ static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p, if (atomic_read(&job->base.entity->fence_seq)) return -EINVAL; + /* if VCN0 is harvested, we can't support AV1 */ + if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) + return -EINVAL; + scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC] [AMDGPU_RING_PRIO_0].sched; drm_sched_entity_modify_sched(job->base.entity, scheds, 1); @@ -1705,7 +1710,7 @@ static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, create = ptr + addr + offset - start; - /* H246, HEVC and VP9 can run on any instance */ + /* H264, HEVC and VP9 can run on any instance */ if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11) continue; @@ -1719,7 +1724,29 @@ static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, return r; } -#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003) +#define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002) +#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003) + +#define RADEON_VCN_ENGINE_INFO (0x30000001) +#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET 16 + +#define RENCODE_ENCODE_STANDARD_AV1 2 +#define RENCODE_IB_PARAM_SESSION_INIT 0x00000003 +#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET 64 + +/* return the offset in ib if id is found, -1 otherwise; + * to speed up the search we only search up to max_offset + */ +static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset) +{ + int i; + + for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) { + if (ib->ptr[i + 1] == id) + return i; + } + return -1; +} static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
@@ -1729,27 +1756,35 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, struct amdgpu_vcn_decode_buffer *decode_buffer; uint64_t addr; uint32_t val; + int idx; /* The first instance can decode anything */ if (!ring->me) return 0; - /* unified queue ib header has 8 double words. */ - if (ib->length_dw < 8) + /* RADEON_VCN_ENGINE_INFO is at the top of the IB block */ + idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, + RADEON_VCN_ENGINE_INFO_MAX_OFFSET); + if (idx < 0) /* engine info is missing */ return 0; - val = amdgpu_ib_get_value(ib, 6); //RADEON_VCN_ENGINE_TYPE - if (val != RADEON_VCN_ENGINE_TYPE_DECODE) - return 0; - - decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[10]; - - if (!(decode_buffer->valid_buf_flag & 0x1)) - return 0; - - addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 | - decode_buffer->msg_buffer_address_lo; - return vcn_v4_0_dec_msg(p, job, addr); + val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */ + if (val == RADEON_VCN_ENGINE_TYPE_DECODE) { + decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6]; + + if (!(decode_buffer->valid_buf_flag & 0x1)) + return 0; + + addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 | + decode_buffer->msg_buffer_address_lo; + return vcn_v4_0_dec_msg(p, job, addr); + } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) { + idx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT, + RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET); + if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1) + return vcn_v4_0_limit_sched(p, job); + } + return 0; } static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 6d291aa6386bd7..072fa4fbd27fc5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1065,6 +1065,20 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, mutex_unlock(&p->svms.lock); return -EADDRINUSE; } + + /* When registering a user buffer, check whether it has already been + * registered by SVM via its CPU virtual address.
+ */ + if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) && + interval_tree_iter_first(&p->svms.objects, + args->mmap_offset >> PAGE_SHIFT, + (args->mmap_offset + args->size - 1) >> PAGE_SHIFT)) { + pr_err("User Buffer Address: 0x%llx already allocated by SVM\n", + args->mmap_offset); + mutex_unlock(&p->svms.lock); + return -EADDRINUSE; + } + mutex_unlock(&p->svms.lock); #endif mutex_lock(&p->mutex); @@ -1127,8 +1141,13 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, } /* Update the VRAM usage count */ - if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) - WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + args->size); + if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { + uint64_t size = args->size; + + if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM) + size >>= 1; + WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size)); + } mutex_unlock(&p->mutex); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index b8936340742b4e..3de7f616a001cf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -262,23 +262,12 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) f2g = &gfx_v8_kfd2kgd; break; case CHIP_FIJI: - gfx_target_version = 80003; - f2g = &gfx_v8_kfd2kgd; - break; case CHIP_POLARIS10: gfx_target_version = 80003; f2g = &gfx_v8_kfd2kgd; break; case CHIP_POLARIS11: - gfx_target_version = 80003; - if (!vf) - f2g = &gfx_v8_kfd2kgd; - break; case CHIP_POLARIS12: - gfx_target_version = 80003; - if (!vf) - f2g = &gfx_v8_kfd2kgd; - break; case CHIP_VEGAM: gfx_target_version = 80003; if (!vf) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c index d119070956fb63..8b2dd2670ab7fa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c @@ -59,30 +59,27 @@ static int update_qpd_v9(struct device_queue_manager *dqm, /* check if sh_mem_config register already configured */ if (qpd->sh_mem_config == 0) { - qpd->sh_mem_config = - SH_MEM_ALIGNMENT_MODE_UNALIGNED << + qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; - if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2)) { - /* Aldebaran can safely support different XNACK modes - * per process - */ - if (!pdd->process->xnack_enabled) - qpd->sh_mem_config |= - 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT; - } else if (dqm->dev->noretry && - !dqm->dev->use_iommu_v2) { - qpd->sh_mem_config |= - 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT; - } + if (dqm->dev->noretry && !dqm->dev->use_iommu_v2) + qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT; qpd->sh_mem_ape1_limit = 0; qpd->sh_mem_ape1_base = 0; } + if (KFD_SUPPORT_XNACK_PER_PROCESS(dqm->dev)) { + if (!pdd->process->xnack_enabled) + qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT; + else + qpd->sh_mem_config &= ~(1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT); + } + qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd); - pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases); + pr_debug("sh_mem_bases 0x%X sh_mem_config 0x%X\n", qpd->sh_mem_bases, + qpd->sh_mem_config); return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 10048ce16aea46..de8ce72344fc57 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -1027,8 +1027,7 @@ int svm_migrate_init(struct amdgpu_device *adev) /* Disable 
SVM support capability */ pgmap->type = 0; if (pgmap->type == MEMORY_DEVICE_PRIVATE) - devm_release_mem_region(adev->dev, res->start, - res->end - res->start + 1); + devm_release_mem_region(adev->dev, res->start, resource_size(res)); return PTR_ERR(r); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 552c3ac85a1329..bfa30d12406b35 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -206,6 +206,8 @@ enum cache_policy { #define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0]) #define KFD_IS_SOC15(dev) ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1))) +#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\ + (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) struct kfd_event_interrupt_class { bool (*interrupt_isr)(struct kfd_dev *dev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 51b1683ac5c1e1..72df6286e2407e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1330,7 +1330,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) * per-process XNACK mode selection. But let the dev->noretry * setting still influence the default XNACK mode. */ - if (supported && KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) + if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev)) continue; /* GFXv10 and later GPUs do not support shader preemption @@ -1563,6 +1563,8 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, int kfd_process_device_init_vm(struct kfd_process_device *pdd, struct file *drm_file) { + struct amdgpu_fpriv *drv_priv; + struct amdgpu_vm *avm; struct kfd_process *p; struct kfd_dev *dev; int ret; @@ -1573,10 +1575,15 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, if (pdd->drm_priv) return -EBUSY; + ret = amdgpu_file_to_fpriv(drm_file, &drv_priv); + if (ret) + return ret; + avm = &drv_priv->vm; + p = pdd->process; dev = pdd->dev; - ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, drm_file, + ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm, &p->kgd_process_info, &p->ef); if (ret) { @@ -1593,7 +1600,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, if (ret) goto err_init_cwsr; - ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, drm_file, p->pasid); + ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid); if (ret) goto err_set_pasid; @@ -1607,6 +1614,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, kfd_process_device_destroy_ib_mem(pdd); err_reserve_ib_mem: pdd->drm_priv = NULL; + amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm); return ret; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index b94d2c1422ad82..dc6fd696705095 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -23,6 +23,7 @@ #include #include +#include #include "amdgpu_sync.h" #include "amdgpu_object.h" #include "amdgpu_vm.h" diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 9c7b69d377bd3c..c420bce47acb59 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -28,7 +28,6 @@ #include "dm_services_types.h" #include "dc.h" -#include "dc_link_dp.h" #include "link_enc_cfg.h" #include "dc/inc/core_types.h" #include "dal_asic_id.h" @@ -39,6 +38,9 @@ #include "dc/dc_edid_parser.h" #include "dc/dc_stat.h" 
#include "amdgpu_dm_trace.h" +#include "dpcd_defs.h" +#include "link/protocols/link_dpcd.h" +#include "link_service_types.h" #include "vid.h" #include "amdgpu.h" @@ -66,7 +68,7 @@ #include "ivsrcid/ivsrcid_vislands30.h" -#include "i2caux_interface.h" +#include #include #include #include @@ -104,7 +106,6 @@ #include "modules/inc/mod_freesync.h" #include "modules/power/power_helpers.h" -#include "modules/inc/mod_info_packet.h" #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); @@ -210,7 +211,7 @@ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm); static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *amdgpu_dm_connector, - uint32_t link_index, + u32 link_index, struct amdgpu_encoder *amdgpu_encoder); static int amdgpu_dm_encoder_init(struct drm_device *dev, struct amdgpu_encoder *aencoder, @@ -262,7 +263,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc) static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, u32 *vbl, u32 *position) { - uint32_t v_blank_start, v_blank_end, h_position, v_position; + u32 v_blank_start, v_blank_end, h_position, v_position; if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) return -EINVAL; @@ -361,7 +362,7 @@ static void dm_pflip_high_irq(void *interrupt_params) struct amdgpu_device *adev = irq_params->adev; unsigned long flags; struct drm_pending_vblank_event *e; - uint32_t vpos, hpos, v_blank_start, v_blank_end; + u32 vpos, hpos, v_blank_start, v_blank_end; bool vrr_active; amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); @@ -648,7 +649,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, struct drm_connector *connector; struct drm_connector_list_iter iter; struct dc_link *link; - uint8_t link_index = 0; + u8 link_index = 0; struct drm_device *dev; if (adev == NULL) @@ -749,7 +750,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) struct amdgpu_device *adev = irq_params->adev; struct amdgpu_display_manager *dm = &adev->dm; struct dmcub_trace_buf_entry entry = { 0 }; - uint32_t count = 0; + u32 count = 0; struct dmub_hpd_work *dmub_hpd_wrk; struct dc_link *plink = NULL; @@ -1015,7 +1016,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) struct dmub_srv_hw_params hw_params; enum dmub_status status; const unsigned char *fw_inst_const, *fw_bss_data; - uint32_t i, fw_inst_const_size, fw_bss_data_size; + u32 i, fw_inst_const_size, fw_bss_data_size; bool has_hw_support; if (!dmub_srv) @@ -1176,10 +1177,10 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev) static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config) { - uint64_t pt_base; - uint32_t logical_addr_low; - uint32_t logical_addr_high; - uint32_t agp_base, agp_bot, agp_top; + u64 pt_base; + u32 logical_addr_low; + u32 logical_addr_high; + u32 agp_base, agp_bot, agp_top; PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base; memset(pa_config, 0, sizeof(*pa_config)); @@ -1190,7 +1191,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_ /* AGP aperture is disabled */ if (agp_bot == agp_top) { - logical_addr_low = adev->gmc.vram_start >> 18; + logical_addr_low = adev->gmc.fb_start >> 18; if (adev->apu_flags & AMD_APU_IS_RAVEN2) /* * Raven2 has a HW issue that it is unable to use the vram which @@ -1200,9 +1201,9 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct 
dc_phy_ */ logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1; else - logical_addr_high = adev->gmc.vram_end >> 18; + logical_addr_high = adev->gmc.fb_end >> 18; } else { - logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; + logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; if (adev->apu_flags & AMD_APU_IS_RAVEN2) /* * Raven2 has a HW issue that it is unable to use the vram which @@ -1239,10 +1240,25 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_ pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12; pa_config->gart_config.page_table_base_addr = page_table_base.quad_part; - pa_config->is_hvm_enabled = 0; + pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support; } +static void force_connector_state( + struct amdgpu_dm_connector *aconnector, + enum drm_connector_force force_state) +{ + struct drm_connector *connector = &aconnector->base; + + mutex_lock(&connector->dev->mode_config.mutex); + aconnector->base.force = force_state; + mutex_unlock(&connector->dev->mode_config.mutex); + + mutex_lock(&aconnector->hpd_lock); + drm_kms_helper_connector_hotplug_event(connector); + mutex_unlock(&aconnector->hpd_lock); +} + static void dm_handle_hpd_rx_offload_work(struct work_struct *work) { struct hpd_rx_irq_offload_work *offload_work; @@ -1251,6 +1267,9 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work) struct amdgpu_device *adev; enum dc_connection_type new_connection_type = dc_connection_none; unsigned long flags; + union test_response test_response; + + memset(&test_response, 0, sizeof(test_response)); offload_work = container_of(work, struct hpd_rx_irq_offload_work, work); aconnector = offload_work->offload_wq->aconnector; @@ -1264,7 +1283,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work) dc_link = aconnector->dc_link; mutex_lock(&aconnector->hpd_lock); - if (!dc_link_detect_sink(dc_link, &new_connection_type)) + if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) DRM_ERROR("KMS: Failed to detect connector\n"); mutex_unlock(&aconnector->hpd_lock); @@ -1275,15 +1294,49 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work) goto skip; mutex_lock(&adev->dm.dc_lock); - if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) + if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) { dc_link_dp_handle_automated_test(dc_link); + + if (aconnector->timing_changed) { + /* force connector disconnect and reconnect */ + force_connector_state(aconnector, DRM_FORCE_OFF); + msleep(100); + force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED); + } + + test_response.bits.ACK = 1; + + core_link_write_dpcd( + dc_link, + DP_TEST_RESPONSE, + &test_response.raw, + sizeof(test_response)); + } + else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && - hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) && + dc_link_check_link_loss_status(dc_link, &offload_work->data) && dc_link_dp_allow_hpd_rx_irq(dc_link)) { - dc_link_dp_handle_link_loss(dc_link); + /* offload_work->data comes from handle_hpd_rx_irq-> + * schedule_hpd_rx_offload_work, i.e. this is the deferred + * handler for an HPD short pulse. By now the link status may + * have changed, so read the latest link status from the DPCD + * registers; if the link is still good, skip running link + * training again.
+ */ + union hpd_irq_data irq_data; + + memset(&irq_data, 0, sizeof(irq_data)); + + /* Before dc_link_dp_handle_link_loss runs, allow a new link-loss + * handling request to be queued in case the link is lost again at + * the end of dc_link_dp_handle_link_loss + */ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); offload_work->offload_wq->is_handling_link_loss = false; spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); + + if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) && + dc_link_check_link_loss_status(dc_link, &irq_data)) + dc_link_dp_handle_link_loss(dc_link); } mutex_unlock(&adev->dm.dc_lock); @@ -1551,6 +1604,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0) init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true; + /* Disable SubVP + DRR config by default */ + init_data.flags.disable_subvp_drr = true; + if (amdgpu_dc_feature_mask & DC_ENABLE_SUBVP_DRR) + init_data.flags.disable_subvp_drr = false; + init_data.flags.seamless_boot_edp_requested = false; if (check_seamless_boot_capability(adev)) { @@ -1606,6 +1664,26 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ adev->dm.dc->debug.ignore_cable_id = true; + /* TODO: There is a new drm mst change where the freedom of + * vc_next_start_slot update is revoked/moved into drm, instead of in + * driver. This forces us to make sure to get vc_next_start_slot updated + * in drm function each time without considering if mst_state is active + * or not. Otherwise, next time hotplug will give wrong start_slot + * number. We are implementing a temporary solution: also notify drm of + * mst deallocation when the link is no longer of MST type when + * uncommitting the stream, so we will have more time to work on a + * proper solution. + * Ideally when dm_helpers_dp_mst_stop_top_mgr message is triggered, we + * should notify drm to do a complete "reset" of its states and stop + * calling further drm mst functions when link is no longer of an MST + * type. This could happen when we unplug MST hubs/displays. When the + * stream uncommit comes later, after the unplug, we should just reset + * hardware states only.
+ */ + adev->dm.dc->debug.temp_mst_deallocation_sequence = true; + + if (adev->dm.dc->caps.dp_hdmi21_pcon_support) + DRM_INFO("DP-HDMI FRL PCON supported\n"); + r = dm_dmub_hw_init(adev); if (r) { DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); @@ -1659,7 +1737,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) } #endif #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work(); + adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev); + if (!adev->dm.secure_display_ctxs) { + DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n"); + } #endif if (dc_is_dmub_outbox_supported(adev->dm.dc)) { init_completion(&adev->dm.dmub_aux_transfer_done); @@ -1750,10 +1831,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) amdgpu_dm_destroy_drm_device(&adev->dm); #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - if (adev->dm.crc_rd_wrk) { - flush_work(&adev->dm.crc_rd_wrk->notify_ta_work); - kfree(adev->dm.crc_rd_wrk); - adev->dm.crc_rd_wrk = NULL; + if (adev->dm.secure_display_ctxs) { + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (adev->dm.secure_display_ctxs[i].crtc) { + flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work); + flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work); + } + } + kfree(adev->dm.secure_display_ctxs); + adev->dm.secure_display_ctxs = NULL; } #endif #ifdef CONFIG_DRM_AMD_DC_HDCP @@ -1888,25 +1974,17 @@ static int load_dmcu_fw(struct amdgpu_device *adev) return 0; } - r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev); - if (r == -ENOENT) { + r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu); + if (r == -ENODEV) { /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ DRM_DEBUG_KMS("dm: DMCU firmware not found\n"); adev->dm.fw_dmcu = NULL; return 0; } - if (r) { - dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n", - fw_name_dmcu); - return r; - } - - r = amdgpu_ucode_validate(adev->dm.fw_dmcu); if (r) { dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n", fw_name_dmcu); - release_firmware(adev->dm.fw_dmcu); - adev->dm.fw_dmcu = NULL; + amdgpu_ucode_release(&adev->dm.fw_dmcu); return r; } @@ -1952,7 +2030,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) struct dmub_srv_fb_info *fb_info; struct dmub_srv *dmub_srv; const struct dmcub_firmware_header_v1_0 *hdr; - const char *fw_name_dmub; enum dmub_asic dmub_asic; enum dmub_status status; int r; @@ -1960,73 +2037,43 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(2, 1, 0): dmub_asic = DMUB_ASIC_DCN21; - fw_name_dmub = FIRMWARE_RENOIR_DMUB; - if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) - fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; break; case IP_VERSION(3, 0, 0): - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) { - dmub_asic = DMUB_ASIC_DCN30; - fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; - } else { - dmub_asic = DMUB_ASIC_DCN30; - fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; - } + dmub_asic = DMUB_ASIC_DCN30; break; case IP_VERSION(3, 0, 1): dmub_asic = DMUB_ASIC_DCN301; - fw_name_dmub = FIRMWARE_VANGOGH_DMUB; break; case IP_VERSION(3, 0, 2): dmub_asic = DMUB_ASIC_DCN302; - fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; break; case IP_VERSION(3, 0, 3): dmub_asic = DMUB_ASIC_DCN303; - fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; break; case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): dmub_asic = 
(adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; - fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; break; case IP_VERSION(3, 1, 4): dmub_asic = DMUB_ASIC_DCN314; - fw_name_dmub = FIRMWARE_DCN_314_DMUB; break; case IP_VERSION(3, 1, 5): dmub_asic = DMUB_ASIC_DCN315; - fw_name_dmub = FIRMWARE_DCN_315_DMUB; break; case IP_VERSION(3, 1, 6): dmub_asic = DMUB_ASIC_DCN316; - fw_name_dmub = FIRMWARE_DCN316_DMUB; break; case IP_VERSION(3, 2, 0): dmub_asic = DMUB_ASIC_DCN32; - fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; break; case IP_VERSION(3, 2, 1): dmub_asic = DMUB_ASIC_DCN321; - fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; break; default: /* ASIC doesn't support DMUB. */ return 0; } - r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev); - if (r) { - DRM_ERROR("DMUB firmware loading failed: %d\n", r); - return 0; - } - - r = amdgpu_ucode_validate(adev->dm.dmub_fw); - if (r) { - DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r); - return 0; - } - hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); @@ -2093,7 +2140,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) * TODO: Move this into GART. */ r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, + &adev->dm.dmub_bo, &adev->dm.dmub_bo_gpu_addr, &adev->dm.dmub_bo_cpu_addr); if (r) @@ -2148,11 +2197,8 @@ static int dm_sw_fini(void *handle) adev->dm.dmub_srv = NULL; } - release_firmware(adev->dm.dmub_fw); - adev->dm.dmub_fw = NULL; - - release_firmware(adev->dm.fw_dmcu); - adev->dm.fw_dmcu = NULL; + amdgpu_ucode_release(&adev->dm.dmub_fw); + amdgpu_ucode_release(&adev->dm.fw_dmcu); return 0; } @@ -2178,6 +2224,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev) DRM_ERROR("DM_MST: Failed to start MST\n"); aconnector->dc_link->type = dc_connection_single; + ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, + aconnector->dc_link); break; } } @@ -2246,7 +2294,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type != dc_connection_mst_branch || - aconnector->mst_port) + aconnector->mst_root) continue; mgr = &aconnector->mst_mgr; @@ -2499,7 +2547,7 @@ struct amdgpu_dm_connector * amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, struct drm_crtc *crtc) { - uint32_t i; + u32 i; struct drm_connector_state *new_con_state; struct drm_connector *connector; struct drm_crtc *crtc_from_state; @@ -2747,16 +2795,18 @@ static int dm_resume(void *handle) drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); + if (!aconnector->dc_link) + continue; + /* * this is the case when traversing through already created * MST connectors, should be skipped */ - if (aconnector->dc_link && - aconnector->dc_link->type == dc_connection_mst_branch) + if (aconnector->dc_link->type == dc_connection_mst_branch) continue; mutex_lock(&aconnector->hpd_lock); - if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) + if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) DRM_ERROR("KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { @@ -3034,6 +3084,10 @@ void amdgpu_dm_update_connector_after_detect( 
aconnector->edid); } + aconnector->timing_requested = kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL); + if (!aconnector->timing_requested) + dm_error("%s: failed to create aconnector->requested_timing\n", __func__); + drm_connector_update_edid_property(connector, aconnector->edid); amdgpu_dm_update_freesync_caps(connector, aconnector->edid); update_connector_ext_caps(aconnector); @@ -3045,6 +3099,8 @@ void amdgpu_dm_update_connector_after_detect( dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; + kfree(aconnector->timing_requested); + aconnector->timing_requested = NULL; #ifdef CONFIG_DRM_AMD_DC_HDCP /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) @@ -3089,7 +3145,9 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) if (aconnector->fake_enable) aconnector->fake_enable = false; - if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) + aconnector->timing_changed = false; + + if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) DRM_ERROR("KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { @@ -3130,8 +3188,8 @@ static void handle_hpd_irq(void *param) static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector) { - uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; - uint8_t dret; + u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; + u8 dret; bool new_irq_handled = false; int dpcd_addr; int dpcd_bytes_to_read; @@ -3159,7 +3217,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector) while (dret == dpcd_bytes_to_read && process_count < max_process_count) { - uint8_t retry; + u8 retry; dret = 0; process_count++; @@ -3178,7 +3236,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector) dpcd_bytes_to_read - 1; for (retry = 0; retry < 3; retry++) { - uint8_t wret; + u8 wret; wret = drm_dp_dpcd_write( &aconnector->dm_dp_aux.aux, @@ -3238,7 +3296,7 @@ static void handle_hpd_rx_irq(void *param) union hpd_irq_data hpd_irq_data; bool link_loss = false; bool has_left_work = false; - int idx = aconnector->base.index; + int idx = dc_link->link_index; struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); @@ -3292,7 +3350,7 @@ static void handle_hpd_rx_irq(void *param) out: if (result && !is_mst_root_connector) { /* Downstream Port status changed. 
*/ - if (!dc_link_detect_sink(dc_link, &new_connection_type)) + if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) DRM_ERROR("KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { @@ -3380,7 +3438,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev) (void *) aconnector); if (adev->dm.hpd_rx_offload_wq) - adev->dm.hpd_rx_offload_wq[connector->index].aconnector = + adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector = aconnector; } } @@ -4192,15 +4250,16 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector); static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) { struct amdgpu_display_manager *dm = &adev->dm; - int32_t i; + s32 i; struct amdgpu_dm_connector *aconnector = NULL; struct amdgpu_encoder *aencoder = NULL; struct amdgpu_mode_info *mode_info = &adev->mode_info; - uint32_t link_cnt; - int32_t primary_planes; + u32 link_cnt; + s32 primary_planes; enum dc_connection_type new_connection_type = dc_connection_none; const struct dc_plane_cap *plane; bool psr_feature_enabled = false; + int max_overlay = dm->dc->caps.max_slave_planes; dm->display_indexes_num = dm->dc->caps.max_streams; /* Update the actual used number of crtc */ @@ -4255,14 +4314,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) if (!plane->pixel_format_support.argb8888) continue; + if (max_overlay-- == 0) + break; + if (initialize_plane(dm, NULL, primary_planes + i, DRM_PLANE_TYPE_OVERLAY, plane)) { DRM_ERROR("KMS: Failed to initialize overlay plane\n"); goto fail; } - - /* Only create one overlay plane. */ - break; } for (i = 0; i < dm->dc->caps.max_streams; i++) @@ -4341,7 +4400,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) link = dc_get_link_at_index(dm->dc, i); - if (!dc_link_detect_sink(link, &new_connection_type)) + if (!dc_link_detect_connection_type(link, &new_connection_type)) DRM_ERROR("KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { @@ -4517,6 +4576,61 @@ DEVICE_ATTR_WO(s3_debug); #endif +static int dm_init_microcode(struct amdgpu_device *adev) +{ + char *fw_name_dmub; + int r; + + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(2, 1, 0): + fw_name_dmub = FIRMWARE_RENOIR_DMUB; + if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) + fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; + break; + case IP_VERSION(3, 0, 0): + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) + fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; + else + fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; + break; + case IP_VERSION(3, 0, 1): + fw_name_dmub = FIRMWARE_VANGOGH_DMUB; + break; + case IP_VERSION(3, 0, 2): + fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; + break; + case IP_VERSION(3, 0, 3): + fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; + break; + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; + break; + case IP_VERSION(3, 1, 4): + fw_name_dmub = FIRMWARE_DCN_314_DMUB; + break; + case IP_VERSION(3, 1, 5): + fw_name_dmub = FIRMWARE_DCN_315_DMUB; + break; + case IP_VERSION(3, 1, 6): + fw_name_dmub = FIRMWARE_DCN316_DMUB; + break; + case IP_VERSION(3, 2, 0): + fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; + break; + case IP_VERSION(3, 2, 1): + fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; + break; + default: + /* ASIC doesn't support DMUB. 
*/ + return 0; + } + r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub); + if (r) + DRM_ERROR("DMUB firmware loading failed: %d\n", r); + return r; +} + static int dm_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -4660,7 +4774,7 @@ static int dm_early_init(void *handle) #endif adev->dc_enabled = true; - return 0; + return dm_init_microcode(adev); } static bool modereset_required(struct drm_crtc_state *crtc_state) @@ -4725,7 +4839,7 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state, static int fill_dc_plane_info_and_addr(struct amdgpu_device *adev, const struct drm_plane_state *plane_state, - const uint64_t tiling_flags, + const u64 tiling_flags, struct dc_plane_info *plane_info, struct dc_plane_address *address, bool tmz_surface, @@ -4900,7 +5014,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, static inline void fill_dc_dirty_rect(struct drm_plane *plane, struct rect *dirty_rect, int32_t x, - int32_t y, int32_t width, int32_t height, + s32 y, s32 width, s32 height, int *i, bool ffu) { if (*i > DC_MAX_DIRTY_RECTS) @@ -4936,6 +5050,7 @@ static inline void fill_dc_dirty_rect(struct drm_plane *plane, * @new_plane_state: New state of @plane * @crtc_state: New state of CRTC connected to the @plane * @flip_addrs: DC flip tracking struct, which also tracts dirty rects + * @dirty_regions_changed: dirty regions changed * * For PSR SU, DC informs the DMUB uController of dirty rectangle regions * (referred to as "damage clips" in DRM nomenclature) that require updating on @@ -4952,15 +5067,17 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, struct drm_plane_state *old_plane_state, struct drm_plane_state *new_plane_state, struct drm_crtc_state *crtc_state, - struct dc_flip_addrs *flip_addrs) + struct dc_flip_addrs *flip_addrs, + bool *dirty_regions_changed) { struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); struct rect *dirty_rects = flip_addrs->dirty_rects; - uint32_t num_clips; + u32 num_clips; struct drm_mode_rect *clips; bool bb_changed; bool fb_changed; - uint32_t i = 0; + u32 i = 0; + *dirty_regions_changed = false; /* * Cursor plane has it's own dirty rect update interface. 
See @@ -5005,6 +5122,8 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, new_plane_state->plane->base.id, bb_changed, fb_changed, num_clips); + *dirty_regions_changed = bb_changed; + if (bb_changed) { fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], new_plane_state->crtc_x, @@ -5106,7 +5225,7 @@ static enum dc_color_depth convert_color_depth_from_display_info(const struct drm_connector *connector, bool is_y420, int requested_bpc) { - uint8_t bpc; + u8 bpc; if (is_y420) { bpc = 8; @@ -5650,8 +5769,8 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, uint32_t max_dsc_target_bpp_limit_override) { const struct dc_link_settings *verified_link_cap = NULL; - uint32_t link_bw_in_kbps; - uint32_t edp_min_bpp_x16, edp_max_bpp_x16; + u32 link_bw_in_kbps; + u32 edp_min_bpp_x16, edp_max_bpp_x16; struct dc *dc = sink->ctx->dc; struct dc_dsc_bw_range bw_range = {0}; struct dc_dsc_config dsc_cfg = {0}; @@ -5708,11 +5827,11 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, struct dsc_dec_dpcd_caps *dsc_caps) { struct drm_connector *drm_connector = &aconnector->base; - uint32_t link_bandwidth_kbps; + u32 link_bandwidth_kbps; struct dc *dc = sink->ctx->dc; - uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps; - uint32_t dsc_max_supported_bw_in_kbps; - uint32_t max_dsc_target_bpp_limit_override = + u32 max_supported_bw_in_kbps, timing_bw_in_kbps; + u32 dsc_max_supported_bw_in_kbps; + u32 max_dsc_target_bpp_limit_override = drm_connector->display_info.max_dsc_bpp; link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, @@ -5891,6 +6010,14 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, stream, &mode, &aconnector->base, con_state, old_stream, requested_bpc); + if (aconnector->timing_changed) { + DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n", + __func__, + stream->timing.display_color_depth, + aconnector->timing_requested->display_color_depth); + stream->timing = *aconnector->timing_requested; + } + #if defined(CONFIG_DRM_AMD_DC_DCN) /* SST DSC determination policy */ update_dsc_caps(aconnector, sink, stream, &dsc_caps); @@ -6083,15 +6210,12 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) if (aconnector->mst_mgr.dev) drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); -#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ - defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) for (i = 0; i < dm->num_of_edps; i++) { if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) { backlight_device_unregister(dm->backlight_dev[i]); dm->backlight_dev[i] = NULL; } } -#endif if (aconnector->dc_em_sink) dc_sink_release(aconnector->dc_em_sink); @@ -6285,7 +6409,6 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, dc_plane_state->plane_size.surface_size.width = stream->src.width; dc_plane_state->plane_size.chroma_size.height = stream->src.height; dc_plane_state->plane_size.chroma_size.width = stream->src.width; - dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN; dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN; dc_plane_state->rotation = ROTATION_ANGLE_0; @@ -6583,11 +6706,11 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, int clock, bpp = 0; bool is_y420 = false; - if (!aconnector->port || !aconnector->dc_sink) + if (!aconnector->mst_output_port || !aconnector->dc_sink) return 0; - mst_port = aconnector->port; - mst_mgr = 
&aconnector->mst_port->mst_mgr; + mst_port = aconnector->mst_output_port; + mst_mgr = &aconnector->mst_root->mst_mgr; if (!crtc_state->connectors_changed && !crtc_state->mode_changed) return 0; @@ -6597,7 +6720,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, return PTR_ERR(mst_state); if (!mst_state->pbn_div) - mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link); + mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link); if (!state->duplicated) { int max_bpc = conn_state->max_requested_bpc; @@ -6643,7 +6766,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, aconnector = to_amdgpu_dm_connector(connector); - if (!aconnector->port) + if (!aconnector->mst_output_port) continue; if (!new_con_state || !new_con_state->crtc) @@ -6683,7 +6806,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, dm_conn_state->pbn = pbn; dm_conn_state->vcpi_slots = slot_num; - ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, + ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, dm_conn_state->pbn, false); if (ret < 0) return ret; @@ -6691,7 +6814,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, continue; } - vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, true); + vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true); if (vcpi < 0) return vcpi; @@ -6934,7 +7057,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) const struct drm_display_mode *m; struct drm_display_mode *new_mode; uint i; - uint32_t new_modes_count = 0; + u32 new_modes_count = 0; /* Standard FPS values * @@ -6948,7 +7071,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) * 60 - Commonly used * 48,72,96,120 - Multiples of 24 */ - static const uint32_t common_rates[] = { + static const u32 common_rates[] = { 23976, 24000, 25000, 29970, 30000, 48000, 50000, 60000, 72000, 96000, 120000 }; @@ -6964,8 +7087,8 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) return 0; for (i = 0; i < ARRAY_SIZE(common_rates); i++) { - uint64_t target_vtotal, target_vtotal_diff; - uint64_t num, den; + u64 target_vtotal, target_vtotal_diff; + u64 num, den; if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) continue; @@ -7064,6 +7187,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, aconnector->base.dpms = DRM_MODE_DPMS_OFF; aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */ aconnector->audio_inst = -1; + aconnector->pack_sdp_v1_3 = false; + aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE; + memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info)); mutex_init(&aconnector->hpd_lock); /* @@ -7105,7 +7231,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, adev->mode_info.underscan_vborder_property, 0); - if (!aconnector->mst_port) + if (!aconnector->mst_root) drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); /* This defaults to the max in the range, but we want 8bpc for non-edp. 
*/ @@ -7123,7 +7249,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, connector_type == DRM_MODE_CONNECTOR_eDP) { drm_connector_attach_hdr_output_metadata_property(&aconnector->base); - if (!aconnector->mst_port) + if (!aconnector->mst_root) drm_connector_attach_vrr_capable_property(&aconnector->base); #ifdef CONFIG_DRM_AMD_DC_HDCP @@ -7207,7 +7333,7 @@ create_i2c(struct ddc_service *ddc_service, */ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector, - uint32_t link_index, + u32 link_index, struct amdgpu_encoder *aencoder) { int res = 0; @@ -7392,27 +7518,55 @@ is_scaling_state_different(const struct dm_connector_state *dm_state, } #ifdef CONFIG_DRM_AMD_DC_HDCP -static bool is_content_protection_different(struct drm_connector_state *state, - const struct drm_connector_state *old_state, - const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) +static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state, + struct drm_crtc_state *old_crtc_state, + struct drm_connector_state *new_conn_state, + struct drm_connector_state *old_conn_state, + const struct drm_connector *connector, + struct hdcp_workqueue *hdcp_w) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); - /* Handle: Type0/1 change */ - if (old_state->hdcp_content_type != state->hdcp_content_type && - state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { - state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", + connector->index, connector->status, connector->dpms); + pr_debug("[HDCP_DM] state protection old: %x new: %x\n", + old_conn_state->content_protection, new_conn_state->content_protection); + + if (old_crtc_state) + pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + old_crtc_state->enable, + old_crtc_state->active, + old_crtc_state->mode_changed, + old_crtc_state->active_changed, + old_crtc_state->connectors_changed); + + if (new_crtc_state) + pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + new_crtc_state->enable, + new_crtc_state->active, + new_crtc_state->mode_changed, + new_crtc_state->active_changed, + new_crtc_state->connectors_changed); + + /* hdcp content type change */ + if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type && + new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__); return true; } - /* CP is being re enabled, ignore this - * - * Handles: ENABLED -> DESIRED - */ - if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && - state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { - state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; + /* CP is being re enabled, ignore this */ + if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { + if (new_crtc_state && new_crtc_state->mode_changed) { + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__); + return true; + } + new_conn_state->content_protection = 
DRM_MODE_CONTENT_PROTECTION_ENABLED; + pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__); return false; } @@ -7420,9 +7574,9 @@ static bool is_content_protection_different(struct drm_connector_state *state, * * Handles: UNDESIRED -> ENABLED */ - if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && - state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) - state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; /* Stream removed and re-enabled * @@ -7432,10 +7586,12 @@ static bool is_content_protection_different(struct drm_connector_state *state, * * Handles: DESIRED -> DESIRED (Special case) */ - if (!(old_state->crtc && old_state->crtc->enabled) && - state->crtc && state->crtc->enabled && + if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && + new_conn_state->crtc && new_conn_state->crtc->enabled && connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { dm_con_state->update_hdcp = false; + pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", + __func__); return true; } @@ -7447,35 +7603,42 @@ static bool is_content_protection_different(struct drm_connector_state *state, * * Handles: DESIRED -> DESIRED (Special case) */ - if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && - connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { + if (dm_con_state->update_hdcp && + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && + connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { dm_con_state->update_hdcp = false; + pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", + __func__); return true; } - /* - * Handles: UNDESIRED -> UNDESIRED - * DESIRED -> DESIRED - * ENABLED -> ENABLED - */ - if (old_state->content_protection == state->content_protection) + if (old_conn_state->content_protection == new_conn_state->content_protection) { + if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { + if (new_crtc_state && new_crtc_state->mode_changed) { + pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", + __func__); + return true; + } + pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", + __func__); + return false; + } + + pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__); return false; + } - /* - * Handles: UNDESIRED -> DESIRED - * DESIRED -> UNDESIRED - * ENABLED -> UNDESIRED - */ - if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) + if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { + pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", + __func__); return true; + } - /* - * Handles: DESIRED -> ENABLED - */ + pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__); return false; } - #endif + static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc, struct dc_stream_state *stream) @@ -7517,6 +7680,8 @@ static void update_freesync_state_on_stream( struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); unsigned long flags; bool pack_sdp_v1_3 = false; + struct amdgpu_dm_connector *aconn; + enum 
vrr_packet_type packet_type = PACKET_TYPE_VRR; if (!new_stream) return; @@ -7552,11 +7717,27 @@ static void update_freesync_state_on_stream( } } + aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context; + + if (aconn && aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) { + pack_sdp_v1_3 = aconn->pack_sdp_v1_3; + + if (aconn->vsdb_info.amd_vsdb_version == 1) + packet_type = PACKET_TYPE_FS_V1; + else if (aconn->vsdb_info.amd_vsdb_version == 2) + packet_type = PACKET_TYPE_FS_V2; + else if (aconn->vsdb_info.amd_vsdb_version == 3) + packet_type = PACKET_TYPE_FS_V3; + + mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL, + &new_stream->adaptive_sync_infopacket); + } + mod_freesync_build_vrr_infopacket( dm->freesync_module, new_stream, &vrr_params, - PACKET_TYPE_VRR, + packet_type, TRANSFER_FUNC_UNKNOWN, &vrr_infopacket, pack_sdp_v1_3); @@ -7570,6 +7751,7 @@ static void update_freesync_state_on_stream( new_crtc_state->vrr_infopacket = vrr_infopacket; new_stream->vrr_infopacket = vrr_infopacket; + new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params); if (new_crtc_state->freesync_vrr_info_changed) DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", @@ -7691,8 +7873,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct drm_crtc *pcrtc, bool wait_for_vblank) { - uint32_t i; - uint64_t timestamp_ns; + u32 i; + u64 timestamp_ns = ktime_get_ns(); struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); @@ -7703,10 +7885,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); int planes_count = 0, vpos, hpos; unsigned long flags; - uint32_t target_vblank, last_flip_vblank; + u32 target_vblank, last_flip_vblank; bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); bool cursor_update = false; bool pflip_present = false; + bool dirty_rects_changed = false; struct { struct dc_surface_update surface_updates[MAX_SURFACES]; struct dc_plane_info plane_infos[MAX_SURFACES]; @@ -7794,10 +7977,32 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count]; - if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) + if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) { fill_dc_dirty_rects(plane, old_plane_state, new_plane_state, new_crtc_state, - &bundle->flip_addrs[planes_count]); + &bundle->flip_addrs[planes_count], + &dirty_rects_changed); + + /* + * If the dirty regions changed, PSR-SU needs to be disabled temporarily + * and then enabled again once the dirty regions are stable, to avoid a + * video glitch. PSR-SU will be re-enabled in vblank_control_worker() if + * the user pauses the video while PSR-SU is disabled.
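+ * The re-enable path later in this function additionally requires that + * 500 ms (500000000 ns) have passed since psr_dirty_rects_change_timestamp_ns + * was last updated, which is what holds PSR-SU off until the rects settle.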
+ */ + if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && + acrtc_attach->dm_irq_params.allow_psr_entry && +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && +#endif + dirty_rects_changed) { + mutex_lock(&dm->dc_lock); + acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns = + timestamp_ns; + if (acrtc_state->stream->link->psr_settings.psr_allow_active) + amdgpu_dm_psr_disable(acrtc_state->stream); + mutex_unlock(&dm->dc_lock); + } + } /* * Only allow immediate flips for fast updates that don't @@ -8016,7 +8221,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && #endif - !acrtc_state->stream->link->psr_settings.psr_allow_active) + !acrtc_state->stream->link->psr_settings.psr_allow_active && + (timestamp_ns - + acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) > + 500000000) amdgpu_dm_psr_enable(acrtc_state->stream); } else { acrtc_attach->dm_irq_params.allow_psr_entry = false; @@ -8141,7 +8349,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct amdgpu_display_manager *dm = &adev->dm; struct dm_atomic_state *dm_state; struct dc_state *dc_state = NULL, *dc_state_temp = NULL; - uint32_t i, j; + u32 i, j; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; unsigned long flags; @@ -8315,10 +8523,61 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); + + if (!connector) + continue; + + pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", + connector->index, connector->status, connector->dpms); + pr_debug("[HDCP_DM] state protection old: %x new: %x\n", + old_con_state->content_protection, new_con_state->content_protection); + + if (aconnector->dc_sink) { + if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && + aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { + pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n", + aconnector->dc_sink->edid_caps.display_name); + } + } + new_crtc_state = NULL; + old_crtc_state = NULL; - if (acrtc) + if (acrtc) { new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); + } + + if (old_crtc_state) + pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + old_crtc_state->enable, + old_crtc_state->active, + old_crtc_state->mode_changed, + old_crtc_state->active_changed, + old_crtc_state->connectors_changed); + + if (new_crtc_state) + pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + new_crtc_state->enable, + new_crtc_state->active, + new_crtc_state->mode_changed, + new_crtc_state->active_changed, + new_crtc_state->connectors_changed); + } + + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + new_crtc_state = NULL; + old_crtc_state = NULL; + + if (acrtc) { + new_crtc_state = drm_atomic_get_new_crtc_state(state, 
&acrtc->base); + old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); + } dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); @@ -8330,11 +8589,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) continue; } - if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) + if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state, + old_con_state, connector, adev->dm.hdcp_workqueue)) { + /* when a display is unplugged from the mst hub, the connector will + * be destroyed within dm_dp_mst_connector_destroy. The connector's + * hdcp properties, like type, undesired, desired, enabled, + * will be lost. So, save the hdcp properties into hdcp_work within + * amdgpu_dm_atomic_commit_tail. If the same display is + * plugged back with the same display index, its hdcp properties + * will be retrieved from hdcp_work within dm_dp_mst_get_modes + */ + + bool enable_encryption = false; + + if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) + enable_encryption = true; + + if (aconnector->dc_link && aconnector->dc_sink && + aconnector->dc_link->type == dc_connection_mst_branch) { + struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; + struct hdcp_workqueue *hdcp_w = + &hdcp_work[aconnector->dc_link->link_index]; + + hdcp_w->hdcp_content_type[connector->index] = + new_con_state->hdcp_content_type; + hdcp_w->content_protection[connector->index] = + new_con_state->content_protection; + } + + if (new_crtc_state && new_crtc_state->mode_changed && + new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) + enable_encryption = true; + + DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); + hdcp_update_display( adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, - new_con_state->hdcp_content_type, - new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED); + new_con_state->hdcp_content_type, enable_encryption); + } } #endif @@ -8432,9 +8724,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); #ifdef CONFIG_DEBUG_FS enum amdgpu_dm_pipe_crc_source cur_crc_src; -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - struct crc_rd_work *crc_rd_wrk; -#endif #endif /* Count number of newly disabled CRTCs for dropping PM refs later. */ if (old_crtc_state->active && !new_crtc_state->active) @@ -8447,9 +8736,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) update_stream_irq_parameters(dm, dm_new_crtc_state); #ifdef CONFIG_DEBUG_FS -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - crc_rd_wrk = dm->crc_rd_wrk; -#endif spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); cur_crc_src = acrtc->dm_irq_params.crc_src; spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); @@ -8478,10 +8764,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) if (amdgpu_dm_crc_window_is_activated(crtc)) { spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); acrtc->dm_irq_params.window_param.update_win = true; + + /** + * It takes 2 frames for HW to stably generate CRC when + * resuming from suspend, so we set skip_frame_cnt to 2.
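+ * The counter is consumed in amdgpu_dm_crtc_handle_crc_window_irq(), + * which decrements it once per vblank before any ROI or CRC work is scheduled.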
+ */ acrtc->dm_irq_params.window_param.skip_frame_cnt = 2; - spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); - crc_rd_wrk->crtc = crtc; - spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } #endif @@ -8772,7 +9060,7 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, } static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) { - uint64_t num, den, res; + u64 num, den, res; struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; @@ -9228,7 +9516,8 @@ static int dm_update_plane_state(struct dc *dc, struct drm_plane_state *old_plane_state, struct drm_plane_state *new_plane_state, bool enable, - bool *lock_and_validation_needed) + bool *lock_and_validation_needed, + bool *is_top_most_overlay) { struct dm_atomic_state *dm_state = NULL; @@ -9336,6 +9625,14 @@ static int dm_update_plane_state(struct dc *dc, if (!dc_new_plane_state) return -ENOMEM; + /* Block top most plane from being a video plane */ + if (plane->type == DRM_PLANE_TYPE_OVERLAY) { + if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) + return -EINVAL; + else + *is_top_most_overlay = false; + } + DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", plane->base.id, new_plane_crtc->base.id); @@ -9479,7 +9776,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm continue; aconnector = to_amdgpu_dm_connector(connector); - if (!aconnector->port || !aconnector->mst_port) + if (!aconnector->mst_output_port || !aconnector->mst_root) aconnector = NULL; else break; @@ -9488,7 +9785,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm if (!aconnector) return 0; - return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr); + return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr); } #endif @@ -9532,6 +9829,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, enum dc_status status; int ret, i; bool lock_and_validation_needed = false; + bool is_top_most_overlay = true; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; #if defined(CONFIG_DRM_AMD_DC_DCN) struct drm_dp_mst_topology_mgr *mgr; @@ -9670,7 +9968,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, old_plane_state, new_plane_state, false, - &lock_and_validation_needed); + &lock_and_validation_needed, + &is_top_most_overlay); if (ret) { DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); goto fail; @@ -9709,7 +10008,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, old_plane_state, new_plane_state, true, - &lock_and_validation_needed); + &lock_and_validation_needed, + &is_top_most_overlay); if (ret) { DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); goto fail; @@ -9944,7 +10244,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, static bool is_dp_capable_without_timing_msa(struct dc *dc, struct amdgpu_dm_connector *amdgpu_dm_connector) { - uint8_t dpcd_data; + u8 dpcd_data; bool capable = false; if (amdgpu_dm_connector->dc_link && @@ -9963,7 +10263,7 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc, static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, unsigned int offset, unsigned int total_length, - uint8_t *data, + u8 *data, unsigned int length, struct amdgpu_hdmi_vsdb_info *vsdb) { @@ -10018,7 +10318,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager 
*dm, } static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm, - uint8_t *edid_ext, int len, + u8 *edid_ext, int len, struct amdgpu_hdmi_vsdb_info *vsdb_info) { int i; @@ -10059,7 +10359,7 @@ static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm, } static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm, - uint8_t *edid_ext, int len, + u8 *edid_ext, int len, struct amdgpu_hdmi_vsdb_info *vsdb_info) { int i; @@ -10075,21 +10375,25 @@ static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm, } static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, - uint8_t *edid_ext, int len, + u8 *edid_ext, int len, struct amdgpu_hdmi_vsdb_info *vsdb_info) { struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); + bool ret; + mutex_lock(&adev->dm.dc_lock); if (adev->dm.dmub_srv) - return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info); + ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info); else - return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); + ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); + mutex_unlock(&adev->dm.dc_lock); + return ret; } static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) { - uint8_t *edid_ext = NULL; + u8 *edid_ext = NULL; int i; bool valid_vsdb_found = false; @@ -10144,6 +10448,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; bool freesync_capable = false; + enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; if (!connector->state) { DRM_ERROR("%s - Connector has no state", __func__); @@ -10236,6 +10541,26 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, } } + as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link); + + if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) { + i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); + if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) { + + amdgpu_dm_connector->pack_sdp_v1_3 = true; + amdgpu_dm_connector->as_type = as_type; + amdgpu_dm_connector->vsdb_info = vsdb_info; + + amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; + amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) + freesync_capable = true; + + connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; + connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; + } + } + update: if (dm_con_state) dm_con_state->freesync_capable = freesync_capable; @@ -10265,7 +10590,7 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev) } void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, - uint32_t value, const char *func_name) + u32 value, const char *func_name) { #ifdef DM_CHECK_ADDR_0 if (address == 0) { @@ -10280,7 +10605,7 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, const char *func_name) { - uint32_t value; + u32 value; #ifdef DM_CHECK_ADDR_0 if (address == 0) { DC_ERR("invalid register read; address = 0\n"); @@ -10359,6 +10684,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( ret = p_notify->aux_reply.length; *operation_result = p_notify->result; out: + reinit_completion(&adev->dm.dmub_aux_transfer_done); 
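+ /* The completion was just re-armed while still holding dpia_aux_lock, so a + * stale DMUB notification from this transfer cannot satisfy the next waiter. + */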
mutex_unlock(&adev->dm.dpia_aux_lock); return ret; } @@ -10386,6 +10712,8 @@ int amdgpu_dm_process_dmub_set_config_sync( *operation_result = SET_CONFIG_UNKNOWN_ERROR; } + if (!is_cmd_complete) + reinit_completion(&adev->dm.dmub_aux_transfer_done); mutex_unlock(&adev->dm.dpia_aux_lock); return ret; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index df3c25e32c65dc..ed5cbe9da40cdf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -31,6 +31,7 @@ #include #include #include +#include "link_service_types.h" /* * This file contains the definition for amdgpu_display_manager @@ -58,6 +59,7 @@ #include "irq_types.h" #include "signal_types.h" #include "amdgpu_dm_crc.h" +#include "mod_info_packet.h" struct aux_payload; struct set_config_cmd_payload; enum aux_return_code_type; @@ -494,11 +496,12 @@ struct amdgpu_display_manager { #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) /** - * @crc_rd_wrk: + * @secure_display_ctxs: * - * Work to be executed in a separate thread to communicate with PSP. + * Store the ROI information and the work_struct to command dmub and psp for + * all crtcs. */ - struct crc_rd_work *crc_rd_wrk; + struct secure_display_context *secure_display_ctxs; #endif /** * @hpd_rx_offload_wq: @@ -575,6 +578,36 @@ enum mst_progress_status { MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3), }; +/** + * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info + * + * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this + * struct is useful to keep track of the display-specific information about + * FreeSync. + */ +struct amdgpu_hdmi_vsdb_info { + /** + * @amd_vsdb_version: Vendor Specific Data Block Version, should be + * used to determine which Vendor Specific InfoFrame (VSIF) to send. + */ + unsigned int amd_vsdb_version; + + /** + * @freesync_supported: FreeSync Supported. + */ + bool freesync_supported; + + /** + * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz. + */ + unsigned int min_refresh_rate_hz; + + /** + * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz + */ + unsigned int max_refresh_rate_hz; +}; + struct amdgpu_dm_connector { struct drm_connector base; @@ -603,8 +636,8 @@ struct amdgpu_dm_connector { /* DM only */ struct drm_dp_mst_topology_mgr mst_mgr; struct amdgpu_dm_dp_aux dm_dp_aux; - struct drm_dp_mst_port *port; - struct amdgpu_dm_connector *mst_port; + struct drm_dp_mst_port *mst_output_port; + struct amdgpu_dm_connector *mst_root; struct drm_dp_aux *dsc_aux; /* TODO see if we can merge with ddc_bus or make a dm_connector */ struct amdgpu_i2c_adapter *i2c; @@ -643,6 +676,15 @@ struct amdgpu_dm_connector { /* Record progress status of mst*/ uint8_t mst_status; + + /* Automated testing */ + bool timing_changed; + struct dc_crtc_timing *timing_requested; + + /* Adaptive Sync */ + bool pack_sdp_v1_3; + enum adaptive_sync_type as_type; + struct amdgpu_hdmi_vsdb_info vsdb_info; }; static inline void amdgpu_dm_set_mst_status(uint8_t *status, @@ -713,37 +755,6 @@ struct dm_connector_state { uint64_t pbn; }; -/** - * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info - * - * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this - * struct is useful to keep track of the display-specific information about - * FreeSync. 
- */ -struct amdgpu_hdmi_vsdb_info { - /** - * @amd_vsdb_version: Vendor Specific Data Block Version, should be - * used to determine which Vendor Specific InfoFrame (VSIF) to send. - */ - unsigned int amd_vsdb_version; - - /** - * @freesync_supported: FreeSync Supported. - */ - bool freesync_supported; - - /** - * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz. - */ - unsigned int min_refresh_rate_hz; - - /** - * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz - */ - unsigned int max_refresh_rate_hz; -}; - - #define to_dm_connector_state(x)\ container_of((x), struct dm_connector_state, base) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 66df2394d7e4c2..27711743c22ce6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -101,35 +101,44 @@ static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc) static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work) { - struct crc_rd_work *crc_rd_wrk; - struct amdgpu_device *adev; + struct secure_display_context *secure_display_ctx; struct psp_context *psp; - struct securedisplay_cmd *securedisplay_cmd; + struct ta_securedisplay_cmd *securedisplay_cmd; struct drm_crtc *crtc; - uint8_t phy_id; + struct dc_stream_state *stream; + uint8_t phy_inst; int ret; - crc_rd_wrk = container_of(work, struct crc_rd_work, notify_ta_work); - spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); - crtc = crc_rd_wrk->crtc; + secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work); + crtc = secure_display_ctx->crtc; if (!crtc) { - spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); return; } - adev = drm_to_adev(crtc->dev); - psp = &adev->psp; - phy_id = crc_rd_wrk->phy_inst; - spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); + psp = &drm_to_adev(crtc->dev)->psp; + + if (!psp->securedisplay_context.context.initialized) { + DRM_DEBUG_DRIVER("Secure Display fails to notify PSP TA\n"); + return; + } + + stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream; + phy_inst = stream->link->link_enc_hw_inst; + /* need lock for multiple crtcs to use the command buffer */ mutex_lock(&psp->securedisplay_context.mutex); psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); - securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = - phy_id; + + securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst; + + /* PSP TA is expected to finish data transmission over I2C within current frame, + * even if there are up to 4 crtcs requesting to send in this frame.
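+ * The psp->securedisplay_context.mutex taken above is what serializes those + * per-crtc requests onto the single shared command buffer.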
+ */ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); + if (!ret) { if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); @@ -142,17 +151,23 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work) static void amdgpu_dm_forward_crc_window(struct work_struct *work) { - struct crc_fw_work *crc_fw_wrk; + struct secure_display_context *secure_display_ctx; struct amdgpu_display_manager *dm; + struct drm_crtc *crtc; + struct dc_stream_state *stream; - crc_fw_wrk = container_of(work, struct crc_fw_work, forward_roi_work); - dm = crc_fw_wrk->dm; + secure_display_ctx = container_of(work, struct secure_display_context, forward_roi_work); + crtc = secure_display_ctx->crtc; + + if (!crtc) + return; + + dm = &drm_to_adev(crtc->dev)->dm; + stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream; mutex_lock(&dm->dc_lock); - dc_stream_forward_crc_window(dm->dc, &crc_fw_wrk->rect, crc_fw_wrk->stream, crc_fw_wrk->is_stop_cmd); + dc_stream_forward_crc_window(stream, &secure_display_ctx->rect, false); mutex_unlock(&dm->dc_lock); - - kfree(crc_fw_wrk); } bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc) @@ -189,6 +204,9 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, struct dm_crtc_state *dm_crtc_state, enum amdgpu_dm_pipe_crc_source source) { +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + int i; +#endif struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dc_stream_state *stream_state = dm_crtc_state->stream; bool enable = amdgpu_dm_is_valid_crc_source(source); @@ -200,21 +218,18 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, mutex_lock(&adev->dm.dc_lock); - /* Enable CRTC CRC generation if necessary. */ + /* Enable or disable CRTC CRC generation */ if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) { #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + /* Disable secure_display if it was enabled */ if (!enable) { - if (adev->dm.crc_rd_wrk) { - flush_work(&adev->dm.crc_rd_wrk->notify_ta_work); - spin_lock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock); - - if (adev->dm.crc_rd_wrk->crtc == crtc) { + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (adev->dm.secure_display_ctxs[i].crtc == crtc) { /* stop ROI update on this crtc */ - dc_stream_forward_crc_window(stream_state->ctx->dc, - NULL, stream_state, true); - adev->dm.crc_rd_wrk->crtc = NULL; + flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work); + flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work); + dc_stream_forward_crc_window(stream_state, NULL, true); } - spin_unlock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock); } } #endif @@ -329,7 +344,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) goto cleanup; } - aux = (aconn->port) ? &aconn->port->aux : &aconn->dm_dp_aux.aux; + aux = (aconn->mst_output_port) ? 
&aconn->mst_output_port->aux : &aconn->dm_dp_aux.aux; if (!aux) { DRM_DEBUG_DRIVER("No dp aux for amd connector\n"); @@ -347,6 +362,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) } #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + /* Reset secure_display when we change crc source from debugfs */ amdgpu_dm_set_crc_window_default(crtc); #endif @@ -456,14 +472,12 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc) { - struct dc_stream_state *stream_state; struct drm_device *drm_dev = NULL; enum amdgpu_dm_pipe_crc_source cur_crc_src; struct amdgpu_crtc *acrtc = NULL; struct amdgpu_device *adev = NULL; - struct crc_rd_work *crc_rd_wrk; - struct crc_fw_work *crc_fw_wrk; - unsigned long flags1, flags2; + struct secure_display_context *secure_display_ctx = NULL; + unsigned long flags1; if (crtc == NULL) return; @@ -473,75 +487,76 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc) drm_dev = crtc->dev; spin_lock_irqsave(&drm_dev->event_lock, flags1); - stream_state = acrtc->dm_irq_params.stream; cur_crc_src = acrtc->dm_irq_params.crc_src; /* Early return if CRC capture is not enabled. */ - if (!amdgpu_dm_is_valid_crc_source(cur_crc_src)) + if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) || + !dm_is_crc_source_crtc(cur_crc_src)) goto cleanup; - if (!dm_is_crc_source_crtc(cur_crc_src)) + if (!acrtc->dm_irq_params.window_param.activated) goto cleanup; - if (!acrtc->dm_irq_params.window_param.activated) + if (acrtc->dm_irq_params.window_param.skip_frame_cnt) { + acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1; goto cleanup; + } - if (acrtc->dm_irq_params.window_param.update_win) { - if (acrtc->dm_irq_params.window_param.skip_frame_cnt) { - acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1; - goto cleanup; - } + secure_display_ctx = &adev->dm.secure_display_ctxs[acrtc->crtc_id]; + if (WARN_ON(secure_display_ctx->crtc != crtc)) { + /* We have set the crtc when creating secure_display_context, + * don't expect it to be changed here. + */ + secure_display_ctx->crtc = crtc; + } + if (acrtc->dm_irq_params.window_param.update_win) { /* prepare work for dmub to update ROI */ - crc_fw_wrk = kzalloc(sizeof(*crc_fw_wrk), GFP_ATOMIC); - if (!crc_fw_wrk) - goto cleanup; - - INIT_WORK(&crc_fw_wrk->forward_roi_work, amdgpu_dm_forward_crc_window); - crc_fw_wrk->dm = &adev->dm; - crc_fw_wrk->stream = stream_state; - crc_fw_wrk->rect.x = acrtc->dm_irq_params.window_param.x_start; - crc_fw_wrk->rect.y = acrtc->dm_irq_params.window_param.y_start; - crc_fw_wrk->rect.width = acrtc->dm_irq_params.window_param.x_end - + secure_display_ctx->rect.x = acrtc->dm_irq_params.window_param.x_start; + secure_display_ctx->rect.y = acrtc->dm_irq_params.window_param.y_start; + secure_display_ctx->rect.width = acrtc->dm_irq_params.window_param.x_end - acrtc->dm_irq_params.window_param.x_start; - crc_fw_wrk->rect.height = acrtc->dm_irq_params.window_param.y_end - + secure_display_ctx->rect.height = acrtc->dm_irq_params.window_param.y_end - acrtc->dm_irq_params.window_param.y_start; - schedule_work(&crc_fw_wrk->forward_roi_work); + schedule_work(&secure_display_ctx->forward_roi_work); acrtc->dm_irq_params.window_param.update_win = false; + + /* Statically skip 1 frame, because we may need to wait for the following + * before sending the ROI to dmub: + * 1. We defer the work by using the system workqueue. + * 2. We may need to wait for dc_lock before accessing dmub.
+ */ acrtc->dm_irq_params.window_param.skip_frame_cnt = 1; } else { - if (acrtc->dm_irq_params.window_param.skip_frame_cnt) { - acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1; - goto cleanup; - } - - if (adev->dm.crc_rd_wrk) { - crc_rd_wrk = adev->dm.crc_rd_wrk; - spin_lock_irqsave(&crc_rd_wrk->crc_rd_work_lock, flags2); - crc_rd_wrk->phy_inst = stream_state->link->link_enc_hw_inst; - spin_unlock_irqrestore(&crc_rd_wrk->crc_rd_work_lock, flags2); - schedule_work(&crc_rd_wrk->notify_ta_work); - } + /* prepare work for psp to read ROI/CRC and send to I2C */ + schedule_work(&secure_display_ctx->notify_ta_work); } cleanup: spin_unlock_irqrestore(&drm_dev->event_lock, flags1); } -struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void) +struct secure_display_context * +amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev) { - struct crc_rd_work *crc_rd_wrk = NULL; + struct secure_display_context *secure_display_ctxs = NULL; + int i; - crc_rd_wrk = kzalloc(sizeof(*crc_rd_wrk), GFP_KERNEL); + secure_display_ctxs = kcalloc(adev->mode_info.num_crtc, + sizeof(struct secure_display_context), + GFP_KERNEL); - if (!crc_rd_wrk) + if (!secure_display_ctxs) return NULL; - spin_lock_init(&crc_rd_wrk->crc_rd_work_lock); - INIT_WORK(&crc_rd_wrk->notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read); + for (i = 0; i < adev->mode_info.num_crtc; i++) { + INIT_WORK(&secure_display_ctxs[i].forward_roi_work, amdgpu_dm_forward_crc_window); + INIT_WORK(&secure_display_ctxs[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read); + secure_display_ctxs[i].crtc = &adev->mode_info.crtcs[i]->base; + } - return crc_rd_wrk; + return secure_display_ctxs; } #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h index 71bce608d751d6..935adca6f0486a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h @@ -45,7 +45,7 @@ struct crc_window_param { uint16_t y_start; uint16_t x_end; uint16_t y_end; - /* CRC windwo is activated or not*/ + /* CRC window is activated or not*/ bool activated; /* Update crc window during vertical blank or not */ bool update_win; @@ -53,22 +53,17 @@ struct crc_window_param { int skip_frame_cnt; }; -/* read_work for driver to call PSP to read */ -struct crc_rd_work { +struct secure_display_context { + /* work to notify PSP TA*/ struct work_struct notify_ta_work; - /* To protect crc_rd_work carried fields*/ - spinlock_t crc_rd_work_lock; - struct drm_crtc *crtc; - uint8_t phy_inst; -}; -/* forward_work for driver to forward ROI to dmu */ -struct crc_fw_work { + /* work to forward ROI to dmcu/dmub */ struct work_struct forward_roi_work; - struct amdgpu_display_manager *dm; - struct dc_stream_state *stream; + + struct drm_crtc *crtc; + + /* Region of Interest (ROI) */ struct rect rect; - bool is_stop_cmd; }; #endif @@ -100,11 +95,12 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc); #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc); void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc); -struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void); +struct secure_display_context *amdgpu_dm_crtc_secure_display_create_contexts( + struct amdgpu_device *adev); #else #define amdgpu_dm_crc_window_is_activated(x) #define amdgpu_dm_crtc_handle_crc_window_irq(x) -#define amdgpu_dm_crtc_secure_display_create_work() +#define 
amdgpu_dm_crtc_secure_display_create_contexts() #endif #endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 22125daf9dcfe6..dc4f37240beb4c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -77,6 +77,9 @@ int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) struct amdgpu_device *adev = drm_to_adev(crtc->dev); int rc; + if (acrtc->otg_inst == -1) + return 0; + irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst; rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; @@ -105,8 +108,7 @@ static void vblank_control_worker(struct work_struct *work) else if (dm->active_vblank_irq_count) dm->active_vblank_irq_count--; - dc_allow_idle_optimizations( - dm->dc, dm->active_vblank_irq_count == 0 ? true : false); + dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0); DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); @@ -152,6 +154,9 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) struct vblank_control_work *work; int rc = 0; + if (acrtc->otg_inst == -1) + goto skip; + if (enable) { /* vblank irq on -> Only need vupdate irq in vrr mode */ if (amdgpu_dm_vrr_active(acrtc_state)) @@ -169,6 +174,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) return -EBUSY; +skip: if (amdgpu_in_reset(adev)) return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 461037a3dd7566..09a3efa517da99 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -34,9 +34,9 @@ #include "dmub/dmub_srv.h" #include "resource.h" #include "dsc.h" -#include "dc_link_dp.h" #include "link_hwss.h" #include "dc/dc_dmub_srv.h" +#include "link/protocols/link_dp_capability.h" #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY #include "amdgpu_dm_psr.h" @@ -419,67 +419,38 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf, return result; } -static int dp_lttpr_status_show(struct seq_file *m, void *d) +static int dp_lttpr_status_show(struct seq_file *m, void *unused) { - char *data; - struct amdgpu_dm_connector *connector = file_inode(m->file)->i_private; - struct dc_link *link = connector->dc_link; - uint32_t read_size = 1; - uint8_t repeater_count = 0; + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = + to_amdgpu_dm_connector(connector); + struct dc_lttpr_caps caps = aconnector->dc_link->dpcd_caps.lttpr_caps; - data = kzalloc(read_size, GFP_KERNEL); - if (!data) - return 0; + if (connector->status != connector_status_connected) + return -ENODEV; - dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0002, data, read_size); + seq_printf(m, "phy repeater count: %u (raw: 0x%x)\n", + dp_parse_lttpr_repeater_count(caps.phy_repeater_cnt), + caps.phy_repeater_cnt); - switch ((uint8_t)*data) { - case 0x80: - repeater_count = 1; - break; - case 0x40: - repeater_count = 2; - break; - case 0x20: - repeater_count = 3; - break; - case 0x10: - repeater_count = 4; - break; - case 0x8: - repeater_count = 5; - break; - case 0x4: - repeater_count = 6; - break; - case 0x2: - repeater_count = 7; + seq_puts(m, "phy repeater mode: "); + + switch (caps.mode) { + case 
DP_PHY_REPEATER_MODE_TRANSPARENT: + seq_puts(m, "transparent"); break; - case 0x1: - repeater_count = 8; + case DP_PHY_REPEATER_MODE_NON_TRANSPARENT: + seq_puts(m, "non-transparent"); break; - case 0x0: - repeater_count = 0; + case 0x00: + seq_puts(m, "non lttpr"); break; default: - repeater_count = (uint8_t)*data; + seq_printf(m, "read error (raw: 0x%x)", caps.mode); break; } - seq_printf(m, "phy repeater count: %d\n", repeater_count); - - dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0003, data, read_size); - - if ((uint8_t)*data == 0x55) - seq_printf(m, "phy repeater mode: transparent\n"); - else if ((uint8_t)*data == 0xAA) - seq_printf(m, "phy repeater mode: non-transparent\n"); - else if ((uint8_t)*data == 0x00) - seq_printf(m, "phy repeater mode: non lttpr\n"); - else - seq_printf(m, "phy repeater mode: read error\n"); - - kfree(data); + seq_puts(m, "\n"); return 0; } @@ -1192,7 +1163,7 @@ static int dp_dsc_fec_support_show(struct seq_file *m, void *data) break; } dpcd_caps = aconnector->dc_link->dpcd_caps; - if (aconnector->port) { + if (aconnector->mst_output_port) { /* aconnector sets dsc_aux during get_modes call * if MST connector has it means it can either * enable DSC on the sink device or on MST branch @@ -1279,14 +1250,14 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf, mutex_lock(&aconnector->hpd_lock); /* Don't support for mst end device*/ - if (aconnector->mst_port) { + if (aconnector->mst_root) { mutex_unlock(&aconnector->hpd_lock); return -EINVAL; } if (param[0] == 1) { - if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type) && + if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type) && new_connection_type != dc_connection_none) goto unlock; @@ -1323,7 +1294,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf, /* If the aconnector is the root node in mst topology */ if (aconnector->mst_mgr.mst_state == true) - reset_cur_dp_mst_topology(link); + dc_link_reset_cur_dp_mst_topology(link); drm_modeset_lock_all(dev); dm_restore_drm_connector_state(dev, connector); @@ -1375,16 +1346,11 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && + if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } - if (!pipe_ctx) { - kfree(rd_buf); - return -ENXIO; - } - dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); @@ -1481,12 +1447,12 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && + if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } - if (!pipe_ctx || !pipe_ctx->stream) + if (!pipe_ctx->stream) goto done; // Get CRTC state @@ -1566,16 +1532,11 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && + if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } - if (!pipe_ctx) { - kfree(rd_buf); - return -ENXIO; - } - dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); @@ -1670,12 +1631,12 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user 
*buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && + if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } - if (!pipe_ctx || !pipe_ctx->stream) + if (!pipe_ctx->stream) goto done; // Safely get CRTC state @@ -1755,16 +1716,11 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && + if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } - if (!pipe_ctx) { - kfree(rd_buf); - return -ENXIO; - } - dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); @@ -1859,12 +1815,12 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && + if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } - if (!pipe_ctx || !pipe_ctx->stream) + if (!pipe_ctx->stream) goto done; // Get CRTC state @@ -1940,16 +1896,11 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && + if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } - if (!pipe_ctx) { - kfree(rd_buf); - return -ENXIO; - } - dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); @@ -2041,12 +1992,12 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && + if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } - if (!pipe_ctx || !pipe_ctx->stream) + if (!pipe_ctx->stream) goto done; // Get CRTC state @@ -2120,16 +2071,11 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && + if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } - if (!pipe_ctx) { - kfree(rd_buf); - return -ENXIO; - } - dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); @@ -2181,16 +2127,11 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && + if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } - if (!pipe_ctx) { - kfree(rd_buf); - return -ENXIO; - } - dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); @@ -2257,16 +2198,11 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && + if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } - if (!pipe_ctx) { - kfree(rd_buf); - return -ENXIO; - } - dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); @@ -2333,16 +2269,11 @@ 
static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && + if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } - if (!pipe_ctx) { - kfree(rd_buf); - return -ENXIO; - } - dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); @@ -2578,13 +2509,13 @@ static int dp_is_mst_connector_show(struct seq_file *m, void *unused) if (aconnector->mst_mgr.mst_state) { role = "root"; - } else if (aconnector->mst_port && - aconnector->mst_port->mst_mgr.mst_state) { + } else if (aconnector->mst_root && + aconnector->mst_root->mst_mgr.mst_state) { role = "end"; - mgr = &aconnector->mst_port->mst_mgr; - port = aconnector->port; + mgr = &aconnector->mst_root->mst_mgr; + port = aconnector->mst_output_port; drm_modeset_lock(&mgr->base.lock, NULL); if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING && @@ -3245,46 +3176,24 @@ DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_end_fops, crc_win_y_end_get, */ static int crc_win_update_set(void *data, u64 val) { - struct drm_crtc *new_crtc = data; - struct drm_crtc *old_crtc = NULL; - struct amdgpu_crtc *new_acrtc, *old_acrtc; - struct amdgpu_device *adev = drm_to_adev(new_crtc->dev); - struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk; - - if (!crc_rd_wrk) - return 0; + struct drm_crtc *crtc = data; + struct amdgpu_crtc *acrtc; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); if (val) { - new_acrtc = to_amdgpu_crtc(new_crtc); + acrtc = to_amdgpu_crtc(crtc); mutex_lock(&adev->dm.dc_lock); /* PSR may write to OTG CRC window control register, * so close it before starting secure_display. */ - amdgpu_dm_psr_disable(new_acrtc->dm_irq_params.stream); + amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream); spin_lock_irq(&adev_to_drm(adev)->event_lock); - spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); - if (crc_rd_wrk->crtc) { - old_crtc = crc_rd_wrk->crtc; - old_acrtc = to_amdgpu_crtc(old_crtc); - } - if (old_crtc && old_crtc != new_crtc) { - old_acrtc->dm_irq_params.window_param.activated = false; - old_acrtc->dm_irq_params.window_param.update_win = false; - old_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0; + acrtc->dm_irq_params.window_param.activated = true; + acrtc->dm_irq_params.window_param.update_win = true; + acrtc->dm_irq_params.window_param.skip_frame_cnt = 0; - new_acrtc->dm_irq_params.window_param.activated = true; - new_acrtc->dm_irq_params.window_param.update_win = true; - new_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0; - crc_rd_wrk->crtc = new_crtc; - } else { - new_acrtc->dm_irq_params.window_param.activated = true; - new_acrtc->dm_irq_params.window_param.update_win = true; - new_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0; - crc_rd_wrk->crtc = new_crtc; - } - spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); spin_unlock_irq(&adev_to_drm(adev)->event_lock); mutex_unlock(&adev->dm.dc_lock); } @@ -3453,12 +3362,12 @@ static int trigger_hpd_mst_set(void *data, u64 val) if (!aconnector->dc_link) continue; - if (!aconnector->mst_port) + if (!aconnector->mst_root) continue; link = aconnector->dc_link; - dp_receiver_power_ctrl(link, false); - drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_port->mst_mgr, false); + dc_link_dp_receiver_power_ctrl(link, false); + drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_root->mst_mgr, false); link->mst_stream_alloc_table.stream_count = 0; memset(link->mst_stream_alloc_table.stream_allocations, 
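/*
 * Illustrative sketch of the simplified secure-display arming in
 * crc_win_update_set() above: with the old crc_rd_wrk hand-off removed,
 * arming a CRC window is plain per-CRTC state, taken under the same
 * dc_lock/event_lock pair as in the hunk. The helper name is hypothetical.
 */
static void sketch_arm_crc_window(struct amdgpu_crtc *acrtc)
{
	acrtc->dm_irq_params.window_param.activated = true;	/* start forwarding CRCs */
	acrtc->dm_irq_params.window_param.update_win = true;	/* reprogram the window */
	acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;	/* no warm-up frames */
}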
0, sizeof(link->mst_stream_alloc_table.stream_allocations)); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index a7fd98f57f94ca..8e572f07ec4760 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -170,9 +170,10 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, struct mod_hdcp_display *display = &hdcp_work[link_index].display; struct mod_hdcp_link *link = &hdcp_work[link_index].link; struct mod_hdcp_display_query query; + unsigned int conn_index = aconnector->base.index; mutex_lock(&hdcp_w->mutex); - hdcp_w->aconnector = aconnector; + hdcp_w->aconnector[conn_index] = aconnector; query.display = NULL; mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query); @@ -204,7 +205,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); } else { display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; - hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; cancel_delayed_work(&hdcp_w->property_validate_dwork); } @@ -223,9 +224,10 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; struct drm_connector_state *conn_state = aconnector->base.state; + unsigned int conn_index = aconnector->base.index; mutex_lock(&hdcp_w->mutex); - hdcp_w->aconnector = aconnector; + hdcp_w->aconnector[conn_index] = aconnector; /* the removal of display will invoke auth reset -> hdcp destroy and * we'd expect the Content Protection (CP) property changed back to @@ -247,13 +249,18 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + unsigned int conn_index; mutex_lock(&hdcp_w->mutex); mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output); cancel_delayed_work(&hdcp_w->property_validate_dwork); - hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { + hdcp_w->encryption_status[conn_index] = + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + } process_output(hdcp_w); @@ -290,49 +297,80 @@ static void event_callback(struct work_struct *work) } + static void event_property_update(struct work_struct *work) { - struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work); - struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; - struct drm_device *dev = hdcp_work->aconnector->base.dev; + struct amdgpu_dm_connector *aconnector = NULL; + struct drm_device *dev; long ret; + unsigned int conn_index; + struct drm_connector *connector; + struct drm_connector_state *conn_state; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - mutex_lock(&hdcp_work->mutex); + for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { + aconnector = hdcp_work->aconnector[conn_index]; + if (!aconnector) + continue; - if (aconnector->base.state && aconnector->base.state->commit) { - ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ); + connector = &aconnector->base; - if (ret == 0) { - DRM_ERROR("HDCP state unknown! 
Setting it to DESIRED"); - hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; - } - } + /* check if display connected */ + if (connector->status != connector_status_connected) + continue; - if (aconnector->base.state) { - if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { - if (aconnector->base.state->hdcp_content_type == + conn_state = aconnector->base.state; + + if (!conn_state) + continue; + + dev = connector->dev; + + if (!dev) + continue; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + mutex_lock(&hdcp_work->mutex); + + if (conn_state->commit) { + ret = wait_for_completion_interruptible_timeout( + &conn_state->commit->hw_done, 10 * HZ); + if (ret == 0) { + DRM_ERROR( + "HDCP state unknown! Setting it to DESIRED"); + hdcp_work->encryption_status[conn_index] = + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + } + } + if (hdcp_work->encryption_status[conn_index] != + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { + if (conn_state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 && - hdcp_work->encryption_status <= - MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) - drm_hdcp_update_content_protection(&aconnector->base, + hdcp_work->encryption_status[conn_index] <= + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) { + + DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n"); + drm_hdcp_update_content_protection( + connector, DRM_MODE_CONTENT_PROTECTION_ENABLED); - else if (aconnector->base.state->hdcp_content_type == + } else if (conn_state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 && - hdcp_work->encryption_status == - MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) - drm_hdcp_update_content_protection(&aconnector->base, + hdcp_work->encryption_status[conn_index] == + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) { + drm_hdcp_update_content_protection( + connector, DRM_MODE_CONTENT_PROTECTION_ENABLED); + } } else { - drm_hdcp_update_content_protection(&aconnector->base, - DRM_MODE_CONTENT_PROTECTION_DESIRED); + DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n"); + drm_hdcp_update_content_protection( + connector, DRM_MODE_CONTENT_PROTECTION_DESIRED); + } + mutex_unlock(&hdcp_work->mutex); + drm_modeset_unlock(&dev->mode_config.connection_mutex); } - - mutex_unlock(&hdcp_work->mutex); - drm_modeset_unlock(&dev->mode_config.connection_mutex); } static void event_property_validate(struct work_struct *work) @@ -340,19 +378,47 @@ static void event_property_validate(struct work_struct *work) struct hdcp_workqueue *hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork); struct mod_hdcp_display_query query; - struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; - - if (!aconnector) - return; + struct amdgpu_dm_connector *aconnector; + unsigned int conn_index; mutex_lock(&hdcp_work->mutex); - query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; - mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query); + for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; + conn_index++) { + aconnector = hdcp_work->aconnector[conn_index]; + + if (!aconnector) + continue; + + /* check if display connected */ + if (aconnector->base.status != connector_status_connected) + continue; - if (query.encryption_status != hdcp_work->encryption_status) { - hdcp_work->encryption_status = query.encryption_status; - schedule_work(&hdcp_work->property_update_work); + if (!aconnector->base.state) + continue; + + query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + 
mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, + &query); + + DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n", + aconnector->base.index, + aconnector->base.state->content_protection, + query.encryption_status, + hdcp_work->encryption_status[conn_index]); + + if (query.encryption_status != + hdcp_work->encryption_status[conn_index]) { + DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n", + hdcp_work->encryption_status[conn_index], query.encryption_status); + + hdcp_work->encryption_status[conn_index] = + query.encryption_status; + + DRM_DEBUG_DRIVER("[HDCP_DM] trigger property_update_work\n"); + + schedule_work(&hdcp_work->property_update_work); + } } mutex_unlock(&hdcp_work->mutex); @@ -686,6 +752,13 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c; hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd; hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd; + + memset(hdcp_work[i].aconnector, 0, + sizeof(struct amdgpu_dm_connector *) * + AMDGPU_DM_MAX_DISPLAY_INDEX); + memset(hdcp_work[i].encryption_status, 0, + sizeof(enum mod_hdcp_encryption_status) * + AMDGPU_DM_MAX_DISPLAY_INDEX); } cp_psp->funcs.update_stream_config = update_config; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h index 09294ff122fead..69b445b011c8cb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -43,7 +43,7 @@ struct hdcp_workqueue { struct delayed_work callback_dwork; struct delayed_work watchdog_timer_dwork; struct delayed_work property_validate_dwork; - struct amdgpu_dm_connector *aconnector; + struct amdgpu_dm_connector *aconnector[AMDGPU_DM_MAX_DISPLAY_INDEX]; struct mutex mutex; struct mod_hdcp hdcp; @@ -51,7 +51,20 @@ struct hdcp_workqueue { struct mod_hdcp_display display; struct mod_hdcp_link link; - enum mod_hdcp_encryption_status encryption_status; + enum mod_hdcp_encryption_status encryption_status[AMDGPU_DM_MAX_DISPLAY_INDEX]; + /* when a display is unplugged from the mst hub, the connector will be + * destroyed within dm_dp_mst_connector_destroy. The connector's + * hdcp properties, like type, undesired, desired, enabled, + * will be lost. So, save the hdcp properties into hdcp_work within + * amdgpu_dm_atomic_commit_tail.
if the same display is + * plugged back with the same display index, its hdcp properties + * will be retrieved from hdcp_work within dm_dp_mst_get_modes + */ + /* un-desired, desired, enabled */ + unsigned int content_protection[AMDGPU_DM_MAX_DISPLAY_INDEX]; + /* hdcp1.x, hdcp2.x */ + unsigned int hdcp_content_type[AMDGPU_DM_MAX_DISPLAY_INDEX]; + uint8_t max_link; uint8_t *srm; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 5cff56bb8f5602..6fdc2027c2b476 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -38,6 +38,8 @@ #include "amdgpu_dm.h" #include "amdgpu_dm_irq.h" #include "amdgpu_dm_mst_types.h" +#include "dpcd_defs.h" +#include "dc/inc/core_types.h" #include "dm_helpers.h" #include "ddc_service_types.h" @@ -195,18 +197,18 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( * that blocks before commit guaranteeing that the state * is not gonna be swapped while still in use in commit tail */ - if (!aconnector || !aconnector->mst_port) + if (!aconnector || !aconnector->mst_root) return false; - mst_mgr = &aconnector->mst_port->mst_mgr; + mst_mgr = &aconnector->mst_root->mst_mgr; mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); /* It's OK for this to fail */ - payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port); + payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); if (enable) drm_dp_add_payload_part1(mst_mgr, mst_state, payload); else - drm_dp_remove_payload(mst_mgr, mst_state, payload); + drm_dp_remove_payload(mst_mgr, mst_state, payload, payload); /* mst_mgr->payloads are VC payload notify MST branch using DPCD or + * AUX message.
The sequence is slot 1-63 allocated sequence for each @@ -247,10 +249,10 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; - if (!aconnector || !aconnector->mst_port) + if (!aconnector || !aconnector->mst_root) return ACT_FAILED; - mst_mgr = &aconnector->mst_port->mst_mgr; + mst_mgr = &aconnector->mst_root->mst_mgr; if (!mst_mgr->mst_state) return ACT_FAILED; @@ -274,22 +276,27 @@ bool dm_helpers_dp_mst_send_payload_allocation( struct drm_dp_mst_atomic_payload *payload; enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD; enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD; + int ret = 0; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; - if (!aconnector || !aconnector->mst_port) + if (!aconnector || !aconnector->mst_root) return false; - mst_mgr = &aconnector->mst_port->mst_mgr; + mst_mgr = &aconnector->mst_root->mst_mgr; mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); - payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port); + payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); + if (!enable) { set_flag = MST_CLEAR_ALLOCATED_PAYLOAD; clr_flag = MST_ALLOCATE_NEW_PAYLOAD; } - if (enable && drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload)) { + if (enable) + ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload); + + if (ret) { amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, false); } else { @@ -396,6 +403,7 @@ bool dm_helpers_dp_mst_start_top_mgr( bool boot) { struct amdgpu_dm_connector *aconnector = link->priv; + int ret; if (!aconnector) { DRM_ERROR("Failed to find connector for link!"); @@ -411,7 +419,16 @@ bool dm_helpers_dp_mst_start_top_mgr( DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n", aconnector, aconnector->base.base.id); - return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0); + ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); + if (ret < 0) { + DRM_ERROR("DM_MST: Failed to set the device into MST mode!"); + return false; + } + + DRM_INFO("DM_MST: DP%x, %d-lane link detected\n", aconnector->mst_mgr.dpcd[0], + aconnector->mst_mgr.dpcd[2] & DP_MAX_LANE_COUNT_MASK); + + return true; } bool dm_helpers_dp_mst_stop_top_mgr( @@ -710,7 +727,7 @@ bool dm_helpers_dp_write_dsc_enable( aconnector->dsc_aux, stream, enable_dsc); #endif - port = aconnector->port; + port = aconnector->mst_output_port; if (enable) { if (port->passthrough_aux) { @@ -987,6 +1004,128 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream) sizeof(new_downspread)); } +bool dm_helpers_dp_handle_test_pattern_request( + struct dc_context *ctx, + const struct dc_link *link, + union link_test_pattern dpcd_test_pattern, + union test_misc dpcd_test_params) +{ + enum dp_test_pattern test_pattern; + enum dp_test_pattern_color_space test_pattern_color_space = + DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED; + enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED; + enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED; + struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; + struct pipe_ctx *pipe_ctx = NULL; + struct amdgpu_dm_connector *aconnector = link->priv; + int i; + + for (i = 0; i < MAX_PIPES; i++) { + if (pipes[i].stream == NULL) + continue; + + if (pipes[i].stream->link == link && !pipes[i].top_pipe && + !pipes[i].prev_odm_pipe) { + pipe_ctx = &pipes[i]; + 
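/*
 * Worked decode of the DPCD test-request fields handled below, with the
 * bit meanings taken as an assumption from the switches that follow:
 * BPC values 0/1/2/3 request 6/8/10/12 bits per component, and
 * CLR_FORMAT values 0/1/2 request RGB/YCbCr422/YCbCr444. A TEST_MISC
 * byte with BPC=2 and CLR_FORMAT=0 therefore asks for 10-bit RGB, which
 * maps to COLOR_DEPTH_101010 plus PIXEL_ENCODING_RGB.
 */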
break; + } + } + + if (pipe_ctx == NULL) + return false; + + switch (dpcd_test_pattern.bits.PATTERN) { + case LINK_TEST_PATTERN_COLOR_RAMP: + test_pattern = DP_TEST_PATTERN_COLOR_RAMP; + break; + case LINK_TEST_PATTERN_VERTICAL_BARS: + test_pattern = DP_TEST_PATTERN_VERTICAL_BARS; + break; /* black and white */ + case LINK_TEST_PATTERN_COLOR_SQUARES: + test_pattern = (dpcd_test_params.bits.DYN_RANGE == + TEST_DYN_RANGE_VESA ? + DP_TEST_PATTERN_COLOR_SQUARES : + DP_TEST_PATTERN_COLOR_SQUARES_CEA); + break; + default: + test_pattern = DP_TEST_PATTERN_VIDEO_MODE; + break; + } + + if (dpcd_test_params.bits.CLR_FORMAT == 0) + test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB; + else + test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ? + DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 : + DP_TEST_PATTERN_COLOR_SPACE_YCBCR601; + + switch (dpcd_test_params.bits.BPC) { + case 0: // 6 bits + requestColorDepth = COLOR_DEPTH_666; + break; + case 1: // 8 bits + requestColorDepth = COLOR_DEPTH_888; + break; + case 2: // 10 bits + requestColorDepth = COLOR_DEPTH_101010; + break; + case 3: // 12 bits + requestColorDepth = COLOR_DEPTH_121212; + break; + default: + break; + } + + switch (dpcd_test_params.bits.CLR_FORMAT) { + case 0: + requestPixelEncoding = PIXEL_ENCODING_RGB; + break; + case 1: + requestPixelEncoding = PIXEL_ENCODING_YCBCR422; + break; + case 2: + requestPixelEncoding = PIXEL_ENCODING_YCBCR444; + break; + default: + requestPixelEncoding = PIXEL_ENCODING_RGB; + break; + } + + if ((requestColorDepth != COLOR_DEPTH_UNDEFINED + && pipe_ctx->stream->timing.display_color_depth != requestColorDepth) + || (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED + && pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) { + DC_LOG_DEBUG("%s: original bpc %d pix encoding %d, changing to %d %d\n", + __func__, + pipe_ctx->stream->timing.display_color_depth, + pipe_ctx->stream->timing.pixel_encoding, + requestColorDepth, + requestPixelEncoding); + pipe_ctx->stream->timing.display_color_depth = requestColorDepth; + pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding; + + dc_link_update_dsc_config(pipe_ctx); + + aconnector->timing_changed = true; + /* store current timing */ + if (aconnector->timing_requested) + *aconnector->timing_requested = pipe_ctx->stream->timing; + else + DC_LOG_ERROR("%s: timing storage failed\n", __func__); + + } + + dc_link_dp_set_test_pattern( + (struct dc_link *) link, + test_pattern, + test_pattern_color_space, + NULL, + NULL, + 0); + + return false; +} + void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz) { // TODO @@ -1004,3 +1143,36 @@ void dm_helpers_dp_mst_update_branch_bandwidth( // TODO } +static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id) +{ + bool ret_val = false; + + switch (branch_dev_id) { + case DP_BRANCH_DEVICE_ID_0060AD: + ret_val = true; + break; + default: + break; + } + + return ret_val; +} + +enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link) +{ + struct dpcd_caps *dpcd_caps = &link->dpcd_caps; + enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; + + switch (dpcd_caps->dongle_type) { + case DISPLAY_DONGLE_DP_HDMI_CONVERTER: + if (dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true && + dpcd_caps->allow_invalid_MSA_timing_param == true && + dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id)) + as_type = FREESYNC_TYPE_PCON_IN_WHITELIST; + break; + default: + break; + } + + return as_type; +} diff --git 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index abdbd4352f6f3c..e25e1b2bf19493 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -32,15 +32,16 @@ #include "amdgpu_dm.h" #include "amdgpu_dm_mst_types.h" +#ifdef CONFIG_DRM_AMD_DC_HDCP +#include "amdgpu_dm_hdcp.h" +#endif + #include "dc.h" #include "dm_helpers.h" -#include "dc_link_ddc.h" -#include "dc_link_dp.h" #include "ddc_service_types.h" #include "dpcd_defs.h" -#include "i2caux_interface.h" #include "dmub_cmd.h" #if defined(CONFIG_DEBUG_FS) #include "amdgpu_dm_debugfs.h" @@ -49,7 +50,7 @@ #include "dc/dcn20/dcn20_resource.h" bool is_timing_changed(struct dc_stream_state *cur_stream, struct dc_stream_state *new_stream); - +#define PEAK_FACTOR_X1000 1006 static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) @@ -132,7 +133,7 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) kfree(aconnector->edid); drm_connector_cleanup(connector); - drm_dp_mst_put_port_malloc(aconnector->port); + drm_dp_mst_put_port_malloc(aconnector->mst_output_port); kfree(aconnector); } @@ -144,7 +145,7 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector) int r; r = drm_dp_mst_connector_late_register(connector, - amdgpu_dm_connector->port); + amdgpu_dm_connector->mst_output_port); if (r < 0) return r; @@ -160,8 +161,8 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct drm_dp_mst_port *port = aconnector->port; - struct amdgpu_dm_connector *root = aconnector->mst_port; + struct drm_dp_mst_port *port = aconnector->mst_output_port; + struct amdgpu_dm_connector *root = aconnector->mst_root; struct dc_link *dc_link = aconnector->dc_link; struct dc_sink *dc_sink = aconnector->dc_sink; @@ -176,6 +177,9 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) if (dc_link->sink_count) dc_link_remove_remote_sink(dc_link, dc_sink); + DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n", + dc_sink, dc_link->sink_count); + dc_sink_release(dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; @@ -211,7 +215,7 @@ bool needs_dsc_aux_workaround(struct dc_link *link) static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector) { struct dc_sink *dc_sink = aconnector->dc_sink; - struct drm_dp_mst_port *port = aconnector->port; + struct drm_dp_mst_port *port = aconnector->mst_output_port; u8 dsc_caps[16] = { 0 }; u8 dsc_branch_dec_caps_raw[3] = { 0 }; // DSC branch decoder caps 0xA0 ~ 0xA2 u8 *dsc_branch_dec_caps = NULL; @@ -229,7 +233,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto */ if (!aconnector->dsc_aux && !port->parent->port_parent && needs_dsc_aux_workaround(aconnector->dc_link)) - aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux; + aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux; if (!aconnector->dsc_aux) return false; @@ -279,7 +283,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (!aconnector->edid) { struct edid *edid; - edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); + edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port); if (!edid) { amdgpu_dm_set_mst_status(&aconnector->mst_status, @@ -307,6 +311,9 @@ 
static int dm_dp_mst_get_modes(struct drm_connector *connector) return 0; } + DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n", + dc_sink, aconnector->dc_link->sink_count); + dc_sink->priv = aconnector; aconnector->dc_sink = dc_sink; } @@ -340,10 +347,35 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) return 0; } + DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n", + dc_sink, aconnector->dc_link->sink_count); + dc_sink->priv = aconnector; /* dc_link_add_remote_sink returns a new reference */ aconnector->dc_sink = dc_sink; + /* when a display is unplugged from the mst hub, the connector will be + * destroyed within dm_dp_mst_connector_destroy. The connector's + * hdcp properties, like type, undesired, desired, enabled, + * will be lost. So, save the hdcp properties into hdcp_work within + * amdgpu_dm_atomic_commit_tail. If the same display is + * plugged back with the same display index, its hdcp properties + * will be retrieved from hdcp_work within dm_dp_mst_get_modes + */ +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (aconnector->dc_sink && connector->state) { + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; + struct hdcp_workqueue *hdcp_w = &hdcp_work[aconnector->dc_link->link_index]; + + connector->state->hdcp_content_type = + hdcp_w->hdcp_content_type[connector->index]; + connector->state->content_protection = + hdcp_w->content_protection[connector->index]; + } +#endif + if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps( connector, aconnector->edid); @@ -386,15 +418,15 @@ dm_dp_mst_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct amdgpu_dm_connector *master = aconnector->mst_port; - struct drm_dp_mst_port *port = aconnector->port; + struct amdgpu_dm_connector *master = aconnector->mst_root; + struct drm_dp_mst_port *port = aconnector->mst_output_port; int connection_status; if (drm_connector_is_unregistered(connector)) return connector_status_disconnected; connection_status = drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, - aconnector->port); + aconnector->mst_output_port); if (port->pdt != DP_PEER_DEVICE_NONE && !port->dpcd_rev) { uint8_t dpcd_rev; @@ -435,6 +467,9 @@ dm_dp_mst_detect(struct drm_connector *connector, if (aconnector->dc_link->sink_count) dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); + DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n", + aconnector->dc_link, aconnector->dc_link->sink_count); + dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; @@ -451,8 +486,8 @@ static int dm_dp_mst_atomic_check(struct drm_connector *connector, struct drm_atomic_state *state) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_port->mst_mgr; - struct drm_dp_mst_port *mst_port = aconnector->port; + struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_root->mst_mgr; + struct drm_dp_mst_port *mst_port = aconnector->mst_output_port; return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port); } @@ -514,8 +549,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, return NULL; connector = &aconnector->base; - aconnector->port = port; - aconnector->mst_port = master; + aconnector->mst_output_port = port; + aconnector->mst_root = master;
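/*
 * Hypothetical counterpart to the restore path in dm_dp_mst_get_modes()
 * above: the save half described by the comment would stash the
 * connector's Content Protection state into the per-link workqueue at
 * commit time, roughly as below, assuming the content_protection and
 * hdcp_content_type arrays added in amdgpu_dm_hdcp.h.
 */
static void sketch_save_hdcp_state(struct hdcp_workqueue *hdcp_w,
				   struct drm_connector *connector)
{
	hdcp_w->content_protection[connector->index] =
		connector->state->content_protection;
	hdcp_w->hdcp_content_type[connector->index] =
		connector->state->hdcp_content_type;
}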
amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_PROBE, true); @@ -916,7 +951,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (!aconnector) continue; - if (!aconnector->port) + if (!aconnector->mst_output_port) continue; stream->timing.flags.DSC = 0; @@ -924,7 +959,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, params[count].timing = &stream->timing; params[count].sink = stream->sink; params[count].aconnector = aconnector; - params[count].port = aconnector->port; + params[count].port = aconnector->mst_output_port; params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable; if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE) debugfs_overwrite = true; @@ -1133,7 +1168,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; - if (!aconnector || !aconnector->dc_sink || !aconnector->port) + if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port) continue; if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) @@ -1148,7 +1183,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, if (!is_dsc_need_re_compute(state, dc_state, stream->link)) continue; - mst_mgr = aconnector->port->mgr; + mst_mgr = aconnector->mst_output_port->mgr; ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr, &link_vars_start_index); if (ret != 0) @@ -1194,7 +1229,7 @@ static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; - if (!aconnector || !aconnector->dc_sink || !aconnector->port) + if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port) continue; if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) @@ -1206,7 +1241,7 @@ static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, if (!is_dsc_need_re_compute(state, dc_state, stream->link)) continue; - mst_mgr = aconnector->port->mgr; + mst_mgr = aconnector->mst_output_port->mgr; ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr, &link_vars_start_index); if (ret != 0) @@ -1421,8 +1456,8 @@ enum dc_status dm_dp_mst_is_port_support_mode( * with DSC enabled. 
*/ if (is_dsc_common_config_possible(stream, &bw_range) && - aconnector->port->passthrough_aux) { - mst_mgr = aconnector->port->mgr; + aconnector->mst_output_port->passthrough_aux) { + mst_mgr = aconnector->mst_output_port->mgr; mutex_lock(&mst_mgr->lock); cur_link_settings = stream->link->verified_link_cap; @@ -1430,7 +1465,7 @@ enum dc_status dm_dp_mst_is_port_support_mode( upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings ); - down_link_bw_in_kbps = kbps_from_pbn(aconnector->port->full_pbn); + down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn); /* pick the bottleneck */ end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, @@ -1454,7 +1489,7 @@ enum dc_status dm_dp_mst_is_port_support_mode( bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3; pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false); - if (pbn > aconnector->port->full_pbn) + if (pbn > aconnector->mst_output_port->full_pbn) return DC_FAIL_BANDWIDTH_VALIDATE; #if defined(CONFIG_DRM_AMD_DC_DCN) } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 3c50b3ff795417..28fb1f02591aba 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -67,7 +67,16 @@ static const uint32_t overlay_formats[] = { DRM_FORMAT_RGBA8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ABGR8888, - DRM_FORMAT_RGB565 + DRM_FORMAT_RGB565, + DRM_FORMAT_NV21, + DRM_FORMAT_NV12, + DRM_FORMAT_P010 +}; + +static const uint32_t video_formats[] = { + DRM_FORMAT_NV21, + DRM_FORMAT_NV12, + DRM_FORMAT_P010 }; static const u32 cursor_formats[] = { @@ -1616,3 +1625,14 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, return 0; } +bool is_video_format(uint32_t format) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(video_formats); i++) + if (format == video_formats[i]) + return true; + + return false; +} + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h index 286981a2dd403b..a4bee8528a51b2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h @@ -62,4 +62,5 @@ void fill_blending_from_plane_state(const struct drm_plane_state *plane_state, bool *per_pixel_alpha, bool *pre_multiplied_alpha, bool *global_alpha, int *global_alpha_value); +bool is_video_format(uint32_t format); #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 26291db0a3cf6c..d647f68fd5630e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -122,6 +122,9 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) psr_config.allow_multi_disp_optimizations = (amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT); + if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) + return false; + ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); } diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index b9effadfc4bb7f..94f156d572208a 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -64,9 +64,8 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI include $(AMD_DC) -DISPLAY_CORE = dc.o dc_stat.o 
dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ -dc_surface.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \ -dc_link_enc_cfg.o dc_link_dpia.o dc_link_dpcd.o +DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ +dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o DISPLAY_CORE += dc_vm_helper.o diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index a1a00f432168ef..27af9d3c2b73d6 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -33,7 +33,6 @@ #include "include/gpio_service_interface.h" #include "include/grph_object_ctrl_defs.h" #include "include/bios_parser_interface.h" -#include "include/i2caux_interface.h" #include "include/logger_interface.h" #include "command_table.h" diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 074e70a5c458e3..e381de2429fa63 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -32,7 +32,6 @@ #include "dc_bios_types.h" #include "include/grph_object_ctrl_defs.h" #include "include/bios_parser_interface.h" -#include "include/i2caux_interface.h" #include "include/logger_interface.h" #include "command_table2.h" @@ -1698,14 +1697,15 @@ static enum bp_result bios_parser_enable_disp_power_gating( static enum bp_result bios_parser_enable_lvtma_control( struct dc_bios *dcb, uint8_t uc_pwr_on, - uint8_t panel_instance) + uint8_t panel_instance, + uint8_t bypass_panel_control_wait) { struct bios_parser *bp = BP_FROM_DCB(dcb); if (!bp->cmd_tbl.enable_lvtma_control) return BP_RESULT_FAILURE; - return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance); + return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance, bypass_panel_control_wait); } static bool bios_parser_is_accelerated_mode( @@ -2929,7 +2929,6 @@ static enum bp_result construct_integrated_info( struct atom_common_table_header *header; struct atom_data_revision revision; - struct clock_voltage_caps temp = {0, 0}; uint32_t i; uint32_t j; @@ -3032,14 +3031,8 @@ static enum bp_result construct_integrated_info( for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) { for (j = i; j > 0; --j) { if (info->disp_clk_voltage[j].max_supported_clk < - info->disp_clk_voltage[j-1].max_supported_clk - ) { - /* swap j and j - 1*/ - temp = info->disp_clk_voltage[j-1]; - info->disp_clk_voltage[j-1] = - info->disp_clk_voltage[j]; - info->disp_clk_voltage[j] = temp; - } + info->disp_clk_voltage[j-1].max_supported_clk) + swap(info->disp_clk_voltage[j-1], info->disp_clk_voltage[j]); } } diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index f52f7ff7ead4b6..1ef9e4053bb717 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -986,7 +986,8 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id) static enum bp_result enable_lvtma_control( struct bios_parser *bp, uint8_t uc_pwr_on, - uint8_t panel_instance); + uint8_t panel_instance, + uint8_t bypass_panel_control_wait); static void init_enable_lvtma_control(struct bios_parser *bp) { @@ -998,7 +999,8 @@ static void init_enable_lvtma_control(struct bios_parser *bp) static void enable_lvtma_control_dmcub( struct dc_dmub_srv *dmcub, uint8_t uc_pwr_on, - uint8_t panel_instance) + 
uint8_t panel_instance, + uint8_t bypass_panel_control_wait) { union dmub_rb_cmd cmd; @@ -1012,6 +1014,8 @@ static void enable_lvtma_control_dmcub( uc_pwr_on; cmd.lvtma_control.data.panel_inst = panel_instance; + cmd.lvtma_control.data.bypass_panel_control_wait = + bypass_panel_control_wait; dc_dmub_srv_cmd_queue(dmcub, &cmd); dc_dmub_srv_cmd_execute(dmcub); dc_dmub_srv_wait_idle(dmcub); @@ -1021,7 +1025,8 @@ static void enable_lvtma_control_dmcub( static enum bp_result enable_lvtma_control( struct bios_parser *bp, uint8_t uc_pwr_on, - uint8_t panel_instance) + uint8_t panel_instance, + uint8_t bypass_panel_control_wait) { enum bp_result result = BP_RESULT_FAILURE; @@ -1029,7 +1034,8 @@ static enum bp_result enable_lvtma_control( bp->base.ctx->dc->debug.dmub_command_table) { enable_lvtma_control_dmcub(bp->base.ctx->dmub_srv, uc_pwr_on, - panel_instance); + panel_instance, + bypass_panel_control_wait); return BP_RESULT_OK; } return result; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h index be060b4b87db6a..b6d09bf6cf72b6 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h @@ -96,7 +96,8 @@ struct cmd_tbl { struct bios_parser *bp, uint8_t id); enum bp_result (*enable_lvtma_control)(struct bios_parser *bp, uint8_t uc_pwr_on, - uint8_t panel_instance); + uint8_t panel_instance, + uint8_t bypass_panel_control_wait); }; void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index f276abb63bcd7c..69691daf4dbbd1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -29,6 +29,7 @@ #include "dc_types.h" #include "dccg.h" #include "clk_mgr_internal.h" +#include "link.h" #include "dce100/dce_clk_mgr.h" #include "dce110/dce110_clk_mgr.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 3ce0ee0d012f34..694a9d3d92aee4 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -577,8 +577,7 @@ void dcn3_clk_mgr_construct( void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr) { - if (clk_mgr->base.bw_params) - kfree(clk_mgr->base.bw_params); + kfree(clk_mgr->base.bw_params); if (clk_mgr->wm_range_table) dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index 1c0569b1dc8f0c..f9e2e0c3095e7c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -47,6 +47,7 @@ #include "dcn30/dcn30_clk_mgr.h" #include "dc_dmub_srv.h" +#include "link.h" #include "logger_types.h" #undef DC_LOGGER diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 20a06c04e4a1d6..89df7244b27286 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -48,7 +48,7 @@ #include "dcn31/dcn31_clk_mgr.h" #include "dc_dmub_srv.h" -#include "dc_link_dp.h" +#include "link.h" #include "dcn314_smu.h" diff --git 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c index f47cfe6b42bd2e..0765334f082598 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c @@ -146,6 +146,9 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu && param == TABLE_WATERMARKS) DC_LOG_WARNING("Watermarks table not configured properly by SMU"); + else if (msg_id == VBIOSSMC_MSG_SetHardMinDcfclkByFreq || + msg_id == VBIOSSMC_MSG_SetMinDeepSleepDcfclk) + DC_LOG_WARNING("DCFCLK_DPM is not enabled by BIOS"); else ASSERT(0); REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index 07edd9777edfb1..a737782b2840c1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -46,7 +46,7 @@ #define DC_LOGGER \ clk_mgr->base.base.ctx->logger -#include "dc_link_dp.h" +#include "link.h" #define TO_CLK_MGR_DCN315(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn315, base) @@ -87,6 +87,16 @@ static int dcn315_get_active_display_cnt_wa( return display_count; } +static bool should_disable_otg(struct pipe_ctx *pipe) +{ + bool ret = true; + + if (pipe->stream->link->link_enc && pipe->stream->link->link_enc->funcs->is_dig_enabled && + pipe->stream->link->link_enc->funcs->is_dig_enabled(pipe->stream->link->link_enc)) + ret = false; + return ret; +} + static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable) { struct dc *dc = clk_mgr_base->ctx->dc; @@ -98,12 +108,16 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state if (pipe->top_pipe || pipe->prev_odm_pipe) continue; if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL || - dc_is_virtual_signal(pipe->stream->signal))) { - if (disable) { - pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); - reset_sync_context_for_pipe(dc, context, i); - } else - pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); + dc_is_virtual_signal(pipe->stream->signal))) { + + /* This w/a should not trigger when we have a dig active */ + if (should_disable_otg(pipe)) { + if (disable) { + pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); + reset_sync_context_for_pipe(dc, context, i); + } else + pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); + } } } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 3edc81e2d417ad..93db4dbee713e5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -39,7 +39,7 @@ #include "dcn316_smu.h" #include "dm_helpers.h" #include "dc_dmub_srv.h" -#include "dc_link_dp.h" +#include "link.h" // DCN316 this is CLK1 instance #define MAX_INSTANCE 7 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 200fcec1918615..61768bf726f8cb 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -33,7 +33,7 @@ #include "reg_helper.h" #include 
"core_types.h" #include "dm_helpers.h" -#include "dc_link_dp.h" +#include "link.h" #include "atomfirmware.h" #include "smu13_driver_if.h" @@ -255,6 +255,94 @@ static void dcn32_update_dppclk_dispclk_freq(struct clk_mgr_internal *clk_mgr, s } } +static void dcn32_update_clocks_update_dentist( + struct clk_mgr_internal *clk_mgr, + struct dc_state *context, + uint32_t old_dispclk_khz) +{ + uint32_t new_disp_divider = 0; + uint32_t old_disp_divider = 0; + uint32_t new_dispclk_wdivider = 0; + uint32_t old_dispclk_wdivider = 0; + uint32_t i; + + if (old_dispclk_khz == 0 || clk_mgr->base.clks.dispclk_khz == 0) + return; + + new_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz; + old_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz / old_dispclk_khz; + + new_dispclk_wdivider = dentist_get_did_from_divider(new_disp_divider); + old_dispclk_wdivider = dentist_get_did_from_divider(old_disp_divider); + + /* When changing divider to or from 127, some extra programming is required to prevent corruption */ + if (old_dispclk_wdivider == 127 && new_dispclk_wdivider != 127) { + for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + uint32_t fifo_level; + struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg; + struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; + int32_t N; + int32_t j; + + if (!pipe_ctx->stream) + continue; + /* Virtual encoders don't have this function */ + if (!stream_enc->funcs->get_fifo_cal_average_level) + continue; + fifo_level = stream_enc->funcs->get_fifo_cal_average_level( + stream_enc); + N = fifo_level / 4; + dccg->funcs->set_fifo_errdet_ovr_en( + dccg, + true); + for (j = 0; j < N - 4; j++) + dccg->funcs->otg_drop_pixel( + dccg, + pipe_ctx->stream_res.tg->inst); + dccg->funcs->set_fifo_errdet_ovr_en( + dccg, + false); + } + } else if (new_dispclk_wdivider == 127 && old_dispclk_wdivider != 127) { + /* request clock with 126 divider first */ + uint32_t temp_disp_divider = dentist_get_divider_from_did(126); + uint32_t temp_dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / temp_disp_divider; + + if (clk_mgr->smu_present) + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(temp_dispclk_khz)); + + for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg; + struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; + uint32_t fifo_level; + int32_t N; + int32_t j; + + if (!pipe_ctx->stream) + continue; + /* Virtual encoders don't have this function */ + if (!stream_enc->funcs->get_fifo_cal_average_level) + continue; + fifo_level = stream_enc->funcs->get_fifo_cal_average_level( + stream_enc); + N = fifo_level / 4; + dccg->funcs->set_fifo_errdet_ovr_en(dccg, true); + for (j = 0; j < 12 - N; j++) + dccg->funcs->otg_add_pixel(dccg, + pipe_ctx->stream_res.tg->inst); + dccg->funcs->set_fifo_errdet_ovr_en(dccg, false); + } + } + + /* do requested DISPCLK updates*/ + if (clk_mgr->smu_present) + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr->base.clks.dispclk_khz)); +} + static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower) @@ -273,6 +361,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, 
bool p_state_change_support; bool fclk_p_state_change_support; int total_plane_count; + int old_dispclk_khz = clk_mgr_base->clks.dispclk_khz; if (dc->work_arounds.skip_clock_update) return; @@ -396,9 +485,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; - if (clk_mgr->smu_present) - dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dispclk_khz)); - update_dispclk = true; } @@ -418,13 +504,13 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, if (dpp_clock_lowered) { /* if clock is being lowered, increase DTO before lowering refclk */ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); - dcn20_update_clocks_update_dentist(clk_mgr, context); + dcn32_update_clocks_update_dentist(clk_mgr, context, old_dispclk_khz); if (clk_mgr->smu_present) dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz)); } else { /* if clock is being raised, increase refclk before lowering DTO */ if (update_dppclk || update_dispclk) - dcn20_update_clocks_update_dentist(clk_mgr, context); + dcn32_update_clocks_update_dentist(clk_mgr, context, old_dispclk_khz); /* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures * that we do not lower dto when it is not safe to lower. We do not need to * compare the current and new dppclk before calling this function. @@ -783,8 +869,7 @@ void dcn32_clk_mgr_construct( void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr) { - if (clk_mgr->base.bw_params) - kfree(clk_mgr->base.bw_params); + kfree(clk_mgr->base.bw_params); if (clk_mgr->wm_range_table) dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 0cb8d1f934d12d..1c218c5266509f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -33,6 +33,7 @@ #include "resource.h" +#include "gpio_service_interface.h" #include "clk_mgr.h" #include "clock_source.h" #include "dc_bios_types.h" @@ -53,11 +54,10 @@ #include "link_enc_cfg.h" #include "dc_link.h" -#include "dc_link_ddc.h" +#include "link.h" #include "dm_helpers.h" #include "mem_input.h" -#include "dc_link_dp.h" #include "dc_dmub_srv.h" #include "dsc.h" @@ -68,8 +68,6 @@ #include "dmub/dmub_srv.h" -#include "i2caux_interface.h" - #include "dce/dmub_psr.h" #include "dce/dmub_hw_lock_mgr.h" @@ -382,16 +380,18 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace) } /** - * dc_stream_adjust_vmin_vmax: + * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR + * @dc: dc reference + * @stream: Initial dc stream state + * @adjust: Updated parameters for vertical_total_min and vertical_total_max * * Looks up the pipe context of dc_stream_state and updates the * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh * Rate, which is a power-saving feature that targets reducing panel * refresh rate while the screen is static * - * @dc: dc reference - * @stream: Initial dc stream state - * @adjust: Updated parameters for vertical_total_min and vertical_total_max + * Return: %true if the pipe context is found and adjusted; + * %false if the pipe context is not found. 
*/ bool dc_stream_adjust_vmin_vmax(struct dc *dc, struct dc_stream_state *stream, @@ -419,14 +419,17 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, } /** - * dc_stream_get_last_used_drr_vtotal - dc_stream_get_last_vrr_vtotal + * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of + * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate) * * @dc: [in] dc reference * @stream: [in] Initial dc stream state - * @adjust: [in] Updated parameters for vertical_total_min and + * @refresh_rate: [in] new refresh_rate * - * Looks up the pipe context of dc_stream_state and gets the last VTOTAL used - * by DRR (Dynamic Refresh Rate) + * Return: %true if the pipe context is found and there is an associated + * timing_generator for the DC; + * %false if the pipe context is not found or there is no + * timing_generator for the DC. */ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc, struct dc_stream_state *stream, @@ -518,14 +521,15 @@ dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu, } bool -dc_stream_forward_crc_window(struct dc *dc, - struct rect *rect, struct dc_stream_state *stream, bool is_stop) +dc_stream_forward_crc_window(struct dc_stream_state *stream, + struct rect *rect, bool is_stop) { struct dmcu *dmcu; struct dc_dmub_srv *dmub_srv; struct otg_phy_mux mux_mapping; struct pipe_ctx *pipe; int i; + struct dc *dc = stream->ctx->dc; for (i = 0; i < MAX_PIPES; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -566,7 +570,10 @@ dc_stream_forward_crc_window(struct dc *dc, * once. * * By default, only CRC0 is configured, and the entire frame is used to - * calculate the crc. + * calculate the CRC. + * + * Return: %false if the stream is not found or CRC capture is not supported; + * %true if the stream has been configured. */ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, struct crc_params *crc_window, bool enable, bool continuous) @@ -635,7 +642,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, * dc_stream_configure_crc needs to be called beforehand to enable CRCs. * * Return: - * false if stream is not found, or if CRCs are not enabled. + * %false if stream is not found, or if CRCs are not enabled. 
*/ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) @@ -862,6 +869,7 @@ static bool dc_construct_ctx(struct dc *dc, dc_ctx->perf_trace = dc_perf_trace_create(); if (!dc_ctx->perf_trace) { + kfree(dc_ctx); ASSERT_CRITICAL(false); return false; } @@ -1191,7 +1199,7 @@ static void disable_vbios_mode_if_required( pipe->stream_res.pix_clk_params.requested_pix_clk_100hz; if (pix_clk_100hz != requested_pix_clk_100hz) { - core_link_disable_stream(pipe); + link_set_dpms_off(pipe); pipe->stream->dpms_off = false; } } @@ -1299,7 +1307,7 @@ static void detect_edp_presence(struct dc *dc) if (dc->config.edp_not_connected) { edp_link->edp_sink_present = false; } else { - dc_link_detect_sink(edp_link, &type); + dc_link_detect_connection_type(edp_link, &type); edp_link->edp_sink_present = (type != dc_connection_none); } } @@ -1650,7 +1658,7 @@ bool dc_validate_boot_timing(const struct dc *dc, return false; } - if (is_edp_ilr_optimization_required(link, crtc_timing)) { + if (link_is_edp_ilr_optimization_required(link, crtc_timing)) { DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n"); return false; } @@ -1740,6 +1748,8 @@ void dc_z10_save_init(struct dc *dc) * * Applies given context to the hardware and copy it into current context. * It's up to the user to release the src context afterwards. + * + * Return: an enum dc_status result code for the operation */ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context) { @@ -2007,8 +2017,9 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context) return result == DC_OK; } - if (!streams_changed(dc, context->streams, context->stream_count)) + if (!streams_changed(dc, context->streams, context->stream_count)) { return DC_OK; + } DC_LOG_DC("%s: %d streams\n", __func__, context->stream_count); @@ -2948,6 +2959,9 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->vsp_infopacket) stream->vsp_infopacket = *update->vsp_infopacket; + if (update->adaptive_sync_infopacket) + stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket; + if (update->dither_option) stream->dither_option = *update->dither_option; @@ -3153,12 +3167,13 @@ static void commit_planes_do_stream_update(struct dc *dc, stream_update->vsc_infopacket || stream_update->vsp_infopacket || stream_update->hfvsif_infopacket || + stream_update->adaptive_sync_infopacket || stream_update->vtem_infopacket) { resource_build_info_frame(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); if (dc_is_dp_signal(pipe_ctx->stream->signal)) - dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); + link_dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); } if (stream_update->hdr_static_metadata && @@ -3194,14 +3209,14 @@ static void commit_planes_do_stream_update(struct dc *dc, continue; if (stream_update->dsc_config) - dp_update_dsc_config(pipe_ctx); + link_update_dsc_config(pipe_ctx); if (stream_update->mst_bw_update) { if (stream_update->mst_bw_update->is_increase) - dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); - else - dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); - } + link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); + else + link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); + } if (stream_update->pending_test_pattern) { 
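/*
 * Summary of the update fan-out in commit_planes_do_stream_update() as
 * modified above: info-frame updates (including the new
 * adaptive_sync_infopacket) rebuild and resend the frames, dsc_config
 * goes through link_update_dsc_config(), MST bandwidth changes use
 * link_increase/reduce_mst_payload(), pending test patterns are applied
 * here, and dpms_off transitions call link_set_dpms_off()/on().
 */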
dc_link_dp_set_test_pattern(stream->link, @@ -3214,7 +3229,7 @@ static void commit_planes_do_stream_update(struct dc *dc, if (stream_update->dpms_off) { if (*stream_update->dpms_off) { - core_link_disable_stream(pipe_ctx); + link_set_dpms_off(pipe_ctx); /* for dpms, keep acquired resources*/ if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only) pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); @@ -3224,7 +3239,7 @@ static void commit_planes_do_stream_update(struct dc *dc, } else { if (get_seamless_boot_stream_count(context) == 0) dc->hwss.prepare_bandwidth(dc, dc->current_state); - core_link_enable_stream(dc->current_state, pipe_ctx); + link_set_dpms_on(dc->current_state, pipe_ctx); } } @@ -3325,6 +3340,7 @@ static void commit_planes_for_stream(struct dc *dc, struct pipe_ctx *top_pipe_to_program = NULL; bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); bool subvp_prev_use = false; + bool subvp_curr_use = false; // Once we apply the new subvp context to hardware it won't be in the // dc->current_state anymore, so we have to cache it before we apply @@ -3334,6 +3350,21 @@ static void commit_planes_for_stream(struct dc *dc, dc_z10_restore(dc); + if (update_type == UPDATE_TYPE_FULL) { + /* wait for all double-buffer activity to clear on all pipes */ + int pipe_idx; + + for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx]; + + if (!pipe_ctx->stream) + continue; + + if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear) + pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg); + } + } + if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { /* Optimize seamless boot flag keeps clocks and watermarks high until * first flip. After first flip, optimization is required to lower @@ -3381,6 +3412,15 @@ static void commit_planes_for_stream(struct dc *dc, break; } + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + subvp_curr_use = true; + break; + } + } + if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { struct pipe_ctx *mpcc_pipe; struct pipe_ctx *odm_pipe; @@ -3652,42 +3692,22 @@ static void commit_planes_for_stream(struct dc *dc, top_pipe_to_program->stream_res.tg); } - /* For phantom pipe OTG enable, it has to be done after any previous pipe - * that was in use has already been programmed at gotten its double buffer - * update for "disable". - */ - if (update_type != UPDATE_TYPE_FAST) { - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - - /* If an active, non-phantom pipe is being transitioned into a phantom - * pipe, wait for the double buffer update to complete first before we do - * ANY phantom pipe programming. 
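The DRR wait loop added above follows a recurring DC pattern: walk every pipe in the resource pool, skip pipes that carry no stream, and call a timing-generator hook only when the ASIC-specific funcs table provides it. A condensed sketch of that pattern, using the same fields as the hunk, factored into a helper for readability:

static void wait_drr_doublebuffer_clear_all_pipes(struct dc *dc,
						  struct dc_state *context)
{
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx->stream)
			continue;	/* pipe is not driving a stream */

		/* hook is optional; NULL on ASICs without this double buffering */
		if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
			pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(
					pipe_ctx->stream_res.tg);
	}
}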
- */ - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM && - old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { - old_pipe->stream_res.tg->funcs->wait_for_state( - old_pipe->stream_res.tg, - CRTC_STATE_VBLANK); - old_pipe->stream_res.tg->funcs->wait_for_state( - old_pipe->stream_res.tg, - CRTC_STATE_VACTIVE); - } + if (subvp_curr_use) { + /* If enabling subvp or transitioning from subvp->subvp, enable the + * phantom streams before we program front end for the phantom pipes. + */ + if (update_type != UPDATE_TYPE_FAST) { + if (dc->hwss.enable_phantom_streams) + dc->hwss.enable_phantom_streams(dc, context); } - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; + } - if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) || - subvp_prev_use) { - // If old context or new context has phantom pipes, apply - // the phantom timings now. We can't change the phantom - // pipe configuration safely without driver acquiring - // the DMCUB lock first. - dc->hwss.apply_ctx_to_hw(dc, context); - break; - } - } + if (subvp_prev_use && !subvp_curr_use) { + /* If disabling subvp, disable phantom streams after front end + * programming has completed (we turn on phantom OTG in order + * to complete the plane disable for phantom pipes). + */ + dc->hwss.apply_ctx_to_hw(dc, context); } if (update_type != UPDATE_TYPE_FAST) @@ -4285,7 +4305,7 @@ void dc_resume(struct dc *dc) uint32_t i; for (i = 0; i < dc->link_count; i++) - core_link_resume(dc->links[i]); + link_resume(dc->links[i]); } bool dc_is_dmcu_initialized(struct dc *dc) @@ -4704,7 +4724,7 @@ bool dc_enable_dmub_notifications(struct dc *dc) /** * dc_enable_dmub_outbox - Enables DMUB unsolicited notification * - * dc: [in] dc structure + * @dc: [in] dc structure * * Enables DMUB unsolicited notifications to x86 via outbox. */ @@ -4905,8 +4925,8 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, /** * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA DPD interruption * - * @dc [in]: dc structure - * @hpd_int_enable [in]: 1 for hpd int enable, 0 to disable + * @dc: [in] dc structure + * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable * * Submits dpia hpd int enable command to dmub via inbox message */ @@ -4987,7 +5007,7 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo } /** - * dc_extended_blank_supported 0 Decide whether extended blank is supported + * dc_extended_blank_supported - Decide whether extended blank is supported * * @dc: [in] Current DC state * @@ -4996,7 +5016,7 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo * ability to enter z9/z10. 
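The kernel-doc fixes above correct the parameter syntax: kernel-doc expects "@name: description", with the @ prefix and the colon directly after the parameter name; other shapes typically make scripts/kernel-doc warn that the parameter is not described. A sketch of the bad and good forms on a hypothetical function:

/* Warns ("Function parameter ... not described"): missing '@', misplaced colon */
/*
 * dc [in]: dc structure
 */

/* Parsed correctly: */
/**
 * example_enable - enable a hypothetical feature
 * @dc: [in] dc structure
 * @enable: [in] 1 to enable, 0 to disable
 */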
* * Return: - * Indicate whether extended blank is supported (true or false) + * Indicate whether extended blank is supported (%true or %false) */ bool dc_extended_blank_supported(struct dc *dc) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index c88f044666fee6..c26e7258a91cfb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -23,4949 +23,5 @@ * */ -#include - -#include "dm_services.h" -#include "atomfirmware.h" -#include "dm_helpers.h" -#include "dc.h" -#include "grph_object_id.h" -#include "gpio_service_interface.h" -#include "core_status.h" -#include "dc_link_dp.h" -#include "dc_link_dpia.h" -#include "dc_link_ddc.h" -#include "link_hwss.h" -#include "opp.h" - -#include "link_encoder.h" -#include "hw_sequencer.h" -#include "resource.h" -#include "abm.h" -#include "fixed31_32.h" -#include "dpcd_defs.h" -#include "dmcu.h" -#include "hw/clk_mgr.h" -#include "dce/dmub_psr.h" -#include "dmub/dmub_srv.h" -#include "inc/hw/panel_cntl.h" -#include "inc/link_enc_cfg.h" -#include "inc/link_dpcd.h" -#include "link/link_dp_trace.h" - -#include "dc/dcn30/dcn30_vpg.h" - -#define DC_LOGGER_INIT(logger) - -#define LINK_INFO(...) \ - DC_LOG_HW_HOTPLUG( \ - __VA_ARGS__) - -#define RETIMER_REDRIVER_INFO(...) \ - DC_LOG_RETIMER_REDRIVER( \ - __VA_ARGS__) - -/******************************************************************************* - * Private functions - ******************************************************************************/ -static void dc_link_destruct(struct dc_link *link) -{ - int i; - - if (link->hpd_gpio) { - dal_gpio_destroy_irq(&link->hpd_gpio); - link->hpd_gpio = NULL; - } - - if (link->ddc) - dal_ddc_service_destroy(&link->ddc); - - if (link->panel_cntl) - link->panel_cntl->funcs->destroy(&link->panel_cntl); - - if (link->link_enc) { - /* Update link encoder resource tracking variables. These are used for - * the dynamic assignment of link encoders to streams. Virtual links - * are not assigned encoder resources on creation. 
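dc_link_destruct() above releases resources in rough reverse order of construction, guarding each release on the resource actually having been created. A trimmed sketch of that shape, keeping only the guard-then-release steps (the link encoder bookkeeping is elided):

static void example_link_teardown(struct dc_link *link)
{
	if (link->hpd_gpio) {
		dal_gpio_destroy_irq(&link->hpd_gpio);
		link->hpd_gpio = NULL;
	}

	if (link->ddc)
		dal_ddc_service_destroy(&link->ddc);

	if (link->panel_cntl)
		link->panel_cntl->funcs->destroy(&link->panel_cntl);

	if (link->local_sink)
		dc_sink_release(link->local_sink);
}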
- */ - if (link->link_id.id != CONNECTOR_ID_VIRTUAL) { - link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = NULL; - link->dc->res_pool->dig_link_enc_count--; - } - link->link_enc->funcs->destroy(&link->link_enc); - } - - if (link->local_sink) - dc_sink_release(link->local_sink); - - for (i = 0; i < link->sink_count; ++i) - dc_sink_release(link->remote_sinks[i]); -} - -struct gpio *get_hpd_gpio(struct dc_bios *dcb, - struct graphics_object_id link_id, - struct gpio_service *gpio_service) -{ - enum bp_result bp_result; - struct graphics_object_hpd_info hpd_info; - struct gpio_pin_info pin_info; - - if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK) - return NULL; - - bp_result = dcb->funcs->get_gpio_pin_info(dcb, - hpd_info.hpd_int_gpio_uid, &pin_info); - - if (bp_result != BP_RESULT_OK) { - ASSERT(bp_result == BP_RESULT_NORECORD); - return NULL; - } - - return dal_gpio_service_create_irq(gpio_service, - pin_info.offset, - pin_info.mask); -} - -/* - * Function: program_hpd_filter - * - * @brief - * Programs HPD filter on associated HPD line - * - * @param [in] delay_on_connect_in_ms: Connect filter timeout - * @param [in] delay_on_disconnect_in_ms: Disconnect filter timeout - * - * @return - * true on success, false otherwise - */ -static bool program_hpd_filter(const struct dc_link *link) -{ - bool result = false; - struct gpio *hpd; - int delay_on_connect_in_ms = 0; - int delay_on_disconnect_in_ms = 0; - - if (link->is_hpd_filter_disabled) - return false; - /* Verify feature is supported */ - switch (link->connector_signal) { - case SIGNAL_TYPE_DVI_SINGLE_LINK: - case SIGNAL_TYPE_DVI_DUAL_LINK: - case SIGNAL_TYPE_HDMI_TYPE_A: - /* Program hpd filter */ - delay_on_connect_in_ms = 500; - delay_on_disconnect_in_ms = 100; - break; - case SIGNAL_TYPE_DISPLAY_PORT: - case SIGNAL_TYPE_DISPLAY_PORT_MST: - /* Program hpd filter to allow DP signal to settle */ - /* 500: not able to detect MST <-> SST switch as HPD is low for - * only 100ms on DELL U2413 - * 0: some passive dongle still show aux mode instead of i2c - * 20-50: not enough to hide bouncing HPD with passive dongle. - * also see intermittent i2c read issues. 
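program_hpd_filter() reduces to a per-signal debounce table: long delays for HDMI/DVI, a short connect delay for DP (rationale above, values just below), and no filter at all for LVDS/eDP. A condensed restatement as a lookup helper (helper name illustrative, values taken from the function):

static void example_hpd_filter_delays(enum signal_type sig,
				      int *on_ms, int *off_ms, bool *program)
{
	switch (sig) {
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_HDMI_TYPE_A:
		*on_ms = 500;	/* ride out slow HDMI/DVI plug bounce */
		*off_ms = 100;
		*program = true;
		break;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		*on_ms = 80;	/* let the DP signal settle */
		*off_ms = 0;
		*program = true;
		break;
	default:		/* LVDS, eDP: leave the filter unprogrammed */
		*on_ms = 0;
		*off_ms = 0;
		*program = false;
		break;
	}
}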
- */ - delay_on_connect_in_ms = 80; - delay_on_disconnect_in_ms = 0; - break; - case SIGNAL_TYPE_LVDS: - case SIGNAL_TYPE_EDP: - default: - /* Don't program hpd filter */ - return false; - } - - /* Obtain HPD handle */ - hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, - link->ctx->gpio_service); - - if (!hpd) - return result; - - /* Setup HPD filtering */ - if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) { - struct gpio_hpd_config config; - - config.delay_on_connect = delay_on_connect_in_ms; - config.delay_on_disconnect = delay_on_disconnect_in_ms; - - dal_irq_setup_hpd_filter(hpd, &config); - - dal_gpio_close(hpd); - - result = true; - } else { - ASSERT_CRITICAL(false); - } - - /* Release HPD handle */ - dal_gpio_destroy_irq(&hpd); - - return result; -} - -bool dc_link_wait_for_t12(struct dc_link *link) -{ - if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) { - link->dc->hwss.edp_wait_for_T12(link); - - return true; - } - - return false; -} - -/** - * dc_link_detect_sink() - Determine if there is a sink connected - * - * @link: pointer to the dc link - * @type: Returned connection type - * Does not detect downstream devices, such as MST sinks - * or display connected through active dongles - */ -bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type) -{ - uint32_t is_hpd_high = 0; - struct gpio *hpd_pin; - - if (link->connector_signal == SIGNAL_TYPE_LVDS) { - *type = dc_connection_single; - return true; - } - - if (link->connector_signal == SIGNAL_TYPE_EDP) { - /*in case it is not on*/ - if (!link->dc->config.edp_no_power_sequencing) - link->dc->hwss.edp_power_control(link, true); - link->dc->hwss.edp_wait_for_hpd_ready(link, true); - } - - /* Link may not have physical HPD pin. */ - if (link->ep_type != DISPLAY_ENDPOINT_PHY) { - if (link->is_hpd_pending || !dc_link_dpia_query_hpd_status(link)) - *type = dc_connection_none; - else - *type = dc_connection_single; - - return true; - } - - /* todo: may need to lock gpio access */ - hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, - link->ctx->gpio_service); - if (!hpd_pin) - goto hpd_gpio_failure; - - dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT); - dal_gpio_get_value(hpd_pin, &is_hpd_high); - dal_gpio_close(hpd_pin); - dal_gpio_destroy_irq(&hpd_pin); - - if (is_hpd_high) { - *type = dc_connection_single; - /* TODO: need to do the actual detection */ - } else { - *type = dc_connection_none; - } - - return true; - -hpd_gpio_failure: - return false; -} - -static enum ddc_transaction_type get_ddc_transaction_type(enum signal_type sink_signal) -{ - enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE; - - switch (sink_signal) { - case SIGNAL_TYPE_DVI_SINGLE_LINK: - case SIGNAL_TYPE_DVI_DUAL_LINK: - case SIGNAL_TYPE_HDMI_TYPE_A: - case SIGNAL_TYPE_LVDS: - case SIGNAL_TYPE_RGB: - transaction_type = DDC_TRANSACTION_TYPE_I2C; - break; - - case SIGNAL_TYPE_DISPLAY_PORT: - case SIGNAL_TYPE_EDP: - transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; - break; - - case SIGNAL_TYPE_DISPLAY_PORT_MST: - /* MST does not use I2COverAux, but there is the - * SPECIAL use case for "immediate dwnstrm device - * access" (EPR#370830). 
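get_ddc_transaction_type() here effectively encodes one question: does this signal tunnel I2C over the DP AUX channel? A condensed sketch of the mapping (a boolean instead of the enum, helper name illustrative):

static bool example_uses_i2c_over_aux(enum signal_type sig)
{
	switch (sig) {
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_EDP:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:	/* special case, see EPR#370830 */
		return true;
	default:	/* DVI, HDMI, LVDS, RGB: plain I2C */
		return false;
	}
}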
- */ - transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; - break; - - default: - break; - } - - return transaction_type; -} - -static enum signal_type get_basic_signal_type(struct graphics_object_id encoder, - struct graphics_object_id downstream) -{ - if (downstream.type == OBJECT_TYPE_CONNECTOR) { - switch (downstream.id) { - case CONNECTOR_ID_SINGLE_LINK_DVII: - switch (encoder.id) { - case ENCODER_ID_INTERNAL_DAC1: - case ENCODER_ID_INTERNAL_KLDSCP_DAC1: - case ENCODER_ID_INTERNAL_DAC2: - case ENCODER_ID_INTERNAL_KLDSCP_DAC2: - return SIGNAL_TYPE_RGB; - default: - return SIGNAL_TYPE_DVI_SINGLE_LINK; - } - break; - case CONNECTOR_ID_DUAL_LINK_DVII: - { - switch (encoder.id) { - case ENCODER_ID_INTERNAL_DAC1: - case ENCODER_ID_INTERNAL_KLDSCP_DAC1: - case ENCODER_ID_INTERNAL_DAC2: - case ENCODER_ID_INTERNAL_KLDSCP_DAC2: - return SIGNAL_TYPE_RGB; - default: - return SIGNAL_TYPE_DVI_DUAL_LINK; - } - } - break; - case CONNECTOR_ID_SINGLE_LINK_DVID: - return SIGNAL_TYPE_DVI_SINGLE_LINK; - case CONNECTOR_ID_DUAL_LINK_DVID: - return SIGNAL_TYPE_DVI_DUAL_LINK; - case CONNECTOR_ID_VGA: - return SIGNAL_TYPE_RGB; - case CONNECTOR_ID_HDMI_TYPE_A: - return SIGNAL_TYPE_HDMI_TYPE_A; - case CONNECTOR_ID_LVDS: - return SIGNAL_TYPE_LVDS; - case CONNECTOR_ID_DISPLAY_PORT: - case CONNECTOR_ID_USBC: - return SIGNAL_TYPE_DISPLAY_PORT; - case CONNECTOR_ID_EDP: - return SIGNAL_TYPE_EDP; - default: - return SIGNAL_TYPE_NONE; - } - } else if (downstream.type == OBJECT_TYPE_ENCODER) { - switch (downstream.id) { - case ENCODER_ID_EXTERNAL_NUTMEG: - case ENCODER_ID_EXTERNAL_TRAVIS: - return SIGNAL_TYPE_DISPLAY_PORT; - default: - return SIGNAL_TYPE_NONE; - } - } - - return SIGNAL_TYPE_NONE; -} - -/* - * dc_link_is_dp_sink_present() - Check if there is a native DP - * or passive DP-HDMI dongle connected - */ -bool dc_link_is_dp_sink_present(struct dc_link *link) -{ - enum gpio_result gpio_result; - uint32_t clock_pin = 0; - uint8_t retry = 0; - struct ddc *ddc; - - enum connector_id connector_id = - dal_graphics_object_id_get_connector_id(link->link_id); - - bool present = - ((connector_id == CONNECTOR_ID_DISPLAY_PORT) || - (connector_id == CONNECTOR_ID_EDP) || - (connector_id == CONNECTOR_ID_USBC)); - - ddc = dal_ddc_service_get_ddc_pin(link->ddc); - - if (!ddc) { - BREAK_TO_DEBUGGER(); - return present; - } - - /* Open GPIO and set it to I2C mode */ - /* Note: this GpioMode_Input will be converted - * to GpioConfigType_I2cAuxDualMode in GPIO component, - * which indicates we need additional delay - */ - - if (dal_ddc_open(ddc, GPIO_MODE_INPUT, - GPIO_DDC_CONFIG_TYPE_MODE_I2C) != GPIO_RESULT_OK) { - dal_ddc_close(ddc); - - return present; - } - - /* - * Read GPIO: DP sink is present if both clock and data pins are zero - * - * [W/A] plug-unplug DP cable, sometimes customer board has - * one short pulse on clk_pin(1V, < 1ms). DP will be config to HDMI/DVI - * then monitor can't br light up. 
Add retry 3 times - * But in real passive dongle, it need additional 3ms to detect - */ - do { - gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin); - ASSERT(gpio_result == GPIO_RESULT_OK); - if (clock_pin) - udelay(1000); - else - break; - } while (retry++ < 3); - - present = (gpio_result == GPIO_RESULT_OK) && !clock_pin; - - dal_ddc_close(ddc); - - return present; -} - -/* - * @brief - * Detect output sink type - */ -static enum signal_type link_detect_sink(struct dc_link *link, - enum dc_detect_reason reason) -{ - enum signal_type result; - struct graphics_object_id enc_id; - - if (link->is_dig_mapping_flexible) - enc_id = (struct graphics_object_id){.id = ENCODER_ID_UNKNOWN}; - else - enc_id = link->link_enc->id; - result = get_basic_signal_type(enc_id, link->link_id); - - /* Use basic signal type for link without physical connector. */ - if (link->ep_type != DISPLAY_ENDPOINT_PHY) - return result; - - /* Internal digital encoder will detect only dongles - * that require digital signal - */ - - /* Detection mechanism is different - * for different native connectors. - * LVDS connector supports only LVDS signal; - * PCIE is a bus slot, the actual connector needs to be detected first; - * eDP connector supports only eDP signal; - * HDMI should check straps for audio - */ - - /* PCIE detects the actual connector on add-on board */ - if (link->link_id.id == CONNECTOR_ID_PCIE) { - /* ZAZTODO implement PCIE add-on card detection */ - } - - switch (link->link_id.id) { - case CONNECTOR_ID_HDMI_TYPE_A: { - /* check audio support: - * if native HDMI is not supported, switch to DVI - */ - struct audio_support *aud_support = - &link->dc->res_pool->audio_support; - - if (!aud_support->hdmi_audio_native) - if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A) - result = SIGNAL_TYPE_DVI_SINGLE_LINK; - } - break; - case CONNECTOR_ID_DISPLAY_PORT: - case CONNECTOR_ID_USBC: { - /* DP HPD short pulse. 
Passive DP dongle will not - * have short pulse - */ - if (reason != DETECT_REASON_HPDRX) { - /* Check whether DP signal detected: if not - - * we assume signal is DVI; it could be corrected - * to HDMI after dongle detection - */ - if (!dm_helpers_is_dp_sink_present(link)) - result = SIGNAL_TYPE_DVI_SINGLE_LINK; - } - } - break; - default: - break; - } - - return result; -} - -static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_dongle_type dongle_type, - struct audio_support *audio_support) -{ - enum signal_type signal = SIGNAL_TYPE_NONE; - - switch (dongle_type) { - case DISPLAY_DONGLE_DP_HDMI_DONGLE: - if (audio_support->hdmi_audio_on_dongle) - signal = SIGNAL_TYPE_HDMI_TYPE_A; - else - signal = SIGNAL_TYPE_DVI_SINGLE_LINK; - break; - case DISPLAY_DONGLE_DP_DVI_DONGLE: - signal = SIGNAL_TYPE_DVI_SINGLE_LINK; - break; - case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE: - if (audio_support->hdmi_audio_native) - signal = SIGNAL_TYPE_HDMI_TYPE_A; - else - signal = SIGNAL_TYPE_DVI_SINGLE_LINK; - break; - default: - signal = SIGNAL_TYPE_NONE; - break; - } - - return signal; -} - -static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc, - struct display_sink_capability *sink_cap, - struct audio_support *audio_support) -{ - dal_ddc_service_i2c_query_dp_dual_mode_adaptor(ddc, sink_cap); - - return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type, - audio_support); -} - -static void link_disconnect_sink(struct dc_link *link) -{ - if (link->local_sink) { - dc_sink_release(link->local_sink); - link->local_sink = NULL; - } - - link->dpcd_sink_count = 0; - //link->dpcd_caps.dpcd_rev.raw = 0; -} - -static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *link) -{ - dc_sink_release(link->local_sink); - link->local_sink = prev_sink; -} - -#if defined(CONFIG_DRM_AMD_DC_HDCP) -bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal) -{ - bool ret = false; - - switch (signal) { - case SIGNAL_TYPE_DISPLAY_PORT: - case SIGNAL_TYPE_DISPLAY_PORT_MST: - ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE; - break; - case SIGNAL_TYPE_DVI_SINGLE_LINK: - case SIGNAL_TYPE_DVI_DUAL_LINK: - case SIGNAL_TYPE_HDMI_TYPE_A: - /* HDMI doesn't tell us its HDCP(1.4) capability, so assume to always be capable, - * we can poll for bksv but some displays have an issue with this. Since its so rare - * for a display to not be 1.4 capable, this assumtion is ok - */ - ret = true; - break; - default: - break; - } - return ret; -} - -bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal) -{ - bool ret = false; - - switch (signal) { - case SIGNAL_TYPE_DISPLAY_PORT: - case SIGNAL_TYPE_DISPLAY_PORT_MST: - ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE && - link->hdcp_caps.rx_caps.fields.byte0.hdcp_capable && - (link->hdcp_caps.rx_caps.fields.version == 0x2)) ? 1 : 0; - break; - case SIGNAL_TYPE_DVI_SINGLE_LINK: - case SIGNAL_TYPE_DVI_DUAL_LINK: - case SIGNAL_TYPE_HDMI_TYPE_A: - ret = (link->hdcp_caps.rx_caps.fields.version == 0x4) ? 
1:0; - break; - default: - break; - } - - return ret; -} - -static void query_hdcp_capability(enum signal_type signal, struct dc_link *link) -{ - struct hdcp_protection_message msg22; - struct hdcp_protection_message msg14; - - memset(&msg22, 0, sizeof(struct hdcp_protection_message)); - memset(&msg14, 0, sizeof(struct hdcp_protection_message)); - memset(link->hdcp_caps.rx_caps.raw, 0, - sizeof(link->hdcp_caps.rx_caps.raw)); - - if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && - link->ddc->transaction_type == - DDC_TRANSACTION_TYPE_I2C_OVER_AUX) || - link->connector_signal == SIGNAL_TYPE_EDP) { - msg22.data = link->hdcp_caps.rx_caps.raw; - msg22.length = sizeof(link->hdcp_caps.rx_caps.raw); - msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS; - } else { - msg22.data = &link->hdcp_caps.rx_caps.fields.version; - msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version); - msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION; - } - msg22.version = HDCP_VERSION_22; - msg22.link = HDCP_LINK_PRIMARY; - msg22.max_retries = 5; - dc_process_hdcp_msg(signal, link, &msg22); - - if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - msg14.data = &link->hdcp_caps.bcaps.raw; - msg14.length = sizeof(link->hdcp_caps.bcaps.raw); - msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS; - msg14.version = HDCP_VERSION_14; - msg14.link = HDCP_LINK_PRIMARY; - msg14.max_retries = 5; - - dc_process_hdcp_msg(signal, link, &msg14); - } - -} -#endif - -static void read_current_link_settings_on_detect(struct dc_link *link) -{ - union lane_count_set lane_count_set = {0}; - uint8_t link_bw_set; - uint8_t link_rate_set; - uint32_t read_dpcd_retry_cnt = 10; - enum dc_status status = DC_ERROR_UNEXPECTED; - int i; - union max_down_spread max_down_spread = {0}; - - // Read DPCD 00101h to find out the number of lanes currently set - for (i = 0; i < read_dpcd_retry_cnt; i++) { - status = core_link_read_dpcd(link, - DP_LANE_COUNT_SET, - &lane_count_set.raw, - sizeof(lane_count_set)); - /* First DPCD read after VDD ON can fail if the particular board - * does not have HPD pin wired correctly. So if DPCD read fails, - * which it should never happen, retry a few times. Target worst - * case scenario of 80 ms. - */ - if (status == DC_OK) { - link->cur_link_settings.lane_count = - lane_count_set.bits.LANE_COUNT_SET; - break; - } - - msleep(8); - } - - // Read DPCD 00100h to find if standard link rates are set - core_link_read_dpcd(link, DP_LINK_BW_SET, - &link_bw_set, sizeof(link_bw_set)); - - if (link_bw_set == 0) { - if (link->connector_signal == SIGNAL_TYPE_EDP) { - /* If standard link rates are not being used, - * Read DPCD 00115h to find the edp link rate set used - */ - core_link_read_dpcd(link, DP_LINK_RATE_SET, - &link_rate_set, sizeof(link_rate_set)); - - // edp_supported_link_rates_count = 0 for DP - if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { - link->cur_link_settings.link_rate = - link->dpcd_caps.edp_supported_link_rates[link_rate_set]; - link->cur_link_settings.link_rate_set = link_rate_set; - link->cur_link_settings.use_link_rate_set = true; - } - } else { - // Link Rate not found. Seamless boot may not work. - ASSERT(false); - } - } else { - link->cur_link_settings.link_rate = link_bw_set; - link->cur_link_settings.use_link_rate_set = false; - } - // Read DPCD 00003h to find the max down spread. - core_link_read_dpcd(link, DP_MAX_DOWNSPREAD, - &max_down_spread.raw, sizeof(max_down_spread)); - link->cur_link_settings.link_spread = - max_down_spread.bits.MAX_DOWN_SPREAD ? 
- LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; -} - -static bool detect_dp(struct dc_link *link, - struct display_sink_capability *sink_caps, - enum dc_detect_reason reason) -{ - struct audio_support *audio_support = &link->dc->res_pool->audio_support; - - sink_caps->signal = link_detect_sink(link, reason); - sink_caps->transaction_type = - get_ddc_transaction_type(sink_caps->signal); - - if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { - sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; - if (!detect_dp_sink_caps(link)) - return false; - - if (is_dp_branch_device(link)) - /* DP SST branch */ - link->type = dc_connection_sst_branch; - } else { - /* DP passive dongles */ - sink_caps->signal = dp_passive_dongle_detection(link->ddc, - sink_caps, - audio_support); - link->dpcd_caps.dongle_type = sink_caps->dongle_type; - link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one; - link->dpcd_caps.dpcd_rev.raw = 0; - } - - return true; -} - -static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid) -{ - if (old_edid->length != new_edid->length) - return false; - - if (new_edid->length == 0) - return false; - - return (memcmp(old_edid->raw_edid, - new_edid->raw_edid, new_edid->length) == 0); -} - -static bool wait_for_entering_dp_alt_mode(struct dc_link *link) -{ - /** - * something is terribly wrong if time out is > 200ms. (5Hz) - * 500 microseconds * 400 tries us 200 ms - **/ - unsigned int sleep_time_in_microseconds = 500; - unsigned int tries_allowed = 400; - bool is_in_alt_mode; - unsigned long long enter_timestamp; - unsigned long long finish_timestamp; - unsigned long long time_taken_in_ns; - int tries_taken; - - DC_LOGGER_INIT(link->ctx->logger); - - if (!link->link_enc->funcs->is_in_alt_mode) - return true; - - is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc); - DC_LOG_WARNING("DP Alt mode state on HPD: %d\n", is_in_alt_mode); - - if (is_in_alt_mode) - return true; - - enter_timestamp = dm_get_timestamp(link->ctx); - - for (tries_taken = 0; tries_taken < tries_allowed; tries_taken++) { - udelay(sleep_time_in_microseconds); - /* ask the link if alt mode is enabled, if so return ok */ - if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) { - finish_timestamp = dm_get_timestamp(link->ctx); - time_taken_in_ns = - dm_get_elapse_time_in_ns(link->ctx, - finish_timestamp, - enter_timestamp); - DC_LOG_WARNING("Alt mode entered finished after %llu ms\n", - div_u64(time_taken_in_ns, 1000000)); - return true; - } - } - finish_timestamp = dm_get_timestamp(link->ctx); - time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, - enter_timestamp); - DC_LOG_WARNING("Alt mode has timed out after %llu ms\n", - div_u64(time_taken_in_ns, 1000000)); - return false; -} - -static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link) -{ - /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock - * reports DSC support. 
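The USB4 dock workaround applied below hinges on one predicate over the detected link; restated here as a standalone check (field accesses exactly as in the code, helper name illustrative):

static bool example_needs_mst_dsc_always_on(const struct dc_link *link)
{
	return link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
	       link->type == dc_connection_mst_branch &&
	       link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
	       link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_20 &&
	       link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
	       !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around;
}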
- */ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && - link->type == dc_connection_mst_branch && - link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && - link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_20 && - link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && - !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) - link->wa_flags.dpia_mst_dsc_always_on = true; -} - -static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link) -{ - /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) - link->wa_flags.dpia_mst_dsc_always_on = false; -} - -static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason reason) -{ - DC_LOGGER_INIT(link->ctx->logger); - - LINK_INFO("link=%d, mst branch is now Connected\n", - link->link_index); - - link->type = dc_connection_mst_branch; - apply_dpia_mst_dsc_always_on_wa(link); - - dm_helpers_dp_update_branch_info(link->ctx, link); - if (dm_helpers_dp_mst_start_top_mgr(link->ctx, - link, (reason == DETECT_REASON_BOOT || reason == DETECT_REASON_RESUMEFROMS3S4))) { - link_disconnect_sink(link); - } else { - link->type = dc_connection_sst_branch; - } - - return link->type == dc_connection_mst_branch; -} - -bool reset_cur_dp_mst_topology(struct dc_link *link) -{ - DC_LOGGER_INIT(link->ctx->logger); - - LINK_INFO("link=%d, mst branch is now Disconnected\n", - link->link_index); - - revert_dpia_mst_dsc_always_on_wa(link); - return dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); -} - -static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc, - enum dc_detect_reason reason) -{ - int i; - bool can_apply_seamless_boot = false; - - for (i = 0; i < dc->current_state->stream_count; i++) { - if (dc->current_state->streams[i]->apply_seamless_boot_optimization) { - can_apply_seamless_boot = true; - break; - } - } - - return !can_apply_seamless_boot && reason != DETECT_REASON_BOOT; -} - -static void prepare_phy_clocks_for_destructive_link_verification(const struct dc *dc) -{ - dc_z10_restore(dc); - clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); -} - -static void restore_phy_clocks_for_destructive_link_verification(const struct dc *dc) -{ - clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); -} - -static void set_all_streams_dpms_off_for_link(struct dc_link *link) -{ - int i; - struct pipe_ctx *pipe_ctx; - struct dc_stream_update stream_update; - bool dpms_off = true; - struct link_resource link_res = {0}; - - memset(&stream_update, 0, sizeof(stream_update)); - stream_update.dpms_off = &dpms_off; - - for (i = 0; i < MAX_PIPES; i++) { - pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && - pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) { - stream_update.stream = pipe_ctx->stream; - dc_commit_updates_for_stream(link->ctx->dc, NULL, 0, - pipe_ctx->stream, &stream_update, - link->ctx->dc->current_state); - } - } - - /* link can be also enabled by vbios. In this case it is not recorded - * in pipe_ctx. 
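set_all_streams_dpms_off_for_link() above uses the sparse-update idiom: a zeroed dc_stream_update in which only the pointers for fields being changed are non-NULL. A minimal single-stream sketch of the same call (dc and stream are assumed to exist in scope):

	struct dc_stream_update stream_update;
	bool dpms_off = true;

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.stream = stream;		/* stream being updated */
	stream_update.dpms_off = &dpms_off;	/* the only field applied */

	dc_commit_updates_for_stream(dc, NULL, 0, stream,
				     &stream_update, dc->current_state);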
Disable link phy here to make sure it is completely off - */ - dp_disable_link_phy(link, &link_res, link->connector_signal); -} - -static void verify_link_capability_destructive(struct dc_link *link, - struct dc_sink *sink, - enum dc_detect_reason reason) -{ - bool should_prepare_phy_clocks = - should_prepare_phy_clocks_for_link_verification(link->dc, reason); - - if (should_prepare_phy_clocks) - prepare_phy_clocks_for_destructive_link_verification(link->dc); - - if (dc_is_dp_signal(link->local_sink->sink_signal)) { - struct dc_link_settings known_limit_link_setting = - dp_get_max_link_cap(link); - set_all_streams_dpms_off_for_link(link); - dp_verify_link_cap_with_retries( - link, &known_limit_link_setting, - LINK_TRAINING_MAX_VERIFY_RETRY); - } else { - ASSERT(0); - } - - if (should_prepare_phy_clocks) - restore_phy_clocks_for_destructive_link_verification(link->dc); -} - -static void verify_link_capability_non_destructive(struct dc_link *link) -{ - if (dc_is_dp_signal(link->local_sink->sink_signal)) { - if (dc_is_embedded_signal(link->local_sink->sink_signal) || - link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) - /* TODO - should we check link encoder's max link caps here? - * How do we know which link encoder to check from? - */ - link->verified_link_cap = link->reported_link_cap; - else - link->verified_link_cap = dp_get_max_link_cap(link); - } -} - -static bool should_verify_link_capability_destructively(struct dc_link *link, - enum dc_detect_reason reason) -{ - bool destrictive = false; - struct dc_link_settings max_link_cap; - bool is_link_enc_unavailable = link->link_enc && - link->dc->res_pool->funcs->link_encs_assign && - !link_enc_cfg_is_link_enc_avail( - link->ctx->dc, - link->link_enc->preferred_engine, - link); - - if (dc_is_dp_signal(link->local_sink->sink_signal)) { - max_link_cap = dp_get_max_link_cap(link); - destrictive = true; - - if (link->dc->debug.skip_detection_link_training || - dc_is_embedded_signal(link->local_sink->sink_signal) || - link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { - destrictive = false; - } else if (dp_get_link_encoding_format(&max_link_cap) == - DP_8b_10b_ENCODING) { - if (link->dpcd_caps.is_mst_capable || - is_link_enc_unavailable) { - destrictive = false; - } - } - } - - return destrictive; -} - -static void verify_link_capability(struct dc_link *link, struct dc_sink *sink, - enum dc_detect_reason reason) -{ - if (should_verify_link_capability_destructively(link, reason)) - verify_link_capability_destructive(link, sink, reason); - else - verify_link_capability_non_destructive(link); -} - - -/** - * detect_link_and_local_sink() - Detect if a sink is attached to a given link - * - * link->local_sink is created or destroyed as needed. - * - * This does not create remote sinks. 
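should_verify_link_capability_destructively() above decides whether detection may run full link training; a simplified restatement of its logic (the link-encoder-availability clause is elided for brevity):

static bool example_verify_destructively(struct dc_link *link,
					 const struct dc_link_settings *max_cap)
{
	if (!dc_is_dp_signal(link->local_sink->sink_signal))
		return false;	/* non-DP signals are never trained here */

	if (link->dc->debug.skip_detection_link_training ||
	    dc_is_embedded_signal(link->local_sink->sink_signal) ||
	    link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
		return false;	/* embedded panels and DPIA verify non-destructively */

	if (dp_get_link_encoding_format(max_cap) == DP_8b_10b_ENCODING &&
	    link->dpcd_caps.is_mst_capable)
		return false;	/* MST-capable 8b/10b sinks skip detection training */

	return true;
}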
- */ -static bool detect_link_and_local_sink(struct dc_link *link, - enum dc_detect_reason reason) -{ - struct dc_sink_init_data sink_init_data = { 0 }; - struct display_sink_capability sink_caps = { 0 }; - uint32_t i; - bool converter_disable_audio = false; - struct audio_support *aud_support = &link->dc->res_pool->audio_support; - bool same_edid = false; - enum dc_edid_status edid_status; - struct dc_context *dc_ctx = link->ctx; - struct dc *dc = dc_ctx->dc; - struct dc_sink *sink = NULL; - struct dc_sink *prev_sink = NULL; - struct dpcd_caps prev_dpcd_caps; - enum dc_connection_type new_connection_type = dc_connection_none; - const uint32_t post_oui_delay = 30; // 30ms - - DC_LOGGER_INIT(link->ctx->logger); - - if (dc_is_virtual_signal(link->connector_signal)) - return false; - - if (((link->connector_signal == SIGNAL_TYPE_LVDS || - link->connector_signal == SIGNAL_TYPE_EDP) && - (!link->dc->config.allow_edp_hotplug_detection)) && - link->local_sink) { - // need to re-write OUI and brightness in resume case - if (link->connector_signal == SIGNAL_TYPE_EDP && - (link->dpcd_sink_ext_caps.bits.oled == 1)) { - dpcd_set_source_specific_data(link); - msleep(post_oui_delay); - dc_link_set_default_brightness_aux(link); - //TODO: use cached - } - - return true; - } - - if (!dc_link_detect_sink(link, &new_connection_type)) { - BREAK_TO_DEBUGGER(); - return false; - } - - prev_sink = link->local_sink; - if (prev_sink) { - dc_sink_retain(prev_sink); - memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps)); - } - - link_disconnect_sink(link); - if (new_connection_type != dc_connection_none) { - link->type = new_connection_type; - link->link_state_valid = false; - - /* From Disconnected-to-Connected. */ - switch (link->connector_signal) { - case SIGNAL_TYPE_HDMI_TYPE_A: { - sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; - if (aud_support->hdmi_audio_native) - sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; - else - sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; - break; - } - - case SIGNAL_TYPE_DVI_SINGLE_LINK: { - sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; - sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; - break; - } - - case SIGNAL_TYPE_DVI_DUAL_LINK: { - sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; - sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; - break; - } - - case SIGNAL_TYPE_LVDS: { - sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; - sink_caps.signal = SIGNAL_TYPE_LVDS; - break; - } - - case SIGNAL_TYPE_EDP: { - read_current_link_settings_on_detect(link); - - detect_edp_sink_caps(link); - read_current_link_settings_on_detect(link); - - /* Disable power sequence on MIPI panel + converter - */ - if (dc->config.enable_mipi_converter_optimization && - dc_ctx->dce_version == DCN_VERSION_3_01 && - link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_0022B9 && - memcmp(&link->dpcd_caps.branch_dev_name, DP_SINK_BRANCH_DEV_NAME_7580, - sizeof(link->dpcd_caps.branch_dev_name)) == 0) { - dc->config.edp_no_power_sequencing = true; - - if (!link->dpcd_caps.set_power_state_capable_edp) - link->wa_flags.dp_keep_receiver_powered = true; - } - - sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; - sink_caps.signal = SIGNAL_TYPE_EDP; - break; - } - - case SIGNAL_TYPE_DISPLAY_PORT: { - /* wa HPD high coming too early*/ - if (link->ep_type == DISPLAY_ENDPOINT_PHY && - link->link_enc->features.flags.bits.DP_IS_USB_C == 1) { - /* if alt mode times out, return false */ - if (!wait_for_entering_dp_alt_mode(link)) - return false; - } - - if (!detect_dp(link, 
&sink_caps, reason)) { - if (prev_sink) - dc_sink_release(prev_sink); - return false; - } - - /* Active SST downstream branch device unplug*/ - if (link->type == dc_connection_sst_branch && - link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { - if (prev_sink) - /* Downstream unplug */ - dc_sink_release(prev_sink); - return true; - } - - /* disable audio for non DP to HDMI active sst converter */ - if (link->type == dc_connection_sst_branch && - is_dp_active_dongle(link) && - (link->dpcd_caps.dongle_type != - DISPLAY_DONGLE_DP_HDMI_CONVERTER)) - converter_disable_audio = true; - break; - } - - default: - DC_ERROR("Invalid connector type! signal:%d\n", - link->connector_signal); - if (prev_sink) - dc_sink_release(prev_sink); - return false; - } /* switch() */ - - if (link->dpcd_caps.sink_count.bits.SINK_COUNT) - link->dpcd_sink_count = - link->dpcd_caps.sink_count.bits.SINK_COUNT; - else - link->dpcd_sink_count = 1; - - dal_ddc_service_set_transaction_type(link->ddc, - sink_caps.transaction_type); - - link->aux_mode = - dal_ddc_service_is_in_aux_transaction_mode(link->ddc); - - sink_init_data.link = link; - sink_init_data.sink_signal = sink_caps.signal; - - sink = dc_sink_create(&sink_init_data); - if (!sink) { - DC_ERROR("Failed to create sink!\n"); - if (prev_sink) - dc_sink_release(prev_sink); - return false; - } - - sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock; - sink->converter_disable_audio = converter_disable_audio; - - /* dc_sink_create returns a new reference */ - link->local_sink = sink; - - edid_status = dm_helpers_read_local_edid(link->ctx, - link, sink); - - switch (edid_status) { - case EDID_BAD_CHECKSUM: - DC_LOG_ERROR("EDID checksum invalid.\n"); - break; - case EDID_PARTIAL_VALID: - DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n"); - break; - case EDID_NO_RESPONSE: - DC_LOG_ERROR("No EDID read.\n"); - /* - * Abort detection for non-DP connectors if we have - * no EDID - * - * DP needs to report as connected if HDP is high - * even if we have no EDID in order to go to - * fail-safe mode - */ - if (dc_is_hdmi_signal(link->connector_signal) || - dc_is_dvi_signal(link->connector_signal)) { - if (prev_sink) - dc_sink_release(prev_sink); - - return false; - } - - if (link->type == dc_connection_sst_branch && - link->dpcd_caps.dongle_type == - DISPLAY_DONGLE_DP_VGA_CONVERTER && - reason == DETECT_REASON_HPDRX) { - /* Abort detection for DP-VGA adapters when EDID - * can't be read and detection reason is VGA-side - * hotplug - */ - if (prev_sink) - dc_sink_release(prev_sink); - link_disconnect_sink(link); - - return true; - } - - break; - default: - break; - } - - // Check if edid is the same - if ((prev_sink) && - (edid_status == EDID_THE_SAME || edid_status == EDID_OK)) - same_edid = is_same_edid(&prev_sink->dc_edid, - &sink->dc_edid); - - if (sink->edid_caps.panel_patch.skip_scdc_overwrite) - link->ctx->dc->debug.hdmi20_disable = true; - - if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && - sink_caps.transaction_type == - DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { - /* - * TODO debug why Dell 2413 doesn't like - * two link trainings - */ -#if defined(CONFIG_DRM_AMD_DC_HDCP) - query_hdcp_capability(sink->sink_signal, link); -#endif - } else { - // If edid is the same, then discard new sink and revert back to original sink - if (same_edid) { - link_disconnect_remap(prev_sink, link); - sink = prev_sink; - prev_sink = NULL; - } -#if defined(CONFIG_DRM_AMD_DC_HDCP) - query_hdcp_capability(sink->sink_signal, link); -#endif - } - - /* HDMI-DVI Dongle */ 
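The same-EDID path above avoids needlessly replacing the sink: when the re-read EDID is byte-identical to the previous one, the newly created sink is discarded and the previous sink object is restored via link_disconnect_remap(). The comparison mirrors is_same_edid():

static bool example_same_edid(const struct dc_edid *old_edid,
			      const struct dc_edid *new_edid)
{
	if (old_edid->length != new_edid->length)
		return false;

	if (new_edid->length == 0)
		return false;	/* empty EDIDs never count as equal */

	return memcmp(old_edid->raw_edid, new_edid->raw_edid,
		      new_edid->length) == 0;
}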
- if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A && - !sink->edid_caps.edid_hdmi) - sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; - - if (link->local_sink && dc_is_dp_signal(sink_caps.signal)) - dp_trace_init(link); - - /* Connectivity log: detection */ - for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) { - CONN_DATA_DETECT(link, - &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE], - DC_EDID_BLOCK_SIZE, - "%s: [Block %d] ", sink->edid_caps.display_name, i); - } - - DC_LOG_DETECTION_EDID_PARSER("%s: " - "manufacturer_id = %X, " - "product_id = %X, " - "serial_number = %X, " - "manufacture_week = %d, " - "manufacture_year = %d, " - "display_name = %s, " - "speaker_flag = %d, " - "audio_mode_count = %d\n", - __func__, - sink->edid_caps.manufacturer_id, - sink->edid_caps.product_id, - sink->edid_caps.serial_number, - sink->edid_caps.manufacture_week, - sink->edid_caps.manufacture_year, - sink->edid_caps.display_name, - sink->edid_caps.speaker_flags, - sink->edid_caps.audio_mode_count); - - for (i = 0; i < sink->edid_caps.audio_mode_count; i++) { - DC_LOG_DETECTION_EDID_PARSER("%s: mode number = %d, " - "format_code = %d, " - "channel_count = %d, " - "sample_rate = %d, " - "sample_size = %d\n", - __func__, - i, - sink->edid_caps.audio_modes[i].format_code, - sink->edid_caps.audio_modes[i].channel_count, - sink->edid_caps.audio_modes[i].sample_rate, - sink->edid_caps.audio_modes[i].sample_size); - } - - if (link->connector_signal == SIGNAL_TYPE_EDP) { - /* Init dc_panel_config by HW config */ - if (dc_ctx->dc->res_pool->funcs->get_panel_config_defaults) - dc_ctx->dc->res_pool->funcs->get_panel_config_defaults(&link->panel_config); - /* Pickup base DM settings */ - dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink); - // Override dc_panel_config if system has specific settings - dm_helpers_override_panel_settings(dc_ctx, &link->panel_config); - } - - } else { - /* From Connected-to-Disconnected. */ - link->type = dc_connection_none; - sink_caps.signal = SIGNAL_TYPE_NONE; - /* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk - * is not cleared. If we emulate a DP signal on this connection, it thinks - * the dongle is still there and limits the number of modes we can emulate. - * Clear dongle_max_pix_clk on disconnect to fix this - */ - link->dongle_max_pix_clk = 0; - - dc_link_clear_dprx_states(link); - dp_trace_reset(link); - } - - LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n", - link->link_index, sink, - (sink_caps.signal == - SIGNAL_TYPE_NONE ? 
"Disconnected" : "Connected"), - prev_sink, same_edid); - - if (prev_sink) - dc_sink_release(prev_sink); - - return true; -} - -bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) -{ - bool is_local_sink_detect_success; - bool is_delegated_to_mst_top_mgr = false; - enum dc_connection_type pre_link_type = link->type; - - is_local_sink_detect_success = detect_link_and_local_sink(link, reason); - - if (is_local_sink_detect_success && link->local_sink) - verify_link_capability(link, link->local_sink, reason); - - if (is_local_sink_detect_success && link->local_sink && - dc_is_dp_signal(link->local_sink->sink_signal) && - link->dpcd_caps.is_mst_capable) - is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason); - - if (is_local_sink_detect_success && - pre_link_type == dc_connection_mst_branch && - link->type != dc_connection_mst_branch) - is_delegated_to_mst_top_mgr = reset_cur_dp_mst_topology(link); - - return is_local_sink_detect_success && !is_delegated_to_mst_top_mgr; -} - -bool dc_link_get_hpd_state(struct dc_link *dc_link) -{ - uint32_t state; - - dal_gpio_lock_pin(dc_link->hpd_gpio); - dal_gpio_get_value(dc_link->hpd_gpio, &state); - dal_gpio_unlock_pin(dc_link->hpd_gpio); - - return state; -} - -static enum hpd_source_id get_hpd_line(struct dc_link *link) -{ - struct gpio *hpd; - enum hpd_source_id hpd_id; - - hpd_id = HPD_SOURCEID_UNKNOWN; - - hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, - link->ctx->gpio_service); - - if (hpd) { - switch (dal_irq_get_source(hpd)) { - case DC_IRQ_SOURCE_HPD1: - hpd_id = HPD_SOURCEID1; - break; - case DC_IRQ_SOURCE_HPD2: - hpd_id = HPD_SOURCEID2; - break; - case DC_IRQ_SOURCE_HPD3: - hpd_id = HPD_SOURCEID3; - break; - case DC_IRQ_SOURCE_HPD4: - hpd_id = HPD_SOURCEID4; - break; - case DC_IRQ_SOURCE_HPD5: - hpd_id = HPD_SOURCEID5; - break; - case DC_IRQ_SOURCE_HPD6: - hpd_id = HPD_SOURCEID6; - break; - default: - BREAK_TO_DEBUGGER(); - break; - } - - dal_gpio_destroy_irq(&hpd); - } - - return hpd_id; -} - -static enum channel_id get_ddc_line(struct dc_link *link) -{ - struct ddc *ddc; - enum channel_id channel; - - channel = CHANNEL_ID_UNKNOWN; - - ddc = dal_ddc_service_get_ddc_pin(link->ddc); - - if (ddc) { - switch (dal_ddc_get_line(ddc)) { - case GPIO_DDC_LINE_DDC1: - channel = CHANNEL_ID_DDC1; - break; - case GPIO_DDC_LINE_DDC2: - channel = CHANNEL_ID_DDC2; - break; - case GPIO_DDC_LINE_DDC3: - channel = CHANNEL_ID_DDC3; - break; - case GPIO_DDC_LINE_DDC4: - channel = CHANNEL_ID_DDC4; - break; - case GPIO_DDC_LINE_DDC5: - channel = CHANNEL_ID_DDC5; - break; - case GPIO_DDC_LINE_DDC6: - channel = CHANNEL_ID_DDC6; - break; - case GPIO_DDC_LINE_DDC_VGA: - channel = CHANNEL_ID_DDC_VGA; - break; - case GPIO_DDC_LINE_I2C_PAD: - channel = CHANNEL_ID_I2C_PAD; - break; - default: - BREAK_TO_DEBUGGER(); - break; - } - } - - return channel; -} - -static enum transmitter translate_encoder_to_transmitter(struct graphics_object_id encoder) -{ - switch (encoder.id) { - case ENCODER_ID_INTERNAL_UNIPHY: - switch (encoder.enum_id) { - case ENUM_ID_1: - return TRANSMITTER_UNIPHY_A; - case ENUM_ID_2: - return TRANSMITTER_UNIPHY_B; - default: - return TRANSMITTER_UNKNOWN; - } - break; - case ENCODER_ID_INTERNAL_UNIPHY1: - switch (encoder.enum_id) { - case ENUM_ID_1: - return TRANSMITTER_UNIPHY_C; - case ENUM_ID_2: - return TRANSMITTER_UNIPHY_D; - default: - return TRANSMITTER_UNKNOWN; - } - break; - case ENCODER_ID_INTERNAL_UNIPHY2: - switch (encoder.enum_id) { - case ENUM_ID_1: - return TRANSMITTER_UNIPHY_E; - case ENUM_ID_2: - 
return TRANSMITTER_UNIPHY_F; - default: - return TRANSMITTER_UNKNOWN; - } - break; - case ENCODER_ID_INTERNAL_UNIPHY3: - switch (encoder.enum_id) { - case ENUM_ID_1: - return TRANSMITTER_UNIPHY_G; - default: - return TRANSMITTER_UNKNOWN; - } - break; - case ENCODER_ID_EXTERNAL_NUTMEG: - switch (encoder.enum_id) { - case ENUM_ID_1: - return TRANSMITTER_NUTMEG_CRT; - default: - return TRANSMITTER_UNKNOWN; - } - break; - case ENCODER_ID_EXTERNAL_TRAVIS: - switch (encoder.enum_id) { - case ENUM_ID_1: - return TRANSMITTER_TRAVIS_CRT; - case ENUM_ID_2: - return TRANSMITTER_TRAVIS_LCD; - default: - return TRANSMITTER_UNKNOWN; - } - break; - default: - return TRANSMITTER_UNKNOWN; - } -} - -static bool dc_link_construct_legacy(struct dc_link *link, - const struct link_init_data *init_params) -{ - uint8_t i; - struct ddc_service_init_data ddc_service_init_data = { 0 }; - struct dc_context *dc_ctx = init_params->ctx; - struct encoder_init_data enc_init_data = { 0 }; - struct panel_cntl_init_data panel_cntl_init_data = { 0 }; - struct integrated_info *info; - struct dc_bios *bios = init_params->dc->ctx->dc_bios; - const struct dc_vbios_funcs *bp_funcs = bios->funcs; - struct bp_disp_connector_caps_info disp_connect_caps_info = { 0 }; - - DC_LOGGER_INIT(dc_ctx->logger); - - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) - goto create_fail; - - link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; - link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID; - - link->link_status.dpcd_caps = &link->dpcd_caps; - - link->dc = init_params->dc; - link->ctx = dc_ctx; - link->link_index = init_params->link_index; - - memset(&link->preferred_training_settings, 0, - sizeof(struct dc_link_training_overrides)); - memset(&link->preferred_link_setting, 0, - sizeof(struct dc_link_settings)); - - link->link_id = - bios->funcs->get_connector_id(bios, init_params->connector_index); - - link->ep_type = DISPLAY_ENDPOINT_PHY; - - DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id); - - if (bios->funcs->get_disp_connector_caps_info) { - bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info); - link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY; - DC_LOG_DC("BIOS object table - is_internal_display: %d", link->is_internal_display); - } - - if (link->link_id.type != OBJECT_TYPE_CONNECTOR) { - dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! 
type %d expected %d\n", - __func__, init_params->connector_index, - link->link_id.type, OBJECT_TYPE_CONNECTOR); - goto create_fail; - } - - if (link->dc->res_pool->funcs->link_init) - link->dc->res_pool->funcs->link_init(link); - - link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, - link->ctx->gpio_service); - - if (link->hpd_gpio) { - dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT); - dal_gpio_unlock_pin(link->hpd_gpio); - link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio); - - DC_LOG_DC("BIOS object table - hpd_gpio id: %d", link->hpd_gpio->id); - DC_LOG_DC("BIOS object table - hpd_gpio en: %d", link->hpd_gpio->en); - } - - switch (link->link_id.id) { - case CONNECTOR_ID_HDMI_TYPE_A: - link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A; - - break; - case CONNECTOR_ID_SINGLE_LINK_DVID: - case CONNECTOR_ID_SINGLE_LINK_DVII: - link->connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; - break; - case CONNECTOR_ID_DUAL_LINK_DVID: - case CONNECTOR_ID_DUAL_LINK_DVII: - link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK; - break; - case CONNECTOR_ID_DISPLAY_PORT: - case CONNECTOR_ID_USBC: - link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT; - - if (link->hpd_gpio) - link->irq_source_hpd_rx = - dal_irq_get_rx_source(link->hpd_gpio); - - break; - case CONNECTOR_ID_EDP: - link->connector_signal = SIGNAL_TYPE_EDP; - - if (link->hpd_gpio) { - if (!link->dc->config.allow_edp_hotplug_detection) - link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; - - switch (link->dc->config.allow_edp_hotplug_detection) { - case 1: // only the 1st eDP handles hotplug - if (link->link_index == 0) - link->irq_source_hpd_rx = - dal_irq_get_rx_source(link->hpd_gpio); - else - link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; - break; - case 2: // only the 2nd eDP handles hotplug - if (link->link_index == 1) - link->irq_source_hpd_rx = - dal_irq_get_rx_source(link->hpd_gpio); - else - link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; - break; - default: - break; - } - } - - break; - case CONNECTOR_ID_LVDS: - link->connector_signal = SIGNAL_TYPE_LVDS; - break; - default: - DC_LOG_WARNING("Unsupported Connector type:%d!\n", - link->link_id.id); - goto create_fail; - } - - /* TODO: #DAL3 Implement id to str function.*/ - LINK_INFO("Connector[%d] description:" - "signal %d\n", - init_params->connector_index, - link->connector_signal); - - ddc_service_init_data.ctx = link->ctx; - ddc_service_init_data.id = link->link_id; - ddc_service_init_data.link = link; - link->ddc = dal_ddc_service_create(&ddc_service_init_data); - - if (!link->ddc) { - DC_ERROR("Failed to create ddc_service!\n"); - goto ddc_create_fail; - } - - if (!link->ddc->ddc_pin) { - DC_ERROR("Failed to get I2C info for connector!\n"); - goto ddc_create_fail; - } - - link->ddc_hw_inst = - dal_ddc_get_line(dal_ddc_service_get_ddc_pin(link->ddc)); - - - if (link->dc->res_pool->funcs->panel_cntl_create && - (link->link_id.id == CONNECTOR_ID_EDP || - link->link_id.id == CONNECTOR_ID_LVDS)) { - panel_cntl_init_data.ctx = dc_ctx; - panel_cntl_init_data.inst = - panel_cntl_init_data.ctx->dc_edp_id_count; - link->panel_cntl = - link->dc->res_pool->funcs->panel_cntl_create( - &panel_cntl_init_data); - panel_cntl_init_data.ctx->dc_edp_id_count++; - - if (link->panel_cntl == NULL) { - DC_ERROR("Failed to create link panel_cntl!\n"); - goto panel_cntl_create_fail; - } - } - - enc_init_data.ctx = dc_ctx; - bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, - &enc_init_data.encoder); - enc_init_data.connector = link->link_id; - enc_init_data.channel = 
get_ddc_line(link); - enc_init_data.hpd_source = get_hpd_line(link); - - link->hpd_src = enc_init_data.hpd_source; - - enc_init_data.transmitter = - translate_encoder_to_transmitter(enc_init_data.encoder); - link->link_enc = - link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data); - - if (!link->link_enc) { - DC_ERROR("Failed to create link encoder!\n"); - goto link_enc_create_fail; - } - - DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C); - DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE); - - /* Update link encoder tracking variables. These are used for the dynamic - * assignment of link encoders to streams. - */ - link->eng_id = link->link_enc->preferred_engine; - link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = link->link_enc; - link->dc->res_pool->dig_link_enc_count++; - - link->link_enc_hw_inst = link->link_enc->transmitter; - - for (i = 0; i < 4; i++) { - if (bp_funcs->get_device_tag(dc_ctx->dc_bios, - link->link_id, i, - &link->device_tag) != BP_RESULT_OK) { - DC_ERROR("Failed to find device tag!\n"); - goto device_tag_fail; - } - - /* Look for device tag that matches connector signal, - * CRT for rgb, LCD for other supported signal tyes - */ - if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, - link->device_tag.dev_id)) - continue; - if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT && - link->connector_signal != SIGNAL_TYPE_RGB) - continue; - if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD && - link->connector_signal == SIGNAL_TYPE_RGB) - continue; - - DC_LOG_DC("BIOS object table - device_tag.acpi_device: %d", link->device_tag.acpi_device); - DC_LOG_DC("BIOS object table - device_tag.dev_id.device_type: %d", link->device_tag.dev_id.device_type); - DC_LOG_DC("BIOS object table - device_tag.dev_id.enum_id: %d", link->device_tag.dev_id.enum_id); - break; - } - - if (bios->integrated_info) - memcpy(info, bios->integrated_info, sizeof(*info)); - - /* Look for channel mapping corresponding to connector and device tag */ - for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) { - struct external_display_path *path = - &info->ext_disp_conn_info.path[i]; - - if (path->device_connector_id.enum_id == link->link_id.enum_id && - path->device_connector_id.id == link->link_id.id && - path->device_connector_id.type == link->link_id.type) { - if (link->device_tag.acpi_device != 0 && - path->device_acpi_enum == link->device_tag.acpi_device) { - link->ddi_channel_mapping = path->channel_mapping; - link->chip_caps = path->caps; - DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw); - DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps); - } else if (path->device_tag == - link->device_tag.dev_id.raw_device_tag) { - link->ddi_channel_mapping = path->channel_mapping; - link->chip_caps = path->caps; - DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw); - DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps); - } - - if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) { - link->bios_forced_drive_settings.VOLTAGE_SWING = - (info->ext_disp_conn_info.fixdpvoltageswing & 0x3); - link->bios_forced_drive_settings.PRE_EMPHASIS = - ((info->ext_disp_conn_info.fixdpvoltageswing >> 2) & 0x3); - } - - break; - } - } - - if (bios->funcs->get_atom_dc_golden_table) - bios->funcs->get_atom_dc_golden_table(bios); - - /* - * TODO check if GPIO programmed 
correctly - * - * If GPIO isn't programmed correctly HPD might not rise or drain - * fast enough, leading to bounces. - */ - program_hpd_filter(link); - - link->psr_settings.psr_vtotal_control_support = false; - link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; - - DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__); - kfree(info); - return true; -device_tag_fail: - link->link_enc->funcs->destroy(&link->link_enc); -link_enc_create_fail: - if (link->panel_cntl != NULL) - link->panel_cntl->funcs->destroy(&link->panel_cntl); -panel_cntl_create_fail: - dal_ddc_service_destroy(&link->ddc); -ddc_create_fail: -create_fail: - - if (link->hpd_gpio) { - dal_gpio_destroy_irq(&link->hpd_gpio); - link->hpd_gpio = NULL; - } - - DC_LOG_DC("BIOS object table - %s failed.\n", __func__); - kfree(info); - - return false; -} - -static bool dc_link_construct_dpia(struct dc_link *link, - const struct link_init_data *init_params) -{ - struct ddc_service_init_data ddc_service_init_data = { 0 }; - struct dc_context *dc_ctx = init_params->ctx; - - DC_LOGGER_INIT(dc_ctx->logger); - - /* Initialized irq source for hpd and hpd rx */ - link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; - link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID; - link->link_status.dpcd_caps = &link->dpcd_caps; - - link->dc = init_params->dc; - link->ctx = dc_ctx; - link->link_index = init_params->link_index; - - memset(&link->preferred_training_settings, 0, - sizeof(struct dc_link_training_overrides)); - memset(&link->preferred_link_setting, 0, - sizeof(struct dc_link_settings)); - - /* Dummy Init for linkid */ - link->link_id.type = OBJECT_TYPE_CONNECTOR; - link->link_id.id = CONNECTOR_ID_DISPLAY_PORT; - link->link_id.enum_id = ENUM_ID_1 + init_params->connector_index; - link->is_internal_display = false; - link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT; - LINK_INFO("Connector[%d] description:signal %d\n", - init_params->connector_index, - link->connector_signal); - - link->ep_type = DISPLAY_ENDPOINT_USB4_DPIA; - link->is_dig_mapping_flexible = true; - - /* TODO: Initialize link : funcs->link_init */ - - ddc_service_init_data.ctx = link->ctx; - ddc_service_init_data.id = link->link_id; - ddc_service_init_data.link = link; - /* Set indicator for dpia link so that ddc won't be created */ - ddc_service_init_data.is_dpia_link = true; - - link->ddc = dal_ddc_service_create(&ddc_service_init_data); - if (!link->ddc) { - DC_ERROR("Failed to create ddc_service!\n"); - goto ddc_create_fail; - } - - /* Set dpia port index : 0 to number of dpia ports */ - link->ddc_hw_inst = init_params->connector_index; - - /* TODO: Create link encoder */ - - link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; - - /* Some docks seem to NAK I2C writes to segment pointer with mot=0. 
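dc_link_construct_dpia() above is intentionally thinner than the legacy constructor; for reference, the essential differences are summarized below (descriptive comment only, mirroring the construct path above and the flag set just after it):

/* DPIA links, compared to legacy PHY links:
 *  - link_id is synthesized (OBJECT_TYPE_CONNECTOR / CONNECTOR_ID_DISPLAY_PORT,
 *    enum_id derived from the connector index) rather than read from VBIOS;
 *  - ep_type is DISPLAY_ENDPOINT_USB4_DPIA and DIG mapping is flexible;
 *  - the DDC service is created with is_dpia_link = true, so no GPIO-backed
 *    DDC pin is allocated;
 *  - ddc_hw_inst doubles as the DPIA port index (the connector index);
 *  - no link encoder is created at construct time (assigned dynamically later);
 *  - PSR is marked unsupported, and dp_mot_reset_segment is set for docks
 *    that NAK segment-pointer writes with mot=0.
 */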
*/ - link->wa_flags.dp_mot_reset_segment = true; - - return true; - -ddc_create_fail: - return false; -} - -static bool dc_link_construct(struct dc_link *link, - const struct link_init_data *init_params) -{ - /* Handle dpia case */ - if (init_params->is_dpia_link) - return dc_link_construct_dpia(link, init_params); - else - return dc_link_construct_legacy(link, init_params); -} -/******************************************************************************* - * Public functions - ******************************************************************************/ -struct dc_link *link_create(const struct link_init_data *init_params) -{ - struct dc_link *link = - kzalloc(sizeof(*link), GFP_KERNEL); - - if (NULL == link) - goto alloc_fail; - - if (false == dc_link_construct(link, init_params)) - goto construct_fail; - - /* - * Must use preferred_link_setting, not reported_link_cap or verified_link_cap, - * since struct preferred_link_setting won't be reset after S3. - */ - link->preferred_link_setting.dpcd_source_device_specific_field_support = true; - - return link; - -construct_fail: - kfree(link); - -alloc_fail: - return NULL; -} - -void link_destroy(struct dc_link **link) -{ - dc_link_destruct(*link); - kfree(*link); - *link = NULL; -} - -static void enable_stream_features(struct pipe_ctx *pipe_ctx) -{ - struct dc_stream_state *stream = pipe_ctx->stream; - - if (pipe_ctx->stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) { - struct dc_link *link = stream->link; - union down_spread_ctrl old_downspread; - union down_spread_ctrl new_downspread; - - memset(&old_downspread, 0, sizeof(old_downspread)); - - core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL, - &old_downspread.raw, sizeof(old_downspread)); - - new_downspread.raw = old_downspread.raw; - - new_downspread.bits.IGNORE_MSA_TIMING_PARAM = - (stream->ignore_msa_timing_param) ? 1 : 0; - - if (new_downspread.raw != old_downspread.raw) { - core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, - &new_downspread.raw, sizeof(new_downspread)); - } - - } else { - dm_helpers_mst_enable_stream_features(stream); - } -} - -static enum dc_status enable_link_dp(struct dc_state *state, - struct pipe_ctx *pipe_ctx) -{ - struct dc_stream_state *stream = pipe_ctx->stream; - enum dc_status status; - bool skip_video_pattern; - struct dc_link *link = stream->link; - const struct dc_link_settings *link_settings = - &pipe_ctx->link_config.dp_link_settings; - bool fec_enable; - int i; - bool apply_seamless_boot_optimization = false; - uint32_t bl_oled_enable_delay = 50; // in ms - uint32_t post_oui_delay = 30; // 30ms - /* Reduce link bandwidth between failed link training attempts. */ - bool do_fallback = false; - - // check for seamless boot - for (i = 0; i < state->stream_count; i++) { - if (state->streams[i]->apply_seamless_boot_optimization) { - apply_seamless_boot_optimization = true; - break; - } - } - - /* Train with fallback when enabling DPIA link. Conventional links are - * trained with fallback during sink detection. - */ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) - do_fallback = true; - - /* - * Temporary w/a to get DP2.0 link rates to work with SST. - * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved. 
- */ - if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING && - pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && - link->dc->debug.set_mst_en_for_sst) { - dp_enable_mst_on_sink(link, true); - } - - if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { - /*in case it is not on*/ - if (!link->dc->config.edp_no_power_sequencing) - link->dc->hwss.edp_power_control(link, true); - link->dc->hwss.edp_wait_for_hpd_ready(link, true); - } - - if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { - /* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */ - } else { - pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = - link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - if (state->clk_mgr && !apply_seamless_boot_optimization) - state->clk_mgr->funcs->update_clocks(state->clk_mgr, - state, false); - } - - // during mode switch we do DP_SET_POWER off then on, and OUI is lost - dpcd_set_source_specific_data(link); - if (link->dpcd_sink_ext_caps.raw != 0) { - post_oui_delay += link->panel_config.pps.extra_post_OUI_ms; - msleep(post_oui_delay); - } - - // similarly, mode switch can cause loss of cable ID - dpcd_write_cable_id_to_dprx(link); - - skip_video_pattern = true; - - if (link_settings->link_rate == LINK_RATE_LOW) - skip_video_pattern = false; - - if (perform_link_training_with_retries(link_settings, - skip_video_pattern, - LINK_TRAINING_ATTEMPTS, - pipe_ctx, - pipe_ctx->stream->signal, - do_fallback)) { - status = DC_OK; - } else { - status = DC_FAIL_DP_LINK_TRAINING; - } - - if (link->preferred_training_settings.fec_enable) - fec_enable = *link->preferred_training_settings.fec_enable; - else - fec_enable = true; - - if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) - dp_set_fec_enable(link, fec_enable); - - // during mode set we do DP_SET_POWER off then on, aux writes are lost - if (link->dpcd_sink_ext_caps.bits.oled == 1 || - link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 || - link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) { - dc_link_set_default_brightness_aux(link); // TODO: use cached if known - if (link->dpcd_sink_ext_caps.bits.oled == 1) - msleep(bl_oled_enable_delay); - dc_link_backlight_enable_aux(link, true); - } - - return status; -} - -static enum dc_status enable_link_edp( - struct dc_state *state, - struct pipe_ctx *pipe_ctx) -{ - return enable_link_dp(state, pipe_ctx); -} - -static enum dc_status enable_link_dp_mst( - struct dc_state *state, - struct pipe_ctx *pipe_ctx) -{ - struct dc_link *link = pipe_ctx->stream->link; - - /* sink signal type after MST branch is MST. Multiple MST sinks - * share one link. Link DP PHY is enable or training only once. 
-	 */
-	if (link->link_status.link_active)
-		return DC_OK;
-
-	/* clear payload table */
-	dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
-
-	/* to make sure the pending down rep can be processed
-	 * before enabling the link
-	 */
-	dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link);
-
-	/* set the sink to MST mode before enabling the link */
-	dp_enable_mst_on_sink(link, true);
-
-	return enable_link_dp(state, pipe_ctx);
-}
-
-void dc_link_blank_all_dp_displays(struct dc *dc)
-{
-	unsigned int i;
-	uint8_t dpcd_power_state = '\0';
-	enum dc_status status = DC_ERROR_UNEXPECTED;
-
-	for (i = 0; i < dc->link_count; i++) {
-		if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) ||
-		    (dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL))
-			continue;
-
-		/* DP 2.0 spec requires that we read LTTPR caps first */
-		dp_retrieve_lttpr_cap(dc->links[i]);
-		/* if any of the displays are lit up turn them off */
-		status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
-					     &dpcd_power_state, sizeof(dpcd_power_state));
-
-		if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
-			dc_link_blank_dp_stream(dc->links[i], true);
-	}
-
-}
-
-void dc_link_blank_all_edp_displays(struct dc *dc)
-{
-	unsigned int i;
-	uint8_t dpcd_power_state = '\0';
-	enum dc_status status = DC_ERROR_UNEXPECTED;
-
-	for (i = 0; i < dc->link_count; i++) {
-		if ((dc->links[i]->connector_signal != SIGNAL_TYPE_EDP) ||
-		    (!dc->links[i]->edp_sink_present))
-			continue;
-
-		/* if any of the displays are lit up turn them off */
-		status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
-					     &dpcd_power_state, sizeof(dpcd_power_state));
-
-		if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
-			dc_link_blank_dp_stream(dc->links[i], true);
-	}
-}
-
-void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init)
-{
-	unsigned int j;
-	struct dc *dc = link->ctx->dc;
-	enum signal_type signal = link->connector_signal;
-
-	if ((signal == SIGNAL_TYPE_EDP) ||
-	    (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
-		if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
-		    link->link_enc->funcs->get_dig_frontend &&
-		    link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
-			unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc);
-
-			if (fe != ENGINE_ID_UNKNOWN)
-				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
-					if (fe == dc->res_pool->stream_enc[j]->id) {
-						dc->res_pool->stream_enc[j]->funcs->dp_blank(link,
-								dc->res_pool->stream_enc[j]);
-						break;
-					}
-				}
-		}
-
-		if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
-			dp_receiver_power_ctrl(link, false);
-	}
-}
-
-static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
-		enum engine_id eng_id,
-		struct ext_hdmi_settings *settings)
-{
-	bool result = false;
-	int i = 0;
-	struct integrated_info *integrated_info =
-			pipe_ctx->stream->ctx->dc_bios->integrated_info;
-
-	if (integrated_info == NULL)
-		return false;
-
-	/*
-	 * Get retimer settings from sbios for passing SI eye test for DCE11
-	 * The setting values are varied based on board revision and port id
-	 * Therefore the setting values of each ports is passed by sbios.
-	 */
-
-	// Check if current bios contains ext Hdmi settings
-	if (integrated_info->gpu_cap_info & 0x20) {
-		switch (eng_id) {
-		case ENGINE_ID_DIGA:
-			settings->slv_addr = integrated_info->dp0_ext_hdmi_slv_addr;
-			settings->reg_num = integrated_info->dp0_ext_hdmi_6g_reg_num;
-			settings->reg_num_6g = integrated_info->dp0_ext_hdmi_6g_reg_num;
-			memmove(settings->reg_settings,
-					integrated_info->dp0_ext_hdmi_reg_settings,
-					sizeof(integrated_info->dp0_ext_hdmi_reg_settings));
-			memmove(settings->reg_settings_6g,
-					integrated_info->dp0_ext_hdmi_6g_reg_settings,
-					sizeof(integrated_info->dp0_ext_hdmi_6g_reg_settings));
-			result = true;
-			break;
-		case ENGINE_ID_DIGB:
-			settings->slv_addr = integrated_info->dp1_ext_hdmi_slv_addr;
-			settings->reg_num = integrated_info->dp1_ext_hdmi_6g_reg_num;
-			settings->reg_num_6g = integrated_info->dp1_ext_hdmi_6g_reg_num;
-			memmove(settings->reg_settings,
-					integrated_info->dp1_ext_hdmi_reg_settings,
-					sizeof(integrated_info->dp1_ext_hdmi_reg_settings));
-			memmove(settings->reg_settings_6g,
-					integrated_info->dp1_ext_hdmi_6g_reg_settings,
-					sizeof(integrated_info->dp1_ext_hdmi_6g_reg_settings));
-			result = true;
-			break;
-		case ENGINE_ID_DIGC:
-			settings->slv_addr = integrated_info->dp2_ext_hdmi_slv_addr;
-			settings->reg_num = integrated_info->dp2_ext_hdmi_6g_reg_num;
-			settings->reg_num_6g = integrated_info->dp2_ext_hdmi_6g_reg_num;
-			memmove(settings->reg_settings,
-					integrated_info->dp2_ext_hdmi_reg_settings,
-					sizeof(integrated_info->dp2_ext_hdmi_reg_settings));
-			memmove(settings->reg_settings_6g,
-					integrated_info->dp2_ext_hdmi_6g_reg_settings,
-					sizeof(integrated_info->dp2_ext_hdmi_6g_reg_settings));
-			result = true;
-			break;
-		case ENGINE_ID_DIGD:
-			settings->slv_addr = integrated_info->dp3_ext_hdmi_slv_addr;
-			settings->reg_num = integrated_info->dp3_ext_hdmi_6g_reg_num;
-			settings->reg_num_6g = integrated_info->dp3_ext_hdmi_6g_reg_num;
-			memmove(settings->reg_settings,
-					integrated_info->dp3_ext_hdmi_reg_settings,
-					sizeof(integrated_info->dp3_ext_hdmi_reg_settings));
-			memmove(settings->reg_settings_6g,
-					integrated_info->dp3_ext_hdmi_6g_reg_settings,
-					sizeof(integrated_info->dp3_ext_hdmi_6g_reg_settings));
-			result = true;
-			break;
-		default:
-			break;
-		}
-
-		if (result == true) {
-			// Validate settings from bios integrated info table
-			if (settings->slv_addr == 0)
-				return false;
-			if (settings->reg_num > 9)
-				return false;
-			if (settings->reg_num_6g > 3)
-				return false;
-
-			for (i = 0; i < settings->reg_num; i++) {
-				if (settings->reg_settings[i].i2c_reg_index > 0x20)
-					return false;
-			}
-
-			for (i = 0; i < settings->reg_num_6g; i++) {
-				if (settings->reg_settings_6g[i].i2c_reg_index > 0x20)
-					return false;
-			}
-		}
-	}
-
-	return result;
-}
-
-static bool i2c_write(struct pipe_ctx *pipe_ctx,
-		uint8_t address, uint8_t *buffer, uint32_t length)
-{
-	struct i2c_command cmd = {0};
-	struct i2c_payload payload = {0};
-
-	memset(&payload, 0, sizeof(payload));
-	memset(&cmd, 0, sizeof(cmd));
-
-	cmd.number_of_payloads = 1;
-	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
-	cmd.speed = pipe_ctx->stream->ctx->dc->caps.i2c_speed_in_khz;
-
-	payload.address = address;
-	payload.data = buffer;
-	payload.length = length;
-	payload.write = true;
-	cmd.payloads = &payload;
-
-	if (dm_helpers_submit_i2c(pipe_ctx->stream->ctx,
-				  pipe_ctx->stream->link, &cmd))
-		return true;
-
-	return false;
-}
-
-static void write_i2c_retimer_setting(
-		struct pipe_ctx *pipe_ctx,
-		bool is_vga_mode,
-		bool is_over_340mhz,
-		struct ext_hdmi_settings *settings)
-{
-	uint8_t slave_address = (settings->slv_addr >> 1);
-	uint8_t buffer[2];
-	const uint8_t apply_rx_tx_change = 0x4;
-	uint8_t offset = 0xA;
-	uint8_t value = 0;
-	int i = 0;
-	bool i2c_success = false;
-	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
-
-	memset(&buffer, 0, sizeof(buffer));
-
-	/* Start Ext-Hdmi programming*/
-
-	for (i = 0; i < settings->reg_num; i++) {
-		/* Apply 3G settings */
-		if (settings->reg_settings[i].i2c_reg_index <= 0x20) {
-
-			buffer[0] = settings->reg_settings[i].i2c_reg_index;
-			buffer[1] = settings->reg_settings[i].i2c_reg_val;
-			i2c_success = i2c_write(pipe_ctx, slave_address,
-						buffer, sizeof(buffer));
-			RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
-				offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
-				slave_address, buffer[0], buffer[1], i2c_success?1:0);
-
-			if (!i2c_success)
-				goto i2c_write_fail;
-
-			/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
-			 * needs to be set to 1 on every 0xA-0xC write.
-			 */
-			if (settings->reg_settings[i].i2c_reg_index == 0xA ||
-				settings->reg_settings[i].i2c_reg_index == 0xB ||
-				settings->reg_settings[i].i2c_reg_index == 0xC) {
-
-				/* Query current value from offset 0xA */
-				if (settings->reg_settings[i].i2c_reg_index == 0xA)
-					value = settings->reg_settings[i].i2c_reg_val;
-				else {
-					i2c_success =
-						dal_ddc_service_query_ddc_data(
-						pipe_ctx->stream->link->ddc,
-						slave_address, &offset, 1, &value, 1);
-					if (!i2c_success)
-						goto i2c_write_fail;
-				}
-
-				buffer[0] = offset;
-				/* Set APPLY_RX_TX_CHANGE bit to 1 */
-				buffer[1] = value | apply_rx_tx_change;
-				i2c_success = i2c_write(pipe_ctx, slave_address,
-							buffer, sizeof(buffer));
-				RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
-					offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
-					slave_address, buffer[0], buffer[1], i2c_success?1:0);
-				if (!i2c_success)
-					goto i2c_write_fail;
-			}
-		}
-	}
-
-	/* Apply 3G settings */
-	if (is_over_340mhz) {
-		for (i = 0; i < settings->reg_num_6g; i++) {
-			/* Apply 3G settings */
-			if (settings->reg_settings[i].i2c_reg_index <= 0x20) {
-
-				buffer[0] = settings->reg_settings_6g[i].i2c_reg_index;
-				buffer[1] = settings->reg_settings_6g[i].i2c_reg_val;
-				i2c_success = i2c_write(pipe_ctx, slave_address,
-							buffer, sizeof(buffer));
-				RETIMER_REDRIVER_INFO("above 340Mhz: retimer write to slave_address = 0x%x,\
-					offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
-					slave_address, buffer[0], buffer[1], i2c_success?1:0);
-
-				if (!i2c_success)
-					goto i2c_write_fail;
-
-				/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
-				 * needs to be set to 1 on every 0xA-0xC write.
-				 */
-				if (settings->reg_settings_6g[i].i2c_reg_index == 0xA ||
-					settings->reg_settings_6g[i].i2c_reg_index == 0xB ||
-					settings->reg_settings_6g[i].i2c_reg_index == 0xC) {
-
-					/* Query current value from offset 0xA */
-					if (settings->reg_settings_6g[i].i2c_reg_index == 0xA)
-						value = settings->reg_settings_6g[i].i2c_reg_val;
-					else {
-						i2c_success =
-							dal_ddc_service_query_ddc_data(
-							pipe_ctx->stream->link->ddc,
-							slave_address, &offset, 1, &value, 1);
-						if (!i2c_success)
-							goto i2c_write_fail;
-					}
-
-					buffer[0] = offset;
-					/* Set APPLY_RX_TX_CHANGE bit to 1 */
-					buffer[1] = value | apply_rx_tx_change;
-					i2c_success = i2c_write(pipe_ctx, slave_address,
-								buffer, sizeof(buffer));
-					RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
-						offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
-						slave_address, buffer[0], buffer[1], i2c_success?1:0);
-					if (!i2c_success)
-						goto i2c_write_fail;
-				}
-			}
-		}
-	}
-
-	if (is_vga_mode) {
-		/* Program additional settings if using 640x480 resolution */
-
-		/* Write offset 0xFF to 0x01 */
-		buffer[0] = 0xff;
-		buffer[1] = 0x01;
-		i2c_success = i2c_write(pipe_ctx, slave_address,
-					buffer, sizeof(buffer));
-		RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
-			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
-			slave_address, buffer[0], buffer[1], i2c_success?1:0);
-		if (!i2c_success)
-			goto i2c_write_fail;
-
-		/* Write offset 0x00 to 0x23 */
-		buffer[0] = 0x00;
-		buffer[1] = 0x23;
-		i2c_success = i2c_write(pipe_ctx, slave_address,
-					buffer, sizeof(buffer));
-		RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
-			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
-			slave_address, buffer[0], buffer[1], i2c_success?1:0);
-		if (!i2c_success)
-			goto i2c_write_fail;
-
-		/* Write offset 0xff to 0x00 */
-		buffer[0] = 0xff;
-		buffer[1] = 0x00;
-		i2c_success = i2c_write(pipe_ctx, slave_address,
-					buffer, sizeof(buffer));
-		RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
-			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
-			slave_address, buffer[0], buffer[1], i2c_success?1:0);
-		if (!i2c_success)
-			goto i2c_write_fail;
-
-	}
-
-	return;
-
-i2c_write_fail:
-	DC_LOG_DEBUG("Set retimer failed");
-}
-
-static void write_i2c_default_retimer_setting(
-		struct pipe_ctx *pipe_ctx,
-		bool is_vga_mode,
-		bool is_over_340mhz)
-{
-	uint8_t slave_address = (0xBA >> 1);
-	uint8_t buffer[2];
-	bool i2c_success = false;
-	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
-
-	memset(&buffer, 0, sizeof(buffer));
-
-	/* Program Slave Address for tuning single integrity */
-	/* Write offset 0x0A to 0x13 */
-	buffer[0] = 0x0A;
-	buffer[1] = 0x13;
-	i2c_success = i2c_write(pipe_ctx, slave_address,
-				buffer, sizeof(buffer));
-	RETIMER_REDRIVER_INFO("retimer writes default setting to slave_address = 0x%x,\
-		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
-		slave_address, buffer[0], buffer[1], i2c_success?1:0);
-	if (!i2c_success)
-		goto i2c_write_fail;
-
-	/* Write offset 0x0A to 0x17 */
-	buffer[0] = 0x0A;
-	buffer[1] = 0x17;
-	i2c_success = i2c_write(pipe_ctx, slave_address,
-				buffer, sizeof(buffer));
-	RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
-		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
-		slave_address, buffer[0], buffer[1], i2c_success?1:0);
-	if (!i2c_success)
-		goto i2c_write_fail;
-
-	/* Write offset 0x0B to 0xDA or 0xD8 */
-	buffer[0] = 0x0B;
-	buffer[1] = is_over_340mhz ? 0xDA : 0xD8;
0xDA : 0xD8; - i2c_success = i2c_write(pipe_ctx, slave_address, - buffer, sizeof(buffer)); - RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ - offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", - slave_address, buffer[0], buffer[1], i2c_success?1:0); - if (!i2c_success) - goto i2c_write_fail; - - /* Write offset 0x0A to 0x17 */ - buffer[0] = 0x0A; - buffer[1] = 0x17; - i2c_success = i2c_write(pipe_ctx, slave_address, - buffer, sizeof(buffer)); - RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ - offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", - slave_address, buffer[0], buffer[1], i2c_success?1:0); - if (!i2c_success) - goto i2c_write_fail; - - /* Write offset 0x0C to 0x1D or 0x91 */ - buffer[0] = 0x0C; - buffer[1] = is_over_340mhz ? 0x1D : 0x91; - i2c_success = i2c_write(pipe_ctx, slave_address, - buffer, sizeof(buffer)); - RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ - offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", - slave_address, buffer[0], buffer[1], i2c_success?1:0); - if (!i2c_success) - goto i2c_write_fail; - - /* Write offset 0x0A to 0x17 */ - buffer[0] = 0x0A; - buffer[1] = 0x17; - i2c_success = i2c_write(pipe_ctx, slave_address, - buffer, sizeof(buffer)); - RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ - offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", - slave_address, buffer[0], buffer[1], i2c_success?1:0); - if (!i2c_success) - goto i2c_write_fail; - - - if (is_vga_mode) { - /* Program additional settings if using 640x480 resolution */ - - /* Write offset 0xFF to 0x01 */ - buffer[0] = 0xff; - buffer[1] = 0x01; - i2c_success = i2c_write(pipe_ctx, slave_address, - buffer, sizeof(buffer)); - RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ - offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", - slave_address, buffer[0], buffer[1], i2c_success?1:0); - if (!i2c_success) - goto i2c_write_fail; - - /* Write offset 0x00 to 0x23 */ - buffer[0] = 0x00; - buffer[1] = 0x23; - i2c_success = i2c_write(pipe_ctx, slave_address, - buffer, sizeof(buffer)); - RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ - offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", - slave_address, buffer[0], buffer[1], i2c_success?1:0); - if (!i2c_success) - goto i2c_write_fail; - - /* Write offset 0xff to 0x00 */ - buffer[0] = 0xff; - buffer[1] = 0x00; - i2c_success = i2c_write(pipe_ctx, slave_address, - buffer, sizeof(buffer)); - RETIMER_REDRIVER_INFO("retimer write default setting to slave_addr = 0x%x,\ - offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n", - slave_address, buffer[0], buffer[1], i2c_success?1:0); - if (!i2c_success) - goto i2c_write_fail; - } - - return; - -i2c_write_fail: - DC_LOG_DEBUG("Set default retimer failed"); -} - -static void write_i2c_redriver_setting( - struct pipe_ctx *pipe_ctx, - bool is_over_340mhz) -{ - uint8_t slave_address = (0xF0 >> 1); - uint8_t buffer[16]; - bool i2c_success = false; - DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); - - memset(&buffer, 0, sizeof(buffer)); - - // Program Slave Address for tuning single integrity - buffer[3] = 0x4E; - buffer[4] = 0x4E; - buffer[5] = 0x4E; - buffer[6] = is_over_340mhz ? 
0x4E : 0x4A; - - i2c_success = i2c_write(pipe_ctx, slave_address, - buffer, sizeof(buffer)); - RETIMER_REDRIVER_INFO("redriver write 0 to all 16 reg offset expect following:\n\ - \t slave_addr = 0x%x, offset[3] = 0x%x, offset[4] = 0x%x,\ - offset[5] = 0x%x,offset[6] is_over_340mhz = 0x%x,\ - i2c_success = %d\n", - slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0); - - if (!i2c_success) - DC_LOG_DEBUG("Set redriver failed"); -} - -static void disable_link(struct dc_link *link, const struct link_resource *link_res, - enum signal_type signal) -{ - /* - * TODO: implement call for dp_set_hw_test_pattern - * it is needed for compliance testing - */ - - /* Here we need to specify that encoder output settings - * need to be calculated as for the set mode, - * it will lead to querying dynamic link capabilities - * which should be done before enable output - */ - - if (dc_is_dp_signal(signal)) { - /* SST DP, eDP */ - struct dc_link_settings link_settings = link->cur_link_settings; - if (dc_is_dp_sst_signal(signal)) - dp_disable_link_phy(link, link_res, signal); - else - dp_disable_link_phy_mst(link, link_res, signal); - - if (dc_is_dp_sst_signal(signal) || - link->mst_stream_alloc_table.stream_count == 0) { - if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) { - dp_set_fec_enable(link, false); - dp_set_fec_ready(link, link_res, false); - } - } - } else if (signal != SIGNAL_TYPE_VIRTUAL) { - link->dc->hwss.disable_link_output(link, link_res, signal); - } - - if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - /* MST disable link only when no stream use the link */ - if (link->mst_stream_alloc_table.stream_count <= 0) - link->link_status.link_active = false; - } else { - link->link_status.link_active = false; - } -} - -static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) -{ - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; - enum dc_color_depth display_color_depth; - enum engine_id eng_id; - struct ext_hdmi_settings settings = {0}; - bool is_over_340mhz = false; - bool is_vga_mode = (stream->timing.h_addressable == 640) - && (stream->timing.v_addressable == 480); - struct dc *dc = pipe_ctx->stream->ctx->dc; - - if (stream->phy_pix_clk == 0) - stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; - if (stream->phy_pix_clk > 340000) - is_over_340mhz = true; - - if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { - unsigned short masked_chip_caps = pipe_ctx->stream->link->chip_caps & - EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK; - if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) { - /* DP159, Retimer settings */ - eng_id = pipe_ctx->stream_res.stream_enc->id; - - if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) { - write_i2c_retimer_setting(pipe_ctx, - is_vga_mode, is_over_340mhz, &settings); - } else { - write_i2c_default_retimer_setting(pipe_ctx, - is_vga_mode, is_over_340mhz); - } - } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) { - /* PI3EQX1204, Redriver settings */ - write_i2c_redriver_setting(pipe_ctx, is_over_340mhz); - } - } - - if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) - dal_ddc_service_write_scdc_data( - stream->link->ddc, - stream->phy_pix_clk, - stream->timing.flags.LTE_340MCSC_SCRAMBLE); - - memset(&stream->link->cur_link_settings, 0, - sizeof(struct dc_link_settings)); - - display_color_depth = stream->timing.display_color_depth; - if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) - display_color_depth = COLOR_DEPTH_888; - - 
dc->hwss.enable_tmds_link_output( - link, - &pipe_ctx->link_res, - pipe_ctx->stream->signal, - pipe_ctx->clock_source->id, - display_color_depth, - stream->phy_pix_clk); - - if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) - dal_ddc_service_read_scdc_data(link->ddc); -} - -static void enable_link_lvds(struct pipe_ctx *pipe_ctx) -{ - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; - struct dc *dc = stream->ctx->dc; - - if (stream->phy_pix_clk == 0) - stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; - - memset(&stream->link->cur_link_settings, 0, - sizeof(struct dc_link_settings)); - dc->hwss.enable_lvds_link_output( - link, - &pipe_ctx->link_res, - pipe_ctx->clock_source->id, - stream->phy_pix_clk); - -} - -bool dc_power_alpm_dpcd_enable(struct dc_link *link, bool enable) -{ - bool ret = false; - union dpcd_alpm_configuration alpm_config; - - if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { - memset(&alpm_config, 0, sizeof(alpm_config)); - - alpm_config.bits.ENABLE = (enable ? true : false); - ret = dm_helpers_dp_write_dpcd(link->ctx, link, - DP_RECEIVER_ALPM_CONFIG, &alpm_config.raw, - sizeof(alpm_config.raw)); - } - return ret; -} - -/****************************enable_link***********************************/ -static enum dc_status enable_link( - struct dc_state *state, - struct pipe_ctx *pipe_ctx) -{ - enum dc_status status = DC_ERROR_UNEXPECTED; - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; - - /* There's some scenarios where driver is unloaded with display - * still enabled. When driver is reloaded, it may cause a display - * to not light up if there is a mismatch between old and new - * link settings. Need to call disable first before enabling at - * new link settings. 
- */ - if (link->link_status.link_active) { - disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal); - } - - switch (pipe_ctx->stream->signal) { - case SIGNAL_TYPE_DISPLAY_PORT: - status = enable_link_dp(state, pipe_ctx); - break; - case SIGNAL_TYPE_EDP: - status = enable_link_edp(state, pipe_ctx); - break; - case SIGNAL_TYPE_DISPLAY_PORT_MST: - status = enable_link_dp_mst(state, pipe_ctx); - msleep(200); - break; - case SIGNAL_TYPE_DVI_SINGLE_LINK: - case SIGNAL_TYPE_DVI_DUAL_LINK: - case SIGNAL_TYPE_HDMI_TYPE_A: - enable_link_hdmi(pipe_ctx); - status = DC_OK; - break; - case SIGNAL_TYPE_LVDS: - enable_link_lvds(pipe_ctx); - status = DC_OK; - break; - case SIGNAL_TYPE_VIRTUAL: - status = DC_OK; - break; - default: - break; - } - - if (status == DC_OK) - pipe_ctx->stream->link->link_status.link_active = true; - - return status; -} - -static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing) -{ - - uint32_t pxl_clk = timing->pix_clk_100hz; - - if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) - pxl_clk /= 2; - else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) - pxl_clk = pxl_clk * 2 / 3; - - if (timing->display_color_depth == COLOR_DEPTH_101010) - pxl_clk = pxl_clk * 10 / 8; - else if (timing->display_color_depth == COLOR_DEPTH_121212) - pxl_clk = pxl_clk * 12 / 8; - - return pxl_clk; -} - -static bool dp_active_dongle_validate_timing( - const struct dc_crtc_timing *timing, - const struct dpcd_caps *dpcd_caps) -{ - const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps; - - switch (dpcd_caps->dongle_type) { - case DISPLAY_DONGLE_DP_VGA_CONVERTER: - case DISPLAY_DONGLE_DP_DVI_CONVERTER: - case DISPLAY_DONGLE_DP_DVI_DONGLE: - if (timing->pixel_encoding == PIXEL_ENCODING_RGB) - return true; - else - return false; - default: - break; - } - - if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER && - dongle_caps->extendedCapValid == true) { - /* Check Pixel Encoding */ - switch (timing->pixel_encoding) { - case PIXEL_ENCODING_RGB: - case PIXEL_ENCODING_YCBCR444: - break; - case PIXEL_ENCODING_YCBCR422: - if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through) - return false; - break; - case PIXEL_ENCODING_YCBCR420: - if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through) - return false; - break; - default: - /* Invalid Pixel Encoding*/ - return false; - } - - switch (timing->display_color_depth) { - case COLOR_DEPTH_666: - case COLOR_DEPTH_888: - /*888 and 666 should always be supported*/ - break; - case COLOR_DEPTH_101010: - if (dongle_caps->dp_hdmi_max_bpc < 10) - return false; - break; - case COLOR_DEPTH_121212: - if (dongle_caps->dp_hdmi_max_bpc < 12) - return false; - break; - case COLOR_DEPTH_141414: - case COLOR_DEPTH_161616: - default: - /* These color depths are currently not supported */ - return false; - } - - /* Check 3D format */ - switch (timing->timing_3d_format) { - case TIMING_3D_FORMAT_NONE: - case TIMING_3D_FORMAT_FRAME_ALTERNATE: - /*Only frame alternate 3D is supported on active dongle*/ - break; - default: - /*other 3D formats are not supported due to bad infoframe translation */ - return false; - } - -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter - struct dc_crtc_timing outputTiming = *timing; - - if (timing->flags.DSC && !timing->dsc_cfg.is_frl) - /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ - outputTiming.flags.DSC = 0; - if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > 
-					dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
-				return false;
-		} else { // DP to HDMI TMDS converter
-			if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
-				return false;
-		}
-#else
-		if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
-			return false;
-#endif
-	}
-
-	if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 &&
-			dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT == 0 &&
-			dongle_caps->dfp_cap_ext.supported) {
-
-		if (dongle_caps->dfp_cap_ext.max_pixel_rate_in_mps < (timing->pix_clk_100hz / 10000))
-			return false;
-
-		if (dongle_caps->dfp_cap_ext.max_video_h_active_width < timing->h_addressable)
-			return false;
-
-		if (dongle_caps->dfp_cap_ext.max_video_v_active_height < timing->v_addressable)
-			return false;
-
-		if (timing->pixel_encoding == PIXEL_ENCODING_RGB) {
-			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
-				return false;
-			if (timing->display_color_depth == COLOR_DEPTH_666 &&
-					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_6bpc)
-				return false;
-			else if (timing->display_color_depth == COLOR_DEPTH_888 &&
-					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_8bpc)
-				return false;
-			else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
-					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_10bpc)
-				return false;
-			else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
-					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_12bpc)
-				return false;
-			else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
-					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_16bpc)
-				return false;
-		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) {
-			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
-				return false;
-			if (timing->display_color_depth == COLOR_DEPTH_888 &&
-					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_8bpc)
-				return false;
-			else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
-					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_10bpc)
-				return false;
-			else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
-					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_12bpc)
-				return false;
-			else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
-					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_16bpc)
-				return false;
-		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
-			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
-				return false;
-			if (timing->display_color_depth == COLOR_DEPTH_888 &&
-					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_8bpc)
-				return false;
-			else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
-					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_10bpc)
-				return false;
-			else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
-					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_12bpc)
-				return false;
-			else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
-					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_16bpc)
-				return false;
-		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
-			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
-				return false;
-			if (timing->display_color_depth == COLOR_DEPTH_888 &&
-					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_8bpc)
-				return false;
-			else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
-					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_10bpc)
-				return false;
-			else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
-					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_12bpc)
-				return false;
-			else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
-					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_16bpc)
-				return false;
-		}
-	}
-
-	return true;
-}
-
-enum dc_status dc_link_validate_mode_timing(
-		const struct dc_stream_state *stream,
-		struct dc_link *link,
-		const struct dc_crtc_timing *timing)
-{
-	uint32_t max_pix_clk = stream->link->dongle_max_pix_clk * 10;
-	struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
-
-	/* A hack to avoid failing any modes for EDID override feature on
-	 * topology change such as lower quality cable for DP or different dongle
-	 */
-	if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL)
-		return DC_OK;
-
-	/* Passive Dongle */
-	if (max_pix_clk != 0 && get_timing_pixel_clock_100hz(timing) > max_pix_clk)
-		return DC_EXCEED_DONGLE_CAP;
-
-	/* Active Dongle*/
-	if (!dp_active_dongle_validate_timing(timing, dpcd_caps))
-		return DC_EXCEED_DONGLE_CAP;
-
-	switch (stream->signal) {
-	case SIGNAL_TYPE_EDP:
-	case SIGNAL_TYPE_DISPLAY_PORT:
-		if (!dp_validate_mode_timing(
-				link,
-				timing))
-			return DC_NO_DP_LINK_BANDWIDTH;
-		break;
-
-	default:
-		break;
-	}
-
-	return DC_OK;
-}
-
-static struct abm *get_abm_from_stream_res(const struct dc_link *link)
-{
-	int i;
-	struct dc *dc = NULL;
-	struct abm *abm = NULL;
-
-	if (!link || !link->ctx)
-		return NULL;
-
-	dc = link->ctx->dc;
-
-	for (i = 0; i < MAX_PIPES; i++) {
-		struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
-		struct dc_stream_state *stream = pipe_ctx.stream;
-
-		if (stream && stream->link == link) {
-			abm = pipe_ctx.stream_res.abm;
-			break;
-		}
-	}
-	return abm;
-}
-
-int dc_link_get_backlight_level(const struct dc_link *link)
-{
-	struct abm *abm = get_abm_from_stream_res(link);
-	struct panel_cntl *panel_cntl = link->panel_cntl;
-	struct dc *dc = link->ctx->dc;
-	struct dmcu *dmcu = dc->res_pool->dmcu;
-	bool fw_set_brightness = true;
-
-	if (dmcu)
-		fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
-
-	if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
-		return panel_cntl->funcs->get_current_backlight(panel_cntl);
-	else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
-		return (int) abm->funcs->get_current_backlight(abm);
-	else
-		return DC_ERROR_UNEXPECTED;
-}
-
-int dc_link_get_target_backlight_pwm(const struct dc_link *link)
-{
-	struct abm *abm = get_abm_from_stream_res(link);
-
-	if (abm == NULL || abm->funcs->get_target_backlight == NULL)
-		return DC_ERROR_UNEXPECTED;
-
-	return (int) abm->funcs->get_target_backlight(abm);
-}
-
-static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link)
-{
-	int i;
-	struct dc *dc = link->ctx->dc;
-	struct pipe_ctx *pipe_ctx = NULL;
-
-	for (i = 0; i < MAX_PIPES; i++) {
-		if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
-			if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) {
-				pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-				break;
-			}
-		}
-	}
-
-	return pipe_ctx;
-}
-
-bool dc_link_set_backlight_level(const struct dc_link *link,
-		uint32_t backlight_pwm_u16_16,
-		uint32_t frame_ramp)
-{
-	struct dc *dc = link->ctx->dc;
-
-	DC_LOGGER_INIT(link->ctx->logger);
-	DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
-			backlight_pwm_u16_16, backlight_pwm_u16_16);
-
-	if (dc_is_embedded_signal(link->connector_signal)) {
-		struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);
-
-		if (pipe_ctx) {
-			/* Disable brightness ramping when the display is blanked
-			 * as it can hang the DMCU
-			 */
-			if (pipe_ctx->plane_state == NULL)
-				frame_ramp = 0;
-		} else {
-			return false;
-		}
-
-		dc->hwss.set_backlight_level(
-				pipe_ctx,
-				backlight_pwm_u16_16,
-				frame_ramp);
-	}
-	return true;
-}
-
-bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
-		bool wait, bool force_static, const unsigned int *power_opts)
-{
-	struct dc *dc = link->ctx->dc;
-	struct dmcu *dmcu = dc->res_pool->dmcu;
-	struct dmub_psr *psr = dc->res_pool->psr;
-	unsigned int panel_inst;
-
-	if (psr == NULL && force_static)
-		return false;
-
-	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
-		return false;
-
-	if ((allow_active != NULL) && (*allow_active == true) && (link->type == dc_connection_none)) {
-		// Don't enter PSR if panel is not connected
-		return false;
-	}
-
-	/* Set power optimization flag */
-	if (power_opts && link->psr_settings.psr_power_opt != *power_opts) {
-		link->psr_settings.psr_power_opt = *power_opts;
-
-		if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt)
-			psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst);
-	}
-
-	if (psr != NULL && link->psr_settings.psr_feature_enabled &&
-			force_static && psr->funcs->psr_force_static)
-		psr->funcs->psr_force_static(psr, panel_inst);
-
-	/* Enable or Disable PSR */
-	if (allow_active && link->psr_settings.psr_allow_active != *allow_active) {
-		link->psr_settings.psr_allow_active = *allow_active;
-
-		if (!link->psr_settings.psr_allow_active)
-			dc_z10_restore(dc);
-
-		if (psr != NULL && link->psr_settings.psr_feature_enabled) {
-			psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst);
-		} else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) &&
-				link->psr_settings.psr_feature_enabled)
-			dmcu->funcs->set_psr_enable(dmcu, link->psr_settings.psr_allow_active, wait);
-		else
-			return false;
-	}
-
-	return true;
-}
-
-bool dc_link_get_psr_state(const struct dc_link *link, enum dc_psr_state *state)
-{
-	struct dc *dc = link->ctx->dc;
-	struct dmcu *dmcu = dc->res_pool->dmcu;
-	struct dmub_psr *psr = dc->res_pool->psr;
-	unsigned int panel_inst;
-
-	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
-		return false;
-
-	if (psr != NULL && link->psr_settings.psr_feature_enabled)
-		psr->funcs->psr_get_state(psr, state, panel_inst);
-	else if (dmcu != NULL && link->psr_settings.psr_feature_enabled)
-		dmcu->funcs->get_psr_state(dmcu, state);
-
-	return true;
-}
-
-static inline enum physical_phy_id
-transmitter_to_phy_id(enum transmitter transmitter_value)
-{
-	switch (transmitter_value) {
-	case TRANSMITTER_UNIPHY_A:
-		return PHYLD_0;
-	case TRANSMITTER_UNIPHY_B:
-		return PHYLD_1;
-	case TRANSMITTER_UNIPHY_C:
-		return PHYLD_2;
-	case TRANSMITTER_UNIPHY_D:
-		return PHYLD_3;
-	case TRANSMITTER_UNIPHY_E:
-		return PHYLD_4;
-	case TRANSMITTER_UNIPHY_F:
-		return PHYLD_5;
-	case TRANSMITTER_NUTMEG_CRT:
-		return PHYLD_6;
-	case TRANSMITTER_TRAVIS_CRT:
-		return PHYLD_7;
-	case TRANSMITTER_TRAVIS_LCD:
-		return PHYLD_8;
-	case TRANSMITTER_UNIPHY_G:
-		return PHYLD_9;
-	case TRANSMITTER_COUNT:
-		return PHYLD_COUNT;
-	case TRANSMITTER_UNKNOWN:
-		return PHYLD_UNKNOWN;
-	default:
-		WARN_ONCE(1, "Unknown transmitter value %d\n",
-			  transmitter_value);
-		return PHYLD_UNKNOWN;
-	}
-}
-
-bool dc_link_setup_psr(struct dc_link *link,
-		const struct dc_stream_state *stream, struct psr_config *psr_config,
-		struct psr_context *psr_context)
-{
-	struct dc *dc;
-	struct dmcu *dmcu;
-	struct dmub_psr *psr;
-	int i;
-	unsigned int panel_inst;
-	/* updateSinkPsrDpcdConfig*/
-	union dpcd_psr_configuration psr_configuration;
-	union dpcd_sink_active_vtotal_control_mode vtotal_control = {0};
-
-	psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
-
-	if (!link)
-		return false;
-
-	dc = link->ctx->dc;
-	dmcu = dc->res_pool->dmcu;
-	psr = dc->res_pool->psr;
-
-	if (!dmcu && !psr)
-		return false;
-
-	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
-		return false;
-
-
-	memset(&psr_configuration, 0, sizeof(psr_configuration));
-
-	psr_configuration.bits.ENABLE = 1;
-	psr_configuration.bits.CRC_VERIFICATION = 1;
-	psr_configuration.bits.FRAME_CAPTURE_INDICATION =
-			psr_config->psr_frame_capture_indication_req;
-
-	/* Check for PSR v2*/
-	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
-		/* For PSR v2 selective update.
-		 * Indicates whether sink should start capturing
-		 * immediately following active scan line,
-		 * or starting with the 2nd active scan line.
-		 */
-		psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
-		/*For PSR v2, determines whether Sink should generate
-		 * IRQ_HPD when CRC mismatch is detected.
-		 */
-		psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
-		/* For PSR v2, set the bit when the Source device will
-		 * be enabling PSR2 operation.
-		 */
-		psr_configuration.bits.ENABLE_PSR2 = 1;
-		/* For PSR v2, the Sink device must be able to receive
-		 * SU region updates early in the frame time.
-		 */
-		psr_configuration.bits.EARLY_TRANSPORT_ENABLE = 1;
-	}
-
-	dm_helpers_dp_write_dpcd(
-		link->ctx,
-		link,
-		368,
-		&psr_configuration.raw,
-		sizeof(psr_configuration.raw));
-
-	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
-		dc_power_alpm_dpcd_enable(link, true);
-		psr_context->su_granularity_required =
-			psr_config->su_granularity_required;
-		psr_context->su_y_granularity =
-			psr_config->su_y_granularity;
-		psr_context->line_time_in_us =
-			psr_config->line_time_in_us;
-
-		if (link->psr_settings.psr_vtotal_control_support) {
-			psr_context->rate_control_caps = psr_config->rate_control_caps;
-			vtotal_control.bits.ENABLE = true;
-			core_link_write_dpcd(link, DP_SINK_PSR_ACTIVE_VTOTAL_CONTROL_MODE,
-					     &vtotal_control.raw, sizeof(vtotal_control.raw));
-		}
-	}
-
-	psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
-	psr_context->transmitterId = link->link_enc->transmitter;
-	psr_context->engineId = link->link_enc->preferred_engine;
-
-	for (i = 0; i < MAX_PIPES; i++) {
-		if (dc->current_state->res_ctx.pipe_ctx[i].stream
-				== stream) {
-			/* dmcu -1 for all controller id values,
-			 * therefore +1 here
-			 */
-			psr_context->controllerId =
-				dc->current_state->res_ctx.
-				pipe_ctx[i].stream_res.tg->inst + 1;
-			break;
-		}
-	}
-
-	/* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/
Can be Pcie or Uniphy (or Unknown)*/ - psr_context->phyType = PHY_TYPE_UNIPHY; - /*PhyId is associated with the transmitter id*/ - psr_context->smuPhyId = - transmitter_to_phy_id(link->link_enc->transmitter); - - psr_context->crtcTimingVerticalTotal = stream->timing.v_total; - psr_context->vsync_rate_hz = div64_u64(div64_u64((stream-> - timing.pix_clk_100hz * 100), - stream->timing.v_total), - stream->timing.h_total); - - psr_context->psrSupportedDisplayConfig = true; - psr_context->psrExitLinkTrainingRequired = - psr_config->psr_exit_link_training_required; - psr_context->sdpTransmitLineNumDeadline = - psr_config->psr_sdp_transmit_line_num_deadline; - psr_context->psrFrameCaptureIndicationReq = - psr_config->psr_frame_capture_indication_req; - - psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */ - - psr_context->numberOfControllers = - link->dc->res_pool->timing_generator_count; - - psr_context->rfb_update_auto_en = true; - - /* 2 frames before enter PSR. */ - psr_context->timehyst_frames = 2; - /* half a frame - * (units in 100 lines, i.e. a value of 1 represents 100 lines) - */ - psr_context->hyst_lines = stream->timing.v_total / 2 / 100; - psr_context->aux_repeats = 10; - - psr_context->psr_level.u32all = 0; - - /*skip power down the single pipe since it blocks the cstate*/ -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (link->ctx->asic_id.chip_family >= FAMILY_RV) { - switch(link->ctx->asic_id.chip_family) { - case FAMILY_YELLOW_CARP: - case AMDGPU_FAMILY_GC_10_3_6: - case AMDGPU_FAMILY_GC_11_0_1: - if (dc->debug.disable_z10 || dc->debug.psr_skip_crtc_disable) - psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; - break; - default: - psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; - break; - } - } -#else - if (link->ctx->asic_id.chip_family >= FAMILY_RV) - psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; -#endif - - /* SMU will perform additional powerdown sequence. - * For unsupported ASICs, set psr_level flag to skip PSR - * static screen notification to SMU. 
- * (Always set for DAL2, did not check ASIC) - */ - psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations; - psr_context->allow_multi_disp_optimizations = psr_config->allow_multi_disp_optimizations; - - /* Complete PSR entry before aborting to prevent intermittent - * freezes on certain eDPs - */ - psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1; - - /* enable ALPM */ - psr_context->psr_level.bits.DISABLE_ALPM = 0; - psr_context->psr_level.bits.ALPM_DEFAULT_PD_MODE = 1; - - /* Controls additional delay after remote frame capture before - * continuing power down, default = 0 - */ - psr_context->frame_delay = 0; - - if (psr) { - link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr, - link, psr_context, panel_inst); - link->psr_settings.psr_power_opt = 0; - link->psr_settings.psr_allow_active = 0; - } - else - link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); - - /* psr_enabled == 0 indicates setup_psr did not succeed, but this - * should not happen since firmware should be running at this point - */ - if (link->psr_settings.psr_feature_enabled == 0) - ASSERT(0); - - return true; - -} - -void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency) -{ - struct dc *dc = link->ctx->dc; - struct dmub_psr *psr = dc->res_pool->psr; - unsigned int panel_inst; - - if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) - return; - - /* PSR residency measurements only supported on DMCUB */ - if (psr != NULL && link->psr_settings.psr_feature_enabled) - psr->funcs->psr_get_residency(psr, residency, panel_inst); - else - *residency = 0; -} - -bool dc_link_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su) -{ - struct dc *dc = link->ctx->dc; - struct dmub_psr *psr = dc->res_pool->psr; - - if (psr == NULL || !link->psr_settings.psr_feature_enabled || !link->psr_settings.psr_vtotal_control_support) - return false; - - psr->funcs->psr_set_sink_vtotal_in_psr_active(psr, psr_vtotal_idle, psr_vtotal_su); - - return true; -} - -const struct dc_link_status *dc_link_get_status(const struct dc_link *link) -{ - return &link->link_status; -} - -void core_link_resume(struct dc_link *link) -{ - if (link->connector_signal != SIGNAL_TYPE_VIRTUAL) - program_hpd_filter(link); -} - -static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream) -{ - struct fixed31_32 mbytes_per_sec; - uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link, - &stream->link->cur_link_settings); - link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */ - - mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec); - - return dc_fixpt_div_int(mbytes_per_sec, 54); -} - -static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps) -{ - struct fixed31_32 peak_kbps; - uint32_t numerator = 0; - uint32_t denominator = 1; - - /* - * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 - * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on - * common multiplier to render an integer PBN for all link rate/lane - * counts combinations - * calculate - * peak_kbps *= (1006/1000) - * peak_kbps *= (64/54) - * peak_kbps *= 8 convert to bytes - */ - - numerator = 64 * PEAK_FACTOR_X1000; - denominator = 54 * 8 * 1000 * 1000; - kbps *= numerator; - peak_kbps = dc_fixpt_from_fraction(kbps, denominator); - - return peak_kbps; -} - -static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) -{ - uint64_t kbps; - - kbps = 
dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing); - return get_pbn_from_bw_in_kbps(kbps); -} - -static void update_mst_stream_alloc_table( - struct dc_link *link, - struct stream_encoder *stream_enc, - struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc? - const struct dc_dp_mst_stream_allocation_table *proposed_table) -{ - struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 }; - struct link_mst_stream_allocation *dc_alloc; - - int i; - int j; - - /* if DRM proposed_table has more than one new payload */ - ASSERT(proposed_table->stream_count - - link->mst_stream_alloc_table.stream_count < 2); - - /* copy proposed_table to link, add stream encoder */ - for (i = 0; i < proposed_table->stream_count; i++) { - - for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) { - dc_alloc = - &link->mst_stream_alloc_table.stream_allocations[j]; - - if (dc_alloc->vcp_id == - proposed_table->stream_allocations[i].vcp_id) { - - work_table[i] = *dc_alloc; - work_table[i].slot_count = proposed_table->stream_allocations[i].slot_count; - break; /* exit j loop */ - } - } - - /* new vcp_id */ - if (j == link->mst_stream_alloc_table.stream_count) { - work_table[i].vcp_id = - proposed_table->stream_allocations[i].vcp_id; - work_table[i].slot_count = - proposed_table->stream_allocations[i].slot_count; - work_table[i].stream_enc = stream_enc; - work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc; - } - } - - /* update link->mst_stream_alloc_table with work_table */ - link->mst_stream_alloc_table.stream_count = - proposed_table->stream_count; - for (i = 0; i < MAX_CONTROLLER_NUM; i++) - link->mst_stream_alloc_table.stream_allocations[i] = - work_table[i]; -} - -static void remove_stream_from_alloc_table( - struct dc_link *link, - struct stream_encoder *dio_stream_enc, - struct hpo_dp_stream_encoder *hpo_dp_stream_enc) -{ - int i = 0; - struct link_mst_stream_allocation_table *table = - &link->mst_stream_alloc_table; - - if (hpo_dp_stream_enc) { - for (; i < table->stream_count; i++) - if (hpo_dp_stream_enc == table->stream_allocations[i].hpo_dp_stream_enc) - break; - } else { - for (; i < table->stream_count; i++) - if (dio_stream_enc == table->stream_allocations[i].stream_enc) - break; - } - - if (i < table->stream_count) { - i++; - for (; i < table->stream_count; i++) - table->stream_allocations[i-1] = table->stream_allocations[i]; - memset(&table->stream_allocations[table->stream_count-1], 0, - sizeof(struct link_mst_stream_allocation)); - table->stream_count--; - } -} - -static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp) -{ - const uint32_t VCP_Y_PRECISION = 1000; - uint64_t vcp_x, vcp_y; - - // Add 0.5*(1/VCP_Y_PRECISION) to round up to decimal precision - avg_time_slots_per_mtp = dc_fixpt_add( - avg_time_slots_per_mtp, dc_fixpt_from_fraction(1, 2 * VCP_Y_PRECISION)); - - vcp_x = dc_fixpt_floor(avg_time_slots_per_mtp); - vcp_y = dc_fixpt_floor( - dc_fixpt_mul_int( - dc_fixpt_sub_int(avg_time_slots_per_mtp, dc_fixpt_floor(avg_time_slots_per_mtp)), - VCP_Y_PRECISION)); - - if (link->type == dc_connection_mst_branch) - DC_LOG_DP2("MST Update Payload: set_throttled_vcp_size slot X.Y for MST stream " - "X: %lld Y: %lld/%d", vcp_x, vcp_y, VCP_Y_PRECISION); - else - DC_LOG_DP2("SST Update Payload: set_throttled_vcp_size slot X.Y for SST stream " - "X: %lld Y: %lld/%d", vcp_x, vcp_y, VCP_Y_PRECISION); -} - -/* - * Payload allocation/deallocation for SST introduced in DP2.0 - */ -static enum 
dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, - bool allocate) -{ - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; - struct link_mst_stream_allocation_table proposed_table = {0}; - struct fixed31_32 avg_time_slots_per_mtp; - const struct dc_link_settings empty_link_settings = {0}; - const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); - DC_LOGGER_INIT(link->ctx->logger); - - /* slot X.Y for SST payload deallocate */ - if (!allocate) { - avg_time_slots_per_mtp = dc_fixpt_from_int(0); - - dc_log_vcp_x_y(link, avg_time_slots_per_mtp); - - if (link_hwss->ext.set_throttled_vcp_size) - link_hwss->ext.set_throttled_vcp_size(pipe_ctx, - avg_time_slots_per_mtp); - if (link_hwss->ext.set_hblank_min_symbol_width) - link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, - &empty_link_settings, - avg_time_slots_per_mtp); - } - - /* calculate VC payload and update branch with new payload allocation table*/ - if (!dpcd_write_128b_132b_sst_payload_allocation_table( - stream, - link, - &proposed_table, - allocate)) { - DC_LOG_ERROR("SST Update Payload: Failed to update " - "allocation table for " - "pipe idx: %d\n", - pipe_ctx->pipe_idx); - return DC_FAIL_DP_PAYLOAD_ALLOCATION; - } - - proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; - - ASSERT(proposed_table.stream_count == 1); - - //TODO - DP2.0 Logging: Instead of hpo_dp_stream_enc pointer, log instance id - DC_LOG_DP2("SST Update Payload: hpo_dp_stream_enc: %p " - "vcp_id: %d " - "slot_count: %d\n", - (void *) proposed_table.stream_allocations[0].hpo_dp_stream_enc, - proposed_table.stream_allocations[0].vcp_id, - proposed_table.stream_allocations[0].slot_count); - - /* program DP source TX for payload */ - link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, - &proposed_table); - - /* poll for ACT handled */ - if (!dpcd_poll_for_allocation_change_trigger(link)) { - // Failures will result in blackscreen and errors logged - BREAK_TO_DEBUGGER(); - } - - /* slot X.Y for SST payload allocate */ - if (allocate && dp_get_link_encoding_format(&link->cur_link_settings) == - DP_128b_132b_ENCODING) { - avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link); - - dc_log_vcp_x_y(link, avg_time_slots_per_mtp); - - if (link_hwss->ext.set_throttled_vcp_size) - link_hwss->ext.set_throttled_vcp_size(pipe_ctx, - avg_time_slots_per_mtp); - if (link_hwss->ext.set_hblank_min_symbol_width) - link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, - &link->cur_link_settings, - avg_time_slots_per_mtp); - } - - /* Always return DC_OK. - * If part of sequence fails, log failure(s) and show blackscreen - */ - return DC_OK; -} - -/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table - * because stream_encoder is not exposed to dm - */ -enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) -{ - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; - struct dc_dp_mst_stream_allocation_table proposed_table = {0}; - struct fixed31_32 avg_time_slots_per_mtp; - struct fixed31_32 pbn; - struct fixed31_32 pbn_per_slot; - int i; - enum act_return_status ret; - const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); - DC_LOGGER_INIT(link->ctx->logger); - - /* enable_link_dp_mst already check link->enabled_stream_count - * and stream is in link->stream[]. This is called during set mode, - * stream_enc is available. 
- */ - - /* calculate VC payload for stream: stream_alloc */ - if (dm_helpers_dp_mst_write_payload_allocation_table( - stream->ctx, - stream, - &proposed_table, - true)) - update_mst_stream_alloc_table( - link, - pipe_ctx->stream_res.stream_enc, - pipe_ctx->stream_res.hpo_dp_stream_enc, - &proposed_table); - else - DC_LOG_WARNING("Failed to update " - "MST allocation table for " - "pipe idx:%d\n", - pipe_ctx->pipe_idx); - - DC_LOG_MST("%s " - "stream_count: %d: \n ", - __func__, - link->mst_stream_alloc_table.stream_count); - - for (i = 0; i < MAX_CONTROLLER_NUM; i++) { - DC_LOG_MST("stream_enc[%d]: %p " - "stream[%d].hpo_dp_stream_enc: %p " - "stream[%d].vcp_id: %d " - "stream[%d].slot_count: %d\n", - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, - i, - link->mst_stream_alloc_table.stream_allocations[i].vcp_id, - i, - link->mst_stream_alloc_table.stream_allocations[i].slot_count); - } - - ASSERT(proposed_table.stream_count > 0); - - /* program DP source TX for payload */ - if (link_hwss->ext.update_stream_allocation_table == NULL || - dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { - DC_LOG_ERROR("Failure: unknown encoding format\n"); - return DC_ERROR_UNEXPECTED; - } - - link_hwss->ext.update_stream_allocation_table(link, - &pipe_ctx->link_res, - &link->mst_stream_alloc_table); - - /* send down message */ - ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( - stream->ctx, - stream); - - if (ret != ACT_LINK_LOST) { - dm_helpers_dp_mst_send_payload_allocation( - stream->ctx, - stream, - true); - } - - /* slot X.Y for only current stream */ - pbn_per_slot = get_pbn_per_slot(stream); - if (pbn_per_slot.value == 0) { - DC_LOG_ERROR("Failure: pbn_per_slot==0 not allowed. 
Cannot continue, returning DC_UNSUPPORTED_VALUE.\n"); - return DC_UNSUPPORTED_VALUE; - } - pbn = get_pbn_from_timing(pipe_ctx); - avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); - - dc_log_vcp_x_y(link, avg_time_slots_per_mtp); - - if (link_hwss->ext.set_throttled_vcp_size) - link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); - if (link_hwss->ext.set_hblank_min_symbol_width) - link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, - &link->cur_link_settings, - avg_time_slots_per_mtp); - - return DC_OK; - -} - -enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) -{ - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; - struct fixed31_32 avg_time_slots_per_mtp; - struct fixed31_32 pbn; - struct fixed31_32 pbn_per_slot; - struct dc_dp_mst_stream_allocation_table proposed_table = {0}; - uint8_t i; - const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); - DC_LOGGER_INIT(link->ctx->logger); - - /* decrease throttled vcp size */ - pbn_per_slot = get_pbn_per_slot(stream); - pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); - avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); - - if (link_hwss->ext.set_throttled_vcp_size) - link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); - if (link_hwss->ext.set_hblank_min_symbol_width) - link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, - &link->cur_link_settings, - avg_time_slots_per_mtp); - - /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ - dm_helpers_dp_mst_send_payload_allocation( - stream->ctx, - stream, - true); - - /* notify immediate branch device table update */ - if (dm_helpers_dp_mst_write_payload_allocation_table( - stream->ctx, - stream, - &proposed_table, - true)) { - /* update mst stream allocation table software state */ - update_mst_stream_alloc_table( - link, - pipe_ctx->stream_res.stream_enc, - pipe_ctx->stream_res.hpo_dp_stream_enc, - &proposed_table); - } else { - DC_LOG_WARNING("Failed to update " - "MST allocation table for " - "pipe idx:%d\n", - pipe_ctx->pipe_idx); - } - - DC_LOG_MST("%s " - "stream_count: %d: \n ", - __func__, - link->mst_stream_alloc_table.stream_count); - - for (i = 0; i < MAX_CONTROLLER_NUM; i++) { - DC_LOG_MST("stream_enc[%d]: %p " - "stream[%d].hpo_dp_stream_enc: %p " - "stream[%d].vcp_id: %d " - "stream[%d].slot_count: %d\n", - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, - i, - link->mst_stream_alloc_table.stream_allocations[i].vcp_id, - i, - link->mst_stream_alloc_table.stream_allocations[i].slot_count); - } - - ASSERT(proposed_table.stream_count > 0); - - /* update mst stream allocation table hardware state */ - if (link_hwss->ext.update_stream_allocation_table == NULL || - dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { - DC_LOG_ERROR("Failure: unknown encoding format\n"); - return DC_ERROR_UNEXPECTED; - } - - link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, - &link->mst_stream_alloc_table); - - /* poll for immediate branch device ACT handled */ - dm_helpers_dp_mst_poll_for_allocation_change_trigger( - stream->ctx, - stream); - - return DC_OK; -} - -enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) -{ - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; - struct fixed31_32 
avg_time_slots_per_mtp; - struct fixed31_32 pbn; - struct fixed31_32 pbn_per_slot; - struct dc_dp_mst_stream_allocation_table proposed_table = {0}; - uint8_t i; - enum act_return_status ret; - const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); - DC_LOGGER_INIT(link->ctx->logger); - - /* notify immediate branch device table update */ - if (dm_helpers_dp_mst_write_payload_allocation_table( - stream->ctx, - stream, - &proposed_table, - true)) { - /* update mst stream allocation table software state */ - update_mst_stream_alloc_table( - link, - pipe_ctx->stream_res.stream_enc, - pipe_ctx->stream_res.hpo_dp_stream_enc, - &proposed_table); - } - - DC_LOG_MST("%s " - "stream_count: %d: \n ", - __func__, - link->mst_stream_alloc_table.stream_count); - - for (i = 0; i < MAX_CONTROLLER_NUM; i++) { - DC_LOG_MST("stream_enc[%d]: %p " - "stream[%d].hpo_dp_stream_enc: %p " - "stream[%d].vcp_id: %d " - "stream[%d].slot_count: %d\n", - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, - i, - link->mst_stream_alloc_table.stream_allocations[i].vcp_id, - i, - link->mst_stream_alloc_table.stream_allocations[i].slot_count); - } - - ASSERT(proposed_table.stream_count > 0); - - /* update mst stream allocation table hardware state */ - if (link_hwss->ext.update_stream_allocation_table == NULL || - dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { - DC_LOG_ERROR("Failure: unknown encoding format\n"); - return DC_ERROR_UNEXPECTED; - } - - link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, - &link->mst_stream_alloc_table); - - /* poll for immediate branch device ACT handled */ - ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( - stream->ctx, - stream); - - if (ret != ACT_LINK_LOST) { - /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ - dm_helpers_dp_mst_send_payload_allocation( - stream->ctx, - stream, - true); - } - - /* increase throttled vcp size */ - pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); - pbn_per_slot = get_pbn_per_slot(stream); - avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); - - if (link_hwss->ext.set_throttled_vcp_size) - link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); - if (link_hwss->ext.set_hblank_min_symbol_width) - link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, - &link->cur_link_settings, - avg_time_slots_per_mtp); - - return DC_OK; -} - -static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) -{ - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; - struct dc_dp_mst_stream_allocation_table proposed_table = {0}; - struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); - int i; - bool mst_mode = (link->type == dc_connection_mst_branch); - /* adjust for drm changes */ - bool update_drm_mst_state = true; - const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); - const struct dc_link_settings empty_link_settings = {0}; - DC_LOGGER_INIT(link->ctx->logger); - - - /* deallocate_mst_payload is called before disable link. When changing - * mode or disabling/enabling a monitor, a new stream is created which - * is not in link stream[] yet. For this, payload is not allocated yet, - * so de-alloc should not be done. For a new mode set, map_resources - * will get an engine for the new stream, so stream_enc->id should be - * validated until here. 
- */ - - /* slot X.Y */ - if (link_hwss->ext.set_throttled_vcp_size) - link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); - if (link_hwss->ext.set_hblank_min_symbol_width) - link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, - &empty_link_settings, - avg_time_slots_per_mtp); - - if (mst_mode || update_drm_mst_state) { - /* when link is in mst mode, rely on mst manager to remove - * payload - */ - if (dm_helpers_dp_mst_write_payload_allocation_table( - stream->ctx, - stream, - &proposed_table, - false)) - - update_mst_stream_alloc_table( - link, - pipe_ctx->stream_res.stream_enc, - pipe_ctx->stream_res.hpo_dp_stream_enc, - &proposed_table); - else - DC_LOG_WARNING("Failed to update " - "MST allocation table for " - "pipe idx:%d\n", - pipe_ctx->pipe_idx); - } else { - /* when link is no longer in mst mode (mst hub unplugged), - * remove payload with default dc logic - */ - remove_stream_from_alloc_table(link, pipe_ctx->stream_res.stream_enc, - pipe_ctx->stream_res.hpo_dp_stream_enc); - } - - DC_LOG_MST("%s " - "stream_count: %d: ", - __func__, - link->mst_stream_alloc_table.stream_count); - - for (i = 0; i < MAX_CONTROLLER_NUM; i++) { - DC_LOG_MST("stream_enc[%d]: %p " - "stream[%d].hpo_dp_stream_enc: %p " - "stream[%d].vcp_id: %d " - "stream[%d].slot_count: %d\n", - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, - i, - link->mst_stream_alloc_table.stream_allocations[i].vcp_id, - i, - link->mst_stream_alloc_table.stream_allocations[i].slot_count); - } - - /* update mst stream allocation table hardware state */ - if (link_hwss->ext.update_stream_allocation_table == NULL || - dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { - DC_LOG_DEBUG("Unknown encoding format\n"); - return DC_ERROR_UNEXPECTED; - } - - link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, - &link->mst_stream_alloc_table); - - if (mst_mode) { - dm_helpers_dp_mst_poll_for_allocation_change_trigger( - stream->ctx, - stream); - - if (!update_drm_mst_state) - dm_helpers_dp_mst_send_payload_allocation( - stream->ctx, - stream, - false); - } - - if (update_drm_mst_state) - dm_helpers_dp_mst_send_payload_allocation( - stream->ctx, - stream, - false); - - return DC_OK; -} - - -#if defined(CONFIG_DRM_AMD_DC_HDCP) -static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) -{ - struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; - struct link_encoder *link_enc = NULL; - struct cp_psp_stream_config config = {0}; - enum dp_panel_mode panel_mode = - dp_get_panel_mode(pipe_ctx->stream->link); - - if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL) - return; - - link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); - ASSERT(link_enc); - if (link_enc == NULL) - return; - - /* otg instance */ - config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; - - /* dig front end */ - config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst; - - /* stream encoder index */ - config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; - if (is_dp_128b_132b_signal(pipe_ctx)) - config.stream_enc_idx = - pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0; - - /* dig back end */ - config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; - - /* link encoder index */ - config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; - if (is_dp_128b_132b_signal(pipe_ctx)) - 
config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst; - - /* dio output index is dpia index for DPIA endpoint & dcio index by default */ - if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) - config.dio_output_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1; - else - config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; - - - /* phy index */ - config.phy_idx = resource_transmitter_to_phy_idx( - pipe_ctx->stream->link->dc, link_enc->transmitter); - if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) - /* USB4 DPIA doesn't use PHY in our soc, initialize it to 0 */ - config.phy_idx = 0; - - /* stream properties */ - config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0; - config.mst_enabled = (pipe_ctx->stream->signal == - SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0; - config.dp2_enabled = is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0; - config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? - 1 : 0; - config.dpms_off = dpms_off; - - /* dm stream context */ - config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; - - cp_psp->funcs.update_stream_config(cp_psp->handle, &config); -} -#endif - -static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx) -{ - struct dc *dc = pipe_ctx->stream->ctx->dc; - struct dc_stream_state *stream = pipe_ctx->stream; - struct link_mst_stream_allocation_table proposed_table = {0}; - struct fixed31_32 avg_time_slots_per_mtp; - uint8_t req_slot_count = 0; - uint8_t vc_id = 1; /// VC ID always 1 for SST - struct dc_link_settings link_settings = pipe_ctx->link_config.dp_link_settings; - const struct link_hwss *link_hwss = get_link_hwss(stream->link, &pipe_ctx->link_res); - DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); - - stream->link->cur_link_settings = link_settings; - - if (link_hwss->ext.enable_dp_link_output) - link_hwss->ext.enable_dp_link_output(stream->link, &pipe_ctx->link_res, - stream->signal, pipe_ctx->clock_source->id, - &link_settings); - -#ifdef DIAGS_BUILD - /* Workaround for FPGA HPO capture DP link data: - * HPO capture will set link to active mode - * This workaround is required to get a capture from start of frame - */ - if (!dc->debug.fpga_hpo_capture_en) { - struct encoder_set_dp_phy_pattern_param params = {0}; - params.dp_phy_pattern = DP_TEST_PATTERN_VIDEO_MODE; - - /* Set link active */ - stream->link->hpo_dp_link_enc->funcs->set_link_test_pattern( - stream->link->hpo_dp_link_enc, - &params); - } -#endif - - /* Enable DP_STREAM_ENC */ - dc->hwss.enable_stream(pipe_ctx); - - /* Set DSC PPS SDP (AKA "info frames") */ - if (pipe_ctx->stream->timing.flags.DSC) { - dp_set_dsc_pps_sdp(pipe_ctx, true, true); - } - - /* Allocate Payload */ - if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) && (state->stream_count > 1)) { - // MST case - uint8_t i; - - proposed_table.stream_count = state->stream_count; - for (i = 0; i < state->stream_count; i++) { - avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(state->streams[i], state->streams[i]->link); - req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); - proposed_table.stream_allocations[i].slot_count = req_slot_count; - proposed_table.stream_allocations[i].vcp_id = i+1; - /* NOTE: This makes assumption that pipe_ctx index is same as stream index */ - proposed_table.stream_allocations[i].hpo_dp_stream_enc = state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc; - } - } else { - // SST case - avg_time_slots_per_mtp = 
calculate_sst_avg_time_slots_per_mtp(stream, stream->link); - req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); - proposed_table.stream_count = 1; /// Always 1 stream for SST - proposed_table.stream_allocations[0].slot_count = req_slot_count; - proposed_table.stream_allocations[0].vcp_id = vc_id; - proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; - } - - link_hwss->ext.update_stream_allocation_table(stream->link, - &pipe_ctx->link_res, - &proposed_table); - - if (link_hwss->ext.set_throttled_vcp_size) - link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); - - dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings); - dc->hwss.enable_audio_stream(pipe_ctx); -} - -void core_link_enable_stream( - struct dc_state *state, - struct pipe_ctx *pipe_ctx) -{ - struct dc *dc = pipe_ctx->stream->ctx->dc; - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->sink->link; - enum dc_status status; - struct link_encoder *link_enc; - enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; - struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; - const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); - - if (is_dp_128b_132b_signal(pipe_ctx)) - vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; - - DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); - - if (pipe_ctx->stream->sink) { - if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && - pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) { - DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__, - pipe_ctx->stream->sink->edid_caps.display_name, - pipe_ctx->stream->signal); - } - } - - if (!IS_DIAG_DC(dc->ctx->dce_environment) && - dc_is_virtual_signal(pipe_ctx->stream->signal)) - return; - - link_enc = link_enc_cfg_get_link_enc(link); - ASSERT(link_enc); - - if (!dc_is_virtual_signal(pipe_ctx->stream->signal) - && !is_dp_128b_132b_signal(pipe_ctx)) { - if (link_enc) - link_enc->funcs->setup( - link_enc, - pipe_ctx->stream->signal); - } - - pipe_ctx->stream->link->link_state_valid = true; - - if (pipe_ctx->stream_res.tg->funcs->set_out_mux) { - if (is_dp_128b_132b_signal(pipe_ctx)) - otg_out_dest = OUT_MUX_HPO_DP; - else - otg_out_dest = OUT_MUX_DIO; - pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest); - } - - link_hwss->setup_stream_attribute(pipe_ctx); - - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - bool apply_edp_fast_boot_optimization = - pipe_ctx->stream->apply_edp_fast_boot_optimization; - - pipe_ctx->stream->apply_edp_fast_boot_optimization = false; - - // Enable VPG before building infoframe - if (vpg && vpg->funcs->vpg_poweron) - vpg->funcs->vpg_poweron(vpg); - - resource_build_info_frame(pipe_ctx); - dc->hwss.update_info_frame(pipe_ctx); - - if (dc_is_dp_signal(pipe_ctx->stream->signal)) - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); - - /* Do not touch link on seamless boot optimization. */ - if (pipe_ctx->stream->apply_seamless_boot_optimization) { - pipe_ctx->stream->dpms_off = false; - - /* Still enable stream features & audio on seamless boot for DP external displays */ - if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { - enable_stream_features(pipe_ctx); - dc->hwss.enable_audio_stream(pipe_ctx); - } - -#if defined(CONFIG_DRM_AMD_DC_HDCP) - update_psp_stream_config(pipe_ctx, false); -#endif - return; - } - - /* eDP lit up by bios already, no need to enable again. 
*/ - if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && - apply_edp_fast_boot_optimization && - !pipe_ctx->stream->timing.flags.DSC && - !pipe_ctx->next_odm_pipe) { - pipe_ctx->stream->dpms_off = false; -#if defined(CONFIG_DRM_AMD_DC_HDCP) - update_psp_stream_config(pipe_ctx, false); -#endif - return; - } - - if (pipe_ctx->stream->dpms_off) - return; - - /* Have to setup DSC before DIG FE and BE are connected (which happens before the - * link training). This is to make sure the bandwidth sent to DIG BE won't be - * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag - * will be automatically set at a later time when the video is enabled - * (DP_VID_STREAM_EN = 1). - */ - if (pipe_ctx->stream->timing.flags.DSC) { - if (dc_is_dp_signal(pipe_ctx->stream->signal) || - dc_is_virtual_signal(pipe_ctx->stream->signal)) - dp_set_dsc_enable(pipe_ctx, true); - - } - - status = enable_link(state, pipe_ctx); - - if (status != DC_OK) { - DC_LOG_WARNING("enabling link %u failed: %d\n", - pipe_ctx->stream->link->link_index, - status); - - /* Abort stream enable *unless* the failure was due to - * DP link training - some DP monitors will recover and - * show the stream anyway. But MST displays can't proceed - * without link training. - */ - if (status != DC_FAIL_DP_LINK_TRAINING || - pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - if (false == stream->link->link_status.link_active) - disable_link(stream->link, &pipe_ctx->link_res, - pipe_ctx->stream->signal); - BREAK_TO_DEBUGGER(); - return; - } - } - - /* turn off otg test pattern if enable */ - if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) - pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, - CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, - COLOR_DEPTH_UNDEFINED); - - /* This second call is needed to reconfigure the DIG - * as a workaround for the incorrect value being applied - * from transmitter control. 
- */ - if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) || - is_dp_128b_132b_signal(pipe_ctx))) - if (link_enc) - link_enc->funcs->setup( - link_enc, - pipe_ctx->stream->signal); - - dc->hwss.enable_stream(pipe_ctx); - - /* Set DSC PPS SDP (AKA "info frames") */ - if (pipe_ctx->stream->timing.flags.DSC) { - if (dc_is_dp_signal(pipe_ctx->stream->signal) || - dc_is_virtual_signal(pipe_ctx->stream->signal)) { - dp_set_dsc_on_rx(pipe_ctx, true); - dp_set_dsc_pps_sdp(pipe_ctx, true, true); - } - } - - if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) - dc_link_allocate_mst_payload(pipe_ctx); - else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && - is_dp_128b_132b_signal(pipe_ctx)) - dc_link_update_sst_payload(pipe_ctx, true); - - dc->hwss.unblank_stream(pipe_ctx, - &pipe_ctx->stream->link->cur_link_settings); - - if (stream->sink_patches.delay_ignore_msa > 0) - msleep(stream->sink_patches.delay_ignore_msa); - - if (dc_is_dp_signal(pipe_ctx->stream->signal)) - enable_stream_features(pipe_ctx); -#if defined(CONFIG_DRM_AMD_DC_HDCP) - update_psp_stream_config(pipe_ctx, false); -#endif - - dc->hwss.enable_audio_stream(pipe_ctx); - - } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - if (is_dp_128b_132b_signal(pipe_ctx)) - fpga_dp_hpo_enable_link_and_stream(state, pipe_ctx); - if (dc_is_dp_signal(pipe_ctx->stream->signal) || - dc_is_virtual_signal(pipe_ctx->stream->signal)) - dp_set_dsc_enable(pipe_ctx, true); - } - - if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { - core_link_set_avmute(pipe_ctx, false); - } -} - -void core_link_disable_stream(struct pipe_ctx *pipe_ctx) -{ - struct dc *dc = pipe_ctx->stream->ctx->dc; - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->sink->link; - struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; - - if (is_dp_128b_132b_signal(pipe_ctx)) - vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; - - DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); - - if (pipe_ctx->stream->sink) { - if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && - pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) { - DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__, - pipe_ctx->stream->sink->edid_caps.display_name, - pipe_ctx->stream->signal); - } - } - - if (!IS_DIAG_DC(dc->ctx->dce_environment) && - dc_is_virtual_signal(pipe_ctx->stream->signal)) - return; - - if (!pipe_ctx->stream->sink->edid_caps.panel_patch.skip_avmute) { - if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) - core_link_set_avmute(pipe_ctx, true); - } - - dc->hwss.disable_audio_stream(pipe_ctx); - -#if defined(CONFIG_DRM_AMD_DC_HDCP) - update_psp_stream_config(pipe_ctx, true); -#endif - dc->hwss.blank_stream(pipe_ctx); - - if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) - deallocate_mst_payload(pipe_ctx); - else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && - is_dp_128b_132b_signal(pipe_ctx)) - dc_link_update_sst_payload(pipe_ctx, false); - - if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { - struct ext_hdmi_settings settings = {0}; - enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id; - - unsigned short masked_chip_caps = link->chip_caps & - EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK; - // Need to inform that sink is going to use legacy HDMI mode. - dal_ddc_service_write_scdc_data( - link->ddc, - 165000, // vbios only handles 165 MHz. 
- false); - if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) { - /* DP159, Retimer settings */ - if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) - write_i2c_retimer_setting(pipe_ctx, - false, false, &settings); - else - write_i2c_default_retimer_setting(pipe_ctx, - false, false); - } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) { - /* PI3EQX1204, Redriver settings */ - write_i2c_redriver_setting(pipe_ctx, false); - } - } - - if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && - !is_dp_128b_132b_signal(pipe_ctx)) { - - /* In DP1.x SST mode, our encoder will go to TPS1 - * when link is on but stream is off. - * Disabling link before stream will avoid exposing TPS1 pattern - * during the disable sequence as it will confuse some receivers' - * state machines. - * In DP2 or MST mode, our encoder will stay video active - */ - disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); - dc->hwss.disable_stream(pipe_ctx); - } else { - dc->hwss.disable_stream(pipe_ctx); - disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); - } - - if (pipe_ctx->stream->timing.flags.DSC) { - if (dc_is_dp_signal(pipe_ctx->stream->signal)) - dp_set_dsc_enable(pipe_ctx, false); - } - if (is_dp_128b_132b_signal(pipe_ctx)) { - if (pipe_ctx->stream_res.tg->funcs->set_out_mux) - pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO); - } - - if (vpg && vpg->funcs->vpg_powerdown) - vpg->funcs->vpg_powerdown(vpg); -} - -void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) -{ - struct dc *dc = pipe_ctx->stream->ctx->dc; - - if (!dc_is_hdmi_signal(pipe_ctx->stream->signal)) - return; - - dc->hwss.set_avmute(pipe_ctx, enable); -} - -/** - * dc_link_enable_hpd_filter: - * If enable is true, programs HPD filter on associated HPD line using - * delay_on_disconnect/delay_on_connect values dependent on - * link->connector_signal - * - * If enable is false, programs HPD filter on associated HPD line with no - * delays on connect or disconnect - * - * @link: pointer to the dc link - * @enable: boolean specifying whether to enable hpd - */ -void dc_link_enable_hpd_filter(struct dc_link *link, bool enable) -{ - struct gpio *hpd; - - if (enable) { - link->is_hpd_filter_disabled = false; - program_hpd_filter(link); - } else { - link->is_hpd_filter_disabled = true; - /* Obtain HPD handle */ - hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); - - if (!hpd) - return; - - /* Setup HPD filtering */ - if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) { - struct gpio_hpd_config config; - - config.delay_on_connect = 0; - config.delay_on_disconnect = 0; - - dal_irq_setup_hpd_filter(hpd, &config); - - dal_gpio_close(hpd); - } else { - ASSERT_CRITICAL(false); - } - /* Release HPD handle */ - dal_gpio_destroy_irq(&hpd); - } -} - -void dc_link_set_drive_settings(struct dc *dc, - struct link_training_settings *lt_settings, - const struct dc_link *link) -{ - - int i; - struct link_resource link_res; - - for (i = 0; i < dc->link_count; i++) - if (dc->links[i] == link) - break; - - if (i >= dc->link_count) - ASSERT_CRITICAL(false); - - dc_link_get_cur_link_res(link, &link_res); - dc_link_dp_set_drive_settings(dc->links[i], &link_res, lt_settings); -} - -void dc_link_set_preferred_link_settings(struct dc *dc, - struct dc_link_settings *link_setting, - struct dc_link *link) -{ - int i; - struct pipe_ctx *pipe; - struct dc_stream_state *link_stream; - struct 
dc_link_settings store_settings = *link_setting; - - link->preferred_link_setting = store_settings; - - /* Retraining with preferred link settings is only relevant for - * the DP signal type. - * Check for non-DP signal or if passive dongle present. - */ - if (!dc_is_dp_signal(link->connector_signal) || - link->dongle_max_pix_clk > 0) - return; - - for (i = 0; i < MAX_PIPES; i++) { - pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->link) { - if (pipe->stream->link == link) { - link_stream = pipe->stream; - break; - } - } - } - - /* Stream not found */ - if (i == MAX_PIPES) - return; - - /* Cannot retrain link if backend is off */ - if (link_stream->dpms_off) - return; - - if (decide_link_settings(link_stream, &store_settings)) - dp_retrain_link_dp_test(link, &store_settings, false); -} - -void dc_link_set_preferred_training_settings(struct dc *dc, - struct dc_link_settings *link_setting, - struct dc_link_training_overrides *lt_overrides, - struct dc_link *link, - bool skip_immediate_retrain) -{ - if (lt_overrides != NULL) - link->preferred_training_settings = *lt_overrides; - else - memset(&link->preferred_training_settings, 0, sizeof(link->preferred_training_settings)); - - if (link_setting != NULL) { - link->preferred_link_setting = *link_setting; - if (dp_get_link_encoding_format(link_setting) == DP_128b_132b_ENCODING) - /* TODO: add dc update for acquiring link res */ - skip_immediate_retrain = true; - } else { - link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN; - link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN; - } - - if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && - link->type == dc_connection_mst_branch) - dm_helpers_dp_mst_update_branch_bandwidth(dc->ctx, link); - - /* Retrain now, or wait until next stream update to apply */ - if (skip_immediate_retrain == false) - dc_link_set_preferred_link_settings(dc, &link->preferred_link_setting, link); -} - -void dc_link_enable_hpd(const struct dc_link *link) -{ - dc_link_dp_enable_hpd(link); -} - -void dc_link_disable_hpd(const struct dc_link *link) -{ - dc_link_dp_disable_hpd(link); -} - -void dc_link_set_test_pattern(struct dc_link *link, - enum dp_test_pattern test_pattern, - enum dp_test_pattern_color_space test_pattern_color_space, - const struct link_training_settings *p_link_settings, - const unsigned char *p_custom_pattern, - unsigned int cust_pattern_size) -{ - if (link != NULL) - dc_link_dp_set_test_pattern( - link, - test_pattern, - test_pattern_color_space, - p_link_settings, - p_custom_pattern, - cust_pattern_size); -} - -uint32_t dc_link_bandwidth_kbps( - const struct dc_link *link, - const struct dc_link_settings *link_setting) -{ - uint32_t total_data_bw_efficiency_x10000 = 0; - uint32_t link_rate_per_lane_kbps = 0; - - switch (dp_get_link_encoding_format(link_setting)) { - case DP_8b_10b_ENCODING: - /* For 8b/10b encoding: - * link rate is defined in the unit of LINK_RATE_REF_FREQ_IN_KHZ per DP byte per lane. - * data bandwidth efficiency is 80% with additional 3% overhead if FEC is supported. - */ - link_rate_per_lane_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE; - total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000; - if (dc_link_should_enable_fec(link)) { - total_data_bw_efficiency_x10000 /= 100; - total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100; - } - break; - case DP_128b_132b_ENCODING: - /* For 128b/132b encoding: - * link rate is defined in the unit of 10 Mbps per lane. 
- * total data bandwidth efficiency is always 96.71%. - */ - link_rate_per_lane_kbps = link_setting->link_rate * 10000; - total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000; - break; - default: - break; - } - - /* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */ - return link_rate_per_lane_kbps * link_setting->lane_count / 10000 * total_data_bw_efficiency_x10000; -} - -const struct dc_link_settings *dc_link_get_link_cap( - const struct dc_link *link) -{ - if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN && - link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) - return &link->preferred_link_setting; - return &link->verified_link_cap; -} - -void dc_link_overwrite_extended_receiver_cap( - struct dc_link *link) -{ - dp_overwrite_extended_receiver_cap(link); -} - -bool dc_link_is_fec_supported(const struct dc_link *link) -{ - /* TODO - use asic cap instead of link_enc->features - * we no longer know which link enc to use for this link before commit - */ - struct link_encoder *link_enc = NULL; - - link_enc = link_enc_cfg_get_link_enc(link); - ASSERT(link_enc); - - return (dc_is_dp_signal(link->connector_signal) && link_enc && - link_enc->features.fec_supported && - link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && - !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)); -} - -bool dc_link_should_enable_fec(const struct dc_link *link) -{ - bool force_disable = false; - - if (link->fec_state == dc_link_fec_enabled) - force_disable = false; - else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST && - link->local_sink && - link->local_sink->edid_caps.panel_patch.disable_fec) - force_disable = true; - else if (link->connector_signal == SIGNAL_TYPE_EDP - && (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields. 
- dsc_support.DSC_SUPPORT == false - || link->panel_config.dsc.disable_dsc_edp - || !link->dc->caps.edp_dsc_support)) - force_disable = true; - - return !force_disable && dc_link_is_fec_supported(link); -} - -uint32_t dc_bandwidth_in_kbps_from_timing( - const struct dc_crtc_timing *timing) -{ - uint32_t bits_per_channel = 0; - uint32_t kbps; - -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (timing->flags.DSC) - return dc_dsc_stream_bandwidth_in_kbps(timing, - timing->dsc_cfg.bits_per_pixel, - timing->dsc_cfg.num_slices_h, - timing->dsc_cfg.is_dp); -#endif /* CONFIG_DRM_AMD_DC_DCN */ - - switch (timing->display_color_depth) { - case COLOR_DEPTH_666: - bits_per_channel = 6; - break; - case COLOR_DEPTH_888: - bits_per_channel = 8; - break; - case COLOR_DEPTH_101010: - bits_per_channel = 10; - break; - case COLOR_DEPTH_121212: - bits_per_channel = 12; - break; - case COLOR_DEPTH_141414: - bits_per_channel = 14; - break; - case COLOR_DEPTH_161616: - bits_per_channel = 16; - break; - default: - ASSERT(bits_per_channel != 0); - bits_per_channel = 8; - break; - } - - kbps = timing->pix_clk_100hz / 10; - kbps *= bits_per_channel; - - if (timing->flags.Y_ONLY != 1) { - /* Only the YOnly format reduces bandwidth to 1/3 compared to RGB */ - kbps *= 3; - if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) - kbps /= 2; - else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) - kbps = kbps * 2 / 3; - } - - return kbps; - -} - -void dc_link_get_cur_link_res(const struct dc_link *link, - struct link_resource *link_res) -{ - int i; - struct pipe_ctx *pipe = NULL; - - memset(link_res, 0, sizeof(*link_res)); - - for (i = 0; i < MAX_PIPES; i++) { - pipe = &link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) { - if (pipe->stream->link == link) { - *link_res = pipe->link_res; - break; - } - } - } - -} - -/** - * dc_get_cur_link_res_map() - take a snapshot of current link resource allocation state - * @dc: pointer to dc of the dm calling this - * @map: a dc link resource snapshot defined internally to dc. - * - * DM needs to capture a snapshot of current link resource allocation mapping - * and store it in its persistent storage. - * - * Some of the link resources use a first-come-first-serve policy. - * The allocation mapping depends on the original hotplug order. This information - * is lost after the driver is reloaded. The snapshot is used to - * restore link resources to their previous state so the user gets consistent - * link capability allocation across reboots. - * - * Return: none (void function) - * - */ -void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map) -{ - struct dc_link *link; - uint32_t i; - uint32_t hpo_dp_recycle_map = 0; - - *map = 0; - - if (dc->caps.dp_hpo) { - for (i = 0; i < dc->caps.max_links; i++) { - link = dc->links[i]; - if (link->link_status.link_active && - dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING && - dp_get_link_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING) - /* hpo dp link encoder is considered recycled when RX reports 128b/132b encoding capability - * but the current link doesn't use it. - */ - hpo_dp_recycle_map |= (1 << i); - } - *map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT); - } -} - -/** - * dc_restore_link_res_map() - restore link resource allocation state from a snapshot - * @dc: pointer to dc of the dm calling this - * @map: a dc link resource snapshot defined internally to dc. 
- * - * DM needs to call this function after initial link detection on boot and - * before first commit streams to restore link resource allocation state - * from previous boot session. - * - * Some of the link resources use a first-come-first-serve policy. - * The allocation mapping depends on the original hotplug order. This information - * is lost after the driver is reloaded. The snapshot is used to - * restore link resources to their previous state so the user gets consistent - * link capability allocation across reboots. - * - * Return: none (void function) - * - */ -void dc_restore_link_res_map(const struct dc *dc, uint32_t *map) -{ - struct dc_link *link; - uint32_t i; - unsigned int available_hpo_dp_count; - uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK) - >> LINK_RES_HPO_DP_REC_MAP__SHIFT; - - if (dc->caps.dp_hpo) { - available_hpo_dp_count = dc->res_pool->hpo_dp_link_enc_count; - /* remove excess 128b/132b encoding support for not recycled links */ - for (i = 0; i < dc->caps.max_links; i++) { - if ((hpo_dp_recycle_map & (1 << i)) == 0) { - link = dc->links[i]; - if (link->type != dc_connection_none && - dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { - if (available_hpo_dp_count > 0) - available_hpo_dp_count--; - else - /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */ - link->verified_link_cap.link_rate = LINK_RATE_HIGH3; - } - } - } - /* remove excess 128b/132b encoding support for recycled links */ - for (i = 0; i < dc->caps.max_links; i++) { - if ((hpo_dp_recycle_map & (1 << i)) != 0) { - link = dc->links[i]; - if (link->type != dc_connection_none && - dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { - if (available_hpo_dp_count > 0) - available_hpo_dp_count--; - else - /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */ - link->verified_link_cap.link_rate = LINK_RATE_HIGH3; - } - } - } - } -} +// TODO - remove this file after external build dependencies are resolved. +/* NOTE: This file is pending removal, do not add new code to this file */ \ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c deleted file mode 100644 index dedd1246ce5884..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ /dev/null @@ -1,7553 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - */ -#include "dm_services.h" -#include "dc.h" -#include "dc_link_dp.h" -#include "dm_helpers.h" -#include "opp.h" -#include "dsc.h" -#include "clk_mgr.h" -#include "resource.h" - -#include "inc/core_types.h" -#include "link_hwss.h" -#include "dc_link_ddc.h" -#include "core_status.h" -#include "dpcd_defs.h" -#include "dc_dmub_srv.h" -#include "dce/dmub_hw_lock_mgr.h" -#include "inc/dc_link_dpia.h" -#include "inc/link_enc_cfg.h" -#include "link/link_dp_trace.h" - -/*Travis*/ -static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT"; -/*Nutmeg*/ -static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA"; - -#define DC_LOGGER \ - link->ctx->logger -#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ - -#include "link_dpcd.h" - -#ifndef MAX -#define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) -#endif -#ifndef MIN -#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) -#endif - - /* maximum pre emphasis level allowed for each voltage swing level */ - static const enum dc_pre_emphasis - voltage_swing_to_pre_emphasis[] = { PRE_EMPHASIS_LEVEL3, - PRE_EMPHASIS_LEVEL2, - PRE_EMPHASIS_LEVEL1, - PRE_EMPHASIS_DISABLED }; - -enum { - POST_LT_ADJ_REQ_LIMIT = 6, - POST_LT_ADJ_REQ_TIMEOUT = 200 -}; - -struct dp_lt_fallback_entry { - enum dc_lane_count lane_count; - enum dc_link_rate link_rate; -}; - -static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = { - /* This link training fallback array is ordered by - * link bandwidth from highest to lowest. - * The DP spec makes it a normative policy to always - * choose the next highest link bandwidth during - * link training fallback. 
- */ - {LANE_COUNT_FOUR, LINK_RATE_UHBR20}, - {LANE_COUNT_FOUR, LINK_RATE_UHBR13_5}, - {LANE_COUNT_TWO, LINK_RATE_UHBR20}, - {LANE_COUNT_FOUR, LINK_RATE_UHBR10}, - {LANE_COUNT_TWO, LINK_RATE_UHBR13_5}, - {LANE_COUNT_FOUR, LINK_RATE_HIGH3}, - {LANE_COUNT_ONE, LINK_RATE_UHBR20}, - {LANE_COUNT_TWO, LINK_RATE_UHBR10}, - {LANE_COUNT_FOUR, LINK_RATE_HIGH2}, - {LANE_COUNT_ONE, LINK_RATE_UHBR13_5}, - {LANE_COUNT_TWO, LINK_RATE_HIGH3}, - {LANE_COUNT_ONE, LINK_RATE_UHBR10}, - {LANE_COUNT_TWO, LINK_RATE_HIGH2}, - {LANE_COUNT_FOUR, LINK_RATE_HIGH}, - {LANE_COUNT_ONE, LINK_RATE_HIGH3}, - {LANE_COUNT_FOUR, LINK_RATE_LOW}, - {LANE_COUNT_ONE, LINK_RATE_HIGH2}, - {LANE_COUNT_TWO, LINK_RATE_HIGH}, - {LANE_COUNT_TWO, LINK_RATE_LOW}, - {LANE_COUNT_ONE, LINK_RATE_HIGH}, - {LANE_COUNT_ONE, LINK_RATE_LOW}, -}; - -static const struct dc_link_settings fail_safe_link_settings = { - .lane_count = LANE_COUNT_ONE, - .link_rate = LINK_RATE_LOW, - .link_spread = LINK_SPREAD_DISABLED, -}; - -static bool decide_fallback_link_setting( - struct dc_link *link, - struct dc_link_settings *max, - struct dc_link_settings *cur, - enum link_training_result training_result); -static void maximize_lane_settings(const struct link_training_settings *lt_settings, - struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); -static void override_lane_settings(const struct link_training_settings *lt_settings, - struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); - -static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link, - const struct dc_link_settings *link_settings) -{ - union training_aux_rd_interval training_rd_interval; - uint32_t wait_in_micro_secs = 100; - - memset(&training_rd_interval, 0, sizeof(training_rd_interval)); - if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING && - link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { - core_link_read_dpcd( - link, - DP_TRAINING_AUX_RD_INTERVAL, - (uint8_t *)&training_rd_interval, - sizeof(training_rd_interval)); - if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) - wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; - } - - return wait_in_micro_secs; -} - -static uint32_t get_eq_training_aux_rd_interval( - struct dc_link *link, - const struct dc_link_settings *link_settings) -{ - union training_aux_rd_interval training_rd_interval; - - memset(&training_rd_interval, 0, sizeof(training_rd_interval)); - if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { - core_link_read_dpcd( - link, - DP_128b_132b_TRAINING_AUX_RD_INTERVAL, - (uint8_t *)&training_rd_interval, - sizeof(training_rd_interval)); - } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING && - link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { - core_link_read_dpcd( - link, - DP_TRAINING_AUX_RD_INTERVAL, - (uint8_t *)&training_rd_interval, - sizeof(training_rd_interval)); - } - - switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) { - case 0: return 400; - case 1: return 4000; - case 2: return 8000; - case 3: return 12000; - case 4: return 16000; - case 5: return 32000; - case 6: return 64000; - default: return 400; - } -} - -void dp_wait_for_training_aux_rd_interval( - struct dc_link *link, - uint32_t wait_in_micro_secs) -{ - if (wait_in_micro_secs > 1000) - msleep(wait_in_micro_secs/1000); - else - udelay(wait_in_micro_secs); - - DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n", - __func__, - wait_in_micro_secs); -} - -enum dpcd_training_patterns - dc_dp_training_pattern_to_dpcd_training_pattern( - struct dc_link *link, - 
enum dc_dp_training_pattern pattern) -{ - enum dpcd_training_patterns dpcd_tr_pattern = - DPCD_TRAINING_PATTERN_VIDEOIDLE; - - switch (pattern) { - case DP_TRAINING_PATTERN_SEQUENCE_1: - dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1; - break; - case DP_TRAINING_PATTERN_SEQUENCE_2: - dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2; - break; - case DP_TRAINING_PATTERN_SEQUENCE_3: - dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3; - break; - case DP_TRAINING_PATTERN_SEQUENCE_4: - dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4; - break; - case DP_128b_132b_TPS1: - dpcd_tr_pattern = DPCD_128b_132b_TPS1; - break; - case DP_128b_132b_TPS2: - dpcd_tr_pattern = DPCD_128b_132b_TPS2; - break; - case DP_128b_132b_TPS2_CDS: - dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS; - break; - case DP_TRAINING_PATTERN_VIDEOIDLE: - dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE; - break; - default: - ASSERT(0); - DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", - __func__, pattern); - break; - } - - return dpcd_tr_pattern; -} - -static void dpcd_set_training_pattern( - struct dc_link *link, - enum dc_dp_training_pattern training_pattern) -{ - union dpcd_training_pattern dpcd_pattern = {0}; - - dpcd_pattern.v1_4.TRAINING_PATTERN_SET = - dc_dp_training_pattern_to_dpcd_training_pattern( - link, training_pattern); - - core_link_write_dpcd( - link, - DP_TRAINING_PATTERN_SET, - &dpcd_pattern.raw, - 1); - - DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n", - __func__, - DP_TRAINING_PATTERN_SET, - dpcd_pattern.v1_4.TRAINING_PATTERN_SET); -} - -static enum dc_dp_training_pattern decide_cr_training_pattern( - const struct dc_link_settings *link_settings) -{ - switch (dp_get_link_encoding_format(link_settings)) { - case DP_8b_10b_ENCODING: - default: - return DP_TRAINING_PATTERN_SEQUENCE_1; - case DP_128b_132b_ENCODING: - return DP_128b_132b_TPS1; - } -} - -static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link, - const struct dc_link_settings *link_settings) -{ - struct link_encoder *link_enc; - struct encoder_feature_support *enc_caps; - struct dpcd_caps *rx_caps = &link->dpcd_caps; - enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2; - - link_enc = link_enc_cfg_get_link_enc(link); - ASSERT(link_enc); - enc_caps = &link_enc->features; - - switch (dp_get_link_encoding_format(link_settings)) { - case DP_8b_10b_ENCODING: - if (enc_caps->flags.bits.IS_TPS4_CAPABLE && - rx_caps->max_down_spread.bits.TPS4_SUPPORTED) - pattern = DP_TRAINING_PATTERN_SEQUENCE_4; - else if (enc_caps->flags.bits.IS_TPS3_CAPABLE && - rx_caps->max_ln_count.bits.TPS3_SUPPORTED) - pattern = DP_TRAINING_PATTERN_SEQUENCE_3; - else - pattern = DP_TRAINING_PATTERN_SEQUENCE_2; - break; - case DP_128b_132b_ENCODING: - pattern = DP_128b_132b_TPS2; - break; - default: - pattern = DP_TRAINING_PATTERN_SEQUENCE_2; - break; - } - return pattern; -} - -static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings) -{ - uint8_t link_rate = 0; - enum dp_link_encoding encoding = dp_get_link_encoding_format(link_settings); - - if (encoding == DP_128b_132b_ENCODING) - switch (link_settings->link_rate) { - case LINK_RATE_UHBR10: - link_rate = 0x1; - break; - case LINK_RATE_UHBR20: - link_rate = 0x2; - break; - case LINK_RATE_UHBR13_5: - link_rate = 0x4; - break; - default: - link_rate = 0; - break; - } - else if (encoding == DP_8b_10b_ENCODING) - link_rate = (uint8_t) link_settings->link_rate; - else - link_rate = 0; - - return link_rate; -} - -static void dp_fixed_vs_pe_read_lane_adjust( - struct dc_link *link, - 
union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]) -{ - const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63}; - const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63}; - const uint8_t offset = dp_convert_to_count( - link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - uint32_t vendor_lttpr_write_address = 0xF004F; - uint32_t vendor_lttpr_read_address = 0xF0053; - uint8_t dprx_vs = 0; - uint8_t dprx_pe = 0; - uint8_t lane; - - if (offset != 0xFF) { - vendor_lttpr_write_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - vendor_lttpr_read_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - } - - /* W/A to read lane settings requested by DPRX */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_read_dpcd( - link, - vendor_lttpr_read_address, - &dprx_vs, - 1); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); - core_link_read_dpcd( - link, - vendor_lttpr_read_address, - &dprx_pe, - 1); - - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = (dprx_vs >> (2 * lane)) & 0x3; - dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = (dprx_pe >> (2 * lane)) & 0x3; - } -} - -static void dp_fixed_vs_pe_set_retimer_lane_settings( - struct dc_link *link, - const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], - uint8_t lane_count) -{ - const uint8_t offset = dp_convert_to_count( - link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; - uint32_t vendor_lttpr_write_address = 0xF004F; - uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; - uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; - uint8_t lane = 0; - - if (offset != 0xFF) { - vendor_lttpr_write_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - } - - for (lane = 0; lane < lane_count; lane++) { - vendor_lttpr_write_data_vs[3] |= - dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane); - vendor_lttpr_write_data_pe[3] |= - dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane); - } - - /* Force LTTPR to output desired VS and PE */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_reset[0], - sizeof(vendor_lttpr_write_data_reset)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); -} - -enum dc_status dpcd_set_link_settings( - struct dc_link *link, - const struct link_training_settings *lt_settings) -{ - uint8_t rate; - enum dc_status status; - - union down_spread_ctrl downspread = {0}; - union lane_count_set lane_count_set = {0}; - - downspread.raw = (uint8_t) - (lt_settings->link_settings.link_spread); - - lane_count_set.bits.LANE_COUNT_SET = - lt_settings->link_settings.lane_count; - - lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; - lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; - - - if (link->ep_type == DISPLAY_ENDPOINT_PHY && - lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { - lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = - link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; - } - - status = 
core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, - &downspread.raw, sizeof(downspread)); - - status = core_link_write_dpcd(link, DP_LANE_COUNT_SET, - &lane_count_set.raw, 1); - - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && - lt_settings->link_settings.use_link_rate_set == true) { - rate = 0; - /* WA for some MUX chips that will power down with eDP and lose supported - * link rate set for eDP 1.4. Source reads DPCD 0x010 again to ensure - * MUX chip gets link rate set back before link training. - */ - if (link->connector_signal == SIGNAL_TYPE_EDP) { - uint8_t supported_link_rates[16]; - - core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, - supported_link_rates, sizeof(supported_link_rates)); - } - status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); - status = core_link_write_dpcd(link, DP_LINK_RATE_SET, - &lt_settings->link_settings.link_rate_set, 1); - } else { - rate = get_dpcd_link_rate(&lt_settings->link_settings); - - status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); - } - - if (rate) { - DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", - __func__, - DP_LINK_BW_SET, - lt_settings->link_settings.link_rate, - DP_LANE_COUNT_SET, - lt_settings->link_settings.lane_count, - lt_settings->enhanced_framing, - DP_DOWNSPREAD_CTRL, - lt_settings->link_settings.link_spread); - } else { - DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x framing = %x\n %x spread = %x\n", - __func__, - DP_LINK_RATE_SET, - lt_settings->link_settings.link_rate_set, - DP_LANE_COUNT_SET, - lt_settings->link_settings.lane_count, - lt_settings->enhanced_framing, - DP_DOWNSPREAD_CTRL, - lt_settings->link_settings.link_spread); - } - - return status; -} - -uint8_t dc_dp_initialize_scrambling_data_symbols( - struct dc_link *link, - enum dc_dp_training_pattern pattern) -{ - uint8_t disable_scrabled_data_symbols = 0; - - switch (pattern) { - case DP_TRAINING_PATTERN_SEQUENCE_1: - case DP_TRAINING_PATTERN_SEQUENCE_2: - case DP_TRAINING_PATTERN_SEQUENCE_3: - disable_scrabled_data_symbols = 1; - break; - case DP_TRAINING_PATTERN_SEQUENCE_4: - case DP_128b_132b_TPS1: - case DP_128b_132b_TPS2: - disable_scrabled_data_symbols = 0; - break; - default: - ASSERT(0); - DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", - __func__, pattern); - break; - } - return disable_scrabled_data_symbols; -} - -static inline bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset) -{ - return (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0); -} - -static void dpcd_set_lt_pattern_and_lane_settings( - struct dc_link *link, - const struct link_training_settings *lt_settings, - enum dc_dp_training_pattern pattern, - uint32_t offset) -{ - uint32_t dpcd_base_lt_offset; - - uint8_t dpcd_lt_buffer[5] = {0}; - union dpcd_training_pattern dpcd_pattern = {0}; - uint32_t size_in_bytes; - bool edp_workaround = false; /* TODO link_prop.INTERNAL */ - dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET; - - if (is_repeater(lt_settings, offset)) - dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - - /***************************************************************** - * DpcdAddress_TrainingPatternSet - *****************************************************************/ - dpcd_pattern.v1_4.TRAINING_PATTERN_SET = - dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern); - - dpcd_pattern.v1_4.SCRAMBLING_DISABLE = - 
dc_dp_initialize_scrambling_data_symbols(link, pattern); - - dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] - = dpcd_pattern.raw; - - if (is_repeater(lt_settings, offset)) { - DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", - __func__, - offset, - dpcd_base_lt_offset, - dpcd_pattern.v1_4.TRAINING_PATTERN_SET); - } else { - DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", - __func__, - dpcd_base_lt_offset, - dpcd_pattern.v1_4.TRAINING_PATTERN_SET); - } - - /* concatenate everything into one buffer*/ - size_in_bytes = lt_settings->link_settings.lane_count * - sizeof(lt_settings->dpcd_lane_settings[0]); - - // 0x00103 - 0x00102 - memmove( - &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET], - lt_settings->dpcd_lane_settings, - size_in_bytes); - - if (is_repeater(lt_settings, offset)) { - if (dp_get_link_encoding_format(&lt_settings->link_settings) == - DP_128b_132b_ENCODING) - DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" - " 0x%X TX_FFE_PRESET_VALUE = %x\n", - __func__, - offset, - dpcd_base_lt_offset, - lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); - else if (dp_get_link_encoding_format(&lt_settings->link_settings) == - DP_8b_10b_ENCODING) - DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" - " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", - __func__, - offset, - dpcd_base_lt_offset, - lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, - lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, - lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, - lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); - } else { - if (dp_get_link_encoding_format(&lt_settings->link_settings) == - DP_128b_132b_ENCODING) - DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", - __func__, - dpcd_base_lt_offset, - lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); - else if (dp_get_link_encoding_format(&lt_settings->link_settings) == - DP_8b_10b_ENCODING) - DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", - __func__, - dpcd_base_lt_offset, - lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, - lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, - lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, - lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); - } - if (edp_workaround) { - /* for eDP write in 2 parts because the 5-byte burst is - * causing issues on some eDP panels (EPR#366724) - */ - core_link_write_dpcd( - link, - DP_TRAINING_PATTERN_SET, - &dpcd_pattern.raw, - sizeof(dpcd_pattern.raw)); - - core_link_write_dpcd( - link, - DP_TRAINING_LANE0_SET, - (uint8_t *)(lt_settings->dpcd_lane_settings), - size_in_bytes); - - } else if (dp_get_link_encoding_format(&lt_settings->link_settings) == - DP_128b_132b_ENCODING) { - core_link_write_dpcd( - link, - dpcd_base_lt_offset, - dpcd_lt_buffer, - sizeof(dpcd_lt_buffer)); - } else - /* write it all in (1 + number-of-lanes)-byte burst*/ - core_link_write_dpcd( - link, - dpcd_base_lt_offset, - dpcd_lt_buffer, - size_in_bytes + sizeof(dpcd_pattern.raw)); -} - -bool dp_is_cr_done(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status) -{ - uint32_t lane; - /*LANEx_CR_DONE bits All 1's?*/ - for (lane = 0; lane < (uint32_t)(ln_count); lane++) { - if (!dpcd_lane_status[lane].bits.CR_DONE_0) - return false; - } - return true; -} - -bool dp_is_ch_eq_done(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status) 
-{ - bool done = true; - uint32_t lane; - for (lane = 0; lane < (uint32_t)(ln_count); lane++) - if (!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0) - done = false; - return done; -} - -bool dp_is_symbol_locked(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status) -{ - bool locked = true; - uint32_t lane; - for (lane = 0; lane < (uint32_t)(ln_count); lane++) - if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0) - locked = false; - return locked; -} - -bool dp_is_interlane_aligned(union lane_align_status_updated align_status) -{ - return align_status.bits.INTERLANE_ALIGN_DONE == 1; -} - -void dp_hw_to_dpcd_lane_settings( - const struct link_training_settings *lt_settings, - const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], - union dpcd_training_lane dpcd_lane_settings[]) -{ - uint8_t lane = 0; - - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - if (dp_get_link_encoding_format(<_settings->link_settings) == - DP_8b_10b_ENCODING) { - dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = - (uint8_t)(hw_lane_settings[lane].VOLTAGE_SWING); - dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET = - (uint8_t)(hw_lane_settings[lane].PRE_EMPHASIS); - dpcd_lane_settings[lane].bits.MAX_SWING_REACHED = - (hw_lane_settings[lane].VOLTAGE_SWING == - VOLTAGE_SWING_MAX_LEVEL ? 1 : 0); - dpcd_lane_settings[lane].bits.MAX_PRE_EMPHASIS_REACHED = - (hw_lane_settings[lane].PRE_EMPHASIS == - PRE_EMPHASIS_MAX_LEVEL ? 1 : 0); - } - else if (dp_get_link_encoding_format(<_settings->link_settings) == - DP_128b_132b_ENCODING) { - dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE = - hw_lane_settings[lane].FFE_PRESET.settings.level; - } - } -} - -void dp_decide_lane_settings( - const struct link_training_settings *lt_settings, - const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], - struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], - union dpcd_training_lane dpcd_lane_settings[]) -{ - uint32_t lane; - - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - if (dp_get_link_encoding_format(<_settings->link_settings) == - DP_8b_10b_ENCODING) { - hw_lane_settings[lane].VOLTAGE_SWING = - (enum dc_voltage_swing)(ln_adjust[lane].bits. - VOLTAGE_SWING_LANE); - hw_lane_settings[lane].PRE_EMPHASIS = - (enum dc_pre_emphasis)(ln_adjust[lane].bits. 
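
dp_hw_to_dpcd_lane_settings() maps the HW drive levels into the 8b/10b TRAINING_LANEx_SET byte layout: bits 1:0 VOLTAGE_SWING_SET, bit 2 MAX_SWING_REACHED, bits 4:3 PRE_EMPHASIS_SET, bit 5 MAX_PRE_EMPHASIS_REACHED. A sketch of that packing, assuming level 3 is the maximum for both fields (as VOLTAGE_SWING_MAX_LEVEL and PRE_EMPHASIS_MAX_LEVEL are here):

#include <stdint.h>

#define VS_MAX_LEVEL 3
#define PE_MAX_LEVEL 3

static uint8_t training_lane_set_byte(uint8_t vs, uint8_t pe)
{
	uint8_t b = 0;

	b |= vs & 0x3;			/* VOLTAGE_SWING_SET */
	if (vs == VS_MAX_LEVEL)
		b |= 1 << 2;		/* MAX_SWING_REACHED */
	b |= (pe & 0x3) << 3;		/* PRE_EMPHASIS_SET */
	if (pe == PE_MAX_LEVEL)
		b |= 1 << 5;		/* MAX_PRE_EMPHASIS_REACHED */
	return b;
}
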
- PRE_EMPHASIS_LANE); - } - else if (dp_get_link_encoding_format(<_settings->link_settings) == - DP_128b_132b_ENCODING) { - hw_lane_settings[lane].FFE_PRESET.raw = - ln_adjust[lane].tx_ffe.PRESET_VALUE; - } - } - dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); - - if (lt_settings->disallow_per_lane_settings) { - /* we find the maximum of the requested settings across all lanes*/ - /* and set this maximum for all lanes*/ - maximize_lane_settings(lt_settings, hw_lane_settings); - override_lane_settings(lt_settings, hw_lane_settings); - - if (lt_settings->always_match_dpcd_with_hw_lane_settings) - dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); - } - -} - -static uint8_t get_nibble_at_index(const uint8_t *buf, - uint32_t index) -{ - uint8_t nibble; - nibble = buf[index / 2]; - - if (index % 2) - nibble >>= 4; - else - nibble &= 0x0F; - - return nibble; -} - -static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing( - enum dc_voltage_swing voltage) -{ - enum dc_pre_emphasis pre_emphasis; - pre_emphasis = PRE_EMPHASIS_MAX_LEVEL; - - if (voltage <= VOLTAGE_SWING_MAX_LEVEL) - pre_emphasis = voltage_swing_to_pre_emphasis[voltage]; - - return pre_emphasis; - -} - -static void maximize_lane_settings(const struct link_training_settings *lt_settings, - struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) -{ - uint32_t lane; - struct dc_lane_settings max_requested; - - max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING; - max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS; - max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET; - - /* Determine what the maximum of the requested settings are*/ - for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) { - if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING) - max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING; - - if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS) - max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS; - if (lane_settings[lane].FFE_PRESET.settings.level > - max_requested.FFE_PRESET.settings.level) - max_requested.FFE_PRESET.settings.level = - lane_settings[lane].FFE_PRESET.settings.level; - } - - /* make sure the requested settings are - * not higher than maximum settings*/ - if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL) - max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL; - - if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL) - max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL; - if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL) - max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL; - - /* make sure the pre-emphasis matches the voltage swing*/ - if (max_requested.PRE_EMPHASIS > - get_max_pre_emphasis_for_voltage_swing( - max_requested.VOLTAGE_SWING)) - max_requested.PRE_EMPHASIS = - get_max_pre_emphasis_for_voltage_swing( - max_requested.VOLTAGE_SWING); - - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING; - lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS; - lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET; - } -} - -static void override_lane_settings(const struct link_training_settings *lt_settings, - struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) -{ - uint32_t lane; - - if (lt_settings->voltage_swing == NULL && - lt_settings->pre_emphasis == NULL && - lt_settings->ffe_preset == NULL && - lt_settings->post_cursor2 
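
maximize_lane_settings() enforces one drive setting across all lanes when per-lane settings are disallowed: take the per-lane maximum of the requests, clamp to the PHY ceiling, then cap pre-emphasis by the voltage-swing-dependent maximum from voltage_swing_to_pre_emphasis[]. A standalone equivalent, with that coupling table written out as an assumption (VS0..VS3 allowing PE3..PE0):

#include <stdint.h>

static const uint8_t max_pe_for_vs[4] = { 3, 2, 1, 0 };

static void maximize_and_clamp(uint8_t *vs, uint8_t *pe, unsigned int lanes)
{
	uint8_t max_vs = 0, max_pe = 0;
	unsigned int l;

	for (l = 0; l < lanes; l++) {
		if (vs[l] > max_vs)
			max_vs = vs[l];
		if (pe[l] > max_pe)
			max_pe = pe[l];
	}
	if (max_vs > 3)
		max_vs = 3;				/* PHY ceiling */
	if (max_pe > max_pe_for_vs[max_vs])
		max_pe = max_pe_for_vs[max_vs];		/* keep PE legal for VS */
	for (l = 0; l < lanes; l++) {
		vs[l] = max_vs;				/* same drive on every lane */
		pe[l] = max_pe;
	}
}
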
== NULL) - - return; - - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - if (lt_settings->voltage_swing) - lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing; - if (lt_settings->pre_emphasis) - lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis; - if (lt_settings->post_cursor2) - lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2; - if (lt_settings->ffe_preset) - lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset; - } -} - -enum dc_status dp_get_lane_status_and_lane_adjust( - struct dc_link *link, - const struct link_training_settings *link_training_setting, - union lane_status ln_status[LANE_COUNT_DP_MAX], - union lane_align_status_updated *ln_align, - union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], - uint32_t offset) -{ - unsigned int lane01_status_address = DP_LANE0_1_STATUS; - uint8_t lane_adjust_offset = 4; - unsigned int lane01_adjust_address; - uint8_t dpcd_buf[6] = {0}; - uint32_t lane; - enum dc_status status; - - if (is_repeater(link_training_setting, offset)) { - lane01_status_address = - DP_LANE0_1_STATUS_PHY_REPEATER1 + - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - lane_adjust_offset = 3; - } - - status = core_link_read_dpcd( - link, - lane01_status_address, - (uint8_t *)(dpcd_buf), - sizeof(dpcd_buf)); - - if (status != DC_OK) { - DC_LOG_HW_LINK_TRAINING("%s:\n Failed to read from address 0x%X," - " keep current lane status and lane adjust unchanged", - __func__, - lane01_status_address); - return status; - } - - for (lane = 0; lane < - (uint32_t)(link_training_setting->link_settings.lane_count); - lane++) { - - ln_status[lane].raw = - get_nibble_at_index(&dpcd_buf[0], lane); - ln_adjust[lane].raw = - get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane); - } - - ln_align->raw = dpcd_buf[2]; - - if (is_repeater(link_training_setting, offset)) { - DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" - " 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", - __func__, - offset, - lane01_status_address, dpcd_buf[0], - lane01_status_address + 1, dpcd_buf[1]); - - lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 + - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - - DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" - " 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", - __func__, - offset, - lane01_adjust_address, - dpcd_buf[lane_adjust_offset], - lane01_adjust_address + 1, - dpcd_buf[lane_adjust_offset + 1]); - } else { - DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", - __func__, - lane01_status_address, dpcd_buf[0], - lane01_status_address + 1, dpcd_buf[1]); - - lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1; - - DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", - __func__, - lane01_adjust_address, - dpcd_buf[lane_adjust_offset], - lane01_adjust_address + 1, - dpcd_buf[lane_adjust_offset + 1]); - } - - return status; -} - -static enum dc_status dpcd_128b_132b_set_lane_settings( - struct dc_link *link, - const struct link_training_settings *link_training_setting) -{ - enum dc_status status = core_link_write_dpcd(link, - DP_TRAINING_LANE0_SET, - (uint8_t *)(link_training_setting->dpcd_lane_settings), - sizeof(link_training_setting->dpcd_lane_settings)); - - DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", - __func__, - DP_TRAINING_LANE0_SET, - link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); - return status; -} - - -enum dc_status 
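
dp_get_lane_status_and_lane_adjust() pulls everything it needs in one 6-byte read starting at DP_LANE0_1_STATUS (0x202 per the DPCD register map): two lane-status bytes, the interlane-align byte at offset 2, and, in the non-repeater layout, the ADJUST_REQUEST bytes at offset 4, with two lanes packed per byte throughout. A sketch of the unpacking:

#include <stdint.h>

struct lt_status {
	uint8_t lane_status[4];	/* bit0 CR_DONE, bit1 EQ_DONE, bit2 SYMBOL_LOCKED */
	uint8_t align;		/* bit0 INTERLANE_ALIGN_DONE */
	uint8_t lane_adjust[4];	/* bits 1:0 requested VS, bits 3:2 requested PE */
};

static uint8_t nibble(const uint8_t *buf, unsigned int lane)
{
	return (lane & 1) ? (buf[lane / 2] >> 4) : (buf[lane / 2] & 0x0F);
}

/* dpcd[] holds the 6 bytes read from 0x202; the adjust bytes sit at
 * offset 4 here, offset 3 in the LTTPR layout (lane_adjust_offset). */
static void unpack_lane_status(const uint8_t dpcd[6], unsigned int lanes,
			       struct lt_status *out)
{
	unsigned int l;

	for (l = 0; l < lanes; l++) {
		out->lane_status[l] = nibble(&dpcd[0], l);
		out->lane_adjust[l] = nibble(&dpcd[4], l);
	}
	out->align = dpcd[2];
}
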
dpcd_set_lane_settings( - struct dc_link *link, - const struct link_training_settings *link_training_setting, - uint32_t offset) -{ - unsigned int lane0_set_address; - enum dc_status status; - - lane0_set_address = DP_TRAINING_LANE0_SET; - - if (is_repeater(link_training_setting, offset)) - lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 + - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - - status = core_link_write_dpcd(link, - lane0_set_address, - (uint8_t *)(link_training_setting->dpcd_lane_settings), - link_training_setting->link_settings.lane_count); - - if (is_repeater(link_training_setting, offset)) { - DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n" - " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", - __func__, - offset, - lane0_set_address, - link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, - link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, - link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, - link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); - - } else { - DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", - __func__, - lane0_set_address, - link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, - link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, - link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, - link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); - } - - return status; -} - -bool dp_is_max_vs_reached( - const struct link_training_settings *lt_settings) -{ - uint32_t lane; - for (lane = 0; lane < - (uint32_t)(lt_settings->link_settings.lane_count); - lane++) { - if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET - == VOLTAGE_SWING_MAX_LEVEL) - return true; - } - return false; - -} - -static bool perform_post_lt_adj_req_sequence( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - enum dc_lane_count lane_count = - lt_settings->link_settings.lane_count; - - uint32_t adj_req_count; - uint32_t adj_req_timer; - bool req_drv_setting_changed; - uint32_t lane; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; - union lane_align_status_updated dpcd_lane_status_updated = {0}; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - - req_drv_setting_changed = false; - for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT; - adj_req_count++) { - - req_drv_setting_changed = false; - - for (adj_req_timer = 0; - adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT; - adj_req_timer++) { - - dp_get_lane_status_and_lane_adjust( - link, - lt_settings, - dpcd_lane_status, - &dpcd_lane_status_updated, - dpcd_lane_adjust, - DPRX); - - if (dpcd_lane_status_updated.bits. 
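
The repeater-relative addressing used by dpcd_set_lane_settings() (and by the pattern and status helpers above) is a fixed-stride scheme: each LTTPR owns a block of DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE bytes starting at the corresponding *_PHY_REPEATER1 address. A sketch with the numeric values spelled out (0xF0011 and 0x50, as defined in the DP spec and drm_dp_helper.h; treat them as assumptions here):

#include <stdint.h>

#define TRAINING_LANE0_SET			0x103
#define TRAINING_LANE0_SET_PHY_REPEATER1	0xF0011
#define REPEATER_CONFIG_STATUS_SIZE		0x50

/* offset 0 (DPRX) addresses the sink itself; offset N >= 1 addresses
 * the N-th link-training-tunable PHY repeater. */
static uint32_t lane0_set_address(uint32_t offset)
{
	if (offset == 0)
		return TRAINING_LANE0_SET;
	return TRAINING_LANE0_SET_PHY_REPEATER1 +
	       REPEATER_CONFIG_STATUS_SIZE * (offset - 1);
}
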
- POST_LT_ADJ_REQ_IN_PROGRESS == 0) - return true; - - if (!dp_is_cr_done(lane_count, dpcd_lane_status)) - return false; - - if (!dp_is_ch_eq_done(lane_count, dpcd_lane_status) || - !dp_is_symbol_locked(lane_count, dpcd_lane_status) || - !dp_is_interlane_aligned(dpcd_lane_status_updated)) - return false; - - for (lane = 0; lane < (uint32_t)(lane_count); lane++) { - - if (lt_settings-> - dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET != - dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE || - lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET != - dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) { - - req_drv_setting_changed = true; - break; - } - } - - if (req_drv_setting_changed) { - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - - dc_link_dp_set_drive_settings(link, - link_res, - lt_settings); - break; - } - - msleep(1); - } - - if (!req_drv_setting_changed) { - DC_LOG_WARNING("%s: Post Link Training Adjust Request Timed out\n", - __func__); - - ASSERT(0); - return true; - } - } - DC_LOG_WARNING("%s: Post Link Training Adjust Request limit reached\n", - __func__); - - ASSERT(0); - return true; - -} - -/* Only used for channel equalization */ -uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval) -{ - unsigned int aux_rd_interval_us = 400; - - switch (dpcd_aux_read_interval) { - case 0x01: - aux_rd_interval_us = 4000; - break; - case 0x02: - aux_rd_interval_us = 8000; - break; - case 0x03: - aux_rd_interval_us = 12000; - break; - case 0x04: - aux_rd_interval_us = 16000; - break; - case 0x05: - aux_rd_interval_us = 32000; - break; - case 0x06: - aux_rd_interval_us = 64000; - break; - default: - break; - } - - return aux_rd_interval_us; -} - -enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status) -{ - enum link_training_result result = LINK_TRAINING_SUCCESS; - - if (ln_count >= LANE_COUNT_ONE && !dpcd_lane_status[0].bits.CR_DONE_0) - result = LINK_TRAINING_CR_FAIL_LANE0; - else if (ln_count >= LANE_COUNT_TWO && !dpcd_lane_status[1].bits.CR_DONE_0) - result = LINK_TRAINING_CR_FAIL_LANE1; - else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[2].bits.CR_DONE_0) - result = LINK_TRAINING_CR_FAIL_LANE23; - else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[3].bits.CR_DONE_0) - result = LINK_TRAINING_CR_FAIL_LANE23; - return result; -} - -static enum link_training_result perform_channel_equalization_sequence( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings, - uint32_t offset) -{ - enum dc_dp_training_pattern tr_pattern; - uint32_t retries_ch_eq; - uint32_t wait_time_microsec; - enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; - union lane_align_status_updated dpcd_lane_status_updated = {0}; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - - /* Note: also check that TPS4 is a supported feature*/ - tr_pattern = lt_settings->pattern_for_eq; - - if (is_repeater(lt_settings, offset) && dp_get_link_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) - tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; - - dp_set_hw_training_pattern(link, link_res, tr_pattern, offset); - - for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; - retries_ch_eq++) { - - dp_set_hw_lane_settings(link, link_res, lt_settings, offset); - - /* 2. 
update DPCD*/ - if (!retries_ch_eq) - /* EPR #361076 - write as a 5-byte burst, - * but only for the 1-st iteration - */ - - dpcd_set_lt_pattern_and_lane_settings( - link, - lt_settings, - tr_pattern, offset); - else - dpcd_set_lane_settings(link, lt_settings, offset); - - /* 3. wait for receiver to lock-on*/ - wait_time_microsec = lt_settings->eq_pattern_time; - - if (is_repeater(lt_settings, offset)) - wait_time_microsec = - dp_translate_training_aux_read_interval( - link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]); - - dp_wait_for_training_aux_rd_interval( - link, - wait_time_microsec); - - /* 4. Read lane status and requested - * drive settings as set by the sink*/ - - dp_get_lane_status_and_lane_adjust( - link, - lt_settings, - dpcd_lane_status, - &dpcd_lane_status_updated, - dpcd_lane_adjust, - offset); - - /* 5. check CR done*/ - if (!dp_is_cr_done(lane_count, dpcd_lane_status)) - return dpcd_lane_status[0].bits.CR_DONE_0 ? - LINK_TRAINING_EQ_FAIL_CR_PARTIAL : - LINK_TRAINING_EQ_FAIL_CR; - - /* 6. check CHEQ done*/ - if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && - dp_is_symbol_locked(lane_count, dpcd_lane_status) && - dp_is_interlane_aligned(dpcd_lane_status_updated)) - return LINK_TRAINING_SUCCESS; - - /* 7. update VS/PE/PC2 in lt_settings*/ - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - } - - return LINK_TRAINING_EQ_FAIL_EQ; - -} - -static void start_clock_recovery_pattern_early(struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings, - uint32_t offset) -{ - DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n", - __func__); - dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); - dp_set_hw_lane_settings(link, link_res, lt_settings, offset); - udelay(400); -} - -static enum link_training_result perform_clock_recovery_sequence( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings, - uint32_t offset) -{ - uint32_t retries_cr; - uint32_t retry_count; - uint32_t wait_time_microsec; - enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; - union lane_align_status_updated dpcd_lane_status_updated; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - - retries_cr = 0; - retry_count = 0; - - memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); - memset(&dpcd_lane_status_updated, '\0', - sizeof(dpcd_lane_status_updated)); - - if (!link->ctx->dc->work_arounds.lt_early_cr_pattern) - dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); - - /* najeeb - The synaptics MST hub can put the LT in - * infinite loop by switching the VS - */ - /* between level 0 and level 1 continuously, here - * we try for CR lock for LinkTrainingMaxCRRetry count*/ - while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && - (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { - - - /* 1. call HWSS to set lane settings*/ - dp_set_hw_lane_settings( - link, - link_res, - lt_settings, - offset); - - /* 2. update DPCD of the receiver*/ - if (!retry_count) - /* EPR #361076 - write as a 5-byte burst, - * but only for the 1-st iteration.*/ - dpcd_set_lt_pattern_and_lane_settings( - link, - lt_settings, - lt_settings->pattern_for_cr, - offset); - else - dpcd_set_lane_settings( - link, - lt_settings, - offset); - - /* 3. 
wait receiver to lock-on*/ - wait_time_microsec = lt_settings->cr_pattern_time; - - dp_wait_for_training_aux_rd_interval( - link, - wait_time_microsec); - - /* 4. Read lane status and requested drive - * settings as set by the sink - */ - dp_get_lane_status_and_lane_adjust( - link, - lt_settings, - dpcd_lane_status, - &dpcd_lane_status_updated, - dpcd_lane_adjust, - offset); - - /* 5. check CR done*/ - if (dp_is_cr_done(lane_count, dpcd_lane_status)) - return LINK_TRAINING_SUCCESS; - - /* 6. max VS reached*/ - if ((dp_get_link_encoding_format(<_settings->link_settings) == - DP_8b_10b_ENCODING) && - dp_is_max_vs_reached(lt_settings)) - break; - - /* 7. same lane settings*/ - /* Note: settings are the same for all lanes, - * so comparing first lane is sufficient*/ - if ((dp_get_link_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) && - lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == - dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) - retries_cr++; - else if ((dp_get_link_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) && - lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE == - dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE) - retries_cr++; - else - retries_cr = 0; - - /* 8. update VS/PE/PC2 in lt_settings*/ - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - retry_count++; - } - - if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { - ASSERT(0); - DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue", - __func__, - LINK_TRAINING_MAX_CR_RETRY); - - } - - return dp_get_cr_failure(lane_count, dpcd_lane_status); -} - -static inline enum link_training_result dp_transition_to_video_idle( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings, - enum link_training_result status) -{ - union lane_count_set lane_count_set = {0}; - - /* 4. mainlink output idle pattern*/ - dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); - - /* - * 5. post training adjust if required - * If the upstream DPTX and downstream DPRX both support TPS4, - * TPS4 must be used instead of POST_LT_ADJ_REQ. - */ - if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 || - lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) { - /* delay 5ms after Main Link output idle pattern and then check - * DPCD 0202h. 
- */ - if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) { - msleep(5); - status = dp_check_link_loss_status(link, lt_settings); - } - return status; - } - - if (status == LINK_TRAINING_SUCCESS && - perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false) - status = LINK_TRAINING_LQA_FAIL; - - lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count; - lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; - lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; - - core_link_write_dpcd( - link, - DP_LANE_COUNT_SET, - &lane_count_set.raw, - sizeof(lane_count_set)); - - return status; -} - -enum link_training_result dp_check_link_loss_status( - struct dc_link *link, - const struct link_training_settings *link_training_setting) -{ - enum link_training_result status = LINK_TRAINING_SUCCESS; - union lane_status lane_status; - uint8_t dpcd_buf[6] = {0}; - uint32_t lane; - - core_link_read_dpcd( - link, - DP_SINK_COUNT, - (uint8_t *)(dpcd_buf), - sizeof(dpcd_buf)); - - /*parse lane status*/ - for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { - /* - * check lanes status - */ - lane_status.raw = get_nibble_at_index(&dpcd_buf[2], lane); - - if (!lane_status.bits.CHANNEL_EQ_DONE_0 || - !lane_status.bits.CR_DONE_0 || - !lane_status.bits.SYMBOL_LOCKED_0) { - /* if one of the channel equalization, clock - * recovery or symbol lock is dropped - * consider it as (link has been - * dropped) dp sink status has changed - */ - status = LINK_TRAINING_LINK_LOSS; - break; - } - } - - return status; -} - -static inline void decide_8b_10b_training_settings( - struct dc_link *link, - const struct dc_link_settings *link_setting, - struct link_training_settings *lt_settings) -{ - memset(lt_settings, '\0', sizeof(struct link_training_settings)); - - /* Initialize link settings */ - lt_settings->link_settings.use_link_rate_set = link_setting->use_link_rate_set; - lt_settings->link_settings.link_rate_set = link_setting->link_rate_set; - lt_settings->link_settings.link_rate = link_setting->link_rate; - lt_settings->link_settings.lane_count = link_setting->lane_count; - /* TODO hard coded to SS for now - * lt_settings.link_settings.link_spread = - * dal_display_path_is_ss_supported( - * path_mode->display_path) ? - * LINK_SPREAD_05_DOWNSPREAD_30KHZ : - * LINK_SPREAD_DISABLED; - */ - lt_settings->link_settings.link_spread = link->dp_ss_off ? 
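
dp_check_link_loss_status() treats the link as lost as soon as any active lane has dropped clock recovery, channel equalization, or symbol lock; the status nibbles sit two lanes per byte at offset 2 of the 6-byte read that starts at DP_SINK_COUNT. A compilable sketch of the per-lane test:

#include <stdbool.h>
#include <stdint.h>

#define LANE_CR_DONE		0x1
#define LANE_CHANNEL_EQ_DONE	0x2
#define LANE_SYMBOL_LOCKED	0x4
#define LANE_TRAINED		(LANE_CR_DONE | LANE_CHANNEL_EQ_DONE | \
				 LANE_SYMBOL_LOCKED)

static bool link_still_trained(const uint8_t lane_status[2],
			       unsigned int lanes)
{
	unsigned int l;

	for (l = 0; l < lanes; l++) {
		uint8_t nib = lane_status[l / 2] >> ((l & 1) ? 4 : 0);

		if ((nib & LANE_TRAINED) != LANE_TRAINED)
			return false;	/* any dropped lock counts as link loss */
	}
	return true;
}
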
- LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ; - lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting); - lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting); - lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting); - lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting); - lt_settings->enhanced_framing = 1; - lt_settings->should_set_fec_ready = true; - lt_settings->disallow_per_lane_settings = true; - lt_settings->always_match_dpcd_with_hw_lane_settings = true; - lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link); - dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); -} - -static inline void decide_128b_132b_training_settings(struct dc_link *link, - const struct dc_link_settings *link_settings, - struct link_training_settings *lt_settings) -{ - memset(lt_settings, 0, sizeof(*lt_settings)); - - lt_settings->link_settings = *link_settings; - /* TODO: should decide link spread when populating link_settings */ - lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED : - LINK_SPREAD_05_DOWNSPREAD_30KHZ; - - lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings); - lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings); - lt_settings->eq_pattern_time = 2500; - lt_settings->eq_wait_time_limit = 400000; - lt_settings->eq_loop_count_limit = 20; - lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS; - lt_settings->cds_pattern_time = 2500; - lt_settings->cds_wait_time_limit = (dp_convert_to_count( - link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000; - lt_settings->disallow_per_lane_settings = true; - lt_settings->lttpr_mode = dp_decide_128b_132b_lttpr_mode(link); - dp_hw_to_dpcd_lane_settings(lt_settings, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); -} - -void dp_decide_training_settings( - struct dc_link *link, - const struct dc_link_settings *link_settings, - struct link_training_settings *lt_settings) -{ - if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) - decide_8b_10b_training_settings(link, link_settings, lt_settings); - else if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) - decide_128b_132b_training_settings(link, link_settings, lt_settings); -} - -static void override_training_settings( - struct dc_link *link, - const struct dc_link_training_overrides *overrides, - struct link_training_settings *lt_settings) -{ - uint32_t lane; - - /* Override link spread */ - if (!link->dp_ss_off && overrides->downspread != NULL) - lt_settings->link_settings.link_spread = *overrides->downspread ? 
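
decide_128b_132b_training_settings() sizes its timeouts from the topology: the CDS wait limit grants each hop, the sink plus every LTTPR, a 20 ms budget, while channel EQ gets a flat 400 ms and at most 20 loop iterations. The CDS arithmetic as a sketch:

#include <stdint.h>

#define HOP_CDS_BUDGET_US	20000	/* 20 ms per hop */

static uint32_t cds_wait_time_limit_us(uint8_t lttpr_count)
{
	/* the sink (DPRX) counts as one hop on top of the LTTPRs */
	return (uint32_t)(lttpr_count + 1) * HOP_CDS_BUDGET_US;
}

With two LTTPRs this yields 60 ms, matching the (dp_convert_to_count(...) + 1) * 20000 expression above.
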
- LINK_SPREAD_05_DOWNSPREAD_30KHZ - : LINK_SPREAD_DISABLED; - - /* Override lane settings */ - if (overrides->voltage_swing != NULL) - lt_settings->voltage_swing = overrides->voltage_swing; - if (overrides->pre_emphasis != NULL) - lt_settings->pre_emphasis = overrides->pre_emphasis; - if (overrides->post_cursor2 != NULL) - lt_settings->post_cursor2 = overrides->post_cursor2; - if (overrides->ffe_preset != NULL) - lt_settings->ffe_preset = overrides->ffe_preset; - /* Override HW lane settings with BIOS forced values if present */ - if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && - lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) { - lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING; - lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS; - lt_settings->always_match_dpcd_with_hw_lane_settings = false; - } - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = - lt_settings->voltage_swing != NULL ? - *lt_settings->voltage_swing : - VOLTAGE_SWING_LEVEL0; - lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = - lt_settings->pre_emphasis != NULL ? - *lt_settings->pre_emphasis - : PRE_EMPHASIS_DISABLED; - lt_settings->hw_lane_settings[lane].POST_CURSOR2 = - lt_settings->post_cursor2 != NULL ? - *lt_settings->post_cursor2 - : POST_CURSOR2_DISABLED; - } - - if (lt_settings->always_match_dpcd_with_hw_lane_settings) - dp_hw_to_dpcd_lane_settings(lt_settings, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - - /* Initialize training timings */ - if (overrides->cr_pattern_time != NULL) - lt_settings->cr_pattern_time = *overrides->cr_pattern_time; - - if (overrides->eq_pattern_time != NULL) - lt_settings->eq_pattern_time = *overrides->eq_pattern_time; - - if (overrides->pattern_for_cr != NULL) - lt_settings->pattern_for_cr = *overrides->pattern_for_cr; - if (overrides->pattern_for_eq != NULL) - lt_settings->pattern_for_eq = *overrides->pattern_for_eq; - - if (overrides->enhanced_framing != NULL) - lt_settings->enhanced_framing = *overrides->enhanced_framing; - - if (link->preferred_training_settings.fec_enable != NULL) - lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable; - - #if defined(CONFIG_DRM_AMD_DC_DCN) - /* Check DP tunnel LTTPR mode debug option. 
*/ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr) - lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR; - -#endif - dp_get_lttpr_mode_override(link, <_settings->lttpr_mode); - -} - -uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count) -{ - switch (lttpr_repeater_count) { - case 0x80: // 1 lttpr repeater - return 1; - case 0x40: // 2 lttpr repeaters - return 2; - case 0x20: // 3 lttpr repeaters - return 3; - case 0x10: // 4 lttpr repeaters - return 4; - case 0x08: // 5 lttpr repeaters - return 5; - case 0x04: // 6 lttpr repeaters - return 6; - case 0x02: // 7 lttpr repeaters - return 7; - case 0x01: // 8 lttpr repeaters - return 8; - default: - break; - } - return 0; // invalid value -} - -static enum dc_status configure_lttpr_mode_transparent(struct dc_link *link) -{ - uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; - - DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); - return core_link_write_dpcd(link, - DP_PHY_REPEATER_MODE, - (uint8_t *)&repeater_mode, - sizeof(repeater_mode)); -} - -static enum dc_status configure_lttpr_mode_non_transparent( - struct dc_link *link, - const struct link_training_settings *lt_settings) -{ - /* aux timeout is already set to extended */ - /* RESET/SET lttpr mode to enable non transparent mode */ - uint8_t repeater_cnt; - uint32_t aux_interval_address; - uint8_t repeater_id; - enum dc_status result = DC_ERROR_UNEXPECTED; - uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; - - enum dp_link_encoding encoding = dp_get_link_encoding_format(<_settings->link_settings); - - if (encoding == DP_8b_10b_ENCODING) { - DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); - result = core_link_write_dpcd(link, - DP_PHY_REPEATER_MODE, - (uint8_t *)&repeater_mode, - sizeof(repeater_mode)); - - } - - if (result == DC_OK) { - link->dpcd_caps.lttpr_caps.mode = repeater_mode; - } - - if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { - - DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__); - - repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT; - result = core_link_write_dpcd(link, - DP_PHY_REPEATER_MODE, - (uint8_t *)&repeater_mode, - sizeof(repeater_mode)); - - if (result == DC_OK) { - link->dpcd_caps.lttpr_caps.mode = repeater_mode; - } - - if (encoding == DP_8b_10b_ENCODING) { - repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - - /* Driver does not need to train the first hop. Skip DPCD read and clear - * AUX_RD_INTERVAL for DPTX-to-DPIA hop. 
- */ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) - link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0; - - for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { - aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1)); - core_link_read_dpcd( - link, - aux_interval_address, - (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1], - sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1])); - link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F; - } - } - } - - return result; -} - -static void repeater_training_done(struct dc_link *link, uint32_t offset) -{ - union dpcd_training_pattern dpcd_pattern = {0}; - - const uint32_t dpcd_base_lt_offset = - DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - /* Set training not in progress*/ - dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE; - - core_link_write_dpcd( - link, - dpcd_base_lt_offset, - &dpcd_pattern.raw, - 1); - - DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n", - __func__, - offset, - dpcd_base_lt_offset, - dpcd_pattern.v1_4.TRAINING_PATTERN_SET); -} - -static void print_status_message( - struct dc_link *link, - const struct link_training_settings *lt_settings, - enum link_training_result status) -{ - char *link_rate = "Unknown"; - char *lt_result = "Unknown"; - char *lt_spread = "Disabled"; - - switch (lt_settings->link_settings.link_rate) { - case LINK_RATE_LOW: - link_rate = "RBR"; - break; - case LINK_RATE_RATE_2: - link_rate = "R2"; - break; - case LINK_RATE_RATE_3: - link_rate = "R3"; - break; - case LINK_RATE_HIGH: - link_rate = "HBR"; - break; - case LINK_RATE_RBR2: - link_rate = "RBR2"; - break; - case LINK_RATE_RATE_6: - link_rate = "R6"; - break; - case LINK_RATE_HIGH2: - link_rate = "HBR2"; - break; - case LINK_RATE_HIGH3: - link_rate = "HBR3"; - break; - case LINK_RATE_UHBR10: - link_rate = "UHBR10"; - break; - case LINK_RATE_UHBR13_5: - link_rate = "UHBR13.5"; - break; - case LINK_RATE_UHBR20: - link_rate = "UHBR20"; - break; - default: - break; - } - - switch (status) { - case LINK_TRAINING_SUCCESS: - lt_result = "pass"; - break; - case LINK_TRAINING_CR_FAIL_LANE0: - lt_result = "CR failed lane0"; - break; - case LINK_TRAINING_CR_FAIL_LANE1: - lt_result = "CR failed lane1"; - break; - case LINK_TRAINING_CR_FAIL_LANE23: - lt_result = "CR failed lane23"; - break; - case LINK_TRAINING_EQ_FAIL_CR: - lt_result = "CR failed in EQ"; - break; - case LINK_TRAINING_EQ_FAIL_CR_PARTIAL: - lt_result = "CR failed in EQ partially"; - break; - case LINK_TRAINING_EQ_FAIL_EQ: - lt_result = "EQ failed"; - break; - case LINK_TRAINING_LQA_FAIL: - lt_result = "LQA failed"; - break; - case LINK_TRAINING_LINK_LOSS: - lt_result = "Link loss"; - break; - case DP_128b_132b_LT_FAILED: - lt_result = "LT_FAILED received"; - break; - case DP_128b_132b_MAX_LOOP_COUNT_REACHED: - lt_result = "max loop count reached"; - break; - case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT: - lt_result = "channel EQ timeout"; - break; - case DP_128b_132b_CDS_DONE_TIMEOUT: - lt_result = "CDS timeout"; - break; - default: - break; - } - - switch (lt_settings->link_settings.link_spread) { - case LINK_SPREAD_DISABLED: - lt_spread = "Disabled"; - break; - case LINK_SPREAD_05_DOWNSPREAD_30KHZ: - lt_spread = "0.5% 30KHz"; - break; - case LINK_SPREAD_05_DOWNSPREAD_33KHZ: - lt_spread = "0.5% 33KHz"; - break; - default: - break; - } - - 
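
dp_convert_to_count() decodes PHY_REPEATER_CNT, which is one-hot: 0x80 means one LTTPR, 0x40 two, down to 0x01 for eight. Its switch is equivalent to a short closed form (a sketch; __builtin_ctz is the GCC/Clang count-trailing-zeros intrinsic):

#include <stdint.h>

static uint8_t repeater_count(uint8_t phy_repeater_cnt)
{
	/* reject zero and any non-one-hot encoding */
	if (phy_repeater_cnt == 0 ||
	    (phy_repeater_cnt & (phy_repeater_cnt - 1)))
		return 0;
	return 8 - __builtin_ctz(phy_repeater_cnt);
}
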
/* Connectivity log: link training */ - - /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */ - - CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s", - link_rate, - lt_settings->link_settings.lane_count, - lt_result, - lt_settings->hw_lane_settings[0].VOLTAGE_SWING, - lt_settings->hw_lane_settings[0].PRE_EMPHASIS, - lt_spread); -} - -void dc_link_dp_set_drive_settings( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - /* program ASIC PHY settings*/ - dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); - - dp_hw_to_dpcd_lane_settings(lt_settings, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - - /* Notify DP sink the PHY settings from source */ - dpcd_set_lane_settings(link, lt_settings, DPRX); -} - -bool dc_link_dp_perform_link_training_skip_aux( - struct dc_link *link, - const struct link_resource *link_res, - const struct dc_link_settings *link_setting) -{ - struct link_training_settings lt_settings = {0}; - - dp_decide_training_settings( - link, - link_setting, - <_settings); - override_training_settings( - link, - &link->preferred_training_settings, - <_settings); - - /* 1. Perform_clock_recovery_sequence. */ - - /* transmit training pattern for clock recovery */ - dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_cr, DPRX); - - /* call HWSS to set lane settings*/ - dp_set_hw_lane_settings(link, link_res, <_settings, DPRX); - - /* wait receiver to lock-on*/ - dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time); - - /* 2. Perform_channel_equalization_sequence. */ - - /* transmit training pattern for channel equalization. */ - dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_eq, DPRX); - - /* call HWSS to set lane settings*/ - dp_set_hw_lane_settings(link, link_res, <_settings, DPRX); - - /* wait receiver to lock-on. */ - dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time); - - /* 3. Perform_link_training_int. */ - - /* Mainlink output idle pattern. 
*/ - dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); - - print_status_message(link, <_settings, LINK_TRAINING_SUCCESS); - - return true; -} - -enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_training_settings *lt_settings) -{ - enum dc_status status = DC_OK; - - if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) - status = configure_lttpr_mode_transparent(link); - - else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) - status = configure_lttpr_mode_non_transparent(link, lt_settings); - - return status; -} - -static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding) -{ - uint8_t sink_status = 0; - uint8_t i; - - /* clear training pattern set */ - dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); - - if (encoding == DP_128b_132b_ENCODING) { - /* poll for intra-hop disable */ - for (i = 0; i < 10; i++) { - if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && - (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0) - break; - udelay(1000); - } - } -} - -enum dc_status dpcd_configure_channel_coding(struct dc_link *link, - struct link_training_settings *lt_settings) -{ - enum dp_link_encoding encoding = - dp_get_link_encoding_format( - <_settings->link_settings); - enum dc_status status; - - status = core_link_write_dpcd( - link, - DP_MAIN_LINK_CHANNEL_CODING_SET, - (uint8_t *) &encoding, - 1); - DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X MAIN_LINK_CHANNEL_CODING_SET = %x\n", - __func__, - DP_MAIN_LINK_CHANNEL_CODING_SET, - encoding); - - return status; -} - -static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link, - uint32_t *interval_in_us) -{ - union dp_128b_132b_training_aux_rd_interval dpcd_interval; - uint32_t interval_unit = 0; - - dpcd_interval.raw = 0; - core_link_read_dpcd(link, DP_128b_132b_TRAINING_AUX_RD_INTERVAL, - &dpcd_interval.raw, sizeof(dpcd_interval.raw)); - interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */ - /* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) * - * INTERVAL_UNIT. 
The maximum is 256 ms - */ - *interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000; -} - -static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - uint8_t loop_count; - uint32_t aux_rd_interval = 0; - uint32_t wait_time = 0; - union lane_align_status_updated dpcd_lane_status_updated = {0}; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; - enum dc_status status = DC_OK; - enum link_training_result result = LINK_TRAINING_SUCCESS; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - - /* Transmit 128b/132b_TPS1 over Main-Link */ - dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX); - /* Set TRAINING_PATTERN_SET to 01h */ - dpcd_set_training_pattern(link, lt_settings->pattern_for_cr); - - /* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */ - dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); - dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, - &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); - dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX); - - /* Set loop counter to start from 1 */ - loop_count = 1; - - /* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */ - dpcd_set_lt_pattern_and_lane_settings(link, lt_settings, - lt_settings->pattern_for_eq, DPRX); - - /* poll for channel EQ done */ - while (result == LINK_TRAINING_SUCCESS) { - dp_wait_for_training_aux_rd_interval(link, aux_rd_interval); - wait_time += aux_rd_interval; - status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, - &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); - if (status != DC_OK) { - result = LINK_TRAINING_ABORT; - } else if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count, - dpcd_lane_status)) { - /* pass */ - break; - } else if (loop_count >= lt_settings->eq_loop_count_limit) { - result = DP_128b_132b_MAX_LOOP_COUNT_REACHED; - } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { - result = DP_128b_132b_LT_FAILED; - } else { - dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); - dpcd_128b_132b_set_lane_settings(link, lt_settings); - } - loop_count++; - } - - /* poll for EQ interlane align done */ - while (result == LINK_TRAINING_SUCCESS) { - if (status != DC_OK) { - result = LINK_TRAINING_ABORT; - } else if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) { - /* pass */ - break; - } else if (wait_time >= lt_settings->eq_wait_time_limit) { - result = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT; - } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { - result = DP_128b_132b_LT_FAILED; - } else { - dp_wait_for_training_aux_rd_interval(link, - lt_settings->eq_pattern_time); - wait_time += lt_settings->eq_pattern_time; - status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, - &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); - } - } - - return result; -} - -static enum link_training_result dp_perform_128b_132b_cds_done_sequence( - 
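
The two channel codings report their training AUX read interval differently. For 8b/10b LTTPRs, dp_translate_training_aux_read_interval() maps the raw code to 400 us (the default) or 4/8/12/16/32/64 ms; for 128b/132b, the register read by dpcd_128b_132b_get_aux_rd_interval() carries a 7-bit VALUE plus a unit bit (1 = 1 ms, 0 = 2 ms), and the wait is (VALUE + 1) * unit, 256 ms at most. Both decodes as sketches (the 128b/132b bit positions follow the DP 2.0 register layout, an assumption here):

#include <stdint.h>

/* 8b/10b: channel-EQ interval from the LTTPR AUX_RD_INTERVAL code. */
static uint32_t eq_aux_rd_interval_us_8b10b(uint8_t code)
{
	switch (code) {
	case 0x01: return 4000;
	case 0x02: return 8000;
	case 0x03: return 12000;
	case 0x04: return 16000;
	case 0x05: return 32000;
	case 0x06: return 64000;
	default:   return 400;	/* spec default */
	}
}

/* 128b/132b: bit 7 selects the 1 ms unit, bits 6:0 are VALUE. */
static uint32_t eq_aux_rd_interval_us_128b132b(uint8_t raw)
{
	uint32_t unit_ms = (raw & 0x80) ? 1 : 2;

	return ((raw & 0x7F) + 1) * unit_ms * 1000;	/* max 256 ms */
}
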
struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - /* Assumption: assume hardware has transmitted eq pattern */ - enum dc_status status = DC_OK; - enum link_training_result result = LINK_TRAINING_SUCCESS; - union lane_align_status_updated dpcd_lane_status_updated = {0}; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - uint32_t wait_time = 0; - - /* initiate CDS done sequence */ - dpcd_set_training_pattern(link, lt_settings->pattern_for_cds); - - /* poll for CDS interlane align done and symbol lock */ - while (result == LINK_TRAINING_SUCCESS) { - dp_wait_for_training_aux_rd_interval(link, - lt_settings->cds_pattern_time); - wait_time += lt_settings->cds_pattern_time; - status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, - &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); - if (status != DC_OK) { - result = LINK_TRAINING_ABORT; - } else if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) && - dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) { - /* pass */ - break; - } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { - result = DP_128b_132b_LT_FAILED; - } else if (wait_time >= lt_settings->cds_wait_time_limit) { - result = DP_128b_132b_CDS_DONE_TIMEOUT; - } - } - - return result; -} - -static enum link_training_result dp_perform_8b_10b_link_training( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - enum link_training_result status = LINK_TRAINING_SUCCESS; - - uint8_t repeater_cnt; - uint8_t repeater_id; - uint8_t lane = 0; - - if (link->ctx->dc->work_arounds.lt_early_cr_pattern) - start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); - - /* 1. set link rate, lane count and spread. */ - dpcd_set_link_settings(link, lt_settings); - - if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { - - /* 2. 
perform link training (set link training done - * to false is done as well) - */ - repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - - for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); - repeater_id--) { - status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); - - if (status != LINK_TRAINING_SUCCESS) { - repeater_training_done(link, repeater_id); - break; - } - - status = perform_channel_equalization_sequence(link, - link_res, - lt_settings, - repeater_id); - - repeater_training_done(link, repeater_id); - - if (status != LINK_TRAINING_SUCCESS) - break; - - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - lt_settings->dpcd_lane_settings[lane].raw = 0; - lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; - lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; - } - } - } - - if (status == LINK_TRAINING_SUCCESS) { - status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX); - if (status == LINK_TRAINING_SUCCESS) { - status = perform_channel_equalization_sequence(link, - link_res, - lt_settings, - DPRX); - } - } - - return status; -} - -static enum link_training_result dp_perform_128b_132b_link_training( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - enum link_training_result result = LINK_TRAINING_SUCCESS; - - /* TODO - DP2.0 Link: remove legacy_dp2_lt logic */ - if (link->dc->debug.legacy_dp2_lt) { - struct link_training_settings legacy_settings; - - decide_8b_10b_training_settings(link, - <_settings->link_settings, - &legacy_settings); - return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings); - } - - dpcd_set_link_settings(link, lt_settings); - - if (result == LINK_TRAINING_SUCCESS) - result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings); - - if (result == LINK_TRAINING_SUCCESS) - result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings); - - return result; -} - -static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - enum link_training_result status = LINK_TRAINING_SUCCESS; - uint8_t lane = 0; - uint8_t toggle_rate = 0x6; - uint8_t target_rate = 0x6; - bool apply_toggle_rate_wa = false; - uint8_t repeater_cnt; - uint8_t repeater_id; - - /* Fixed VS/PE specific: Force CR AUX RD Interval to at least 16ms */ - if (lt_settings->cr_pattern_time < 16000) - lt_settings->cr_pattern_time = 16000; - - /* Fixed VS/PE specific: Toggle link rate */ - apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate); - target_rate = get_dpcd_link_rate(<_settings->link_settings); - toggle_rate = (target_rate == 0x6) ? 0xA : 0x6; - - if (apply_toggle_rate_wa) - lt_settings->link_settings.link_rate = toggle_rate; - - if (link->ctx->dc->work_arounds.lt_early_cr_pattern) - start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); - - /* 1. set link rate, lane count and spread. */ - dpcd_set_link_settings(link, lt_settings); - - /* Fixed VS/PE specific: Toggle link rate back*/ - if (apply_toggle_rate_wa) { - core_link_write_dpcd( - link, - DP_LINK_BW_SET, - &target_rate, - 1); - } - - link->vendor_specific_lttpr_link_rate_wa = target_rate; - - if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { - - /* 2. 
perform link training (set link training done - * to false is done as well) - */ - repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - - for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); - repeater_id--) { - status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); - - if (status != LINK_TRAINING_SUCCESS) { - repeater_training_done(link, repeater_id); - break; - } - - status = perform_channel_equalization_sequence(link, - link_res, - lt_settings, - repeater_id); - - repeater_training_done(link, repeater_id); - - if (status != LINK_TRAINING_SUCCESS) - break; - - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - lt_settings->dpcd_lane_settings[lane].raw = 0; - lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; - lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; - } - } - } - - if (status == LINK_TRAINING_SUCCESS) { - status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX); - if (status == LINK_TRAINING_SUCCESS) { - status = perform_channel_equalization_sequence(link, - link_res, - lt_settings, - DPRX); - } - } - - return status; -} - -static enum link_training_result dp_perform_fixed_vs_pe_training_sequence( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; - const uint8_t offset = dp_convert_to_count( - link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; - const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68}; - uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; - uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; - uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; - uint32_t vendor_lttpr_write_address = 0xF004F; - enum link_training_result status = LINK_TRAINING_SUCCESS; - uint8_t lane = 0; - union down_spread_ctrl downspread = {0}; - union lane_count_set lane_count_set = {0}; - uint8_t toggle_rate; - uint8_t rate; - - /* Only 8b/10b is supported */ - ASSERT(dp_get_link_encoding_format(<_settings->link_settings) == - DP_8b_10b_ENCODING); - - if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { - status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings); - return status; - } - - if (offset != 0xFF) { - vendor_lttpr_write_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - - /* Certain display and cable configuration require extra delay */ - if (offset > 2) - pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; - } - - /* Vendor specific: Reset lane settings */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_reset[0], - sizeof(vendor_lttpr_write_data_reset)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); - - /* Vendor specific: Enable intercept */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_intercept_en[0], - sizeof(vendor_lttpr_write_data_intercept_en)); - - /* 1. set link rate, lane count and spread. 
*/ - - downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread); - - lane_count_set.bits.LANE_COUNT_SET = - lt_settings->link_settings.lane_count; - - lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; - lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; - - - if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { - lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = - link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; - } - - core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, - &downspread.raw, sizeof(downspread)); - - core_link_write_dpcd(link, DP_LANE_COUNT_SET, - &lane_count_set.raw, 1); - - rate = get_dpcd_link_rate(<_settings->link_settings); - - /* Vendor specific: Toggle link rate */ - toggle_rate = (rate == 0x6) ? 0xA : 0x6; - - if (link->vendor_specific_lttpr_link_rate_wa == rate) { - core_link_write_dpcd( - link, - DP_LINK_BW_SET, - &toggle_rate, - 1); - } - - link->vendor_specific_lttpr_link_rate_wa = rate; - - core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); - - DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", - __func__, - DP_LINK_BW_SET, - lt_settings->link_settings.link_rate, - DP_LANE_COUNT_SET, - lt_settings->link_settings.lane_count, - lt_settings->enhanced_framing, - DP_DOWNSPREAD_CTRL, - lt_settings->link_settings.link_spread); - - /* 2. Perform link training */ - - /* Perform Clock Recovery Sequence */ - if (status == LINK_TRAINING_SUCCESS) { - const uint8_t max_vendor_dpcd_retries = 10; - uint32_t retries_cr; - uint32_t retry_count; - uint32_t wait_time_microsec; - enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; - union lane_align_status_updated dpcd_lane_status_updated; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - enum dc_status dpcd_status = DC_OK; - uint8_t i = 0; - - retries_cr = 0; - retry_count = 0; - - memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); - memset(&dpcd_lane_status_updated, '\0', - sizeof(dpcd_lane_status_updated)); - - while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && - (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { - - - /* 1. call HWSS to set lane settings */ - dp_set_hw_lane_settings( - link, - link_res, - lt_settings, - 0); - - /* 2. update DPCD of the receiver */ - if (!retry_count) { - /* EPR #361076 - write as a 5-byte burst, - * but only for the 1-st iteration. 
- */ - dpcd_set_lt_pattern_and_lane_settings( - link, - lt_settings, - lt_settings->pattern_for_cr, - 0); - /* Vendor specific: Disable intercept */ - for (i = 0; i < max_vendor_dpcd_retries; i++) { - msleep(pre_disable_intercept_delay_ms); - dpcd_status = core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_intercept_dis[0], - sizeof(vendor_lttpr_write_data_intercept_dis)); - - if (dpcd_status == DC_OK) - break; - - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_intercept_en[0], - sizeof(vendor_lttpr_write_data_intercept_en)); - } - } else { - vendor_lttpr_write_data_vs[3] = 0; - vendor_lttpr_write_data_pe[3] = 0; - - for (lane = 0; lane < lane_count; lane++) { - vendor_lttpr_write_data_vs[3] |= - lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); - vendor_lttpr_write_data_pe[3] |= - lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); - } - - /* Vendor specific: Update VS and PE to DPRX requested value */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); - - dpcd_set_lane_settings( - link, - lt_settings, - 0); - } - - /* 3. wait receiver to lock-on*/ - wait_time_microsec = lt_settings->cr_pattern_time; - - dp_wait_for_training_aux_rd_interval( - link, - wait_time_microsec); - - /* 4. Read lane status and requested drive - * settings as set by the sink - */ - dp_get_lane_status_and_lane_adjust( - link, - lt_settings, - dpcd_lane_status, - &dpcd_lane_status_updated, - dpcd_lane_adjust, - 0); - - /* 5. check CR done*/ - if (dp_is_cr_done(lane_count, dpcd_lane_status)) { - status = LINK_TRAINING_SUCCESS; - break; - } - - /* 6. max VS reached*/ - if (dp_is_max_vs_reached(lt_settings)) - break; - - /* 7. same lane settings */ - /* Note: settings are the same for all lanes, - * so comparing first lane is sufficient - */ - if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == - dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) - retries_cr++; - else - retries_cr = 0; - - /* 8. update VS/PE/PC2 in lt_settings*/ - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - retry_count++; - } - - if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { - ASSERT(0); - DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. 
Possibly voltage swing issue", - __func__, - LINK_TRAINING_MAX_CR_RETRY); - - } - - status = dp_get_cr_failure(lane_count, dpcd_lane_status); - } - - /* Perform Channel EQ Sequence */ - if (status == LINK_TRAINING_SUCCESS) { - enum dc_dp_training_pattern tr_pattern; - uint32_t retries_ch_eq; - uint32_t wait_time_microsec; - enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; - union lane_align_status_updated dpcd_lane_status_updated = {0}; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - - /* Note: also check that TPS4 is a supported feature*/ - tr_pattern = lt_settings->pattern_for_eq; - - dp_set_hw_training_pattern(link, link_res, tr_pattern, 0); - - status = LINK_TRAINING_EQ_FAIL_EQ; - - for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; - retries_ch_eq++) { - - dp_set_hw_lane_settings(link, link_res, lt_settings, 0); - - vendor_lttpr_write_data_vs[3] = 0; - vendor_lttpr_write_data_pe[3] = 0; - - for (lane = 0; lane < lane_count; lane++) { - vendor_lttpr_write_data_vs[3] |= - lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); - vendor_lttpr_write_data_pe[3] |= - lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); - } - - /* Vendor specific: Update VS and PE to DPRX requested value */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); - - /* 2. update DPCD*/ - if (!retries_ch_eq) - /* EPR #361076 - write as a 5-byte burst, - * but only for the 1-st iteration - */ - - dpcd_set_lt_pattern_and_lane_settings( - link, - lt_settings, - tr_pattern, 0); - else - dpcd_set_lane_settings(link, lt_settings, 0); - - /* 3. wait for receiver to lock-on*/ - wait_time_microsec = lt_settings->eq_pattern_time; - - dp_wait_for_training_aux_rd_interval( - link, - wait_time_microsec); - - /* 4. Read lane status and requested - * drive settings as set by the sink - */ - dp_get_lane_status_and_lane_adjust( - link, - lt_settings, - dpcd_lane_status, - &dpcd_lane_status_updated, - dpcd_lane_adjust, - 0); - - /* 5. check CR done*/ - if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { - status = LINK_TRAINING_EQ_FAIL_CR; - break; - } - - /* 6. check CHEQ done*/ - if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && - dp_is_symbol_locked(lane_count, dpcd_lane_status) && - dp_is_interlane_aligned(dpcd_lane_status_updated)) { - status = LINK_TRAINING_SUCCESS; - break; - } - - /* 7. 
update VS/PE/PC2 in lt_settings*/ - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - } - } - - return status; -} - - -enum link_training_result dc_link_dp_perform_link_training( - struct dc_link *link, - const struct link_resource *link_res, - const struct dc_link_settings *link_settings, - bool skip_video_pattern) -{ - enum link_training_result status = LINK_TRAINING_SUCCESS; - struct link_training_settings lt_settings = {0}; - enum dp_link_encoding encoding = - dp_get_link_encoding_format(link_settings); - - /* decide training settings */ - dp_decide_training_settings( - link, - link_settings, - <_settings); - - override_training_settings( - link, - &link->preferred_training_settings, - <_settings); - - /* reset previous training states */ - dpcd_exit_training_mode(link, encoding); - - /* configure link prior to entering training mode */ - dpcd_configure_lttpr_mode(link, <_settings); - dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready); - dpcd_configure_channel_coding(link, <_settings); - - /* enter training mode: - * Per DP specs starting from here, DPTX device shall not issue - * Non-LT AUX transactions inside training mode. - */ - if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && encoding == DP_8b_10b_ENCODING) - status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, <_settings); - else if (encoding == DP_8b_10b_ENCODING) - status = dp_perform_8b_10b_link_training(link, link_res, <_settings); - else if (encoding == DP_128b_132b_ENCODING) - status = dp_perform_128b_132b_link_training(link, link_res, <_settings); - else - ASSERT(0); - - /* exit training mode */ - dpcd_exit_training_mode(link, encoding); - - /* switch to video idle */ - if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) - status = dp_transition_to_video_idle(link, - link_res, - <_settings, - status); - - /* dump debug data */ - print_status_message(link, <_settings, status); - if (status != LINK_TRAINING_SUCCESS) - link->ctx->dc->debug_data.ltFailCount++; - return status; -} - -bool perform_link_training_with_retries( - const struct dc_link_settings *link_setting, - bool skip_video_pattern, - int attempts, - struct pipe_ctx *pipe_ctx, - enum signal_type signal, - bool do_fallback) -{ - int j; - uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY; - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; - enum dp_panel_mode panel_mode = dp_get_panel_mode(link); - enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0; - struct dc_link_settings cur_link_settings = *link_setting; - struct dc_link_settings max_link_settings = *link_setting; - const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); - int fail_count = 0; - bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */ - bool is_link_bw_min = /* RBR x 1 */ - (cur_link_settings.link_rate <= LINK_RATE_LOW) && - (cur_link_settings.lane_count <= LANE_COUNT_ONE); - - dp_trace_commit_lt_init(link); - - if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) - /* We need to do this before the link training to ensure the idle - * pattern in SST mode will be sent right after the link training - */ - link_hwss->setup_stream_encoder(pipe_ctx); - - dp_trace_set_lt_start_timestamp(link, false); - j = 0; - while (j < attempts && fail_count < (attempts * 10)) { - - DC_LOG_HW_LINK_TRAINING("%s: Beginning link(%d) training attempt %u of %d @ rate(%d) x 
lane(%d)\n",
-			__func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
-			cur_link_settings.lane_count);
-
-		dp_enable_link_phy(
-			link,
-			&pipe_ctx->link_res,
-			signal,
-			pipe_ctx->clock_source->id,
-			&cur_link_settings);
-
-		if (stream->sink_patches.dppowerup_delay > 0) {
-			int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
-
-			msleep(delay_dp_power_up_in_ms);
-		}
-
-#ifdef CONFIG_DRM_AMD_DC_HDCP
-		if (panel_mode == DP_PANEL_MODE_EDP) {
-			struct cp_psp *cp_psp = &stream->ctx->cp_psp;
-
-			if (cp_psp && cp_psp->funcs.enable_assr)
-				/* ASSR is bound to fail with unsigned PSP
-				 * verstage used during development phase.
-				 * Report and continue with eDP panel mode to
-				 * perform eDP link training with the right settings
-				 */
-				cp_psp->funcs.enable_assr(cp_psp->handle, link);
-		}
-#endif
-
-		dp_set_panel_mode(link, panel_mode);
-
-		if (link->aux_access_disabled) {
-			dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
-			return true;
-		} else {
-			/** @todo Consolidate USB4 DP and DPx.x training. */
-			if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
-				status = dc_link_dpia_perform_link_training(link,
-						&pipe_ctx->link_res,
-						&cur_link_settings,
-						skip_video_pattern);
-
-				/* Transmit idle pattern once training successful. */
-				if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) {
-					dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
-					/* Update verified link settings to the current ones,
-					 * because DPIA LT might fall back to a lower link setting.
-					 */
-					if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
-						link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
-						link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
-						dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link);
-					}
-				}
-			} else {
-				status = dc_link_dp_perform_link_training(link,
-						&pipe_ctx->link_res,
-						&cur_link_settings,
-						skip_video_pattern);
-			}
-
-			dp_trace_lt_total_count_increment(link, false);
-			dp_trace_lt_result_update(link, status, false);
-			dp_trace_set_lt_end_timestamp(link, false);
-			if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
-				return true;
-		}
-
-		fail_count++;
-		dp_trace_lt_fail_count_update(link, fail_count, false);
-		if (link->ep_type == DISPLAY_ENDPOINT_PHY) {
-			/* latest link training still fails or link training is aborted:
-			 * skip delay and keep PHY on
-			 */
-			if (j == (attempts - 1) || (status == LINK_TRAINING_ABORT))
-				break;
-		}
-
-		DC_LOG_WARNING("%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) : fail reason:(%d)\n",
-			__func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
-			cur_link_settings.lane_count, status);
-
-		dp_disable_link_phy(link, &pipe_ctx->link_res, signal);
-
-		/* Abort link training if failure due to sink being unplugged. */
-		if (status == LINK_TRAINING_ABORT) {
-			enum dc_connection_type type = dc_connection_none;
-
-			dc_link_detect_sink(link, &type);
-			if (type == dc_connection_none) {
-				DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__);
-				break;
-			}
-		}
-
-		/* Try to train again at original settings if:
-		 * - not falling back between training attempts;
-		 * - aborted previous attempt due to reasons other than sink unplug;
-		 * - successfully trained but at a link rate lower than that required by stream;
-		 * - reached minimum link bandwidth. 
- */ - if (!do_fallback || (status == LINK_TRAINING_ABORT) || - (status == LINK_TRAINING_SUCCESS && is_link_bw_low) || - is_link_bw_min) { - j++; - cur_link_settings = *link_setting; - delay_between_attempts += LINK_TRAINING_RETRY_DELAY; - is_link_bw_low = false; - is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) && - (cur_link_settings.lane_count <= LANE_COUNT_ONE); - - } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */ - uint32_t req_bw; - uint32_t link_bw; - - decide_fallback_link_setting(link, &max_link_settings, - &cur_link_settings, status); - /* Fail link training if reduced link bandwidth no longer meets - * stream requirements. - */ - req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); - link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings); - is_link_bw_low = (req_bw > link_bw); - is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && - (cur_link_settings.lane_count <= LANE_COUNT_ONE)); - if (is_link_bw_low) - DC_LOG_WARNING( - "%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n", - __func__, link->link_index, req_bw, link_bw); - } - - msleep(delay_between_attempts); - } - return false; -} - -static enum clock_source_id get_clock_source_id(struct dc_link *link) -{ - enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_UNDEFINED; - struct clock_source *dp_cs = link->dc->res_pool->dp_clock_source; - - if (dp_cs != NULL) { - dp_cs_id = dp_cs->id; - } else { - /* - * dp clock source is not initialized for some reason. - * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used - */ - ASSERT(dp_cs); - } - - return dp_cs_id; -} - -static void set_dp_mst_mode(struct dc_link *link, const struct link_resource *link_res, - bool mst_enable) -{ - if (mst_enable == false && - link->type == dc_connection_mst_branch) { - /* Disable MST on link. Use only local sink. */ - dp_disable_link_phy_mst(link, link_res, link->connector_signal); - - link->type = dc_connection_single; - link->local_sink = link->remote_sinks[0]; - link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT; - dc_sink_retain(link->local_sink); - dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); - } else if (mst_enable == true && - link->type == dc_connection_single && - link->remote_sinks[0] != NULL) { - /* Re-enable MST on link. */ - dp_disable_link_phy(link, link_res, link->connector_signal); - dp_enable_mst_on_sink(link, true); - - link->type = dc_connection_mst_branch; - link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST; - } -} - -bool dc_link_dp_sync_lt_begin(struct dc_link *link) -{ - /* Begin Sync LT. During this time, - * DPCD:600h must not be powered down. 
- */ - link->sync_lt_in_progress = true; - - /*Clear any existing preferred settings.*/ - memset(&link->preferred_training_settings, 0, - sizeof(struct dc_link_training_overrides)); - memset(&link->preferred_link_setting, 0, - sizeof(struct dc_link_settings)); - - return true; -} - -enum link_training_result dc_link_dp_sync_lt_attempt( - struct dc_link *link, - const struct link_resource *link_res, - struct dc_link_settings *link_settings, - struct dc_link_training_overrides *lt_overrides) -{ - struct link_training_settings lt_settings = {0}; - enum link_training_result lt_status = LINK_TRAINING_SUCCESS; - enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT; - enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL; - bool fec_enable = false; - - dp_decide_training_settings( - link, - link_settings, - <_settings); - override_training_settings( - link, - lt_overrides, - <_settings); - /* Setup MST Mode */ - if (lt_overrides->mst_enable) - set_dp_mst_mode(link, link_res, *lt_overrides->mst_enable); - - /* Disable link */ - dp_disable_link_phy(link, link_res, link->connector_signal); - - /* Enable link */ - dp_cs_id = get_clock_source_id(link); - dp_enable_link_phy(link, link_res, link->connector_signal, - dp_cs_id, link_settings); - - /* Set FEC enable */ - if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { - fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable; - dp_set_fec_ready(link, NULL, fec_enable); - } - - if (lt_overrides->alternate_scrambler_reset) { - if (*lt_overrides->alternate_scrambler_reset) - panel_mode = DP_PANEL_MODE_EDP; - else - panel_mode = DP_PANEL_MODE_DEFAULT; - } else - panel_mode = dp_get_panel_mode(link); - - dp_set_panel_mode(link, panel_mode); - - /* Attempt to train with given link training settings */ - if (link->ctx->dc->work_arounds.lt_early_cr_pattern) - start_clock_recovery_pattern_early(link, link_res, <_settings, DPRX); - - /* Set link rate, lane count and spread. */ - dpcd_set_link_settings(link, <_settings); - - /* 2. perform link training (set link training done - * to false is done as well) - */ - lt_status = perform_clock_recovery_sequence(link, link_res, <_settings, DPRX); - if (lt_status == LINK_TRAINING_SUCCESS) { - lt_status = perform_channel_equalization_sequence(link, - link_res, - <_settings, - DPRX); - } - - /* 3. Sync LT must skip TRAINING_PATTERN_SET:0 (video pattern)*/ - /* 4. print status message*/ - print_status_message(link, <_settings, lt_status); - - return lt_status; -} - -bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down) -{ - /* If input parameter is set, shut down phy. 
- * Still shouldn't turn off dp_receiver (DPCD:600h)
- */
-	if (link_down == true) {
-		struct dc_link_settings link_settings = link->cur_link_settings;
-		dp_disable_link_phy(link, NULL, link->connector_signal);
-		if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
-			dp_set_fec_ready(link, NULL, false);
-	}
-
-	link->sync_lt_in_progress = false;
-	return true;
-}
-
-static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
-{
-	enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
-
-	if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
-		lttpr_max_link_rate = LINK_RATE_UHBR20;
-	else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
-		lttpr_max_link_rate = LINK_RATE_UHBR13_5;
-	else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10)
-		lttpr_max_link_rate = LINK_RATE_UHBR10;
-
-	return lttpr_max_link_rate;
-}
-
-static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link)
-{
-	enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN;
-
-	if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20)
-		cable_max_link_rate = LINK_RATE_UHBR20;
-	else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY)
-		cable_max_link_rate = LINK_RATE_UHBR13_5;
-	else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10)
-		cable_max_link_rate = LINK_RATE_UHBR10;
-
-	return cable_max_link_rate;
-}
-
-bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
-{
-	struct link_encoder *link_enc = NULL;
-
-	if (!max_link_enc_cap) {
-		DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__);
-		return false;
-	}
-
-	link_enc = link_enc_cfg_get_link_enc(link);
-	ASSERT(link_enc);
-
-	if (link_enc && link_enc->funcs->get_max_link_cap) {
-		link_enc->funcs->get_max_link_cap(link_enc, max_link_enc_cap);
-		return true;
-	}
-
-	DC_LOG_ERROR("%s: Max link encoder caps unknown", __func__);
-	max_link_enc_cap->lane_count = 1;
-	max_link_enc_cap->link_rate = 6;
-	return false;
-}
-
-
-struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
-{
-	struct dc_link_settings max_link_cap = {0};
-	enum dc_link_rate lttpr_max_link_rate;
-	enum dc_link_rate cable_max_link_rate;
-	struct link_encoder *link_enc = NULL;
-
-
-	link_enc = link_enc_cfg_get_link_enc(link);
-	ASSERT(link_enc);
-
-	/* get max link encoder capability */
-	if (link_enc)
-		link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap);
-
-	/* Lower link settings based on sink's link cap */
-	if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
-		max_link_cap.lane_count =
-				link->reported_link_cap.lane_count;
-	if (link->reported_link_cap.link_rate < max_link_cap.link_rate)
-		max_link_cap.link_rate =
-				link->reported_link_cap.link_rate;
-	if (link->reported_link_cap.link_spread <
-			max_link_cap.link_spread)
-		max_link_cap.link_spread =
-				link->reported_link_cap.link_spread;
-
-	/* Lower link settings based on cable attributes
-	 * Cable ID is a DP2 feature to identify the max certified link rate
-	 * that a cable can carry. The cable identification method requires
-	 * both cable and display hardware support. Since the spec came late,
-	 * it is anticipated that the first round of DP2 cables and displays
-	 * may not reliably return cable ID data. Therefore our cable id policy
-	 * is that if the cable can return non-zero cable id data, we will take
-	 * the cable's link rate capability into account. However, if we get
-	 * zero data, the cable link rate capability is considered
-	 * inconclusive. In this case, we will not take the cable's capability
-	 * into account, to avoid over-limiting hardware capability for users.
-	 * The max overall link rate capability is still determined after
-	 * actual dp pre-training. Cable id is considered an auxiliary method
-	 * of determining max link bandwidth capability.
-	 */
-	cable_max_link_rate = get_cable_max_link_rate(link);
-
-	if (!link->dc->debug.ignore_cable_id &&
-			cable_max_link_rate != LINK_RATE_UNKNOWN &&
-			cable_max_link_rate < max_link_cap.link_rate)
-		max_link_cap.link_rate = cable_max_link_rate;
-
-	/* account for lttpr repeaters cap
-	 * note: repeaters do not snoop the DPRX Capabilities addresses (3.6.3).
-	 */
-	if (dp_is_lttpr_present(link)) {
-		if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
-			max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
-		lttpr_max_link_rate = get_lttpr_max_link_rate(link);
-
-		if (lttpr_max_link_rate < max_link_cap.link_rate)
-			max_link_cap.link_rate = lttpr_max_link_rate;
-
-		DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n",
-				__func__,
-				max_link_cap.lane_count,
-				max_link_cap.link_rate);
-	}
-
-	if (dp_get_link_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING &&
-			link->dc->debug.disable_uhbr)
-		max_link_cap.link_rate = LINK_RATE_HIGH3;
-
-	return max_link_cap;
-}
-
-static enum dc_status read_hpd_rx_irq_data(
-	struct dc_link *link,
-	union hpd_irq_data *irq_data)
-{
-	static enum dc_status retval;
-
-	/* The HW reads 16 bytes from 200h on HPD,
-	 * but if we get an AUX_DEFER, the HW cannot retry
-	 * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
-	 * fail, so we now explicitly read 6 bytes which is
-	 * the req from the above mentioned test cases.
-	 *
-	 * For DP 1.4 we need to read those from 2002h range.
-	 */
-	if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
-		retval = core_link_read_dpcd(
-			link,
-			DP_SINK_COUNT,
-			irq_data->raw,
-			sizeof(union hpd_irq_data));
-	else {
-		/* Read 14 bytes in a single read and then copy only the required fields.
-		 * This is more efficient than doing it in two separate AUX reads. */
-
-		uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
-
-		retval = core_link_read_dpcd(
-			link,
-			DP_SINK_COUNT_ESI,
-			tmp,
-			sizeof(tmp));
-
-		if (retval != DC_OK)
-			return retval;
-
-		irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
-		irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
-		irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
-		irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
-		irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
-		irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
-	}
-
-	return retval;
-}
-
-bool hpd_rx_irq_check_link_loss_status(
-	struct dc_link *link,
-	union hpd_irq_data *hpd_irq_dpcd_data)
-{
-	uint8_t irq_reg_rx_power_state = 0;
-	enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
-	union lane_status lane_status;
-	uint32_t lane;
-	bool sink_status_changed;
-	bool return_code;
-
-	sink_status_changed = false;
-	return_code = false;
-
-	if (link->cur_link_settings.lane_count == 0)
-		return return_code;
-
-	/*1. 
Check that Link Status changed, before re-training.*/ - - /*parse lane status*/ - for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { - /* check status of lanes 0,1 - * changed DpcdAddress_Lane01Status (0x202) - */ - lane_status.raw = get_nibble_at_index( - &hpd_irq_dpcd_data->bytes.lane01_status.raw, - lane); - - if (!lane_status.bits.CHANNEL_EQ_DONE_0 || - !lane_status.bits.CR_DONE_0 || - !lane_status.bits.SYMBOL_LOCKED_0) { - /* if one of the channel equalization, clock - * recovery or symbol lock is dropped - * consider it as (link has been - * dropped) dp sink status has changed - */ - sink_status_changed = true; - break; - } - } - - /* Check interlane align.*/ - if (sink_status_changed || - !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) { - - DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__); - - return_code = true; - - /*2. Check that we can handle interrupt: Not in FS DOS, - * Not in "Display Timeout" state, Link is trained. - */ - dpcd_result = core_link_read_dpcd(link, - DP_SET_POWER, - &irq_reg_rx_power_state, - sizeof(irq_reg_rx_power_state)); - - if (dpcd_result != DC_OK) { - DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n", - __func__); - } else { - if (irq_reg_rx_power_state != DP_SET_POWER_D0) - return_code = false; - } - } - - return return_code; -} - -static bool dp_verify_link_cap( - struct dc_link *link, - struct dc_link_settings *known_limit_link_setting, - int *fail_count) -{ - struct dc_link_settings cur_link_settings = {0}; - struct dc_link_settings max_link_settings = *known_limit_link_setting; - bool success = false; - bool skip_video_pattern; - enum clock_source_id dp_cs_id = get_clock_source_id(link); - enum link_training_result status = LINK_TRAINING_SUCCESS; - union hpd_irq_data irq_data; - struct link_resource link_res; - - memset(&irq_data, 0, sizeof(irq_data)); - cur_link_settings = max_link_settings; - - /* Grant extended timeout request */ - if (dp_is_lttpr_present(link) && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) { - uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80; - - core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant)); - } - - do { - if (!get_temp_dp_link_res(link, &link_res, &cur_link_settings)) - continue; - - skip_video_pattern = cur_link_settings.link_rate != LINK_RATE_LOW; - dp_enable_link_phy( - link, - &link_res, - link->connector_signal, - dp_cs_id, - &cur_link_settings); - - status = dc_link_dp_perform_link_training( - link, - &link_res, - &cur_link_settings, - skip_video_pattern); - - if (status == LINK_TRAINING_SUCCESS) { - success = true; - udelay(1000); - if (read_hpd_rx_irq_data(link, &irq_data) == DC_OK && - hpd_rx_irq_check_link_loss_status( - link, - &irq_data)) - (*fail_count)++; - - } else { - (*fail_count)++; - } - dp_trace_lt_total_count_increment(link, true); - dp_trace_lt_result_update(link, status, true); - dp_disable_link_phy(link, &link_res, link->connector_signal); - } while (!success && decide_fallback_link_setting(link, - &max_link_settings, &cur_link_settings, status)); - - link->verified_link_cap = success ? 
-			cur_link_settings : fail_safe_link_settings;
-	return success;
-}
-
-static void apply_usbc_combo_phy_reset_wa(struct dc_link *link,
-		struct dc_link_settings *link_settings)
-{
-	/* Temporary Renoir-specific workaround: the PHY will sometimes be in a
-	 * bad state on hotplugging a display from certain USB-C dongles, so
-	 * add an extra cycle of enabling and disabling the PHY before the
-	 * first link training.
-	 */
-	struct link_resource link_res = {0};
-	enum clock_source_id dp_cs_id = get_clock_source_id(link);
-
-	dp_enable_link_phy(link, &link_res, link->connector_signal,
-			dp_cs_id, link_settings);
-	dp_disable_link_phy(link, &link_res, link->connector_signal);
-}
-
-bool dp_verify_link_cap_with_retries(
-	struct dc_link *link,
-	struct dc_link_settings *known_limit_link_setting,
-	int attempts)
-{
-	int i = 0;
-	bool success = false;
-	int fail_count = 0;
-
-	dp_trace_detect_lt_init(link);
-
-	if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
-			link->dc->debug.usbc_combo_phy_reset_wa)
-		apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting);
-
-	dp_trace_set_lt_start_timestamp(link, false);
-	for (i = 0; i < attempts; i++) {
-		enum dc_connection_type type = dc_connection_none;
-
-		memset(&link->verified_link_cap, 0,
-				sizeof(struct dc_link_settings));
-		if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) {
-			link->verified_link_cap = fail_safe_link_settings;
-			break;
-		} else if (dp_verify_link_cap(link, known_limit_link_setting,
-				&fail_count) && fail_count == 0) {
-			success = true;
-			break;
-		}
-		msleep(10);
-	}
-
-	dp_trace_lt_fail_count_update(link, fail_count, true);
-	dp_trace_set_lt_end_timestamp(link, true);
-
-	return success;
-}
-
-/* In DP compliance tests, the DPR-120 may have
- * a random value in its MAX_LINK_BW dpcd field.
- * In this case, we map it to the maximum supported
- * link rate that is smaller than MAX_LINK_BW. 
- */ -static enum dc_link_rate get_link_rate_from_max_link_bw( - uint8_t max_link_bw) -{ - enum dc_link_rate link_rate; - - if (max_link_bw >= LINK_RATE_HIGH3) { - link_rate = LINK_RATE_HIGH3; - } else if (max_link_bw < LINK_RATE_HIGH3 - && max_link_bw >= LINK_RATE_HIGH2) { - link_rate = LINK_RATE_HIGH2; - } else if (max_link_bw < LINK_RATE_HIGH2 - && max_link_bw >= LINK_RATE_HIGH) { - link_rate = LINK_RATE_HIGH; - } else if (max_link_bw < LINK_RATE_HIGH - && max_link_bw >= LINK_RATE_LOW) { - link_rate = LINK_RATE_LOW; - } else { - link_rate = LINK_RATE_UNKNOWN; - } - - return link_rate; -} - -static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count) -{ - return lane_count <= LANE_COUNT_ONE; -} - -static inline bool reached_minimum_link_rate(enum dc_link_rate link_rate) -{ - return link_rate <= LINK_RATE_LOW; -} - -static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count) -{ - switch (lane_count) { - case LANE_COUNT_FOUR: - return LANE_COUNT_TWO; - case LANE_COUNT_TWO: - return LANE_COUNT_ONE; - case LANE_COUNT_ONE: - return LANE_COUNT_UNKNOWN; - default: - return LANE_COUNT_UNKNOWN; - } -} - -static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate) -{ - switch (link_rate) { - case LINK_RATE_UHBR20: - return LINK_RATE_UHBR13_5; - case LINK_RATE_UHBR13_5: - return LINK_RATE_UHBR10; - case LINK_RATE_UHBR10: - return LINK_RATE_HIGH3; - case LINK_RATE_HIGH3: - return LINK_RATE_HIGH2; - case LINK_RATE_HIGH2: - return LINK_RATE_HIGH; - case LINK_RATE_HIGH: - return LINK_RATE_LOW; - case LINK_RATE_LOW: - return LINK_RATE_UNKNOWN; - default: - return LINK_RATE_UNKNOWN; - } -} - -static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count) -{ - switch (lane_count) { - case LANE_COUNT_ONE: - return LANE_COUNT_TWO; - case LANE_COUNT_TWO: - return LANE_COUNT_FOUR; - default: - return LANE_COUNT_UNKNOWN; - } -} - -static enum dc_link_rate increase_link_rate(struct dc_link *link, - enum dc_link_rate link_rate) -{ - switch (link_rate) { - case LINK_RATE_LOW: - return LINK_RATE_HIGH; - case LINK_RATE_HIGH: - return LINK_RATE_HIGH2; - case LINK_RATE_HIGH2: - return LINK_RATE_HIGH3; - case LINK_RATE_HIGH3: - return LINK_RATE_UHBR10; - case LINK_RATE_UHBR10: - /* upto DP2.x specs UHBR13.5 is the only link rate that could be - * not supported by DPRX when higher link rate is supported. - * so we treat it as a special case for code simplicity. When we - * have new specs with more link rates like this, we should - * consider a more generic solution to handle discrete link - * rate capabilities. - */ - return link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 ? 
- LINK_RATE_UHBR13_5 : LINK_RATE_UHBR20; - case LINK_RATE_UHBR13_5: - return LINK_RATE_UHBR20; - default: - return LINK_RATE_UNKNOWN; - } -} - -static bool decide_fallback_link_setting_max_bw_policy( - struct dc_link *link, - const struct dc_link_settings *max, - struct dc_link_settings *cur, - enum link_training_result training_result) -{ - uint8_t cur_idx = 0, next_idx; - bool found = false; - - if (training_result == LINK_TRAINING_ABORT) - return false; - - while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks)) - /* find current index */ - if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count && - dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate) - break; - else - cur_idx++; - - next_idx = cur_idx + 1; - - while (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) - /* find next index */ - if (dp_lt_fallbacks[next_idx].lane_count > max->lane_count || - dp_lt_fallbacks[next_idx].link_rate > max->link_rate) - next_idx++; - else if (dp_lt_fallbacks[next_idx].link_rate == LINK_RATE_UHBR13_5 && - link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 == 0) - /* upto DP2.x specs UHBR13.5 is the only link rate that - * could be not supported by DPRX when higher link rate - * is supported. so we treat it as a special case for - * code simplicity. When we have new specs with more - * link rates like this, we should consider a more - * generic solution to handle discrete link rate - * capabilities. - */ - next_idx++; - else - break; - - if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) { - cur->lane_count = dp_lt_fallbacks[next_idx].lane_count; - cur->link_rate = dp_lt_fallbacks[next_idx].link_rate; - found = true; - } - - return found; -} - -/* - * function: set link rate and lane count fallback based - * on current link setting and last link training result - * return value: - * true - link setting could be set - * false - has reached minimum setting - * and no further fallback could be done - */ -static bool decide_fallback_link_setting( - struct dc_link *link, - struct dc_link_settings *max, - struct dc_link_settings *cur, - enum link_training_result training_result) -{ - if (dp_get_link_encoding_format(max) == DP_128b_132b_ENCODING || - link->dc->debug.force_dp2_lt_fallback_method) - return decide_fallback_link_setting_max_bw_policy(link, max, cur, - training_result); - - switch (training_result) { - case LINK_TRAINING_CR_FAIL_LANE0: - case LINK_TRAINING_CR_FAIL_LANE1: - case LINK_TRAINING_CR_FAIL_LANE23: - case LINK_TRAINING_LQA_FAIL: - { - if (!reached_minimum_link_rate(cur->link_rate)) { - cur->link_rate = reduce_link_rate(cur->link_rate); - } else if (!reached_minimum_lane_count(cur->lane_count)) { - cur->link_rate = max->link_rate; - if (training_result == LINK_TRAINING_CR_FAIL_LANE0) - return false; - else if (training_result == LINK_TRAINING_CR_FAIL_LANE1) - cur->lane_count = LANE_COUNT_ONE; - else if (training_result == LINK_TRAINING_CR_FAIL_LANE23) - cur->lane_count = LANE_COUNT_TWO; - else - cur->lane_count = reduce_lane_count(cur->lane_count); - } else { - return false; - } - break; - } - case LINK_TRAINING_EQ_FAIL_EQ: - case LINK_TRAINING_EQ_FAIL_CR_PARTIAL: - { - if (!reached_minimum_lane_count(cur->lane_count)) { - cur->lane_count = reduce_lane_count(cur->lane_count); - } else if (!reached_minimum_link_rate(cur->link_rate)) { - cur->link_rate = reduce_link_rate(cur->link_rate); - /* Reduce max link rate to avoid potential infinite loop. - * Needed so that any subsequent CR_FAIL fallback can't - * re-set the link rate higher than the link rate from - * the latest EQ_FAIL fallback. 
- */ - max->link_rate = cur->link_rate; - cur->lane_count = max->lane_count; - } else { - return false; - } - break; - } - case LINK_TRAINING_EQ_FAIL_CR: - { - if (!reached_minimum_link_rate(cur->link_rate)) { - cur->link_rate = reduce_link_rate(cur->link_rate); - /* Reduce max link rate to avoid potential infinite loop. - * Needed so that any subsequent CR_FAIL fallback can't - * re-set the link rate higher than the link rate from - * the latest EQ_FAIL fallback. - */ - max->link_rate = cur->link_rate; - cur->lane_count = max->lane_count; - } else { - return false; - } - break; - } - default: - return false; - } - return true; -} - -bool dp_validate_mode_timing( - struct dc_link *link, - const struct dc_crtc_timing *timing) -{ - uint32_t req_bw; - uint32_t max_bw; - - const struct dc_link_settings *link_setting; - - /* According to spec, VSC SDP should be used if pixel format is YCbCr420 */ - if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && - !link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED && - dal_graphics_object_id_get_connector_id(link->link_id) != CONNECTOR_ID_VIRTUAL) - return false; - - /*always DP fail safe mode*/ - if ((timing->pix_clk_100hz / 10) == (uint32_t) 25175 && - timing->h_addressable == (uint32_t) 640 && - timing->v_addressable == (uint32_t) 480) - return true; - - link_setting = dc_link_get_link_cap(link); - - /* TODO: DYNAMIC_VALIDATION needs to be implemented */ - /*if (flags.DYNAMIC_VALIDATION == 1 && - link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN) - link_setting = &link->verified_link_cap; - */ - - req_bw = dc_bandwidth_in_kbps_from_timing(timing); - max_bw = dc_link_bandwidth_kbps(link, link_setting); - - if (req_bw <= max_bw) { - /* remember the biggest mode here, during - * initial link training (to get - * verified_link_cap), LS sends event about - * cannot train at reported cap to upper - * layer and upper layer will re-enumerate modes. - * this is not necessary if the lower - * verified_link_cap is enough to drive - * all the modes */ - - /* TODO: DYNAMIC_VALIDATION needs to be implemented */ - /* if (flags.DYNAMIC_VALIDATION == 1) - dpsst->max_req_bw_for_verified_linkcap = dal_max( - dpsst->max_req_bw_for_verified_linkcap, req_bw); */ - return true; - } else - return false; -} - -static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw) -{ - struct dc_link_settings initial_link_setting = { - LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0}; - struct dc_link_settings current_link_setting = - initial_link_setting; - uint32_t link_bw; - - if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) - return false; - - /* search for the minimum link setting that: - * 1. is supported according to the link training result - * 2. 
could support the b/w requested by the timing - */ - while (current_link_setting.link_rate <= - link->verified_link_cap.link_rate) { - link_bw = dc_link_bandwidth_kbps( - link, - ¤t_link_setting); - if (req_bw <= link_bw) { - *link_setting = current_link_setting; - return true; - } - - if (current_link_setting.lane_count < - link->verified_link_cap.lane_count) { - current_link_setting.lane_count = - increase_lane_count( - current_link_setting.lane_count); - } else { - current_link_setting.link_rate = - increase_link_rate(link, - current_link_setting.link_rate); - current_link_setting.lane_count = - initial_link_setting.lane_count; - } - } - - return false; -} - -bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw) -{ - struct dc_link_settings initial_link_setting; - struct dc_link_settings current_link_setting; - uint32_t link_bw; - - /* - * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. - * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" - */ - if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 || - link->dpcd_caps.edp_supported_link_rates_count == 0) { - *link_setting = link->verified_link_cap; - return true; - } - - memset(&initial_link_setting, 0, sizeof(initial_link_setting)); - initial_link_setting.lane_count = LANE_COUNT_ONE; - initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0]; - initial_link_setting.link_spread = LINK_SPREAD_DISABLED; - initial_link_setting.use_link_rate_set = true; - initial_link_setting.link_rate_set = 0; - current_link_setting = initial_link_setting; - - /* search for the minimum link setting that: - * 1. is supported according to the link training result - * 2. could support the b/w requested by the timing - */ - while (current_link_setting.link_rate <= - link->verified_link_cap.link_rate) { - link_bw = dc_link_bandwidth_kbps( - link, - ¤t_link_setting); - if (req_bw <= link_bw) { - *link_setting = current_link_setting; - return true; - } - - if (current_link_setting.lane_count < - link->verified_link_cap.lane_count) { - current_link_setting.lane_count = - increase_lane_count( - current_link_setting.lane_count); - } else { - if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { - current_link_setting.link_rate_set++; - current_link_setting.link_rate = - link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; - current_link_setting.lane_count = - initial_link_setting.lane_count; - } else - break; - } - } - return false; -} - -static bool decide_edp_link_settings_with_dsc(struct dc_link *link, - struct dc_link_settings *link_setting, - uint32_t req_bw, - enum dc_link_rate max_link_rate) -{ - struct dc_link_settings initial_link_setting; - struct dc_link_settings current_link_setting; - uint32_t link_bw; - - unsigned int policy = 0; - - policy = link->panel_config.dsc.force_dsc_edp_policy; - if (max_link_rate == LINK_RATE_UNKNOWN) - max_link_rate = link->verified_link_cap.link_rate; - /* - * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. 
- * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" - */ - if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 || - link->dpcd_caps.edp_supported_link_rates_count == 0)) { - /* for DSC enabled case, we search for minimum lane count */ - memset(&initial_link_setting, 0, sizeof(initial_link_setting)); - initial_link_setting.lane_count = LANE_COUNT_ONE; - initial_link_setting.link_rate = LINK_RATE_LOW; - initial_link_setting.link_spread = LINK_SPREAD_DISABLED; - initial_link_setting.use_link_rate_set = false; - initial_link_setting.link_rate_set = 0; - current_link_setting = initial_link_setting; - if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) - return false; - - /* search for the minimum link setting that: - * 1. is supported according to the link training result - * 2. could support the b/w requested by the timing - */ - while (current_link_setting.link_rate <= - max_link_rate) { - link_bw = dc_link_bandwidth_kbps( - link, - ¤t_link_setting); - if (req_bw <= link_bw) { - *link_setting = current_link_setting; - return true; - } - if (policy) { - /* minimize lane */ - if (current_link_setting.link_rate < max_link_rate) { - current_link_setting.link_rate = - increase_link_rate(link, - current_link_setting.link_rate); - } else { - if (current_link_setting.lane_count < - link->verified_link_cap.lane_count) { - current_link_setting.lane_count = - increase_lane_count( - current_link_setting.lane_count); - current_link_setting.link_rate = initial_link_setting.link_rate; - } else - break; - } - } else { - /* minimize link rate */ - if (current_link_setting.lane_count < - link->verified_link_cap.lane_count) { - current_link_setting.lane_count = - increase_lane_count( - current_link_setting.lane_count); - } else { - current_link_setting.link_rate = - increase_link_rate(link, - current_link_setting.link_rate); - current_link_setting.lane_count = - initial_link_setting.lane_count; - } - } - } - return false; - } - - /* if optimize edp link is supported */ - memset(&initial_link_setting, 0, sizeof(initial_link_setting)); - initial_link_setting.lane_count = LANE_COUNT_ONE; - initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0]; - initial_link_setting.link_spread = LINK_SPREAD_DISABLED; - initial_link_setting.use_link_rate_set = true; - initial_link_setting.link_rate_set = 0; - current_link_setting = initial_link_setting; - - /* search for the minimum link setting that: - * 1. is supported according to the link training result - * 2. 
could support the b/w requested by the timing - */ - while (current_link_setting.link_rate <= - max_link_rate) { - link_bw = dc_link_bandwidth_kbps( - link, - ¤t_link_setting); - if (req_bw <= link_bw) { - *link_setting = current_link_setting; - return true; - } - if (policy) { - /* minimize lane */ - if (current_link_setting.link_rate_set < - link->dpcd_caps.edp_supported_link_rates_count - && current_link_setting.link_rate < max_link_rate) { - current_link_setting.link_rate_set++; - current_link_setting.link_rate = - link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; - } else { - if (current_link_setting.lane_count < link->verified_link_cap.lane_count) { - current_link_setting.lane_count = - increase_lane_count( - current_link_setting.lane_count); - current_link_setting.link_rate_set = initial_link_setting.link_rate_set; - current_link_setting.link_rate = - link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; - } else - break; - } - } else { - /* minimize link rate */ - if (current_link_setting.lane_count < - link->verified_link_cap.lane_count) { - current_link_setting.lane_count = - increase_lane_count( - current_link_setting.lane_count); - } else { - if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { - current_link_setting.link_rate_set++; - current_link_setting.link_rate = - link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; - current_link_setting.lane_count = - initial_link_setting.lane_count; - } else - break; - } - } - } - return false; -} - -static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting) -{ - *link_setting = link->verified_link_cap; - return true; -} - -bool decide_link_settings(struct dc_stream_state *stream, - struct dc_link_settings *link_setting) -{ - struct dc_link *link = stream->link; - uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); - - memset(link_setting, 0, sizeof(*link_setting)); - - /* if preferred is specified through AMDDP, use it, if it's enough - * to drive the mode - */ - if (link->preferred_link_setting.lane_count != - LANE_COUNT_UNKNOWN && - link->preferred_link_setting.link_rate != - LINK_RATE_UNKNOWN) { - *link_setting = link->preferred_link_setting; - return true; - } - - /* MST doesn't perform link training for now - * TODO: add MST specific link training routine - */ - if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - decide_mst_link_settings(link, link_setting); - } else if (link->connector_signal == SIGNAL_TYPE_EDP) { - /* enable edp link optimization for DSC eDP case */ - if (stream->timing.flags.DSC) { - enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN; - - if (link->panel_config.dsc.force_dsc_edp_policy) { - /* calculate link max link rate cap*/ - struct dc_link_settings tmp_link_setting; - struct dc_crtc_timing tmp_timing = stream->timing; - uint32_t orig_req_bw; - - tmp_link_setting.link_rate = LINK_RATE_UNKNOWN; - tmp_timing.flags.DSC = 0; - orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing); - decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw); - max_link_rate = tmp_link_setting.link_rate; - } - decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate); - } else { - decide_edp_link_settings(link, link_setting, req_bw); - } - } else { - decide_dp_link_settings(link, link_setting, req_bw); - } - - return link_setting->lane_count != LANE_COUNT_UNKNOWN && - link_setting->link_rate != LINK_RATE_UNKNOWN; -} - 
-/*************************Short Pulse IRQ***************************/
-bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
-{
-	/*
-	 * Don't handle RX IRQ unless one of the following is met:
-	 * 1) The link is established (cur_link_settings != unknown)
-	 * 2) We know we're dealing with a branch device, SST or MST
-	 */
-
-	if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
-		is_dp_branch_device(link))
-		return true;
-
-	return false;
-}
-
-static bool handle_hpd_irq_psr_sink(struct dc_link *link)
-{
-	union dpcd_psr_configuration psr_configuration;
-
-	if (!link->psr_settings.psr_feature_enabled)
-		return false;
-
-	dm_helpers_dp_read_dpcd(
-		link->ctx,
-		link,
-		368,/*DpcdAddress_PSR_Enable_Cfg*/
-		&psr_configuration.raw,
-		sizeof(psr_configuration.raw));
-
-	if (psr_configuration.bits.ENABLE) {
-		unsigned char dpcdbuf[3] = {0};
-		union psr_error_status psr_error_status;
-		union psr_sink_psr_status psr_sink_psr_status;
-
-		dm_helpers_dp_read_dpcd(
-			link->ctx,
-			link,
-			0x2006, /*DpcdAddress_PSR_Error_Status*/
-			(unsigned char *) dpcdbuf,
-			sizeof(dpcdbuf));
-
-		/*DPCD 2006h ERROR STATUS*/
-		psr_error_status.raw = dpcdbuf[0];
-		/*DPCD 2008h SINK PANEL SELF REFRESH STATUS*/
-		psr_sink_psr_status.raw = dpcdbuf[2];
-
-		if (psr_error_status.bits.LINK_CRC_ERROR ||
-				psr_error_status.bits.RFB_STORAGE_ERROR ||
-				psr_error_status.bits.VSC_SDP_ERROR) {
-			bool allow_active;
-
-			/* Acknowledge and clear error bits */
-			dm_helpers_dp_write_dpcd(
-				link->ctx,
-				link,
-				8198,/*DpcdAddress_PSR_Error_Status*/
-				&psr_error_status.raw,
-				sizeof(psr_error_status.raw));
-
-			/* PSR error, disable and re-enable PSR */
-			if (link->psr_settings.psr_allow_active) {
-				allow_active = false;
-				dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
-				allow_active = true;
-				dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
-			}
-
-			return true;
-		} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
-				PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB) {
-			/* No error is detected, PSR is active.
-			 * We should return with IRQ_HPD handled without
-			 * checking for loss of sync since PSR would have
-			 * powered down main link. 
- */
-			return true;
-		}
-	}
-	return false;
-}
-
-static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate)
-{
-	switch (test_rate) {
-	case DP_TEST_LINK_RATE_RBR:
-		return LINK_RATE_LOW;
-	case DP_TEST_LINK_RATE_HBR:
-		return LINK_RATE_HIGH;
-	case DP_TEST_LINK_RATE_HBR2:
-		return LINK_RATE_HIGH2;
-	case DP_TEST_LINK_RATE_HBR3:
-		return LINK_RATE_HIGH3;
-	case DP_TEST_LINK_RATE_UHBR10:
-		return LINK_RATE_UHBR10;
-	case DP_TEST_LINK_RATE_UHBR20:
-		return LINK_RATE_UHBR20;
-	case DP_TEST_LINK_RATE_UHBR13_5:
-		return LINK_RATE_UHBR13_5;
-	default:
-		return LINK_RATE_UNKNOWN;
-	}
-}
-
-static void dp_test_send_link_training(struct dc_link *link)
-{
-	struct dc_link_settings link_settings = {0};
-	uint8_t test_rate = 0;
-
-	core_link_read_dpcd(
-			link,
-			DP_TEST_LANE_COUNT,
-			(unsigned char *)(&link_settings.lane_count),
-			1);
-	core_link_read_dpcd(
-			link,
-			DP_TEST_LINK_RATE,
-			&test_rate,
-			1);
-	link_settings.link_rate = get_link_rate_from_test_link_rate(test_rate);
-
-	/* Set preferred link settings */
-	link->verified_link_cap.lane_count = link_settings.lane_count;
-	link->verified_link_cap.link_rate = link_settings.link_rate;
-
-	dp_retrain_link_dp_test(link, &link_settings, false);
-}
-
-/* TODO: Raven HBR2 compliance eye output is unstable
- * (toggling on and off) with debugger break.
- * This causes intermittent PHY automation failures.
- * Need to look into the root cause. */
-static void dp_test_send_phy_test_pattern(struct dc_link *link)
-{
-	union phy_test_pattern dpcd_test_pattern;
-	union lane_adjust dpcd_lane_adjustment[2];
-	unsigned char dpcd_post_cursor_2_adjustment = 0;
-	unsigned char test_pattern_buffer[
-			(DP_TEST_264BIT_CUSTOM_PATTERN_263_256 -
-			DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0};
-	unsigned int test_pattern_size = 0;
-	enum dp_test_pattern test_pattern;
-	union lane_adjust dpcd_lane_adjust;
-	unsigned int lane;
-	struct link_training_settings link_training_settings;
-
-	dpcd_test_pattern.raw = 0;
-	memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
-	memset(&link_training_settings, 0, sizeof(link_training_settings));
-
-	/* get phy test pattern and pattern parameters from DP receiver */
-	core_link_read_dpcd(
-			link,
-			DP_PHY_TEST_PATTERN,
-			&dpcd_test_pattern.raw,
-			sizeof(dpcd_test_pattern));
-	core_link_read_dpcd(
-			link,
-			DP_ADJUST_REQUEST_LANE0_1,
-			&dpcd_lane_adjustment[0].raw,
-			sizeof(dpcd_lane_adjustment));
-
-	/* prepare link training settings */
-	link_training_settings.link_settings = link->cur_link_settings;
-
-	link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings);
-
-	if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
-			link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT)
-		dp_fixed_vs_pe_read_lane_adjust(
-				link,
-				link_training_settings.dpcd_lane_settings);
-
-	/* get post cursor 2 parameters
-	 * For DP 1.1a or earlier, this DPCD register's value is 0
-	 * For DP 1.2 or later:
-	 * Bits 1:0 = POST_CURSOR2_LANE0; Bits 3:2 = POST_CURSOR2_LANE1
-	 * Bits 5:4 = POST_CURSOR2_LANE2; Bits 7:6 = POST_CURSOR2_LANE3
-	 */
-	core_link_read_dpcd(
-			link,
-			DP_ADJUST_REQUEST_POST_CURSOR2,
-			&dpcd_post_cursor_2_adjustment,
-			sizeof(dpcd_post_cursor_2_adjustment));
-
-	/* translate request */
-	switch (dpcd_test_pattern.bits.PATTERN) {
-	case PHY_TEST_PATTERN_D10_2:
-		test_pattern = DP_TEST_PATTERN_D102;
-		break;
-	case PHY_TEST_PATTERN_SYMBOL_ERROR:
-		test_pattern = DP_TEST_PATTERN_SYMBOL_ERROR;
-		break;
-	case PHY_TEST_PATTERN_PRBS7:
-		test_pattern = DP_TEST_PATTERN_PRBS7; 
- break; - case PHY_TEST_PATTERN_80BIT_CUSTOM: - test_pattern = DP_TEST_PATTERN_80BIT_CUSTOM; - break; - case PHY_TEST_PATTERN_CP2520_1: - /* CP2520 pattern is unstable, temporarily use TPS4 instead */ - test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? - DP_TEST_PATTERN_TRAINING_PATTERN4 : - DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; - break; - case PHY_TEST_PATTERN_CP2520_2: - /* CP2520 pattern is unstable, temporarily use TPS4 instead */ - test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? - DP_TEST_PATTERN_TRAINING_PATTERN4 : - DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; - break; - case PHY_TEST_PATTERN_CP2520_3: - test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; - break; - case PHY_TEST_PATTERN_128b_132b_TPS1: - test_pattern = DP_TEST_PATTERN_128b_132b_TPS1; - break; - case PHY_TEST_PATTERN_128b_132b_TPS2: - test_pattern = DP_TEST_PATTERN_128b_132b_TPS2; - break; - case PHY_TEST_PATTERN_PRBS9: - test_pattern = DP_TEST_PATTERN_PRBS9; - break; - case PHY_TEST_PATTERN_PRBS11: - test_pattern = DP_TEST_PATTERN_PRBS11; - break; - case PHY_TEST_PATTERN_PRBS15: - test_pattern = DP_TEST_PATTERN_PRBS15; - break; - case PHY_TEST_PATTERN_PRBS23: - test_pattern = DP_TEST_PATTERN_PRBS23; - break; - case PHY_TEST_PATTERN_PRBS31: - test_pattern = DP_TEST_PATTERN_PRBS31; - break; - case PHY_TEST_PATTERN_264BIT_CUSTOM: - test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM; - break; - case PHY_TEST_PATTERN_SQUARE_PULSE: - test_pattern = DP_TEST_PATTERN_SQUARE_PULSE; - break; - default: - test_pattern = DP_TEST_PATTERN_VIDEO_MODE; - break; - } - - if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) { - test_pattern_size = (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - - DP_TEST_80BIT_CUSTOM_PATTERN_7_0) + 1; - core_link_read_dpcd( - link, - DP_TEST_80BIT_CUSTOM_PATTERN_7_0, - test_pattern_buffer, - test_pattern_size); - } - - if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) { - test_pattern_size = 1; // Square pattern data is 1 byte (DP spec) - core_link_read_dpcd( - link, - DP_PHY_SQUARE_PATTERN, - test_pattern_buffer, - test_pattern_size); - } - - if (test_pattern == DP_TEST_PATTERN_264BIT_CUSTOM) { - test_pattern_size = (DP_TEST_264BIT_CUSTOM_PATTERN_263_256- - DP_TEST_264BIT_CUSTOM_PATTERN_7_0) + 1; - core_link_read_dpcd( - link, - DP_TEST_264BIT_CUSTOM_PATTERN_7_0, - test_pattern_buffer, - test_pattern_size); - } - - for (lane = 0; lane < - (unsigned int)(link->cur_link_settings.lane_count); - lane++) { - dpcd_lane_adjust.raw = - get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane); - if (dp_get_link_encoding_format(&link->cur_link_settings) == - DP_8b_10b_ENCODING) { - link_training_settings.hw_lane_settings[lane].VOLTAGE_SWING = - (enum dc_voltage_swing) - (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE); - link_training_settings.hw_lane_settings[lane].PRE_EMPHASIS = - (enum dc_pre_emphasis) - (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE); - link_training_settings.hw_lane_settings[lane].POST_CURSOR2 = - (enum dc_post_cursor2) - ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03); - } else if (dp_get_link_encoding_format(&link->cur_link_settings) == - DP_128b_132b_ENCODING) { - link_training_settings.hw_lane_settings[lane].FFE_PRESET.raw = - dpcd_lane_adjust.tx_ffe.PRESET_VALUE; - } - } - - dp_hw_to_dpcd_lane_settings(&link_training_settings, - link_training_settings.hw_lane_settings, - link_training_settings.dpcd_lane_settings); - /*Usage: Measure DP physical lane signal - * by DP SI test equipment automatically. - * PHY test pattern request is generated by equipment via HPD interrupt. 
- * HPD needs to be active all the time. Do not touch it.
- * Forward the request to DS.
- */
-	dc_link_dp_set_test_pattern(
-		link,
-		test_pattern,
-		DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
-		&link_training_settings,
-		test_pattern_buffer,
-		test_pattern_size);
-}
-
-static void dp_test_send_link_test_pattern(struct dc_link *link)
-{
-	union link_test_pattern dpcd_test_pattern;
-	union test_misc dpcd_test_params;
-	enum dp_test_pattern test_pattern;
-	enum dp_test_pattern_color_space test_pattern_color_space =
-			DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
-	enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
-	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
-	struct pipe_ctx *pipe_ctx = NULL;
-	int i;
-
-	memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
-	memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
-
-	for (i = 0; i < MAX_PIPES; i++) {
-		if (pipes[i].stream == NULL)
-			continue;
-
-		if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
-			pipe_ctx = &pipes[i];
-			break;
-		}
-	}
-
-	if (pipe_ctx == NULL)
-		return;
-
-	/* get link test pattern and pattern parameters */
-	core_link_read_dpcd(
-			link,
-			DP_TEST_PATTERN,
-			&dpcd_test_pattern.raw,
-			sizeof(dpcd_test_pattern));
-	core_link_read_dpcd(
-			link,
-			DP_TEST_MISC0,
-			&dpcd_test_params.raw,
-			sizeof(dpcd_test_params));
-
-	switch (dpcd_test_pattern.bits.PATTERN) {
-	case LINK_TEST_PATTERN_COLOR_RAMP:
-		test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
-		break;
-	case LINK_TEST_PATTERN_VERTICAL_BARS:
-		test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
-		break; /* black and white */
-	case LINK_TEST_PATTERN_COLOR_SQUARES:
-		test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
-				TEST_DYN_RANGE_VESA ?
-				DP_TEST_PATTERN_COLOR_SQUARES :
-				DP_TEST_PATTERN_COLOR_SQUARES_CEA);
-		break;
-	default:
-		test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
-		break;
-	}
-
-	if (dpcd_test_params.bits.CLR_FORMAT == 0)
-		test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
-	else
-		test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ? 
- DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 : - DP_TEST_PATTERN_COLOR_SPACE_YCBCR601; - - switch (dpcd_test_params.bits.BPC) { - case 0: // 6 bits - requestColorDepth = COLOR_DEPTH_666; - break; - case 1: // 8 bits - requestColorDepth = COLOR_DEPTH_888; - break; - case 2: // 10 bits - requestColorDepth = COLOR_DEPTH_101010; - break; - case 3: // 12 bits - requestColorDepth = COLOR_DEPTH_121212; - break; - default: - break; - } - - switch (dpcd_test_params.bits.CLR_FORMAT) { - case 0: - pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_RGB; - break; - case 1: - pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_YCBCR422; - break; - case 2: - pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_YCBCR444; - break; - default: - pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_RGB; - break; - } - - - if (requestColorDepth != COLOR_DEPTH_UNDEFINED - && pipe_ctx->stream->timing.display_color_depth != requestColorDepth) { - DC_LOG_DEBUG("%s: original bpc %d, changing to %d\n", - __func__, - pipe_ctx->stream->timing.display_color_depth, - requestColorDepth); - pipe_ctx->stream->timing.display_color_depth = requestColorDepth; - } - - dp_update_dsc_config(pipe_ctx); - - dc_link_dp_set_test_pattern( - link, - test_pattern, - test_pattern_color_space, - NULL, - NULL, - 0); -} - -static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video) -{ - union audio_test_mode dpcd_test_mode = {0}; - struct audio_test_pattern_type dpcd_pattern_type = {0}; - union audio_test_pattern_period dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0}; - enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; - - struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; - struct pipe_ctx *pipe_ctx = &pipes[0]; - unsigned int channel_count; - unsigned int channel = 0; - unsigned int modes = 0; - unsigned int sampling_rate_in_hz = 0; - - // get audio test mode and test pattern parameters - core_link_read_dpcd( - link, - DP_TEST_AUDIO_MODE, - &dpcd_test_mode.raw, - sizeof(dpcd_test_mode)); - - core_link_read_dpcd( - link, - DP_TEST_AUDIO_PATTERN_TYPE, - &dpcd_pattern_type.value, - sizeof(dpcd_pattern_type)); - - channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT); - - // read pattern periods for requested channels when sawTooth pattern is requested - if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH || - dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) { - - test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ? 
- DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; - // read period for each channel - for (channel = 0; channel < channel_count; channel++) { - core_link_read_dpcd( - link, - DP_TEST_AUDIO_PERIOD_CH1 + channel, - &dpcd_pattern_period[channel].raw, - sizeof(dpcd_pattern_period[channel])); - } - } - - // translate sampling rate - switch (dpcd_test_mode.bits.sampling_rate) { - case AUDIO_SAMPLING_RATE_32KHZ: - sampling_rate_in_hz = 32000; - break; - case AUDIO_SAMPLING_RATE_44_1KHZ: - sampling_rate_in_hz = 44100; - break; - case AUDIO_SAMPLING_RATE_48KHZ: - sampling_rate_in_hz = 48000; - break; - case AUDIO_SAMPLING_RATE_88_2KHZ: - sampling_rate_in_hz = 88200; - break; - case AUDIO_SAMPLING_RATE_96KHZ: - sampling_rate_in_hz = 96000; - break; - case AUDIO_SAMPLING_RATE_176_4KHZ: - sampling_rate_in_hz = 176400; - break; - case AUDIO_SAMPLING_RATE_192KHZ: - sampling_rate_in_hz = 192000; - break; - default: - sampling_rate_in_hz = 0; - break; - } - - link->audio_test_data.flags.test_requested = 1; - link->audio_test_data.flags.disable_video = disable_video; - link->audio_test_data.sampling_rate = sampling_rate_in_hz; - link->audio_test_data.channel_count = channel_count; - link->audio_test_data.pattern_type = test_pattern; - - if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) { - for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) { - link->audio_test_data.pattern_period[modes] = dpcd_pattern_period[modes].bits.pattern_period; - } - } -} - -void dc_link_dp_handle_automated_test(struct dc_link *link) -{ - union test_request test_request; - union test_response test_response; - - memset(&test_request, 0, sizeof(test_request)); - memset(&test_response, 0, sizeof(test_response)); - - core_link_read_dpcd( - link, - DP_TEST_REQUEST, - &test_request.raw, - sizeof(union test_request)); - if (test_request.bits.LINK_TRAINING) { - /* ACK first to let DP RX test box monitor LT sequence */ - test_response.bits.ACK = 1; - core_link_write_dpcd( - link, - DP_TEST_RESPONSE, - &test_response.raw, - sizeof(test_response)); - dp_test_send_link_training(link); - /* no acknowledge request is needed again */ - test_response.bits.ACK = 0; - } - if (test_request.bits.LINK_TEST_PATTRN) { - dp_test_send_link_test_pattern(link); - test_response.bits.ACK = 1; - } - - if (test_request.bits.AUDIO_TEST_PATTERN) { - dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO); - test_response.bits.ACK = 1; - } - - if (test_request.bits.PHY_TEST_PATTERN) { - dp_test_send_phy_test_pattern(link); - test_response.bits.ACK = 1; - } - - /* send request acknowledgment */ - if (test_response.bits.ACK) - core_link_write_dpcd( - link, - DP_TEST_RESPONSE, - &test_response.raw, - sizeof(test_response)); -} - -void dc_link_dp_handle_link_loss(struct dc_link *link) -{ - int i; - struct pipe_ctx *pipe_ctx; - - for (i = 0; i < MAX_PIPES; i++) { - pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) - break; - } - - if (pipe_ctx == NULL || pipe_ctx->stream == NULL) - return; - - for (i = 0; i < MAX_PIPES; i++) { - pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && - pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) - core_link_disable_stream(pipe_ctx); - } - - for (i = 0; i < MAX_PIPES; i++) { - pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off - && 
pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) { - // Always use max settings here for DP 1.4a LL Compliance CTS - if (link->is_automated) { - pipe_ctx->link_config.dp_link_settings.lane_count = - link->verified_link_cap.lane_count; - pipe_ctx->link_config.dp_link_settings.link_rate = - link->verified_link_cap.link_rate; - pipe_ctx->link_config.dp_link_settings.link_spread = - link->verified_link_cap.link_spread; - } - core_link_enable_stream(link->dc->current_state, pipe_ctx); - } - } -} - -bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss, - bool defer_handling, bool *has_left_work) -{ - union hpd_irq_data hpd_irq_dpcd_data = {0}; - union device_service_irq device_service_clear = {0}; - enum dc_status result; - bool status = false; - - if (out_link_loss) - *out_link_loss = false; - - if (has_left_work) - *has_left_work = false; - /* For use cases related to down stream connection status change, - * PSR and device auto test, refer to function handle_sst_hpd_irq - * in DAL2.1*/ - - DC_LOG_HW_HPD_IRQ("%s: Got short pulse HPD on link %d\n", - __func__, link->link_index); - - - /* All the "handle_hpd_irq_xxx()" methods - * should be called only after - * dal_dpsst_ls_read_hpd_irq_data - * Order of calls is important too - */ - result = read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data); - if (out_hpd_irq_dpcd_data) - *out_hpd_irq_dpcd_data = hpd_irq_dpcd_data; - - if (result != DC_OK) { - DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain irq data\n", - __func__); - return false; - } - - if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { - // Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC - link->is_automated = true; - device_service_clear.bits.AUTOMATED_TEST = 1; - core_link_write_dpcd( - link, - DP_DEVICE_SERVICE_IRQ_VECTOR, - &device_service_clear.raw, - sizeof(device_service_clear.raw)); - device_service_clear.raw = 0; - if (defer_handling && has_left_work) - *has_left_work = true; - else - dc_link_dp_handle_automated_test(link); - return false; - } - - if (!dc_link_dp_allow_hpd_rx_irq(link)) { - DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n", - __func__, link->link_index); - return false; - } - - if (handle_hpd_irq_psr_sink(link)) - /* PSR-related error was detected and handled */ - return true; - - /* If PSR-related error handled, Main link may be off, - * so do not handle as a normal sink status change interrupt. - */ - - if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) { - if (defer_handling && has_left_work) - *has_left_work = true; - return true; - } - - /* check if we have MST msg and return since we poll for it */ - if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { - if (defer_handling && has_left_work) - *has_left_work = true; - return false; - } - - /* For now we only handle 'Downstream port status' case. - * If we got sink count changed it means - * Downstream port status changed, - * then DM should call DC to do the detection. 
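	 * (e.g. an SST branch device whose SINK_COUNT drops after a
	 * downstream monitor is unplugged is caught by the sink-count
	 * comparison further below, which returns true so detection re-runs)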
- * NOTE: Do not handle link loss on eDP since it is internal link*/ - if ((link->connector_signal != SIGNAL_TYPE_EDP) && - hpd_rx_irq_check_link_loss_status( - link, - &hpd_irq_dpcd_data)) { - /* Connectivity log: link loss */ - CONN_DATA_LINK_LOSS(link, - hpd_irq_dpcd_data.raw, - sizeof(hpd_irq_dpcd_data), - "Status: "); - - if (defer_handling && has_left_work) - *has_left_work = true; - else - dc_link_dp_handle_link_loss(link); - - status = false; - if (out_link_loss) - *out_link_loss = true; - - dp_trace_link_loss_increment(link); - } - - if (link->type == dc_connection_sst_branch && - hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT - != link->dpcd_sink_count) - status = true; - - /* reasons for HPD RX: - * 1. Link Loss - ie Re-train the Link - * 2. MST sideband message - * 3. Automated Test - ie. Internal Commit - * 4. CP (copy protection) - (not interesting for DM???) - * 5. DRR - * 6. Downstream Port status changed - * -ie. Detect - this the only one - * which is interesting for DM because - * it must call dc_link_detect. - */ - return status; -} - -/*query dpcd for version and mst cap addresses*/ -bool is_mst_supported(struct dc_link *link) -{ - bool mst = false; - enum dc_status st = DC_OK; - union dpcd_rev rev; - union mstm_cap cap; - - if (link->preferred_training_settings.mst_enable && - *link->preferred_training_settings.mst_enable == false) { - return false; - } - - rev.raw = 0; - cap.raw = 0; - - st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw, - sizeof(rev)); - - if (st == DC_OK && rev.raw >= DPCD_REV_12) { - - st = core_link_read_dpcd(link, DP_MSTM_CAP, - &cap.raw, sizeof(cap)); - if (st == DC_OK && cap.bits.MST_CAP == 1) - mst = true; - } - return mst; - -} - -bool is_dp_active_dongle(const struct dc_link *link) -{ - return (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_VGA_CONVERTER) && - (link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_CONVERTER); -} - -bool is_dp_branch_device(const struct dc_link *link) -{ - return link->dpcd_caps.is_branch_dev; -} - -static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc) -{ - switch (bpc) { - case DOWN_STREAM_MAX_8BPC: - return 8; - case DOWN_STREAM_MAX_10BPC: - return 10; - case DOWN_STREAM_MAX_12BPC: - return 12; - case DOWN_STREAM_MAX_16BPC: - return 16; - default: - break; - } - - return -1; -} - -#if defined(CONFIG_DRM_AMD_DC_DCN) -uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw) -{ - switch (bw) { - case 0b001: - return 9000000; - case 0b010: - return 18000000; - case 0b011: - return 24000000; - case 0b100: - return 32000000; - case 0b101: - return 40000000; - case 0b110: - return 48000000; - } - - return 0; -} - -/* - * Return PCON's post FRL link training supported BW if its non-zero, otherwise return max_supported_frl_bw. 
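 * For example, a PCON whose raw DPCD cap advertised 48 Gbps but whose
 * post-LT status sets FRL_MODE with BW_32Gbps resolves to 32,000,000 kbps.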
- */ -static uint32_t intersect_frl_link_bw_support( - const uint32_t max_supported_frl_bw_in_kbps, - const union hdmi_encoded_link_bw hdmi_encoded_link_bw) -{ - uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps; - - // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode) - if (hdmi_encoded_link_bw.bits.FRL_MODE) { - if (hdmi_encoded_link_bw.bits.BW_48Gbps) - supported_bw_in_kbps = 48000000; - else if (hdmi_encoded_link_bw.bits.BW_40Gbps) - supported_bw_in_kbps = 40000000; - else if (hdmi_encoded_link_bw.bits.BW_32Gbps) - supported_bw_in_kbps = 32000000; - else if (hdmi_encoded_link_bw.bits.BW_24Gbps) - supported_bw_in_kbps = 24000000; - else if (hdmi_encoded_link_bw.bits.BW_18Gbps) - supported_bw_in_kbps = 18000000; - else if (hdmi_encoded_link_bw.bits.BW_9Gbps) - supported_bw_in_kbps = 9000000; - } - - return supported_bw_in_kbps; -} -#endif - -static void read_dp_device_vendor_id(struct dc_link *link) -{ - struct dp_device_vendor_id dp_id; - - /* read IEEE branch device id */ - core_link_read_dpcd( - link, - DP_BRANCH_OUI, - (uint8_t *)&dp_id, - sizeof(dp_id)); - - link->dpcd_caps.branch_dev_id = - (dp_id.ieee_oui[0] << 16) + - (dp_id.ieee_oui[1] << 8) + - dp_id.ieee_oui[2]; - - memmove( - link->dpcd_caps.branch_dev_name, - dp_id.ieee_device_id, - sizeof(dp_id.ieee_device_id)); -} - - - -static void get_active_converter_info( - uint8_t data, struct dc_link *link) -{ - union dp_downstream_port_present ds_port = { .byte = data }; - memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps)); - - /* decode converter info*/ - if (!ds_port.fields.PORT_PRESENT) { - link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; - ddc_service_set_dongle_type(link->ddc, - link->dpcd_caps.dongle_type); - link->dpcd_caps.is_branch_dev = false; - return; - } - - /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */ - link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; - - switch (ds_port.fields.PORT_TYPE) { - case DOWNSTREAM_VGA: - link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; - break; - case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS: - /* At this point we don't know is it DVI or HDMI or DP++, - * assume DVI.*/ - link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER; - break; - default: - link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; - break; - } - - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) { - uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/ - union dwnstream_port_caps_byte0 *port_caps = - (union dwnstream_port_caps_byte0 *)det_caps; - if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0, - det_caps, sizeof(det_caps)) == DC_OK) { - - switch (port_caps->bits.DWN_STRM_PORTX_TYPE) { - /*Handle DP case as DONGLE_NONE*/ - case DOWN_STREAM_DETAILED_DP: - link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; - break; - case DOWN_STREAM_DETAILED_VGA: - link->dpcd_caps.dongle_type = - DISPLAY_DONGLE_DP_VGA_CONVERTER; - break; - case DOWN_STREAM_DETAILED_DVI: - link->dpcd_caps.dongle_type = - DISPLAY_DONGLE_DP_DVI_CONVERTER; - break; - case DOWN_STREAM_DETAILED_HDMI: - case DOWN_STREAM_DETAILED_DP_PLUS_PLUS: - /*Handle DP++ active converter case, process DP++ case as HDMI case according DP1.4 spec*/ - link->dpcd_caps.dongle_type = - DISPLAY_DONGLE_DP_HDMI_CONVERTER; - - link->dpcd_caps.dongle_caps.dongle_type = link->dpcd_caps.dongle_type; - if (ds_port.fields.DETAILED_CAPS) { - - union dwnstream_port_caps_byte3_hdmi - hdmi_caps = {.raw = det_caps[3] }; - union 
dwnstream_port_caps_byte2 - hdmi_color_caps = {.raw = det_caps[2] }; - link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz = - det_caps[1] * 2500; - - link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter = - hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK; - /*YCBCR capability only for HDMI case*/ - if (port_caps->bits.DWN_STRM_PORTX_TYPE - == DOWN_STREAM_DETAILED_HDMI) { - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = - hdmi_caps.bits.YCrCr422_PASS_THROUGH; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = - hdmi_caps.bits.YCrCr420_PASS_THROUGH; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = - hdmi_caps.bits.YCrCr422_CONVERSION; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = - hdmi_caps.bits.YCrCr420_CONVERSION; - } - - link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc = - translate_dpcd_max_bpc( - hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT); - -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (link->dc->caps.dp_hdmi21_pcon_support) { - union hdmi_encoded_link_bw hdmi_encoded_link_bw; - - link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = - dc_link_bw_kbps_from_raw_frl_link_rate_data( - hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT); - - // Intersect reported max link bw support with the supported link rate post FRL link training - if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS, - &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) { - link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support( - link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps, - hdmi_encoded_link_bw); - } - - if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0) - link->dpcd_caps.dongle_caps.extendedCapValid = true; - } -#endif - - if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0) - link->dpcd_caps.dongle_caps.extendedCapValid = true; - } - - break; - } - } - } - - ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type); - - { - struct dp_sink_hw_fw_revision dp_hw_fw_revision; - - core_link_read_dpcd( - link, - DP_BRANCH_REVISION_START, - (uint8_t *)&dp_hw_fw_revision, - sizeof(dp_hw_fw_revision)); - - link->dpcd_caps.branch_hw_revision = - dp_hw_fw_revision.ieee_hw_rev; - - memmove( - link->dpcd_caps.branch_fw_revision, - dp_hw_fw_revision.ieee_fw_rev, - sizeof(dp_hw_fw_revision.ieee_fw_rev)); - } - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && - link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { - union dp_dfp_cap_ext dfp_cap_ext; - memset(&dfp_cap_ext, '\0', sizeof (dfp_cap_ext)); - core_link_read_dpcd( - link, - DP_DFP_CAPABILITY_EXTENSION_SUPPORT, - dfp_cap_ext.raw, - sizeof(dfp_cap_ext.raw)); - link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported; - link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps = - dfp_cap_ext.fields.max_pixel_rate_in_mps[0] + - (dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8); - link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width = - dfp_cap_ext.fields.max_video_h_active_width[0] + - (dfp_cap_ext.fields.max_video_h_active_width[1] << 8); - link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height = - dfp_cap_ext.fields.max_video_v_active_height[0] + - (dfp_cap_ext.fields.max_video_v_active_height[1] << 8); - link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps = - dfp_cap_ext.fields.encoding_format_caps; - link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps = - dfp_cap_ext.fields.rgb_color_depth_caps; - 
link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps = - dfp_cap_ext.fields.ycbcr444_color_depth_caps; - link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps = - dfp_cap_ext.fields.ycbcr422_color_depth_caps; - link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps = - dfp_cap_ext.fields.ycbcr420_color_depth_caps; - DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index); - DC_LOG_DP2("\tdfp_cap_ext.supported = %s", link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? "true" : "false"); - DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps); - DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width); - DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height); - } -} - -static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, - int length) -{ - int retry = 0; - - if (!link->dpcd_caps.dpcd_rev.raw) { - do { - dp_receiver_power_ctrl(link, true); - core_link_read_dpcd(link, DP_DPCD_REV, - dpcd_data, length); - link->dpcd_caps.dpcd_rev.raw = dpcd_data[ - DP_DPCD_REV - - DP_DPCD_REV]; - } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw); - } - - if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) { - switch (link->dpcd_caps.branch_dev_id) { - /* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down - * all internal circuits including AUX communication preventing - * reading DPCD table and EDID (spec violation). - * Encoder will skip DP RX power down on disable_output to - * keep receiver powered all the time.*/ - case DP_BRANCH_DEVICE_ID_0010FA: - case DP_BRANCH_DEVICE_ID_0080E1: - case DP_BRANCH_DEVICE_ID_00E04C: - link->wa_flags.dp_keep_receiver_powered = true; - break; - - /* TODO: May need work around for other dongles. */ - default: - link->wa_flags.dp_keep_receiver_powered = false; - break; - } - } else - link->wa_flags.dp_keep_receiver_powered = false; -} - -/* Read additional sink caps defined in source specific DPCD area - * This function currently only reads from SinkCapability address (DP_SOURCE_SINK_CAP) - */ -static bool dpcd_read_sink_ext_caps(struct dc_link *link) -{ - uint8_t dpcd_data; - - if (!link) - return false; - - if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK) - return false; - - link->dpcd_sink_ext_caps.raw = dpcd_data; - return true; -} - -enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) -{ - uint8_t lttpr_dpcd_data[8]; - enum dc_status status = DC_ERROR_UNEXPECTED; - bool is_lttpr_present = false; - - /* Logic to determine LTTPR support*/ - bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware; - - if (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support) - return false; - - /* By reading LTTPR capability, RX assumes that we will enable - * LTTPR extended aux timeout if LTTPR is present. 
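	 * The eight capability bytes starting at the LTTPR field base are
	 * fetched below in a single AUX read; each field is then indexed as
	 * (its DPCD address - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV)
	 * into lttpr_dpcd_data.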
- */ - status = core_link_read_dpcd(link, - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, - lttpr_dpcd_data, - sizeof(lttpr_dpcd_data)); - - link->dpcd_caps.lttpr_caps.revision.raw = - lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - - link->dpcd_caps.lttpr_caps.max_link_rate = - lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - - link->dpcd_caps.lttpr_caps.phy_repeater_cnt = - lttpr_dpcd_data[DP_PHY_REPEATER_CNT - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - - link->dpcd_caps.lttpr_caps.max_lane_count = - lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - - link->dpcd_caps.lttpr_caps.mode = - lttpr_dpcd_data[DP_PHY_REPEATER_MODE - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - - link->dpcd_caps.lttpr_caps.max_ext_timeout = - lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw = - lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - - link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw = - lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - - /* If this chip cap is set, at least one retimer must exist in the chain - * Override count to 1 if we receive a known bad count (0 or an invalid value) - */ - if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && - (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) { - ASSERT(0); - link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80; - DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - } - - /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. 
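	 * (DPCD PHY_REPEATER_CNT is one-hot encoded: 0x80 means one repeater,
	 * 0x40 two, down to 0x01 for eight; dp_convert_to_count() translates
	 * it, which is why the forced fallback above writes 0x80 rather than 1.)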
*/ - is_lttpr_present = dp_is_lttpr_present(link); - - if (is_lttpr_present) - CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: "); - - DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present); - return status; -} - -bool dp_is_lttpr_present(struct dc_link *link) -{ - return (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 && - link->dpcd_caps.lttpr_caps.max_lane_count > 0 && - link->dpcd_caps.lttpr_caps.max_lane_count <= 4 && - link->dpcd_caps.lttpr_caps.revision.raw >= 0x14); -} - -enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting) -{ - enum dp_link_encoding encoding = dp_get_link_encoding_format(link_setting); - - if (encoding == DP_8b_10b_ENCODING) - return dp_decide_8b_10b_lttpr_mode(link); - else if (encoding == DP_128b_132b_ENCODING) - return dp_decide_128b_132b_lttpr_mode(link); - - ASSERT(0); - return LTTPR_MODE_NON_LTTPR; -} - -void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override) -{ - if (!dp_is_lttpr_present(link)) - return; - - if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_TRANSPARENT) { - *override = LTTPR_MODE_TRANSPARENT; - } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_TRANSPARENT) { - *override = LTTPR_MODE_NON_TRANSPARENT; - } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) { - *override = LTTPR_MODE_NON_LTTPR; - } - DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override)); -} - -enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link) -{ - bool is_lttpr_present = dp_is_lttpr_present(link); - bool vbios_lttpr_force_non_transparent = link->dc->caps.vbios_lttpr_enable; - bool vbios_lttpr_aware = link->dc->caps.vbios_lttpr_aware; - - if (!is_lttpr_present) - return LTTPR_MODE_NON_LTTPR; - - if (vbios_lttpr_aware) { - if (vbios_lttpr_force_non_transparent) { - DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n"); - return LTTPR_MODE_NON_TRANSPARENT; - } else { - DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default due to VBIOS not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n"); - return LTTPR_MODE_TRANSPARENT; - } - } - - if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A && - link->dc->caps.extended_aux_timeout_support) { - DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default and dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A set to 1.\n"); - return LTTPR_MODE_NON_TRANSPARENT; - } - - DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n"); - return LTTPR_MODE_NON_LTTPR; -} - -enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link) -{ - enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR; - - if (dp_is_lttpr_present(link)) - mode = LTTPR_MODE_NON_TRANSPARENT; - - DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode); - return mode; -} - -static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id) -{ - union dmub_rb_cmd cmd; - - if (!link->ctx->dmub_srv || - link->ep_type != DISPLAY_ENDPOINT_PHY || - link->link_enc->features.flags.bits.DP_IS_USB_C == 0) - return false; - - memset(&cmd, 0, sizeof(cmd)); - cmd.cable_id.header.type = DMUB_CMD_GET_USBC_CABLE_ID; - cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data); - cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx( - link->dc, link->link_enc->transmitter); - if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) && - cmd.cable_id.header.ret_status == 1) { - cable_id->raw = 
cmd.cable_id.data.output_raw; - DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw); - } - return cmd.cable_id.header.ret_status == 1; -} - -static union dp_cable_id intersect_cable_id( - union dp_cable_id *a, union dp_cable_id *b) -{ - union dp_cable_id out; - - out.bits.UHBR10_20_CAPABILITY = MIN(a->bits.UHBR10_20_CAPABILITY, - b->bits.UHBR10_20_CAPABILITY); - out.bits.UHBR13_5_CAPABILITY = MIN(a->bits.UHBR13_5_CAPABILITY, - b->bits.UHBR13_5_CAPABILITY); - out.bits.CABLE_TYPE = MAX(a->bits.CABLE_TYPE, b->bits.CABLE_TYPE); - - return out; -} - -static void retrieve_cable_id(struct dc_link *link) -{ - union dp_cable_id usbc_cable_id; - - link->dpcd_caps.cable_id.raw = 0; - core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX, - &link->dpcd_caps.cable_id.raw, sizeof(uint8_t)); - - if (get_usbc_cable_id(link, &usbc_cable_id)) - link->dpcd_caps.cable_id = intersect_cable_id( - &link->dpcd_caps.cable_id, &usbc_cable_id); -} - -static enum dc_status wake_up_aux_channel(struct dc_link *link) -{ - enum dc_status status = DC_ERROR_UNEXPECTED; - uint32_t aux_channel_retry_cnt = 0; - uint8_t dpcd_power_state = '\0'; - - while (status != DC_OK && aux_channel_retry_cnt < 10) { - status = core_link_read_dpcd(link, DP_SET_POWER, - &dpcd_power_state, sizeof(dpcd_power_state)); - - /* Delay 1 ms if AUX CH is in power down state. Based on spec - * section 2.3.1.2, if AUX CH may be powered down due to - * write to DPCD 600h = 2. Sink AUX CH is monitoring differential - * signal and may need up to 1 ms before being able to reply. - */ - if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) { - udelay(1000); - aux_channel_retry_cnt++; - } - } - - if (status != DC_OK) { - dpcd_power_state = DP_SET_POWER_D0; - status = core_link_write_dpcd( - link, - DP_SET_POWER, - &dpcd_power_state, - sizeof(dpcd_power_state)); - - dpcd_power_state = DP_SET_POWER_D3; - status = core_link_write_dpcd( - link, - DP_SET_POWER, - &dpcd_power_state, - sizeof(dpcd_power_state)); - return DC_ERROR_UNEXPECTED; - } - - return DC_OK; -} - -static bool retrieve_link_cap(struct dc_link *link) -{ - /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, - * which means size 16 will be good for both of those DPCD register block reads - */ - uint8_t dpcd_data[16]; - /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST. - */ - uint8_t dpcd_dprx_data = '\0'; - - struct dp_device_vendor_id sink_id; - union down_stream_port_count down_strm_port_count; - union edp_configuration_cap edp_config_cap; - union dp_downstream_port_present ds_port = { 0 }; - enum dc_status status = DC_ERROR_UNEXPECTED; - uint32_t read_dpcd_retry_cnt = 3; - int i; - struct dp_sink_hw_fw_revision dp_hw_fw_revision; - const uint32_t post_oui_delay = 30; // 30ms - - memset(dpcd_data, '\0', sizeof(dpcd_data)); - memset(&down_strm_port_count, - '\0', sizeof(union down_stream_port_count)); - memset(&edp_config_cap, '\0', - sizeof(union edp_configuration_cap)); - - /* if extended timeout is supported in hardware, - * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer - * CTS 4.2.1.1 regression introduced by CTS specs requirement update. 
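	 * (LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD below is that extended 3.2 ms
	 * value; the timeout is restored to the regular default further down
	 * once dp_is_lttpr_present() reports that no repeater is present.)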
- */ - dc_link_aux_try_to_configure_timeout(link->ddc, - LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); - - status = dp_retrieve_lttpr_cap(link); - - if (status != DC_OK) { - status = wake_up_aux_channel(link); - if (status == DC_OK) - dp_retrieve_lttpr_cap(link); - else - return false; - } - - if (dp_is_lttpr_present(link)) - configure_lttpr_mode_transparent(link); - - /* Read DP tunneling information. */ - status = dpcd_get_tunneling_device_data(link); - - dpcd_set_source_specific_data(link); - /* Sink may need to configure internals based on vendor, so allow some - * time before proceeding with possibly vendor specific transactions - */ - msleep(post_oui_delay); - - for (i = 0; i < read_dpcd_retry_cnt; i++) { - status = core_link_read_dpcd( - link, - DP_DPCD_REV, - dpcd_data, - sizeof(dpcd_data)); - if (status == DC_OK) - break; - } - - if (status != DC_OK) { - dm_error("%s: Read receiver caps dpcd data failed.\n", __func__); - return false; - } - - if (!dp_is_lttpr_present(link)) - dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); - - { - union training_aux_rd_interval aux_rd_interval; - - aux_rd_interval.raw = - dpcd_data[DP_TRAINING_AUX_RD_INTERVAL]; - - link->dpcd_caps.ext_receiver_cap_field_present = - aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1; - - if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) { - uint8_t ext_cap_data[16]; - - memset(ext_cap_data, '\0', sizeof(ext_cap_data)); - for (i = 0; i < read_dpcd_retry_cnt; i++) { - status = core_link_read_dpcd( - link, - DP_DP13_DPCD_REV, - ext_cap_data, - sizeof(ext_cap_data)); - if (status == DC_OK) { - memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data)); - break; - } - } - if (status != DC_OK) - dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__); - } - } - - link->dpcd_caps.dpcd_rev.raw = - dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; - - if (link->dpcd_caps.ext_receiver_cap_field_present) { - for (i = 0; i < read_dpcd_retry_cnt; i++) { - status = core_link_read_dpcd( - link, - DP_DPRX_FEATURE_ENUMERATION_LIST, - &dpcd_dprx_data, - sizeof(dpcd_dprx_data)); - if (status == DC_OK) - break; - } - - link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data; - - if (status != DC_OK) - dm_error("%s: Read DPRX caps data failed.\n", __func__); - } - - else { - link->dpcd_caps.dprx_feature.raw = 0; - } - - - /* Error condition checking... - * It is impossible for Sink to report Max Lane Count = 0. - * It is possible for Sink to report Max Link Rate = 0, if it is - * an eDP device that is reporting specialized link rates in the - * SUPPORTED_LINK_RATE table. 
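	 * Such eDP panels are picked up by detect_edp_sink_caps(), which
	 * reads the DP_SUPPORTED_LINK_RATES table and raises
	 * reported_link_cap.link_rate to the highest rate listed there.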
- */ - if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) - return false; - - ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - - DP_DPCD_REV]; - - read_dp_device_vendor_id(link); - - /* TODO - decouple raw mst capability from policy decision */ - link->dpcd_caps.is_mst_capable = is_mst_supported(link); - - get_active_converter_info(ds_port.byte, link); - - dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data)); - - down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - - DP_DPCD_REV]; - - link->dpcd_caps.allow_invalid_MSA_timing_param = - down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; - - link->dpcd_caps.max_ln_count.raw = dpcd_data[ - DP_MAX_LANE_COUNT - DP_DPCD_REV]; - - link->dpcd_caps.max_down_spread.raw = dpcd_data[ - DP_MAX_DOWNSPREAD - DP_DPCD_REV]; - - link->reported_link_cap.lane_count = - link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; - link->reported_link_cap.link_rate = get_link_rate_from_max_link_bw( - dpcd_data[DP_MAX_LINK_RATE - DP_DPCD_REV]); - link->reported_link_cap.link_spread = - link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? - LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; - - edp_config_cap.raw = dpcd_data[ - DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; - link->dpcd_caps.panel_mode_edp = - edp_config_cap.bits.ALT_SCRAMBLER_RESET; - link->dpcd_caps.dpcd_display_control_capable = - edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; - link->dpcd_caps.channel_coding_cap.raw = - dpcd_data[DP_MAIN_LINK_CHANNEL_CODING - DP_DPCD_REV]; - link->test_pattern_enabled = false; - link->compliance_test_state.raw = 0; - - /* read sink count */ - core_link_read_dpcd(link, - DP_SINK_COUNT, - &link->dpcd_caps.sink_count.raw, - sizeof(link->dpcd_caps.sink_count.raw)); - - /* read sink ieee oui */ - core_link_read_dpcd(link, - DP_SINK_OUI, - (uint8_t *)(&sink_id), - sizeof(sink_id)); - - link->dpcd_caps.sink_dev_id = - (sink_id.ieee_oui[0] << 16) + - (sink_id.ieee_oui[1] << 8) + - (sink_id.ieee_oui[2]); - - memmove( - link->dpcd_caps.sink_dev_id_str, - sink_id.ieee_device_id, - sizeof(sink_id.ieee_device_id)); - - /* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */ - { - uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 }; - - if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && - !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017, - sizeof(str_mbp_2017))) { - link->reported_link_cap.link_rate = 0x0c; - } - } - - core_link_read_dpcd( - link, - DP_SINK_HW_REVISION_START, - (uint8_t *)&dp_hw_fw_revision, - sizeof(dp_hw_fw_revision)); - - link->dpcd_caps.sink_hw_revision = - dp_hw_fw_revision.ieee_hw_rev; - - memmove( - link->dpcd_caps.sink_fw_revision, - dp_hw_fw_revision.ieee_fw_rev, - sizeof(dp_hw_fw_revision.ieee_fw_rev)); - - /* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */ - { - uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 }; - uint8_t fwrev_mbp_2018[] = { 7, 4 }; - uint8_t fwrev_mbp_2018_vega[] = { 8, 4 }; - - /* We also check for the firmware revision as 16,1 models have an - * identical device id and are incorrectly quirked otherwise. 
- */ - if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && - !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018, - sizeof(str_mbp_2018)) && - (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018, - sizeof(fwrev_mbp_2018)) || - !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega, - sizeof(fwrev_mbp_2018_vega)))) { - link->reported_link_cap.link_rate = LINK_RATE_RBR2; - } - } - - memset(&link->dpcd_caps.dsc_caps, '\0', - sizeof(link->dpcd_caps.dsc_caps)); - memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); - /* Read DSC and FEC sink capabilities if DP revision is 1.4 and up */ - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) { - status = core_link_read_dpcd( - link, - DP_FEC_CAPABILITY, - &link->dpcd_caps.fec_cap.raw, - sizeof(link->dpcd_caps.fec_cap.raw)); - status = core_link_read_dpcd( - link, - DP_DSC_SUPPORT, - link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, - sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw)); - if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { - status = core_link_read_dpcd( - link, - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, - link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, - sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); - DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index); - DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x", - link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0); - DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 = 0x%02x", - link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1); - DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x", - link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH); - } - - /* Apply work around to disable FEC and DSC for USB4 tunneling in TBT3 compatibility mode - * only if required. - */ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && - link->dc->debug.dpia_debug.bits.enable_force_tbt3_work_around && - link->dpcd_caps.is_branch_dev && - link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && - link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 && - (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE || - link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)) { - /* A TBT3 device is expected to report no support for FEC or DSC to a USB4 DPIA. - * Clear FEC and DSC capabilities as a work around if that is not the case. 
- */ - link->wa_flags.dpia_forced_tbt3_mode = true; - memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps)); - memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); - DC_LOG_DSC("Clear DSC SUPPORT for USB4 link(%d) in TBT3 compatibility mode", link->link_index); - } else - link->wa_flags.dpia_forced_tbt3_mode = false; - } - - if (!dpcd_read_sink_ext_caps(link)) - link->dpcd_sink_ext_caps.raw = 0; - - if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { - DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index); - - core_link_read_dpcd(link, - DP_128b_132b_SUPPORTED_LINK_RATES, - &link->dpcd_caps.dp_128b_132b_supported_link_rates.raw, - sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw)); - if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20) - link->reported_link_cap.link_rate = LINK_RATE_UHBR20; - else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5) - link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5; - else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10) - link->reported_link_cap.link_rate = LINK_RATE_UHBR10; - else - dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__); - DC_LOG_DP2("128b/132b supported link rates is read at link %d", link->link_index); - DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz", - link->reported_link_cap.link_rate / 100, - link->reported_link_cap.link_rate % 100); - - core_link_read_dpcd(link, - DP_SINK_VIDEO_FALLBACK_FORMATS, - &link->dpcd_caps.fallback_formats.raw, - sizeof(link->dpcd_caps.fallback_formats.raw)); - DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index); - if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support) - DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported"); - if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support) - DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported"); - if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support) - DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported"); - if (link->dpcd_caps.fallback_formats.raw == 0) { - DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported"); - link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1; - } - - core_link_read_dpcd(link, - DP_FEC_CAPABILITY_1, - &link->dpcd_caps.fec_cap1.raw, - sizeof(link->dpcd_caps.fec_cap1.raw)); - DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index); - if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE) - DC_LOG_DP2("\tFEC aggregated error counters are supported"); - } - - retrieve_cable_id(link); - dpcd_write_cable_id_to_dprx(link); - - /* Connectivity log: detection */ - CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); - - return true; -} - -bool dp_overwrite_extended_receiver_cap(struct dc_link *link) -{ - uint8_t dpcd_data[16]; - uint32_t read_dpcd_retry_cnt = 3; - enum dc_status status = DC_ERROR_UNEXPECTED; - union dp_downstream_port_present ds_port = { 0 }; - union down_stream_port_count down_strm_port_count; - union edp_configuration_cap edp_config_cap; - - int i; - - for (i = 0; i < read_dpcd_retry_cnt; i++) { - status = core_link_read_dpcd( - link, - DP_DPCD_REV, - dpcd_data, - sizeof(dpcd_data)); - if (status == DC_OK) - break; - } - - link->dpcd_caps.dpcd_rev.raw = - dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; - - if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) - return 
false; - - ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - - DP_DPCD_REV]; - - get_active_converter_info(ds_port.byte, link); - - down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - - DP_DPCD_REV]; - - link->dpcd_caps.allow_invalid_MSA_timing_param = - down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; - - link->dpcd_caps.max_ln_count.raw = dpcd_data[ - DP_MAX_LANE_COUNT - DP_DPCD_REV]; - - link->dpcd_caps.max_down_spread.raw = dpcd_data[ - DP_MAX_DOWNSPREAD - DP_DPCD_REV]; - - link->reported_link_cap.lane_count = - link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; - link->reported_link_cap.link_rate = dpcd_data[ - DP_MAX_LINK_RATE - DP_DPCD_REV]; - link->reported_link_cap.link_spread = - link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? - LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; - - edp_config_cap.raw = dpcd_data[ - DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; - link->dpcd_caps.panel_mode_edp = - edp_config_cap.bits.ALT_SCRAMBLER_RESET; - link->dpcd_caps.dpcd_display_control_capable = - edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; - - return true; -} - -bool detect_dp_sink_caps(struct dc_link *link) -{ - return retrieve_link_cap(link); -} - -static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz) -{ - enum dc_link_rate link_rate; - // LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation. - switch (link_rate_in_khz) { - case 1620000: - link_rate = LINK_RATE_LOW; // Rate_1 (RBR) - 1.62 Gbps/Lane - break; - case 2160000: - link_rate = LINK_RATE_RATE_2; // Rate_2 - 2.16 Gbps/Lane - break; - case 2430000: - link_rate = LINK_RATE_RATE_3; // Rate_3 - 2.43 Gbps/Lane - break; - case 2700000: - link_rate = LINK_RATE_HIGH; // Rate_4 (HBR) - 2.70 Gbps/Lane - break; - case 3240000: - link_rate = LINK_RATE_RBR2; // Rate_5 (RBR2) - 3.24 Gbps/Lane - break; - case 4320000: - link_rate = LINK_RATE_RATE_6; // Rate_6 - 4.32 Gbps/Lane - break; - case 5400000: - link_rate = LINK_RATE_HIGH2; // Rate_7 (HBR2) - 5.40 Gbps/Lane - break; - case 8100000: - link_rate = LINK_RATE_HIGH3; // Rate_8 (HBR3) - 8.10 Gbps/Lane - break; - default: - link_rate = LINK_RATE_UNKNOWN; - break; - } - return link_rate; -} - -void detect_edp_sink_caps(struct dc_link *link) -{ - uint8_t supported_link_rates[16]; - uint32_t entry; - uint32_t link_rate_in_khz; - enum dc_link_rate link_rate = LINK_RATE_UNKNOWN; - uint8_t backlight_adj_cap; - uint8_t general_edp_cap; - - retrieve_link_cap(link); - link->dpcd_caps.edp_supported_link_rates_count = 0; - memset(supported_link_rates, 0, sizeof(supported_link_rates)); - - /* - * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. - * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" - */ - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && - (link->panel_config.ilr.optimize_edp_link_rate || - link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) { - // Read DPCD 00010h - 0001Fh 16 bytes at one shot - core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, - supported_link_rates, sizeof(supported_link_rates)); - - for (entry = 0; entry < 16; entry += 2) { - // DPCD register reports per-lane link rate = 16-bit link rate capability - // value X 200 kHz. Need multiplier to find link rate in kHz. 
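			// Worked example: an entry of {0x30, 0x2A} is the little-endian
			// 16-bit value 0x2A30 = 10800, and 10800 * 200 kHz = 2,160,000 kHz,
			// i.e. the 2.16 Gbps-per-lane rate (LINK_RATE_RATE_2).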
- link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + - supported_link_rates[entry]) * 200; - - if (link_rate_in_khz != 0) { - link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz); - link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate; - link->dpcd_caps.edp_supported_link_rates_count++; - - if (link->reported_link_cap.link_rate < link_rate) - link->reported_link_cap.link_rate = link_rate; - } - } - } - core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP, - &backlight_adj_cap, sizeof(backlight_adj_cap)); - - link->dpcd_caps.dynamic_backlight_capable_edp = - (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false; - - core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1, - &general_edp_cap, sizeof(general_edp_cap)); - - link->dpcd_caps.set_power_state_capable_edp = - (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false; - - dc_link_set_default_brightness_aux(link); - - core_link_read_dpcd(link, DP_EDP_DPCD_REV, - &link->dpcd_caps.edp_rev, - sizeof(link->dpcd_caps.edp_rev)); - /* - * PSR is only valid for eDP v1.3 or higher. - */ - if (link->dpcd_caps.edp_rev >= DP_EDP_13) { - core_link_read_dpcd(link, DP_PSR_SUPPORT, - &link->dpcd_caps.psr_info.psr_version, - sizeof(link->dpcd_caps.psr_info.psr_version)); - if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8) - core_link_read_dpcd(link, DP_FORCE_PSRSU_CAPABILITY, - &link->dpcd_caps.psr_info.force_psrsu_cap, - sizeof(link->dpcd_caps.psr_info.force_psrsu_cap)); - core_link_read_dpcd(link, DP_PSR_CAPS, - &link->dpcd_caps.psr_info.psr_dpcd_caps.raw, - sizeof(link->dpcd_caps.psr_info.psr_dpcd_caps.raw)); - if (link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED) { - core_link_read_dpcd(link, DP_PSR2_SU_Y_GRANULARITY, - &link->dpcd_caps.psr_info.psr2_su_y_granularity_cap, - sizeof(link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)); - } - } - - /* - * ALPM is only valid for eDP v1.4 or higher. 
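	 * (Note the guard below compares the main DPCD revision field against
	 * the eDP revision constant DP_EDP_14, unlike the PSR block above,
	 * which checks the dedicated edp_rev byte.)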
- */ - if (link->dpcd_caps.dpcd_rev.raw >= DP_EDP_14) - core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP, - &link->dpcd_caps.alpm_caps.raw, - sizeof(link->dpcd_caps.alpm_caps.raw)); -} - -void dc_link_dp_enable_hpd(const struct dc_link *link) -{ - struct link_encoder *encoder = link->link_enc; - - if (encoder != NULL && encoder->funcs->enable_hpd != NULL) - encoder->funcs->enable_hpd(encoder); -} - -void dc_link_dp_disable_hpd(const struct dc_link *link) -{ - struct link_encoder *encoder = link->link_enc; - - if (encoder != NULL && encoder->funcs->enable_hpd != NULL) - encoder->funcs->disable_hpd(encoder); -} - -static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern) -{ - if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern && - test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) || - test_pattern == DP_TEST_PATTERN_VIDEO_MODE) - return true; - else - return false; -} - -static void set_crtc_test_pattern(struct dc_link *link, - struct pipe_ctx *pipe_ctx, - enum dp_test_pattern test_pattern, - enum dp_test_pattern_color_space test_pattern_color_space) -{ - enum controller_dp_test_pattern controller_test_pattern; - enum dc_color_depth color_depth = pipe_ctx-> - stream->timing.display_color_depth; - struct bit_depth_reduction_params params; - struct output_pixel_processor *opp = pipe_ctx->stream_res.opp; - int width = pipe_ctx->stream->timing.h_addressable + - pipe_ctx->stream->timing.h_border_left + - pipe_ctx->stream->timing.h_border_right; - int height = pipe_ctx->stream->timing.v_addressable + - pipe_ctx->stream->timing.v_border_bottom + - pipe_ctx->stream->timing.v_border_top; - - memset(¶ms, 0, sizeof(params)); - - switch (test_pattern) { - case DP_TEST_PATTERN_COLOR_SQUARES: - controller_test_pattern = - CONTROLLER_DP_TEST_PATTERN_COLORSQUARES; - break; - case DP_TEST_PATTERN_COLOR_SQUARES_CEA: - controller_test_pattern = - CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA; - break; - case DP_TEST_PATTERN_VERTICAL_BARS: - controller_test_pattern = - CONTROLLER_DP_TEST_PATTERN_VERTICALBARS; - break; - case DP_TEST_PATTERN_HORIZONTAL_BARS: - controller_test_pattern = - CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS; - break; - case DP_TEST_PATTERN_COLOR_RAMP: - controller_test_pattern = - CONTROLLER_DP_TEST_PATTERN_COLORRAMP; - break; - default: - controller_test_pattern = - CONTROLLER_DP_TEST_PATTERN_VIDEOMODE; - break; - } - - switch (test_pattern) { - case DP_TEST_PATTERN_COLOR_SQUARES: - case DP_TEST_PATTERN_COLOR_SQUARES_CEA: - case DP_TEST_PATTERN_VERTICAL_BARS: - case DP_TEST_PATTERN_HORIZONTAL_BARS: - case DP_TEST_PATTERN_COLOR_RAMP: - { - /* disable bit depth reduction */ - pipe_ctx->stream->bit_depth_params = params; - opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); - if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) - pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, - controller_test_pattern, color_depth); - else if (link->dc->hwss.set_disp_pattern_generator) { - struct pipe_ctx *odm_pipe; - enum controller_dp_color_space controller_color_space; - int opp_cnt = 1; - int offset = 0; - int dpg_width = width; - - switch (test_pattern_color_space) { - case DP_TEST_PATTERN_COLOR_SPACE_RGB: - controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB; - break; - case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601: - controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601; - break; - case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709: - controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709; - break; - case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED: - default: - 
controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; - DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__); - ASSERT(0); - break; - } - - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - opp_cnt++; - dpg_width = width / opp_cnt; - offset = dpg_width; - - link->dc->hwss.set_disp_pattern_generator(link->dc, - pipe_ctx, - controller_test_pattern, - controller_color_space, - color_depth, - NULL, - dpg_width, - height, - 0); - - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { - struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; - - odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); - link->dc->hwss.set_disp_pattern_generator(link->dc, - odm_pipe, - controller_test_pattern, - controller_color_space, - color_depth, - NULL, - dpg_width, - height, - offset); - offset += offset; - } - } - } - break; - case DP_TEST_PATTERN_VIDEO_MODE: - { - /* restore bitdepth reduction */ - resource_build_bit_depth_reduction_params(pipe_ctx->stream, ¶ms); - pipe_ctx->stream->bit_depth_params = params; - opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); - if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) - pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, - CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, - color_depth); - else if (link->dc->hwss.set_disp_pattern_generator) { - struct pipe_ctx *odm_pipe; - int opp_cnt = 1; - int dpg_width; - - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - opp_cnt++; - - dpg_width = width / opp_cnt; - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { - struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; - - odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); - link->dc->hwss.set_disp_pattern_generator(link->dc, - odm_pipe, - CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, - CONTROLLER_DP_COLOR_SPACE_UDEFINED, - color_depth, - NULL, - dpg_width, - height, - 0); - } - link->dc->hwss.set_disp_pattern_generator(link->dc, - pipe_ctx, - CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, - CONTROLLER_DP_COLOR_SPACE_UDEFINED, - color_depth, - NULL, - dpg_width, - height, - 0); - } - } - break; - - default: - break; - } -} - -bool dc_link_dp_set_test_pattern( - struct dc_link *link, - enum dp_test_pattern test_pattern, - enum dp_test_pattern_color_space test_pattern_color_space, - const struct link_training_settings *p_link_settings, - const unsigned char *p_custom_pattern, - unsigned int cust_pattern_size) -{ - struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; - struct pipe_ctx *pipe_ctx = NULL; - unsigned int lane; - unsigned int i; - unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0}; - union dpcd_training_pattern training_pattern; - enum dpcd_phy_test_patterns pattern; - - memset(&training_pattern, 0, sizeof(training_pattern)); - - for (i = 0; i < MAX_PIPES; i++) { - if (pipes[i].stream == NULL) - continue; - - if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) { - pipe_ctx = &pipes[i]; - break; - } - } - - if (pipe_ctx == NULL) - return false; - - /* Reset CRTC Test Pattern if it is currently running and request is VideoMode */ - if (link->test_pattern_enabled && test_pattern == - DP_TEST_PATTERN_VIDEO_MODE) { - /* Set CRTC Test Pattern */ - set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); - dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern, - 
(uint8_t *)p_custom_pattern, - (uint32_t)cust_pattern_size); - - /* Unblank Stream */ - link->dc->hwss.unblank_stream( - pipe_ctx, - &link->verified_link_cap); - /* TODO:m_pHwss->MuteAudioEndpoint - * (pPathMode->pDisplayPath, false); - */ - - /* Reset Test Pattern state */ - link->test_pattern_enabled = false; - - return true; - } - - /* Check for PHY Test Patterns */ - if (is_dp_phy_pattern(test_pattern)) { - /* Set DPCD Lane Settings before running test pattern */ - if (p_link_settings != NULL) { - if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && - p_link_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) { - dp_fixed_vs_pe_set_retimer_lane_settings( - link, - p_link_settings->dpcd_lane_settings, - p_link_settings->link_settings.lane_count); - } else { - dp_set_hw_lane_settings(link, &pipe_ctx->link_res, p_link_settings, DPRX); - } - dpcd_set_lane_settings(link, p_link_settings, DPRX); - } - - /* Blank stream if running test pattern */ - if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) { - /*TODO: - * m_pHwss-> - * MuteAudioEndpoint(pPathMode->pDisplayPath, true); - */ - /* Blank stream */ - pipes->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc); - } - - dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern, - (uint8_t *)p_custom_pattern, - (uint32_t)cust_pattern_size); - - if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) { - /* Set Test Pattern state */ - link->test_pattern_enabled = true; - if (p_link_settings != NULL) - dpcd_set_link_settings(link, - p_link_settings); - } - - switch (test_pattern) { - case DP_TEST_PATTERN_VIDEO_MODE: - pattern = PHY_TEST_PATTERN_NONE; - break; - case DP_TEST_PATTERN_D102: - pattern = PHY_TEST_PATTERN_D10_2; - break; - case DP_TEST_PATTERN_SYMBOL_ERROR: - pattern = PHY_TEST_PATTERN_SYMBOL_ERROR; - break; - case DP_TEST_PATTERN_PRBS7: - pattern = PHY_TEST_PATTERN_PRBS7; - break; - case DP_TEST_PATTERN_80BIT_CUSTOM: - pattern = PHY_TEST_PATTERN_80BIT_CUSTOM; - break; - case DP_TEST_PATTERN_CP2520_1: - pattern = PHY_TEST_PATTERN_CP2520_1; - break; - case DP_TEST_PATTERN_CP2520_2: - pattern = PHY_TEST_PATTERN_CP2520_2; - break; - case DP_TEST_PATTERN_CP2520_3: - pattern = PHY_TEST_PATTERN_CP2520_3; - break; - case DP_TEST_PATTERN_128b_132b_TPS1: - pattern = PHY_TEST_PATTERN_128b_132b_TPS1; - break; - case DP_TEST_PATTERN_128b_132b_TPS2: - pattern = PHY_TEST_PATTERN_128b_132b_TPS2; - break; - case DP_TEST_PATTERN_PRBS9: - pattern = PHY_TEST_PATTERN_PRBS9; - break; - case DP_TEST_PATTERN_PRBS11: - pattern = PHY_TEST_PATTERN_PRBS11; - break; - case DP_TEST_PATTERN_PRBS15: - pattern = PHY_TEST_PATTERN_PRBS15; - break; - case DP_TEST_PATTERN_PRBS23: - pattern = PHY_TEST_PATTERN_PRBS23; - break; - case DP_TEST_PATTERN_PRBS31: - pattern = PHY_TEST_PATTERN_PRBS31; - break; - case DP_TEST_PATTERN_264BIT_CUSTOM: - pattern = PHY_TEST_PATTERN_264BIT_CUSTOM; - break; - case DP_TEST_PATTERN_SQUARE_PULSE: - pattern = PHY_TEST_PATTERN_SQUARE_PULSE; - break; - default: - return false; - } - - if (test_pattern == DP_TEST_PATTERN_VIDEO_MODE - /*TODO:&& !pPathMode->pDisplayPath->IsTargetPoweredOn()*/) - return false; - - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) - core_link_write_dpcd(link, - DP_LINK_SQUARE_PATTERN, - p_custom_pattern, - 1); - -#endif - /* tell receiver that we are sending qualification - * pattern DP 1.2 or later - DP receiver's link quality - * pattern is set using DPCD LINK_QUAL_LANEx_SET - * register 
(0x10B~0x10E)\ - */ - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) - link_qual_pattern[lane] = - (unsigned char)(pattern); - - core_link_write_dpcd(link, - DP_LINK_QUAL_LANE0_SET, - link_qual_pattern, - sizeof(link_qual_pattern)); - } else if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_10 || - link->dpcd_caps.dpcd_rev.raw == 0) { - /* tell receiver that we are sending qualification - * pattern DP 1.1a or earlier - DP receiver's link - * quality pattern is set using - * DPCD TRAINING_PATTERN_SET -> LINK_QUAL_PATTERN_SET - * register (0x102). We will use v_1.3 when we are - * setting test pattern for DP 1.1. - */ - core_link_read_dpcd(link, DP_TRAINING_PATTERN_SET, - &training_pattern.raw, - sizeof(training_pattern)); - training_pattern.v1_3.LINK_QUAL_PATTERN_SET = pattern; - core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET, - &training_pattern.raw, - sizeof(training_pattern)); - } - } else { - enum dc_color_space color_space = COLOR_SPACE_UNKNOWN; - - switch (test_pattern_color_space) { - case DP_TEST_PATTERN_COLOR_SPACE_RGB: - color_space = COLOR_SPACE_SRGB; - if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) - color_space = COLOR_SPACE_SRGB_LIMITED; - break; - - case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601: - color_space = COLOR_SPACE_YCBCR601; - if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) - color_space = COLOR_SPACE_YCBCR601_LIMITED; - break; - case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709: - color_space = COLOR_SPACE_YCBCR709; - if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) - color_space = COLOR_SPACE_YCBCR709_LIMITED; - break; - default: - break; - } - - if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) { - if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) { - union dmub_hw_lock_flags hw_locks = { 0 }; - struct dmub_hw_lock_inst_flags inst_flags = { 0 }; - - hw_locks.bits.lock_dig = 1; - inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst; - - dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv, - true, - &hw_locks, - &inst_flags); - } else - pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable( - pipe_ctx->stream_res.tg); - } - - pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); - /* update MSA to requested color space */ - pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc, - &pipe_ctx->stream->timing, - color_space, - pipe_ctx->stream->use_vsc_sdp_for_colorimetry, - link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); - - if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) { - if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) - pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range - else - pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7); - resource_build_info_frame(pipe_ctx); - link->dc->hwss.update_info_frame(pipe_ctx); - } - - /* CRTC Patterns */ - set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); - pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); - pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, - CRTC_STATE_VACTIVE); - pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, - CRTC_STATE_VBLANK); - pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, - CRTC_STATE_VACTIVE); - - if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) { - if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) { - union dmub_hw_lock_flags hw_locks = { 0 }; - struct dmub_hw_lock_inst_flags inst_flags = { 0 
}; - - hw_locks.bits.lock_dig = 1; - inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst; - - dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv, - false, - &hw_locks, - &inst_flags); - } else - pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable( - pipe_ctx->stream_res.tg); - } - - /* Set Test Pattern state */ - link->test_pattern_enabled = true; - } - - return true; -} - -void dp_enable_mst_on_sink(struct dc_link *link, bool enable) -{ - unsigned char mstmCntl; - - core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1); - if (enable) - mstmCntl |= DP_MST_EN; - else - mstmCntl &= (~DP_MST_EN); - - core_link_write_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1); -} - -void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode) -{ - union dpcd_edp_config edp_config_set; - bool panel_mode_edp = false; - - memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config)); - - if (panel_mode != DP_PANEL_MODE_DEFAULT) { - - switch (panel_mode) { - case DP_PANEL_MODE_EDP: - case DP_PANEL_MODE_SPECIAL: - panel_mode_edp = true; - break; - - default: - break; - } - - /*set edp panel mode in receiver*/ - core_link_read_dpcd( - link, - DP_EDP_CONFIGURATION_SET, - &edp_config_set.raw, - sizeof(edp_config_set.raw)); - - if (edp_config_set.bits.PANEL_MODE_EDP - != panel_mode_edp) { - enum dc_status result; - - edp_config_set.bits.PANEL_MODE_EDP = - panel_mode_edp; - result = core_link_write_dpcd( - link, - DP_EDP_CONFIGURATION_SET, - &edp_config_set.raw, - sizeof(edp_config_set.raw)); - - ASSERT(result == DC_OK); - } - } - DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d " - "eDP panel mode enabled: %d \n", - link->link_index, - link->dpcd_caps.panel_mode_edp, - panel_mode_edp); -} - -enum dp_panel_mode dp_get_panel_mode(struct dc_link *link) -{ - /* We need to explicitly check that connector - * is not DP. Some Travis_VGA get reported - * by video bios as DP. - */ - if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) { - - switch (link->dpcd_caps.branch_dev_id) { - case DP_BRANCH_DEVICE_ID_0022B9: - /* alternate scrambler reset is required for Travis - * for the case when external chip does not - * provide sink device id, alternate scrambler - * scheme will be overriden later by querying - * Encoder features - */ - if (strncmp( - link->dpcd_caps.branch_dev_name, - DP_VGA_LVDS_CONVERTER_ID_2, - sizeof( - link->dpcd_caps. - branch_dev_name)) == 0) { - return DP_PANEL_MODE_SPECIAL; - } - break; - case DP_BRANCH_DEVICE_ID_00001A: - /* alternate scrambler reset is required for Travis - * for the case when external chip does not provide - * sink device id, alternate scrambler scheme will - * be overriden later by querying Encoder feature - */ - if (strncmp(link->dpcd_caps.branch_dev_name, - DP_VGA_LVDS_CONVERTER_ID_3, - sizeof( - link->dpcd_caps. - branch_dev_name)) == 0) { - return DP_PANEL_MODE_SPECIAL; - } - break; - default: - break; - } - } - - if (link->dpcd_caps.panel_mode_edp && - (link->connector_signal == SIGNAL_TYPE_EDP || - (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && - link->is_internal_display))) { - return DP_PANEL_MODE_EDP; - } - - return DP_PANEL_MODE_DEFAULT; -} - -enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready) -{ - /* FEC has to be "set ready" before the link training. - * The policy is to always train with FEC - * if the sink supports it and leave it enabled on link. - * If FEC is not supported, disable it. 
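The FEC enable path a little further down (dp_set_fec_enable()) waits 7 microseconds before enabling FEC; the number follows from the 1000 link-layer-code requirement cited in its comment. A minimal standalone check of that arithmetic, assuming one LL code is a single 10-bit 8b/10b symbol (that interpretation is an assumption, not stated in the patch):

    #include <stdio.h>

    int main(void)
    {
        double rbr_lane_bps = 1.62e9; /* RBR line rate on one lane */
        double bits_per_code = 10.0;  /* assumption: 1 LL code == one 10-bit symbol */
        double codes = 1000.0;        /* wait for 1000 LL codes after training */

        /* 1000 * 10 / 1.62e9 = 6.173 us; the driver rounds up to 7 us */
        printf("%.3f us\n", codes * bits_per_code / rbr_lane_bps * 1e6);
        return 0;
    }

One lane at RBR is the slowest supported configuration, so 6.173 us is the worst case; any faster link transmits the 1000 codes sooner.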
- */ - struct link_encoder *link_enc = NULL; - enum dc_status status = DC_OK; - uint8_t fec_config = 0; - - link_enc = link_enc_cfg_get_link_enc(link); - ASSERT(link_enc); - - if (!dc_link_should_enable_fec(link)) - return status; - - if (link_enc->funcs->fec_set_ready && - link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { - if (ready) { - fec_config = 1; - status = core_link_write_dpcd(link, - DP_FEC_CONFIGURATION, - &fec_config, - sizeof(fec_config)); - if (status == DC_OK) { - link_enc->funcs->fec_set_ready(link_enc, true); - link->fec_state = dc_link_fec_ready; - } else { - link_enc->funcs->fec_set_ready(link_enc, false); - link->fec_state = dc_link_fec_not_ready; - dm_error("dpcd write failed to set fec_ready"); - } - } else if (link->fec_state == dc_link_fec_ready) { - fec_config = 0; - status = core_link_write_dpcd(link, - DP_FEC_CONFIGURATION, - &fec_config, - sizeof(fec_config)); - link_enc->funcs->fec_set_ready(link_enc, false); - link->fec_state = dc_link_fec_not_ready; - } - } - - return status; -} - -void dp_set_fec_enable(struct dc_link *link, bool enable) -{ - struct link_encoder *link_enc = NULL; - - link_enc = link_enc_cfg_get_link_enc(link); - ASSERT(link_enc); - - if (!dc_link_should_enable_fec(link)) - return; - - if (link_enc->funcs->fec_set_enable && - link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { - if (link->fec_state == dc_link_fec_ready && enable) { - /* Accord to DP spec, FEC enable sequence can first - * be transmitted anytime after 1000 LL codes have - * been transmitted on the link after link training - * completion. Using 1 lane RBR should have the maximum - * time for transmitting 1000 LL codes which is 6.173 us. - * So use 7 microseconds delay instead. - */ - udelay(7); - link_enc->funcs->fec_set_enable(link_enc, true); - link->fec_state = dc_link_fec_enabled; - } else if (link->fec_state == dc_link_fec_enabled && !enable) { - link_enc->funcs->fec_set_enable(link_enc, false); - link->fec_state = dc_link_fec_ready; - } - } -} - -void dpcd_set_source_specific_data(struct dc_link *link) -{ - if (!link->dc->vendor_signature.is_valid) { - enum dc_status __maybe_unused result_write_min_hblank = DC_NOT_SUPPORTED; - struct dpcd_amd_signature amd_signature = {0}; - struct dpcd_amd_device_id amd_device_id = {0}; - - amd_device_id.device_id_byte1 = - (uint8_t)(link->ctx->asic_id.chip_id); - amd_device_id.device_id_byte2 = - (uint8_t)(link->ctx->asic_id.chip_id >> 8); - amd_device_id.dce_version = - (uint8_t)(link->ctx->dce_version); - amd_device_id.dal_version_byte1 = 0x0; // needed? where to get? - amd_device_id.dal_version_byte2 = 0x0; // needed? where to get? 
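As a sketch of the DPCD layout programmed by dpcd_set_source_specific_data() around this point: the AMD IEEE OUI 00-00-1A goes to DP_SOURCE_OUI (0x300 in drm_dp.h) and the device identification bytes follow at OUI+0x03. Standalone illustration; the chip_id value is made up:

    #include <stdint.h>
    #include <stdio.h>

    #define DP_SOURCE_OUI 0x300 /* drm_dp.h */

    int main(void)
    {
        uint8_t oui[3] = { 0x00, 0x00, 0x1A };  /* AMD_IEEE_TxSignature bytes */
        uint16_t chip_id = 0x1234;              /* illustrative asic_id.chip_id */
        /* device_id_byte1/2 carry the low and high bytes of chip_id */
        uint8_t dev_id[2] = { (uint8_t)(chip_id & 0xFF), (uint8_t)(chip_id >> 8) };

        printf("DPCD 0x%03X..0x%03X = %02X %02X %02X (source OUI)\n",
               DP_SOURCE_OUI, DP_SOURCE_OUI + 2, oui[0], oui[1], oui[2]);
        printf("DPCD 0x%03X..0x%03X = %02X %02X (device id at OUI+0x03)\n",
               DP_SOURCE_OUI + 3, DP_SOURCE_OUI + 4, dev_id[0], dev_id[1]);
        return 0;
    }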
- - core_link_read_dpcd(link, DP_SOURCE_OUI, - (uint8_t *)(&amd_signature), - sizeof(amd_signature)); - - if (!((amd_signature.AMD_IEEE_TxSignature_byte1 == 0x0) && - (amd_signature.AMD_IEEE_TxSignature_byte2 == 0x0) && - (amd_signature.AMD_IEEE_TxSignature_byte3 == 0x1A))) { - - amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0; - amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0; - amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A; - - core_link_write_dpcd(link, DP_SOURCE_OUI, - (uint8_t *)(&amd_signature), - sizeof(amd_signature)); - } - - core_link_write_dpcd(link, DP_SOURCE_OUI+0x03, - (uint8_t *)(&amd_device_id), - sizeof(amd_device_id)); - - if (link->ctx->dce_version >= DCN_VERSION_2_0 && - link->dc->caps.min_horizontal_blanking_period != 0) { - - uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period; - - if (link->preferred_link_setting.dpcd_source_device_specific_field_support) { - result_write_min_hblank = core_link_write_dpcd(link, - DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size), - sizeof(hblank_size)); - - if (result_write_min_hblank == DC_ERROR_UNEXPECTED) - link->preferred_link_setting.dpcd_source_device_specific_field_support = false; - } else { - DC_LOG_DC("Sink device does not support 00340h DPCD write. Skipping on purpose.\n"); - } - } - - DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, - WPP_BIT_FLAG_DC_DETECTION_DP_CAPS, - "result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'", - result_write_min_hblank, - link->link_index, - link->ctx->dce_version, - DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, - link->dc->caps.min_horizontal_blanking_period, - link->dpcd_caps.branch_dev_id, - link->dpcd_caps.branch_dev_name[0], - link->dpcd_caps.branch_dev_name[1], - link->dpcd_caps.branch_dev_name[2], - link->dpcd_caps.branch_dev_name[3], - link->dpcd_caps.branch_dev_name[4], - link->dpcd_caps.branch_dev_name[5]); - } else { - core_link_write_dpcd(link, DP_SOURCE_OUI, - link->dc->vendor_signature.data.raw, - sizeof(link->dc->vendor_signature.data.raw)); - } -} - -void dpcd_write_cable_id_to_dprx(struct dc_link *link) -{ - if (!link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED || - link->dpcd_caps.cable_id.raw == 0 || - link->dprx_states.cable_id_written) - return; - - core_link_write_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX, - &link->dpcd_caps.cable_id.raw, - sizeof(link->dpcd_caps.cable_id.raw)); - - link->dprx_states.cable_id_written = 1; -} - -bool dc_link_set_backlight_level_nits(struct dc_link *link, - bool isHDR, - uint32_t backlight_millinits, - uint32_t transition_time_in_ms) -{ - struct dpcd_source_backlight_set dpcd_backlight_set; - uint8_t backlight_control = isHDR ? 
1 : 0; - - if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && - link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) - return false; - - // OLEDs have no PWM, they can only use AUX - if (link->dpcd_sink_ext_caps.bits.oled == 1) - backlight_control = 1; - - *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; - *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; - - - if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, - (uint8_t *)(&dpcd_backlight_set), - sizeof(dpcd_backlight_set)) != DC_OK) - return false; - - if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, - &backlight_control, 1) != DC_OK) - return false; - - return true; -} - -bool dc_link_get_backlight_level_nits(struct dc_link *link, - uint32_t *backlight_millinits_avg, - uint32_t *backlight_millinits_peak) -{ - union dpcd_source_backlight_get dpcd_backlight_get; - - memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get)); - - if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && - link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) - return false; - - if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK, - dpcd_backlight_get.raw, - sizeof(union dpcd_source_backlight_get)) != DC_OK) - return false; - - *backlight_millinits_avg = - dpcd_backlight_get.bytes.backlight_millinits_avg; - *backlight_millinits_peak = - dpcd_backlight_get.bytes.backlight_millinits_peak; - - /* On non-supported panels dpcd_read usually succeeds with 0 returned */ - if (*backlight_millinits_avg == 0 || - *backlight_millinits_avg > *backlight_millinits_peak) - return false; - - return true; -} - -bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable) -{ - uint8_t backlight_enable = enable ? 
1 : 0; - - if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && - link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) - return false; - - if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE, - &backlight_enable, 1) != DC_OK) - return false; - - return true; -} - -// we read default from 0x320 because we expect BIOS wrote it there -// regular get_backlight_nit reads from panel set at 0x326 -bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits) -{ - if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && - link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) - return false; - - if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, - (uint8_t *) backlight_millinits, - sizeof(uint32_t)) != DC_OK) - return false; - - return true; -} - -bool dc_link_set_default_brightness_aux(struct dc_link *link) -{ - uint32_t default_backlight; - - if (link && link->dpcd_sink_ext_caps.bits.oled == 1) { - if (!dc_link_read_default_bl_aux(link, &default_backlight)) - default_backlight = 150000; - // if < 5 nits or > 5000, it might be wrong readback - if (default_backlight < 5000 || default_backlight > 5000000) - default_backlight = 150000; // - - return dc_link_set_backlight_level_nits(link, true, - default_backlight, 0); - } - return false; -} - -bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing) -{ - struct dc_link_settings link_setting; - uint8_t link_bw_set; - uint8_t link_rate_set; - uint32_t req_bw; - union lane_count_set lane_count_set = {0}; - - ASSERT(link || crtc_timing); // invalid input - - if (link->dpcd_caps.edp_supported_link_rates_count == 0 || - !link->panel_config.ilr.optimize_edp_link_rate) - return false; - - - // Read DPCD 00100h to find if standard link rates are set - core_link_read_dpcd(link, DP_LINK_BW_SET, - &link_bw_set, sizeof(link_bw_set)); - - if (link_bw_set) { - DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS used link_bw_set\n"); - return true; - } - - // Read DPCD 00115h to find the edp link rate set used - core_link_read_dpcd(link, DP_LINK_RATE_SET, - &link_rate_set, sizeof(link_rate_set)); - - // Read DPCD 00101h to find out the number of lanes currently set - core_link_read_dpcd(link, DP_LANE_COUNT_SET, - &lane_count_set.raw, sizeof(lane_count_set)); - - req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing); - - if (!crtc_timing->flags.DSC) - decide_edp_link_settings(link, &link_setting, req_bw); - else - decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN); - - if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate || - lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) { - DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS link_rate_set not optimal\n"); - return true; - } - - DC_LOG_EVENT_LINK_TRAINING("eDP ILR: No optimization required, VBIOS set optimal link_rate_set\n"); - return false; -} - -enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings) -{ - if ((link_settings->link_rate >= LINK_RATE_LOW) && - (link_settings->link_rate <= LINK_RATE_HIGH3)) - return DP_8b_10b_ENCODING; - else if ((link_settings->link_rate >= LINK_RATE_UHBR10) && - (link_settings->link_rate <= LINK_RATE_UHBR20)) - return DP_128b_132b_ENCODING; - return DP_UNKNOWN_ENCODING; -} - -enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link) -{ - struct dc_link_settings link_settings = {0}; - - if 
(!dc_is_dp_signal(link->connector_signal)) - return DP_UNKNOWN_ENCODING; - - if (link->preferred_link_setting.lane_count != - LANE_COUNT_UNKNOWN && - link->preferred_link_setting.link_rate != - LINK_RATE_UNKNOWN) { - link_settings = link->preferred_link_setting; - } else { - decide_mst_link_settings(link, &link_settings); - } - - return dp_get_link_encoding_format(&link_settings); -} - -// TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST) -static void get_lane_status( - struct dc_link *link, - uint32_t lane_count, - union lane_status *status, - union lane_align_status_updated *status_updated) -{ - unsigned int lane; - uint8_t dpcd_buf[3] = {0}; - - if (status == NULL || status_updated == NULL) { - return; - } - - core_link_read_dpcd( - link, - DP_LANE0_1_STATUS, - dpcd_buf, - sizeof(dpcd_buf)); - - for (lane = 0; lane < lane_count; lane++) { - status[lane].raw = get_nibble_at_index(&dpcd_buf[0], lane); - } - - status_updated->raw = dpcd_buf[2]; -} - -bool dpcd_write_128b_132b_sst_payload_allocation_table( - const struct dc_stream_state *stream, - struct dc_link *link, - struct link_mst_stream_allocation_table *proposed_table, - bool allocate) -{ - const uint8_t vc_id = 1; /// VC ID always 1 for SST - const uint8_t start_time_slot = 0; /// Always start at time slot 0 for SST - bool result = false; - uint8_t req_slot_count = 0; - struct fixed31_32 avg_time_slots_per_mtp = { 0 }; - union payload_table_update_status update_status = { 0 }; - const uint32_t max_retries = 30; - uint32_t retries = 0; - - if (allocate) { - avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link); - req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); - /// Validation should filter out modes that exceed link BW - ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT); - if (req_slot_count > MAX_MTP_SLOT_COUNT) - return false; - } else { - /// Leave req_slot_count = 0 if allocate is false. 
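To make the slot-count computation above concrete: with 64 time slots per MTP (the driver's MAX_MTP_SLOT_COUNT), one slot carries 1/64 of the effective link bandwidth, and the SST stream needs ceil(timing_bw / slot_bw) slots. A standalone sketch with illustrative UHBR10 numbers, using plain doubles instead of the driver's fixed31_32 and ignoring secondary overheads:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        /* 4 lanes x 10 Gbps, scaled by the 128b/132b symbol efficiency */
        double link_bw_kbps = 4.0 * 1.0e7 * 128.0 / 132.0;
        double timing_bw_kbps = 533250.0 * 24.0; /* pixel clock (kHz) x bpp */
        double slot_bw_kbps = link_bw_kbps / 64.0;
        double avg_slots = timing_bw_kbps / slot_bw_kbps;

        printf("avg_time_slots_per_mtp = %.3f -> req_slot_count = %.0f\n",
               avg_slots, ceil(avg_slots));
        return 0;
    }

calculate_sst_avg_time_slots_per_mtp() further down performs the same division in fixed point, and the ASSERT above rejects requests beyond the 64 available slots.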
- } - - proposed_table->stream_count = 1; /// Always 1 stream for SST - proposed_table->stream_allocations[0].slot_count = req_slot_count; - proposed_table->stream_allocations[0].vcp_id = vc_id; - - if (link->aux_access_disabled) - return true; - - /// Write DPCD 2C0 = 1 to start updating - update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1; - core_link_write_dpcd( - link, - DP_PAYLOAD_TABLE_UPDATE_STATUS, - &update_status.raw, - 1); - - /// Program the changes in DPCD 1C0 - 1C2 - ASSERT(vc_id == 1); - core_link_write_dpcd( - link, - DP_PAYLOAD_ALLOCATE_SET, - &vc_id, - 1); - - ASSERT(start_time_slot == 0); - core_link_write_dpcd( - link, - DP_PAYLOAD_ALLOCATE_START_TIME_SLOT, - &start_time_slot, - 1); - - core_link_write_dpcd( - link, - DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT, - &req_slot_count, - 1); - - /// Poll till DPCD 2C0 read 1 - /// Try for at least 150ms (30 retries, with 5ms delay after each attempt) - - while (retries < max_retries) { - if (core_link_read_dpcd( - link, - DP_PAYLOAD_TABLE_UPDATE_STATUS, - &update_status.raw, - 1) == DC_OK) { - if (update_status.bits.VC_PAYLOAD_TABLE_UPDATED == 1) { - DC_LOG_DP2("SST Update Payload: downstream payload table updated."); - result = true; - break; - } - } else { - union dpcd_rev dpcdRev; - - if (core_link_read_dpcd( - link, - DP_DPCD_REV, - &dpcdRev.raw, - 1) != DC_OK) { - DC_LOG_ERROR("SST Update Payload: Unable to read DPCD revision " - "of sink while polling payload table " - "updated status bit."); - break; - } - } - retries++; - msleep(5); - } - - if (!result && retries == max_retries) { - DC_LOG_ERROR("SST Update Payload: Payload table not updated after retries, " - "continue on. Something is wrong with the branch."); - // TODO - DP2.0 Payload: Read and log the payload table from downstream branch - } - - return result; -} - -bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link) -{ - /* - * wait for ACT handled - */ - int i; - const int act_retries = 30; - enum act_return_status result = ACT_FAILED; - union payload_table_update_status update_status = {0}; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; - union lane_align_status_updated lane_status_updated; - - if (link->aux_access_disabled) - return true; - for (i = 0; i < act_retries; i++) { - get_lane_status(link, link->cur_link_settings.lane_count, dpcd_lane_status, &lane_status_updated); - - if (!dp_is_cr_done(link->cur_link_settings.lane_count, dpcd_lane_status) || - !dp_is_ch_eq_done(link->cur_link_settings.lane_count, dpcd_lane_status) || - !dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) || - !dp_is_interlane_aligned(lane_status_updated)) { - DC_LOG_ERROR("SST Update Payload: Link loss occurred while " - "polling for ACT handled."); - result = ACT_LINK_LOST; - break; - } - core_link_read_dpcd( - link, - DP_PAYLOAD_TABLE_UPDATE_STATUS, - &update_status.raw, - 1); - - if (update_status.bits.ACT_HANDLED == 1) { - DC_LOG_DP2("SST Update Payload: ACT handled by downstream."); - result = ACT_SUCCESS; - break; - } - - msleep(5); - } - - if (result == ACT_FAILED) { - DC_LOG_ERROR("SST Update Payload: ACT still not handled after retries, " - "continue on. 
Something is wrong with the branch."); - } - - return (result == ACT_SUCCESS); -} - -struct fixed31_32 calculate_sst_avg_time_slots_per_mtp( - const struct dc_stream_state *stream, - const struct dc_link *link) -{ - struct fixed31_32 link_bw_effective = - dc_fixpt_from_int( - dc_link_bandwidth_kbps(link, &link->cur_link_settings)); - struct fixed31_32 timeslot_bw_effective = - dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT); - struct fixed31_32 timing_bw = - dc_fixpt_from_int( - dc_bandwidth_in_kbps_from_timing(&stream->timing)); - struct fixed31_32 avg_time_slots_per_mtp = - dc_fixpt_div(timing_bw, timeslot_bw_effective); - - return avg_time_slots_per_mtp; -} - -bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) -{ - /* If this assert is hit then we have a link encoder dynamic management issue */ - ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true); - return (pipe_ctx->stream_res.hpo_dp_stream_enc && - pipe_ctx->link_res.hpo_dp_link_enc && - dc_is_dp_signal(pipe_ctx->stream->signal)); -} - -void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd) -{ - if (link->connector_signal != SIGNAL_TYPE_EDP) - return; - - link->dc->hwss.edp_power_control(link, true); - if (wait_for_hpd) - link->dc->hwss.edp_wait_for_hpd_ready(link, true); - if (link->dc->hwss.edp_backlight_control) - link->dc->hwss.edp_backlight_control(link, true); -} - -void dc_link_clear_dprx_states(struct dc_link *link) -{ - memset(&link->dprx_states, 0, sizeof(link->dprx_states)); -} - -void dp_receiver_power_ctrl(struct dc_link *link, bool on) -{ - uint8_t state; - - state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3; - - if (link->sync_lt_in_progress) - return; - - core_link_write_dpcd(link, DP_SET_POWER, &state, - sizeof(state)); - -} - -void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode) -{ - if (link != NULL && link->dc->debug.enable_driver_sequence_debug) - core_link_write_dpcd(link, DP_SOURCE_SEQUENCE, - &dp_test_mode, sizeof(dp_test_mode)); -} - - -static uint8_t convert_to_count(uint8_t lttpr_repeater_count) -{ - switch (lttpr_repeater_count) { - case 0x80: // 1 lttpr repeater - return 1; - case 0x40: // 2 lttpr repeaters - return 2; - case 0x20: // 3 lttpr repeaters - return 3; - case 0x10: // 4 lttpr repeaters - return 4; - case 0x08: // 5 lttpr repeaters - return 5; - case 0x04: // 6 lttpr repeaters - return 6; - case 0x02: // 7 lttpr repeaters - return 7; - case 0x01: // 8 lttpr repeaters - return 8; - default: - break; - } - return 0; // invalid value -} - -static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset) -{ - return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset); -} - -void dp_enable_link_phy( - struct dc_link *link, - const struct link_resource *link_res, - enum signal_type signal, - enum clock_source_id clock_source, - const struct dc_link_settings *link_settings) -{ - link->cur_link_settings = *link_settings; - link->dc->hwss.enable_dp_link_output(link, link_res, signal, - clock_source, link_settings); - dp_receiver_power_ctrl(link, true); -} - -void edp_add_delay_for_T9(struct dc_link *link) -{ - if (link && link->panel_config.pps.extra_delay_backlight_off > 0) - udelay(link->panel_config.pps.extra_delay_backlight_off * 1000); -} - -bool edp_receiver_ready_T9(struct dc_link *link) -{ - unsigned int tries = 0; - unsigned char sinkstatus = 0; - unsigned char edpRev = 0; - enum dc_status result = DC_OK; - - result = core_link_read_dpcd(link, 
DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); - - /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ - if (result == DC_OK && edpRev >= DP_EDP_12) { - do { - sinkstatus = 1; - result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); - if (sinkstatus == 0) - break; - if (result != DC_OK) - break; - udelay(100); //MAx T9 - } while (++tries < 50); - } - - return result; -} -bool edp_receiver_ready_T7(struct dc_link *link) -{ - unsigned char sinkstatus = 0; - unsigned char edpRev = 0; - enum dc_status result = DC_OK; - - /* use absolute time stamp to constrain max T7*/ - unsigned long long enter_timestamp = 0; - unsigned long long finish_timestamp = 0; - unsigned long long time_taken_in_ns = 0; - - result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); - - if (result == DC_OK && edpRev >= DP_EDP_12) { - /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ - enter_timestamp = dm_get_timestamp(link->ctx); - do { - sinkstatus = 0; - result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); - if (sinkstatus == 1) - break; - if (result != DC_OK) - break; - udelay(25); - finish_timestamp = dm_get_timestamp(link->ctx); - time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp); - } while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms - } - - if (link && link->panel_config.pps.extra_t7_ms > 0) - udelay(link->panel_config.pps.extra_t7_ms * 1000); - - return result; -} - -void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res, - enum signal_type signal) -{ - struct dc *dc = link->ctx->dc; - - if (!link->wa_flags.dp_keep_receiver_powered) - dp_receiver_power_ctrl(link, false); - - dc->hwss.disable_link_output(link, link_res, signal); - /* Clear current link setting.*/ - memset(&link->cur_link_settings, 0, - sizeof(link->cur_link_settings)); - - if (dc->clk_mgr->funcs->notify_link_rate_change) - dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); -} - -void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res, - enum signal_type signal) -{ - /* MST disable link only when no stream use the link */ - if (link->mst_stream_alloc_table.stream_count > 0) - return; - - dp_disable_link_phy(link, link_res, signal); - - /* set the sink to SST mode after disabling the link */ - dp_enable_mst_on_sink(link, false); -} - -bool dp_set_hw_training_pattern( - struct dc_link *link, - const struct link_resource *link_res, - enum dc_dp_training_pattern pattern, - uint32_t offset) -{ - enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; - - switch (pattern) { - case DP_TRAINING_PATTERN_SEQUENCE_1: - test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1; - break; - case DP_TRAINING_PATTERN_SEQUENCE_2: - test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2; - break; - case DP_TRAINING_PATTERN_SEQUENCE_3: - test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3; - break; - case DP_TRAINING_PATTERN_SEQUENCE_4: - test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; - break; - case DP_128b_132b_TPS1: - test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE; - break; - case DP_128b_132b_TPS2: - test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE; - break; - default: - break; - } - - dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0); - - return true; -} - -void dp_set_hw_lane_settings( - struct dc_link *link, - const struct link_resource *link_res, - const struct link_training_settings 
*link_settings, - uint32_t offset) -{ - const struct link_hwss *link_hwss = get_link_hwss(link, link_res); - - if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset)) - return; - - if (link_hwss->ext.set_dp_lane_settings) - link_hwss->ext.set_dp_lane_settings(link, link_res, - &link_settings->link_settings, - link_settings->hw_lane_settings); - - memmove(link->cur_lane_setting, - link_settings->hw_lane_settings, - sizeof(link->cur_lane_setting)); -} - -void dp_set_hw_test_pattern( - struct dc_link *link, - const struct link_resource *link_res, - enum dp_test_pattern test_pattern, - uint8_t *custom_pattern, - uint32_t custom_pattern_size) -{ - const struct link_hwss *link_hwss = get_link_hwss(link, link_res); - struct encoder_set_dp_phy_pattern_param pattern_param = {0}; - - pattern_param.dp_phy_pattern = test_pattern; - pattern_param.custom_pattern = custom_pattern; - pattern_param.custom_pattern_size = custom_pattern_size; - pattern_param.dp_panel_mode = dp_get_panel_mode(link); - - if (link_hwss->ext.set_dp_link_test_pattern) - link_hwss->ext.set_dp_link_test_pattern(link, link_res, &pattern_param); -} - -void dp_retrain_link_dp_test(struct dc_link *link, - struct dc_link_settings *link_setting, - bool skip_video_pattern) -{ - struct pipe_ctx *pipes = - &link->dc->current_state->res_ctx.pipe_ctx[0]; - unsigned int i; - bool do_fallback = false; - - - for (i = 0; i < MAX_PIPES; i++) { - if (pipes[i].stream != NULL && - !pipes[i].top_pipe && !pipes[i].prev_odm_pipe && - pipes[i].stream->link != NULL && - pipes[i].stream_res.stream_enc != NULL && - pipes[i].stream->link == link) { - udelay(100); - - pipes[i].stream_res.stream_enc->funcs->dp_blank(link, - pipes[i].stream_res.stream_enc); - - /* disable any test pattern that might be active */ - dp_set_hw_test_pattern(link, &pipes[i].link_res, - DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); - - dp_receiver_power_ctrl(link, false); - - link->dc->hwss.disable_stream(&pipes[i]); - if ((&pipes[i])->stream_res.audio && !link->dc->debug.az_endpoint_mute_only) - (&pipes[i])->stream_res.audio->funcs->az_disable((&pipes[i])->stream_res.audio); - - if (link->link_enc) - link->link_enc->funcs->disable_output( - link->link_enc, - SIGNAL_TYPE_DISPLAY_PORT); - - /* Clear current link setting. 
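The LTTPR gating in dp_set_hw_lane_settings() above relies on convert_to_count() from earlier: DPCD PHY_REPEATER_CNT is one-hot encoded (0x80 means 1 repeater, 0x01 means 8), and is_immediate_downstream() treats an offset equal to the decoded count as the repeater directly downstream of the transmitter. A standalone sketch of the decoding; the sample DPCD value is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* one-hot PHY_REPEATER_CNT: 0x80 -> 1 ... 0x01 -> 8, else invalid */
    static uint8_t convert_to_count(uint8_t phy_repeater_cnt)
    {
        for (uint8_t count = 1; count <= 8; count++)
            if (phy_repeater_cnt == (uint8_t)(0x80 >> (count - 1)))
                return count;
        return 0;
    }

    int main(void)
    {
        uint8_t cnt = 0x20; /* illustrative readback: 3 LTTPRs on the link */

        for (uint32_t offset = 1; offset <= 3; offset++)
            printf("offset %u: %s\n", offset,
                   convert_to_count(cnt) == offset ?
                   "immediate downstream" : "further repeater");
        return 0;
    }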
*/ - memset(&link->cur_link_settings, 0, - sizeof(link->cur_link_settings)); - - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) - do_fallback = true; - - perform_link_training_with_retries( - link_setting, - skip_video_pattern, - LINK_TRAINING_ATTEMPTS, - &pipes[i], - SIGNAL_TYPE_DISPLAY_PORT, - do_fallback); - - link->dc->hwss.enable_stream(&pipes[i]); - - link->dc->hwss.unblank_stream(&pipes[i], - link_setting); - - link->dc->hwss.enable_audio_stream(&pipes[i]); - } - } -} - -#undef DC_LOGGER -#define DC_LOGGER \ - dsc->ctx->logger -static void dsc_optc_config_log(struct display_stream_compressor *dsc, - struct dsc_optc_config *config) -{ - uint32_t precision = 1 << 28; - uint32_t bytes_per_pixel_int = config->bytes_per_pixel / precision; - uint32_t bytes_per_pixel_mod = config->bytes_per_pixel % precision; - uint64_t ll_bytes_per_pix_fraq = bytes_per_pixel_mod; - - /* 7 fractional digits decimal precision for bytes per pixel is enough because DSC - * bits per pixel precision is 1/16th of a pixel, which means bytes per pixel precision is - * 1/16/8 = 1/128 of a byte, or 0.0078125 decimal - */ - ll_bytes_per_pix_fraq *= 10000000; - ll_bytes_per_pix_fraq /= precision; - - DC_LOG_DSC("\tbytes_per_pixel 0x%08x (%d.%07d)", - config->bytes_per_pixel, bytes_per_pixel_int, (uint32_t)ll_bytes_per_pix_fraq); - DC_LOG_DSC("\tis_pixel_format_444 %d", config->is_pixel_format_444); - DC_LOG_DSC("\tslice_width %d", config->slice_width); -} - -bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) -{ - struct dc *dc = pipe_ctx->stream->ctx->dc; - struct dc_stream_state *stream = pipe_ctx->stream; - bool result = false; - - if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - result = true; - else - result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable); - return result; -} - -/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first, - * i.e. after dp_enable_dsc_on_rx() had been called - */ -void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) -{ - struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - struct dc *dc = pipe_ctx->stream->ctx->dc; - struct dc_stream_state *stream = pipe_ctx->stream; - struct pipe_ctx *odm_pipe; - int opp_cnt = 1; - - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - opp_cnt++; - - if (enable) { - struct dsc_config dsc_cfg; - struct dsc_optc_config dsc_optc_cfg; - enum optc_dsc_mode optc_dsc_mode; - - /* Enable DSC hw block */ - dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; - dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; - dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; - dsc_cfg.color_depth = stream->timing.display_color_depth; - dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
true : false; - dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; - ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); - dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; - - dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); - dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { - struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; - - odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); - odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); - } - dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt; - dsc_cfg.pic_width *= opp_cnt; - - optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; - - /* Enable DSC in encoder */ - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) - && !is_dp_128b_132b_signal(pipe_ctx)) { - DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id); - dsc_optc_config_log(dsc, &dsc_optc_cfg); - pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, - optc_dsc_mode, - dsc_optc_cfg.bytes_per_pixel, - dsc_optc_cfg.slice_width); - - /* PPS SDP is set elsewhere because it has to be done after DIG FE is connected to DIG BE */ - } - - /* Enable DSC in OPTC */ - DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst); - dsc_optc_config_log(dsc, &dsc_optc_cfg); - pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg, - optc_dsc_mode, - dsc_optc_cfg.bytes_per_pixel, - dsc_optc_cfg.slice_width); - } else { - /* disable DSC in OPTC */ - pipe_ctx->stream_res.tg->funcs->set_dsc_config( - pipe_ctx->stream_res.tg, - OPTC_DSC_DISABLED, 0, 0); - - /* disable DSC in stream encoder */ - if (dc_is_dp_signal(stream->signal)) { - if (is_dp_128b_132b_signal(pipe_ctx)) - pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( - pipe_ctx->stream_res.hpo_dp_stream_enc, - false, - NULL, - true); - else if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config( - pipe_ctx->stream_res.stream_enc, - OPTC_DSC_DISABLED, 0, 0); - pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( - pipe_ctx->stream_res.stream_enc, false, NULL, true); - } - } - - /* disable DSC block */ - pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc); - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc); - } -} - -bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable) -{ - struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - bool result = false; - - if (!pipe_ctx->stream->timing.flags.DSC) - goto out; - if (!dsc) - goto out; - - if (enable) { - { - dp_set_dsc_on_stream(pipe_ctx, true); - result = true; - } - } else { - dp_set_dsc_on_rx(pipe_ctx, false); - dp_set_dsc_on_stream(pipe_ctx, false); - result = true; - } -out: - return result; -} - -/* - * For dynamic bpp change case, dsc is programmed with MASTER_UPDATE_LOCK enabled; - * hence PPS info packet update need to use frame update instead of immediate update. - * Added parameter immediate_update for this purpose. 
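dsc_optc_config_log() above prints bytes_per_pixel from a fixed-point value with 28 fractional bits, scaling the remainder by 10^7 to obtain seven decimal digits. A standalone sketch of that conversion; the sample register value is made up:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t precision = 1u << 28;          /* 28 fractional bits */
        uint32_t bytes_per_pixel = 0x14000000;  /* illustrative: 1.25 bytes/px */

        uint32_t int_part = bytes_per_pixel / precision;
        uint64_t frac = (uint64_t)(bytes_per_pixel % precision)
                        * 10000000ull / precision;

        /* 7 digits suffice: DSC bpp granularity is 1/16 bit = 1/128 byte */
        printf("0x%08x -> %u.%07u bytes/pixel\n",
               bytes_per_pixel, int_part, (uint32_t)frac);
        return 0;
    }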
- * The decision to use frame update is hard-coded in function dp_update_dsc_config(), - * which is the only place where a "false" would be passed in for param immediate_update. - * - * immediate_update is only applicable when DSC is enabled. - */ -bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update) -{ - struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - struct dc_stream_state *stream = pipe_ctx->stream; - - if (!pipe_ctx->stream->timing.flags.DSC || !dsc) - return false; - - if (enable) { - struct dsc_config dsc_cfg; - uint8_t dsc_packed_pps[128]; - - memset(&dsc_cfg, 0, sizeof(dsc_cfg)); - memset(dsc_packed_pps, 0, 128); - - /* Enable DSC hw block */ - dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; - dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; - dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; - dsc_cfg.color_depth = stream->timing.display_color_depth; - dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; - dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; - - DC_LOG_DSC(" "); - dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]); - memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps)); - if (dc_is_dp_signal(stream->signal)) { - DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id); - if (is_dp_128b_132b_signal(pipe_ctx)) - pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( - pipe_ctx->stream_res.hpo_dp_stream_enc, - true, - &dsc_packed_pps[0], - immediate_update); - else - pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( - pipe_ctx->stream_res.stream_enc, - true, - &dsc_packed_pps[0], - immediate_update); - } - } else { - /* disable DSC PPS in stream encoder */ - memset(&stream->dsc_packed_pps[0], 0, sizeof(stream->dsc_packed_pps)); - if (dc_is_dp_signal(stream->signal)) { - if (is_dp_128b_132b_signal(pipe_ctx)) - pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( - pipe_ctx->stream_res.hpo_dp_stream_enc, - false, - NULL, - true); - else - pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( - pipe_ctx->stream_res.stream_enc, false, NULL, true); - } - } - - return true; -} - - -bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx) -{ - struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - - if (!pipe_ctx->stream->timing.flags.DSC) - return false; - if (!dsc) - return false; - - dp_set_dsc_on_stream(pipe_ctx, true); - dp_set_dsc_pps_sdp(pipe_ctx, true, false); - return true; -} - -#undef DC_LOGGER -#define DC_LOGGER \ - link->ctx->logger diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 614f022d1cffa2..74e465ba158d2c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -24,7 +24,7 @@ #include "link_enc_cfg.h" #include "resource.h" -#include "dc_link_dp.h" +#include "link.h" #define DC_LOGGER dc->ctx->logger @@ -48,7 +48,7 @@ static bool is_dig_link_enc_stream(struct dc_stream_state *stream) /* DIGs do not support DP2.0 streams with 128b/132b encoding. 
*/ struct dc_link_settings link_settings = {0}; - decide_link_settings(stream, &link_settings); + link_decide_link_settings(stream, &link_settings); if ((link_settings.link_rate >= LINK_RATE_LOW) && link_settings.link_rate <= LINK_RATE_HIGH3) { is_dig_stream = true; @@ -305,15 +305,17 @@ void link_enc_cfg_link_encs_assign( for (i = 0; i < stream_count; i++) { struct dc_stream_state *stream = streams[i]; + /* skip it if the link is mappable endpoint. */ + if (stream->link->is_dig_mapping_flexible) + continue; + /* Skip stream if not supported by DIG link encoder. */ if (!is_dig_link_enc_stream(stream)) continue; /* Physical endpoints have a fixed mapping to DIG link encoders. */ - if (!stream->link->is_dig_mapping_flexible) { - eng_id = stream->link->eng_id; - add_link_enc_assignment(state, stream, eng_id); - } + eng_id = stream->link->eng_id; + add_link_enc_assignment(state, stream, eng_id); } /* (b) Retain previous assignments for mappable endpoints if encoders still available. */ @@ -325,11 +327,12 @@ void link_enc_cfg_link_encs_assign( for (i = 0; i < stream_count; i++) { struct dc_stream_state *stream = state->streams[i]; - /* Skip stream if not supported by DIG link encoder. */ - if (!is_dig_link_enc_stream(stream)) + /* Skip it if the link is NOT mappable endpoint. */ + if (!stream->link->is_dig_mapping_flexible) continue; - if (!stream->link->is_dig_mapping_flexible) + /* Skip stream if not supported by DIG link encoder. */ + if (!is_dig_link_enc_stream(stream)) continue; for (j = 0; j < prev_state->stream_count; j++) { @@ -338,6 +341,7 @@ void link_enc_cfg_link_encs_assign( if (stream == prev_stream && stream->link == prev_stream->link && prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].valid) { eng_id = prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].eng_id; + if (is_avail_link_enc(state, eng_id, stream)) add_link_enc_assignment(state, stream, eng_id); } @@ -350,6 +354,15 @@ void link_enc_cfg_link_encs_assign( for (i = 0; i < stream_count; i++) { struct dc_stream_state *stream = streams[i]; + struct link_encoder *link_enc = NULL; + + /* Skip it if the link is NOT mappable endpoint. */ + if (!stream->link->is_dig_mapping_flexible) + continue; + + /* Skip if encoder assignment retained in step (b) above. */ + if (stream->link_enc) + continue; /* Skip stream if not supported by DIG link encoder. */ if (!is_dig_link_enc_stream(stream)) { @@ -358,24 +371,18 @@ void link_enc_cfg_link_encs_assign( } /* Mappable endpoints have a flexible mapping to DIG link encoders. */ - if (stream->link->is_dig_mapping_flexible) { - struct link_encoder *link_enc = NULL; - /* Skip if encoder assignment retained in step (b) above. */ - if (stream->link_enc) - continue; + /* For MST, multiple streams will share the same link / display + * endpoint. These streams should use the same link encoder + * assigned to that endpoint. + */ + link_enc = get_link_enc_used_by_link(state, stream->link); + if (link_enc == NULL) + eng_id = find_first_avail_link_enc(stream->ctx, state); + else + eng_id = link_enc->preferred_engine; - /* For MST, multiple streams will share the same link / display - * endpoint. These streams should use the same link encoder - * assigned to that endpoint. 
- */ - link_enc = get_link_enc_used_by_link(state, stream->link); - if (link_enc == NULL) - eng_id = find_first_avail_link_enc(stream->ctx, state); - else - eng_id = link_enc->preferred_engine; - add_link_enc_assignment(state, stream, eng_id); - } + add_link_enc_assignment(state, stream, eng_id); } link_enc_cfg_validate(dc, state); @@ -420,10 +427,6 @@ void link_enc_cfg_link_enc_unassign( { enum engine_id eng_id = ENGINE_ID_UNKNOWN; - /* Only DIG link encoders. */ - if (!is_dig_link_enc_stream(stream)) - return; - if (stream->link_enc) eng_id = stream->link_enc->preferred_engine; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c new file mode 100644 index 00000000000000..a951e10416ee63 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -0,0 +1,103 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file provides single entrance to link functionality declared in dc + * public headers. The file is intended to be used as a thin translation layer + * that directly calls link internal functions without adding new functional + * behavior. + * + * When exporting a new link related dc function, add function declaration in + * dc.h with detail interface documentation, then add function implementation + * in this file which calls link functions. 
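Following the file policy just stated, a new export is a one-line wrapper. A hypothetical example of the pattern (dc_link_foo/link_foo are placeholder names, not part of this patch):

    /* dc.h: declaration with interface documentation */
    bool dc_link_foo(struct dc_link *link);

    /* dc_link_exports.c: thin translation to the link-internal function */
    bool dc_link_foo(struct dc_link *link)
    {
        return link_foo(link);
    }

No new behavior lives in the wrapper; all logic stays behind link.h.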
+ */ +#include "link.h" + +bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) +{ + return link_detect(link, reason); +} + +bool dc_link_detect_connection_type(struct dc_link *link, + enum dc_connection_type *type) +{ + return link_detect_connection_type(link, type); +} + +const struct dc_link_status *dc_link_get_status(const struct dc_link *link) +{ + return link_get_status(link); +} +#ifdef CONFIG_DRM_AMD_DC_HDCP + +/* return true if the connected receiver supports the hdcp version */ +bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal) +{ + return link_is_hdcp14(link, signal); +} + +bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal) +{ + return link_is_hdcp22(link, signal); +} +#endif + +void dc_link_clear_dprx_states(struct dc_link *link) +{ + link_clear_dprx_states(link); +} + +bool dc_link_reset_cur_dp_mst_topology(struct dc_link *link) +{ + return link_reset_cur_dp_mst_topology(link); +} + +uint32_t dc_link_bandwidth_kbps( + const struct dc_link *link, + const struct dc_link_settings *link_settings) +{ + return dp_link_bandwidth_kbps(link, link_settings); +} + +uint32_t dc_bandwidth_in_kbps_from_timing( + const struct dc_crtc_timing *timing) +{ + return link_timing_bandwidth_kbps(timing); +} + +void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map) +{ + link_get_cur_res_map(dc, map); +} + +void dc_restore_link_res_map(const struct dc *dc, uint32_t *map) +{ + link_restore_res_map(dc, map); +} + +bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx) +{ + return link_update_dsc_config(pipe_ctx); +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index da164685547d98..d9f2ef242b0fb4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -40,11 +40,11 @@ #include "virtual/virtual_stream_encoder.h" #include "dpcd_defs.h" #include "link_enc_cfg.h" -#include "dc_link_dp.h" +#include "link.h" #include "virtual/virtual_link_hwss.h" -#include "link/link_hwss_dio.h" -#include "link/link_hwss_dpia.h" -#include "link/link_hwss_hpo_dp.h" +#include "link/hwss/link_hwss_dio.h" +#include "link/hwss/link_hwss_dpia.h" +#include "link/hwss/link_hwss_hpo_dp.h" #if defined(CONFIG_DRM_AMD_DC_SI) #include "dce60/dce60_resource.h" @@ -2213,7 +2213,7 @@ enum dc_status dc_remove_stream_from_ctx( del_pipe->stream_res.stream_enc, false); - if (is_dp_128b_132b_signal(del_pipe)) { + if (link_is_dp_128b_132b_signal(del_pipe)) { update_hpo_dp_stream_engine_usage( &new_ctx->res_ctx, dc->res_pool, del_pipe->stream_res.hpo_dp_stream_enc, @@ -2513,9 +2513,9 @@ enum dc_status resource_map_pool_resources( * and link settings */ if (dc_is_dp_signal(stream->signal)) { - if (!decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings)) + if (!link_decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings)) return DC_FAIL_DP_LINK_BANDWIDTH; - if (dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) { + if (link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) { pipe_ctx->stream_res.hpo_dp_stream_enc = find_first_free_match_hpo_dp_stream_enc_for_link( &context->res_ctx, pool, stream); @@ -3269,6 +3269,50 @@ static void set_hfvs_info_packet( *info_packet = stream->hfvsif_infopacket; } +static void adaptive_sync_override_dp_info_packets_sdp_line_num( + const struct dc_crtc_timing *timing, + struct enc_sdp_line_num 
*sdp_line_num, + struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param) +{ + uint32_t asic_blank_start = 0; + uint32_t asic_blank_end = 0; + uint32_t v_update = 0; + + const struct dc_crtc_timing *tg = timing; + + /* blank_start = frame end - front porch */ + asic_blank_start = tg->v_total - tg->v_front_porch; + + /* blank_end = blank_start - active */ + asic_blank_end = (asic_blank_start - tg->v_border_bottom - + tg->v_addressable - tg->v_border_top); + + if (pipe_dlg_param->vstartup_start > asic_blank_end) { + v_update = (tg->v_total - (pipe_dlg_param->vstartup_start - asic_blank_end)); + sdp_line_num->adaptive_sync_line_num_valid = true; + sdp_line_num->adaptive_sync_line_num = (tg->v_total - v_update - 1); + } else { + sdp_line_num->adaptive_sync_line_num_valid = false; + sdp_line_num->adaptive_sync_line_num = 0; + } +} + +static void set_adaptive_sync_info_packet( + struct dc_info_packet *info_packet, + const struct dc_stream_state *stream, + struct encoder_info_frame *info_frame, + struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param) +{ + if (!stream->adaptive_sync_infopacket.valid) + return; + + adaptive_sync_override_dp_info_packets_sdp_line_num( + &stream->timing, + &info_frame->sdp_line_num, + pipe_dlg_param); + + *info_packet = stream->adaptive_sync_infopacket; +} static void set_vtem_info_packet( struct dc_info_packet *info_packet, @@ -3361,6 +3405,7 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx) info->vsc.valid = false; info->hfvsif.valid = false; info->vtem.valid = false; + info->adaptive_sync.valid = false; signal = pipe_ctx->stream->signal; /* HDMi and DP have different info packets*/ @@ -3381,6 +3426,10 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx) set_spd_info_packet(&info->spd, pipe_ctx->stream); set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream); + set_adaptive_sync_info_packet(&info->adaptive_sync, + pipe_ctx->stream, + info, + &pipe_ctx->pipe_dlg_param); } patch_gamut_packet_checksum(&info->gamut); @@ -3636,7 +3685,7 @@ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream) /* TODO: validate audio ASIC caps, encoder */ if (res == DC_OK) - res = dc_link_validate_mode_timing(stream, + res = link_validate_mode_timing(stream, link, &stream->timing); @@ -3763,7 +3812,7 @@ bool get_temp_dp_link_res(struct dc_link *link, memset(link_res, 0, sizeof(*link_res)); - if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { + if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) { link_res->hpo_dp_link_enc = get_temp_hpo_dp_link_enc(res_ctx, dc->res_pool, link); if (!link_res->hpo_dp_link_enc) @@ -3820,9 +3869,20 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc, pipe_ctx_check = &context->res_ctx.pipe_ctx[i]; if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == disabled_master_pipe_idx) && - IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) + IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) { + struct pipe_ctx *first_pipe = pipe_ctx_check; + + while (first_pipe->prev_odm_pipe) + first_pipe = first_pipe->prev_odm_pipe; + /* When ODM combine is enabled, this case is expected. If the disabled pipe + * is part of the ODM tree, then we should not print an error. 
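Worked numbers for adaptive_sync_override_dp_info_packets_sdp_line_num() above, using a common 1080p vertical timing (all values illustrative):

    #include <stdio.h>

    int main(void)
    {
        int v_total = 1125, v_front_porch = 4, v_addressable = 1080;
        int v_border_top = 0, v_border_bottom = 0;
        int vstartup_start = 85; /* illustrative pipe_dlg_param->vstartup_start */

        int blank_start = v_total - v_front_porch;            /* 1121 */
        int blank_end = blank_start - v_border_bottom
                        - v_addressable - v_border_top;       /* 41 */

        if (vstartup_start > blank_end) {
            int v_update = v_total - (vstartup_start - blank_end);   /* 1081 */
            printf("adaptive_sync_line_num = %d\n",
                   v_total - v_update - 1);                          /* 43 */
        } else {
            printf("line number not overridden\n");
        }
        return 0;
    }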
+ * */ + if (first_pipe->pipe_idx == disabled_master_pipe_idx) + continue; + DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n", - i, disabled_master_pipe_idx); + i, disabled_master_pipe_idx); + } } } @@ -3981,3 +4041,42 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm( return true; } + +enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx) +{ + if (link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) { + if (pipe_ctx->stream_res.hpo_dp_stream_enc == NULL) { + pipe_ctx->stream_res.hpo_dp_stream_enc = + find_first_free_match_hpo_dp_stream_enc_for_link( + &context->res_ctx, dc->res_pool, pipe_ctx->stream); + + if (!pipe_ctx->stream_res.hpo_dp_stream_enc) + return DC_NO_STREAM_ENC_RESOURCE; + + update_hpo_dp_stream_engine_usage( + &context->res_ctx, dc->res_pool, + pipe_ctx->stream_res.hpo_dp_stream_enc, + true); + } + + if (pipe_ctx->link_res.hpo_dp_link_enc == NULL) { + if (!add_hpo_dp_link_enc_to_ctx(&context->res_ctx, dc->res_pool, pipe_ctx, pipe_ctx->stream)) + return DC_NO_LINK_ENC_RESOURCE; + } + } else { + if (pipe_ctx->stream_res.hpo_dp_stream_enc) { + update_hpo_dp_stream_engine_usage( + &context->res_ctx, dc->res_pool, + pipe_ctx->stream_res.hpo_dp_stream_enc, + false); + pipe_ctx->stream_res.hpo_dp_stream_enc = NULL; + } + if (pipe_ctx->link_res.hpo_dp_link_enc) + remove_hpo_dp_link_enc_from_ctx(&context->res_ctx, pipe_ctx, pipe_ctx->stream); + } + + return DC_OK; +} + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c index 4b372aa528012d..6c06587dd88c28 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c @@ -65,6 +65,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification /* For HPD/HPD RX, convert dpia port index into link index */ if (notify->type == DMUB_NOTIFICATION_HPD || notify->type == DMUB_NOTIFICATION_HPD_IRQ || + notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION || notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) { notify->link_index = get_link_index_from_dpia_port_index(dc, notify->link_index); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 20e534f7351372..72b261ad95870d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -408,7 +408,7 @@ bool dc_stream_set_cursor_position( struct dc_stream_state *stream, const struct dc_cursor_position *position) { - struct dc *dc = stream->ctx->dc; + struct dc *dc; bool reset_idle_optimizations = false; if (NULL == stream) { @@ -481,6 +481,7 @@ bool dc_stream_add_writeback(struct dc *dc, } if (!isDrc) { + ASSERT(stream->num_wb_info + 1 <= MAX_DWB_PIPES); stream->writeback_info[stream->num_wb_info++] = *wb_info; } @@ -526,6 +527,11 @@ bool dc_stream_remove_writeback(struct dc *dc, return false; } + if (stream->num_wb_info > MAX_DWB_PIPES) { + dm_error("DC: num_wb_info is invalid!\n"); + return false; + } + // stream->writeback_info[dwb_pipe_inst].wb_enabled = false; for (i = 0; i < stream->num_wb_info; i++) { /*dynamic update*/ @@ -540,7 +546,8 @@ bool dc_stream_remove_writeback(struct dc *dc, if (stream->writeback_info[i].wb_enabled) { if (j < i) /* trim the array */ - stream->writeback_info[j] = stream->writeback_info[i]; + memcpy(&stream->writeback_info[j], &stream->writeback_info[i], + 
sizeof(struct dc_writeback_info)); j++; } } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 85ebeaa2de1861..1fde4337868942 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,12 +47,11 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.215" +#define DC_VER "3.2.223" #define MAX_SURFACES 3 #define MAX_PLANES 6 #define MAX_STREAMS 6 -#define MAX_SINKS_PER_LINK 4 #define MIN_VIEWPORT_SIZE 12 #define MAX_NUM_EDP 2 @@ -410,7 +409,7 @@ struct dc_config { bool force_bios_enable_lttpr; uint8_t force_bios_fixed_vs; int sdpif_request_limit_words_per_umc; - + bool disable_subvp_drr; }; enum visual_confirm { @@ -872,6 +871,9 @@ struct dc_debug_options { enum lttpr_mode lttpr_mode_override; unsigned int dsc_delay_factor_wa_x1000; unsigned int min_prefetch_in_strobe_ns; + bool disable_unbounded_requesting; + bool dig_fifo_off_in_blank; + bool temp_mst_deallocation_sequence; }; struct gpu_info_soc_bounding_box_v1_0; @@ -1369,108 +1371,128 @@ struct dc_state *dc_copy_state(struct dc_state *src_ctx); void dc_retain_state(struct dc_state *context); void dc_release_state(struct dc_state *context); +struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc, + struct dc_stream_state *stream, + int mpcc_inst); + + +uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane); + /* Link Interfaces */ +/* TODO: remove this after resolving external dependencies */ +#include "dc_link.h" -struct dpcd_caps { - union dpcd_rev dpcd_rev; - union max_lane_count max_ln_count; - union max_down_spread max_down_spread; - union dprx_feature dprx_feature; - - /* valid only for eDP v1.4 or higher*/ - uint8_t edp_supported_link_rates_count; - enum dc_link_rate edp_supported_link_rates[8]; - - /* dongle type (DP converter, CV smart dongle) */ - enum display_dongle_type dongle_type; - bool is_dongle_type_one; - /* branch device or sink device */ - bool is_branch_dev; - /* Dongle's downstream count. */ - union sink_count sink_count; - bool is_mst_capable; - /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER, - indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/ - struct dc_dongle_caps dongle_caps; - - uint32_t sink_dev_id; - int8_t sink_dev_id_str[6]; - int8_t sink_hw_revision; - int8_t sink_fw_revision[2]; - - uint32_t branch_dev_id; - int8_t branch_dev_name[6]; - int8_t branch_hw_revision; - int8_t branch_fw_revision[2]; - - bool allow_invalid_MSA_timing_param; - bool panel_mode_edp; - bool dpcd_display_control_capable; - bool ext_receiver_cap_field_present; - bool set_power_state_capable_edp; - bool dynamic_backlight_capable_edp; - union dpcd_fec_capability fec_cap; - struct dpcd_dsc_capabilities dsc_caps; - struct dc_lttpr_caps lttpr_caps; - struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info; - - union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates; - union dp_main_line_channel_coding_cap channel_coding_cap; - union dp_sink_video_fallback_formats fallback_formats; - union dp_fec_capability1 fec_cap1; - union dp_cable_id cable_id; - uint8_t edp_rev; - union edp_alpm_caps alpm_caps; - struct edp_psr_info psr_info; -}; - -union dpcd_sink_ext_caps { - struct { - /* 0 - Sink supports backlight adjust via PWM during SDR/HDR mode - * 1 - Sink supports backlight adjust via AUX during SDR/HDR mode. 
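The dpcd_sink_ext_caps union removed from dc.h in this hunk decodes a single DPCD byte: bit 0 is SDR aux backlight control, bit 1 is HDR aux backlight control, and bit 4 is the OLED flag consulted by the backlight helpers earlier in this patch. A standalone sketch of the decoding, assuming the usual low-bit-first bitfield layout; the readback value is made up:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t raw = 0x13; /* illustrative DPCD readback */

        printf("sdr_aux_backlight_control = %u\n", raw & 1);
        printf("hdr_aux_backlight_control = %u\n", (raw >> 1) & 1);
        printf("oled                      = %u\n", (raw >> 4) & 1);
        return 0;
    }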
- */ - uint8_t sdr_aux_backlight_control : 1; - uint8_t hdr_aux_backlight_control : 1; - uint8_t reserved_1 : 2; - uint8_t oled : 1; - uint8_t reserved : 3; - } bits; - uint8_t raw; -}; +/* The function initiates detection handshake over the given link. It first + * determines if there are display connections over the link. If so it initiates + * detection protocols supported by the connected receiver device. The function + * contains protocol specific handshake sequences which are sometimes mandatory + * to establish a proper connection between TX and RX. So it is always + * recommended to call this function as the first link operation upon HPD event + * or power up event. Upon completion, the function will update link structure + * in place based on latest RX capabilities. The function may also cause dpms + * to be reset to off for all currently enabled streams to the link. It is DM's + * responsibility to serialize detection and DPMS updates. + * + * @reason - Indicate which event triggers this detection. dc may customize + * detection flow depending on the triggering events. + * return false - if detection is not fully completed. This could happen when + * there is an unrecoverable error during detection or detection is partially + * completed (detection has been delegated to dm mst manager i.e. + * link->connection_type == dc_connection_mst_branch when returning false). + * return true - detection is completed, link has been fully updated with latest + * detection result. + */ +bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason); -#if defined(CONFIG_DRM_AMD_DC_HDCP) -union hdcp_rx_caps { - struct { - uint8_t version; - uint8_t reserved; - struct { - uint8_t repeater : 1; - uint8_t hdcp_capable : 1; - uint8_t reserved : 6; - } byte0; - } fields; - uint8_t raw[3]; -}; +/* determine if there is a sink connected to the link + * + * @type - dc_connection_single if connected, dc_connection_none otherwise. + * return - false if an unexpected error occurs, true otherwise. + * + * NOTE: This function doesn't detect downstream sink connections i.e. + * dc_connection_mst_branch, dc_connection_sst_branch. In this case, it will + * return dc_connection_single if the branch device is connected regardless of the + * downstream sink's connection status. + */ +bool dc_link_detect_connection_type(struct dc_link *link, + enum dc_connection_type *type); -union hdcp_bcaps { - struct { - uint8_t HDCP_CAPABLE:1; - uint8_t REPEATER:1; - uint8_t RESERVED:6; - } bits; - uint8_t raw; -}; +/* Getter for cached link status from given link */ +const struct dc_link_status *dc_link_get_status(const struct dc_link *link); -struct hdcp_caps { - union hdcp_rx_caps rx_caps; - union hdcp_bcaps bcaps; -}; +#ifdef CONFIG_DRM_AMD_DC_HDCP +/* return true if the connected receiver supports the hdcp version */ +bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal); +bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal); #endif -#include "dc_link.h" +/* The function clears recorded DP RX states in the link. DM should call this + * function when it is resuming from S3 power state to previously connected links. + * + * TODO - in the future we should consider expanding the link resume interface to + * support clearing previous rx states. So we don't have to rely on dm to call + * this interface explicitly.
+ */ +void dc_link_clear_dprx_states(struct dc_link *link); -uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane); +/* Destruct the mst topology of the link and reset the allocated payload table + * + * NOTE: this should only be called if DM chooses not to call dc_link_detect but + * still wants to reset MST topology on an unplug event */ +bool dc_link_reset_cur_dp_mst_topology(struct dc_link *link); + +/* The function calculates effective DP link bandwidth when a given link is + * using the given link settings. + * + * return - total effective link bandwidth in kbps. + */ +uint32_t dc_link_bandwidth_kbps( + const struct dc_link *link, + const struct dc_link_settings *link_setting); + +/* The function returns minimum bandwidth required to drive a given timing + * return - minimum required timing bandwidth in kbps. + */ +uint32_t dc_bandwidth_in_kbps_from_timing( + const struct dc_crtc_timing *timing); +/* The function takes a snapshot of current link resource allocation state + * @dc: pointer to dc of the dm calling this + * @map: a dc link resource snapshot defined internally to dc. + * + * DM needs to capture a snapshot of current link resource allocation mapping + * and store it in its persistent storage. + * + * Some of the link resources use a first-come, first-served policy. + * The allocation mapping depends on original hotplug order. This information + * is lost after the driver is reloaded. The snapshot is used in order to + * restore link resource to its previous state so the user gets consistent + * link capability allocation across reboots. + * + */ +void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map); + +/* This function restores link resource allocation state from a snapshot + * @dc: pointer to dc of the dm calling this + * @map: a dc link resource snapshot defined internally to dc. + * + * DM needs to call this function after initial link detection on boot and + * before first commit streams to restore link resource allocation state + * from previous boot session. + * + * Some of the link resources use a first-come, first-served policy. + * The allocation mapping depends on original hotplug order. This information + * is lost after the driver is reloaded. The snapshot is used in order to + * restore link resource to its previous state so the user gets consistent + * link capability allocation across reboots. + * + */ +void dc_restore_link_res_map(const struct dc *dc, uint32_t *map); + +/* TODO: this is not meant to be exposed to DM.
Should switch to stream update + * interface i.e stream_update->dsc_config + */ +bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx); /* Sink Interfaces - A sink corresponds to a display output device */ struct dc_container_id { @@ -1502,6 +1524,11 @@ struct dc_sink_fec_caps { bool is_topology_fec_supported; }; +struct scdc_caps { + union hdmi_scdc_manufacturer_OUI_data manufacturer_OUI; + union hdmi_scdc_device_id_data device_id; +}; + /* * The sink structure contains EDID and other display device properties */ @@ -1515,6 +1542,7 @@ struct dc_sink { struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX]; bool converter_disable_audio; + struct scdc_caps scdc_caps; struct dc_sink_dsc_caps dsc_caps; struct dc_sink_fec_caps fec_caps; diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index 260ac4458870bf..be9aa1a71847d7 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h @@ -140,7 +140,8 @@ struct dc_vbios_funcs { enum bp_result (*enable_lvtma_control)( struct dc_bios *bios, uint8_t uc_pwr_on, - uint8_t panel_instance); + uint8_t panel_instance, + uint8_t bypass_panel_control_wait); enum bp_result (*get_soc_bb_info)( struct dc_bios *dcb, diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h index 7769bd099a5a03..428e3a9ab65afb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h @@ -77,6 +77,32 @@ struct aux_reply_transaction_data { uint8_t *data; }; +struct aux_payload { + /* set following flag to read/write I2C data, + * reset it to read/write DPCD data */ + bool i2c_over_aux; + /* set following flag to write data, + * reset it to read data */ + bool write; + bool mot; + bool write_status_update; + + uint32_t address; + uint32_t length; + uint8_t *data; + /* + * used to return the reply type of the transaction + * ignored if NULL + */ + uint8_t *reply; + /* expressed in milliseconds + * zero means "use default value" + */ + uint32_t defer_delay; + +}; +#define DEFAULT_AUX_MAX_DATA_SIZE 16 + struct i2c_payload { bool write; uint8_t address; @@ -90,6 +116,8 @@ enum i2c_command_engine { I2C_COMMAND_ENGINE_HW }; +#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW + struct i2c_command { struct i2c_payload *payloads; uint8_t number_of_payloads; @@ -150,6 +178,9 @@ enum display_dongle_type { DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE, }; +#define DC_MAX_EDID_BUFFER_SIZE 2048 +#define DC_EDID_BLOCK_SIZE 128 + struct ddc_service { struct ddc *ddc_pin; struct ddc_flags flags; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 6ccf477d1c4dc3..c2092775ca88fa 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -698,7 +698,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc, * * @dc: [in] current dc state * @context: [in] new dc state - * @cmd: [in] DMUB cmd to be populated with SubVP info + * @enable: [in] if true enables the pipes population * * This function loops through each pipe and populates the DMUB SubVP CMD info * based on the pipe (e.g. SubVP, VBLANK). 
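For illustration, a minimal sketch of a native-AUX DPCD read using the aux_payload layout relocated into dc_ddc_types.h above; the ddc_service pointer ("ddc") and the DP_SINK_COUNT address (from the DRM DP headers) are assumed here, and dc_link_aux_transfer_raw(), declared later in this series, performs the raw single-shot transfer (no retries, reply type returned through payload->reply):

    uint8_t sink_count = 0;
    uint8_t reply_type = 0;
    enum aux_return_code_type op_result;
    struct aux_payload payload = {
            .i2c_over_aux = false,          /* native AUX, i.e. a DPCD access */
            .write = false,                 /* read transaction */
            .mot = false,
            .address = DP_SINK_COUNT,
            .length = sizeof(sink_count),
            .data = &sink_count,
            .reply = &reply_type,
            .defer_delay = 0,               /* zero means "use default value" */
    };
    int transferred = dc_link_aux_transfer_raw(ddc, &payload, &op_result);
    /* transferred is the byte count on success, -1 on failure */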
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 2c54b6e0498bff..809a1851f19659 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -27,6 +27,7 @@ #define DC_DP_TYPES_H #include "os_types.h" +#include "dc_ddc_types.h" enum dc_lane_count { LANE_COUNT_UNKNOWN = 0, @@ -149,7 +150,6 @@ struct dc_link_settings { enum dc_link_spread link_spread; bool use_link_rate_set; uint8_t link_rate_set; - bool dpcd_source_device_specific_field_support; }; union dc_dp_ffe_preset { @@ -362,14 +362,10 @@ enum dpcd_downstream_port_detailed_type { union dwnstream_port_caps_byte2 { struct { uint8_t MAX_BITS_PER_COLOR_COMPONENT:2; -#if defined(CONFIG_DRM_AMD_DC_DCN) uint8_t MAX_ENCODED_LINK_BW_SUPPORT:3; uint8_t SOURCE_CONTROL_MODE_SUPPORT:1; uint8_t CONCURRENT_LINK_BRING_UP_SEQ_SUPPORT:1; uint8_t RESERVED:1; -#else - uint8_t RESERVED:6; -#endif } bits; uint8_t raw; }; @@ -407,7 +403,6 @@ union dwnstream_port_caps_byte3_hdmi { uint8_t raw; }; -#if defined(CONFIG_DRM_AMD_DC_DCN) union hdmi_sink_encoded_link_bw_support { struct { uint8_t HDMI_SINK_ENCODED_LINK_BW_SUPPORT:3; @@ -429,7 +424,6 @@ union hdmi_encoded_link_bw { } bits; uint8_t raw; }; -#endif /*4-byte structure for detailed capabilities of a down-stream port (DP-to-TMDS converter).*/ @@ -509,7 +503,11 @@ union down_spread_ctrl { 1 = Main link signal is downspread <= 0.5% with frequency in the range of 30kHz ~ 33kHz*/ uint8_t SPREAD_AMP:1; - uint8_t RESERVED2:2;/*Bit 6:5 = RESERVED. Read all 0s*/ + uint8_t RESERVED2:1;/*Bit 5 = RESERVED. Read all 0s*/ + /* Bit 6 = FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE. + 0 = FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE is not enabled by the Source device (default) + 1 = FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE is enabled by Source device */ + uint8_t FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE:1; /*Bit 7 = MSA_TIMING_PAR_IGNORE_EN 0 = Source device will send valid data for the MSA Timing Params 1 = Source device may send invalid data for these MSA Timing Params*/ @@ -865,6 +863,21 @@ struct psr_caps { unsigned int psr_power_opt_flag; }; +union dpcd_dprx_feature_enumeration_list_cont_1 { + struct { + uint8_t ADAPTIVE_SYNC_SDP_SUPPORT:1; + uint8_t AS_SDP_FIRST_HALF_LINE_OR_3840_PIXEL_CYCLE_WINDOW_NOT_SUPPORTED: 1; + uint8_t RESERVED0: 2; + uint8_t VSC_EXT_SDP_VER1_SUPPORT: 1; + uint8_t RESERVED1: 3; + } bits; + uint8_t raw; +}; + +struct adaptive_sync_caps { + union dpcd_dprx_feature_enumeration_list_cont_1 dp_adap_sync_caps; +}; + /* Length of router topology ID read from DPCD in bytes. 
*/ #define DPCD_USB4_TOPOLOGY_ID_LEN 5 @@ -926,6 +939,9 @@ struct dpcd_usb4_dp_tunneling_info { #ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL #define DP_128b_132b_TRAINING_AUX_RD_INTERVAL 0x2216 #endif +#ifndef DP_LINK_SQUARE_PATTERN +#define DP_LINK_SQUARE_PATTERN 0x10F +#endif #ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX #define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX 0x2217 #endif @@ -973,6 +989,9 @@ struct dpcd_usb4_dp_tunneling_info { /* TODO - Use DRM header to replace above once available */ #endif // DP_INTRA_HOP_AUX_REPLY_INDICATION +#ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE +#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50 +#endif union dp_main_line_channel_coding_cap { struct { uint8_t DP_8b_10b_SUPPORTED :1; @@ -1107,4 +1126,139 @@ struct edp_psr_info { uint8_t force_psrsu_cap; }; +struct dprx_states { + bool cable_id_written; +}; + +enum dpcd_downstream_port_max_bpc { + DOWN_STREAM_MAX_8BPC = 0, + DOWN_STREAM_MAX_10BPC, + DOWN_STREAM_MAX_12BPC, + DOWN_STREAM_MAX_16BPC +}; + +enum link_training_offset { + DPRX = 0, + LTTPR_PHY_REPEATER1 = 1, + LTTPR_PHY_REPEATER2 = 2, + LTTPR_PHY_REPEATER3 = 3, + LTTPR_PHY_REPEATER4 = 4, + LTTPR_PHY_REPEATER5 = 5, + LTTPR_PHY_REPEATER6 = 6, + LTTPR_PHY_REPEATER7 = 7, + LTTPR_PHY_REPEATER8 = 8 +}; + +#define MAX_REPEATER_CNT 8 + +struct dc_lttpr_caps { + union dpcd_rev revision; + uint8_t mode; + uint8_t max_lane_count; + uint8_t max_link_rate; + uint8_t phy_repeater_cnt; + uint8_t max_ext_timeout; + union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding; + union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates; + uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1]; +}; + +struct dc_dongle_dfp_cap_ext { + bool supported; + uint16_t max_pixel_rate_in_mps; + uint16_t max_video_h_active_width; + uint16_t max_video_v_active_height; + struct dp_encoding_format_caps encoding_format_caps; + struct dp_color_depth_caps rgb_color_depth_caps; + struct dp_color_depth_caps ycbcr444_color_depth_caps; + struct dp_color_depth_caps ycbcr422_color_depth_caps; + struct dp_color_depth_caps ycbcr420_color_depth_caps; +}; + +struct dc_dongle_caps { + /* dongle type (DP converter, CV smart dongle) */ + enum display_dongle_type dongle_type; + bool extendedCapValid; + /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER, + indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/ + bool is_dp_hdmi_s3d_converter; + bool is_dp_hdmi_ycbcr422_pass_through; + bool is_dp_hdmi_ycbcr420_pass_through; + bool is_dp_hdmi_ycbcr422_converter; + bool is_dp_hdmi_ycbcr420_converter; + uint32_t dp_hdmi_max_bpc; + uint32_t dp_hdmi_max_pixel_clk_in_khz; + uint32_t dp_hdmi_frl_max_link_bw_in_kbps; + struct dc_dongle_dfp_cap_ext dfp_cap_ext; +}; + +struct dpcd_caps { + union dpcd_rev dpcd_rev; + union max_lane_count max_ln_count; + union max_down_spread max_down_spread; + union dprx_feature dprx_feature; + + /* valid only for eDP v1.4 or higher*/ + uint8_t edp_supported_link_rates_count; + enum dc_link_rate edp_supported_link_rates[8]; + + /* dongle type (DP converter, CV smart dongle) */ + enum display_dongle_type dongle_type; + bool is_dongle_type_one; + /* branch device or sink device */ + bool is_branch_dev; + /* Dongle's downstream count. 
*/ + union sink_count sink_count; + bool is_mst_capable; + /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER, + indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/ + struct dc_dongle_caps dongle_caps; + + uint32_t sink_dev_id; + int8_t sink_dev_id_str[6]; + int8_t sink_hw_revision; + int8_t sink_fw_revision[2]; + + uint32_t branch_dev_id; + int8_t branch_dev_name[6]; + int8_t branch_hw_revision; + int8_t branch_fw_revision[2]; + + bool allow_invalid_MSA_timing_param; + bool panel_mode_edp; + bool dpcd_display_control_capable; + bool ext_receiver_cap_field_present; + bool set_power_state_capable_edp; + bool dynamic_backlight_capable_edp; + union dpcd_fec_capability fec_cap; + struct dpcd_dsc_capabilities dsc_caps; + struct dc_lttpr_caps lttpr_caps; + struct adaptive_sync_caps adaptive_sync_caps; + struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info; + + union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates; + union dp_main_line_channel_coding_cap channel_coding_cap; + union dp_sink_video_fallback_formats fallback_formats; + union dp_fec_capability1 fec_cap1; + union dp_cable_id cable_id; + uint8_t edp_rev; + union edp_alpm_caps alpm_caps; + struct edp_psr_info psr_info; +}; + +union dpcd_sink_ext_caps { + struct { + /* 0 - Sink supports backlight adjust via PWM during SDR/HDR mode + * 1 - Sink supports backlight adjust via AUX during SDR/HDR mode. + */ + uint8_t sdr_aux_backlight_control : 1; + uint8_t hdr_aux_backlight_control : 1; + uint8_t reserved_1 : 2; + uint8_t oled : 1; + uint8_t reserved_2 : 1; + uint8_t miniled : 1; + uint8_t reserved : 1; + } bits; + uint8_t raw; +}; #endif /* DC_DP_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h b/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h new file mode 100644 index 00000000000000..c364744b4c835c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h @@ -0,0 +1,134 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef DC_HDMI_TYPES_H +#define DC_HDMI_TYPES_H + +#include "os_types.h" + +/* Address range from 0x00 to 0x1F.*/ +#define DP_ADAPTOR_TYPE2_SIZE 0x20 +#define DP_ADAPTOR_TYPE2_REG_ID 0x10 +#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D +/* Identifies adaptor as Dual-mode adaptor */ +#define DP_ADAPTOR_TYPE2_ID 0xA0 +/* MHz*/ +#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600 +/* MHz*/ +#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25 +/* kHZ*/ +#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000 +/* kHZ*/ +#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000 + +struct dp_hdmi_dongle_signature_data { + int8_t id[15];/* "DP-HDMI ADAPTOR"*/ + uint8_t eot;/* end of transmition '\x4' */ +}; + +/* DP-HDMI dongle slave address for retrieving dongle signature*/ +#define DP_HDMI_DONGLE_ADDRESS 0x40 +static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR"; +#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04 + + +/* SCDC Address defines (HDMI 2.0)*/ +#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3 +#define HDMI_SCDC_ADDRESS 0x54 +#define HDMI_SCDC_SINK_VERSION 0x01 +#define HDMI_SCDC_SOURCE_VERSION 0x02 +#define HDMI_SCDC_UPDATE_0 0x10 +#define HDMI_SCDC_TMDS_CONFIG 0x20 +#define HDMI_SCDC_SCRAMBLER_STATUS 0x21 +#define HDMI_SCDC_CONFIG_0 0x30 +#define HDMI_SCDC_CONFIG_1 0x31 +#define HDMI_SCDC_SOURCE_TEST_REQ 0x35 +#define HDMI_SCDC_STATUS_FLAGS 0x40 +#define HDMI_SCDC_ERR_DETECT 0x50 +#define HDMI_SCDC_TEST_CONFIG 0xC0 + +#define HDMI_SCDC_MANUFACTURER_OUI 0xD0 +#define HDMI_SCDC_DEVICE_ID 0xDB + +union hdmi_scdc_update_read_data { + uint8_t byte[2]; + struct { + uint8_t STATUS_UPDATE:1; + uint8_t CED_UPDATE:1; + uint8_t RR_TEST:1; + uint8_t RESERVED:5; + uint8_t RESERVED2:8; + } fields; +}; + +union hdmi_scdc_status_flags_data { + uint8_t byte; + struct { + uint8_t CLOCK_DETECTED:1; + uint8_t CH0_LOCKED:1; + uint8_t CH1_LOCKED:1; + uint8_t CH2_LOCKED:1; + uint8_t RESERVED:4; + } fields; +}; + +union hdmi_scdc_ced_data { + uint8_t byte[11]; + struct { + uint8_t CH0_8LOW:8; + uint8_t CH0_7HIGH:7; + uint8_t CH0_VALID:1; + uint8_t CH1_8LOW:8; + uint8_t CH1_7HIGH:7; + uint8_t CH1_VALID:1; + uint8_t CH2_8LOW:8; + uint8_t CH2_7HIGH:7; + uint8_t CH2_VALID:1; + uint8_t CHECKSUM:8; + uint8_t RESERVED:8; + uint8_t RESERVED2:8; + uint8_t RESERVED3:8; + uint8_t RESERVED4:4; + } fields; +}; + +union hdmi_scdc_manufacturer_OUI_data { + uint8_t byte[3]; + struct { + uint8_t Manufacturer_OUI_1:8; + uint8_t Manufacturer_OUI_2:8; + uint8_t Manufacturer_OUI_3:8; + } fields; +}; + +union hdmi_scdc_device_id_data { + uint8_t byte; + struct { + uint8_t Hardware_Minor_Rev:4; + uint8_t Hardware_Major_Rev:4; + } fields; +}; + +#endif /* DC_HDMI_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 848db8676adfd5..cc3d6fb3936405 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -797,6 +797,29 @@ enum dc_timing_3d_format { TIMING_3D_FORMAT_MAX, }; +#define DC_DSC_QP_SET_SIZE 15 +#define DC_DSC_RC_BUF_THRESH_SIZE 14 +struct dc_dsc_rc_params_override { + int32_t rc_model_size; + int32_t rc_buf_thresh[DC_DSC_RC_BUF_THRESH_SIZE]; + int32_t rc_minqp[DC_DSC_QP_SET_SIZE]; + int32_t rc_maxqp[DC_DSC_QP_SET_SIZE]; + int32_t rc_offset[DC_DSC_QP_SET_SIZE]; + + int32_t rc_tgt_offset_hi; + int32_t rc_tgt_offset_lo; + int32_t rc_edge_factor; + int32_t rc_quant_incr_limit0; + int32_t rc_quant_incr_limit1; + + int32_t initial_fullness_offset; + int32_t initial_delay; + + int32_t flatness_min_qp; + int32_t 
flatness_max_qp; + int32_t flatness_det_thresh; +}; + struct dc_dsc_config { uint32_t num_slices_h; /* Number of DSC slices - horizontal */ uint32_t num_slices_v; /* Number of DSC slices - vertical */ @@ -811,6 +834,7 @@ struct dc_dsc_config { #endif bool is_dp; /* indicate if DSC is applied based on DP's capability */ uint32_t mst_pbn; /* pbn of display on dsc mst hub */ + const struct dc_dsc_rc_params_override *rc_params_ovrd; /* DM owned memory. If not NULL, apply custom dsc rc params */ }; /** diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 2e18bcf6b11ad2..cecd807f5ed832 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -31,6 +31,7 @@ #include "grph_object_defs.h" struct link_resource; +enum aux_return_code_type; enum dc_link_fec_state { dc_link_fec_not_ready, @@ -38,15 +39,6 @@ enum dc_link_fec_state { dc_link_fec_enabled }; -struct dc_link_status { - bool link_active; - struct dpcd_caps *dpcd_caps; -}; - -struct dprx_states { - bool cable_id_written; -}; - /* DP MST stream allocation (payload bandwidth number) */ struct link_mst_stream_allocation { /* DIG front */ @@ -101,6 +93,7 @@ struct psr_settings { bool psr_allow_active; // PSR is currently active enum dc_psr_version psr_version; // Internal PSR version, determined based on DPCD bool psr_vtotal_control_support; // Vtotal control is supported by sink + unsigned long long psr_dirty_rects_change_timestamp_ns; // for delay of enabling PSR-SU /* These parameters are calculated in Driver, * based on display timing and Sink capabilities. @@ -158,13 +151,15 @@ struct dc_panel_config { struct dc_dpia_bw_alloc { int sink_verified_bw; // The Verified BW that sink can allocated and use that has been verified already int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated - int padding_bw; // The Padding "Un-used" BW allocated by CM for padding reasons int sink_max_bw; // The Max BW that sink can require/support int estimated_bw; // The estimated available BW for this DPIA int bw_granularity; // BW Granularity bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM + bool response_ready; // Response ready from the CM side }; +#define MAX_SINKS_PER_LINK 4 + /* * A link contains one or more sinks and their connected status. * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported. @@ -279,6 +274,7 @@ struct dc_link { bool dp_keep_receiver_powered; bool dp_skip_DID2; bool dp_skip_reset_segment; + bool dp_skip_fs_144hz; bool dp_mot_reset_segment; /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */ bool dpia_mst_dsc_always_on; @@ -293,11 +289,12 @@ struct dc_link { struct gpio *hpd_gpio; enum dc_link_fec_state fec_state; + bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly + struct dc_panel_config panel_config; struct phy_state phy_state; }; -const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link); /** * dc_get_link_at_index() - Return an enumerated dc_link. 
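A hedged sketch of how the dc_dpia_bw_alloc bookkeeping above is intended to be exercised; the allocation entry points are declared further down in this header, peak_bw is an assumed caller-supplied value, and the ep_type guard is an assumption based on the existing display-endpoint enum:

    if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
            /* On plug: ask the Connection Manager for the sink's peak BW;
             * returns the bandwidth actually granted, or 0 on failure. */
            int granted = dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
                            link, peak_bw);

            /* The CM replies asynchronously: dc_link_get_usb4_req_bw_resp()
             * records the outcome in sink_allocated_bw / estimated_bw and
             * sets response_ready in struct dc_dpia_bw_alloc. */
            if (granted == 0)
                    DC_LOG_WARNING("DPIA BW allocation rejected\n");
    }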
@@ -335,15 +332,17 @@ static inline bool dc_get_edp_link_panel_inst(const struct dc *dc, unsigned int *inst_out) { struct dc_link *edp_links[MAX_NUM_EDP]; - int edp_num; + int edp_num, i; + *inst_out = 0; if (link->connector_signal != SIGNAL_TYPE_EDP) return false; get_edp_links(dc, edp_links, &edp_num); - if ((edp_num > 1) && (link->link_index > edp_links[0]->link_index)) - *inst_out = 1; - else - *inst_out = 0; + for (i = 0; i < edp_num; i++) { + if (link == edp_links[i]) + break; + (*inst_out)++; + } return true; } @@ -365,11 +364,6 @@ bool dc_link_get_backlight_level_nits(struct dc_link *link, uint32_t *backlight_millinits, uint32_t *backlight_millinits_peak); -bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable); - -bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits); -bool dc_link_set_default_brightness_aux(struct dc_link *link); - int dc_link_get_backlight_level(const struct dc_link *dc_link); int dc_link_get_target_backlight_pwm(const struct dc_link *link); @@ -383,38 +377,7 @@ bool dc_link_setup_psr(struct dc_link *dc_link, const struct dc_stream_state *stream, struct psr_config *psr_config, struct psr_context *psr_context); -bool dc_power_alpm_dpcd_enable(struct dc_link *link, bool enable); - -void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency); - -void dc_link_blank_all_dp_displays(struct dc *dc); -void dc_link_blank_all_edp_displays(struct dc *dc); - -void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init); -bool dc_link_set_sink_vtotal_in_psr_active(const struct dc_link *link, - uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su); - -/* Request DC to detect if there is a Panel connected. - * boot - If this call is during initial boot. - * Return false for any type of detection failure or MST detection - * true otherwise. True meaning further action is required (status update - * and OS notification). - */ -enum dc_detect_reason { - DETECT_REASON_BOOT, - DETECT_REASON_RESUMEFROMS3S4, - DETECT_REASON_HPD, - DETECT_REASON_HPDRX, - DETECT_REASON_FALLBACK, - DETECT_REASON_RETRAIN, - DETECT_REASON_TDR, -}; - -bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); bool dc_link_get_hpd_state(struct dc_link *dc_link); -enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); -enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); -enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt). 
* Return: @@ -436,7 +399,11 @@ bool dc_link_wait_for_t12(struct dc_link *link); void dc_link_dp_handle_automated_test(struct dc_link *link); void dc_link_dp_handle_link_loss(struct dc_link *link); bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link); - +bool dc_link_check_link_loss_status(struct dc_link *link, + union hpd_irq_data *hpd_irq_dpcd_data); +enum dc_status dc_link_dp_read_hpd_rx_irq_data( + struct dc_link *link, + union hpd_irq_data *irq_data); struct dc_sink_init_data; struct dc_sink *dc_link_add_remote_sink( @@ -451,36 +418,6 @@ void dc_link_remove_remote_sink( /* Used by diagnostics for virtual link at the moment */ -void dc_link_dp_set_drive_settings( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings); - -bool dc_link_dp_perform_link_training_skip_aux( - struct dc_link *link, - const struct link_resource *link_res, - const struct dc_link_settings *link_setting); - -enum link_training_result dc_link_dp_perform_link_training( - struct dc_link *link, - const struct link_resource *link_res, - const struct dc_link_settings *link_settings, - bool skip_video_pattern); - -bool dc_link_dp_sync_lt_begin(struct dc_link *link); - -enum link_training_result dc_link_dp_sync_lt_attempt( - struct dc_link *link, - const struct link_resource *link_res, - struct dc_link_settings *link_setting, - struct dc_link_training_overrides *lt_settings); - -bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down); - -void dc_link_dp_enable_hpd(const struct dc_link *link); - -void dc_link_dp_disable_hpd(const struct dc_link *link); - bool dc_link_dp_set_test_pattern( struct dc_link *link, enum dp_test_pattern test_pattern, @@ -491,19 +428,28 @@ bool dc_link_dp_set_test_pattern( bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap); +/** + ***************************************************************************** + * Function: dc_link_enable_hpd_filter + * + * @brief + * If enable is true, programs HPD filter on associated HPD line to default + * values dependent on link->connector_signal + * + * If enable is false, programs HPD filter on associated HPD line with no + * delays on connect or disconnect + * + * @param [in] link: pointer to the dc link + * @param [in] enable: boolean specifying whether to enable hpd + ***************************************************************************** + */ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable); bool dc_link_is_dp_sink_present(struct dc_link *link); - -bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type); /* * DPCD access interfaces */ -#ifdef CONFIG_DRM_AMD_DC_HDCP -bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal); -bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal); -#endif void dc_link_set_drive_settings(struct dc *dc, struct link_training_settings *lt_settings, const struct dc_link *link); @@ -523,9 +469,6 @@ void dc_link_set_test_pattern(struct dc_link *link, const struct link_training_settings *p_link_settings, const unsigned char *p_custom_pattern, unsigned int cust_pattern_size); -uint32_t dc_link_bandwidth_kbps( - const struct dc_link *link, - const struct dc_link_settings *link_setting); const struct dc_link_settings *dc_link_get_link_cap( const struct dc_link *link); @@ -547,25 +490,16 @@ bool dc_submit_i2c_oem( struct dc *dc, struct i2c_command *cmd); -uint32_t dc_bandwidth_in_kbps_from_timing( - const struct dc_crtc_timing *timing);
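The two bandwidth getters removed from this header above are re-homed in dc.h (see the comments earlier in this series). As a rough usage sketch, with a hypothetical DM-side helper name, a timing fits a link when its required bandwidth does not exceed the link's effective bandwidth:

    static bool dm_timing_fits_link(const struct dc_link *link,
                    const struct dc_link_settings *cur_settings,
                    const struct dc_crtc_timing *timing)
    {
            uint32_t req_bw_kbps = dc_bandwidth_in_kbps_from_timing(timing);
            uint32_t link_bw_kbps = dc_link_bandwidth_kbps(link, cur_settings);

            return req_bw_kbps <= link_bw_kbps;
    }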
- bool dc_link_is_fec_supported(const struct dc_link *link); bool dc_link_should_enable_fec(const struct dc_link *link); uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw); enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link); -void dc_link_get_cur_link_res(const struct dc_link *link, - struct link_resource *link_res); /* take a snapshot of current link resource allocation state */ void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map); /* restore link resource allocation state from a snapshot */ void dc_restore_link_res_map(const struct dc *dc, uint32_t *map); -void dc_link_clear_dprx_states(struct dc_link *link); -struct gpio *get_hpd_gpio(struct dc_bios *dcb, - struct graphics_object_id link_id, - struct gpio_service *gpio_service); void dp_trace_reset(struct dc_link *link); bool dc_dp_trace_is_initialized(struct dc_link *link); unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link, @@ -579,6 +513,65 @@ struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link, bool in_detection); unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link); -/* Destruct the mst topology of the link and reset the allocated payload table */ -bool reset_cur_dp_mst_topology(struct dc_link *link); +/* Attempt to transfer the given aux payload. This function does not perform + * retries or handle error states. The reply is returned in the payload->reply + * and the result through operation_result. Returns the number of bytes + * transferred, or -1 on failure. + */ +int dc_link_aux_transfer_raw(struct ddc_service *ddc, + struct aux_payload *payload, + enum aux_return_code_type *operation_result); + +enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link, + struct dc_link_settings *link_setting); +void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on); +bool dc_link_decide_edp_link_settings(struct dc_link *link, + struct dc_link_settings *link_setting, + uint32_t req_bw); +void dc_link_edp_panel_backlight_power_on(struct dc_link *link, + bool wait_for_hpd); + +/* + * USB4 DPIA BW ALLOCATION PUBLIC FUNCTIONS + */ +/* + * Send a request from DP-Tx requesting to allocate BW remotely after + * allocating it locally. This will get processed by CM and a CB function + * will be called. + * + * @link: pointer to the dc_link struct instance + * @req_bw: The requested bw in Kbyte to allocate + * + * return: none + */ +void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw); + +/* + * CB function for when the status of the Req above is complete. We will + * find out the result of allocating on CM and update structs accordingly + * + * @link: pointer to the dc_link struct instance + * @bw: Allocated or Estimated BW depending on the result + * @result: Response type + * + * return: none + */ +void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t result); + +/* + * Handle the USB4 BW Allocation related functionality here: + * Plug => Try to allocate max bw from timing parameters supported by the sink + * Unplug => de-allocate bw + * + * @link: pointer to the dc_link struct instance + * @peak_bw: Peak bw used by the link/sink + * + * return: allocated bw, else 0 + */ +int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw); + +/* TODO: this is not meant to be exposed to DM.
Should switch to stream update + * interface i.e stream_update->dsc_config + */ +bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx); #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index dfd3df1d2f7e6c..567452599659c3 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -190,6 +190,7 @@ struct dc_stream_state { struct dc_info_packet vsp_infopacket; struct dc_info_packet hfvsif_infopacket; struct dc_info_packet vtem_infopacket; + struct dc_info_packet adaptive_sync_infopacket; uint8_t dsc_packed_pps[128]; struct rect src; /* composition area */ struct rect dst; /* stream addressable area */ @@ -313,6 +314,7 @@ struct dc_stream_update { struct dc_info_packet *vsp_infopacket; struct dc_info_packet *hfvsif_infopacket; struct dc_info_packet *vtem_infopacket; + struct dc_info_packet *adaptive_sync_infopacket; bool *dpms_off; bool integer_scaling_update; bool *allow_freesync; @@ -543,9 +545,8 @@ bool dc_stream_get_crtc_position(struct dc *dc, unsigned int *nom_v_pos); #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) -bool dc_stream_forward_crc_window(struct dc *dc, +bool dc_stream_forward_crc_window(struct dc_stream_state *stream, struct rect *rect, - struct dc_stream_state *stream, bool is_stop); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index dc78e2404b4877..27d0242d6cbd40 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -32,7 +32,9 @@ #include "os_types.h" #include "fixed31_32.h" #include "irq_types.h" +#include "dc_ddc_types.h" #include "dc_dp_types.h" +#include "dc_hdmi_types.h" #include "dc_hw_types.h" #include "dal_types.h" #include "grph_object_defs.h" @@ -82,13 +84,8 @@ struct dc_perf_trace { unsigned long last_entry_write; }; -#define DC_MAX_EDID_BUFFER_SIZE 2048 -#define DC_EDID_BLOCK_SIZE 128 #define MAX_SURFACE_NUM 4 #define NUM_PIXEL_FORMATS 10 -#define MAX_REPEATER_CNT 8 - -#include "dc_ddc_types.h" enum tiling_mode { TILING_MODE_INVALID, @@ -374,66 +371,6 @@ struct dc_csc_adjustments { struct fixed31_32 hue; }; -enum dpcd_downstream_port_max_bpc { - DOWN_STREAM_MAX_8BPC = 0, - DOWN_STREAM_MAX_10BPC, - DOWN_STREAM_MAX_12BPC, - DOWN_STREAM_MAX_16BPC -}; - - -enum link_training_offset { - DPRX = 0, - LTTPR_PHY_REPEATER1 = 1, - LTTPR_PHY_REPEATER2 = 2, - LTTPR_PHY_REPEATER3 = 3, - LTTPR_PHY_REPEATER4 = 4, - LTTPR_PHY_REPEATER5 = 5, - LTTPR_PHY_REPEATER6 = 6, - LTTPR_PHY_REPEATER7 = 7, - LTTPR_PHY_REPEATER8 = 8 -}; - -struct dc_lttpr_caps { - union dpcd_rev revision; - uint8_t mode; - uint8_t max_lane_count; - uint8_t max_link_rate; - uint8_t phy_repeater_cnt; - uint8_t max_ext_timeout; - union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding; - union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates; - uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1]; -}; - -struct dc_dongle_dfp_cap_ext { - bool supported; - uint16_t max_pixel_rate_in_mps; - uint16_t max_video_h_active_width; - uint16_t max_video_v_active_height; - struct dp_encoding_format_caps encoding_format_caps; - struct dp_color_depth_caps rgb_color_depth_caps; - struct dp_color_depth_caps ycbcr444_color_depth_caps; - struct dp_color_depth_caps ycbcr422_color_depth_caps; - struct dp_color_depth_caps ycbcr420_color_depth_caps; -}; - -struct dc_dongle_caps { - /* dongle type (DP converter, CV smart dongle) */ - enum display_dongle_type dongle_type; - bool 
extendedCapValid; - /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER, - indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/ - bool is_dp_hdmi_s3d_converter; - bool is_dp_hdmi_ycbcr422_pass_through; - bool is_dp_hdmi_ycbcr420_pass_through; - bool is_dp_hdmi_ycbcr422_converter; - bool is_dp_hdmi_ycbcr420_converter; - uint32_t dp_hdmi_max_bpc; - uint32_t dp_hdmi_max_pixel_clk_in_khz; - uint32_t dp_hdmi_frl_max_link_bw_in_kbps; - struct dc_dongle_dfp_cap_ext dfp_cap_ext; -}; /* Scaling format */ enum scaling_transformation { SCALING_TRANSFORMATION_UNINITIALIZED, @@ -690,6 +627,7 @@ struct psr_config { uint8_t su_y_granularity; unsigned int line_time_in_us; uint8_t rate_control_caps; + uint16_t dsc_slice_height; }; union dmcu_psr_level { @@ -801,6 +739,7 @@ struct psr_context { uint8_t su_y_granularity; unsigned int line_time_in_us; uint8_t rate_control_caps; + uint16_t dsc_slice_height; }; struct colorspace_transform { @@ -1000,4 +939,47 @@ struct otg_phy_mux { }; #endif +enum dc_detect_reason { + DETECT_REASON_BOOT, + DETECT_REASON_RESUMEFROMS3S4, + DETECT_REASON_HPD, + DETECT_REASON_HPDRX, + DETECT_REASON_FALLBACK, + DETECT_REASON_RETRAIN, + DETECT_REASON_TDR, +}; + +struct dc_link_status { + bool link_active; + struct dpcd_caps *dpcd_caps; +}; + +#if defined(CONFIG_DRM_AMD_DC_HDCP) +union hdcp_rx_caps { + struct { + uint8_t version; + uint8_t reserved; + struct { + uint8_t repeater : 1; + uint8_t hdcp_capable : 1; + uint8_t reserved : 6; + } byte0; + } fields; + uint8_t raw[3]; +}; + +union hdcp_bcaps { + struct { + uint8_t HDCP_CAPABLE:1; + uint8_t REPEATER:1; + uint8_t RESERVED:6; + } bits; + uint8_t raw; +}; + +struct hdcp_caps { + union hdcp_rx_caps rx_caps; + union hdcp_bcaps bcaps; +}; +#endif #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index e69f1899fbf054..c850ed49281f39 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -26,7 +26,7 @@ #ifndef __DAL_AUX_ENGINE_DCE110_H__ #define __DAL_AUX_ENGINE_DCE110_H__ -#include "i2caux_interface.h" +#include "gpio_service_interface.h" #include "inc/hw/aux_engine.h" enum aux_return_code_type; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 09260c23c3bded..fa314493ffc500 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -29,7 +29,6 @@ #include "link_encoder.h" #include "dce_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index 2d3201b77d6a0d..1e2d2cbe2c3737 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -417,6 +417,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, copy_settings_data->relock_delay_frame_cnt = 0; if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8) copy_settings_data->relock_delay_frame_cnt = 2; + copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height; dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); dc_dmub_srv_cmd_execute(dc->dmub_srv); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 913a1fe6b3dafb..fb3fd5b7c78b99 100644 
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -46,7 +46,7 @@ #include "link_encoder.h" #include "link_enc_cfg.h" #include "link_hwss.h" -#include "dc_link_dp.h" +#include "link.h" #include "dccg.h" #include "clock_source.h" #include "clk_mgr.h" @@ -54,7 +54,6 @@ #include "audio.h" #include "reg_helper.h" #include "panel_cntl.h" -#include "inc/link_dpcd.h" #include "dpcd_defs.h" /* include DCE11 register header files */ #include "dce/dce_11_0_d.h" @@ -65,7 +64,6 @@ #include "dcn10/dcn10_hw_sequencer.h" -#include "link/link_dp_trace.h" #include "dce110_hw_sequencer.h" #define GAMMA_HW_POINTS_NUM 256 @@ -653,10 +651,16 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets( pipe_ctx->stream_res.stream_enc, &pipe_ctx->stream_res.encoder_info_frame); - else + else { + if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num) + pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num( + pipe_ctx->stream_res.stream_enc, + &pipe_ctx->stream_res.encoder_info_frame); + pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets( pipe_ctx->stream_res.stream_enc, &pipe_ctx->stream_res.encoder_info_frame); + } } void dce110_enable_stream(struct pipe_ctx *pipe_ctx) @@ -737,7 +741,7 @@ void dce110_edp_wait_for_hpd_ready( /* obtain HPD */ /* TODO what to do with this? */ - hpd = get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service); + hpd = link_get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service); if (!hpd) { BREAK_TO_DEBUGGER(); @@ -807,19 +811,19 @@ void dce110_edp_power_control( div64_u64(dm_get_elapse_time_in_ns( ctx, current_ts, - dp_trace_get_edp_poweroff_timestamp(link)), 1000000); + link_dp_trace_get_edp_poweroff_timestamp(link)), 1000000); unsigned long long time_since_edp_poweron_ms = div64_u64(dm_get_elapse_time_in_ns( ctx, current_ts, - dp_trace_get_edp_poweron_timestamp(link)), 1000000); + link_dp_trace_get_edp_poweron_timestamp(link)), 1000000); DC_LOG_HW_RESUME_S3( "%s: transition: power_up=%d current_ts=%llu edp_poweroff=%llu edp_poweron=%llu time_since_edp_poweroff_ms=%llu time_since_edp_poweron_ms=%llu", __func__, power_up, current_ts, - dp_trace_get_edp_poweroff_timestamp(link), - dp_trace_get_edp_poweron_timestamp(link), + link_dp_trace_get_edp_poweroff_timestamp(link), + link_dp_trace_get_edp_poweron_timestamp(link), time_since_edp_poweroff_ms, time_since_edp_poweron_ms); @@ -834,7 +838,7 @@ void dce110_edp_power_control( link->panel_config.pps.extra_t12_ms; /* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time. 
*/ - if (dp_trace_get_edp_poweroff_timestamp(link) != 0) { + if (link_dp_trace_get_edp_poweroff_timestamp(link) != 0) { if (time_since_edp_poweroff_ms < remaining_min_edp_poweroff_time_ms) remaining_min_edp_poweroff_time_ms = remaining_min_edp_poweroff_time_ms - time_since_edp_poweroff_ms; @@ -875,14 +879,16 @@ void dce110_edp_power_control( if (ctx->dc->ctx->dmub_srv && ctx->dc->debug.dmub_command_table) { - if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) + + if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) { bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, LVTMA_CONTROL_POWER_ON, - panel_instance); - else + panel_instance, link->link_powered_externally); + } else { bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, LVTMA_CONTROL_POWER_OFF, - panel_instance); + panel_instance, link->link_powered_externally); + } } bp_result = link_transmitter_control(ctx->dc_bios, &cntl); @@ -892,13 +898,13 @@ void dce110_edp_power_control( __func__, (power_up ? "On":"Off"), bp_result); - dp_trace_set_edp_power_timestamp(link, power_up); + link_dp_trace_set_edp_power_timestamp(link, power_up); DC_LOG_HW_RESUME_S3( "%s: updated values: edp_poweroff=%llu edp_poweron=%llu\n", __func__, - dp_trace_get_edp_poweroff_timestamp(link), - dp_trace_get_edp_poweron_timestamp(link)); + link_dp_trace_get_edp_poweroff_timestamp(link), + link_dp_trace_get_edp_poweron_timestamp(link)); if (bp_result != BP_RESULT_OK) DC_LOG_ERROR( @@ -926,14 +932,14 @@ void dce110_edp_wait_for_T12( return; if (!link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl) && - dp_trace_get_edp_poweroff_timestamp(link) != 0) { + link_dp_trace_get_edp_poweroff_timestamp(link) != 0) { unsigned int t12_duration = 500; // Default T12 as per spec unsigned long long current_ts = dm_get_timestamp(ctx); unsigned long long time_since_edp_poweroff_ms = div64_u64(dm_get_elapse_time_in_ns( ctx, current_ts, - dp_trace_get_edp_poweroff_timestamp(link)), 1000000); + link_dp_trace_get_edp_poweroff_timestamp(link)), 1000000); t12_duration += link->panel_config.pps.extra_t12_ms; // Add extra T12 @@ -941,7 +947,6 @@ void dce110_edp_wait_for_T12( msleep(t12_duration - time_since_edp_poweroff_ms); } } - /*todo: cloned in stream enc, fix*/ /* * @brief @@ -1015,21 +1020,25 @@ void dce110_edp_backlight_control( * we shouldn't be doing power-sequencing, hence we can skip * waiting for T7-ready. */ - edp_receiver_ready_T7(link); + link_edp_receiver_ready_T7(link); else DC_LOG_DC("edp_receiver_ready_T7 skipped\n"); } + /* Setting link_powered_externally will bypass delays in the backlight + * as they are not required if the link is being powered by a different + * source. 
+ */ if (ctx->dc->ctx->dmub_srv && ctx->dc->debug.dmub_command_table) { if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, LVTMA_CONTROL_LCD_BLON, - panel_instance); + panel_instance, link->link_powered_externally); else ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, LVTMA_CONTROL_LCD_BLOFF, - panel_instance); + panel_instance, link->link_powered_externally); } link_transmitter_control(ctx->dc_bios, &cntl); @@ -1042,7 +1051,7 @@ void dce110_edp_backlight_control( if (link->dpcd_sink_ext_caps.bits.oled || link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 || link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1) - dc_link_backlight_enable_aux(link, enable); + link_backlight_enable_aux(link, enable); /*edp 1.2*/ if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF) { @@ -1054,7 +1063,7 @@ void dce110_edp_backlight_control( * we shouldn't be doing power-sequencing, hence we can skip * waiting for T9-ready. */ - edp_add_delay_for_T9(link); + link_edp_add_delay_for_T9(link); else DC_LOG_DC("edp_receiver_ready_T9 skipped\n"); } @@ -1142,6 +1151,10 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) struct dc_link *link = stream->link; struct dc *dc = pipe_ctx->stream->ctx->dc; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + struct dccg *dccg = dc->res_pool->dccg; + struct timing_generator *tg = pipe_ctx->stream_res.tg; + struct dtbclk_dto_params dto_params = {0}; + int dp_hpo_inst; if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) { pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets( @@ -1150,7 +1163,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.stream_enc); } - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets( pipe_ctx->stream_res.hpo_dp_stream_enc); } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) @@ -1161,7 +1174,16 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) link_hwss->reset_stream_encoder(pipe_ctx); - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { + dto_params.otg_inst = tg->inst; + dto_params.timing = &pipe_ctx->stream->timing; + dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); + dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst); + } + + if (link_is_dp_128b_132b_signal(pipe_ctx)) { /* TODO: This looks like a bug to me as we are disabling HPO IO when * we are just disabling a single HPO stream. Shouldn't we disable HPO * HW control only when HPOs for all streams are disabled? @@ -1203,7 +1225,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) link->dc->hwss.set_abm_immediate_disable(pipe_ctx); } - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank( pipe_ctx->stream_res.hpo_dp_stream_enc); @@ -1225,7 +1247,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) * we shouldn't be doing power-sequencing, hence we can skip * waiting for T9-ready. 
*/ - edp_receiver_ready_T9(link); + link_edp_receiver_ready_T9(link); } } } @@ -1408,7 +1430,7 @@ static enum dc_status dce110_enable_stream_timing( if (false == pipe_ctx->clock_source->funcs->program_pix_clk( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, - dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings), + link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings), &pipe_ctx->pll_settings)) { BREAK_TO_DEBUGGER(); return DC_ERROR_UNEXPECTED; @@ -1512,7 +1534,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( * To do so, move calling function enable_stream_timing to only be done AFTER calling * function core_link_enable_stream */ - if (!(hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx))) + if (!(hws->wa.dp_hpo_and_otg_sequence && link_is_dp_128b_132b_signal(pipe_ctx))) /* */ /* Do not touch stream timing on seamless boot optimization. */ if (!pipe_ctx->stream->apply_seamless_boot_optimization) @@ -1544,17 +1566,17 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.tg->inst); if (dc_is_dp_signal(pipe_ctx->stream->signal)) - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG); if (!stream->dpms_off) - core_link_enable_stream(context, pipe_ctx); + link_set_dpms_on(context, pipe_ctx); /* DCN3.1 FPGA Workaround * Need to enable HPO DP Stream Encoder before setting OTG master enable. * To do so, move calling function enable_stream_timing to only be done AFTER calling * function core_link_enable_stream */ - if (hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)) { + if (hws->wa.dp_hpo_and_otg_sequence && link_is_dp_128b_132b_signal(pipe_ctx)) { if (!pipe_ctx->stream->apply_seamless_boot_optimization) hws->funcs.enable_stream_timing(pipe_ctx, context, dc); } @@ -1580,7 +1602,7 @@ static void power_down_encoders(struct dc *dc) for (i = 0; i < dc->link_count; i++) { enum signal_type signal = dc->links[i]->connector_signal; - dc_link_blank_dp_stream(dc->links[i], false); + link_blank_dp_stream(dc->links[i], false); if (signal != SIGNAL_TYPE_EDP) signal = SIGNAL_TYPE_NONE; @@ -2063,7 +2085,7 @@ static void dce110_reset_hw_ctx_wrap( * disabled already, no need to disable again. 
*/ if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off) { - core_link_disable_stream(pipe_ctx_old); + link_set_dpms_off(pipe_ctx_old); /* free acquired resources*/ if (pipe_ctx_old->stream_res.audio) { @@ -3034,13 +3056,13 @@ void dce110_enable_dp_link_output( pipes[i].clock_source->funcs->program_pix_clk( pipes[i].clock_source, &pipes[i].stream_res.pix_clk_params, - dp_get_link_encoding_format(link_settings), + link_dp_get_encoding_format(link_settings), &pipes[i].pll_settings); } } } - if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { + if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) { if (dc->clk_mgr->funcs->notify_link_rate_change) dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); } @@ -3057,7 +3079,7 @@ void dce110_enable_dp_link_output( if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY); } void dce110_disable_link_output(struct dc_link *link, @@ -3082,7 +3104,7 @@ void dce110_disable_link_output(struct dc_link *link, link->dc->hwss.edp_power_control(link, false); else if (dmcu != NULL && dmcu->funcs->lock_phy) dmcu->funcs->unlock_phy(dmcu); - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); } static const struct hw_sequencer_funcs dce110_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h index 758f4b3b0087f5..394d83a97f3317 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h @@ -71,7 +71,7 @@ void dce110_optimize_bandwidth( struct dc *dc, struct dc_state *context); -void dp_receiver_power_ctrl(struct dc_link *link, bool on); +void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on); void dce110_edp_power_control( struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index f607a0e28f1490..f62368da875dc5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -581,7 +581,7 @@ static void dpp1_dscl_set_manual_ratio_init( * dpp1_dscl_set_recout - Set the first pixel of RECOUT in the OTG active area * * @dpp: DPP data struct - * @recount: Rectangle information + * @recout: Rectangle information * * This function sets the MPC RECOUT_START and RECOUT_SIZE registers based on * the values specified in the recount parameter. 
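Condensed, illustrative restatement (not verbatim driver code) of the contract introduced in the dce110 hunks above: every enable_lvtma_control() call now forwards link->link_powered_externally as the new bypass_panel_control_wait argument, letting the VBIOS/DMUB path skip panel power-sequencing waits when the panel is powered by an external source:

    if (ctx->dc->ctx->dmub_srv && ctx->dc->debug.dmub_command_table)
            bp_result = ctx->dc_bios->funcs->enable_lvtma_control(
                            ctx->dc_bios,
                            power_up ? LVTMA_CONTROL_POWER_ON : LVTMA_CONTROL_POWER_OFF,
                            panel_instance,
                            link->link_powered_externally);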
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index ba1c0621f0f8af..e8752077571a72 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -172,6 +172,10 @@ struct dcn_hubbub_registers { uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C; uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D; uint32_t SDPIF_REQUEST_RATE_LIMIT; + uint32_t DCHUBBUB_SDPIF_CFG0; + uint32_t DCHUBBUB_SDPIF_CFG1; + uint32_t DCHUBBUB_CLOCK_CNTL; + uint32_t DCHUBBUB_MEM_PWR_MODE_CTRL; }; #define HUBBUB_REG_FIELD_LIST_DCN32(type) \ @@ -362,7 +366,13 @@ struct dcn_hubbub_registers { type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C;\ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D;\ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D;\ - type SDPIF_REQUEST_RATE_LIMIT + type SDPIF_REQUEST_RATE_LIMIT;\ + type DISPCLK_R_DCHUBBUB_GATE_DIS;\ + type DCFCLK_R_DCHUBBUB_GATE_DIS;\ + type SDPIF_MAX_NUM_OUTSTANDING;\ + type DCHUBBUB_ARB_MAX_REQ_OUTSTAND;\ + type SDPIF_PORT_CONTROL;\ + type DET_MEM_PWR_LS_MODE struct dcn_hubbub_shift { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 8f894c1d1d1eb1..a1a29c508394e5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -45,7 +45,6 @@ #include "dcn10_hubp.h" #include "dcn10_hubbub.h" #include "dcn10_cm_common.h" -#include "dc_link_dp.h" #include "dccg.h" #include "clk_mgr.h" #include "link_hwss.h" @@ -56,8 +55,7 @@ #include "dce/dmub_hw_lock_mgr.h" #include "dc_trace.h" #include "dce/dmub_outbox.h" -#include "inc/dc_link_dp.h" -#include "inc/link_dpcd.h" +#include "link.h" #define DC_LOGGER_INIT(logger) @@ -921,7 +919,7 @@ enum dc_status dcn10_enable_stream_timing( if (false == pipe_ctx->clock_source->funcs->program_pix_clk( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, - dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings), + link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings), &pipe_ctx->pll_settings)) { BREAK_TO_DEBUGGER(); return DC_ERROR_UNEXPECTED; @@ -1019,7 +1017,7 @@ static void dcn10_reset_back_end_for_pipe( * VBIOS lit up eDP, so check link status too. 
*/ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) - core_link_disable_stream(pipe_ctx); + link_set_dpms_off(pipe_ctx); else if (pipe_ctx->stream_res.audio) dc->hwss.disable_audio_stream(pipe_ctx); @@ -1566,7 +1564,7 @@ void dcn10_init_hw(struct dc *dc) } /* we want to turn off all dp displays before doing detection */ - dc_link_blank_all_dp_displays(dc); + link_blank_all_dp_displays(dc); if (hws->funcs.enable_power_gating_plane) hws->funcs.enable_power_gating_plane(dc->hwseq, true); @@ -2901,7 +2899,7 @@ void dcn10_blank_pixel_data( dc->hwss.set_pipe(pipe_ctx); stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level); } - } else if (blank) { + } else { dc->hwss.set_abm_immediate_disable(pipe_ctx); if (stream_res->tg->funcs->set_blank) { stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK); @@ -3225,12 +3223,16 @@ static void dcn10_config_stereo_parameters( timing_3d_format == TIMING_3D_FORMAT_INBAND_FA || timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA || timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) { - enum display_dongle_type dongle = \ - stream->link->ddc->dongle_type; - if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER || - dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER || - dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER) - flags->DISABLE_STEREO_DP_SYNC = 1; + + if (stream->link && stream->link->ddc) { + enum display_dongle_type dongle = \ + stream->link->ddc->dongle_type; + + if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER || + dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER || + dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER) + flags->DISABLE_STEREO_DP_SYNC = 1; + } } flags->RIGHT_EYE_POLARITY =\ stream->timing.flags.RIGHT_EYE_3D_POLARITY; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index fbccb7263ad257..c4287147b8537f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -29,7 +29,6 @@ #include "link_encoder.h" #include "dcn10_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index 88ac5f6f4c96cc..0b37bb0e184b28 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -519,7 +519,8 @@ struct dcn_optc_registers { type OTG_CRC_DATA_STREAM_COMBINE_MODE;\ type OTG_CRC_DATA_STREAM_SPLIT_MODE;\ type OTG_CRC_DATA_FORMAT;\ - type OTG_V_TOTAL_LAST_USED_BY_DRR; + type OTG_V_TOTAL_LAST_USED_BY_DRR;\ + type OTG_DRR_TIMING_DBUF_UPDATE_PENDING; #define TG_REG_FIELD_LIST_DCN3_2(type) \ type OTG_H_TIMING_DIV_MODE_MANUAL; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 484e7cdf00b8c8..3c451ab5d3ca27 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -28,7 +28,7 @@ #include "dcn10_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "inc/link_dpcd.h" +#include "link.h" #include "dpcd_defs.h" #include "dcn30/dcn30_afmt.h" @@ -753,12 +753,19 @@ void enc1_stream_encoder_update_dp_info_packets( * use other packetIndex (such as 5,6) for other info packet */ + if (info_frame->adaptive_sync.valid) + enc1_update_generic_info_packet( 
+ enc1, + 5, /* packetIndex */ + &info_frame->adaptive_sync); + /* enable/disable transmission of packet(s). * If enabled, packet transmission begins on the next frame */ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid); REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid); REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid); + REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, info_frame->adaptive_sync.valid); /* This bit is the master enable bit. * When enabling secondary stream engine, @@ -926,7 +933,7 @@ void enc1_stream_encoder_dp_blank( /* disable DP stream */ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0); - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM); /* the encoder stops sending the video stream * at the start of the vertical blanking. @@ -945,7 +952,7 @@ void enc1_stream_encoder_dp_blank( REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true); - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET); } /* output video stream to link encoder */ @@ -1018,7 +1025,7 @@ void enc1_stream_encoder_dp_unblank( REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true); - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); } void enc1_stream_encoder_set_avmute( diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 784a8b6f360de5..42344aec60d620 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -28,6 +28,7 @@ #include "reg_helper.h" #include "dcn20_dsc.h" #include "dsc/dscc_types.h" +#include "dsc/rc_calc.h" static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps); static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals, @@ -200,7 +201,6 @@ static void dsc2_set_config(struct display_stream_compressor *dsc, const struct bool is_config_ok; struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); - DC_LOG_DSC(" "); DC_LOG_DSC("Setting DSC Config at DSC inst %d", dsc->inst); dsc_config_log(dsc, dsc_cfg); is_config_ok = dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, dsc_optc_cfg); @@ -345,10 +345,38 @@ static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_co } } +static void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_params_override *override) +{ + uint8_t i; + + rc->rc_model_size = override->rc_model_size; + for (i = 0; i < DC_DSC_RC_BUF_THRESH_SIZE; i++) + rc->rc_buf_thresh[i] = override->rc_buf_thresh[i]; + for (i = 0; i < DC_DSC_QP_SET_SIZE; i++) { + rc->qp_min[i] = override->rc_minqp[i]; + rc->qp_max[i] = override->rc_maxqp[i]; + rc->ofs[i] = override->rc_offset[i]; + } + + rc->rc_tgt_offset_hi = override->rc_tgt_offset_hi; + rc->rc_tgt_offset_lo = override->rc_tgt_offset_lo; + rc->rc_edge_factor = override->rc_edge_factor; + rc->rc_quant_incr_limit0 = override->rc_quant_incr_limit0; + rc->rc_quant_incr_limit1 = override->rc_quant_incr_limit1; + + rc->initial_fullness_offset = override->initial_fullness_offset; + rc->initial_xmit_delay = override->initial_delay; + + rc->flatness_min_qp = override->flatness_min_qp; + rc->flatness_max_qp = override->flatness_max_qp; + 
rc->flatness_det_thresh = override->flatness_det_thresh; +} + static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals, struct dsc_optc_config *dsc_optc_cfg) { struct dsc_parameters dsc_params; + struct rc_params rc; /* Validate input parameters */ ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_h); @@ -413,7 +441,12 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_ dsc_reg_vals->pps.native_420 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420); dsc_reg_vals->pps.simple_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422); - if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &dsc_params)) { + calc_rc_params(&rc, &dsc_reg_vals->pps); + + if (dsc_cfg->dc_dsc_cfg.rc_params_ovrd) + dsc_override_rc_params(&rc, dsc_cfg->dc_dsc_cfg.rc_params_ovrd); + + if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &rc, &dsc_params)) { dm_output_to_console("%s: DSC config failed\n", __func__); return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 6291a241158ad6..b83873a3a534ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -46,16 +46,15 @@ #include "dchubbub.h" #include "reg_helper.h" #include "dcn10/dcn10_cm_common.h" -#include "dc_link_dp.h" #include "vm_helper.h" #include "dccg.h" #include "dc_dmub_srv.h" #include "dce/dmub_hw_lock_mgr.h" #include "hw_sequencer.h" -#include "inc/link_dpcd.h" #include "dpcd_defs.h" #include "inc/link_enc_cfg.h" #include "link_hwss.h" +#include "link.h" #define DC_LOGGER_INIT(logger) @@ -582,6 +581,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream_res.gsl_group != 0) dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false); + if (hubp->funcs->hubp_update_mall_sel) + hubp->funcs->hubp_update_mall_sel(hubp, 0, false); + dc->hwss.set_flip_control_gsl(pipe_ctx, false); hubp->funcs->hubp_clk_cntl(hubp, false); @@ -605,6 +607,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) { + bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom; + struct timing_generator *tg = is_phantom ? 
pipe_ctx->stream_res.tg : NULL; + DC_LOGGER_INIT(dc->ctx->logger); if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated) @@ -612,6 +617,12 @@ void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) dcn20_plane_atomic_disable(dc, pipe_ctx); + /* Turn back off the phantom OTG after the phantom plane is fully disabled + */ + if (is_phantom) + if (tg && tg->funcs->disable_phantom_crtc) + tg->funcs->disable_phantom_crtc(tg); + DC_LOG_DC("Power down front end %d\n", pipe_ctx->pipe_idx); } @@ -700,7 +711,7 @@ enum dc_status dcn20_enable_stream_timing( if (false == pipe_ctx->clock_source->funcs->program_pix_clk( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, - dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings), + link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings), &pipe_ctx->pll_settings)) { BREAK_TO_DEBUGGER(); return DC_ERROR_UNEXPECTED; @@ -1766,6 +1777,15 @@ static void dcn20_program_pipe( &pipe_ctx->stream->bit_depth_params, &pipe_ctx->stream->clamping); } + + /* Set ABM pipe after other pipe configurations done */ + if (pipe_ctx->plane_state->visible) { + if (pipe_ctx->stream_res.abm) { + dc->hwss.set_pipe(pipe_ctx); + pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm, + pipe_ctx->stream->abm_level); + } + } } void dcn20_program_front_end_for_ctx( @@ -1803,6 +1823,20 @@ void dcn20_program_front_end_for_ctx( dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i], &context->res_ctx.pipe_ctx[i]); + /* When disabling phantom pipes, turn on phantom OTG first (so we can get double + * buffer updates properly) + */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream; + + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream && + dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { + struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg; + + if (tg->funcs->enable_crtc) + tg->funcs->enable_crtc(tg); + } + } /* OTG blank before disabling all front ends */ for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable @@ -1999,8 +2033,11 @@ void dcn20_prepare_bandwidth( } } - /* program dchubbub watermarks */ - dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub, + /* program dchubbub watermarks: + * For assigning wm_optimized_required, use |= operator since we don't want + * to clear the value if the optimize has not happened yet + */ + dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, false); @@ -2359,7 +2396,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, params.link_settings.link_rate = link_settings->link_rate; - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank( pipe_ctx->stream_res.hpo_dp_stream_enc, @@ -2412,7 +2449,7 @@ static void dcn20_reset_back_end_for_pipe( * VBIOS lit up eDP, so check link status too. 
*/ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) - core_link_disable_stream(pipe_ctx); + link_set_dpms_off(pipe_ctx); else if (pipe_ctx->stream_res.audio) dc->hwss.disable_audio_stream(pipe_ctx); @@ -2432,7 +2469,7 @@ static void dcn20_reset_back_end_for_pipe( } } else if (pipe_ctx->stream_res.dsc) { - dp_set_dsc_enable(pipe_ctx, false); + link_set_dsc_enable(pipe_ctx, false); } /* by upper caller loop, parent pipe: pipe0, will be reset last. @@ -2615,6 +2652,37 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->mpcc_id = mpcc_id; } +static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) +{ + switch (link->link_enc->transmitter) { + case TRANSMITTER_UNIPHY_A: + return PHYD32CLKA; + case TRANSMITTER_UNIPHY_B: + return PHYD32CLKB; + case TRANSMITTER_UNIPHY_C: + return PHYD32CLKC; + case TRANSMITTER_UNIPHY_D: + return PHYD32CLKD; + case TRANSMITTER_UNIPHY_E: + return PHYD32CLKE; + default: + return PHYD32CLKA; + } +} + +static int get_odm_segment_count(struct pipe_ctx *pipe_ctx) +{ + struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; + int count = 1; + + while (odm_pipe != NULL) { + count++; + odm_pipe = odm_pipe->next_odm_pipe; + } + + return count; +} + void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) { enum dc_lane_count lane_count = @@ -2628,12 +2696,43 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) struct timing_generator *tg = pipe_ctx->stream_res.tg; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dtbclk_dto_params dto_params = {0}; + struct dccg *dccg = dc->res_pool->dccg; + enum phyd32clk_clock_source phyd32clk; + int dp_hpo_inst; + struct dce_hwseq *hws = dc->hwseq; + unsigned int k1_div = PIXEL_RATE_DIV_NA; + unsigned int k2_div = PIXEL_RATE_DIV_NA; - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { if (dc->hwseq->funcs.setup_hpo_hw_control) dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true); } + if (link_is_dp_128b_132b_signal(pipe_ctx)) { + dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; + dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst); + + phyd32clk = get_phyd32clk_src(link); + dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); + + dto_params.otg_inst = tg->inst; + dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; + dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); + dto_params.timing = &pipe_ctx->stream->timing; + dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + } + + if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) { + hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div); + + dc->res_pool->dccg->funcs->set_pixel_rate_div( + dc->res_pool->dccg, + pipe_ctx->stream_res.tg->inst, + k1_div, k2_div); + } + link_hwss->setup_stream_encoder(pipe_ctx); if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) { @@ -2644,7 +2743,7 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) dc->hwss.update_info_frame(pipe_ctx); if (dc_is_dp_signal(pipe_ctx->stream->signal)) - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); /* enable early control to avoid corruption on DP monitor*/ active_total_with_borders = diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c index 2f9bfaeaba8d61..51a57dae181145 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c @@ -29,7 +29,6 @@ #include "link_encoder.h" #include "dcn20_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 8a0dd0d7134b3b..3af24ef9cb2de9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -62,7 +62,6 @@ #include "dml/display_mode_vba.h" #include "dcn20_dccg.h" #include "dcn20_vmid.h" -#include "dc_link_ddc.h" #include "dce/dce_panel_cntl.h" #include "navi10_ip_offset.h" @@ -90,6 +89,7 @@ #include "amdgpu_socbb.h" +#include "link.h" #define DC_LOGGER_INIT(logger) #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL @@ -1214,7 +1214,7 @@ static void dcn20_resource_destruct(struct dcn20_resource_pool *pool) dcn20_pp_smu_destroy(&pool->base.pp_smu); if (pool->base.oem_device != NULL) - dal_ddc_service_destroy(&pool->base.oem_device); + link_destroy_ddc_service(&pool->base.oem_device); } struct hubp *dcn20_hubp_create( @@ -1389,6 +1389,9 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i]; + if (pipe_ctx->top_pipe) + continue; + if (pipe_ctx->stream != dc_stream) continue; @@ -2222,14 +2225,10 @@ enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_stat enum surface_pixel_format surf_pix_format = plane_state->format; unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format); - enum swizzle_mode_values swizzle = DC_SW_LINEAR; - + plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_S; if (bpp == 64) - swizzle = DC_SW_64KB_D; - else - swizzle = DC_SW_64KB_S; + plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_D; - plane_state->tiling_info.gfx9.swizzle = swizzle; return DC_OK; } @@ -2766,7 +2765,7 @@ static bool dcn20_resource_construct( ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; ddc_init_data.id.enum_id = 0; ddc_init_data.id.type = OBJECT_TYPE_GENERIC; - pool->base.oem_device = dal_ddc_service_create(&ddc_init_data); + pool->base.oem_device = link_create_ddc_service(&ddc_init_data); } else { pool->base.oem_device = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index b40489e678f904..42865d6c0cdd1e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -29,7 +29,7 @@ #include "dcn20_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "inc/link_dpcd.h" +#include "link.h" #include "dpcd_defs.h" #define DC_LOGGER \ @@ -423,6 +423,22 @@ void enc2_set_dynamic_metadata(struct stream_encoder *enc, } } +static void enc2_stream_encoder_update_dp_info_packets_sdp_line_num( + struct stream_encoder *enc, + struct encoder_info_frame *info_frame) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + if (info_frame->adaptive_sync.valid == true && + info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) { + //00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF + 
REG_UPDATE(DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, 1); + + REG_UPDATE(DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM, + info_frame->sdp_line_num.adaptive_sync_line_num); + } +} + static void enc2_stream_encoder_update_dp_info_packets( struct stream_encoder *enc, const struct encoder_info_frame *info_frame) @@ -530,7 +546,7 @@ void enc2_stream_encoder_dp_unblank( REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true); - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); } static void enc2_dp_set_odm_combine( @@ -587,6 +603,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = { enc2_stream_encoder_update_hdmi_info_packets, .stop_hdmi_info_packets = enc2_stream_encoder_stop_hdmi_info_packets, + .update_dp_info_packets_sdp_line_num = + enc2_stream_encoder_update_dp_info_packets_sdp_line_num, .update_dp_info_packets = enc2_stream_encoder_update_dp_info_packets, .send_immediate_sdp_message = diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c index 7f9ec59ef443ec..8d31fa131cd60d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c @@ -29,7 +29,6 @@ #include "link_encoder.h" #include "dcn201_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c index 69cc192a7e7191..15475c7e2cf93a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -35,7 +35,7 @@ #include "hw/clk_mgr.h" #include "dc_dmub_srv.h" #include "abm.h" - +#include "link.h" #define DC_LOGGER_INIT(logger) @@ -132,8 +132,8 @@ void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx) return; pipe_ctx->stream->dpms_off = false; - core_link_enable_stream(context, pipe_ctx); - core_link_disable_stream(pipe_ctx); + link_set_dpms_on(context, pipe_ctx); + link_set_dpms_off(pipe_ctx); pipe_ctx->stream->dpms_off = true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c index 0a1ba6e7081c2c..eb9abb9f969868 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c @@ -31,7 +31,6 @@ #include "dcn21_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index fbcf0afeae0db9..8f9244fe5c8682 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -1393,15 +1393,13 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx) static enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state) { - enum dc_status result = DC_OK; - if (plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) { plane_state->dcc.enable = 1; /* align to our worst case block width */ plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024; } - result = dcn20_patch_unknown_plane_state(plane_state); - return result; + + return 
dcn20_patch_unknown_plane_state(plane_state); } static const struct resource_funcs dcn21_res_pool_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c index 6f3c2fb60790ed..1fb8fd7afc95e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c @@ -29,7 +29,6 @@ #include "link_encoder.h" #include "dcn30_dio_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" /* #include "dcn3ag/dcn3ag_phy_fw.h" */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index 17df53793c9221..5f9079d3943a67 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -404,6 +404,22 @@ static void enc3_read_state(struct stream_encoder *enc, struct enc_state *s) } } +void enc3_stream_encoder_update_dp_info_packets_sdp_line_num( + struct stream_encoder *enc, + struct encoder_info_frame *info_frame) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + if (info_frame->adaptive_sync.valid == true && + info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) { + //00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF + REG_UPDATE(DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, 1); + + REG_UPDATE(DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM, + info_frame->sdp_line_num.adaptive_sync_line_num); + } +} + void enc3_stream_encoder_update_dp_info_packets( struct stream_encoder *enc, const struct encoder_info_frame *info_frame) @@ -452,12 +468,20 @@ void enc3_stream_encoder_update_dp_info_packets( * use other packetIndex (such as 5,6) for other info packet */ + if (info_frame->adaptive_sync.valid) + enc->vpg->funcs->update_generic_info_packet( + enc->vpg, + 5, /* packetIndex */ + &info_frame->adaptive_sync, + true); + /* enable/disable transmission of packet(s). * If enabled, packet transmission begins on the next frame */ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid); REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid); REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid); + REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, info_frame->adaptive_sync.valid); /* This bit is the master enable bit. 
* When enabling secondary stream engine, @@ -803,6 +827,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = { enc3_stream_encoder_update_hdmi_info_packets, .stop_hdmi_info_packets = enc3_stream_encoder_stop_hdmi_info_packets, + .update_dp_info_packets_sdp_line_num = + enc3_stream_encoder_update_dp_info_packets_sdp_line_num, .update_dp_info_packets = enc3_stream_encoder_update_dp_info_packets, .stop_dp_info_packets = diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h index 54ee230e7f98d8..06310973ded2d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h @@ -292,6 +292,10 @@ void enc3_stream_encoder_update_hdmi_info_packets( void enc3_stream_encoder_stop_hdmi_info_packets( struct stream_encoder *enc); +void enc3_stream_encoder_update_dp_info_packets_sdp_line_num( + struct stream_encoder *enc, + struct encoder_info_frame *info_frame); + void enc3_stream_encoder_update_dp_info_packets( struct stream_encoder *enc, const struct encoder_info_frame *info_frame); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 8c504571126499..df787fcf8e86e0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -50,8 +50,7 @@ #include "dpcd_defs.h" #include "../dcn20/dcn20_hwseq.h" #include "dcn30_resource.h" -#include "inc/dc_link_dp.h" -#include "inc/link_dpcd.h" +#include "link.h" @@ -91,8 +90,8 @@ bool dcn30_set_blend_lut( return result; } -static bool dcn30_set_mpc_shaper_3dlut( - struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) +static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx, + const struct dc_stream_state *stream) { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; int mpcc_id = pipe_ctx->plane_res.hubp->inst; @@ -104,19 +103,18 @@ static bool dcn30_set_mpc_shaper_3dlut( const struct pwl_params *shaper_lut = NULL; //get the shaper lut params if (stream->func_shaper) { - if (stream->func_shaper->type == TF_TYPE_HWPWL) + if (stream->func_shaper->type == TF_TYPE_HWPWL) { shaper_lut = &stream->func_shaper->pwl; - else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format( - stream->func_shaper, - &dpp_base->shaper_params, true); + } else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { + cm_helper_translate_curve_to_hw_format(stream->func_shaper, + &dpp_base->shaper_params, true); shaper_lut = &dpp_base->shaper_params; } } if (stream->lut3d_func && - stream->lut3d_func->state.bits.initialized == 1 && - stream->lut3d_func->state.bits.rmu_idx_valid == 1) { + stream->lut3d_func->state.bits.initialized == 1 && + stream->lut3d_func->state.bits.rmu_idx_valid == 1) { if (stream->lut3d_func->state.bits.rmu_mux_num == 0) mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu0_mux; else if (stream->lut3d_func->state.bits.rmu_mux_num == 1) @@ -125,20 +123,22 @@ static bool dcn30_set_mpc_shaper_3dlut( mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu2_mux; if (mpcc_id_projected != mpcc_id) BREAK_TO_DEBUGGER(); - /*find the reason why logical layer assigned a differant mpcc_id into acquire_post_bldn_3dlut*/ + /* find the reason why logical layer assigned a different + * mpcc_id into acquire_post_bldn_3dlut + */ acquired_rmu = mpc->funcs->acquire_rmu(mpc, mpcc_id, - 
stream->lut3d_func->state.bits.rmu_mux_num); + stream->lut3d_func->state.bits.rmu_mux_num); if (acquired_rmu != stream->lut3d_func->state.bits.rmu_mux_num) BREAK_TO_DEBUGGER(); - result = mpc->funcs->program_3dlut(mpc, - &stream->lut3d_func->lut_3d, - stream->lut3d_func->state.bits.rmu_mux_num); + + result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d, + stream->lut3d_func->state.bits.rmu_mux_num); result = mpc->funcs->program_shaper(mpc, shaper_lut, - stream->lut3d_func->state.bits.rmu_mux_num); - } else - /*loop through the available mux and release the requested mpcc_id*/ + stream->lut3d_func->state.bits.rmu_mux_num); + } else { + // loop through the available mux and release the requested mpcc_id mpc->funcs->release_rmu(mpc, mpcc_id); - + } return result; } @@ -540,7 +540,7 @@ void dcn30_init_hw(struct dc *dc) hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); /* we want to turn off all dp displays before doing detection */ - dc_link_blank_all_dp_displays(dc); + link_blank_all_dp_displays(dc); if (hws->funcs.enable_power_gating_plane) hws->funcs.enable_power_gating_plane(dc->hwseq, true); @@ -675,10 +675,16 @@ void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets( pipe_ctx->stream_res.stream_enc, &pipe_ctx->stream_res.encoder_info_frame); - else + else { + if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num) + pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num( + pipe_ctx->stream_res.stream_enc, + &pipe_ctx->stream_res.encoder_info_frame); + pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets( pipe_ctx->stream_res.stream_enc, &pipe_ctx->stream_res.encoder_info_frame); + } } void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx) @@ -992,8 +998,5 @@ void dcn30_prepare_bandwidth(struct dc *dc, dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); - - dc_dmub_srv_p_state_delegate(dc, - context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index 867d60151aebbe..08b92715e2e646 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -291,6 +291,14 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode); } +void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, 0, 2, 100000); /* 1 vupdate at 5 Hz */ +} + void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max) { optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max); @@ -360,6 +368,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = { .program_manual_trigger = optc2_program_manual_trigger, .setup_manual_trigger = optc2_setup_manual_trigger, .get_hw_timing = optc1_get_hw_timing, + .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear, }; void dcn30_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h index dd45a5499b078a..fb06dc9a48937d 100644 ---
a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h @@ -279,6 +279,7 @@ SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\ SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh) @@ -317,6 +318,7 @@ SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\ SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh) void dcn30_timing_generator_init(struct optc *optc1); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index c18c52a60100e5..b5b5320c7befb3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -60,7 +60,7 @@ #include "dml/display_mode_vba.h" #include "dcn30/dcn30_dccg.h" #include "dcn10/dcn10_resource.h" -#include "dc_link_ddc.h" +#include "link.h" #include "dce/dce_panel_cntl.h" #include "dcn30/dcn30_dwb.h" @@ -1208,7 +1208,7 @@ static void dcn30_resource_destruct(struct dcn30_resource_pool *pool) dcn_dccg_destroy(&pool->base.dccg); if (pool->base.oem_device != NULL) - dal_ddc_service_destroy(&pool->base.oem_device); + link_destroy_ddc_service(&pool->base.oem_device); } static struct hubp *dcn30_hubp_create( @@ -1477,8 +1477,8 @@ bool dcn30_acquire_post_bldn_3dlut( state->bits.mpc_rmu2_mux = mpcc_id; ret = true; break; - } } + } return ret; } @@ -1648,7 +1648,8 @@ noinline bool dcn30_internal_validate_bw( display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate) + bool fast_validate, + bool allow_self_refresh_only) { bool out = false; bool repopulate_pipes = false; @@ -1675,7 +1676,7 @@ noinline bool dcn30_internal_validate_bw( dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); - if (!fast_validate) { + if (!fast_validate || !allow_self_refresh_only) { /* * DML favors voltage over p-state, but we're more interested in * supporting p-state over voltage. We can't support p-state in @@ -1688,11 +1689,12 @@ noinline bool dcn30_internal_validate_bw( if (vlevel < context->bw_ctx.dml.soc.num_states) vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); } - if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states || - vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) { + if (allow_self_refresh_only && + (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states || + vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) { /* - * If mode is unsupported or there's still no p-state support then - * fall back to favoring voltage. + * If mode is unsupported or there's still no p-state support + * then fall back to favoring voltage. * * We don't actually support prefetch mode 2, so require that we * at least support prefetch mode 1. 
@@ -2063,7 +2065,7 @@ bool dcn30_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_COUNT(); DC_FP_START(); - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); DC_FP_END(); if (pipe_cnt == 0) @@ -2590,7 +2592,7 @@ static bool dcn30_resource_construct( ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; ddc_init_data.id.enum_id = 0; ddc_init_data.id.type = OBJECT_TYPE_GENERIC; - pool->base.oem_device = dal_ddc_service_create(&ddc_init_data); + pool->base.oem_device = link_create_ddc_service(&ddc_init_data); } else { pool->base.oem_device = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h index 7d063c7d6a4bfd..8e6b8b7368fdb3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h @@ -64,7 +64,8 @@ bool dcn30_internal_validate_bw( display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate); + bool fast_validate, + bool allow_self_refresh_only); void dcn30_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c index c9fbaed239654b..1b39a6e8a1ac5a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c @@ -29,7 +29,6 @@ #include "link_encoder.h" #include "dcn301_dio_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 8cf10351f271ba..ee62ae3eb98f65 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -1414,7 +1414,8 @@ static struct resource_funcs dcn301_res_pool_funcs = { .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, - .update_bw_bounding_box = dcn301_update_bw_bounding_box + .update_bw_bounding_box = dcn301_update_bw_bounding_box, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state }; static bool dcn301_resource_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index 47cffd0e6830f7..03ddf4f5f065c3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -47,6 +47,7 @@ #include "dcn10/dcn10_resource.h" +#include "link.h" #include "dce/dce_abm.h" #include "dce/dce_audio.h" #include "dce/dce_aux.h" @@ -1125,6 +1126,9 @@ static void dcn302_resource_destruct(struct resource_pool *pool) if (pool->dccg != NULL) dcn_dccg_destroy(&pool->dccg); + + if (pool->oem_device != NULL) + link_destroy_ddc_service(&pool->oem_device); } static void dcn302_destroy_resource_pool(struct resource_pool **pool) @@ -1216,6 +1220,7 @@ static bool dcn302_resource_construct( int i; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; + struct ddc_service_init_data ddc_init_data = 
{0}; ctx->dc_bios->regs = &bios_regs; @@ -1497,6 +1502,17 @@ static bool dcn302_resource_construct( dc->cap_funcs = cap_funcs; + if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { + ddc_init_data.ctx = dc->ctx; + ddc_init_data.link = NULL; + ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; + ddc_init_data.id.enum_id = 0; + ddc_init_data.id.type = OBJECT_TYPE_GENERIC; + pool->oem_device = link_create_ddc_service(&ddc_init_data); + } else { + pool->oem_device = NULL; + } + return true; create_fail: diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index c14d35894b2e57..31e2120641681d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -29,7 +29,7 @@ #include "dcn10/dcn10_resource.h" -#include "dc_link_ddc.h" +#include "link.h" #include "dce/dce_abm.h" #include "dce/dce_audio.h" @@ -1054,7 +1054,7 @@ static void dcn303_resource_destruct(struct resource_pool *pool) dcn_dccg_destroy(&pool->dccg); if (pool->oem_device != NULL) - dal_ddc_service_destroy(&pool->oem_device); + link_destroy_ddc_service(&pool->oem_device); } static void dcn303_destroy_resource_pool(struct resource_pool **pool) @@ -1421,7 +1421,7 @@ static bool dcn303_resource_construct( ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; ddc_init_data.id.enum_id = 0; ddc_init_data.id.type = OBJECT_TYPE_GENERIC; - pool->oem_device = dal_ddc_service_create(&ddc_init_data); + pool->oem_device = link_create_ddc_service(&ddc_init_data); } else { pool->oem_device = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index ab70ebd8f223d8..275e78c06dee14 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -30,7 +30,6 @@ #include "link_encoder.h" #include "dcn31_dio_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c index 80dfaa4d4d81ee..0b317ed31f918a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c @@ -242,7 +242,10 @@ void dcn31_hpo_dp_link_enc_set_link_test_pattern( REG_UPDATE(DP_DPHY_SYM32_CONTROL, MODE, DP2_TEST_PATTERN); break; - case DP_TEST_PATTERN_SQUARE_PULSE: + case DP_TEST_PATTERN_SQUARE: + case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED: + case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED: + case DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED: REG_SET(DP_DPHY_SYM32_TP_SQ_PULSE, 0, TP_SQ_PULSE_WIDTH, tp_params->custom_pattern[0]); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c index 16639bd03adf7c..d76f55a12eb41c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -430,6 +430,22 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute( MSA_DATA_LANE_3, 0); } +static void dcn31_hpo_dp_stream_enc_update_dp_info_packets_sdp_line_num( + struct hpo_dp_stream_encoder *enc, + struct encoder_info_frame *info_frame) +{ + struct 
dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + if (info_frame->adaptive_sync.valid == true && + info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) { + //00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_SOF_REFERENCE, 1); + + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_TRANSMISSION_LINE_NUMBER, + info_frame->sdp_line_num.adaptive_sync_line_num); + } +} + static void dcn31_hpo_dp_stream_enc_update_dp_info_packets( struct hpo_dp_stream_encoder *enc, const struct encoder_info_frame *info_frame) @@ -458,12 +474,20 @@ static void dcn31_hpo_dp_stream_enc_update_dp_info_packets( &info_frame->hdrsmd, true); + if (info_frame->adaptive_sync.valid) + enc->vpg->funcs->update_generic_info_packet( + enc->vpg, + 5, /* packetIndex */ + &info_frame->adaptive_sync, + true); + /* enable/disable transmission of packet(s). * If enabled, packet transmission begins on the next frame */ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->vsc.valid); REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->spd.valid); REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->hdrsmd.valid); + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->adaptive_sync.valid); /* check if dynamic metadata packet transmission is enabled */ REG_GET(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, @@ -714,6 +738,7 @@ static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = { .dp_blank = dcn31_hpo_dp_stream_enc_dp_blank, .disable = dcn31_hpo_dp_stream_enc_disable, .set_stream_attribute = dcn31_hpo_dp_stream_enc_set_stream_attribute, + .update_dp_info_packets_sdp_line_num = dcn31_hpo_dp_stream_enc_update_dp_info_packets_sdp_line_num, .update_dp_info_packets = dcn31_hpo_dp_stream_enc_update_dp_info_packets, .stop_dp_info_packets = dcn31_hpo_dp_stream_enc_stop_dp_info_packets, .dp_set_dsc_pps_info_packet = dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index 6360dc9502e703..7e7cd5b64e6a1b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -1008,6 +1008,24 @@ static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub) return false; } +void hubbub31_init(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + /* Enable clock gate */ + if (hubbub->ctx->dc->debug.disable_clock_gate) { + /* done in hwseq */ + /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ + REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, + DISPCLK_R_DCHUBBUB_GATE_DIS, 0, + DCFCLK_R_DCHUBBUB_GATE_DIS, 0); + } + + /* Only the DCN will determine when to connect the SDP port */ + REG_UPDATE(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, 1); +} static const struct hubbub_funcs hubbub31_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h index 70c60de448ac31..89d6208287b534 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h @@ -42,6 +42,10 @@ SR(DCHUBBUB_COMPBUF_CTRL),\ SR(COMPBUF_RESERVED_SPACE),\ SR(DCHUBBUB_DEBUG_CTRL_0),\ + SR(DCHUBBUB_CLOCK_CNTL),\ +
SR(DCHUBBUB_SDPIF_CFG0),\ + SR(DCHUBBUB_SDPIF_CFG1),\ + SR(DCHUBBUB_MEM_PWR_MODE_CTRL),\ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A),\ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A),\ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B),\ @@ -120,11 +124,17 @@ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh) + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\ + HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh) int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub, struct dcn_hubbub_phys_addr_config *pa_config); +void hubbub31_init(struct hubbub *hubbub); + void hubbub31_construct(struct dcn20_hubbub *hubbub3, struct dc_context *ctx, const struct dcn_hubbub_registers *hubbub_regs, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 4226a051df414b..d13e46eeee3c0e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -45,8 +45,7 @@ #include "link_hwss.h" #include "dpcd_defs.h" #include "dce/dmub_outbox.h" -#include "dc_link_dp.h" -#include "inc/link_dpcd.h" +#include "link.h" #include "dcn10/dcn10_hw_sequencer.h" #include "inc/link_enc_cfg.h" #include "dcn30/dcn30_vpg.h" @@ -203,7 +202,7 @@ void dcn31_init_hw(struct dc *dc) dmub_enable_outbox_notification(dc->ctx->dmub_srv); /* we want to turn off all dp displays before doing detection */ - dc_link_blank_all_dp_displays(dc); + link_blank_all_dp_displays(dc); if (hws->funcs.enable_power_gating_plane) hws->funcs.enable_power_gating_plane(dc->hwseq, true); @@ -231,7 +230,7 @@ void dcn31_init_hw(struct dc *dc) } if (num_opps > 1) { - dc_link_blank_all_edp_displays(dc); + link_blank_all_edp_displays(dc); break; } } @@ -415,7 +414,17 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets( pipe_ctx->stream_res.stream_enc, &pipe_ctx->stream_res.encoder_info_frame); - else { + else if (link_is_dp_128b_132b_signal(pipe_ctx)) { + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->update_dp_info_packets( + pipe_ctx->stream_res.hpo_dp_stream_enc, + &pipe_ctx->stream_res.encoder_info_frame); + return; + } else { + if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num) + pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num( + pipe_ctx->stream_res.stream_enc, + &pipe_ctx->stream_res.encoder_info_frame); + pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets( pipe_ctx->stream_res.stream_enc, &pipe_ctx->stream_res.encoder_info_frame); @@ -556,7 +565,7 @@ static void dcn31_reset_back_end_for_pipe( * VBIOS lit up eDP, so check link status too. 
*/ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) - core_link_disable_stream(pipe_ctx); + link_set_dpms_off(pipe_ctx); else if (pipe_ctx->stream_res.audio) dc->hwss.disable_audio_stream(pipe_ctx); @@ -575,7 +584,7 @@ static void dcn31_reset_back_end_for_pipe( } } } else if (pipe_ctx->stream_res.dsc) { - dp_set_dsc_enable(pipe_ctx, false); + link_set_dsc_enable(pipe_ctx, false); } pipe_ctx->stream = NULL; @@ -623,43 +632,3 @@ void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable) if (hws->ctx->dc->debug.hpo_optimization) REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable); } -void dcn31_set_drr(struct pipe_ctx **pipe_ctx, - int num_pipes, struct dc_crtc_timing_adjust adjust) -{ - int i = 0; - struct drr_params params = {0}; - unsigned int event_triggers = 0x2;/*Bit[1]: OTG_TRIG_A*/ - unsigned int num_frames = 2; - params.vertical_total_max = adjust.v_total_max; - params.vertical_total_min = adjust.v_total_min; - params.vertical_total_mid = adjust.v_total_mid; - params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num; - for (i = 0; i < num_pipes; i++) { - if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) { - if (pipe_ctx[i]->stream_res.tg->funcs->set_drr) - pipe_ctx[i]->stream_res.tg->funcs->set_drr( - pipe_ctx[i]->stream_res.tg, ¶ms); - if (adjust.v_total_max != 0 && adjust.v_total_min != 0) - if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control) - pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control( - pipe_ctx[i]->stream_res.tg, - event_triggers, num_frames); - } - } -} -void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx, - int num_pipes, const struct dc_static_screen_params *params) -{ - unsigned int i; - unsigned int triggers = 0; - if (params->triggers.surface_update) - triggers |= 0x600;/*bit 9 and bit10 : 110 0000 0000*/ - if (params->triggers.cursor_update) - triggers |= 0x10;/*bit4*/ - if (params->triggers.force_trigger) - triggers |= 0x1; - for (i = 0; i < num_pipes; i++) - pipe_ctx[i]->stream_res.tg->funcs-> - set_static_screen_control(pipe_ctx[i]->stream_res.tg, - triggers, params->num_frames); -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h index e7e03a8722e0ba..edfc01d6ad7378 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h @@ -56,8 +56,4 @@ bool dcn31_is_abm_supported(struct dc *dc, void dcn31_init_pipes(struct dc *dc, struct dc_state *context); void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable); -void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx, - int num_pipes, const struct dc_static_screen_params *params); -void dcn31_set_drr(struct pipe_ctx **pipe_ctx, - int num_pipes, struct dc_crtc_timing_adjust adjust); #endif /* __DC_HWSS_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index 7c2da70ffe21a5..3a32810bbe382d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -64,9 +64,9 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .prepare_bandwidth = dcn20_prepare_bandwidth, .optimize_bandwidth = dcn20_optimize_bandwidth, .update_bandwidth = dcn20_update_bandwidth, - .set_drr = dcn31_set_drr, + .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn31_set_static_screen_control, + 
.set_static_screen_control = dcn10_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c index fe449f7aa7715b..63a677c8ee2726 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c @@ -40,7 +40,6 @@ #define FN(reg_name, field_name) \ optc1->tg_shift->field_name, optc1->tg_mask->field_name -#define STATIC_SCREEN_EVENT_MASK_DRR_DOUBLE_BUFFER_UPDATE_EN 0x2000 /*bit 13*/ static void optc31_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, struct dc_crtc_timing *timing) { @@ -232,32 +231,6 @@ void optc3_init_odm(struct timing_generator *optc) OPTC_MEM_SEL, 0); optc1->opp_count = 1; } -void optc31_set_static_screen_control( - struct timing_generator *optc, - uint32_t event_triggers, - uint32_t num_frames) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t framecount; - uint32_t events; - - if (num_frames > 0xFF) - num_frames = 0xFF; - REG_GET_2(OTG_STATIC_SCREEN_CONTROL, - OTG_STATIC_SCREEN_EVENT_MASK, &events, - OTG_STATIC_SCREEN_FRAME_COUNT, &framecount); - - if (events == event_triggers && num_frames == framecount) - return; - if ((event_triggers & STATIC_SCREEN_EVENT_MASK_DRR_DOUBLE_BUFFER_UPDATE_EN) - != 0) - event_triggers = event_triggers & - ~STATIC_SCREEN_EVENT_MASK_DRR_DOUBLE_BUFFER_UPDATE_EN; - - REG_UPDATE_2(OTG_STATIC_SCREEN_CONTROL, - OTG_STATIC_SCREEN_EVENT_MASK, event_triggers, - OTG_STATIC_SCREEN_FRAME_COUNT, num_frames); -} static struct timing_generator_funcs dcn31_tg_funcs = { .validate_timing = optc1_validate_timing, @@ -293,7 +266,7 @@ static struct timing_generator_funcs dcn31_tg_funcs = { .set_drr = optc31_set_drr, .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, .set_vtotal_min_max = optc1_set_vtotal_min_max, - .set_static_screen_control = optc31_set_static_screen_control, + .set_static_screen_control = optc1_set_static_screen_control, .program_stereo = optc1_program_stereo, .is_stereo_left_eye = optc1_is_stereo_left_eye, .tg_init = optc3_tg_init, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h index 5fc6c63580d705..30b81a448ce2d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h @@ -263,8 +263,5 @@ bool optc31_immediate_disable_crtc(struct timing_generator *optc); void optc31_set_drr(struct timing_generator *optc, const struct drr_params *params); void optc3_init_odm(struct timing_generator *optc); -void optc31_set_static_screen_control( - struct timing_generator *optc, - uint32_t event_triggers, - uint32_t num_frames); + #endif /* __DC_OPTC_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 3ca517dcc82dc5..d3918a10773a3e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -1795,7 +1795,7 @@ bool dcn31_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_COUNT(); DC_FP_START(); - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); DC_FP_END(); // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg diff --git
a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c index 38842f938bed0b..962a2c02b422a2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c @@ -30,7 +30,7 @@ #include "dcn314_dio_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "inc/link_dpcd.h" +#include "link.h" #include "dpcd_defs.h" #define DC_LOGGER \ @@ -278,10 +278,11 @@ static void enc314_stream_encoder_dp_blank( struct dc_link *link, struct stream_encoder *enc) { - /* New to DCN314 - disable the FIFO before VID stream disable. */ - enc314_disable_fifo(enc); - enc1_stream_encoder_dp_blank(link, enc); + + /* Disable FIFO after the DP vid stream is disabled to avoid corruption. */ + if (enc->ctx->dc->debug.dig_fifo_off_in_blank) + enc314_disable_fifo(enc); } static void enc314_stream_encoder_dp_unblank( @@ -365,7 +366,7 @@ static void enc314_stream_encoder_dp_unblank( */ enc314_enable_fifo(enc); - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); } /* Set DSC-related configuration. @@ -428,6 +429,8 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = { enc3_stream_encoder_update_hdmi_info_packets, .stop_hdmi_info_packets = enc3_stream_encoder_stop_hdmi_info_packets, + .update_dp_info_packets_sdp_line_num = + enc3_stream_encoder_update_dp_info_packets_sdp_line_num, .update_dp_info_packets = enc3_stream_encoder_update_dp_info_packets, .stop_dp_info_packets = diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h index 33dfdf8b4100fe..ed07723879036a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h @@ -280,6 +280,10 @@ void enc3_stream_encoder_update_hdmi_info_packets( void enc3_stream_encoder_stop_hdmi_info_packets( struct stream_encoder *enc); +void enc3_stream_encoder_update_dp_info_packets_sdp_line_num( + struct stream_encoder *enc, + struct encoder_info_frame *info_frame); + void enc3_stream_encoder_update_dp_info_packets( struct stream_encoder *enc, const struct encoder_info_frame *info_frame); diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c index a0741794db62aa..575d3501c848ad 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c @@ -46,9 +46,7 @@ #include "link_hwss.h" #include "dpcd_defs.h" #include "dce/dmub_outbox.h" -#include "dc_link_dp.h" -#include "inc/dc_link_dp.h" -#include "inc/link_dpcd.h" +#include "link.h" #include "dcn10/dcn10_hw_sequencer.h" #include "inc/link_enc_cfg.h" #include "dcn30/dcn30_vpg.h" @@ -348,7 +346,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing); odm_combine_factor = get_odm_config(pipe_ctx, NULL); - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { *k1_div = PIXEL_RATE_DIV_BY_1; *k2_div = PIXEL_RATE_DIV_BY_1; } else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) { @@ -391,3 +389,27 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx 
*pipe_ctx) pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc, pix_per_cycle); } + +void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) +{ + struct dc_context *ctx = hws->ctx; + union dmub_rb_cmd cmd; + + if (hws->ctx->dc->debug.disable_hubp_power_gate) + return; + + PERF_TRACE(); + + memset(&cmd, 0, sizeof(cmd)); + cmd.domain_control.header.type = DMUB_CMD__VBIOS; + cmd.domain_control.header.sub_type = DMUB_CMD__VBIOS_DOMAIN_CONTROL; + cmd.domain_control.header.payload_bytes = sizeof(cmd.domain_control.data); + cmd.domain_control.data.inst = hubp_inst; + cmd.domain_control.data.power_gate = !power_on; + + dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(ctx->dmub_srv); + dc_dmub_srv_wait_idle(ctx->dmub_srv); + + PERF_TRACE(); +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h index 244280298212c5..c419d3dbdfee6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h @@ -41,4 +41,6 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx); +void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on); + #endif /* __DC_HWSS_DCN314_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index 31feb4b0edee9f..343f4d9dd5e341 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -66,9 +66,9 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .prepare_bandwidth = dcn20_prepare_bandwidth, .optimize_bandwidth = dcn20_optimize_bandwidth, .update_bandwidth = dcn20_update_bandwidth, - .set_drr = dcn31_set_drr, + .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn31_set_static_screen_control, + .set_static_screen_control = dcn10_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, @@ -137,7 +137,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = { .plane_atomic_disable = dcn20_plane_atomic_disable, .plane_atomic_power_down = dcn10_plane_atomic_power_down, .enable_power_gating_plane = dcn314_enable_power_gating_plane, - .hubp_pg_control = dcn31_hubp_pg_control, + .hubp_pg_control = dcn314_hubp_pg_control, .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, .update_odm = dcn314_update_odm, .dsc_pg_control = dcn314_dsc_pg_control, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c index 41edbd64ea2169..0086cafb0f7a82 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c @@ -228,7 +228,7 @@ static struct timing_generator_funcs dcn314_tg_funcs = { .set_drr = optc31_set_drr, .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, .set_vtotal_min_max = optc1_set_vtotal_min_max, - .set_static_screen_control = optc31_set_static_screen_control, + .set_static_screen_control = optc1_set_static_screen_control, .program_stereo = optc1_program_stereo, .is_stereo_left_eye = optc1_is_stereo_left_eye, .tg_init = optc3_tg_init, @@ -241,7 +241,6 @@ static struct timing_generator_funcs dcn314_tg_funcs = { .set_dsc_config = 
optc3_set_dsc_config, .get_dsc_status = optc2_get_dsc_status, .set_dwb_source = NULL, - .set_odm_combine = optc314_set_odm_combine, .get_optc_source = optc2_get_optc_source, .set_out_mux = optc3_set_out_mux, .set_drr_trigger_window = optc3_set_drr_trigger_window, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index 79850a68f62ab0..54ed3de869d3b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -892,6 +892,8 @@ static const struct dc_debug_options debug_defaults_drv = { .force_abm_enable = false, .timing_trace = false, .clock_trace = true, + .disable_dpp_power_gate = true, + .disable_hubp_power_gate = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_DYNAMIC, .force_single_disp_pipe_split = false, @@ -901,7 +903,7 @@ static const struct dc_debug_options debug_defaults_drv = { .max_downscale_src_width = 4096,/*upto true 4k*/ .disable_pplib_wm_range = false, .scl_reset_length10 = true, - .sanity_checks = false, + .sanity_checks = true, .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, @@ -1695,6 +1697,61 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi *panel_config = panel_config_defaults; } +bool dcn314_validate_bandwidth(struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + bool out = false; + + BW_VAL_TRACE_SETUP(); + + int vlevel = 0; + int pipe_cnt = 0; + display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); + DC_LOGGER_INIT(dc->ctx->logger); + + BW_VAL_TRACE_COUNT(); + + DC_FP_START(); + // do not support self refresh only + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false); + DC_FP_END(); + + // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg + if (pipe_cnt == 0) + fast_validate = false; + + if (!out) + goto validate_fail; + + BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + + if (fast_validate) { + BW_VAL_TRACE_SKIP(fast); + goto validate_out; + } + + dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + + BW_VAL_TRACE_END_WATERMARKS(); + + goto validate_out; + +validate_fail: + DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", + dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); + + BW_VAL_TRACE_SKIP(fail); + out = false; + +validate_out: + kfree(pipes); + + BW_VAL_TRACE_FINISH(); + + return out; +} + static struct resource_funcs dcn314_res_pool_funcs = { .destroy = dcn314_destroy_resource_pool, .link_enc_create = dcn31_link_encoder_create, @@ -1702,7 +1759,7 @@ static struct resource_funcs dcn314_res_pool_funcs = { .link_encs_assign = link_enc_cfg_link_encs_assign, .link_enc_unassign = link_enc_cfg_link_enc_unassign, .panel_cntl_create = dcn31_panel_cntl_create, - .validate_bandwidth = dcn31_validate_bandwidth, + .validate_bandwidth = dcn314_validate_bandwidth, .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, .populate_dml_pipes = dcn314_populate_dml_pipes_from_context, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h index 0dd3153aa5c17a..49ffe71018dfb9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h +++ 
b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h @@ -39,6 +39,10 @@ struct dcn314_resource_pool { struct resource_pool base; }; +bool dcn314_validate_bandwidth(struct dc *dc, + struct dc_state *context, + bool fast_validate); + struct resource_pool *dcn314_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c index b4d5076e124c15..dc0b4950627556 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c @@ -1776,7 +1776,7 @@ static bool dcn316_resource_construct( pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; dc->caps.max_downscale_ratio = 600; dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 100; + dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.5 w/a applied by default*/ dc->caps.max_cursor_size = 256; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c index 076969d928afaa..501388014855c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c @@ -31,7 +31,6 @@ #include "dcn31/dcn31_dio_link_encoder.h" #include "dcn32_dio_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "link_enc_cfg.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c index d19fc93dbc75df..36e6f565794206 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c @@ -29,7 +29,7 @@ #include "dcn32_dio_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "inc/link_dpcd.h" +#include "link.h" #include "dpcd_defs.h" #define DC_LOGGER \ @@ -373,7 +373,7 @@ static void enc32_stream_encoder_dp_unblank( REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true); - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); } /* Set DSC-related configuration. @@ -421,6 +421,33 @@ static void enc32_set_dig_input_mode(struct stream_encoder *enc, unsigned int pi REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, pix_per_container == 2 ? 0x1 : 0x0); } +static void enc32_reset_fifo(struct stream_encoder *enc, bool reset) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + uint32_t reset_val = reset ? 
1 : 0; + uint32_t is_symclk_on; + + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, reset_val); + REG_GET(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, &is_symclk_on); + + if (is_symclk_on) + REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, reset_val, 10, 5000); + else + udelay(10); +} + +static void enc32_enable_fifo(struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7); + + enc32_reset_fifo(enc, true); + enc32_reset_fifo(enc, false); + + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1); +} + static const struct stream_encoder_funcs dcn32_str_enc_funcs = { .dp_set_odm_combine = enc32_dp_set_odm_combine, @@ -436,6 +463,8 @@ static const struct stream_encoder_funcs dcn32_str_enc_funcs = { enc3_stream_encoder_update_hdmi_info_packets, .stop_hdmi_info_packets = enc3_stream_encoder_stop_hdmi_info_packets, + .update_dp_info_packets_sdp_line_num = + enc3_stream_encoder_update_dp_info_packets_sdp_line_num, .update_dp_info_packets = enc3_stream_encoder_update_dp_info_packets, .stop_dp_info_packets = @@ -466,6 +495,7 @@ static const struct stream_encoder_funcs dcn32_str_enc_funcs = { .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute, .set_input_mode = enc32_set_dig_input_mode, + .enable_fifo = enc32_enable_fifo, }; void dcn32_dio_stream_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c index 9501403a48a958..eb08ccc38e798c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c @@ -945,6 +945,35 @@ void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub) DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); } +void hubbub32_init(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + /* Enable clock gate*/ + if (hubbub->ctx->dc->debug.disable_clock_gate) { + /*done in hwseq*/ + /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ + + REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, + DISPCLK_R_DCHUBBUB_GATE_DIS, 0, + DCFCLK_R_DCHUBBUB_GATE_DIS, 0); + } + /* + ignore the "df_pre_cstate_req" from the SDP port control. 
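+ ("df_pre_cstate_req" here presumably being the Data Fabric's pre-cstate request);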
+ only the DCN will determine when to connect the SDP port + */ + REG_UPDATE(DCHUBBUB_SDPIF_CFG0, + SDPIF_PORT_CONTROL, 1); + /*Set SDP's max outstanding request to 512 + must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/ + REG_UPDATE(DCHUBBUB_SDPIF_CFG1, + SDPIF_MAX_NUM_OUTSTANDING, 1); + /*must set the registers back to 256 in zero frame buffer mode*/ + REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND, + DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 512, + DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 512); +} + static const struct hubbub_funcs hubbub32_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h index 786f9ce07f9286..b20eb04724bb9a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h @@ -83,7 +83,12 @@ SR(DCN_VM_FAULT_ADDR_LSB),\ SR(DCN_VM_FAULT_CNTL),\ SR(DCN_VM_FAULT_STATUS),\ - SR(SDPIF_REQUEST_RATE_LIMIT) + SR(SDPIF_REQUEST_RATE_LIMIT),\ + SR(DCHUBBUB_CLOCK_CNTL),\ + SR(DCHUBBUB_SDPIF_CFG0),\ + SR(DCHUBBUB_SDPIF_CFG1),\ + SR(DCHUBBUB_MEM_PWR_MODE_CTRL) + #define HUBBUB_MASK_SH_LIST_DCN32(mask_sh)\ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \ @@ -96,6 +101,7 @@ HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MAX_REQ_OUTSTAND, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, mask_sh), \ @@ -161,7 +167,14 @@ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\ - HUBBUB_SF(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, mask_sh) + HUBBUB_SF(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\ + HUBBUB_SF(DCHUBBUB_SDPIF_CFG1, SDPIF_MAX_NUM_OUTSTANDING, mask_sh),\ + HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh) + + bool hubbub32_program_urgent_watermarks( struct hubbub *hubbub, @@ -191,6 +204,8 @@ void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow); void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub); +void hubbub32_init(struct hubbub *hubbub); + void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte); void hubbub32_construct(struct dcn20_hubbub *hubbub2, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c index ac1c6458dd55a8..fe0cd177744cae 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c @@ -155,7 +155,11 @@ void hubp32_cursor_set_attributes( else REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false); } - 
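+/* Init-time write: set bit 8 of the HUBPREQ_DEBUG_DB debug register. */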
+void hubp32_init(struct hubp *hubp) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8); +} static struct hubp_funcs dcn32_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h index 56ef7115153657..4cdbf63c952bc7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h @@ -61,6 +61,8 @@ void hubp32_phantom_hubp_post_enable(struct hubp *hubp); void hubp32_cursor_set_attributes(struct hubp *hubp, const struct dc_cursor_attributes *attr); +void hubp32_init(struct hubp *hubp); + bool hubp32_construct( struct dcn20_hubp *hubp2, struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index b8767be1e4c55d..16f892125b6fac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -50,7 +50,7 @@ #include "dmub_subvp_state.h" #include "dce/dmub_hw_lock_mgr.h" #include "dcn32_resource.h" -#include "dc_link_dp.h" +#include "link.h" #include "dmub/inc/dmub_subvp_state.h" #define DC_LOGGER_INIT(logger) @@ -188,7 +188,8 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc) /* First, check no-memory-request case */ for (i = 0; i < dc->current_state->stream_count; i++) { - if (dc->current_state->stream_status[i].plane_count) + if ((dc->current_state->stream_status[i].plane_count) && + (dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)) /* Fail eligibility on a visible stream */ break; } @@ -206,151 +207,31 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc) */ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx) { - int i, j; - struct dc_stream_state *stream = NULL; - struct dc_plane_state *plane = NULL; - uint32_t cursor_size = 0; - uint32_t total_lines = 0; - uint32_t lines_per_way = 0; + int i; uint8_t num_ways = 0; - uint8_t bytes_per_pixel = 0; - uint8_t cursor_bpp = 0; - uint16_t mblk_width = 0; - uint16_t mblk_height = 0; - uint16_t mall_alloc_width_blk_aligned = 0; - uint16_t mall_alloc_height_blk_aligned = 0; - uint16_t num_mblks = 0; - uint32_t bytes_in_mall = 0; - uint32_t cache_lines_used = 0; - uint32_t cache_lines_per_plane = 0; + uint32_t mall_ss_size_bytes = 0; + mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes; + // TODO add additional logic for PSR active stream exclusion optimization + // mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes; + + // Include cursor size for CAB allocation for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i]; - /* If PSR is supported on an eDP panel that's connected, but that panel is - * not in PSR at the time of trying to enter MALL SS, we have to include it - * in the static screen CAB calculation - */ - if (!pipe->stream || !pipe->plane_state || - (pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && - pipe->stream->link->psr_settings.psr_allow_active) || - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (!pipe->stream || !pipe->plane_state) continue; - bytes_per_pixel = pipe->plane_state->format >= 
SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4; - mblk_width = DCN3_2_MBLK_WIDTH; - mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE; - - /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) - - * FLOOR(vp_x_start, blk_width) - * - * mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c - */ - mall_alloc_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x + - pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) - - (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width); - - /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) - - * FLOOR(vp_y_start, blk_height) - * - * mall_alloc_height_blk_aligned_l/c = full_vp_height_blk_aligned_l/c - */ - mall_alloc_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y + - pipe->plane_res.scl_data.viewport.height + mblk_height - 1) / mblk_height * mblk_height) - - (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height); - - num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) * - ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height); - - /*For DCC: - * meta_num_mblk = CEILING(meta_pitch*full_vp_height*Bpe/256/mblk_bytes, 1) - */ - if (pipe->plane_state->dcc.enable) - num_mblks += (pipe->plane_state->dcc.meta_pitch * pipe->plane_res.scl_data.viewport.height * bytes_per_pixel + - (256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES); - - bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES; - - /* (cache lines used is total bytes / cache_line size. Add +2 for worst case alignment - * (MALL is 64-byte aligned) - */ - cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2; - cache_lines_used += cache_lines_per_plane; - } - - // Include cursor size for CAB allocation - for (j = 0; j < dc->res_pool->pipe_count; j++) { - struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j]; - struct hubp *hubp = pipe->plane_res.hubp; - - if (pipe->stream && pipe->plane_state && hubp) - /* Find the cursor plane and use the exact size instead of - using the max for calculation */ - - if (hubp->curs_attr.width > 0) { - cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; - - switch (pipe->stream->cursor_attributes.color_format) { - case CURSOR_MODE_MONO: - cursor_size /= 2; - cursor_bpp = 4; - break; - case CURSOR_MODE_COLOR_1BIT_AND: - case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: - case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: - cursor_size *= 4; - cursor_bpp = 4; - break; - - case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED: - case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED: - cursor_size *= 8; - cursor_bpp = 8; - break; - } - - if (pipe->stream->cursor_position.enable && !dc->debug.alloc_extra_way_for_cursor && - cursor_size > 16384) { - /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1) - */ - cache_lines_used += (((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / - DCN3_2_MALL_MBLK_SIZE_BYTES) * DCN3_2_MALL_MBLK_SIZE_BYTES) / - dc->caps.cache_line_size + 2; - break; - } - } + mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false); } // Convert number of cache lines required to number of ways - total_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size; - lines_per_way = total_lines / dc->caps.cache_num_ways; - num_ways = cache_lines_used / lines_per_way; - - if (cache_lines_used % lines_per_way > 0) - num_ways++; - - 
for (i = 0; i < ctx->stream_count; i++) { - stream = ctx->streams[i]; - for (j = 0; j < ctx->stream_status[i].plane_count; j++) { - plane = ctx->stream_status[i].plane_states[j]; - - if (stream->cursor_position.enable && plane && - dc->debug.alloc_extra_way_for_cursor && - cursor_size > 16384) { - /* Cursor caching is not supported since it won't be on the same line. - * So we need an extra line to accommodate it. With large cursors and a single 4k monitor - * this case triggers corruption. If we're at the edge, then dont trigger display refresh - * from MALL. We only need to cache cursor if its greater that 64x64 at 4 bpp. - */ - num_ways++; - /* We only expect one cursor plane */ - break; - } - } - } if (dc->debug.force_mall_ss_num_ways > 0) { num_ways = dc->debug.force_mall_ss_num_ways; + } else { + num_ways = dcn32_helper_mall_bytes_to_ways(dc, mall_ss_size_bytes); } + return num_ways; } @@ -365,6 +246,13 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) if (!dc->ctx->dmub_srv) return false; + for (i = 0; i < dc->current_state->stream_count; i++) { + /* MALL SS messaging is not supported with PSR at this time */ + if (dc->current_state->streams[i] != NULL && + dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) + return false; + } + if (enable) { if (dc->current_state) { @@ -803,6 +691,26 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context) } } +static void dcn32_initialize_min_clocks(struct dc *dc) +{ + struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk; + + clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ; + clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000; + clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000; + clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000; + clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000; + clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000; + clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000; + clocks->fclk_p_state_change_support = true; + clocks->p_state_change_support = true; + + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + dc->current_state, + true); +} + void dcn32_init_hw(struct dc *dc) { struct abm **abms = dc->res_pool->multiple_abms; @@ -884,7 +792,7 @@ void dcn32_init_hw(struct dc *dc) hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); /* we want to turn off all dp displays before doing detection */ - dc_link_blank_all_dp_displays(dc); + link_blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -897,6 +805,18 @@ void dcn32_init_hw(struct dc *dc) if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); + + dcn32_initialize_min_clocks(dc); + + /* On HW init, allow idle optimizations after pipes have been turned off. + * + * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state + * is reset (i.e. not in idle at the time hw init is called), but software state + * still has idle_optimizations = true, so we must disable idle optimizations first + * (i.e. set false), then re-enable (set true). 
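+ * (BOCO/BOMACO above refer to amdgpu's bus-off/chip-off style D3 entry modes.)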
+ */ + dc_allow_idle_optimizations(dc, false); + dc_allow_idle_optimizations(dc, true); } /* In headless boot cases, DIG may be turned @@ -1175,16 +1095,16 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing); odm_combine_factor = get_odm_config(pipe_ctx, NULL); - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { *k1_div = PIXEL_RATE_DIV_BY_1; *k2_div = PIXEL_RATE_DIV_BY_1; - } else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) { + } else if (dc_is_hdmi_tmds_signal(stream->signal) || dc_is_dvi_signal(stream->signal)) { *k1_div = PIXEL_RATE_DIV_BY_1; if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) *k2_div = PIXEL_RATE_DIV_BY_2; else *k2_div = PIXEL_RATE_DIV_BY_4; - } else if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) { + } else if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) { if (two_pix_per_container) { *k1_div = PIXEL_RATE_DIV_BY_1; *k2_div = PIXEL_RATE_DIV_BY_2; @@ -1239,7 +1159,7 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx, params.link_settings.link_rate = link_settings->link_rate; - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank( pipe_ctx->stream_res.hpo_dp_stream_enc, @@ -1266,7 +1186,7 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx) if (!is_h_timing_divisible_by_2(pipe_ctx->stream)) return false; - if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) && + if (dc_is_dp_signal(pipe_ctx->stream->signal) && !link_is_dp_128b_132b_signal(pipe_ctx) && dc->debug.enable_dp_dig_pixel_rate_div_policy) return true; return false; @@ -1300,7 +1220,7 @@ static void apply_symclk_on_tx_off_wa(struct dc_link *link) pipe_ctx->clock_source->funcs->program_pix_clk( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, - dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings), + link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings), &pipe_ctx->pll_settings); link->phy_state.symclk_state = SYMCLK_ON_TX_OFF; break; @@ -1332,7 +1252,7 @@ void dcn32_disable_link_output(struct dc_link *link, else if (dmcu != NULL && dmcu->funcs->lock_phy) dmcu->funcs->unlock_phy(dmcu); - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); apply_symclk_on_tx_off_wa(link); } @@ -1450,3 +1370,39 @@ void dcn32_update_dsc_pg(struct dc *dc, } } } + +void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) +{ + unsigned int i; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + /* If an active, non-phantom pipe is being transitioned into a phantom + * pipe, wait for the double buffer update to complete first before we do + * ANY phantom pipe programming. 
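+ * Waiting for VBLANK and then VACTIVE on the outgoing main pipe ensures
+ * its pending double buffer update has actually latched.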
+ */ + if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM && + old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { + old_pipe->stream_res.tg->funcs->wait_for_state( + old_pipe->stream_res.tg, + CRTC_STATE_VBLANK); + old_pipe->stream_res.tg->funcs->wait_for_state( + old_pipe->stream_res.tg, + CRTC_STATE_VACTIVE); + } + } + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; + + if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + // If old context or new context has phantom pipes, apply + // the phantom timings now. We can't change the phantom + // pipe configuration safely without driver acquiring + // the DMCUB lock first. + dc->hwss.apply_ctx_to_hw(dc, context); + break; + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h index 7de36529cf99c6..e9e9534f36680a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h @@ -102,4 +102,6 @@ void dcn32_update_dsc_pg(struct dc *dc, struct dc_state *context, bool safe_to_disable); +void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context); + #endif /* __DC_HWSS_DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c index a4e9fd5307c63a..0694fa3a368031 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c @@ -30,6 +30,7 @@ #include "dcn30/dcn30_hwseq.h" #include "dcn31/dcn31_hwseq.h" #include "dcn32_hwseq.h" +#include "dcn32_init.h" static const struct hw_sequencer_funcs dcn32_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, @@ -106,6 +107,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, .commit_subvp_config = dcn32_commit_subvp_config, + .enable_phantom_streams = dcn32_enable_phantom_streams, .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, .update_visual_confirm_color = dcn20_update_visual_confirm_color, .update_phantom_vp_position = dcn32_update_phantom_vp_position, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index e4dbc8353ea338..74e50c09bb62f9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -57,7 +57,6 @@ #include "dcn31/dcn31_hpo_dp_stream_encoder.h" #include "dcn31/dcn31_hpo_dp_link_encoder.h" #include "dcn32/dcn32_hpo_dp_link_encoder.h" -#include "dc_link_dp.h" #include "dcn31/dcn31_apg.h" #include "dcn31/dcn31_dio_link_encoder.h" #include "dcn32/dcn32_dio_link_encoder.h" @@ -69,7 +68,7 @@ #include "dml/display_mode_vba.h" #include "dcn32/dcn32_dccg.h" #include "dcn10/dcn10_resource.h" -#include "dc_link_ddc.h" +#include "link.h" #include "dcn31/dcn31_panel_cntl.h" #include "dcn30/dcn30_dwb.h" @@ -726,6 +725,7 @@ static const struct dc_debug_options debug_defaults_drv = { .allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback" .alloc_extra_way_for_cursor = true, .min_prefetch_in_strobe_ns = 60000, // 60us + .disable_unbounded_requesting = false, }; static const struct dc_debug_options debug_defaults_diags = { @@ -1507,7 +1507,7 @@ static void dcn32_resource_destruct(struct dcn32_resource_pool 
*pool) dcn_dccg_destroy(&pool->base.dccg); if (pool->base.oem_device != NULL) - dal_ddc_service_destroy(&pool->base.oem_device); + link_destroy_ddc_service(&pool->base.oem_device); } @@ -2149,13 +2149,19 @@ static bool dcn32_resource_construct( dc->caps.max_cursor_size = 64; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; - dc->caps.mall_size_per_mem_channel = 0; + dc->caps.mall_size_per_mem_channel = 4; dc->caps.mall_size_total = 0; dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; dc->caps.cache_line_size = 64; dc->caps.cache_num_ways = 16; - dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64 + + /* Calculate the available MALL space */ + dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall( + dc, dc->ctx->dc_bios->vram_info.num_chans) * + dc->caps.mall_size_per_mem_channel * 1024 * 1024; + dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes; + dc->caps.subvp_fw_processing_delay_us = 15; dc->caps.subvp_drr_max_vblank_margin_us = 40; dc->caps.subvp_prefetch_end_to_mall_start_us = 15; @@ -2449,7 +2455,7 @@ static bool dcn32_resource_construct( ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; ddc_init_data.id.enum_id = 0; ddc_init_data.id.type = OBJECT_TYPE_GENERIC; - pool->base.oem_device = dal_ddc_service_create(&ddc_init_data); + pool->base.oem_device = link_create_ddc_service(&ddc_init_data); } else { pool->base.oem_device = NULL; } @@ -2592,3 +2598,55 @@ struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer( return idle_pipe; } + +unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans) +{ + /* + * DCN32 and DCN321 SKUs may have different sizes for MALL + * but we may not be able to access all the MALL space. + * If the num_chans is power of 2, then we can access all + * of the available MALL space. Otherwise, we can only + * access: + * + * max_cab_size_in_bytes = total_cache_size_in_bytes * + * ((2^floor(log2(num_chans)))/num_chans) + * + * Calculating the MALL sizes for all available SKUs, we + * have come up with the following simplified check. + * - we have max_chans which provides the max MALL size. + * Each channel supports 4MB of MALL so: + * + * total_cache_size_in_bytes = max_chans * 4 MB + * + * - we have avail_chans which shows the number of channels + * we can use if we can't access the entire MALL space. + * It is generally half of max_chans + * - so we use the following checks: + * + * if (num_chans == max_chans), return max_chans + * if (num_chans < max_chans), return avail_chans + * + * - exception is GC_11_0_0 where we can't access max_chans, + * so we define max_avail_chans as the maximum available + * MALL space + * + */ + int gc_11_0_0_max_chans = 48; + int gc_11_0_0_max_avail_chans = 32; + int gc_11_0_0_avail_chans = 16; + int gc_11_0_3_max_chans = 16; + int gc_11_0_3_avail_chans = 8; + int gc_11_0_2_max_chans = 8; + int gc_11_0_2_avail_chans = 4; + + if (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev)) { + return (num_chans == gc_11_0_0_max_chans) ? + gc_11_0_0_max_avail_chans : gc_11_0_0_avail_chans; + } else if (ASICREV_IS_GC_11_0_2(dc->ctx->asic_id.hw_internal_rev)) { + return (num_chans == gc_11_0_2_max_chans) ? + gc_11_0_2_max_chans : gc_11_0_2_avail_chans; + } else { // if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev)) { + return (num_chans == gc_11_0_3_max_chans) ? 
+ gc_11_0_3_max_chans : gc_11_0_3_avail_chans; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h index 13fbc574910bbb..aca928edc4e3d4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h @@ -38,6 +38,7 @@ #define DCN3_2_MBLK_HEIGHT_4BPE 128 #define DCN3_2_MBLK_HEIGHT_8BPE 64 #define DCN3_2_VMIN_DISPCLK_HZ 717000000 +#define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10Mhz for init DCFCLK DS freq #define TO_DCN32_RES_POOL(pool)\ container_of(pool, struct dcn32_resource_pool, base) @@ -96,8 +97,17 @@ void dcn32_calculate_wm_and_dlg( int pipe_cnt, int vlevel); -uint32_t dcn32_helper_calculate_num_ways_for_subvp - (struct dc *dc, +uint32_t dcn32_helper_mall_bytes_to_ways( + struct dc *dc, + uint32_t total_size_in_mall_bytes); + +uint32_t dcn32_helper_calculate_mall_bytes_for_cursor( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool ignore_cursor_buf); + +uint32_t dcn32_helper_calculate_num_ways_for_subvp( + struct dc *dc, struct dc_state *context); void dcn32_merge_pipes_for_subvp(struct dc *dc, @@ -112,6 +122,8 @@ bool dcn32_subvp_in_use(struct dc *dc, bool dcn32_mpo_in_use(struct dc_state *context); bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context); +bool dcn32_is_center_timing(struct pipe_ctx *pipe); +bool dcn32_is_psr_capable(struct pipe_ctx *pipe); struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer( struct dc_state *state, @@ -134,6 +146,12 @@ void dcn32_restore_mall_state(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config); +bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe); + +unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans); + +double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context); + /* definitions for run time init of reg offsets */ /* CLK SRC */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index 783935c4e6644c..3a2d7bcc4b6d6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -33,13 +33,75 @@ static bool is_dual_plane(enum surface_pixel_format format) return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; } + +uint32_t dcn32_helper_mall_bytes_to_ways( + struct dc *dc, + uint32_t total_size_in_mall_bytes) +{ + uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways; + + /* add 2 lines for worst case alignment */ + cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2; + + total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size; + lines_per_way = total_cache_lines / dc->caps.cache_num_ways; + num_ways = cache_lines_used / lines_per_way; + if (cache_lines_used % lines_per_way > 0) + num_ways++; + + return num_ways; +} + +uint32_t dcn32_helper_calculate_mall_bytes_for_cursor( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool ignore_cursor_buf) +{ + struct hubp *hubp = pipe_ctx->plane_res.hubp; + uint32_t cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; + uint32_t cursor_bpp = 4; + uint32_t cursor_mall_size_bytes = 0; + + switch (pipe_ctx->stream->cursor_attributes.color_format) { + case CURSOR_MODE_MONO: + cursor_size /= 2; + cursor_bpp = 4; + break; + case CURSOR_MODE_COLOR_1BIT_AND: + case 
CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: + case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: + cursor_size *= 4; + cursor_bpp = 4; + break; + + case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED: + case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED: + cursor_size *= 8; + cursor_bpp = 8; + break; + } + + /* only count if cursor is enabled, and if additional allocation needed outside of the + * DCN cursor buffer + */ + if (pipe_ctx->stream->cursor_position.enable && (ignore_cursor_buf || + cursor_size > 16384)) { + /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1) + * Note: add 1 mblk in case of cursor misalignment + */ + cursor_mall_size_bytes = ((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / + DCN3_2_MALL_MBLK_SIZE_BYTES + 1) * DCN3_2_MALL_MBLK_SIZE_BYTES; + } + + return cursor_mall_size_bytes; +} + /** * ******************************************************************************************** * dcn32_helper_calculate_num_ways_for_subvp: Calculate number of ways needed for SubVP * - * This function first checks the bytes required per pixel on the SubVP pipe, then calculates - * the total number of pixels required in the SubVP MALL region. These are used to calculate - * the number of cache lines used (then number of ways required) for SubVP MCLK switching. + * Gets total allocation required for the phantom viewport calculated by DML in bytes and + * converts to number of cache ways. * * @param [in] dc: current dc state * @param [in] context: new dc state @@ -48,106 +110,19 @@ static bool is_dual_plane(enum surface_pixel_format format) * * ******************************************************************************************** */ -uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_state *context) +uint32_t dcn32_helper_calculate_num_ways_for_subvp( + struct dc *dc, + struct dc_state *context) { - uint32_t num_ways = 0; - uint32_t bytes_per_pixel = 0; - uint32_t cache_lines_used = 0; - uint32_t lines_per_way = 0; - uint32_t total_cache_lines = 0; - uint32_t bytes_in_mall = 0; - uint32_t num_mblks = 0; - uint32_t cache_lines_per_plane = 0; - uint32_t i = 0, j = 0; - uint16_t mblk_width = 0; - uint16_t mblk_height = 0; - uint32_t full_vp_width_blk_aligned = 0; - uint32_t full_vp_height_blk_aligned = 0; - uint32_t mall_alloc_width_blk_aligned = 0; - uint32_t mall_alloc_height_blk_aligned = 0; - uint16_t full_vp_height = 0; - bool subvp_in_use = false; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - /* Find the phantom pipes. - * - For pipe split case we need to loop through the bottom and next ODM - * pipes or only half the viewport size is counted - */ - if (pipe->stream && pipe->plane_state && - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - struct pipe_ctx *main_pipe = NULL; - - subvp_in_use = true; - /* Get full viewport height from main pipe (required for MBLK calculation) */ - for (j = 0; j < dc->res_pool->pipe_count; j++) { - main_pipe = &context->res_ctx.pipe_ctx[j]; - if (main_pipe->stream == pipe->stream->mall_stream_config.paired_stream) { - full_vp_height = main_pipe->plane_res.scl_data.viewport.height; - break; - } - } - - bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4; - mblk_width = DCN3_2_MBLK_WIDTH; - mblk_height = bytes_per_pixel == 4 ? 
DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE; - - /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) - - * FLOOR(vp_x_start, blk_width) - */ - full_vp_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x + - pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) - - (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width); - - /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) - - * FLOOR(vp_y_start, blk_height) - */ - full_vp_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y + - full_vp_height + mblk_height - 1) / mblk_height * mblk_height) - - (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height); - - /* mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c */ - mall_alloc_width_blk_aligned = full_vp_width_blk_aligned; - - /* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */ - mall_alloc_height_blk_aligned = (pipe->plane_res.scl_data.viewport.height - 1 + mblk_height - 1) / - mblk_height * mblk_height + mblk_height; - - /* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c; - * full_mblk_height_ub_l/c = mall_alloc_height_blk_aligned_l/c; - * num_mblk_l/c = (full_mblk_width_ub_l/c / mblk_width_l/c) * (full_mblk_height_ub_l/c / mblk_height_l/c); - * (Should be divisible, but round up if not) - */ - num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) * - ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height); - - /*For DCC: - * meta_num_mblk = CEILING(meta_pitch*full_vp_height*Bpe/256/mblk_bytes, 1) - */ - if (pipe->plane_state->dcc.enable) - num_mblks += (pipe->plane_state->dcc.meta_pitch * pipe->plane_res.scl_data.viewport.height * bytes_per_pixel + - (256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES); - - bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES; - // cache lines used is total bytes / cache_line size. 
Add +2 for worst case alignment - // (MALL is 64-byte aligned) - cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2; - - cache_lines_used += cache_lines_per_plane; + if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) { + if (dc->debug.force_subvp_num_ways) { + return dc->debug.force_subvp_num_ways; + } else { + return dcn32_helper_mall_bytes_to_ways(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes); } + } else { + return 0; } - - total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size; - lines_per_way = total_cache_lines / dc->caps.cache_num_ways; - num_ways = cache_lines_used / lines_per_way; - if (cache_lines_used % lines_per_way > 0) - num_ways++; - - if (subvp_in_use && dc->debug.force_subvp_num_ways > 0) - num_ways = dc->debug.force_subvp_num_ways; - - return num_ways; } void dcn32_merge_pipes_for_subvp(struct dc *dc, @@ -255,6 +230,37 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context) return false; } +bool dcn32_is_center_timing(struct pipe_ctx *pipe) +{ + bool is_center_timing = false; + + if (pipe->stream) { + if (pipe->stream->timing.v_addressable != pipe->stream->dst.height || + pipe->stream->timing.v_addressable != pipe->stream->src.height) { + is_center_timing = true; + } + } + + if (pipe->plane_state) { + if (pipe->stream->timing.v_addressable != pipe->plane_state->dst_rect.height && + pipe->stream->timing.v_addressable != pipe->plane_state->src_rect.height) { + is_center_timing = true; + } + } + + return is_center_timing; +} + +bool dcn32_is_psr_capable(struct pipe_ctx *pipe) +{ + bool psr_capable = false; + + if (pipe->stream && pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) { + psr_capable = true; + } + return psr_capable; +} + /** * ******************************************************************************************* * dcn32_determine_det_override: Determine DET allocation for each pipe @@ -357,6 +363,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context, int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; struct pipe_ctx *pipe; + bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || dc->debug.disable_unbounded_requesting; for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -373,7 +380,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context, */ if (pipe_cnt == 1) { pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE; - if (pipe->plane_state && !dc->debug.disable_z9_mpc && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) { + if (pipe->plane_state && !disable_unbounded_requesting && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) { if (!is_dual_plane(pipe->plane_state->format)) { pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE; pipes[0].pipe.src.unbounded_req_mode = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c index fa9b6603cfd37e..13be5f06d98792 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c @@ -31,7 +31,6 @@ #include "dcn321_dio_link_encoder.h" #include "dcn31/dcn31_dio_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index 
d1f36df03c2eec..55f918b4407711 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -60,7 +60,6 @@ #include "dcn31/dcn31_hpo_dp_stream_encoder.h" #include "dcn31/dcn31_hpo_dp_link_encoder.h" #include "dcn32/dcn32_hpo_dp_link_encoder.h" -#include "dc_link_dp.h" #include "dcn31/dcn31_apg.h" #include "dcn31/dcn31_dio_link_encoder.h" #include "dcn32/dcn32_dio_link_encoder.h" @@ -73,7 +72,7 @@ #include "dml/display_mode_vba.h" #include "dcn32/dcn32_dccg.h" #include "dcn10/dcn10_resource.h" -#include "dc_link_ddc.h" +#include "link.h" #include "dcn31/dcn31_panel_cntl.h" #include "dcn30/dcn30_dwb.h" @@ -724,6 +723,7 @@ static const struct dc_debug_options debug_defaults_drv = { .allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback" .alloc_extra_way_for_cursor = true, .min_prefetch_in_strobe_ns = 60000, // 60us + .disable_unbounded_requesting = false, }; static const struct dc_debug_options debug_defaults_diags = { @@ -1492,7 +1492,7 @@ static void dcn321_resource_destruct(struct dcn321_resource_pool *pool) dcn_dccg_destroy(&pool->base.dccg); if (pool->base.oem_device != NULL) - dal_ddc_service_destroy(&pool->base.oem_device); + link_destroy_ddc_service(&pool->base.oem_device); } @@ -1702,12 +1702,18 @@ static bool dcn321_resource_construct( dc->caps.max_cursor_size = 64; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; - dc->caps.mall_size_per_mem_channel = 0; + dc->caps.mall_size_per_mem_channel = 4; dc->caps.mall_size_total = 0; dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; dc->caps.cache_line_size = 64; dc->caps.cache_num_ways = 16; - dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32 + + /* Calculate the available MALL space */ + dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall( + dc, dc->ctx->dc_bios->vram_info.num_chans) * + dc->caps.mall_size_per_mem_channel * 1024 * 1024; + dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes; + dc->caps.subvp_fw_processing_delay_us = 15; dc->caps.subvp_drr_max_vblank_margin_us = 40; dc->caps.subvp_prefetch_end_to_mall_start_us = 15; @@ -1990,7 +1996,7 @@ static bool dcn321_resource_construct( ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; ddc_init_data.id.enum_id = 0; ddc_init_data.id.type = OBJECT_TYPE_GENERIC; - pool->base.oem_device = dal_ddc_service_create(&ddc_init_data); + pool->base.oem_device = link_create_ddc_service(&ddc_init_data); } else { pool->base.oem_device = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index af1c50ed905abd..7ce9a5b6c33bad 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -161,6 +161,12 @@ enum dc_edid_status dm_helpers_read_local_edid( struct dc_link *link, struct dc_sink *sink); +bool dm_helpers_dp_handle_test_pattern_request( + struct dc_context *ctx, + const struct dc_link *link, + union link_test_pattern dpcd_test_pattern, + union test_misc dpcd_test_params); + void dm_set_dcn_clocks( struct dc_context *ctx, struct dc_clocks *clks); @@ -193,6 +199,7 @@ int dm_helpers_dmub_set_config_sync(struct dc_context *ctx, const struct dc_link *link, struct set_config_cmd_payload *payload, enum set_config_status *operation_result); +enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link); enum dc_edid_status dm_helpers_get_sbios_edid(struct 
dc_link *link, struct dc_edid *edid); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index c26da3bb2892bb..d3ba65efe1d2e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -26,12 +26,12 @@ #include "resource.h" #include "clk_mgr.h" -#include "dc_link_dp.h" #include "dchubbub.h" #include "dcn20/dcn20_resource.h" #include "dcn21/dcn21_resource.h" #include "clk_mgr/dcn21/rn_clk_mgr.h" +#include "link.h" #include "dcn20_fpu.h" #define DC_LOGGER_INIT(logger) @@ -938,7 +938,7 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) for (i = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; - if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) + if (link_is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) return true; } return false; @@ -949,7 +949,6 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc int plane_count; int i; unsigned int optimized_min_dst_y_next_start_us; - bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > 1000.0; plane_count = 0; optimized_min_dst_y_next_start_us = 0; @@ -974,6 +973,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) { struct dc_link *link = context->streams[0]->sink->link; struct dc_stream_status *stream_status = &context->stream_status[0]; + bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > 1000.0; + bool is_pwrseq0 = link->link_index == 0; if (dc_extended_blank_supported(dc)) { for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -986,23 +987,55 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc } } } - /* zstate only supported on PWRSEQ0 and when there's <2 planes*/ - if (link->link_index != 0 || stream_status->plane_count > 1) + + /* Don't support multi-plane configurations */ + if (stream_status->plane_count > 1) return DCN_ZSTATE_SUPPORT_DISALLOW; - if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000) + if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)) return DCN_ZSTATE_SUPPORT_ALLOW; - else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr) + else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr) return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY; else return allow_z8 ? 
DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW; - } else if (allow_z8) { - return DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY; } else { return DCN_ZSTATE_SUPPORT_DISALLOW; } } +static void dcn20_adjust_freesync_v_startup( + const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start) +{ + struct dc_crtc_timing patched_crtc_timing; + uint32_t asic_blank_end = 0; + uint32_t asic_blank_start = 0; + uint32_t newVstartup = 0; + + patched_crtc_timing = *dc_crtc_timing; + + if (patched_crtc_timing.flags.INTERLACE == 1) { + if (patched_crtc_timing.v_front_porch < 2) + patched_crtc_timing.v_front_porch = 2; + } else { + if (patched_crtc_timing.v_front_porch < 1) + patched_crtc_timing.v_front_porch = 1; + } + + /* blank_start = frame end - front porch */ + asic_blank_start = patched_crtc_timing.v_total - + patched_crtc_timing.v_front_porch; + + /* blank_end = blank_start - active */ + asic_blank_end = asic_blank_start - + patched_crtc_timing.v_border_bottom - + patched_crtc_timing.v_addressable - + patched_crtc_timing.v_border_top; + + newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start); + + *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start); +} + void dcn20_calculate_dlg_params( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -1062,6 +1095,11 @@ void dcn20_calculate_dlg_params( context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; + if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid) + dcn20_adjust_freesync_v_startup( + &context->res_ctx.pipe_ctx[i].stream->timing, + &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start); + pipe_idx++; } /*save a original dppclock copy*/ @@ -1302,7 +1340,7 @@ int dcn20_populate_dml_pipes_from_context( case SIGNAL_TYPE_DISPLAY_PORT_MST: case SIGNAL_TYPE_DISPLAY_PORT: pipes[pipe_cnt].dout.output_type = dm_dp; - if (is_dp_128b_132b_signal(&res_ctx->pipe_ctx[i])) + if (link_is_dp_128b_132b_signal(&res_ctx->pipe_ctx[i])) pipes[pipe_cnt].dout.output_type = dm_dp2p0; break; case SIGNAL_TYPE_EDP: diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index d3b5b6fedf042f..6266b0788387ec 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -3897,14 +3897,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - locals->ODMCombineEnablePerState[i][k] = false; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; if (mode_lib->vba.ODMCapability) { if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { - locals->ODMCombineEnablePerState[i][k] = true; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { - locals->ODMCombineEnablePerState[i][k] = true; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; 
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } } @@ -3957,7 +3957,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->RequiredDISPCLK[i][j] = 0.0; locals->DISPCLK_DPPCLK_Support[i][j] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - locals->ODMCombineEnablePerState[i][k] = false; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) { locals->NoOfDPP[i][j][k] = 1; locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index edd098c7eb927c..989d83ee38421d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -4008,17 +4008,17 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - locals->ODMCombineEnablePerState[i][k] = false; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; if (mode_lib->vba.ODMCapability) { if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { - locals->ODMCombineEnablePerState[i][k] = true; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) { - locals->ODMCombineEnablePerState[i][k] = true; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { - locals->ODMCombineEnablePerState[i][k] = true; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } } @@ -4071,7 +4071,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode locals->RequiredDISPCLK[i][j] = 0.0; locals->DISPCLK_DPPCLK_Support[i][j] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - locals->ODMCombineEnablePerState[i][k] = false; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) { locals->NoOfDPP[i][j][k] = 1; locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index 1d84ae50311d9b..b7c2844d0cbee2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -4102,17 +4102,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - locals->ODMCombineEnablePerState[i][k] = false; + 
locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; if (mode_lib->vba.ODMCapability) { if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { - locals->ODMCombineEnablePerState[i][k] = true; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) { - locals->ODMCombineEnablePerState[i][k] = true; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { - locals->ODMCombineEnablePerState[i][k] = true; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } } @@ -4165,7 +4165,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->RequiredDISPCLK[i][j] = 0.0; locals->DISPCLK_DPPCLK_Support[i][j] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - locals->ODMCombineEnablePerState[i][k] = false; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) { locals->NoOfDPP[i][j][k] = 1; locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] @@ -5230,7 +5230,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.ODMCombineEnabled[k] = locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k]; } else { - mode_lib->vba.ODMCombineEnabled[k] = false; + mode_lib->vba.ODMCombineEnabled[k] = dm_odm_combine_mode_disabled; } mode_lib->vba.DSCEnabled[k] = locals->RequiresDSC[mode_lib->vba.VoltageLevel][k]; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c index d4c0f9cdac8e2f..4fa63636479371 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c @@ -634,7 +634,7 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, while (dummy_latency_index < max_latency_table_entries) { context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; - dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); + dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true); if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank == dm_allow_self_refresh_and_mclk_switch) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index ec351c8418cbbd..27f488405335f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -878,7 +878,9 @@ static bool CalculatePrefetchSchedule( double DSTTotalPixelsAfterScaler; double LineTime; double dst_y_prefetch_equ; +#ifdef __DML_VBA_DEBUG__ double Tsw_oto; +#endif double prefetch_bw_oto; double prefetch_bw_pr; double Tvm_oto; @@ -1060,7 +1062,9 @@ static bool CalculatePrefetchSchedule( min_Lsw = 
dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre); Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4; +#ifdef __DML_VBA_DEBUG__ Tsw_oto = Lsw_oto * LineTime; +#endif #ifdef __DML_VBA_DEBUG__ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index 6a1cf6adea77db..acda3e1babd4af 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -149,8 +149,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = { .num_states = 5, .sr_exit_time_us = 16.5, .sr_enter_plus_exit_time_us = 18.5, - .sr_exit_z8_time_us = 280.0, - .sr_enter_plus_exit_z8_time_us = 350.0, + .sr_exit_z8_time_us = 210.0, + .sr_enter_plus_exit_z8_time_us = 310.0, .writeback_latency_us = 12.0, .dram_channel_width_bytes = 4, .round_trip_ping_latency_dcfclk_cycles = 106, @@ -346,7 +346,8 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE; dc->config.enable_4to1MPC = false; - if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { + if (pipe_cnt == 1 && pipe->plane_state + && pipe->plane_state->rotation == ROTATION_ANGLE_0 && !dc->debug.disable_z9_mpc) { if (is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { dc->config.enable_4to1MPC = true; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index cb7c0c87842300..c843b394aeb4aa 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -900,7 +900,9 @@ static bool CalculatePrefetchSchedule( double DSTTotalPixelsAfterScaler; double LineTime; double dst_y_prefetch_equ; +#ifdef __DML_VBA_DEBUG__ double Tsw_oto; +#endif double prefetch_bw_oto; double prefetch_bw_pr; double Tvm_oto; @@ -1082,7 +1084,9 @@ static bool CalculatePrefetchSchedule( min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre); Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4; +#ifdef __DML_VBA_DEBUG__ Tsw_oto = Lsw_oto * LineTime; +#endif #ifdef __DML_VBA_DEBUG__ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c index 61ee9ba063a78e..6576b897a51275 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c @@ -51,7 +51,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes( *BytePerPixelDETC = 0; *BytePerPixelY = 4; *BytePerPixelC = 0; - } else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) { + } else if (SourcePixelFormat == dm_444_16) { *BytePerPixelDETY = 2; *BytePerPixelDETC = 0; *BytePerPixelY = 2; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index f94abd124021ed..e47828e3b6d5d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -24,13 +24,14 @@ * */ #include "dcn32_fpu.h" -#include "dc_link_dp.h" #include "dcn32/dcn32_resource.h" #include 
"dcn20/dcn20_resource.h" #include "display_mode_vba_util_32.h" +#include "dml/dcn32/display_mode_vba_32.h" // We need this includes for WATERMARKS_* defines #include "clk_mgr/dcn32/dcn32_smu13_driver_if.h" #include "dcn30/dcn30_resource.h" +#include "link.h" #define DC_LOGGER_INIT(logger) @@ -691,9 +692,11 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, * to combine this with SubVP can cause issues with the scheduling). * - Not TMZ surface */ - if (pipe->plane_state && !pipe->top_pipe && + if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && !dcn32_is_psr_capable(pipe) && pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface && - vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) { + (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 || + (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 && + dcn32_allow_subvp_with_active_margin(pipe)))) { while (pipe) { num_pipes++; pipe = pipe->bottom_pipe; @@ -877,6 +880,10 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc int16_t stretched_drr_us = 0; int16_t drr_stretched_vblank_us = 0; int16_t max_vblank_mallregion = 0; + const struct dc_config *config = &dc->config; + + if (config->disable_subvp_drr) + return false; // Find SubVP pipe for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -977,10 +984,12 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN) subvp_pipe = pipe; } - // Use ignore_msa_timing_param flag to identify as DRR - if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param) { - // SUBVP + DRR case - schedulable = subvp_drr_schedulable(dc, context, &context->res_ctx.pipe_ctx[vblank_index]); + // Use ignore_msa_timing_param and VRR active, or Freesync flag to identify as DRR On + if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param && + (context->res_ctx.pipe_ctx[vblank_index].stream->allow_freesync || + context->res_ctx.pipe_ctx[vblank_index].stream->vrr_active_variable)) { + // SUBVP + DRR case -- only allowed if run through DRR validation path + schedulable = false; } else if (found) { main_timing = &subvp_pipe->stream->timing; phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; @@ -1084,12 +1093,12 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, { struct vba_vars_st *vba = &context->bw_ctx.dml.vba; unsigned int dc_pipe_idx = 0; + int i = 0; bool found_supported_config = false; struct pipe_ctx *pipe = NULL; uint32_t non_subvp_pipes = 0; bool drr_pipe_found = false; uint32_t drr_pipe_index = 0; - uint32_t i = 0; dc_assert_fp_enabled(); @@ -1169,15 +1178,25 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0); *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt); + /* Check that vlevel requested supports pstate or not + * if not, select the lowest vlevel that supports it + */ + for (i = *vlevel; i < context->bw_ctx.dml.soc.num_states; i++) { + if (vba->DRAMClockChangeSupport[i][vba->maxMpcComb] != dm_dram_clock_change_unsupported) { + *vlevel = i; + break; + } + } + if (*vlevel < 
context->bw_ctx.dml.soc.num_states && vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported && subvp_validate_static_schedulability(dc, context, *vlevel)) { found_supported_config = true; - } else if (*vlevel < context->bw_ctx.dml.soc.num_states && - vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) { - /* Case where 1 SubVP is added, and DML reports MCLK unsupported. This handles - * the case for SubVP + DRR, where the DRR display does not support MCLK switch - * at it's native refresh rate / timing. + } else if (*vlevel < context->bw_ctx.dml.soc.num_states) { + /* Case where 1 SubVP is added, and DML reports MCLK unsupported or DRR is allowed. + * This handles the case for SubVP + DRR, where the DRR display does not support MCLK + * switch at its native refresh rate / timing, or DRR is allowed for the non-subvp + * display. */ for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; @@ -1185,7 +1204,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, pipe->stream->mall_stream_config.type == SUBVP_NONE) { non_subvp_pipes++; // Use ignore_msa_timing_param flag to identify as DRR - if (pipe->stream->ignore_msa_timing_param) { + if (pipe->stream->ignore_msa_timing_param && pipe->stream->allow_freesync) { drr_pipe_found = true; drr_pipe_index = i; } @@ -1194,6 +1213,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, // If there is only 1 remaining non SubVP pipe that is DRR, check static // schedulability for SubVP + DRR. if (non_subvp_pipes == 1 && drr_pipe_found) { + /* find lowest vlevel that supports the config */ + for (i = *vlevel; i >= 0; i--) { + if (vba->ModeSupport[i][vba->maxMpcComb]) { + *vlevel = i; + } else { + break; + } + } + found_supported_config = subvp_drr_schedulable(dc, context, &context->res_ctx.pipe_ctx[drr_pipe_index]); } @@ -1242,12 +1270,44 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) for (i = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; - if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) + if (link_is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) return true; } return false; } +static void dcn20_adjust_freesync_v_startup(const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start) +{ + struct dc_crtc_timing patched_crtc_timing; + uint32_t asic_blank_end = 0; + uint32_t asic_blank_start = 0; + uint32_t newVstartup = 0; + + patched_crtc_timing = *dc_crtc_timing; + + if (patched_crtc_timing.flags.INTERLACE == 1) { + if (patched_crtc_timing.v_front_porch < 2) + patched_crtc_timing.v_front_porch = 2; + } else { + if (patched_crtc_timing.v_front_porch < 1) + patched_crtc_timing.v_front_porch = 1; + } + + /* blank_start = frame end - front porch */ + asic_blank_start = patched_crtc_timing.v_total - + patched_crtc_timing.v_front_porch; + + /* blank_end = blank_start - active */ + asic_blank_end = asic_blank_start - + patched_crtc_timing.v_border_bottom - + patched_crtc_timing.v_addressable - + patched_crtc_timing.v_border_top; + + newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start); + + *vstartup_start = ((newVstartup > *vstartup_start) ?
newVstartup : *vstartup_start); +} + static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt, int vlevel) @@ -1270,7 +1330,6 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, context->bw_ctx.bw.dcn.clk.p_state_change_support = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != dm_dram_clock_change_unsupported; - context->bw_ctx.bw.dcn.clk.num_ways = dcn32_helper_calculate_num_ways_for_subvp(dc, context); context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context); @@ -1294,6 +1353,10 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, unbounded_req_enabled = false; } + context->bw_ctx.bw.dcn.mall_ss_size_bytes = 0; + context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0; + context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0; + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; @@ -1325,6 +1388,34 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, else context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0; context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; + + context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes = get_surface_size_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + + /* MALL Allocation Sizes */ + /* count from active, top pipes per plane only */ + if (context->res_ctx.pipe_ctx[i].stream && context->res_ctx.pipe_ctx[i].plane_state && + (context->res_ctx.pipe_ctx[i].top_pipe == NULL || + context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) && + context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) { + /* SS: all active surfaces stored in MALL */ + if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type != SUBVP_PHANTOM) { + context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; + + if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) { + /* SS PSR On: all active surfaces part of streams not supporting PSR stored in MALL */ + context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; + } + } else { + /* SUBVP: phantom surfaces only stored in MALL */ + context->bw_ctx.bw.dcn.mall_subvp_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; + } + } + + if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid) + dcn20_adjust_freesync_v_startup( + &context->res_ctx.pipe_ctx[i].stream->timing, + &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start); + pipe_idx++; } /* If DCN isn't making memory requests we can allow pstate change and lower clocks */ @@ -1345,6 +1436,8 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000; + context->bw_ctx.bw.dcn.clk.num_ways = dcn32_helper_calculate_num_ways_for_subvp(dc, context); + context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes; for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1530,6 +1623,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, } dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); + context->bw_ctx.dml.soc.max_vratio_pre = 
dcn32_determine_max_vratio_prefetch(dc, context); if (!fast_validate) dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt); @@ -1549,16 +1643,12 @@ bool dcn32_internal_validate_bw(struct dc *dc, * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2) */ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = - dm_prefetch_support_fclk_and_stutter; + dm_prefetch_support_none; + context->bw_ctx.dml.validate_max_state = fast_validate; vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); - /* Last attempt with Prefetch mode 2 (dm_prefetch_support_stutter == 3) */ - if (vlevel == context->bw_ctx.dml.soc.num_states) { - context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = - dm_prefetch_support_stutter; - vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); - } + context->bw_ctx.dml.validate_max_state = false; if (vlevel < context->bw_ctx.dml.soc.num_states) { memset(split, 0, sizeof(split)); @@ -1645,6 +1735,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc); memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); + memset(&pipe->link_res, 0, sizeof(pipe->link_res)); repopulate_pipes = true; } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { struct pipe_ctx *top_pipe = pipe->top_pipe; @@ -1660,6 +1751,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, pipe->stream = NULL; memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); + memset(&pipe->link_res, 0, sizeof(pipe->link_res)); repopulate_pipes = true; } else ASSERT(0); /* Should never try to merge master pipe */ @@ -1834,7 +1926,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, bool subvp_in_use = dcn32_subvp_in_use(dc, context); unsigned int min_dram_speed_mts_margin; bool need_fclk_lat_as_dummy = false; - bool is_subvp_p_drr = true; + bool is_subvp_p_drr = false; dc_assert_fp_enabled(); @@ -1842,7 +1934,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, if (subvp_in_use) { /* Override DRAMClockChangeSupport for SubVP + DRR case where the DRR cannot switch without stretching it's VBLANK */ if (!pstate_en) { - context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; + context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; + context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = dm_prefetch_support_fclk_and_stutter; pstate_en = true; is_subvp_p_drr = true; } @@ -1860,8 +1953,9 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); + maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; if (is_subvp_p_drr) { - context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; + context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; } } @@ -2038,6 +2132,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, */ 
context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0; + /* Calculate the FCLK p-state change watermark from the FCLK p-state change latency when + * UCLK p-state is not supported, to avoid underflow when FCLK p-state is supported + */ + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; } else { /* Set A: * All clocks min. @@ -2443,8 +2541,11 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa } /* Override from VBIOS for num_chan */ - if (dc->ctx->dc_bios->vram_info.num_chans) + if (dc->ctx->dc_bios->vram_info.num_chans) { dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; + dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, + dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); + } if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; @@ -2622,3 +2723,60 @@ void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes, pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0; } + +bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe) +{ + bool allow = false; + uint32_t refresh_rate = 0; + + /* Allow SubVP on displays that have active margin, for 2560x1440@60hz + * displays only for now. There must be no scaling as well. + * + * For now we only enable on 2560x1440@60hz displays to enable 4K60 + 1440p60 configs + * for p-state switching. + */ + if (pipe->stream && pipe->plane_state) { + refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1) + / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total); + if (pipe->stream->timing.v_addressable == 1440 && + pipe->stream->timing.h_addressable == 2560 && + refresh_rate >= 55 && refresh_rate <= 65 && + pipe->plane_state->src_rect.height == 1440 && + pipe->plane_state->src_rect.width == 2560 && + pipe->plane_state->dst_rect.height == 1440 && + pipe->plane_state->dst_rect.width == 2560) + allow = true; + } + return allow; +} + +/** + * ******************************************************************************************* + * dcn32_determine_max_vratio_prefetch: Determine max Vratio for prefetch by driver policy + * + * @param [in]: dc: Current DC state + * @param [in]: context: New DC state to be programmed + * + * @return: Max vratio for prefetch + * + * ******************************************************************************************* + */ +double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context) +{ + double max_vratio_pre = __DML_MAX_BW_RATIO_PRE__; // Default value is 4 + int i; + + /* For single display MPO configs, allow the max vratio to be 8 + * if any plane is YUV420 format + */ + if (context->stream_count == 1 && context->stream_status[0].plane_count > 1) { + for (i = 0; i < context->stream_status[0].plane_count; i++) { + if (context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr || + context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb) { + max_vratio_pre = __DML_MAX_VRATIO_PRE__; + } + } + } + return max_vratio_pre; +} diff --git
a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index 4b8f5fa0f0ad61..3b2a014ccf8f53 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -387,6 +387,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman mode_lib->vba.NumberOfActiveSurfaces, mode_lib->vba.MALLAllocatedForDCNFinal, mode_lib->vba.UseMALLForStaticScreen, + mode_lib->vba.UsesMALLForPStateChange, mode_lib->vba.DCCEnable, mode_lib->vba.ViewportStationary, mode_lib->vba.ViewportXStartY, @@ -411,6 +412,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->BlockWidthC, v->BlockHeightY, v->BlockHeightC, + mode_lib->vba.DCCMetaPitchY, + mode_lib->vba.DCCMetaPitchC, /* Output */ v->SurfaceSizeInMALL, @@ -893,8 +896,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman if (v->DestinationLinesForPrefetch[k] < 2) DestinationLineTimesForPrefetchLessThan2 = true; - if (v->VRatioPrefetchY[k] > __DML_MAX_VRATIO_PRE__ - || v->VRatioPrefetchC[k] > __DML_MAX_VRATIO_PRE__) + if (v->VRatioPrefetchY[k] > v->MaxVRatioPre + || v->VRatioPrefetchC[k] > v->MaxVRatioPre) VRatioPrefetchMoreThanMax = true; //bool DestinationLinesToRequestVMInVBlankEqualOrMoreThan32 = false; @@ -939,6 +942,9 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->UrgBurstFactorLumaPre, v->UrgBurstFactorChromaPre, v->UrgBurstFactorCursorPre, + v->PrefetchBandwidth, + v->VRatio, + v->MaxVRatioPre, /* output */ &MaxTotalRDBandwidth, @@ -969,6 +975,9 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector, v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector, v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector, + v->PrefetchBandwidth, + v->VRatio, + v->MaxVRatioPre, /* output */ &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_single[0], @@ -1636,9 +1645,14 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman static void mode_support_configuration(struct vba_vars_st *v, struct display_mode_lib *mode_lib) { - int i, j; + int i, j, start_state; + + if (mode_lib->validate_max_state) + start_state = v->soc.num_states - 1; + else + start_state = 0; - for (i = v->soc.num_states - 1; i >= 0; i--) { + for (i = v->soc.num_states - 1; i >= start_state; i--) { for (j = 0; j < 2; j++) { if (mode_lib->vba.ScaleRatioAndTapsSupport == true && mode_lib->vba.SourceFormatPixelAndScanSupport == true @@ -1707,7 +1721,7 @@ static void mode_support_configuration(struct vba_vars_st *v, void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib) { struct vba_vars_st *v = &mode_lib->vba; - int i, j; + int i, j, start_state; unsigned int k, m; unsigned int MaximumMPCCombine; unsigned int NumberOfNonCombinedSurfaceOfMaximumBandwidth; @@ -1720,6 +1734,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l #endif /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/ + if (mode_lib->validate_max_state) + start_state = v->soc.num_states - 1; + else + start_state = 0; /*Scale Ratio, taps Support Check*/ @@ 
-2009,7 +2027,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.MPCCombineMethodIncompatible = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsNeededForPStateChangeAndVoltage && v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsPossible; - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (j = 0; j < 2; j++) { mode_lib->vba.TotalNumberOfActiveDPP[i][j] = 0; mode_lib->vba.TotalAvailablePipesSupport[i][j] = true; @@ -2286,7 +2304,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - for (i = 0; i < v->soc.num_states; ++i) { + for (i = start_state; i < v->soc.num_states; ++i) { mode_lib->vba.ExceededMultistreamSlots[i] = false; for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == k) { @@ -2335,8 +2353,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l if (mode_lib->vba.DSCEnable[k] && mode_lib->vba.ForcedOutputLinkBPP[k] != 0) mode_lib->vba.DSCOnlyIfNecessaryWithBPP = true; - if ((mode_lib->vba.DSCEnable[k] || mode_lib->vba.DSCEnable[k]) - && mode_lib->vba.OutputFormat[k] == dm_n422 + if (mode_lib->vba.DSCEnable[k] && mode_lib->vba.OutputFormat[k] == dm_n422 && !mode_lib->vba.DSC422NativeSupport) mode_lib->vba.DSC422NativeNotSupported = true; @@ -2386,7 +2403,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - for (i = 0; i < v->soc.num_states; ++i) { + for (i = start_state; i < v->soc.num_states; ++i) { mode_lib->vba.DTBCLKRequiredMoreThanSupported[i] = false; for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { if (mode_lib->vba.BlendingAndTiming[k] == k @@ -2403,7 +2420,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - for (i = 0; i < v->soc.num_states; ++i) { + for (i = start_state; i < v->soc.num_states; ++i) { mode_lib->vba.ODMCombine2To1SupportCheckOK[i] = true; mode_lib->vba.ODMCombine4To1SupportCheckOK[i] = true; for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { @@ -2421,7 +2438,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = false; for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { @@ -2458,7 +2475,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /* Check DSC Unit and Slices Support */ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0; - for (i = 0; i < v->soc.num_states; ++i) { + for (i = start_state; i < v->soc.num_states; ++i) { mode_lib->vba.NotEnoughDSCUnits[i] = false; mode_lib->vba.NotEnoughDSCSlices[i] = false; v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0; @@ -2493,7 +2510,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } /*DSC Delay per state*/ - for (i = 0; i < v->soc.num_states; ++i) { + for (i = start_state; i < v->soc.num_states; ++i) { for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { mode_lib->vba.DSCDelayPerState[i][k] = dml32_DSCDelayRequirement( mode_lib->vba.RequiresDSC[i][k], mode_lib->vba.ODMCombineEnablePerState[i][k], @@ -2520,7 +2537,7 
@@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l //Calculate Swath, DET Configuration, DCFCLKDeepSleep // - for (i = 0; i < (int) v->soc.num_states; ++i) { + for (i = start_state; i < (int) v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { mode_lib->vba.RequiredDPPCLKThisState[k] = mode_lib->vba.RequiredDPPCLK[i][j][k]; @@ -2626,6 +2643,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.NumberOfActiveSurfaces, mode_lib->vba.MALLAllocatedForDCNFinal, mode_lib->vba.UseMALLForStaticScreen, + mode_lib->vba.UsesMALLForPStateChange, mode_lib->vba.DCCEnable, mode_lib->vba.ViewportStationary, mode_lib->vba.ViewportXStartY, @@ -2650,12 +2668,14 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.MacroTileWidthC, mode_lib->vba.MacroTileHeightY, mode_lib->vba.MacroTileHeightC, + mode_lib->vba.DCCMetaPitchY, + mode_lib->vba.DCCMetaPitchC, /* Output */ mode_lib->vba.SurfaceSizeInMALL, &mode_lib->vba.ExceededMALLSize); - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (j = 0; j < 2; j++) { for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) { mode_lib->vba.swath_width_luma_ub_this_state[k] = @@ -2882,7 +2902,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } //Calculate Return BW - for (i = 0; i < (int) v->soc.num_states; ++i) { + for (i = start_state; i < (int) v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { @@ -2961,7 +2981,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l &mode_lib->vba.MinPrefetchMode, &mode_lib->vba.MaxPrefetchMode); - for (i = 0; i < (int) v->soc.num_states; ++i) { + for (i = start_state; i < (int) v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) mode_lib->vba.DCFCLKState[i][j] = mode_lib->vba.DCFCLKPerState[i]; } @@ -3083,7 +3103,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.DCFCLKState); } // UseMinimumRequiredDCFCLK == true - for (i = 0; i < (int) v->soc.num_states; ++i) { + for (i = start_state; i < (int) v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { mode_lib->vba.ReturnBWPerState[i][j] = dml32_get_return_bw_mbps(&mode_lib->vba.soc, i, mode_lib->vba.HostVMEnable, mode_lib->vba.DCFCLKState[i][j], @@ -3092,7 +3112,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } //Re-ordering Buffer Support Check - for (i = 0; i < (int) v->soc.num_states; ++i) { + for (i = start_state; i < (int) v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024 / mode_lib->vba.ReturnBWPerState[i][j] @@ -3114,7 +3134,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l + mode_lib->vba.ReadBandwidthChroma[k]; } - for (i = 0; i < (int) v->soc.num_states; ++i) { + for (i = start_state; i < (int) v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][j] = dml_min3(mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKState[i][j] @@ -3138,7 +3158,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /* Prefetch Check */ - for (i = 0; i < (int) v->soc.num_states; ++i) { + for 
(i = start_state; i < (int) v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { mode_lib->vba.TimeCalc = 24 / mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j]; @@ -3358,6 +3378,9 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.UrgentBurstFactorLumaPre, mode_lib->vba.UrgentBurstFactorChromaPre, mode_lib->vba.UrgentBurstFactorCursorPre, + v->PrefetchBW, + v->VRatio, + v->MaxVRatioPre, /* output */ &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[0], // Single *PrefetchBandwidth @@ -3382,8 +3405,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.VRatioInPrefetchSupported[i][j] = true; for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) { - if (mode_lib->vba.VRatioPreY[i][j][k] > __DML_MAX_VRATIO_PRE__ - || mode_lib->vba.VRatioPreC[i][j][k] > __DML_MAX_VRATIO_PRE__ + if (mode_lib->vba.VRatioPreY[i][j][k] > mode_lib->vba.MaxVRatioPre + || mode_lib->vba.VRatioPreC[i][j][k] > mode_lib->vba.MaxVRatioPre || mode_lib->vba.NoTimeForPrefetch[i][j][k] == true) { mode_lib->vba.VRatioInPrefetchSupported[i][j] = false; } @@ -3639,7 +3662,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64 && mode_lib->vba.SourcePixelFormat[k] != dm_444_32 && mode_lib->vba.SourcePixelFormat[k] != dm_444_16 - && mode_lib->vba.SourcePixelFormat[k] != dm_444_16 && mode_lib->vba.SourcePixelFormat[k] != dm_444_8 && mode_lib->vba.SourcePixelFormat[k] != dm_rgbe) { if (mode_lib->vba.ViewportWidthChroma[k] > mode_lib->vba.SurfaceWidthC[k] @@ -3656,7 +3678,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l MaximumMPCCombine = 0; - for (i = v->soc.num_states; i >= 0; i--) { + for (i = v->soc.num_states; i >= start_state; i--) { if (i == v->soc.num_states || mode_lib->vba.ModeSupport[i][0] == true || mode_lib->vba.ModeSupport[i][1] == true) { mode_lib->vba.VoltageLevel = i; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h index c8b28c83ddf480..500b3dd6052d9c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h @@ -44,7 +44,8 @@ #define __DML_MIN_DCFCLK_FACTOR__ 1.15 // Prefetch schedule max vratio -#define __DML_MAX_VRATIO_PRE__ 4.0 +#define __DML_MAX_VRATIO_PRE__ 7.9 +#define __DML_MAX_BW_RATIO_PRE__ 4.0 #define __DML_VBA_MAX_DST_Y_PRE__ 63.75 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index b53feeaf5cf117..d1000aa4c48162 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -1772,6 +1772,7 @@ void dml32_CalculateSurfaceSizeInMall( unsigned int NumberOfActiveSurfaces, unsigned int MALLAllocatedForDCN, enum dm_use_mall_for_static_screen_mode UseMALLForStaticScreen[], + enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[], bool DCCEnable[], bool ViewportStationary[], unsigned int ViewportXStartY[], @@ -1796,13 +1797,17 @@ void dml32_CalculateSurfaceSizeInMall( unsigned int ReadBlockWidthC[], unsigned int ReadBlockHeightY[], unsigned int ReadBlockHeightC[], + unsigned int DCCMetaPitchY[], + unsigned int DCCMetaPitchC[], /* Output */ unsigned int SurfaceSizeInMALL[], bool 
*ExceededMALLSize) { - unsigned int TotalSurfaceSizeInMALL = 0; unsigned int k; + unsigned int TotalSurfaceSizeInMALLForSS = 0; + unsigned int TotalSurfaceSizeInMALLForSubVP = 0; + unsigned int MALLAllocatedForDCNInBytes = MALLAllocatedForDCN * 1024 * 1024; for (k = 0; k < NumberOfActiveSurfaces; ++k) { if (ViewportStationary[k]) { @@ -1828,18 +1833,18 @@ void dml32_CalculateSurfaceSizeInMall( } if (DCCEnable[k] == true) { SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] + - dml_min(dml_ceil(SurfaceWidthY[k], 8 * Read256BytesBlockWidthY[k]), + (dml_min(dml_ceil(DCCMetaPitchY[k], 8 * Read256BytesBlockWidthY[k]), dml_floor(ViewportXStartY[k] + ViewportWidthY[k] + 8 * Read256BytesBlockWidthY[k] - 1, 8 * Read256BytesBlockWidthY[k]) - dml_floor(ViewportXStartY[k], 8 * Read256BytesBlockWidthY[k])) * dml_min(dml_ceil(SurfaceHeightY[k], 8 * Read256BytesBlockHeightY[k]), dml_floor(ViewportYStartY[k] + ViewportHeightY[k] + 8 * Read256BytesBlockHeightY[k] - 1, 8 * - Read256BytesBlockHeightY[k]) - dml_floor(ViewportYStartY[k], 8 - * Read256BytesBlockHeightY[k])) * BytesPerPixelY[k] / 256; + Read256BytesBlockHeightY[k]) - dml_floor(ViewportYStartY[k], 8 * + Read256BytesBlockHeightY[k])) * BytesPerPixelY[k] / 256) + (64 * 1024); if (Read256BytesBlockWidthC[k] > 0) { SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] + - dml_min(dml_ceil(SurfaceWidthC[k], 8 * + dml_min(dml_ceil(DCCMetaPitchC[k], 8 * Read256BytesBlockWidthC[k]), dml_floor(ViewportXStartC[k] + ViewportWidthC[k] + 8 * Read256BytesBlockWidthC[k] - 1, 8 * @@ -1872,16 +1877,16 @@ void dml32_CalculateSurfaceSizeInMall( } if (DCCEnable[k] == true) { SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] + - dml_ceil(dml_min(SurfaceWidthY[k], ViewportWidthY[k] + 8 * + (dml_ceil(dml_min(DCCMetaPitchY[k], ViewportWidthY[k] + 8 * Read256BytesBlockWidthY[k] - 1), 8 * Read256BytesBlockWidthY[k]) * dml_ceil(dml_min(SurfaceHeightY[k], ViewportHeightY[k] + 8 * Read256BytesBlockHeightY[k] - 1), 8 * - Read256BytesBlockHeightY[k]) * BytesPerPixelY[k] / 256; + Read256BytesBlockHeightY[k]) * BytesPerPixelY[k] / 256) + (64 * 1024); if (Read256BytesBlockWidthC[k] > 0) { SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] + - dml_ceil(dml_min(SurfaceWidthC[k], ViewportWidthC[k] + 8 * + dml_ceil(dml_min(DCCMetaPitchC[k], ViewportWidthC[k] + 8 * Read256BytesBlockWidthC[k] - 1), 8 * Read256BytesBlockWidthC[k]) * dml_ceil(dml_min(SurfaceHeightC[k], ViewportHeightC[k] + 8 * @@ -1894,10 +1899,14 @@ void dml32_CalculateSurfaceSizeInMall( } for (k = 0; k < NumberOfActiveSurfaces; ++k) { - if (UseMALLForStaticScreen[k] == dm_use_mall_static_screen_enable) - TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k]; + /* SS and SubVP counted separately as they are never used at the same time */ + if (UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) + TotalSurfaceSizeInMALLForSubVP = TotalSurfaceSizeInMALLForSubVP + SurfaceSizeInMALL[k]; + else if (UseMALLForStaticScreen[k] == dm_use_mall_static_screen_enable) + TotalSurfaceSizeInMALLForSS = TotalSurfaceSizeInMALLForSS + SurfaceSizeInMALL[k]; } - *ExceededMALLSize = (TotalSurfaceSizeInMALL > MALLAllocatedForDCN * 1024 * 1024); + *ExceededMALLSize = (TotalSurfaceSizeInMALLForSS > MALLAllocatedForDCNInBytes) || + (TotalSurfaceSizeInMALLForSubVP > MALLAllocatedForDCNInBytes); } // CalculateSurfaceSizeInMall void dml32_CalculateVMRowAndSwath( @@ -3471,7 +3480,7 @@ bool dml32_CalculatePrefetchSchedule( double prefetch_sw_bytes; double bytes_pp; double dep_bytes; - unsigned int max_vratio_pre = __DML_MAX_VRATIO_PRE__; +
unsigned int max_vratio_pre = v->MaxVRatioPre; double min_Lsw; double Tsw_est1 = 0; double Tsw_est3 = 0; @@ -6134,29 +6143,46 @@ void dml32_CalculatePrefetchBandwithSupport(unsigned int NumberOfActiveSurfaces, double UrgentBurstFactorLumaPre[], double UrgentBurstFactorChromaPre[], double UrgentBurstFactorCursorPre[], + double PrefetchBW[], + double VRatio[], + double MaxVRatioPre, /* output */ - double *PrefetchBandwidth, + double *MaxPrefetchBandwidth, double *FractionOfUrgentBandwidth, bool *PrefetchBandwidthSupport) { unsigned int k; + double ActiveBandwidthPerSurface; bool NotEnoughUrgentLatencyHiding = false; + double TotalActiveBandwidth = 0; + double TotalPrefetchBandwidth = 0; + for (k = 0; k < NumberOfActiveSurfaces; ++k) { if (NotUrgentLatencyHiding[k]) { NotEnoughUrgentLatencyHiding = true; } } - *PrefetchBandwidth = 0; + *MaxPrefetchBandwidth = 0; for (k = 0; k < NumberOfActiveSurfaces; ++k) { - *PrefetchBandwidth = *PrefetchBandwidth + dml_max3(NumberOfDPP[k] * prefetch_vmrow_bw[k], - ReadBandwidthLuma[k] * UrgentBurstFactorLuma[k] + ReadBandwidthChroma[k] * UrgentBurstFactorChroma[k] + cursor_bw[k] * UrgentBurstFactorCursor[k] + NumberOfDPP[k] * (meta_row_bandwidth[k] + dpte_row_bandwidth[k]), + ActiveBandwidthPerSurface = ReadBandwidthLuma[k] * UrgentBurstFactorLuma[k] + ReadBandwidthChroma[k] * UrgentBurstFactorChroma[k] + cursor_bw[k] * UrgentBurstFactorCursor[k] + NumberOfDPP[k] * (meta_row_bandwidth[k] + dpte_row_bandwidth[k]); + + TotalActiveBandwidth += ActiveBandwidthPerSurface; + + TotalPrefetchBandwidth = TotalPrefetchBandwidth + PrefetchBW[k] * VRatio[k]; + + *MaxPrefetchBandwidth = *MaxPrefetchBandwidth + dml_max3(NumberOfDPP[k] * prefetch_vmrow_bw[k], + ActiveBandwidthPerSurface, NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * UrgentBurstFactorLumaPre[k] + PrefetchBandwidthChroma[k] * UrgentBurstFactorChromaPre[k]) + cursor_bw_pre[k] * UrgentBurstFactorCursorPre[k]); } - *PrefetchBandwidthSupport = (*PrefetchBandwidth <= ReturnBW) && !NotEnoughUrgentLatencyHiding; - *FractionOfUrgentBandwidth = *PrefetchBandwidth / ReturnBW; + if (MaxVRatioPre == __DML_MAX_VRATIO_PRE__) + *PrefetchBandwidthSupport = (*MaxPrefetchBandwidth <= ReturnBW) && (TotalPrefetchBandwidth <= TotalActiveBandwidth * __DML_MAX_BW_RATIO_PRE__) && !NotEnoughUrgentLatencyHiding; + else + *PrefetchBandwidthSupport = (*MaxPrefetchBandwidth <= ReturnBW) && !NotEnoughUrgentLatencyHiding; + + *FractionOfUrgentBandwidth = *MaxPrefetchBandwidth / ReturnBW; } double dml32_CalculateBandwidthAvailableForImmediateFlip(unsigned int NumberOfActiveSurfaces, @@ -6245,7 +6271,7 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface double PixelClock[], double VRatioY[], double VRatioC[], - enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[DC__NUM_DPP__MAX]) + enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[]) { int k; double SwathSizeAllSurfaces = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h index 779c6805f59977..9ba792c633a5dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h @@ -334,6 +334,7 @@ void dml32_CalculateSurfaceSizeInMall( unsigned int NumberOfActiveSurfaces, unsigned int MALLAllocatedForDCN, enum dm_use_mall_for_static_screen_mode UseMALLForStaticScreen[], + enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[], bool 
DCCEnable[], bool ViewportStationary[], unsigned int ViewportXStartY[], @@ -358,6 +359,8 @@ void dml32_CalculateSurfaceSizeInMall( unsigned int ReadBlockWidthC[], unsigned int ReadBlockHeightY[], unsigned int ReadBlockHeightC[], + unsigned int DCCMetaPitchY[], + unsigned int DCCMetaPitchC[], /* Output */ unsigned int SurfaceSizeInMALL[], @@ -1093,9 +1096,12 @@ void dml32_CalculatePrefetchBandwithSupport(unsigned int NumberOfActiveSurfaces, double UrgentBurstFactorLumaPre[], double UrgentBurstFactorChromaPre[], double UrgentBurstFactorCursorPre[], + double PrefetchBW[], + double VRatio[], + double MaxVRatioPre, /* output */ - double *PrefetchBandwidth, + double *MaxPrefetchBandwidth, double *FractionOfUrgentBandwidth, bool *PrefetchBandwidthSupport); @@ -1157,6 +1163,6 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface double PixelClock[], double VRatioY[], double VRatioC[], - enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[DC__NUM_DPP__MAX]); + enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[]); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c index f4b176599be7a1..b80cef70fa60f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c @@ -136,7 +136,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = { .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, - .pct_ideal_sdp_bw_after_urgent = 100.0, + .pct_ideal_sdp_bw_after_urgent = 90.0, .pct_ideal_fabric_bw_after_urgent = 67.0, .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0, .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented @@ -534,8 +534,11 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p } /* Override from VBIOS for num_chan */ - if (dc->ctx->dc_bios->vram_info.num_chans) + if (dc->ctx->dc_bios->vram_info.num_chans) { dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; + dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, + dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); + } if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index 3d643d50c3eb59..a9d49ef58fb59c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -91,6 +91,7 @@ struct display_mode_lib { struct dal_logger *logger; struct dml_funcs funcs; struct _vcs_dpi_display_e2e_pipe_params_st dml_pipe_state[6]; + bool validate_max_state; }; void dml_init_instance(struct display_mode_lib *lib, diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index 64d602e6412f16..3c077164f36203 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -246,6 +246,7 @@ struct _vcs_dpi_soc_bounding_box_st { bool disable_dram_clock_change_vactive_support; bool allow_dram_clock_one_display_vactive; enum 
self_refresh_affinity allow_dram_self_refresh_or_dram_clock_change_in_vblank; + double max_vratio_pre; }; /** diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 8e6585dab20ef3..f9653f511baa32 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -202,6 +202,7 @@ dml_get_pipe_attr_func(vm_group_size_in_bytes, mode_lib->vba.vm_group_bytes); dml_get_pipe_attr_func(dpte_row_height_linear_l, mode_lib->vba.dpte_row_height_linear); dml_get_pipe_attr_func(pte_buffer_mode, mode_lib->vba.PTE_BUFFER_MODE); dml_get_pipe_attr_func(subviewport_lines_needed_in_mall, mode_lib->vba.SubViewportLinesNeededInMALL); +dml_get_pipe_attr_func(surface_size_in_mall, mode_lib->vba.SurfaceSizeInMALL) double get_total_immediate_flip_bytes( struct display_mode_lib *mode_lib, @@ -411,6 +412,7 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib) soc->urgent_latency_adjustment_fabric_clock_component_us; mode_lib->vba.UrgentLatencyAdjustmentFabricClockReference = soc->urgent_latency_adjustment_fabric_clock_reference_mhz; + mode_lib->vba.MaxVRatioPre = soc->max_vratio_pre; } static void fetch_ip_params(struct display_mode_lib *mode_lib) diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 81e53e67cd0b02..07993741f5e621 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -143,6 +143,7 @@ dml_get_pipe_attr_decl(vready_at_or_after_vsync); dml_get_pipe_attr_decl(min_dst_y_next_start); dml_get_pipe_attr_decl(vstartup_calculated); dml_get_pipe_attr_decl(subviewport_lines_needed_in_mall); +dml_get_pipe_attr_decl(surface_size_in_mall); double get_total_immediate_flip_bytes( struct display_mode_lib *mode_lib, @@ -262,6 +263,7 @@ struct vba_vars_st { int maxMpcComb; bool UseMaximumVStartup; + double MaxVRatioPre; double WritebackDISPCLK; double DPPCLKUsingSingleDPPLuma; double DPPCLKUsingSingleDPPChroma; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h index ad80bde9bc0f24..31574940ccc75b 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h @@ -46,7 +46,10 @@ struct dsc_parameters { uint32_t rc_buffer_model_size; }; -int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_parameters *dsc_params); +struct rc_params; +int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, + const struct rc_params *rc, + struct dsc_parameters *dsc_params); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index f0aea988fef018..36d6c1646a5126 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -95,19 +95,19 @@ static void copy_rc_to_cfg(struct drm_dsc_config *dsc_cfg, const struct rc_param dsc_cfg->rc_buf_thresh[i] = rc->rc_buf_thresh[i]; } -int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_parameters *dsc_params) +int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, + const struct rc_params *rc, + struct dsc_parameters *dsc_params) { int ret; - struct rc_params rc; struct drm_dsc_config dsc_cfg; unsigned long long tmp; - calc_rc_params(&rc, pps); dsc_params->pps = *pps; - dsc_params->pps.initial_scale_value 
= 8 * rc.rc_model_size / (rc.rc_model_size - rc.initial_fullness_offset); + dsc_params->pps.initial_scale_value = 8 * rc->rc_model_size / (rc->rc_model_size - rc->initial_fullness_offset); copy_pps_fields(&dsc_cfg, &dsc_params->pps); - copy_rc_to_cfg(&dsc_cfg, &rc); + copy_rc_to_cfg(&dsc_cfg, rc); dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 48 : 64; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c index 9b63c6c0cc844e..e0bd0c722e0066 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c @@ -138,7 +138,8 @@ static const struct ddc_sh_mask ddc_shift[] = { DDC_MASK_SH_LIST_DCN2(__SHIFT, 3), DDC_MASK_SH_LIST_DCN2(__SHIFT, 4), DDC_MASK_SH_LIST_DCN2(__SHIFT, 5), - DDC_MASK_SH_LIST_DCN2(__SHIFT, 6) + DDC_MASK_SH_LIST_DCN2(__SHIFT, 6), + DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT) }; static const struct ddc_sh_mask ddc_mask[] = { @@ -147,7 +148,8 @@ static const struct ddc_sh_mask ddc_mask[] = { DDC_MASK_SH_LIST_DCN2(_MASK, 3), DDC_MASK_SH_LIST_DCN2(_MASK, 4), DDC_MASK_SH_LIST_DCN2(_MASK, 5), - DDC_MASK_SH_LIST_DCN2(_MASK, 6) + DDC_MASK_SH_LIST_DCN2(_MASK, 6), + DDC_MASK_SH_LIST_DCN2_VGA(_MASK) }; #include "../generic_regs.h" diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c index 687d4f128480e8..36a5736c58c92d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c @@ -145,7 +145,8 @@ static const struct ddc_sh_mask ddc_shift[] = { DDC_MASK_SH_LIST_DCN2(__SHIFT, 3), DDC_MASK_SH_LIST_DCN2(__SHIFT, 4), DDC_MASK_SH_LIST_DCN2(__SHIFT, 5), - DDC_MASK_SH_LIST_DCN2(__SHIFT, 6) + DDC_MASK_SH_LIST_DCN2(__SHIFT, 6), + DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT) }; static const struct ddc_sh_mask ddc_mask[] = { @@ -154,7 +155,8 @@ static const struct ddc_sh_mask ddc_mask[] = { DDC_MASK_SH_LIST_DCN2(_MASK, 3), DDC_MASK_SH_LIST_DCN2(_MASK, 4), DDC_MASK_SH_LIST_DCN2(_MASK, 5), - DDC_MASK_SH_LIST_DCN2(_MASK, 6) + DDC_MASK_SH_LIST_DCN2(_MASK, 6), + DDC_MASK_SH_LIST_DCN2_VGA(_MASK) }; #include "../generic_regs.h" diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c index 9fd8b269dd79cf..985f10b397509b 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c @@ -149,7 +149,8 @@ static const struct ddc_sh_mask ddc_shift[] = { DDC_MASK_SH_LIST_DCN2(__SHIFT, 3), DDC_MASK_SH_LIST_DCN2(__SHIFT, 4), DDC_MASK_SH_LIST_DCN2(__SHIFT, 5), - DDC_MASK_SH_LIST_DCN2(__SHIFT, 6) + DDC_MASK_SH_LIST_DCN2(__SHIFT, 6), + DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT) }; static const struct ddc_sh_mask ddc_mask[] = { @@ -158,7 +159,8 @@ static const struct ddc_sh_mask ddc_mask[] = { DDC_MASK_SH_LIST_DCN2(_MASK, 3), DDC_MASK_SH_LIST_DCN2(_MASK, 4), DDC_MASK_SH_LIST_DCN2(_MASK, 5), - DDC_MASK_SH_LIST_DCN2(_MASK, 6) + DDC_MASK_SH_LIST_DCN2(_MASK, 6), + DDC_MASK_SH_LIST_DCN2_VGA(_MASK) }; #include "../generic_regs.h" diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h index 308a543178a56f..59884ef651b398 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h @@ -113,6 +113,13 @@ (PHY_AUX_CNTL__AUX## cd ##_PAD_RXSEL## mask_sh),\ 
(DC_GPIO_AUX_CTRL_5__DDC_PAD## cd ##_I2CMODE## mask_sh)} +#define DDC_MASK_SH_LIST_DCN2_VGA(mask_sh) \ + {DDC_MASK_SH_LIST_COMMON(mask_sh),\ + 0,\ + 0,\ + 0,\ + 0} + struct ddc_registers { struct gpio_registers gpio; uint32_t ddc_setup; diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c index 4233955e3c47b4..e1422e5e86c92e 100644 --- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c @@ -28,12 +28,11 @@ #include "dm_services.h" #include "dm_helpers.h" #include "include/hdcp_types.h" -#include "include/i2caux_interface.h" #include "include/signal_types.h" #include "core_types.h" -#include "dc_link_ddc.h" +#include "link.h" #include "link_hwss.h" -#include "inc/link_dpcd.h" +#include "link/protocols/link_dpcd.h" #define DC_LOGGER \ link->ctx->logger diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 525f8f0b8732a5..ed3c03108da626 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -56,33 +56,6 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state, #endif #include "link_hwss.h" -/************ link *****************/ -struct link_init_data { - const struct dc *dc; - struct dc_context *ctx; /* TODO: remove 'dal' when DC is complete. */ - uint32_t connector_index; /* this will be mapped to the HPD pins */ - uint32_t link_index; /* this is mapped to DAL display_index - TODO: remove it when DC is complete. */ - bool is_dpia_link; -}; - -struct dc_link *link_create(const struct link_init_data *init_params); -void link_destroy(struct dc_link **link); - -enum dc_status dc_link_validate_mode_timing( - const struct dc_stream_state *stream, - struct dc_link *link, - const struct dc_crtc_timing *timing); - -void core_link_resume(struct dc_link *link); - -void core_link_enable_stream( - struct dc_state *state, - struct pipe_ctx *pipe_ctx); - -void core_link_disable_stream(struct pipe_ctx *pipe_ctx); - -void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); /********** DAL Core*********************/ #include "transform.h" #include "dpp.h" @@ -450,10 +423,11 @@ struct pipe_ctx { struct _vcs_dpi_display_e2e_pipe_params_st dml_input; int det_buffer_size_kb; bool unbounded_req; + unsigned int surface_size_in_mall_bytes; - union pipe_update_flags update_flags; struct dwbc *dwbc; struct mcif_wb *mcif_wb; + union pipe_update_flags update_flags; }; /* Data used for dynamic link encoder assignment. @@ -507,6 +481,9 @@ struct dcn_bw_output { struct dcn_watermark_set watermarks; struct dcn_bw_writeback bw_writeback; int compbuf_size_kb; + unsigned int mall_ss_size_bytes; + unsigned int mall_ss_psr_active_size_bytes; + unsigned int mall_subvp_size_bytes; unsigned int legacy_svp_drr_stream_index; bool legacy_svp_drr_stream_index_valid; }; @@ -546,15 +523,6 @@ struct dc_state { */ struct resource_context res_ctx; - /** - * @bw_ctx: The output from bandwidth and watermark calculations and the DML - * - * Each context must have its own instance of VBA, and in order to - * initialize and obtain IP and SOC, the base DML instance from DC is - * initially copied into every context. - */ - struct bw_context bw_ctx; - /** * @pp_display_cfg: PowerPlay clocks and settings * Note: this is a big struct, do *not* put on stack! 
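The @bw_ctx comment that moves in the next hunk states the key invariant here: every dc_state owns a private VBA/DML instance, seeded by copying the base DML from dc when the state is created. A minimal sketch of that seeding step, assuming dc keeps its base instance in a dml field of type struct display_mode_lib as the comment describes (the helper name and exact fields are illustrative, not taken from this patch):

#include <linux/string.h>	/* memcpy */

/* Hypothetical helper: give a freshly created state its own DML/VBA
 * copy so per-context validation cannot clobber the base IP/SOC
 * parameters held by dc. Mirrors what the @bw_ctx comment describes;
 * dc->dml and ctx->bw_ctx.dml are assumed fields. */
static void seed_state_dml(const struct dc *dc, struct dc_state *ctx)
{
	memcpy(&ctx->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
}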
@@ -569,6 +537,15 @@ struct dc_state { struct clk_mgr *clk_mgr; + /** + * @bw_ctx: The output from bandwidth and watermark calculations and the DML + * + * Each context must have its own instance of VBA, and in order to + * initialize and obtain IP and SOC, the base DML instance from DC is + * initially copied into every context. + */ + struct bw_context bw_ctx; + /** * @refcount: refcount reference * diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h deleted file mode 100644 index 95fb61d62778aa..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DAL_DDC_SERVICE_H__ -#define __DAL_DDC_SERVICE_H__ - -#include "include/ddc_service_types.h" -#include "include/i2caux_interface.h" - -#define EDID_SEGMENT_SIZE 256 - -/* Address range from 0x00 to 0x1F.*/ -#define DP_ADAPTOR_TYPE2_SIZE 0x20 -#define DP_ADAPTOR_TYPE2_REG_ID 0x10 -#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D -/* Identifies adaptor as Dual-mode adaptor */ -#define DP_ADAPTOR_TYPE2_ID 0xA0 -/* MHz*/ -#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600 -/* MHz*/ -#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25 -/* kHZ*/ -#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000 -/* kHZ*/ -#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000 - -#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW - -struct ddc_service; -struct graphics_object_id; -enum ddc_result; -struct av_sync_data; -struct dp_receiver_id_info; - -struct i2c_payloads; -struct aux_payloads; -enum aux_return_code_type; - -void dal_ddc_i2c_payloads_add( - struct i2c_payloads *payloads, - uint32_t address, - uint32_t len, - uint8_t *data, - bool write); - -struct ddc_service_init_data { - struct graphics_object_id id; - struct dc_context *ctx; - struct dc_link *link; - bool is_dpia_link; -}; - -struct ddc_service *dal_ddc_service_create( - struct ddc_service_init_data *ddc_init_data); - -void dal_ddc_service_destroy(struct ddc_service **ddc); - -enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc); - -void dal_ddc_service_set_transaction_type( - struct ddc_service *ddc, - enum ddc_transaction_type type); - -bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc); - -void dal_ddc_service_i2c_query_dp_dual_mode_adaptor( - struct ddc_service *ddc, - struct display_sink_capability *sink_cap); - -bool dal_ddc_service_query_ddc_data( - struct ddc_service *ddc, - uint32_t address, - uint8_t *write_buf, - uint32_t write_size, - uint8_t *read_buf, - uint32_t read_size); - -bool dal_ddc_submit_aux_command(struct ddc_service *ddc, - struct aux_payload *payload); - -int dc_link_aux_transfer_raw(struct ddc_service *ddc, - struct aux_payload *payload, - enum aux_return_code_type *operation_result); - -bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, - struct aux_payload *payload); - -bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc, - uint32_t timeout); - -void dal_ddc_service_write_scdc_data( - struct ddc_service *ddc_service, - uint32_t pix_clk, - bool lte_340_scramble); - -void dal_ddc_service_read_scdc_data( - struct ddc_service *ddc_service); - -void ddc_service_set_dongle_type(struct ddc_service *ddc, - enum display_dongle_type dongle_type); - -void dal_ddc_service_set_ddc_pin( - struct ddc_service *ddc_service, - struct ddc *ddc); - -struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service); - -uint32_t get_defer_delay(struct ddc_service *ddc); - -#endif /* __DAL_DDC_SERVICE_H__ */ - diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h deleted file mode 100644 index e8d8c5cb130924..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_LINK_DP_H__ -#define __DC_LINK_DP_H__ - -#define LINK_TRAINING_ATTEMPTS 4 -#define LINK_TRAINING_RETRY_DELAY 50 /* ms */ -#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/ -#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/ -#define MAX_MTP_SLOT_COUNT 64 -#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50 -#define TRAINING_AUX_RD_INTERVAL 100 //us -#define LINK_AUX_WAKE_TIMEOUT_MS 1500 // Timeout when trying to wake unresponsive DPRX. - -struct dc_link; -struct dc_stream_state; -struct dc_link_settings; - -enum { - LINK_TRAINING_MAX_RETRY_COUNT = 5, - /* to avoid infinite loop where-in the receiver - * switches between different VS - */ - LINK_TRAINING_MAX_CR_RETRY = 100, - /* - * Some receivers fail to train on first try and are good - * on subsequent tries. 2 retries should be plenty. If we - * don't have a successful training then we don't expect to - * ever get one. 
- */ - LINK_TRAINING_MAX_VERIFY_RETRY = 2, - PEAK_FACTOR_X1000 = 1006, -}; - -struct dc_link_settings dp_get_max_link_cap(struct dc_link *link); - -bool dp_verify_link_cap_with_retries( - struct dc_link *link, - struct dc_link_settings *known_limit_link_setting, - int attempts); - -bool dp_validate_mode_timing( - struct dc_link *link, - const struct dc_crtc_timing *timing); - -bool decide_edp_link_settings(struct dc_link *link, - struct dc_link_settings *link_setting, - uint32_t req_bw); - -bool decide_link_settings( - struct dc_stream_state *stream, - struct dc_link_settings *link_setting); - -bool perform_link_training_with_retries( - const struct dc_link_settings *link_setting, - bool skip_video_pattern, - int attempts, - struct pipe_ctx *pipe_ctx, - enum signal_type signal, - bool do_fallback); - -bool hpd_rx_irq_check_link_loss_status( - struct dc_link *link, - union hpd_irq_data *hpd_irq_dpcd_data); - -bool is_mst_supported(struct dc_link *link); - -bool detect_dp_sink_caps(struct dc_link *link); - -void detect_edp_sink_caps(struct dc_link *link); - -bool is_dp_active_dongle(const struct dc_link *link); - -bool is_dp_branch_device(const struct dc_link *link); - -bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing); - -void dp_enable_mst_on_sink(struct dc_link *link, bool enable); - -enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); -void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); - -bool dp_overwrite_extended_receiver_cap(struct dc_link *link); - -void dpcd_set_source_specific_data(struct dc_link *link); - -void dpcd_write_cable_id_to_dprx(struct dc_link *link); - -/* Write DPCD link configuration data. */ -enum dc_status dpcd_set_link_settings( - struct dc_link *link, - const struct link_training_settings *lt_settings); -/* Write DPCD drive settings. */ -enum dc_status dpcd_set_lane_settings( - struct dc_link *link, - const struct link_training_settings *link_training_setting, - uint32_t offset); -/* Read training status and adjustment requests from DPCD. 
*/ -enum dc_status dp_get_lane_status_and_lane_adjust( - struct dc_link *link, - const struct link_training_settings *link_training_setting, - union lane_status ln_status[LANE_COUNT_DP_MAX], - union lane_align_status_updated *ln_align, - union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], - uint32_t offset); - -void dp_wait_for_training_aux_rd_interval( - struct dc_link *link, - uint32_t wait_in_micro_secs); - -bool dp_is_cr_done(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status); - -enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status); - -bool dp_is_ch_eq_done(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status); -bool dp_is_symbol_locked(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status); -bool dp_is_interlane_aligned(union lane_align_status_updated align_status); - -bool dp_is_max_vs_reached( - const struct link_training_settings *lt_settings); -void dp_hw_to_dpcd_lane_settings( - const struct link_training_settings *lt_settings, - const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], - union dpcd_training_lane dpcd_lane_settings[]); -void dp_decide_lane_settings( - const struct link_training_settings *lt_settings, - const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], - struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], - union dpcd_training_lane dpcd_lane_settings[]); - -uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval); - -enum dpcd_training_patterns - dc_dp_training_pattern_to_dpcd_training_pattern( - struct dc_link *link, - enum dc_dp_training_pattern pattern); - -uint8_t dc_dp_initialize_scrambling_data_symbols( - struct dc_link *link, - enum dc_dp_training_pattern pattern); - -enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready); -void dp_set_fec_enable(struct dc_link *link, bool enable); -bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); -bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update); -void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); -bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx); -bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable); - -/* Initialize output parameter lt_settings. */ -void dp_decide_training_settings( - struct dc_link *link, - const struct dc_link_settings *link_setting, - struct link_training_settings *lt_settings); - -/* Convert PHY repeater count read from DPCD uint8_t. */ -uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count); - -/* Check DPCD training status registers to detect link loss. 
*/ -enum link_training_result dp_check_link_loss_status( - struct dc_link *link, - const struct link_training_settings *link_training_setting); - -enum dc_status dpcd_configure_lttpr_mode( - struct dc_link *link, - struct link_training_settings *lt_settings); - -enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings); -enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link); -bool dp_is_lttpr_present(struct dc_link *link); -enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting); -void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override); -enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link); -enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link); -bool dpcd_write_128b_132b_sst_payload_allocation_table( - const struct dc_stream_state *stream, - struct dc_link *link, - struct link_mst_stream_allocation_table *proposed_table, - bool allocate); - -enum dc_status dpcd_configure_channel_coding( - struct dc_link *link, - struct link_training_settings *lt_settings); - -bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link); - -struct fixed31_32 calculate_sst_avg_time_slots_per_mtp( - const struct dc_stream_state *stream, - const struct dc_link *link); -void enable_dp_hpo_output(struct dc_link *link, - const struct link_resource *link_res, - const struct dc_link_settings *link_settings); -void disable_dp_hpo_output(struct dc_link *link, - const struct link_resource *link_res, - enum signal_type signal); - -void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable); -bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx); -void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd); -void dp_receiver_power_ctrl(struct dc_link *link, bool on); -void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode); -void dp_enable_link_phy( - struct dc_link *link, - const struct link_resource *link_res, - enum signal_type signal, - enum clock_source_id clock_source, - const struct dc_link_settings *link_settings); -void edp_add_delay_for_T9(struct dc_link *link); -bool edp_receiver_ready_T9(struct dc_link *link); -bool edp_receiver_ready_T7(struct dc_link *link); - -void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res, - enum signal_type signal); - -void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res, - enum signal_type signal); - -bool dp_set_hw_training_pattern( - struct dc_link *link, - const struct link_resource *link_res, - enum dc_dp_training_pattern pattern, - uint32_t offset); - -void dp_set_hw_lane_settings( - struct dc_link *link, - const struct link_resource *link_res, - const struct link_training_settings *link_settings, - uint32_t offset); - -void dp_set_hw_test_pattern( - struct dc_link *link, - const struct link_resource *link_res, - enum dp_test_pattern test_pattern, - uint8_t *custom_pattern, - uint32_t custom_pattern_size); - -void dp_retrain_link_dp_test(struct dc_link *link, - struct dc_link_settings *link_setting, - bool skip_video_pattern); -#endif /* __DC_LINK_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h deleted file mode 100644 index 39c1d1d0735757..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h +++ /dev/null @@ -1,105 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2021 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_LINK_DPIA_H__ -#define __DC_LINK_DPIA_H__ - -/* This module implements functionality for training DPIA links. */ - -struct dc_link; -struct dc_link_settings; - -/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */ -#define DPIA_CLK_SYNC_DELAY 16000 - -/* Extend interval between training status checks for manual testing. */ -#define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000 - -/** @note Can remove once DP tunneling registers in upstream include/drm/drm_dp_helper.h */ -/* DPCD DP Tunneling over USB4 */ -#define DP_TUNNELING_CAPABILITIES_SUPPORT 0xe000d -#define DP_IN_ADAPTER_INFO 0xe000e -#define DP_USB4_DRIVER_ID 0xe000f -#define DP_USB4_ROUTER_TOPOLOGY_ID 0xe001b - -/* SET_CONFIG message types sent by driver. */ -enum dpia_set_config_type { - DPIA_SET_CFG_SET_LINK = 0x01, - DPIA_SET_CFG_SET_PHY_TEST_MODE = 0x05, - DPIA_SET_CFG_SET_TRAINING = 0x18, - DPIA_SET_CFG_SET_VSPE = 0x19 -}; - -/* Training stages (TS) in SET_CONFIG(SET_TRAINING) message. */ -enum dpia_set_config_ts { - DPIA_TS_DPRX_DONE = 0x00, /* Done training DPRX. */ - DPIA_TS_TPS1 = 0x01, - DPIA_TS_TPS2 = 0x02, - DPIA_TS_TPS3 = 0x03, - DPIA_TS_TPS4 = 0x07, - DPIA_TS_UFP_DONE = 0xff /* Done training DPTX-to-DPIA hop. */ -}; - -/* SET_CONFIG message data associated with messages sent by driver. */ -union dpia_set_config_data { - struct { - uint8_t mode : 1; - uint8_t reserved : 7; - } set_link; - struct { - uint8_t stage; - } set_training; - struct { - uint8_t swing : 2; - uint8_t max_swing_reached : 1; - uint8_t pre_emph : 2; - uint8_t max_pre_emph_reached : 1; - uint8_t reserved : 2; - } set_vspe; - uint8_t raw; -}; - -/* Read tunneling device capability from DPCD and update link capability - * accordingly. - */ -enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link); - -/* Query hot plug status of USB4 DP tunnel. - * Returns true if HPD high. - */ -bool dc_link_dpia_query_hpd_status(struct dc_link *link); - -/* Train DP tunneling link for USB4 DPIA display endpoint. - * DPIA equivalent of dc_link_dp_perfrorm_link_training. - * Aborts link training upon detection of sink unplug. 
- */ -enum link_training_result dc_link_dpia_perform_link_training( - struct dc_link *link, - const struct link_resource *link_res, - const struct dc_link_settings *link_setting, - bool skip_video_pattern); - -#endif /* __DC_LINK_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h index 2ae630bf2aee48..7254182b7c7210 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h @@ -27,7 +27,6 @@ #define __DAL_AUX_ENGINE_H__ #include "dc_ddc_types.h" -#include "include/i2caux_interface.h" enum aux_return_code_type; @@ -81,7 +80,12 @@ enum i2c_default_speed { I2CAUX_DEFAULT_I2C_SW_SPEED = 50 }; -union aux_config; +union aux_config { + struct { + uint32_t ALLOW_AUX_WHEN_HPD_LOW:1; + } bits; + uint32_t raw; +}; struct aux_engine { uint32_t inst; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 5b0265c0df61c7..beb26dc8a07fad 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -187,6 +187,7 @@ struct hubbub_funcs { void (*init_crb)(struct hubbub *hubbub); void (*force_usr_retraining_allow)(struct hubbub *hubbub, bool allow); void (*set_request_limit)(struct hubbub *hubbub, int memory_channel_count, int words_per_channel); + void (*dchubbub_init)(struct hubbub *hubbub); }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index 42db4b7b79fdc4..bb5ad70d42662f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -72,6 +72,12 @@ enum dynamic_metadata_mode { dmdata_dolby_vision }; +struct enc_sdp_line_num { + /* Adaptive Sync SDP */ + bool adaptive_sync_line_num_valid; + uint32_t adaptive_sync_line_num; +}; + struct encoder_info_frame { /* auxiliary video information */ struct dc_info_packet avi; @@ -85,6 +91,9 @@ struct encoder_info_frame { struct dc_info_packet vsc; /* HDR Static MetaData */ struct dc_info_packet hdrsmd; + /* Adaptive Sync SDP*/ + struct dc_info_packet adaptive_sync; + struct enc_sdp_line_num sdp_line_num; }; struct encoder_unblank_param { @@ -154,6 +163,10 @@ struct stream_encoder_funcs { void (*stop_hdmi_info_packets)( struct stream_encoder *enc); + void (*update_dp_info_packets_sdp_line_num)( + struct stream_encoder *enc, + struct encoder_info_frame *info_frame); + void (*update_dp_info_packets)( struct stream_encoder *enc, const struct encoder_info_frame *info_frame); @@ -302,6 +315,10 @@ struct hpo_dp_stream_encoder_funcs { bool compressed_format, bool double_buffer_en); + void (*update_dp_info_packets_sdp_line_num)( + struct hpo_dp_stream_encoder *enc, + struct encoder_info_frame *info_frame); + void (*update_dp_info_packets)( struct hpo_dp_stream_encoder *enc, const struct encoder_info_frame *info_frame); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 0e42e721dd15a6..1d9f9c53d2bd69 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -331,6 +331,7 @@ struct timing_generator_funcs { uint32_t vtotal_change_limit); void (*init_odm)(struct timing_generator *tg); + void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg); }; #endif diff --git 
a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index c43523f9ff6d0b..88ac723d10aa72 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -266,6 +266,7 @@ struct hw_sequencer_funcs { void (*apply_update_flags_for_phantom)(struct pipe_ctx *phantom_pipe); void (*commit_subvp_config)(struct dc *dc, struct dc_state *context); + void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context); void (*subvp_pipe_control_lock)(struct dc *dc, struct dc_state *context, bool lock, diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h new file mode 100644 index 00000000000000..e70fa005922361 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -0,0 +1,157 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_H__ +#define __DC_LINK_H__ + +/* FILE POLICY AND INTENDED USAGE: + * + * This header declares link functions exposed to dc. All functions must have + * "link_" as prefix. For example, link_run_my_function. This header is strictly + * private in dc and should never be included in other header files. dc + * components should include this header in their .c files in order to access + * functions in the link folder. This file should never include any header files + * in the link folder. If there is a need to expose a function declared in one + * of the header files inside the link folder, you need to move the function + * declaration into this file and prefix it with "link_". + */ +#include "core_types.h" +#include "dc_link.h" + +struct link_init_data { + const struct dc *dc; + struct dc_context *ctx; /* TODO: remove 'dal' when DC is complete. */ + uint32_t connector_index; /* this will be mapped to the HPD pins */ + uint32_t link_index; /* this is mapped to DAL display_index + TODO: remove it when DC is complete.
*/ + bool is_dpia_link; +}; + +struct dc_link *link_create(const struct link_init_data *init_params); +void link_destroy(struct dc_link **link); + +// TODO - convert any function declarations below to function pointers +struct gpio *link_get_hpd_gpio(struct dc_bios *dcb, + struct graphics_object_id link_id, + struct gpio_service *gpio_service); + +struct ddc_service_init_data { + struct graphics_object_id id; + struct dc_context *ctx; + struct dc_link *link; + bool is_dpia_link; +}; + +struct ddc_service *link_create_ddc_service( + struct ddc_service_init_data *ddc_init_data); + +void link_destroy_ddc_service(struct ddc_service **ddc); + +bool link_is_in_aux_transaction_mode(struct ddc_service *ddc); + +bool link_query_ddc_data( + struct ddc_service *ddc, + uint32_t address, + uint8_t *write_buf, + uint32_t write_size, + uint8_t *read_buf, + uint32_t read_size); + + +/* Attempt to submit an aux payload, retrying on timeouts, defers, and busy + * states as outlined in the DP spec. Returns true if the request was + * successful. + * + * NOTE: The function requires explicit mutex on DM side in order to prevent + * potential race condition. DC components should call the dpcd read/write + * function in dm_helpers in order to access dpcd safely + */ +bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc, + struct aux_payload *payload); + +uint32_t link_get_aux_defer_delay(struct ddc_service *ddc); + +bool link_is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx); + +enum dp_link_encoding link_dp_get_encoding_format( + const struct dc_link_settings *link_settings); + +bool link_decide_link_settings( + struct dc_stream_state *stream, + struct dc_link_settings *link_setting); + +void link_dp_trace_set_edp_power_timestamp(struct dc_link *link, + bool power_up); +uint64_t link_dp_trace_get_edp_poweron_timestamp(struct dc_link *link); +uint64_t link_dp_trace_get_edp_poweroff_timestamp(struct dc_link *link); + +bool link_is_edp_ilr_optimization_required(struct dc_link *link, + struct dc_crtc_timing *crtc_timing); + +bool link_backlight_enable_aux(struct dc_link *link, bool enable); +void link_edp_add_delay_for_T9(struct dc_link *link); +bool link_edp_receiver_ready_T9(struct dc_link *link); +bool link_edp_receiver_ready_T7(struct dc_link *link); +bool link_power_alpm_dpcd_enable(struct dc_link *link, bool enable); +bool link_set_sink_vtotal_in_psr_active(const struct dc_link *link, + uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su); +void link_get_psr_residency(const struct dc_link *link, uint32_t *residency); +enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); +enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); +void link_blank_all_dp_displays(struct dc *dc); +void link_blank_all_edp_displays(struct dc *dc); +void link_blank_dp_stream(struct dc_link *link, bool hw_init); +void link_resume(struct dc_link *link); +void link_set_dpms_on( + struct dc_state *state, + struct pipe_ctx *pipe_ctx); +void link_set_dpms_off(struct pipe_ctx *pipe_ctx); +void link_dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode); +void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); +bool link_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); +bool link_update_dsc_config(struct pipe_ctx *pipe_ctx); +enum dc_status link_validate_mode_timing( + const struct dc_stream_state *stream, + struct dc_link *link, + const struct dc_crtc_timing *timing); +bool link_detect(struct dc_link *link, enum dc_detect_reason 
reason); +bool link_detect_connection_type(struct dc_link *link, + enum dc_connection_type *type); +const struct dc_link_status *link_get_status(const struct dc_link *link); +#ifdef CONFIG_DRM_AMD_DC_HDCP +/* return true if the connected receiver supports the hdcp version */ +bool link_is_hdcp14(struct dc_link *link, enum signal_type signal); +bool link_is_hdcp22(struct dc_link *link, enum signal_type signal); +#endif +void link_clear_dprx_states(struct dc_link *link); +bool link_reset_cur_dp_mst_topology(struct dc_link *link); +uint32_t dp_link_bandwidth_kbps( + const struct dc_link *link, + const struct dc_link_settings *link_settings); +uint32_t link_timing_bandwidth_kbps(const struct dc_crtc_timing *timing); +void link_get_cur_res_map(const struct dc *dc, uint32_t *map); +void link_restore_res_map(const struct dc *dc, uint32_t *map); + +#endif /* __DC_LINK_HPD_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 5040836f404d05..fa6da93caa889c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -165,10 +165,6 @@ bool resource_validate_attach_surfaces( struct dc_state *context, const struct resource_pool *pool); -void resource_validate_ctx_update_pointer_after_copy( - const struct dc_state *src_ctx, - struct dc_state *dst_ctx); - enum dc_status resource_map_clock_resources( const struct dc *dc, struct dc_state *context, @@ -236,4 +232,13 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm( struct pipe_ctx *pri_pipe, struct pipe_ctx *sec_pipe, bool odm); + +/* A test harness interface that modifies dp encoder resources in the given dc + * state and bypasses the need to revalidate. The interface assumes that the + * test harness interface is called with pre-validated link config stored in the + * pipe_ctx and updates dp encoder resources according to the link config. + */ +enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx); #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c index 5f4f6dd79511cd..3c7cb3dc046b6c 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c @@ -37,7 +37,7 @@ #include "soc15_hw_ip.h" #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -enum dc_irq_source to_dal_irq_source_dcn201( +static enum dc_irq_source to_dal_irq_source_dcn201( struct irq_service *irq_service, uint32_t src_id, uint32_t ext_id) @@ -136,11 +136,6 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { .ack = NULL }; -static const struct irq_source_info_funcs dmub_outbox_irq_info_funcs = { - .set = NULL, - .ack = NULL -}; - #undef BASE_INNER #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg diff --git a/drivers/gpu/drm/amd/display/dc/link/Makefile b/drivers/gpu/drm/amd/display/dc/link/Makefile index 054c2a727eb22d..40352d8d76485e 100644 --- a/drivers/gpu/drm/amd/display/dc/link/Makefile +++ b/drivers/gpu/drm/amd/display/dc/link/Makefile @@ -23,8 +23,41 @@ # It abstracts the control and status of back end pipe such as DIO, HPO, DPIA, # PHY, HPD, DDC and etc). 
-LINK = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o link_dp_trace.o +LINK = link_detection.o link_dpms.o link_factory.o link_resource.o \ +link_validation.o -AMD_DAL_LINK = $(addprefix $(AMDDALPATH)/dc/link/,$(LINK)) +AMD_DAL_LINK = $(addprefix $(AMDDALPATH)/dc/link/, \ +$(LINK)) AMD_DISPLAY_FILES += $(AMD_DAL_LINK) +############################################################################### +# accessories +############################################################################### +LINK_ACCESSORIES = link_dp_trace.o link_dp_cts.o link_fpga.o + +AMD_DAL_LINK_ACCESSORIES = $(addprefix $(AMDDALPATH)/dc/link/accessories/, \ +$(LINK_ACCESSORIES)) + +AMD_DISPLAY_FILES += $(AMD_DAL_LINK_ACCESSORIES) +############################################################################### +# hwss +############################################################################### +LINK_HWSS = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o + +AMD_DAL_LINK_HWSS = $(addprefix $(AMDDALPATH)/dc/link/hwss/, \ +$(LINK_HWSS)) + +AMD_DISPLAY_FILES += $(AMD_DAL_LINK_HWSS) +############################################################################### +# protocols +############################################################################### +LINK_PROTOCOLS = link_hpd.o link_ddc.o link_dpcd.o link_dp_dpia.o \ +link_dp_training.o link_dp_training_8b_10b.o link_dp_training_128b_132b.o \ +link_dp_training_dpia.o link_dp_training_auxless.o \ +link_dp_training_fixed_vs_pe_retimer.o link_dp_phy.o link_dp_capability.o \ +link_edp_panel_control.o link_dp_irq_handler.o + +AMD_DAL_LINK_PROTOCOLS = $(addprefix $(AMDDALPATH)/dc/link/protocols/, \ +$(LINK_PROTOCOLS)) + +AMD_DISPLAY_FILES += $(AMD_DAL_LINK_PROTOCOLS) \ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c new file mode 100644 index 00000000000000..942300e0bd929f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -0,0 +1,1046 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "link_dp_cts.h" +#include "link/link_resource.h" +#include "link/protocols/link_dpcd.h" +#include "link/protocols/link_dp_training.h" +#include "link/protocols/link_dp_phy.h" +#include "link/protocols/link_dp_training_fixed_vs_pe_retimer.h" +#include "link/link_dpms.h" +#include "resource.h" +#include "dm_helpers.h" +#include "dc_dmub_srv.h" +#include "dce/dmub_hw_lock_mgr.h" + +#define DC_LOGGER \ + link->ctx->logger + +static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate) +{ + switch (test_rate) { + case DP_TEST_LINK_RATE_RBR: + return LINK_RATE_LOW; + case DP_TEST_LINK_RATE_HBR: + return LINK_RATE_HIGH; + case DP_TEST_LINK_RATE_HBR2: + return LINK_RATE_HIGH2; + case DP_TEST_LINK_RATE_HBR3: + return LINK_RATE_HIGH3; + case DP_TEST_LINK_RATE_UHBR10: + return LINK_RATE_UHBR10; + case DP_TEST_LINK_RATE_UHBR20: + return LINK_RATE_UHBR20; + case DP_TEST_LINK_RATE_UHBR13_5: + return LINK_RATE_UHBR13_5; + default: + return LINK_RATE_UNKNOWN; + } +} + +static bool is_dp_phy_sqaure_pattern(enum dp_test_pattern test_pattern) +{ + return (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern && + test_pattern <= DP_TEST_PATTERN_SQUARE_END); +} + +static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern) +{ + if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern && + test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) || + test_pattern == DP_TEST_PATTERN_VIDEO_MODE) + return true; + else + return false; +} + +void dp_retrain_link_dp_test(struct dc_link *link, + struct dc_link_settings *link_setting, + bool skip_video_pattern) +{ + struct pipe_ctx *pipes[MAX_PIPES]; + struct dc_state *state = link->dc->current_state; + uint8_t count; + int i; + + udelay(100); + + link_get_master_pipes_with_dpms_on(link, state, &count, pipes); + + for (i = 0; i < count; i++) { + link_set_dpms_off(pipes[i]); + pipes[i]->link_config.dp_link_settings = *link_setting; + update_dp_encoder_resources_for_test_harness( + link->dc, + state, + pipes[i]); + } + + for (i = count-1; i >= 0; i--) + link_set_dpms_on(state, pipes[i]); +} + +static void dp_test_send_link_training(struct dc_link *link) +{ + struct dc_link_settings link_settings = {0}; + uint8_t test_rate = 0; + + core_link_read_dpcd( + link, + DP_TEST_LANE_COUNT, + (unsigned char *)(&link_settings.lane_count), + 1); + core_link_read_dpcd( + link, + DP_TEST_LINK_RATE, + &test_rate, + 1); + link_settings.link_rate = get_link_rate_from_test_link_rate(test_rate); + + /* Set preferred link settings */ + link->verified_link_cap.lane_count = link_settings.lane_count; + link->verified_link_cap.link_rate = link_settings.link_rate; + + dp_retrain_link_dp_test(link, &link_settings, false); +} + +static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video) +{ + union audio_test_mode dpcd_test_mode = {0}; + struct audio_test_pattern_type dpcd_pattern_type = {0}; + union audio_test_pattern_period dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0}; + enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; + + struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; + struct pipe_ctx *pipe_ctx = &pipes[0]; + unsigned int channel_count; + unsigned int channel = 0; + unsigned int modes = 0; + unsigned int sampling_rate_in_hz = 0; + + // get audio test mode and test pattern parameters + core_link_read_dpcd( + link, + DP_TEST_AUDIO_MODE, + &dpcd_test_mode.raw, + sizeof(dpcd_test_mode)); + + core_link_read_dpcd( + link, + DP_TEST_AUDIO_PATTERN_TYPE, + &dpcd_pattern_type.value, + 
sizeof(dpcd_pattern_type)); + + channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT); + + // read pattern periods for requested channels when sawTooth pattern is requested + if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH || + dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) { + + test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ? + DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; + // read period for each channel + for (channel = 0; channel < channel_count; channel++) { + core_link_read_dpcd( + link, + DP_TEST_AUDIO_PERIOD_CH1 + channel, + &dpcd_pattern_period[channel].raw, + sizeof(dpcd_pattern_period[channel])); + } + } + + // translate sampling rate + switch (dpcd_test_mode.bits.sampling_rate) { + case AUDIO_SAMPLING_RATE_32KHZ: + sampling_rate_in_hz = 32000; + break; + case AUDIO_SAMPLING_RATE_44_1KHZ: + sampling_rate_in_hz = 44100; + break; + case AUDIO_SAMPLING_RATE_48KHZ: + sampling_rate_in_hz = 48000; + break; + case AUDIO_SAMPLING_RATE_88_2KHZ: + sampling_rate_in_hz = 88200; + break; + case AUDIO_SAMPLING_RATE_96KHZ: + sampling_rate_in_hz = 96000; + break; + case AUDIO_SAMPLING_RATE_176_4KHZ: + sampling_rate_in_hz = 176400; + break; + case AUDIO_SAMPLING_RATE_192KHZ: + sampling_rate_in_hz = 192000; + break; + default: + sampling_rate_in_hz = 0; + break; + } + + link->audio_test_data.flags.test_requested = 1; + link->audio_test_data.flags.disable_video = disable_video; + link->audio_test_data.sampling_rate = sampling_rate_in_hz; + link->audio_test_data.channel_count = channel_count; + link->audio_test_data.pattern_type = test_pattern; + + if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) { + for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) { + link->audio_test_data.pattern_period[modes] = dpcd_pattern_period[modes].bits.pattern_period; + } + } +} + +/* TODO Raven hbr2 compliance eye output is unstable + * (toggling on and off) with debugger break + * This causes intermittent PHY automation failures + * Need to look into the root cause */ +static void dp_test_send_phy_test_pattern(struct dc_link *link) +{ + union phy_test_pattern dpcd_test_pattern; + union lane_adjust dpcd_lane_adjustment[2]; + unsigned char dpcd_post_cursor_2_adjustment = 0; + unsigned char test_pattern_buffer[ + (DP_TEST_264BIT_CUSTOM_PATTERN_263_256 - + DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0}; + unsigned int test_pattern_size = 0; + enum dp_test_pattern test_pattern; + union lane_adjust dpcd_lane_adjust; + unsigned int lane; + struct link_training_settings link_training_settings; + unsigned char no_preshoot = 0; + unsigned char no_deemphasis = 0; + + dpcd_test_pattern.raw = 0; + memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment)); + memset(&link_training_settings, 0, sizeof(link_training_settings)); + + /* get phy test pattern and pattern parameters from DP receiver */ + core_link_read_dpcd( + link, + DP_PHY_TEST_PATTERN, + &dpcd_test_pattern.raw, + sizeof(dpcd_test_pattern)); + core_link_read_dpcd( + link, + DP_ADJUST_REQUEST_LANE0_1, + &dpcd_lane_adjustment[0].raw, + sizeof(dpcd_lane_adjustment)); + + /* prepare link training settings */ + link_training_settings.link_settings = link->cur_link_settings; + + link_training_settings.lttpr_mode = dc_link_decide_lttpr_mode(link, &link->cur_link_settings); + + if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT) + dp_fixed_vs_pe_read_lane_adjust( + link,
+ link_training_settings.dpcd_lane_settings); + + /* get post cursor 2 parameters + * For DP 1.1a or earlier, this DPCD register's value is 0 + * For DP 1.2 or later: + * Bits 1:0 = POST_CURSOR2_LANE0; Bits 3:2 = POST_CURSOR2_LANE1 + * Bits 5:4 = POST_CURSOR2_LANE2; Bits 7:6 = POST_CURSOR2_LANE3 + */ + core_link_read_dpcd( + link, + DP_ADJUST_REQUEST_POST_CURSOR2, + &dpcd_post_cursor_2_adjustment, + sizeof(dpcd_post_cursor_2_adjustment)); + + /* translate request */ + switch (dpcd_test_pattern.bits.PATTERN) { + case PHY_TEST_PATTERN_D10_2: + test_pattern = DP_TEST_PATTERN_D102; + break; + case PHY_TEST_PATTERN_SYMBOL_ERROR: + test_pattern = DP_TEST_PATTERN_SYMBOL_ERROR; + break; + case PHY_TEST_PATTERN_PRBS7: + test_pattern = DP_TEST_PATTERN_PRBS7; + break; + case PHY_TEST_PATTERN_80BIT_CUSTOM: + test_pattern = DP_TEST_PATTERN_80BIT_CUSTOM; + break; + case PHY_TEST_PATTERN_CP2520_1: + /* CP2520 pattern is unstable, temporarily use TPS4 instead */ + test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? + DP_TEST_PATTERN_TRAINING_PATTERN4 : + DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; + break; + case PHY_TEST_PATTERN_CP2520_2: + /* CP2520 pattern is unstable, temporarily use TPS4 instead */ + test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? + DP_TEST_PATTERN_TRAINING_PATTERN4 : + DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; + break; + case PHY_TEST_PATTERN_CP2520_3: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; + break; + case PHY_TEST_PATTERN_128b_132b_TPS1: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS1; + break; + case PHY_TEST_PATTERN_128b_132b_TPS2: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS2; + break; + case PHY_TEST_PATTERN_PRBS9: + test_pattern = DP_TEST_PATTERN_PRBS9; + break; + case PHY_TEST_PATTERN_PRBS11: + test_pattern = DP_TEST_PATTERN_PRBS11; + break; + case PHY_TEST_PATTERN_PRBS15: + test_pattern = DP_TEST_PATTERN_PRBS15; + break; + case PHY_TEST_PATTERN_PRBS23: + test_pattern = DP_TEST_PATTERN_PRBS23; + break; + case PHY_TEST_PATTERN_PRBS31: + test_pattern = DP_TEST_PATTERN_PRBS31; + break; + case PHY_TEST_PATTERN_264BIT_CUSTOM: + test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM; + break; + case PHY_TEST_PATTERN_SQUARE: + test_pattern = DP_TEST_PATTERN_SQUARE; + break; + case PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED: + test_pattern = DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED; + no_preshoot = 1; + break; + case PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED: + test_pattern = DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED; + no_deemphasis = 1; + break; + case PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED: + test_pattern = DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED; + no_preshoot = 1; + no_deemphasis = 1; + break; + default: + test_pattern = DP_TEST_PATTERN_VIDEO_MODE; + break; + } + + if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) { + test_pattern_size = (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - + DP_TEST_80BIT_CUSTOM_PATTERN_7_0) + 1; + core_link_read_dpcd( + link, + DP_TEST_80BIT_CUSTOM_PATTERN_7_0, + test_pattern_buffer, + test_pattern_size); + } + + if (is_dp_phy_sqaure_pattern(test_pattern)) { + test_pattern_size = 1; // Square pattern data is 1 byte (DP spec) + core_link_read_dpcd( + link, + DP_PHY_SQUARE_PATTERN, + test_pattern_buffer, + test_pattern_size); + } + + if (test_pattern == DP_TEST_PATTERN_264BIT_CUSTOM) { + test_pattern_size = (DP_TEST_264BIT_CUSTOM_PATTERN_263_256- + DP_TEST_264BIT_CUSTOM_PATTERN_7_0) + 1; + core_link_read_dpcd( + link, + DP_TEST_264BIT_CUSTOM_PATTERN_7_0, + test_pattern_buffer, + test_pattern_size); + } + + for
(lane = 0; lane < + (unsigned int)(link->cur_link_settings.lane_count); + lane++) { + dpcd_lane_adjust.raw = + dp_get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane); + if (link_dp_get_encoding_format(&link->cur_link_settings) == + DP_8b_10b_ENCODING) { + link_training_settings.hw_lane_settings[lane].VOLTAGE_SWING = + (enum dc_voltage_swing) + (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE); + link_training_settings.hw_lane_settings[lane].PRE_EMPHASIS = + (enum dc_pre_emphasis) + (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE); + link_training_settings.hw_lane_settings[lane].POST_CURSOR2 = + (enum dc_post_cursor2) + ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03); + } else if (link_dp_get_encoding_format(&link->cur_link_settings) == + DP_128b_132b_ENCODING) { + link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.level = + dpcd_lane_adjust.tx_ffe.PRESET_VALUE; + link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.no_preshoot = no_preshoot; + link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.no_deemphasis = no_deemphasis; + } + } + + dp_hw_to_dpcd_lane_settings(&link_training_settings, + link_training_settings.hw_lane_settings, + link_training_settings.dpcd_lane_settings); + /* Usage: Measure DP physical lane signal + * by DP SI test equipment automatically. + * PHY test pattern request is generated by equipment via HPD interrupt. + * HPD needs to be active all the time. Do not touch it. + * Forward request to DS. + */ + dc_link_dp_set_test_pattern( + link, + test_pattern, + DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED, + &link_training_settings, + test_pattern_buffer, + test_pattern_size); +} + +static void set_crtc_test_pattern(struct dc_link *link, + struct pipe_ctx *pipe_ctx, + enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space) +{ + enum controller_dp_test_pattern controller_test_pattern; + enum dc_color_depth color_depth = pipe_ctx-> + stream->timing.display_color_depth; + struct bit_depth_reduction_params params; + struct output_pixel_processor *opp = pipe_ctx->stream_res.opp; + int width = pipe_ctx->stream->timing.h_addressable + + pipe_ctx->stream->timing.h_border_left + + pipe_ctx->stream->timing.h_border_right; + int height = pipe_ctx->stream->timing.v_addressable + + pipe_ctx->stream->timing.v_border_bottom + + pipe_ctx->stream->timing.v_border_top; + + memset(&params, 0, sizeof(params)); + + switch (test_pattern) { + case DP_TEST_PATTERN_COLOR_SQUARES: + controller_test_pattern = + CONTROLLER_DP_TEST_PATTERN_COLORSQUARES; + break; + case DP_TEST_PATTERN_COLOR_SQUARES_CEA: + controller_test_pattern = + CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA; + break; + case DP_TEST_PATTERN_VERTICAL_BARS: + controller_test_pattern = + CONTROLLER_DP_TEST_PATTERN_VERTICALBARS; + break; + case DP_TEST_PATTERN_HORIZONTAL_BARS: + controller_test_pattern = + CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS; + break; + case DP_TEST_PATTERN_COLOR_RAMP: + controller_test_pattern = + CONTROLLER_DP_TEST_PATTERN_COLORRAMP; + break; + default: + controller_test_pattern = + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE; + break; + } + + switch (test_pattern) { + case DP_TEST_PATTERN_COLOR_SQUARES: + case DP_TEST_PATTERN_COLOR_SQUARES_CEA: + case DP_TEST_PATTERN_VERTICAL_BARS: + case DP_TEST_PATTERN_HORIZONTAL_BARS: + case DP_TEST_PATTERN_COLOR_RAMP: + { + /* disable bit depth reduction */ + pipe_ctx->stream->bit_depth_params = params; + opp->funcs->opp_program_bit_depth_reduction(opp, &params); + if
(pipe_ctx->stream_res.tg->funcs->set_test_pattern) + pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, + controller_test_pattern, color_depth); + else if (link->dc->hwss.set_disp_pattern_generator) { + struct pipe_ctx *odm_pipe; + enum controller_dp_color_space controller_color_space; + int opp_cnt = 1; + int offset = 0; + int dpg_width = width; + + switch (test_pattern_color_space) { + case DP_TEST_PATTERN_COLOR_SPACE_RGB: + controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB; + break; + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601: + controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601; + break; + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709: + controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709; + break; + case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED: + default: + controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; + DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__); + ASSERT(0); + break; + } + + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + opp_cnt++; + dpg_width = width / opp_cnt; + offset = dpg_width; + + link->dc->hwss.set_disp_pattern_generator(link->dc, + pipe_ctx, + controller_test_pattern, + controller_color_space, + color_depth, + NULL, + dpg_width, + height, + 0); + + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { + struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; + + odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params); + link->dc->hwss.set_disp_pattern_generator(link->dc, + odm_pipe, + controller_test_pattern, + controller_color_space, + color_depth, + NULL, + dpg_width, + height, + offset); + offset += offset; + } + } + } + break; + case DP_TEST_PATTERN_VIDEO_MODE: + { + /* restore bitdepth reduction */ + resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params); + pipe_ctx->stream->bit_depth_params = params; + opp->funcs->opp_program_bit_depth_reduction(opp, &params); + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) + pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + color_depth); + else if (link->dc->hwss.set_disp_pattern_generator) { + struct pipe_ctx *odm_pipe; + int opp_cnt = 1; + int dpg_width; + + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + opp_cnt++; + + dpg_width = width / opp_cnt; + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { + struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; + + odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params); + link->dc->hwss.set_disp_pattern_generator(link->dc, + odm_pipe, + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + CONTROLLER_DP_COLOR_SPACE_UDEFINED, + color_depth, + NULL, + dpg_width, + height, + 0); + } + link->dc->hwss.set_disp_pattern_generator(link->dc, + pipe_ctx, + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + CONTROLLER_DP_COLOR_SPACE_UDEFINED, + color_depth, + NULL, + dpg_width, + height, + 0); + } + } + break; + + default: + break; + } +} + +void dc_link_dp_handle_automated_test(struct dc_link *link) +{ + union test_request test_request; + union test_response test_response; + + memset(&test_request, 0, sizeof(test_request)); + memset(&test_response, 0, sizeof(test_response)); + + core_link_read_dpcd( + link, + DP_TEST_REQUEST, + &test_request.raw, + sizeof(union test_request)); + if (test_request.bits.LINK_TRAINING) { + /* ACK first to let DP RX test box monitor LT
sequence */ + test_response.bits.ACK = 1; + core_link_write_dpcd( + link, + DP_TEST_RESPONSE, + &test_response.raw, + sizeof(test_response)); + dp_test_send_link_training(link); + /* no acknowledge request is needed again */ + test_response.bits.ACK = 0; + } + if (test_request.bits.LINK_TEST_PATTRN) { + union test_misc dpcd_test_params; + union link_test_pattern dpcd_test_pattern; + + memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern)); + memset(&dpcd_test_params, 0, sizeof(dpcd_test_params)); + + /* get link test pattern and pattern parameters */ + core_link_read_dpcd( + link, + DP_TEST_PATTERN, + &dpcd_test_pattern.raw, + sizeof(dpcd_test_pattern)); + core_link_read_dpcd( + link, + DP_TEST_MISC0, + &dpcd_test_params.raw, + sizeof(dpcd_test_params)); + test_response.bits.ACK = dm_helpers_dp_handle_test_pattern_request(link->ctx, link, + dpcd_test_pattern, dpcd_test_params) ? 1 : 0; + } + + if (test_request.bits.AUDIO_TEST_PATTERN) { + dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO); + test_response.bits.ACK = 1; + } + + if (test_request.bits.PHY_TEST_PATTERN) { + dp_test_send_phy_test_pattern(link); + test_response.bits.ACK = 1; + } + + /* send request acknowledgment */ + if (test_response.bits.ACK) + core_link_write_dpcd( + link, + DP_TEST_RESPONSE, + &test_response.raw, + sizeof(test_response)); +} + +bool dc_link_dp_set_test_pattern( + struct dc_link *link, + enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space, + const struct link_training_settings *p_link_settings, + const unsigned char *p_custom_pattern, + unsigned int cust_pattern_size) +{ + struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; + struct pipe_ctx *pipe_ctx = NULL; + unsigned int lane; + unsigned int i; + unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0}; + union dpcd_training_pattern training_pattern; + enum dpcd_phy_test_patterns pattern; + + memset(&training_pattern, 0, sizeof(training_pattern)); + + for (i = 0; i < MAX_PIPES; i++) { + if (pipes[i].stream == NULL) + continue; + + if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) { + pipe_ctx = &pipes[i]; + break; + } + } + + if (pipe_ctx == NULL) + return false; + + /* Reset CRTC Test Pattern if it is currently running and request is VideoMode */ + if (link->test_pattern_enabled && test_pattern == + DP_TEST_PATTERN_VIDEO_MODE) { + /* Set CRTC Test Pattern */ + set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); + dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern, + (uint8_t *)p_custom_pattern, + (uint32_t)cust_pattern_size); + + /* Unblank Stream */ + link->dc->hwss.unblank_stream( + pipe_ctx, + &link->verified_link_cap); + /* TODO:m_pHwss->MuteAudioEndpoint + * (pPathMode->pDisplayPath, false); + */ + + /* Reset Test Pattern state */ + link->test_pattern_enabled = false; + + return true; + } + + /* Check for PHY Test Patterns */ + if (is_dp_phy_pattern(test_pattern)) { + /* Set DPCD Lane Settings before running test pattern */ + if (p_link_settings != NULL) { + if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + p_link_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) { + dp_fixed_vs_pe_set_retimer_lane_settings( + link, + p_link_settings->dpcd_lane_settings, + p_link_settings->link_settings.lane_count); + } else { + dp_set_hw_lane_settings(link, &pipe_ctx->link_res, p_link_settings, DPRX); + } + dpcd_set_lane_settings(link, p_link_settings, DPRX); + } + + /* Blank stream if 
running test pattern */ + if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) { + /*TODO: + * m_pHwss-> + * MuteAudioEndpoint(pPathMode->pDisplayPath, true); + */ + /* Blank stream */ + link->dc->hwss.blank_stream(pipe_ctx); + } + + dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern, + (uint8_t *)p_custom_pattern, + (uint32_t)cust_pattern_size); + + if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) { + /* Set Test Pattern state */ + link->test_pattern_enabled = true; + if (p_link_settings != NULL) + dpcd_set_link_settings(link, + p_link_settings); + } + + switch (test_pattern) { + case DP_TEST_PATTERN_VIDEO_MODE: + pattern = PHY_TEST_PATTERN_NONE; + break; + case DP_TEST_PATTERN_D102: + pattern = PHY_TEST_PATTERN_D10_2; + break; + case DP_TEST_PATTERN_SYMBOL_ERROR: + pattern = PHY_TEST_PATTERN_SYMBOL_ERROR; + break; + case DP_TEST_PATTERN_PRBS7: + pattern = PHY_TEST_PATTERN_PRBS7; + break; + case DP_TEST_PATTERN_80BIT_CUSTOM: + pattern = PHY_TEST_PATTERN_80BIT_CUSTOM; + break; + case DP_TEST_PATTERN_CP2520_1: + pattern = PHY_TEST_PATTERN_CP2520_1; + break; + case DP_TEST_PATTERN_CP2520_2: + pattern = PHY_TEST_PATTERN_CP2520_2; + break; + case DP_TEST_PATTERN_CP2520_3: + pattern = PHY_TEST_PATTERN_CP2520_3; + break; + case DP_TEST_PATTERN_128b_132b_TPS1: + pattern = PHY_TEST_PATTERN_128b_132b_TPS1; + break; + case DP_TEST_PATTERN_128b_132b_TPS2: + pattern = PHY_TEST_PATTERN_128b_132b_TPS2; + break; + case DP_TEST_PATTERN_PRBS9: + pattern = PHY_TEST_PATTERN_PRBS9; + break; + case DP_TEST_PATTERN_PRBS11: + pattern = PHY_TEST_PATTERN_PRBS11; + break; + case DP_TEST_PATTERN_PRBS15: + pattern = PHY_TEST_PATTERN_PRBS15; + break; + case DP_TEST_PATTERN_PRBS23: + pattern = PHY_TEST_PATTERN_PRBS23; + break; + case DP_TEST_PATTERN_PRBS31: + pattern = PHY_TEST_PATTERN_PRBS31; + break; + case DP_TEST_PATTERN_264BIT_CUSTOM: + pattern = PHY_TEST_PATTERN_264BIT_CUSTOM; + break; + case DP_TEST_PATTERN_SQUARE: + pattern = PHY_TEST_PATTERN_SQUARE; + break; + case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED: + pattern = PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED; + break; + case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED: + pattern = PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED; + break; + case DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED: + pattern = PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED; + break; + default: + return false; + } + + if (test_pattern == DP_TEST_PATTERN_VIDEO_MODE + /*TODO:&& !pPathMode->pDisplayPath->IsTargetPoweredOn()*/) + return false; + + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { + if (is_dp_phy_sqaure_pattern(test_pattern)) + core_link_write_dpcd(link, + DP_LINK_SQUARE_PATTERN, + p_custom_pattern, + 1); + + /* tell receiver that we are sending qualification + * pattern DP 1.2 or later - DP receiver's link quality + * pattern is set using DPCD LINK_QUAL_LANEx_SET + * register (0x10B~0x10E)\ + */ + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) + link_qual_pattern[lane] = + (unsigned char)(pattern); + + core_link_write_dpcd(link, + DP_LINK_QUAL_LANE0_SET, + link_qual_pattern, + sizeof(link_qual_pattern)); + } else if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_10 || + link->dpcd_caps.dpcd_rev.raw == 0) { + /* tell receiver that we are sending qualification + * pattern DP 1.1a or earlier - DP receiver's link + * quality pattern is set using + * DPCD TRAINING_PATTERN_SET -> LINK_QUAL_PATTERN_SET + * register (0x102). We will use v_1.3 when we are + * setting test pattern for DP 1.1. 
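Editor's aside (a minimal sketch, not part of the patch): the DPCD 1.2+ branch above broadcasts one pattern byte per lane into the contiguous LINK_QUAL_LANE0_SET..LINK_QUAL_LANE3_SET registers (0x10B-0x10E) with a single AUX write. The helper name below is hypothetical; the calls mirror the code in this file.

    static void broadcast_link_qual_pattern(struct dc_link *link,
            enum dpcd_phy_test_patterns pattern)
    {
        /* one byte per lane; 0x10B..0x10E are contiguous in DPCD */
        unsigned char link_qual_pattern[LANE_COUNT_DP_MAX];
        unsigned int lane;

        for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++)
            link_qual_pattern[lane] = (unsigned char)pattern;

        core_link_write_dpcd(link, DP_LINK_QUAL_LANE0_SET,
                link_qual_pattern, sizeof(link_qual_pattern));
    }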
+ */ + core_link_read_dpcd(link, DP_TRAINING_PATTERN_SET, + &training_pattern.raw, + sizeof(training_pattern)); + training_pattern.v1_3.LINK_QUAL_PATTERN_SET = pattern; + core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET, + &training_pattern.raw, + sizeof(training_pattern)); + } + } else { + enum dc_color_space color_space = COLOR_SPACE_UNKNOWN; + + switch (test_pattern_color_space) { + case DP_TEST_PATTERN_COLOR_SPACE_RGB: + color_space = COLOR_SPACE_SRGB; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_SRGB_LIMITED; + break; + + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601: + color_space = COLOR_SPACE_YCBCR601; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_YCBCR601_LIMITED; + break; + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709: + color_space = COLOR_SPACE_YCBCR709; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_YCBCR709_LIMITED; + break; + default: + break; + } + + if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) { + if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) { + union dmub_hw_lock_flags hw_locks = { 0 }; + struct dmub_hw_lock_inst_flags inst_flags = { 0 }; + + hw_locks.bits.lock_dig = 1; + inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst; + + dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv, + true, + &hw_locks, + &inst_flags); + } else + pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable( + pipe_ctx->stream_res.tg); + } + + pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); + /* update MSA to requested color space */ + pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc, + &pipe_ctx->stream->timing, + color_space, + pipe_ctx->stream->use_vsc_sdp_for_colorimetry, + link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); + + if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) { + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range + else + pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7); + resource_build_info_frame(pipe_ctx); + link->dc->hwss.update_info_frame(pipe_ctx); + } + + /* CRTC Patterns */ + set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); + pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, + CRTC_STATE_VACTIVE); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, + CRTC_STATE_VBLANK); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, + CRTC_STATE_VACTIVE); + + if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) { + if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) { + union dmub_hw_lock_flags hw_locks = { 0 }; + struct dmub_hw_lock_inst_flags inst_flags = { 0 }; + + hw_locks.bits.lock_dig = 1; + inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst; + + dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv, + false, + &hw_locks, + &inst_flags); + } else + pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable( + pipe_ctx->stream_res.tg); + } + + /* Set Test Pattern state */ + link->test_pattern_enabled = true; + } + + return true; +} + +void dc_link_set_drive_settings(struct dc *dc, + struct link_training_settings *lt_settings, + const struct dc_link *link) +{ + + int i; + struct link_resource link_res; + + for (i = 0; i < dc->link_count; i++) + if (dc->links[i] == link) + 
break; + + if (i >= dc->link_count) + ASSERT_CRITICAL(false); + + link_get_cur_link_res(link, &link_res); + dp_set_drive_settings(dc->links[i], &link_res, lt_settings); +} + +void dc_link_set_preferred_link_settings(struct dc *dc, + struct dc_link_settings *link_setting, + struct dc_link *link) +{ + int i; + struct pipe_ctx *pipe; + struct dc_stream_state *link_stream; + struct dc_link_settings store_settings = *link_setting; + + link->preferred_link_setting = store_settings; + + /* Retrain with preferred link settings only relevant for + * DP signal type + * Check for non-DP signal or if passive dongle present + */ + if (!dc_is_dp_signal(link->connector_signal) || + link->dongle_max_pix_clk > 0) + return; + + for (i = 0; i < MAX_PIPES; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe->stream && pipe->stream->link) { + if (pipe->stream->link == link) { + link_stream = pipe->stream; + break; + } + } + } + + /* Stream not found */ + if (i == MAX_PIPES) + return; + + /* Cannot retrain link if backend is off */ + if (link_stream->dpms_off) + return; + + if (link_decide_link_settings(link_stream, &store_settings)) + dp_retrain_link_dp_test(link, &store_settings, false); +} + +void dc_link_set_preferred_training_settings(struct dc *dc, + struct dc_link_settings *link_setting, + struct dc_link_training_overrides *lt_overrides, + struct dc_link *link, + bool skip_immediate_retrain) +{ + if (lt_overrides != NULL) + link->preferred_training_settings = *lt_overrides; + else + memset(&link->preferred_training_settings, 0, sizeof(link->preferred_training_settings)); + + if (link_setting != NULL) { + link->preferred_link_setting = *link_setting; + } else { + link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN; + link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN; + } + + if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && + link->type == dc_connection_mst_branch) + dm_helpers_dp_mst_update_branch_bandwidth(dc->ctx, link); + + /* Retrain now, or wait until next stream update to apply */ + if (skip_immediate_retrain == false) + dc_link_set_preferred_link_settings(dc, &link->preferred_link_setting, link); +} + +void dc_link_set_test_pattern(struct dc_link *link, + enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space, + const struct link_training_settings *p_link_settings, + const unsigned char *p_custom_pattern, + unsigned int cust_pattern_size) +{ + if (link != NULL) + dc_link_dp_set_test_pattern( + link, + test_pattern, + test_pattern_color_space, + p_link_settings, + p_custom_pattern, + cust_pattern_size); +} diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h new file mode 100644 index 00000000000000..7f17838b653b77 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h @@ -0,0 +1,33 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
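A usage sketch for the preferred-settings API above (editor's illustration only; the wrapper name and the chosen values are examples, not code from this patch): pin a DP link to 2-lane HBR2 and let the immediate-retrain path apply it.

    static void dbg_force_hbr2_x2(struct dc *dc, struct dc_link *link)
    {
        struct dc_link_settings setting = {
            .lane_count = LANE_COUNT_TWO,
            .link_rate = LINK_RATE_HIGH2,  /* HBR2, 5.4 Gbps per lane */
            .link_spread = LINK_SPREAD_DISABLED,
        };

        /* stores the preference and retrains if a stream is active */
        dc_link_set_preferred_link_settings(dc, &setting, link);
    }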
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_DP_CTS_H__ +#define __LINK_DP_CTS_H__ +#include "link.h" + +void dp_retrain_link_dp_test(struct dc_link *link, + struct dc_link_settings *link_setting, + bool skip_video_pattern); + +#endif /* __LINK_DP_CTS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c similarity index 89% rename from drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c rename to drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c index 2c1a3bfcdb5067..459b362ed374af 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c @@ -22,8 +22,9 @@ * Authors: AMD * */ -#include "dc_link.h" #include "link_dp_trace.h" +#include "link/protocols/link_dpcd.h" +#include "link.h" void dp_trace_init(struct dc_link *link) { @@ -145,7 +146,7 @@ unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link) return link->dp_trace.link_loss_count; } -void dp_trace_set_edp_power_timestamp(struct dc_link *link, +void link_dp_trace_set_edp_power_timestamp(struct dc_link *link, bool power_up) { if (!power_up) @@ -155,12 +156,19 @@ void dp_trace_set_edp_power_timestamp(struct dc_link *link, link->dp_trace.edp_trace_power_timestamps.poweron = dm_get_timestamp(link->dc->ctx); } -uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link) +uint64_t link_dp_trace_get_edp_poweron_timestamp(struct dc_link *link) { return link->dp_trace.edp_trace_power_timestamps.poweron; } -uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link) +uint64_t link_dp_trace_get_edp_poweroff_timestamp(struct dc_link *link) { return link->dp_trace.edp_trace_power_timestamps.poweroff; -} \ No newline at end of file +} + +void link_dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode) +{ + if (link != NULL && link->dc->debug.enable_driver_sequence_debug) + core_link_write_dpcd(link, DP_SOURCE_SEQUENCE, + &dp_test_mode, sizeof(dp_test_mode)); +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h similarity index 91% rename from drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h rename to drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h index 26700e3cd65e9c..89feea1b26920b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h +++ 
b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h @@ -24,6 +24,7 @@ */ #ifndef __LINK_DP_TRACE_H__ #define __LINK_DP_TRACE_H__ +#include "link.h" void dp_trace_init(struct dc_link *link); void dp_trace_reset(struct dc_link *link); @@ -54,9 +55,4 @@ struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link, bool in_detection); unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link); -void dp_trace_set_edp_power_timestamp(struct dc_link *link, - bool power_up); -uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link); -uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link); - #endif /* __LINK_DP_TRACE_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c new file mode 100644 index 00000000000000..d3cc604eed67a6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c @@ -0,0 +1,95 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "link_fpga.h" +#include "link/link_dpms.h" +#include "dm_helpers.h" +#include "link_hwss.h" +#include "dccg.h" +#include "resource.h" + +#define DC_LOGGER_INIT(logger) + +void dp_fpga_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dc_stream_state *stream = pipe_ctx->stream; + struct link_mst_stream_allocation_table proposed_table = {0}; + struct fixed31_32 avg_time_slots_per_mtp; + uint8_t req_slot_count = 0; + uint8_t vc_id = 1; /// VC ID always 1 for SST + struct dc_link_settings link_settings = pipe_ctx->link_config.dp_link_settings; + const struct link_hwss *link_hwss = get_link_hwss(stream->link, &pipe_ctx->link_res); + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + + stream->link->cur_link_settings = link_settings; + + if (link_hwss->ext.enable_dp_link_output) + link_hwss->ext.enable_dp_link_output(stream->link, &pipe_ctx->link_res, + stream->signal, pipe_ctx->clock_source->id, + &link_settings); + + /* Enable DP_STREAM_ENC */ + dc->hwss.enable_stream(pipe_ctx); + + /* Set DPS PPS SDP (AKA "info frames") */ + if (pipe_ctx->stream->timing.flags.DSC) { + link_set_dsc_pps_packet(pipe_ctx, true, true); + } + + /* Allocate Payload */ + if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) && (state->stream_count > 1)) { + // MST case + uint8_t i; + + proposed_table.stream_count = state->stream_count; + for (i = 0; i < state->stream_count; i++) { + avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(state->streams[i], state->streams[i]->link); + req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); + proposed_table.stream_allocations[i].slot_count = req_slot_count; + proposed_table.stream_allocations[i].vcp_id = i+1; + /* NOTE: This makes assumption that pipe_ctx index is same as stream index */ + proposed_table.stream_allocations[i].hpo_dp_stream_enc = state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc; + } + } else { + // SST case + avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, stream->link); + req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); + proposed_table.stream_count = 1; /// Always 1 stream for SST + proposed_table.stream_allocations[0].slot_count = req_slot_count; + proposed_table.stream_allocations[0].vcp_id = vc_id; + proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; + } + + link_hwss->ext.update_stream_allocation_table(stream->link, + &pipe_ctx->link_res, + &proposed_table); + + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + + dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings); + dc->hwss.enable_audio_stream(pipe_ctx); +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h new file mode 100644 index 00000000000000..3a80f5595943ad --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h @@ -0,0 +1,30 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
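Editor's note on the payload arithmetic above (a hedged sketch, not part of the patch): an MST transfer unit carries 64 time slots, and dc_fixpt_ceil() rounds the stream's fixed-point average time slots per MTP up to the whole slots requested in the table; a stream needing 10.4 slots, for example, requests 11. The helper below is hypothetical bookkeeping:

    /* would the stream's rounded-up slot request still fit in the MTP? */
    static bool vc_payload_fits(struct fixed31_32 avg_time_slots_per_mtp,
                                int slots_already_allocated)
    {
        int req_slots = dc_fixpt_ceil(avg_time_slots_per_mtp);

        return slots_already_allocated + req_slots <= 64;
    }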
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_FPGA_H__ +#define __LINK_FPGA_H__ +#include "link.h" +void dp_fpga_hpo_enable_link_and_stream(struct dc_state *state, + struct pipe_ctx *pipe_ctx); +#endif /* __LINK_FPGA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c similarity index 93% rename from drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c rename to drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c index 33148b753c03b4..b092b00b3599f3 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c @@ -24,7 +24,6 @@ */ #include "link_hwss_dio.h" #include "core_types.h" -#include "dc_link_dp.h" #include "link_enc_cfg.h" void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx, @@ -45,7 +44,7 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx) link_enc->funcs->connect_dig_be_to_fe(link_enc, pipe_ctx->stream_res.stream_enc->id, true); if (dc_is_dp_signal(pipe_ctx->stream->signal)) - dp_source_sequence_trace(pipe_ctx->stream->link, + link_dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE); if (stream_enc->funcs->enable_fifo) stream_enc->funcs->enable_fifo(stream_enc); @@ -64,7 +63,7 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.stream_enc->id, false); if (dc_is_dp_signal(pipe_ctx->stream->signal)) - dp_source_sequence_trace(pipe_ctx->stream->link, + link_dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE); } @@ -106,7 +105,7 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx) &stream->timing); if (dc_is_dp_signal(stream->signal)) - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR); } void enable_dio_dp_link_output(struct dc_link *link, @@ -127,7 +126,7 @@ void enable_dio_dp_link_output(struct dc_link *link, link_enc, link_settings, clock_source); - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY); } void disable_dio_link_output(struct dc_link *link, @@ -137,7 +136,7 @@ void disable_dio_link_output(struct dc_link *link, struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); link_enc->funcs->disable_output(link_enc, signal); - 
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); } void set_dio_dp_link_test_pattern(struct dc_link *link, @@ -147,7 +146,7 @@ void set_dio_dp_link_test_pattern(struct dc_link *link, struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params); - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); } void set_dio_dp_lane_settings(struct dc_link *link, @@ -196,7 +195,7 @@ void enable_dio_audio_packet(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.stream_enc, false); if (dc_is_dp_signal(pipe_ctx->stream->signal)) - dp_source_sequence_trace(pipe_ctx->stream->link, + link_dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM); } @@ -215,7 +214,7 @@ void disable_dio_audio_packet(struct pipe_ctx *pipe_ctx) } if (dc_is_dp_signal(pipe_ctx->stream->signal)) - dp_source_sequence_trace(pipe_ctx->stream->link, + link_dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM); } diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h similarity index 99% rename from drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h rename to drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h index 9a108c3d783156..8b8a099feeb09d 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h @@ -26,6 +26,7 @@ #define __LINK_HWSS_DIO_H__ #include "link_hwss.h" +#include "link.h" const struct link_hwss *get_dio_link_hwss(void); bool can_use_dio_link_hwss(const struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c rename to drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.h rename to drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c similarity index 85% rename from drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c rename to drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c index 2f46e1ac4ce0e9..aa1c5e253b43e4 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c @@ -26,7 +26,6 @@ #include "dm_helpers.h" #include "core_types.h" #include "dccg.h" -#include "dc_link_dp.h" #include "clk_mgr.h" static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) @@ -87,57 +86,20 @@ static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx, hblank_min_symbol_width); } -static int get_odm_segment_count(struct pipe_ctx *pipe_ctx) -{ - struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; - int count = 1; - - while (odm_pipe != NULL) { - count++; - odm_pipe = odm_pipe->next_odm_pipe; - } - - return count; -} - static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) { - struct dc *dc = pipe_ctx->stream->ctx->dc; 
struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc; - struct dccg *dccg = dc->res_pool->dccg; - struct timing_generator *tg = pipe_ctx->stream_res.tg; - struct dtbclk_dto_params dto_params = {0}; - enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link); - dto_params.otg_inst = tg->inst; - dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; - dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); - dto_params.timing = &pipe_ctx->stream->timing; - dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); - - dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, stream_enc->inst); - dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk); - dccg->funcs->set_dtbclk_dto(dccg, &dto_params); stream_enc->funcs->enable_stream(stream_enc); stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst); } static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) { - struct dc *dc = pipe_ctx->stream->ctx->dc; struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; - struct dccg *dccg = dc->res_pool->dccg; - struct timing_generator *tg = pipe_ctx->stream_res.tg; - struct dtbclk_dto_params dto_params = {0}; - - dto_params.otg_inst = tg->inst; - dto_params.timing = &pipe_ctx->stream->timing; stream_enc->funcs->disable(stream_enc); - dccg->funcs->set_dtbclk_dto(dccg, &dto_params); - dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst); - dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, stream_enc->inst); } static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx) @@ -153,7 +115,7 @@ static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx) stream->use_vsc_sdp_for_colorimetry, stream->timing.flags.DSC, false); - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR); } static void enable_hpo_dp_fpga_link_output(struct dc_link *link, @@ -239,7 +201,7 @@ static void set_hpo_dp_link_test_pattern(struct dc_link *link, { link_res->hpo_dp_link_enc->funcs->set_link_test_pattern( link_res->hpo_dp_link_enc, tp_params); - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); } static void set_hpo_dp_lane_settings(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h similarity index 98% rename from drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h rename to drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h index 57d447ec27b87d..3cbb94b41a2316 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h @@ -26,6 +26,7 @@ #define __LINK_HWSS_HPO_DP_H__ #include "link_hwss.h" +#include "link.h" bool can_use_hpo_dp_link_hwss(const struct dc_link *link, const struct link_resource *link_res); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c new file mode 100644 index 00000000000000..38216c789d7771 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -0,0 +1,1323 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file manages link detection states and receiver states by using various + * link protocols. It also provides helper functions to interpret certain + * capabilities or status based on the states it manages or retrieve them + * directly from connected receivers. + */ + +#include "link_dpms.h" +#include "link_detection.h" +#include "link_hwss.h" +#include "protocols/link_edp_panel_control.h" +#include "protocols/link_ddc.h" +#include "protocols/link_hpd.h" +#include "protocols/link_dpcd.h" +#include "protocols/link_dp_capability.h" +#include "protocols/link_dp_dpia.h" +#include "protocols/link_dp_phy.h" +#include "protocols/link_dp_training.h" +#include "accessories/link_dp_trace.h" + +#include "link_enc_cfg.h" +#include "dm_helpers.h" +#include "clk_mgr.h" + +#define DC_LOGGER_INIT(logger) + +#define LINK_INFO(...) \ + DC_LOG_HW_HOTPLUG( \ + __VA_ARGS__) +/* + * Some receivers fail to train on first try and are good + * on subsequent tries. 2 retries should be plenty. If we + * don't have a successful training then we don't expect to + * ever get one. + */ +#define LINK_TRAINING_MAX_VERIFY_RETRY 2 + +static enum ddc_transaction_type get_ddc_transaction_type(enum signal_type sink_signal) +{ + enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE; + + switch (sink_signal) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + case SIGNAL_TYPE_LVDS: + case SIGNAL_TYPE_RGB: + transaction_type = DDC_TRANSACTION_TYPE_I2C; + break; + + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_EDP: + transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; + break; + + case SIGNAL_TYPE_DISPLAY_PORT_MST: + /* MST does not use I2COverAux, but there is the + * SPECIAL use case for "immediate dwnstrm device + * access" (EPR#370830). 
+ */ + transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; + break; + + default: + break; + } + + return transaction_type; +} + +static enum signal_type get_basic_signal_type(struct graphics_object_id encoder, + struct graphics_object_id downstream) +{ + if (downstream.type == OBJECT_TYPE_CONNECTOR) { + switch (downstream.id) { + case CONNECTOR_ID_SINGLE_LINK_DVII: + switch (encoder.id) { + case ENCODER_ID_INTERNAL_DAC1: + case ENCODER_ID_INTERNAL_KLDSCP_DAC1: + case ENCODER_ID_INTERNAL_DAC2: + case ENCODER_ID_INTERNAL_KLDSCP_DAC2: + return SIGNAL_TYPE_RGB; + default: + return SIGNAL_TYPE_DVI_SINGLE_LINK; + } + break; + case CONNECTOR_ID_DUAL_LINK_DVII: + { + switch (encoder.id) { + case ENCODER_ID_INTERNAL_DAC1: + case ENCODER_ID_INTERNAL_KLDSCP_DAC1: + case ENCODER_ID_INTERNAL_DAC2: + case ENCODER_ID_INTERNAL_KLDSCP_DAC2: + return SIGNAL_TYPE_RGB; + default: + return SIGNAL_TYPE_DVI_DUAL_LINK; + } + } + break; + case CONNECTOR_ID_SINGLE_LINK_DVID: + return SIGNAL_TYPE_DVI_SINGLE_LINK; + case CONNECTOR_ID_DUAL_LINK_DVID: + return SIGNAL_TYPE_DVI_DUAL_LINK; + case CONNECTOR_ID_VGA: + return SIGNAL_TYPE_RGB; + case CONNECTOR_ID_HDMI_TYPE_A: + return SIGNAL_TYPE_HDMI_TYPE_A; + case CONNECTOR_ID_LVDS: + return SIGNAL_TYPE_LVDS; + case CONNECTOR_ID_DISPLAY_PORT: + case CONNECTOR_ID_USBC: + return SIGNAL_TYPE_DISPLAY_PORT; + case CONNECTOR_ID_EDP: + return SIGNAL_TYPE_EDP; + default: + return SIGNAL_TYPE_NONE; + } + } else if (downstream.type == OBJECT_TYPE_ENCODER) { + switch (downstream.id) { + case ENCODER_ID_EXTERNAL_NUTMEG: + case ENCODER_ID_EXTERNAL_TRAVIS: + return SIGNAL_TYPE_DISPLAY_PORT; + default: + return SIGNAL_TYPE_NONE; + } + } + + return SIGNAL_TYPE_NONE; +} + +/* + * @brief + * Detect output sink type + */ +static enum signal_type link_detect_sink_signal_type(struct dc_link *link, + enum dc_detect_reason reason) +{ + enum signal_type result; + struct graphics_object_id enc_id; + + if (link->is_dig_mapping_flexible) + enc_id = (struct graphics_object_id){.id = ENCODER_ID_UNKNOWN}; + else + enc_id = link->link_enc->id; + result = get_basic_signal_type(enc_id, link->link_id); + + /* Use basic signal type for link without physical connector. */ + if (link->ep_type != DISPLAY_ENDPOINT_PHY) + return result; + + /* Internal digital encoder will detect only dongles + * that require digital signal + */ + + /* Detection mechanism is different + * for different native connectors. + * LVDS connector supports only LVDS signal; + * PCIE is a bus slot, the actual connector needs to be detected first; + * eDP connector supports only eDP signal; + * HDMI should check straps for audio + */ + + /* PCIE detects the actual connector on add-on board */ + if (link->link_id.id == CONNECTOR_ID_PCIE) { + /* ZAZTODO implement PCIE add-on card detection */ + } + + switch (link->link_id.id) { + case CONNECTOR_ID_HDMI_TYPE_A: { + /* check audio support: + * if native HDMI is not supported, switch to DVI + */ + struct audio_support *aud_support = + &link->dc->res_pool->audio_support; + + if (!aud_support->hdmi_audio_native) + if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A) + result = SIGNAL_TYPE_DVI_SINGLE_LINK; + } + break; + case CONNECTOR_ID_DISPLAY_PORT: + case CONNECTOR_ID_USBC: { + /* DP HPD short pulse. 
Passive DP dongle will not + * have short pulse + */ + if (reason != DETECT_REASON_HPDRX) { + /* Check whether DP signal detected: if not - + * we assume signal is DVI; it could be corrected + * to HDMI after dongle detection + */ + if (!dm_helpers_is_dp_sink_present(link)) + result = SIGNAL_TYPE_DVI_SINGLE_LINK; + } + } + break; + default: + break; + } + + return result; +} + +static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_dongle_type dongle_type, + struct audio_support *audio_support) +{ + enum signal_type signal = SIGNAL_TYPE_NONE; + + switch (dongle_type) { + case DISPLAY_DONGLE_DP_HDMI_DONGLE: + if (audio_support->hdmi_audio_on_dongle) + signal = SIGNAL_TYPE_HDMI_TYPE_A; + else + signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + break; + case DISPLAY_DONGLE_DP_DVI_DONGLE: + signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + break; + case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE: + if (audio_support->hdmi_audio_native) + signal = SIGNAL_TYPE_HDMI_TYPE_A; + else + signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + break; + default: + signal = SIGNAL_TYPE_NONE; + break; + } + + return signal; +} + +static void read_scdc_caps(struct ddc_service *ddc_service, + struct dc_sink *sink) +{ + uint8_t slave_address = HDMI_SCDC_ADDRESS; + uint8_t offset = HDMI_SCDC_MANUFACTURER_OUI; + + link_query_ddc_data(ddc_service, slave_address, &offset, + sizeof(offset), sink->scdc_caps.manufacturer_OUI.byte, + sizeof(sink->scdc_caps.manufacturer_OUI.byte)); + + offset = HDMI_SCDC_DEVICE_ID; + + link_query_ddc_data(ddc_service, slave_address, &offset, + sizeof(offset), &(sink->scdc_caps.device_id.byte), + sizeof(sink->scdc_caps.device_id.byte)); +} + +static bool i2c_read( + struct ddc_service *ddc, + uint32_t address, + uint8_t *buffer, + uint32_t len) +{ + uint8_t offs_data = 0; + struct i2c_payload payloads[2] = { + { + .write = true, + .address = address, + .length = 1, + .data = &offs_data }, + { + .write = false, + .address = address, + .length = len, + .data = buffer } }; + + struct i2c_command command = { + .payloads = payloads, + .number_of_payloads = 2, + .engine = DDC_I2C_COMMAND_ENGINE, + .speed = ddc->ctx->dc->caps.i2c_speed_in_khz }; + + return dm_helpers_submit_i2c( + ddc->ctx, + ddc->link, + &command); +} + +enum { + DP_SINK_CAP_SIZE = + DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1 +}; + +static void query_dp_dual_mode_adaptor( + struct ddc_service *ddc, + struct display_sink_capability *sink_cap) +{ + uint8_t i; + bool is_valid_hdmi_signature; + enum display_dongle_type *dongle = &sink_cap->dongle_type; + uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE]; + bool is_type2_dongle = false; + int retry_count = 2; + struct dp_hdmi_dongle_signature_data *dongle_signature; + + /* Assume we have no valid DP passive dongle connected */ + *dongle = DISPLAY_DONGLE_NONE; + sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK; + + /* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/ + if (!i2c_read( + ddc, + DP_HDMI_DONGLE_ADDRESS, + type2_dongle_buf, + sizeof(type2_dongle_buf))) { + /* Passive HDMI dongles can sometimes fail here without retrying*/ + while (retry_count > 0) { + if (i2c_read(ddc, + DP_HDMI_DONGLE_ADDRESS, + type2_dongle_buf, + sizeof(type2_dongle_buf))) + break; + retry_count--; + } + if (retry_count == 0) { + *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; + sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), + "DP-DVI passive dongle %dMhz: ", + DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); 
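For reference (editor's sketch, not part of the patch): the write-offset-then-read shape of i2c_read() above is the transaction DP dual-mode (DP++) adaptors expect. A single-register variant looks like this; `i2c_read_reg` is a hypothetical name, and the payload/command structure mirrors the function above.

    static bool i2c_read_reg(struct ddc_service *ddc, uint32_t address,
                             uint8_t offset, uint8_t *value)
    {
        /* payload 0 writes the register offset, payload 1 reads it back */
        struct i2c_payload payloads[2] = {
            { .write = true, .address = address, .length = 1, .data = &offset },
            { .write = false, .address = address, .length = 1, .data = value },
        };
        struct i2c_command command = {
            .payloads = payloads,
            .number_of_payloads = 2,
            .engine = DDC_I2C_COMMAND_ENGINE,
            .speed = ddc->ctx->dc->caps.i2c_speed_in_khz,
        };

        return dm_helpers_submit_i2c(ddc->ctx, ddc->link, &command);
    }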
+ return; + } + } + + /* Check if Type 2 dongle.*/ + if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID) + is_type2_dongle = true; + + dongle_signature = + (struct dp_hdmi_dongle_signature_data *)type2_dongle_buf; + + is_valid_hdmi_signature = true; + + /* Check EOT */ + if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) { + is_valid_hdmi_signature = false; + } + + /* Check signature */ + for (i = 0; i < sizeof(dongle_signature->id); ++i) { + /* If its not the right signature, + * skip mismatch in subversion byte.*/ + if (dongle_signature->id[i] != + dp_hdmi_dongle_signature_str[i] && i != 3) { + + if (is_type2_dongle) { + is_valid_hdmi_signature = false; + break; + } + + } + } + + if (is_type2_dongle) { + uint32_t max_tmds_clk = + type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK]; + + max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2; + + if (0 == max_tmds_clk || + max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK || + max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) { + *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, + sizeof(type2_dongle_buf), + "DP-DVI passive dongle %dMhz: ", + DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); + } else { + if (is_valid_hdmi_signature == true) { + *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, + sizeof(type2_dongle_buf), + "Type 2 DP-HDMI passive dongle %dMhz: ", + max_tmds_clk); + } else { + *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, + sizeof(type2_dongle_buf), + "Type 2 DP-HDMI passive dongle (no signature) %dMhz: ", + max_tmds_clk); + + } + + /* Multiply by 1000 to convert to kHz. */ + sink_cap->max_hdmi_pixel_clock = + max_tmds_clk * 1000; + } + sink_cap->is_dongle_type_one = false; + + } else { + if (is_valid_hdmi_signature == true) { + *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, + sizeof(type2_dongle_buf), + "Type 1 DP-HDMI passive dongle %dMhz: ", + sink_cap->max_hdmi_pixel_clock / 1000); + } else { + *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, + sizeof(type2_dongle_buf), + "Type 1 DP-HDMI passive dongle (no signature) %dMhz: ", + sink_cap->max_hdmi_pixel_clock / 1000); + } + sink_cap->is_dongle_type_one = true; + } + + return; +} + +static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc, + struct display_sink_capability *sink_cap, + struct audio_support *audio_support) +{ + query_dp_dual_mode_adaptor(ddc, sink_cap); + + return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type, + audio_support); +} + +static void link_disconnect_sink(struct dc_link *link) +{ + if (link->local_sink) { + dc_sink_release(link->local_sink); + link->local_sink = NULL; + } + + link->dpcd_sink_count = 0; + //link->dpcd_caps.dpcd_rev.raw = 0; +} + +static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *link) +{ + dc_sink_release(link->local_sink); + link->local_sink = prev_sink; +} + +#if defined(CONFIG_DRM_AMD_DC_HDCP) +static void query_hdcp_capability(enum signal_type signal, struct dc_link *link) +{ + struct hdcp_protection_message msg22; + struct hdcp_protection_message msg14; + + memset(&msg22, 0, sizeof(struct hdcp_protection_message)); + memset(&msg14, 0, sizeof(struct hdcp_protection_message)); + memset(link->hdcp_caps.rx_caps.raw, 0, + sizeof(link->hdcp_caps.rx_caps.raw)); + + if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && + 
link->ddc->transaction_type ==
+            DDC_TRANSACTION_TYPE_I2C_OVER_AUX) ||
+            link->connector_signal == SIGNAL_TYPE_EDP) {
+        msg22.data = link->hdcp_caps.rx_caps.raw;
+        msg22.length = sizeof(link->hdcp_caps.rx_caps.raw);
+        msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS;
+    } else {
+        msg22.data = &link->hdcp_caps.rx_caps.fields.version;
+        msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version);
+        msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION;
+    }
+    msg22.version = HDCP_VERSION_22;
+    msg22.link = HDCP_LINK_PRIMARY;
+    msg22.max_retries = 5;
+    dc_process_hdcp_msg(signal, link, &msg22);
+
+    if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+        enum hdcp_message_status status = HDCP_MESSAGE_UNSUPPORTED;
+
+        msg14.data = &link->hdcp_caps.bcaps.raw;
+        msg14.length = sizeof(link->hdcp_caps.bcaps.raw);
+        msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS;
+        msg14.version = HDCP_VERSION_14;
+        msg14.link = HDCP_LINK_PRIMARY;
+        msg14.max_retries = 5;
+
+        status = dc_process_hdcp_msg(signal, link, &msg14);
+    }
+
+}
+#endif // CONFIG_DRM_AMD_DC_HDCP
+static void read_current_link_settings_on_detect(struct dc_link *link)
+{
+    union lane_count_set lane_count_set = {0};
+    uint8_t link_bw_set;
+    uint8_t link_rate_set;
+    uint32_t read_dpcd_retry_cnt = 10;
+    enum dc_status status = DC_ERROR_UNEXPECTED;
+    int i;
+    union max_down_spread max_down_spread = {0};
+
+    // Read DPCD 00101h to find out the number of lanes currently set
+    for (i = 0; i < read_dpcd_retry_cnt; i++) {
+        status = core_link_read_dpcd(link,
+                DP_LANE_COUNT_SET,
+                &lane_count_set.raw,
+                sizeof(lane_count_set));
+        /* The first DPCD read after VDD ON can fail if the particular
+         * board does not have the HPD pin wired correctly. So if the
+         * DPCD read fails, which should never happen, retry a few
+         * times, targeting a worst case of 80 ms.
+         */
+        if (status == DC_OK) {
+            link->cur_link_settings.lane_count =
+                    lane_count_set.bits.LANE_COUNT_SET;
+            break;
+        }
+
+        msleep(8);
+    }
+
+    // Read DPCD 00100h to find if standard link rates are set
+    core_link_read_dpcd(link, DP_LINK_BW_SET,
+            &link_bw_set, sizeof(link_bw_set));
+
+    if (link_bw_set == 0) {
+        if (link->connector_signal == SIGNAL_TYPE_EDP) {
+            /* If standard link rates are not being used,
+             * read DPCD 00115h to find the eDP link rate set used
+             */
+            core_link_read_dpcd(link, DP_LINK_RATE_SET,
+                    &link_rate_set, sizeof(link_rate_set));
+
+            // edp_supported_link_rates_count = 0 for DP
+            if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+                link->cur_link_settings.link_rate =
+                    link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+                link->cur_link_settings.link_rate_set = link_rate_set;
+                link->cur_link_settings.use_link_rate_set = true;
+            }
+        } else {
+            // Link rate not found. Seamless boot may not work.
+            ASSERT(false);
+        }
+    } else {
+        link->cur_link_settings.link_rate = link_bw_set;
+        link->cur_link_settings.use_link_rate_set = false;
+    }
+    // Read DPCD 00003h to find the max down spread.
+    core_link_read_dpcd(link, DP_MAX_DOWNSPREAD,
+            &max_down_spread.raw, sizeof(max_down_spread));
+    link->cur_link_settings.link_spread =
+        max_down_spread.bits.MAX_DOWN_SPREAD ?
+        LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+}
+
+static bool detect_dp(struct dc_link *link,
+        struct display_sink_capability *sink_caps,
+        enum dc_detect_reason reason)
+{
+    struct audio_support *audio_support = &link->dc->res_pool->audio_support;
+
+    sink_caps->signal = link_detect_sink_signal_type(link, reason);
+    sink_caps->transaction_type =
+        get_ddc_transaction_type(sink_caps->signal);
+
+    if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+        sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+        if (!detect_dp_sink_caps(link))
+            return false;
+
+        if (is_dp_branch_device(link))
+            /* DP SST branch */
+            link->type = dc_connection_sst_branch;
+    } else {
+        /* DP passive dongles */
+        sink_caps->signal = dp_passive_dongle_detection(link->ddc,
+                sink_caps,
+                audio_support);
+        link->dpcd_caps.dongle_type = sink_caps->dongle_type;
+        link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one;
+        link->dpcd_caps.dpcd_rev.raw = 0;
+    }
+
+    return true;
+}
+
+static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
+{
+    if (old_edid->length != new_edid->length)
+        return false;
+
+    if (new_edid->length == 0)
+        return false;
+
+    return (memcmp(old_edid->raw_edid,
+            new_edid->raw_edid, new_edid->length) == 0);
+}
+
+static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
+{
+
+    /**
+     * Something is terribly wrong if the timeout is > 200 ms (5 Hz):
+     * 500 microseconds * 400 tries gives us the 200 ms cap.
+     **/
+    unsigned int sleep_time_in_microseconds = 500;
+    unsigned int tries_allowed = 400;
+    bool is_in_alt_mode;
+    unsigned long long enter_timestamp;
+    unsigned long long finish_timestamp;
+    unsigned long long time_taken_in_ns;
+    int tries_taken;
+
+    DC_LOGGER_INIT(link->ctx->logger);
+
+    /**
+     * This function pointer will only exist if we are on dcn21
+     * (is_in_alt_mode is a function pointer, so checking whether it is
+     * equal to 0 is the same as checking whether it is null).
+     **/
+    if (!link->link_enc->funcs->is_in_alt_mode)
+        return true;
+
+    is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
+    DC_LOG_DC("DP Alt mode state on HPD: %d\n", is_in_alt_mode);
+
+    if (is_in_alt_mode)
+        return true;
+
+    enter_timestamp = dm_get_timestamp(link->ctx);
+
+    for (tries_taken = 0; tries_taken < tries_allowed; tries_taken++) {
+        udelay(sleep_time_in_microseconds);
+        /* ask the link if alt mode is enabled, if so return ok */
+        if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) {
+            finish_timestamp = dm_get_timestamp(link->ctx);
+            time_taken_in_ns =
+                dm_get_elapse_time_in_ns(link->ctx,
+                        finish_timestamp,
+                        enter_timestamp);
+            DC_LOG_WARNING("Alt mode entry finished after %llu ms\n",
+                    div_u64(time_taken_in_ns, 1000000));
+            return true;
+        }
+    }
+    finish_timestamp = dm_get_timestamp(link->ctx);
+    time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp,
+            enter_timestamp);
+    DC_LOG_WARNING("Alt mode has timed out after %llu ms\n",
+            div_u64(time_taken_in_ns, 1000000));
+    return false;
+}
+
+static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link)
+{
+    /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock
+     * reports DSC support.
+ */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->type == dc_connection_mst_branch && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_20 && + link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && + !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) + link->wa_flags.dpia_mst_dsc_always_on = true; +} + +static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link) +{ + /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + link->wa_flags.dpia_mst_dsc_always_on = false; +} + +static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason reason) +{ + DC_LOGGER_INIT(link->ctx->logger); + + LINK_INFO("link=%d, mst branch is now Connected\n", + link->link_index); + + link->type = dc_connection_mst_branch; + apply_dpia_mst_dsc_always_on_wa(link); + + dm_helpers_dp_update_branch_info(link->ctx, link); + if (dm_helpers_dp_mst_start_top_mgr(link->ctx, + link, (reason == DETECT_REASON_BOOT || reason == DETECT_REASON_RESUMEFROMS3S4))) { + link_disconnect_sink(link); + } else { + link->type = dc_connection_sst_branch; + } + + return link->type == dc_connection_mst_branch; +} + +bool link_reset_cur_dp_mst_topology(struct dc_link *link) +{ + DC_LOGGER_INIT(link->ctx->logger); + + LINK_INFO("link=%d, mst branch is now Disconnected\n", + link->link_index); + + revert_dpia_mst_dsc_always_on_wa(link); + return dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); +} + +static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc, + enum dc_detect_reason reason) +{ + int i; + bool can_apply_seamless_boot = false; + + for (i = 0; i < dc->current_state->stream_count; i++) { + if (dc->current_state->streams[i]->apply_seamless_boot_optimization) { + can_apply_seamless_boot = true; + break; + } + } + + return !can_apply_seamless_boot && reason != DETECT_REASON_BOOT; +} + +static void prepare_phy_clocks_for_destructive_link_verification(const struct dc *dc) +{ + dc_z10_restore(dc); + clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); +} + +static void restore_phy_clocks_for_destructive_link_verification(const struct dc *dc) +{ + clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); +} + +static void verify_link_capability_destructive(struct dc_link *link, + struct dc_sink *sink, + enum dc_detect_reason reason) +{ + bool should_prepare_phy_clocks = + should_prepare_phy_clocks_for_link_verification(link->dc, reason); + + if (should_prepare_phy_clocks) + prepare_phy_clocks_for_destructive_link_verification(link->dc); + + if (dc_is_dp_signal(link->local_sink->sink_signal)) { + struct dc_link_settings known_limit_link_setting = + dp_get_max_link_cap(link); + link_set_all_streams_dpms_off_for_link(link); + dp_verify_link_cap_with_retries( + link, &known_limit_link_setting, + LINK_TRAINING_MAX_VERIFY_RETRY); + } else { + ASSERT(0); + } + + if (should_prepare_phy_clocks) + restore_phy_clocks_for_destructive_link_verification(link->dc); +} + +static void verify_link_capability_non_destructive(struct dc_link *link) +{ + if (dc_is_dp_signal(link->local_sink->sink_signal)) { + if (dc_is_embedded_signal(link->local_sink->sink_signal) || + link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + /* TODO - should we check link encoder's max link caps here? + * How do we know which link encoder to check from? 
+ */ + link->verified_link_cap = link->reported_link_cap; + else + link->verified_link_cap = dp_get_max_link_cap(link); + } +} + +static bool should_verify_link_capability_destructively(struct dc_link *link, + enum dc_detect_reason reason) +{ + bool destrictive = false; + struct dc_link_settings max_link_cap; + bool is_link_enc_unavailable = link->link_enc && + link->dc->res_pool->funcs->link_encs_assign && + !link_enc_cfg_is_link_enc_avail( + link->ctx->dc, + link->link_enc->preferred_engine, + link); + + if (dc_is_dp_signal(link->local_sink->sink_signal)) { + max_link_cap = dp_get_max_link_cap(link); + destrictive = true; + + if (link->dc->debug.skip_detection_link_training || + dc_is_embedded_signal(link->local_sink->sink_signal) || + link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + destrictive = false; + } else if (link_dp_get_encoding_format(&max_link_cap) == + DP_8b_10b_ENCODING) { + if (link->dpcd_caps.is_mst_capable || + is_link_enc_unavailable) { + destrictive = false; + } + } + } + + return destrictive; +} + +static void verify_link_capability(struct dc_link *link, struct dc_sink *sink, + enum dc_detect_reason reason) +{ + if (should_verify_link_capability_destructively(link, reason)) + verify_link_capability_destructive(link, sink, reason); + else + verify_link_capability_non_destructive(link); +} + +/** + * detect_link_and_local_sink() - Detect if a sink is attached to a given link + * + * link->local_sink is created or destroyed as needed. + * + * This does not create remote sinks. + */ +static bool detect_link_and_local_sink(struct dc_link *link, + enum dc_detect_reason reason) +{ + struct dc_sink_init_data sink_init_data = { 0 }; + struct display_sink_capability sink_caps = { 0 }; + uint32_t i; + bool converter_disable_audio = false; + struct audio_support *aud_support = &link->dc->res_pool->audio_support; + bool same_edid = false; + enum dc_edid_status edid_status; + struct dc_context *dc_ctx = link->ctx; + struct dc *dc = dc_ctx->dc; + struct dc_sink *sink = NULL; + struct dc_sink *prev_sink = NULL; + struct dpcd_caps prev_dpcd_caps; + enum dc_connection_type new_connection_type = dc_connection_none; + const uint32_t post_oui_delay = 30; // 30ms + + DC_LOGGER_INIT(link->ctx->logger); + + if (dc_is_virtual_signal(link->connector_signal)) + return false; + + if (((link->connector_signal == SIGNAL_TYPE_LVDS || + link->connector_signal == SIGNAL_TYPE_EDP) && + (!link->dc->config.allow_edp_hotplug_detection)) && + link->local_sink) { + // need to re-write OUI and brightness in resume case + if (link->connector_signal == SIGNAL_TYPE_EDP && + (link->dpcd_sink_ext_caps.bits.oled == 1)) { + dpcd_set_source_specific_data(link); + msleep(post_oui_delay); + set_default_brightness_aux(link); + //TODO: use cached + } + + return true; + } + + if (!dc_link_detect_connection_type(link, &new_connection_type)) { + BREAK_TO_DEBUGGER(); + return false; + } + + prev_sink = link->local_sink; + if (prev_sink) { + dc_sink_retain(prev_sink); + memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps)); + } + + link_disconnect_sink(link); + if (new_connection_type != dc_connection_none) { + link->type = new_connection_type; + link->link_state_valid = false; + + /* From Disconnected-to-Connected. 
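Editor's condensed restatement of the verification policy earlier in this file (illustrative only; the helper name is invented and this is not code from the patch): destructive verification, i.e. real link training during detection, is skipped whenever it could disturb a sink that must not be retrained.

    static bool needs_destructive_verify(bool skip_lt, bool embedded,
                                         bool dpia, bool is_8b10b,
                                         bool mst_capable, bool enc_unavailable)
    {
        if (skip_lt || embedded || dpia)
            return false;  /* never retrain embedded panels or DPIA tunnels */
        if (is_8b10b && (mst_capable || enc_unavailable))
            return false;  /* 8b/10b MST sinks keep the reported caps */
        return true;
    }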
*/ + switch (link->connector_signal) { + case SIGNAL_TYPE_HDMI_TYPE_A: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + if (aud_support->hdmi_audio_native) + sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; + else + sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + break; + } + + case SIGNAL_TYPE_DVI_SINGLE_LINK: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + break; + } + + case SIGNAL_TYPE_DVI_DUAL_LINK: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; + break; + } + + case SIGNAL_TYPE_LVDS: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_LVDS; + break; + } + + case SIGNAL_TYPE_EDP: { + detect_edp_sink_caps(link); + read_current_link_settings_on_detect(link); + + /* Disable power sequence on MIPI panel + converter + */ + if (dc->config.enable_mipi_converter_optimization && + dc_ctx->dce_version == DCN_VERSION_3_01 && + link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_0022B9 && + memcmp(&link->dpcd_caps.branch_dev_name, DP_SINK_BRANCH_DEV_NAME_7580, + sizeof(link->dpcd_caps.branch_dev_name)) == 0) { + dc->config.edp_no_power_sequencing = true; + + if (!link->dpcd_caps.set_power_state_capable_edp) + link->wa_flags.dp_keep_receiver_powered = true; + } + + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; + sink_caps.signal = SIGNAL_TYPE_EDP; + break; + } + + case SIGNAL_TYPE_DISPLAY_PORT: { + + /* wa HPD high coming too early*/ + if (link->ep_type == DISPLAY_ENDPOINT_PHY && + link->link_enc->features.flags.bits.DP_IS_USB_C == 1) { + + /* if alt mode times out, return false */ + if (!wait_for_entering_dp_alt_mode(link)) + return false; + } + + if (!detect_dp(link, &sink_caps, reason)) { + if (prev_sink) + dc_sink_release(prev_sink); + return false; + } + + /* Active SST downstream branch device unplug*/ + if (link->type == dc_connection_sst_branch && + link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { + if (prev_sink) + /* Downstream unplug */ + dc_sink_release(prev_sink); + return true; + } + + /* disable audio for non DP to HDMI active sst converter */ + if (link->type == dc_connection_sst_branch && + is_dp_active_dongle(link) && + (link->dpcd_caps.dongle_type != + DISPLAY_DONGLE_DP_HDMI_CONVERTER)) + converter_disable_audio = true; + break; + } + + default: + DC_ERROR("Invalid connector type! 
signal:%d\n",
+			 link->connector_signal);
+			if (prev_sink)
+				dc_sink_release(prev_sink);
+			return false;
+		} /* switch() */
+
+		if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
+			link->dpcd_sink_count =
+				link->dpcd_caps.sink_count.bits.SINK_COUNT;
+		else
+			link->dpcd_sink_count = 1;
+
+		set_ddc_transaction_type(link->ddc,
+				sink_caps.transaction_type);
+
+		link->aux_mode =
+			link_is_in_aux_transaction_mode(link->ddc);
+
+		sink_init_data.link = link;
+		sink_init_data.sink_signal = sink_caps.signal;
+
+		sink = dc_sink_create(&sink_init_data);
+		if (!sink) {
+			DC_ERROR("Failed to create sink!\n");
+			if (prev_sink)
+				dc_sink_release(prev_sink);
+			return false;
+		}
+
+		sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock;
+		sink->converter_disable_audio = converter_disable_audio;
+
+		/* dc_sink_create returns a new reference */
+		link->local_sink = sink;
+
+		edid_status = dm_helpers_read_local_edid(link->ctx,
+				link, sink);
+
+		switch (edid_status) {
+		case EDID_BAD_CHECKSUM:
+			DC_LOG_ERROR("EDID checksum invalid.\n");
+			break;
+		case EDID_PARTIAL_VALID:
+			DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n");
+			break;
+		case EDID_NO_RESPONSE:
+			DC_LOG_ERROR("No EDID read.\n");
+			/*
+			 * Abort detection for non-DP connectors if we have
+			 * no EDID
+			 *
+			 * DP needs to report as connected if HPD is high,
+			 * even if we have no EDID, in order to go to
+			 * fail-safe mode
+			 */
+			if (dc_is_hdmi_signal(link->connector_signal) ||
+			    dc_is_dvi_signal(link->connector_signal)) {
+				if (prev_sink)
+					dc_sink_release(prev_sink);
+
+				return false;
+			}
+
+			if (link->type == dc_connection_sst_branch &&
+					link->dpcd_caps.dongle_type ==
+						DISPLAY_DONGLE_DP_VGA_CONVERTER &&
+					reason == DETECT_REASON_HPDRX) {
+				/* Abort detection for DP-VGA adapters when EDID
+				 * can't be read and detection reason is VGA-side
+				 * hotplug
+				 */
+				if (prev_sink)
+					dc_sink_release(prev_sink);
+				link_disconnect_sink(link);
+
+				return true;
+			}
+
+			break;
+		default:
+			break;
+		}
+
+		// Check if edid is the same
+		if ((prev_sink) &&
+		    (edid_status == EDID_THE_SAME || edid_status == EDID_OK))
+			same_edid = is_same_edid(&prev_sink->dc_edid,
+					&sink->dc_edid);
+
+		if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
+			link->ctx->dc->debug.hdmi20_disable = true;
+
+		if (dc_is_hdmi_signal(link->connector_signal))
+			read_scdc_caps(link->ddc, link->local_sink);
+
+		if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+		    sink_caps.transaction_type ==
+		    DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+			/*
+			 * TODO debug why certain monitors don't like
+			 * two link trainings
+			 */
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+			query_hdcp_capability(sink->sink_signal, link);
+#endif
+		} else {
+			// If edid is the same, then discard new sink and revert to the original sink
+			if (same_edid) {
+				link_disconnect_remap(prev_sink, link);
+				sink = prev_sink;
+				prev_sink = NULL;
+			}
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+			query_hdcp_capability(sink->sink_signal, link);
+#endif
+		}
+
+		/* HDMI-DVI Dongle */
+		if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
+		    !sink->edid_caps.edid_hdmi)
+			sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+
+		if (link->local_sink && dc_is_dp_signal(sink_caps.signal))
+			dp_trace_init(link);
+
+		/* Connectivity log: detection */
+		for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) {
+			CONN_DATA_DETECT(link,
+					&sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
+					DC_EDID_BLOCK_SIZE,
+					"%s: [Block %d] ", sink->edid_caps.display_name, i);
+		}
+
+		DC_LOG_DETECTION_EDID_PARSER("%s: "
+			"manufacturer_id = %X, "
+			"product_id = %X, "
+			"serial_number = %X, "
+			"manufacture_week = %d, "
+			"manufacture_year = %d, "
+			"display_name = %s, "
+			"speaker_flag = %d, "
+			"audio_mode_count = %d\n",
+			__func__,
+			sink->edid_caps.manufacturer_id,
+			sink->edid_caps.product_id,
+			sink->edid_caps.serial_number,
+			sink->edid_caps.manufacture_week,
+			sink->edid_caps.manufacture_year,
+			sink->edid_caps.display_name,
+			sink->edid_caps.speaker_flags,
+			sink->edid_caps.audio_mode_count);
+
+		for (i = 0; i < sink->edid_caps.audio_mode_count; i++) {
+			DC_LOG_DETECTION_EDID_PARSER("%s: mode number = %d, "
+				"format_code = %d, "
+				"channel_count = %d, "
+				"sample_rate = %d, "
+				"sample_size = %d\n",
+				__func__,
+				i,
+				sink->edid_caps.audio_modes[i].format_code,
+				sink->edid_caps.audio_modes[i].channel_count,
+				sink->edid_caps.audio_modes[i].sample_rate,
+				sink->edid_caps.audio_modes[i].sample_size);
+		}
+
+		if (link->connector_signal == SIGNAL_TYPE_EDP) {
+			// Init dc_panel_config by HW config
+			if (dc_ctx->dc->res_pool->funcs->get_panel_config_defaults)
+				dc_ctx->dc->res_pool->funcs->get_panel_config_defaults(&link->panel_config);
+			// Pickup base DM settings
+			dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
+			// Override dc_panel_config if system has specific settings
+			dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
+		}
+
+	} else {
+		/* From Connected-to-Disconnected. */
+		link->type = dc_connection_none;
+		sink_caps.signal = SIGNAL_TYPE_NONE;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+		memset(&link->hdcp_caps, 0, sizeof(struct hdcp_caps));
+#endif
+		/* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk
+		 * is not cleared. If we emulate a DP signal on this connection, it thinks
+		 * the dongle is still there and limits the number of modes we can emulate.
+		 * Clear dongle_max_pix_clk on disconnect to fix this
+		 */
+		link->dongle_max_pix_clk = 0;
+
+		dc_link_clear_dprx_states(link);
+		dp_trace_reset(link);
+	}
+
+	LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n",
+		  link->link_index, sink,
+		  (sink_caps.signal ==
+		   SIGNAL_TYPE_NONE ? "Disconnected" : "Connected"),
+		  prev_sink, same_edid);
+
+	if (prev_sink)
+		dc_sink_release(prev_sink);
+
+	return true;
+}
+
+/**
+ * link_detect_connection_type() - Determine if there is a sink connected
+ * @link: link to check for a connected sink
+ * @type: Returned connection type
+ *
+ * Does not detect downstream devices, such as MST sinks
+ * or displays connected through active dongles.
+ */
+bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type *type)
+{
+	uint32_t is_hpd_high = 0;
+
+	if (link->connector_signal == SIGNAL_TYPE_LVDS) {
+		*type = dc_connection_single;
+		return true;
+	}
+
+	if (link->connector_signal == SIGNAL_TYPE_EDP) {
+		/* in case it is not on */
+		if (!link->dc->config.edp_no_power_sequencing)
+			link->dc->hwss.edp_power_control(link, true);
+		link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+	}
+
+	/* Link may not have a physical HPD pin. */
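Links without a physical HPD pin (USB4 DPIA endpoints) resolve their connection state through a tunneling query rather than a GPIO, which is what the branch just below does. A simplified model, with placeholder names standing in for the DPIA query:

    #include <stdbool.h>

    enum conn { CONN_NONE, CONN_SINGLE };

    /* Placeholder for dc_link_dpia_query_hpd_status(): asks the tunnel driver
     * whether the virtual HPD for this DPIA link is asserted. */
    extern bool dpia_query_hpd(int link_index);

    static enum conn dpia_connection_type(int link_index, bool hpd_pending)
    {
        /* A pending HPD interrupt means the state is about to change; treat
         * the link as disconnected until the pending event is processed. */
        if (hpd_pending || !dpia_query_hpd(link_index))
            return CONN_NONE;
        return CONN_SINGLE;
    }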
*/ + if (link->ep_type != DISPLAY_ENDPOINT_PHY) { + if (link->is_hpd_pending || !dc_link_dpia_query_hpd_status(link)) + *type = dc_connection_none; + else + *type = dc_connection_single; + + return true; + } + + + if (!query_hpd_status(link, &is_hpd_high)) + goto hpd_gpio_failure; + + if (is_hpd_high) { + *type = dc_connection_single; + /* TODO: need to do the actual detection */ + } else { + *type = dc_connection_none; + } + + return true; + +hpd_gpio_failure: + return false; +} + +bool link_detect(struct dc_link *link, enum dc_detect_reason reason) +{ + bool is_local_sink_detect_success; + bool is_delegated_to_mst_top_mgr = false; + enum dc_connection_type pre_link_type = link->type; + + is_local_sink_detect_success = detect_link_and_local_sink(link, reason); + + if (is_local_sink_detect_success && link->local_sink) + verify_link_capability(link, link->local_sink, reason); + + if (is_local_sink_detect_success && link->local_sink && + dc_is_dp_signal(link->local_sink->sink_signal) && + link->dpcd_caps.is_mst_capable) + is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason); + + if (is_local_sink_detect_success && + pre_link_type == dc_connection_mst_branch && + link->type != dc_connection_mst_branch) + is_delegated_to_mst_top_mgr = link_reset_cur_dp_mst_topology(link); + + return is_local_sink_detect_success && !is_delegated_to_mst_top_mgr; +} + +void link_clear_dprx_states(struct dc_link *link) +{ + memset(&link->dprx_states, 0, sizeof(link->dprx_states)); +} +#if defined(CONFIG_DRM_AMD_DC_HDCP) + +bool link_is_hdcp14(struct dc_link *link, enum signal_type signal) +{ + bool ret = false; + + switch (signal) { + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_DISPLAY_PORT_MST: + ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE; + break; + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + /* HDMI doesn't tell us its HDCP(1.4) capability, so assume to always be capable, + * we can poll for bksv but some displays have an issue with this. Since its so rare + * for a display to not be 1.4 capable, this assumtion is ok + */ + ret = true; + break; + default: + break; + } + return ret; +} + +bool link_is_hdcp22(struct dc_link *link, enum signal_type signal) +{ + bool ret = false; + + switch (signal) { + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_DISPLAY_PORT_MST: + ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE && + link->hdcp_caps.rx_caps.fields.byte0.hdcp_capable && + (link->hdcp_caps.rx_caps.fields.version == 0x2)) ? 1 : 0; + break; + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + ret = (link->hdcp_caps.rx_caps.fields.version == 0x4) ? 1:0; + break; + default: + break; + } + + return ret; +} +#endif // CONFIG_DRM_AMD_DC_HDCP + +const struct dc_link_status *link_get_status(const struct dc_link *link) +{ + return &link->link_status; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.h b/drivers/gpu/drm/amd/display/dc/link/link_detection.h new file mode 100644 index 00000000000000..1831636516fb9e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.h @@ -0,0 +1,30 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DETECTION_H__ +#define __DC_LINK_DETECTION_H__ +#include "link.h" + +#endif /* __DC_LINK_DETECTION_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c new file mode 100644 index 00000000000000..257e1c3ba00abc --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -0,0 +1,2528 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file owns the programming sequence of stream's dpms state associated + * with the link and link's enable/disable sequences as result of the stream's + * dpms state change. + * + * TODO - The reason link owns stream's dpms programming sequence is + * because dpms programming sequence is highly dependent on underlying signal + * specific link protocols. This unfortunately causes link to own a portion of + * stream state programming sequence. This creates a gray area where the + * boundary between link and stream is not clearly defined. 
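To make that ownership concrete: nearly every sequence in this file branches on the link's signal type, which is why the dpms programming lives on the link side of the boundary. An illustrative skeleton only, not code from this patch:

    /* Illustrative: signal-specific branches like these are what tie the
     * dpms sequence to the link rather than to the stream. */
    enum example_signal { EX_SIG_HDMI, EX_SIG_DP_SST, EX_SIG_EDP };

    static void example_enable_link_output(enum example_signal s)
    {
        switch (s) {
        case EX_SIG_HDMI:
            /* TMDS path: retimer/redriver programming, no link training */
            break;
        case EX_SIG_DP_SST:
            /* DP path: link training, then payload/VCP setup */
            break;
        case EX_SIG_EDP:
            /* eDP path: panel power sequencing around link training */
            break;
        }
    }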
+ */ + +#include "link_dpms.h" +#include "link_hwss.h" +#include "accessories/link_fpga.h" +#include "accessories/link_dp_trace.h" +#include "protocols/link_dpcd.h" +#include "protocols/link_ddc.h" +#include "protocols/link_hpd.h" +#include "protocols/link_dp_phy.h" +#include "protocols/link_dp_capability.h" +#include "protocols/link_dp_training.h" +#include "protocols/link_edp_panel_control.h" + +#include "dm_helpers.h" +#include "link_enc_cfg.h" +#include "resource.h" +#include "dsc.h" +#include "dccg.h" +#include "clk_mgr.h" +#include "atomfirmware.h" +#define DC_LOGGER_INIT(logger) + +#define LINK_INFO(...) \ + DC_LOG_HW_HOTPLUG( \ + __VA_ARGS__) + +#define RETIMER_REDRIVER_INFO(...) \ + DC_LOG_RETIMER_REDRIVER( \ + __VA_ARGS__) +#include "dc/dcn30/dcn30_vpg.h" + +#define MAX_MTP_SLOT_COUNT 64 +#define LINK_TRAINING_ATTEMPTS 4 +#define PEAK_FACTOR_X1000 1006 + +void link_blank_all_dp_displays(struct dc *dc) +{ + unsigned int i; + uint8_t dpcd_power_state = '\0'; + enum dc_status status = DC_ERROR_UNEXPECTED; + + for (i = 0; i < dc->link_count; i++) { + if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) || + (dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL)) + continue; + + /* DP 2.0 spec requires that we read LTTPR caps first */ + dp_retrieve_lttpr_cap(dc->links[i]); + /* if any of the displays are lit up turn them off */ + status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, + &dpcd_power_state, sizeof(dpcd_power_state)); + + if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) + link_blank_dp_stream(dc->links[i], true); + } + +} + +void link_blank_all_edp_displays(struct dc *dc) +{ + unsigned int i; + uint8_t dpcd_power_state = '\0'; + enum dc_status status = DC_ERROR_UNEXPECTED; + + for (i = 0; i < dc->link_count; i++) { + if ((dc->links[i]->connector_signal != SIGNAL_TYPE_EDP) || + (!dc->links[i]->edp_sink_present)) + continue; + + /* if any of the displays are lit up turn them off */ + status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, + &dpcd_power_state, sizeof(dpcd_power_state)); + + if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) + link_blank_dp_stream(dc->links[i], true); + } +} + +void link_blank_dp_stream(struct dc_link *link, bool hw_init) +{ + unsigned int j; + struct dc *dc = link->ctx->dc; + enum signal_type signal = link->connector_signal; + + if ((signal == SIGNAL_TYPE_EDP) || + (signal == SIGNAL_TYPE_DISPLAY_PORT)) { + if (link->ep_type == DISPLAY_ENDPOINT_PHY && + link->link_enc->funcs->get_dig_frontend && + link->link_enc->funcs->is_dig_enabled(link->link_enc)) { + unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc); + + if (fe != ENGINE_ID_UNKNOWN) + for (j = 0; j < dc->res_pool->stream_enc_count; j++) { + if (fe == dc->res_pool->stream_enc[j]->id) { + dc->res_pool->stream_enc[j]->funcs->dp_blank(link, + dc->res_pool->stream_enc[j]); + break; + } + } + } + + if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init) + dc_link_dp_receiver_power_ctrl(link, false); + } +} + +void link_set_all_streams_dpms_off_for_link(struct dc_link *link) +{ + struct pipe_ctx *pipes[MAX_PIPES]; + struct dc_state *state = link->dc->current_state; + uint8_t count; + int i; + struct dc_stream_update stream_update; + bool dpms_off = true; + struct link_resource link_res = {0}; + + memset(&stream_update, 0, sizeof(stream_update)); + stream_update.dpms_off = &dpms_off; + + link_get_master_pipes_with_dpms_on(link, state, &count, pipes); + + for (i = 0; i < count; i++) { + stream_update.stream = 
pipes[i]->stream;
+		dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
+				pipes[i]->stream, &stream_update,
+				state);
+	}
+
+	/* link can also be enabled by vbios. In this case it is not recorded
+	 * in pipe_ctx. Disable link phy here to make sure it is completely off
+	 */
+	dp_disable_link_phy(link, &link_res, link->connector_signal);
+}
+
+void link_resume(struct dc_link *link)
+{
+	if (link->connector_signal != SIGNAL_TYPE_VIRTUAL)
+		program_hpd_filter(link);
+}
+
+/* This function returns true if the pipe is used to feed the video signal
+ * directly to the link.
+ */
+static bool is_master_pipe_for_link(const struct dc_link *link,
+		const struct pipe_ctx *pipe)
+{
+	return (pipe->stream &&
+			pipe->stream->link &&
+			pipe->stream->link == link &&
+			pipe->top_pipe == NULL &&
+			pipe->prev_odm_pipe == NULL);
+}
+
+/*
+ * This function finds all master pipes feeding the given link that have dpms
+ * set to on in the given dc state.
+ */
+void link_get_master_pipes_with_dpms_on(const struct dc_link *link,
+		struct dc_state *state,
+		uint8_t *count,
+		struct pipe_ctx *pipes[MAX_PIPES])
+{
+	int i;
+	struct pipe_ctx *pipe = NULL;
+
+	*count = 0;
+	for (i = 0; i < MAX_PIPES; i++) {
+		pipe = &state->res_ctx.pipe_ctx[i];
+
+		if (is_master_pipe_for_link(link, pipe) &&
+				pipe->stream->dpms_off == false) {
+			pipes[(*count)++] = pipe;
+		}
+	}
+}
+
+static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
+		enum engine_id eng_id,
+		struct ext_hdmi_settings *settings)
+{
+	bool result = false;
+	int i = 0;
+	struct integrated_info *integrated_info =
+			pipe_ctx->stream->ctx->dc_bios->integrated_info;
+
+	if (integrated_info == NULL)
+		return false;
+
+	/*
+	 * Get retimer settings from sbios for passing SI eye test for DCE11
+	 * The setting values vary based on board revision and port id,
+	 * therefore the setting values for each port are passed by sbios.
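The table handed back by sbios is validated before use; the checks applied in the code below can be summarized as the following stand-alone sketch (the struct layout is an illustrative stand-in for ext_hdmi_settings):

    #include <stdint.h>
    #include <stdbool.h>

    struct retimer_reg { uint8_t i2c_reg_index; uint8_t i2c_reg_val; };

    struct retimer_table {
        uint8_t slv_addr;
        uint8_t reg_num;    /* number of 3G entries, at most 9 */
        uint8_t reg_num_6g; /* number of 6G entries, at most 3 */
        struct retimer_reg reg_settings[9];
        struct retimer_reg reg_settings_6g[3];
    };

    static bool retimer_table_is_sane(const struct retimer_table *t)
    {
        int i;

        if (t->slv_addr == 0 || t->reg_num > 9 || t->reg_num_6g > 3)
            return false;
        for (i = 0; i < t->reg_num; i++)
            if (t->reg_settings[i].i2c_reg_index > 0x20)
                return false;
        for (i = 0; i < t->reg_num_6g; i++)
            if (t->reg_settings_6g[i].i2c_reg_index > 0x20)
                return false;
        return true;
    }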
+ */ + + // Check if current bios contains ext Hdmi settings + if (integrated_info->gpu_cap_info & 0x20) { + switch (eng_id) { + case ENGINE_ID_DIGA: + settings->slv_addr = integrated_info->dp0_ext_hdmi_slv_addr; + settings->reg_num = integrated_info->dp0_ext_hdmi_6g_reg_num; + settings->reg_num_6g = integrated_info->dp0_ext_hdmi_6g_reg_num; + memmove(settings->reg_settings, + integrated_info->dp0_ext_hdmi_reg_settings, + sizeof(integrated_info->dp0_ext_hdmi_reg_settings)); + memmove(settings->reg_settings_6g, + integrated_info->dp0_ext_hdmi_6g_reg_settings, + sizeof(integrated_info->dp0_ext_hdmi_6g_reg_settings)); + result = true; + break; + case ENGINE_ID_DIGB: + settings->slv_addr = integrated_info->dp1_ext_hdmi_slv_addr; + settings->reg_num = integrated_info->dp1_ext_hdmi_6g_reg_num; + settings->reg_num_6g = integrated_info->dp1_ext_hdmi_6g_reg_num; + memmove(settings->reg_settings, + integrated_info->dp1_ext_hdmi_reg_settings, + sizeof(integrated_info->dp1_ext_hdmi_reg_settings)); + memmove(settings->reg_settings_6g, + integrated_info->dp1_ext_hdmi_6g_reg_settings, + sizeof(integrated_info->dp1_ext_hdmi_6g_reg_settings)); + result = true; + break; + case ENGINE_ID_DIGC: + settings->slv_addr = integrated_info->dp2_ext_hdmi_slv_addr; + settings->reg_num = integrated_info->dp2_ext_hdmi_6g_reg_num; + settings->reg_num_6g = integrated_info->dp2_ext_hdmi_6g_reg_num; + memmove(settings->reg_settings, + integrated_info->dp2_ext_hdmi_reg_settings, + sizeof(integrated_info->dp2_ext_hdmi_reg_settings)); + memmove(settings->reg_settings_6g, + integrated_info->dp2_ext_hdmi_6g_reg_settings, + sizeof(integrated_info->dp2_ext_hdmi_6g_reg_settings)); + result = true; + break; + case ENGINE_ID_DIGD: + settings->slv_addr = integrated_info->dp3_ext_hdmi_slv_addr; + settings->reg_num = integrated_info->dp3_ext_hdmi_6g_reg_num; + settings->reg_num_6g = integrated_info->dp3_ext_hdmi_6g_reg_num; + memmove(settings->reg_settings, + integrated_info->dp3_ext_hdmi_reg_settings, + sizeof(integrated_info->dp3_ext_hdmi_reg_settings)); + memmove(settings->reg_settings_6g, + integrated_info->dp3_ext_hdmi_6g_reg_settings, + sizeof(integrated_info->dp3_ext_hdmi_6g_reg_settings)); + result = true; + break; + default: + break; + } + + if (result == true) { + // Validate settings from bios integrated info table + if (settings->slv_addr == 0) + return false; + if (settings->reg_num > 9) + return false; + if (settings->reg_num_6g > 3) + return false; + + for (i = 0; i < settings->reg_num; i++) { + if (settings->reg_settings[i].i2c_reg_index > 0x20) + return false; + } + + for (i = 0; i < settings->reg_num_6g; i++) { + if (settings->reg_settings_6g[i].i2c_reg_index > 0x20) + return false; + } + } + } + + return result; +} + +static bool write_i2c(struct pipe_ctx *pipe_ctx, + uint8_t address, uint8_t *buffer, uint32_t length) +{ + struct i2c_command cmd = {0}; + struct i2c_payload payload = {0}; + + memset(&payload, 0, sizeof(payload)); + memset(&cmd, 0, sizeof(cmd)); + + cmd.number_of_payloads = 1; + cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; + cmd.speed = pipe_ctx->stream->ctx->dc->caps.i2c_speed_in_khz; + + payload.address = address; + payload.data = buffer; + payload.length = length; + payload.write = true; + cmd.payloads = &payload; + + if (dm_helpers_submit_i2c(pipe_ctx->stream->ctx, + pipe_ctx->stream->link, &cmd)) + return true; + + return false; +} + +static void write_i2c_retimer_setting( + struct pipe_ctx *pipe_ctx, + bool is_vga_mode, + bool is_over_340mhz, + struct ext_hdmi_settings *settings) +{ + uint8_t 
slave_address = (settings->slv_addr >> 1);
+	uint8_t buffer[2];
+	const uint8_t apply_rx_tx_change = 0x4;
+	uint8_t offset = 0xA;
+	uint8_t value = 0;
+	int i = 0;
+	bool i2c_success = false;
+	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+	memset(&buffer, 0, sizeof(buffer));
+
+	/* Start Ext-Hdmi programming */
+
+	for (i = 0; i < settings->reg_num; i++) {
+		/* Apply 3G settings */
+		if (settings->reg_settings[i].i2c_reg_index <= 0x20) {
+
+			buffer[0] = settings->reg_settings[i].i2c_reg_index;
+			buffer[1] = settings->reg_settings[i].i2c_reg_val;
+			i2c_success = write_i2c(pipe_ctx, slave_address,
+					buffer, sizeof(buffer));
+			RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
+				offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
+				slave_address, buffer[0], buffer[1], i2c_success?1:0);
+
+			if (!i2c_success)
+				goto i2c_write_fail;
+
+			/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+			 * needs to be set to 1 on every 0xA-0xC write.
+			 */
+			if (settings->reg_settings[i].i2c_reg_index == 0xA ||
+					settings->reg_settings[i].i2c_reg_index == 0xB ||
+					settings->reg_settings[i].i2c_reg_index == 0xC) {
+
+				/* Query current value from offset 0xA */
+				if (settings->reg_settings[i].i2c_reg_index == 0xA)
+					value = settings->reg_settings[i].i2c_reg_val;
+				else {
+					i2c_success =
+						link_query_ddc_data(
+						pipe_ctx->stream->link->ddc,
+						slave_address, &offset, 1, &value, 1);
+					if (!i2c_success)
+						goto i2c_write_fail;
+				}
+
+				buffer[0] = offset;
+				/* Set APPLY_RX_TX_CHANGE bit to 1 */
+				buffer[1] = value | apply_rx_tx_change;
+				i2c_success = write_i2c(pipe_ctx, slave_address,
+						buffer, sizeof(buffer));
+				RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
+					offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+					slave_address, buffer[0], buffer[1], i2c_success?1:0);
+				if (!i2c_success)
+					goto i2c_write_fail;
+			}
+		}
+	}
+
+	/* Apply 6G settings when above 340MHz */
+	if (is_over_340mhz) {
+		for (i = 0; i < settings->reg_num_6g; i++) {
+			/* Apply 6G settings */
+			if (settings->reg_settings_6g[i].i2c_reg_index <= 0x20) {
+
+				buffer[0] = settings->reg_settings_6g[i].i2c_reg_index;
+				buffer[1] = settings->reg_settings_6g[i].i2c_reg_val;
+				i2c_success = write_i2c(pipe_ctx, slave_address,
+						buffer, sizeof(buffer));
+				RETIMER_REDRIVER_INFO("above 340Mhz: retimer write to slave_address = 0x%x,\
+					offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+					slave_address, buffer[0], buffer[1], i2c_success?1:0);
+
+				if (!i2c_success)
+					goto i2c_write_fail;
+
+				/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+				 * needs to be set to 1 on every 0xA-0xC write.
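The rule described in this comment amounts to a read-modify-write of register 0x0A after any write to 0x0A-0x0C. A self-contained sketch of just that rule, where read_reg()/write_reg() are hypothetical I2C helpers standing in for link_query_ddc_data()/write_i2c():

    #include <stdint.h>
    #include <stdbool.h>

    extern bool read_reg(uint8_t slave, uint8_t offset, uint8_t *val);
    extern bool write_reg(uint8_t slave, uint8_t offset, uint8_t val);

    static bool dp159_write_and_apply(uint8_t slave, uint8_t offset, uint8_t val)
    {
        uint8_t reg0a;

        if (!write_reg(slave, offset, val))
            return false;
        if (offset < 0x0A || offset > 0x0C)
            return true; /* no latch needed for other registers */

        /* Read-modify-write 0x0A with APPLY_RX_TX_CHANGE (bit 2) set. */
        if (offset == 0x0A)
            reg0a = val; /* we just wrote 0x0A; skip the readback */
        else if (!read_reg(slave, 0x0A, &reg0a))
            return false;
        return write_reg(slave, 0x0A, reg0a | 0x04);
    }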
+ */ + if (settings->reg_settings_6g[i].i2c_reg_index == 0xA || + settings->reg_settings_6g[i].i2c_reg_index == 0xB || + settings->reg_settings_6g[i].i2c_reg_index == 0xC) { + + /* Query current value from offset 0xA */ + if (settings->reg_settings_6g[i].i2c_reg_index == 0xA) + value = settings->reg_settings_6g[i].i2c_reg_val; + else { + i2c_success = + link_query_ddc_data( + pipe_ctx->stream->link->ddc, + slave_address, &offset, 1, &value, 1); + if (!i2c_success) + goto i2c_write_fail; + } + + buffer[0] = offset; + /* Set APPLY_RX_TX_CHANGE bit to 1 */ + buffer[1] = value | apply_rx_tx_change; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + } + } + } + } + + if (is_vga_mode) { + /* Program additional settings if using 640x480 resolution */ + + /* Write offset 0xFF to 0x01 */ + buffer[0] = 0xff; + buffer[1] = 0x01; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0x00 to 0x23 */ + buffer[0] = 0x00; + buffer[1] = 0x23; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0xff to 0x00 */ + buffer[0] = 0xff; + buffer[1] = 0x00; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + } + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set retimer failed"); +} + +static void write_i2c_default_retimer_setting( + struct pipe_ctx *pipe_ctx, + bool is_vga_mode, + bool is_over_340mhz) +{ + uint8_t slave_address = (0xBA >> 1); + uint8_t buffer[2]; + bool i2c_success = false; + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + + memset(&buffer, 0, sizeof(buffer)); + + /* Program Slave Address for tuning single integrity */ + /* Write offset 0x0A to 0x13 */ + buffer[0] = 0x0A; + buffer[1] = 0x13; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer writes default setting to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0x0A to 0x17 */ + buffer[0] = 0x0A; + buffer[1] = 0x17; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0x0B to 0xDA or 0xD8 */ + buffer[0] = 0x0B; + buffer[1] = is_over_340mhz ? 
0xDA : 0xD8; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0x0A to 0x17 */ + buffer[0] = 0x0A; + buffer[1] = 0x17; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0x0C to 0x1D or 0x91 */ + buffer[0] = 0x0C; + buffer[1] = is_over_340mhz ? 0x1D : 0x91; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0x0A to 0x17 */ + buffer[0] = 0x0A; + buffer[1] = 0x17; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + + if (is_vga_mode) { + /* Program additional settings if using 640x480 resolution */ + + /* Write offset 0xFF to 0x01 */ + buffer[0] = 0xff; + buffer[1] = 0x01; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0x00 to 0x23 */ + buffer[0] = 0x00; + buffer[1] = 0x23; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0xff to 0x00 */ + buffer[0] = 0xff; + buffer[1] = 0x00; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write default setting to slave_addr = 0x%x,\ + offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + } + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set default retimer failed"); +} + +static void write_i2c_redriver_setting( + struct pipe_ctx *pipe_ctx, + bool is_over_340mhz) +{ + uint8_t slave_address = (0xF0 >> 1); + uint8_t buffer[16]; + bool i2c_success = false; + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + + memset(&buffer, 0, sizeof(buffer)); + + // Program Slave Address for tuning single integrity + buffer[3] = 0x4E; + buffer[4] = 0x4E; + buffer[5] = 0x4E; + buffer[6] = is_over_340mhz ? 
0x4E : 0x4A; + + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("redriver write 0 to all 16 reg offset expect following:\n\ + \t slave_addr = 0x%x, offset[3] = 0x%x, offset[4] = 0x%x,\ + offset[5] = 0x%x,offset[6] is_over_340mhz = 0x%x,\ + i2c_success = %d\n", + slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0); + + if (!i2c_success) + DC_LOG_DEBUG("Set redriver failed"); +} +#if defined(CONFIG_DRM_AMD_DC_HDCP) + +static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) +{ + struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; + struct link_encoder *link_enc = NULL; + struct cp_psp_stream_config config = {0}; + enum dp_panel_mode panel_mode = + dp_get_panel_mode(pipe_ctx->stream->link); + + if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL) + return; + + link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); + ASSERT(link_enc); + if (link_enc == NULL) + return; + + /* otg instance */ + config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; + + /* dig front end */ + config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst; + + /* stream encoder index */ + config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; + if (link_is_dp_128b_132b_signal(pipe_ctx)) + config.stream_enc_idx = + pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0; + + /* dig back end */ + config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; + + /* link encoder index */ + config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + if (link_is_dp_128b_132b_signal(pipe_ctx)) + config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst; + + /* dio output index is dpia index for DPIA endpoint & dcio index by default */ + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + config.dio_output_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1; + else + config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + + + /* phy index */ + config.phy_idx = resource_transmitter_to_phy_idx( + pipe_ctx->stream->link->dc, link_enc->transmitter); + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + /* USB4 DPIA doesn't use PHY in our soc, initialize it to 0 */ + config.phy_idx = 0; + + /* stream properties */ + config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0; + config.mst_enabled = (pipe_ctx->stream->signal == + SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0; + config.dp2_enabled = link_is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0; + config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? 
+ 1 : 0; + config.dpms_off = dpms_off; + + /* dm stream context */ + config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; + + cp_psp->funcs.update_stream_config(cp_psp->handle, &config); +} +#endif + +static void set_avmute(struct pipe_ctx *pipe_ctx, bool enable) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + + if (!dc_is_hdmi_signal(pipe_ctx->stream->signal)) + return; + + dc->hwss.set_avmute(pipe_ctx, enable); +} + +static void enable_mst_on_sink(struct dc_link *link, bool enable) +{ + unsigned char mstmCntl; + + core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1); + if (enable) + mstmCntl |= DP_MST_EN; + else + mstmCntl &= (~DP_MST_EN); + + core_link_write_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1); +} + +static void dsc_optc_config_log(struct display_stream_compressor *dsc, + struct dsc_optc_config *config) +{ + uint32_t precision = 1 << 28; + uint32_t bytes_per_pixel_int = config->bytes_per_pixel / precision; + uint32_t bytes_per_pixel_mod = config->bytes_per_pixel % precision; + uint64_t ll_bytes_per_pix_fraq = bytes_per_pixel_mod; + DC_LOGGER_INIT(dsc->ctx->logger); + + /* 7 fractional digits decimal precision for bytes per pixel is enough because DSC + * bits per pixel precision is 1/16th of a pixel, which means bytes per pixel precision is + * 1/16/8 = 1/128 of a byte, or 0.0078125 decimal + */ + ll_bytes_per_pix_fraq *= 10000000; + ll_bytes_per_pix_fraq /= precision; + + DC_LOG_DSC("\tbytes_per_pixel 0x%08x (%d.%07d)", + config->bytes_per_pixel, bytes_per_pixel_int, (uint32_t)ll_bytes_per_pix_fraq); + DC_LOG_DSC("\tis_pixel_format_444 %d", config->is_pixel_format_444); + DC_LOG_DSC("\tslice_width %d", config->slice_width); +} + +static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dc_stream_state *stream = pipe_ctx->stream; + bool result = false; + + if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) + result = true; + else + result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable); + return result; +} + +/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first, + * i.e. after dp_enable_dsc_on_rx() had been called + */ +void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) +{ + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dc_stream_state *stream = pipe_ctx->stream; + struct pipe_ctx *odm_pipe; + int opp_cnt = 1; + DC_LOGGER_INIT(dsc->ctx->logger); + + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + opp_cnt++; + + if (enable) { + struct dsc_config dsc_cfg; + struct dsc_optc_config dsc_optc_cfg; + enum optc_dsc_mode optc_dsc_mode; + + /* Enable DSC hw block */ + dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; + dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; + dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; + dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
true : false;
+		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+		ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
+		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+
+		dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
+		dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
+		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+			struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
+
+			odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
+			odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
+		}
+		dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
+		dsc_cfg.pic_width *= opp_cnt;
+
+		optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
+
+		/* Enable DSC in encoder */
+		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)
+				&& !link_is_dp_128b_132b_signal(pipe_ctx)) {
+			DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
+			dsc_optc_config_log(dsc, &dsc_optc_cfg);
+			pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
+					optc_dsc_mode,
+					dsc_optc_cfg.bytes_per_pixel,
+					dsc_optc_cfg.slice_width);
+
+			/* PPS SDP is set elsewhere because it has to be done after DIG FE is connected to DIG BE */
+		}
+
+		/* Enable DSC in OPTC */
+		DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+		dsc_optc_config_log(dsc, &dsc_optc_cfg);
+		pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
+				optc_dsc_mode,
+				dsc_optc_cfg.bytes_per_pixel,
+				dsc_optc_cfg.slice_width);
+	} else {
+		/* disable DSC in OPTC */
+		pipe_ctx->stream_res.tg->funcs->set_dsc_config(
+				pipe_ctx->stream_res.tg,
+				OPTC_DSC_DISABLED, 0, 0);
+
+		/* disable DSC in stream encoder */
+		if (dc_is_dp_signal(stream->signal)) {
+			if (link_is_dp_128b_132b_signal(pipe_ctx))
+				pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+						pipe_ctx->stream_res.hpo_dp_stream_enc,
+						false,
+						NULL,
+						true);
+			else if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+				pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
+						pipe_ctx->stream_res.stream_enc,
+						OPTC_DSC_DISABLED, 0, 0);
+				pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+						pipe_ctx->stream_res.stream_enc, false, NULL, true);
+			}
+		}
+
+		/* disable DSC block */
+		pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
+		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+			odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
+	}
+}
+
+/*
+ * For the dynamic bpp change case, dsc is programmed with MASTER_UPDATE_LOCK
+ * enabled; hence the PPS info packet update needs to use a frame update
+ * instead of an immediate update. The parameter immediate_update was added
+ * for this purpose.
+ * The decision to use a frame update is hard-coded in function
+ * dp_update_dsc_config(), which is the only place where a "false" is passed
+ * in for param immediate_update.
+ *
+ * immediate_update is only applicable when DSC is enabled.
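A toy model of the immediate-versus-frame update distinction this comment describes: when the PPS packet must change together with a locked bpp update, the new packet is latched and takes effect at the next frame boundary rather than right away. Names and layout below are illustrative only:

    #include <stdbool.h>
    #include <stdint.h>

    struct pps_regs {
        uint8_t active[128];  /* what the hardware is currently sending */
        uint8_t pending[128]; /* latched copy, applied at the frame boundary */
        bool pending_valid;
    };

    static void pps_update(struct pps_regs *r, const uint8_t pps[128],
                           bool immediate_update)
    {
        int i;

        for (i = 0; i < 128; i++)
            r->pending[i] = pps[i];

        if (immediate_update) {
            for (i = 0; i < 128; i++)
                r->active[i] = r->pending[i]; /* apply now */
        } else {
            r->pending_valid = true; /* applied on the next frame update */
        }
    }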
+ */ +bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update) +{ + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + struct dc_stream_state *stream = pipe_ctx->stream; + DC_LOGGER_INIT(dsc->ctx->logger); + + if (!pipe_ctx->stream->timing.flags.DSC || !dsc) + return false; + + if (enable) { + struct dsc_config dsc_cfg; + uint8_t dsc_packed_pps[128]; + + memset(&dsc_cfg, 0, sizeof(dsc_cfg)); + memset(dsc_packed_pps, 0, 128); + + /* Enable DSC hw block */ + dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; + dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; + dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; + dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; + dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; + + dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]); + memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps)); + if (dc_is_dp_signal(stream->signal)) { + DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id); + if (link_is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.hpo_dp_stream_enc, + true, + &dsc_packed_pps[0], + immediate_update); + else + pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.stream_enc, + true, + &dsc_packed_pps[0], + immediate_update); + } + } else { + /* disable DSC PPS in stream encoder */ + memset(&stream->dsc_packed_pps[0], 0, sizeof(stream->dsc_packed_pps)); + if (dc_is_dp_signal(stream->signal)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.hpo_dp_stream_enc, + false, + NULL, + true); + else + pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.stream_enc, false, NULL, true); + } + } + + return true; +} + +bool link_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable) +{ + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + bool result = false; + + if (!pipe_ctx->stream->timing.flags.DSC) + goto out; + if (!dsc) + goto out; + + if (enable) { + { + link_set_dsc_on_stream(pipe_ctx, true); + result = true; + } + } else { + dp_set_dsc_on_rx(pipe_ctx, false); + link_set_dsc_on_stream(pipe_ctx, false); + result = true; + } +out: + return result; +} + +bool link_update_dsc_config(struct pipe_ctx *pipe_ctx) +{ + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + + if (!pipe_ctx->stream->timing.flags.DSC) + return false; + if (!dsc) + return false; + + link_set_dsc_on_stream(pipe_ctx, true); + link_set_dsc_pps_packet(pipe_ctx, true, false); + return true; +} + +static void enable_stream_features(struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + + if (pipe_ctx->stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) { + struct dc_link *link = stream->link; + union down_spread_ctrl old_downspread; + union down_spread_ctrl new_downspread; + + memset(&old_downspread, 0, sizeof(old_downspread)); + + core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL, + &old_downspread.raw, sizeof(old_downspread)); + + new_downspread.raw = old_downspread.raw; + + new_downspread.bits.IGNORE_MSA_TIMING_PARAM = + 
(stream->ignore_msa_timing_param) ? 1 : 0; + + if (new_downspread.raw != old_downspread.raw) { + core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, + &new_downspread.raw, sizeof(new_downspread)); + } + + } else { + dm_helpers_mst_enable_stream_features(stream); + } +} + +static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp) +{ + const uint32_t VCP_Y_PRECISION = 1000; + uint64_t vcp_x, vcp_y; + DC_LOGGER_INIT(link->ctx->logger); + + // Add 0.5*(1/VCP_Y_PRECISION) to round up to decimal precision + avg_time_slots_per_mtp = dc_fixpt_add( + avg_time_slots_per_mtp, + dc_fixpt_from_fraction( + 1, + 2*VCP_Y_PRECISION)); + + vcp_x = dc_fixpt_floor( + avg_time_slots_per_mtp); + vcp_y = dc_fixpt_floor( + dc_fixpt_mul_int( + dc_fixpt_sub_int( + avg_time_slots_per_mtp, + dc_fixpt_floor( + avg_time_slots_per_mtp)), + VCP_Y_PRECISION)); + + + if (link->type == dc_connection_mst_branch) + DC_LOG_DP2("MST Update Payload: set_throttled_vcp_size slot X.Y for MST stream " + "X: %llu " + "Y: %llu/%d", + vcp_x, + vcp_y, + VCP_Y_PRECISION); + else + DC_LOG_DP2("SST Update Payload: set_throttled_vcp_size slot X.Y for SST stream " + "X: %llu " + "Y: %llu/%d", + vcp_x, + vcp_y, + VCP_Y_PRECISION); +} + +static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream) +{ + struct fixed31_32 mbytes_per_sec; + uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link, + &stream->link->cur_link_settings); + link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */ + + mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec); + + return dc_fixpt_div_int(mbytes_per_sec, 54); +} + +static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps) +{ + struct fixed31_32 peak_kbps; + uint32_t numerator = 0; + uint32_t denominator = 1; + + /* + * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 + * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on + * common multiplier to render an integer PBN for all link rate/lane + * counts combinations + * calculate + * peak_kbps *= (1006/1000) + * peak_kbps *= (64/54) + * peak_kbps *= 8 convert to bytes + */ + + numerator = 64 * PEAK_FACTOR_X1000; + denominator = 54 * 8 * 1000 * 1000; + kbps *= numerator; + peak_kbps = dc_fixpt_from_fraction(kbps, denominator); + + return peak_kbps; +} + +static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) +{ + uint64_t kbps; + + kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing); + return get_pbn_from_bw_in_kbps(kbps); +} + + +// TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST) +static void get_lane_status( + struct dc_link *link, + uint32_t lane_count, + union lane_status *status, + union lane_align_status_updated *status_updated) +{ + unsigned int lane; + uint8_t dpcd_buf[3] = {0}; + + if (status == NULL || status_updated == NULL) { + return; + } + + core_link_read_dpcd( + link, + DP_LANE0_1_STATUS, + dpcd_buf, + sizeof(dpcd_buf)); + + for (lane = 0; lane < lane_count; lane++) { + status[lane].raw = dp_get_nibble_at_index(&dpcd_buf[0], lane); + } + + status_updated->raw = dpcd_buf[2]; +} + +static bool poll_for_allocation_change_trigger(struct dc_link *link) +{ + /* + * wait for ACT handled + */ + int i; + const int act_retries = 30; + enum act_return_status result = ACT_FAILED; + union payload_table_update_status update_status = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; + union lane_align_status_updated lane_status_updated; + DC_LOGGER_INIT(link->ctx->logger); + + if 
(link->aux_access_disabled) + return true; + for (i = 0; i < act_retries; i++) { + get_lane_status(link, link->cur_link_settings.lane_count, dpcd_lane_status, &lane_status_updated); + + if (!dp_is_cr_done(link->cur_link_settings.lane_count, dpcd_lane_status) || + !dp_is_ch_eq_done(link->cur_link_settings.lane_count, dpcd_lane_status) || + !dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) || + !dp_is_interlane_aligned(lane_status_updated)) { + DC_LOG_ERROR("SST Update Payload: Link loss occurred while " + "polling for ACT handled."); + result = ACT_LINK_LOST; + break; + } + core_link_read_dpcd( + link, + DP_PAYLOAD_TABLE_UPDATE_STATUS, + &update_status.raw, + 1); + + if (update_status.bits.ACT_HANDLED == 1) { + DC_LOG_DP2("SST Update Payload: ACT handled by downstream."); + result = ACT_SUCCESS; + break; + } + + msleep(5); + } + + if (result == ACT_FAILED) { + DC_LOG_ERROR("SST Update Payload: ACT still not handled after retries, " + "continue on. Something is wrong with the branch."); + } + + return (result == ACT_SUCCESS); +} + +static void update_mst_stream_alloc_table( + struct dc_link *link, + struct stream_encoder *stream_enc, + struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc? + const struct dc_dp_mst_stream_allocation_table *proposed_table) +{ + struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 }; + struct link_mst_stream_allocation *dc_alloc; + + int i; + int j; + + /* if DRM proposed_table has more than one new payload */ + ASSERT(proposed_table->stream_count - + link->mst_stream_alloc_table.stream_count < 2); + + /* copy proposed_table to link, add stream encoder */ + for (i = 0; i < proposed_table->stream_count; i++) { + + for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) { + dc_alloc = + &link->mst_stream_alloc_table.stream_allocations[j]; + + if (dc_alloc->vcp_id == + proposed_table->stream_allocations[i].vcp_id) { + + work_table[i] = *dc_alloc; + work_table[i].slot_count = proposed_table->stream_allocations[i].slot_count; + break; /* exit j loop */ + } + } + + /* new vcp_id */ + if (j == link->mst_stream_alloc_table.stream_count) { + work_table[i].vcp_id = + proposed_table->stream_allocations[i].vcp_id; + work_table[i].slot_count = + proposed_table->stream_allocations[i].slot_count; + work_table[i].stream_enc = stream_enc; + work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc; + } + } + + /* update link->mst_stream_alloc_table with work_table */ + link->mst_stream_alloc_table.stream_count = + proposed_table->stream_count; + for (i = 0; i < MAX_CONTROLLER_NUM; i++) + link->mst_stream_alloc_table.stream_allocations[i] = + work_table[i]; +} + +static void remove_stream_from_alloc_table( + struct dc_link *link, + struct stream_encoder *dio_stream_enc, + struct hpo_dp_stream_encoder *hpo_dp_stream_enc) +{ + int i = 0; + struct link_mst_stream_allocation_table *table = + &link->mst_stream_alloc_table; + + if (hpo_dp_stream_enc) { + for (; i < table->stream_count; i++) + if (hpo_dp_stream_enc == table->stream_allocations[i].hpo_dp_stream_enc) + break; + } else { + for (; i < table->stream_count; i++) + if (dio_stream_enc == table->stream_allocations[i].stream_enc) + break; + } + + if (i < table->stream_count) { + i++; + for (; i < table->stream_count; i++) + table->stream_allocations[i-1] = table->stream_allocations[i]; + memset(&table->stream_allocations[table->stream_count-1], 0, + sizeof(struct link_mst_stream_allocation)); + table->stream_count--; + } +} + +static enum 
dc_status deallocate_mst_payload_with_temp_drm_wa(
+		struct pipe_ctx *pipe_ctx)
+{
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->link;
+	struct dc_dp_mst_stream_allocation_table proposed_table = {0};
+	struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+	int i;
+	bool mst_mode = (link->type == dc_connection_mst_branch);
+	/* adjust for drm changes */
+	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+	const struct dc_link_settings empty_link_settings = {0};
+	DC_LOGGER_INIT(link->ctx->logger);
+
+	if (link_hwss->ext.set_throttled_vcp_size)
+		link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+	if (link_hwss->ext.set_hblank_min_symbol_width)
+		link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+				&empty_link_settings,
+				avg_time_slots_per_mtp);
+
+	if (dm_helpers_dp_mst_write_payload_allocation_table(
+			stream->ctx,
+			stream,
+			&proposed_table,
+			false))
+		update_mst_stream_alloc_table(
+				link,
+				pipe_ctx->stream_res.stream_enc,
+				pipe_ctx->stream_res.hpo_dp_stream_enc,
+				&proposed_table);
+	else
+		DC_LOG_WARNING("Failed to update "
+				"MST allocation table for "
+				"pipe idx:%d\n",
+				pipe_ctx->pipe_idx);
+
+	DC_LOG_MST("%s "
+			"stream_count: %d: ",
+			__func__,
+			link->mst_stream_alloc_table.stream_count);
+
+	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+		DC_LOG_MST("stream_enc[%d]: %p "
+				"stream[%d].hpo_dp_stream_enc: %p "
+				"stream[%d].vcp_id: %d "
+				"stream[%d].slot_count: %d\n",
+				i,
+				(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+				i,
+				(void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+				i,
+				link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+				i,
+				link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+	}
+
+	if (link_hwss->ext.update_stream_allocation_table == NULL ||
+			link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+		DC_LOG_DEBUG("Unknown encoding format\n");
+		return DC_ERROR_UNEXPECTED;
+	}
+
+	link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
+			&link->mst_stream_alloc_table);
+
+	if (mst_mode) {
+		dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+				stream->ctx,
+				stream);
+	}
+
+	dm_helpers_dp_mst_send_payload_allocation(
+			stream->ctx,
+			stream,
+			false);
+
+	return DC_OK;
+}
+
+static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->link;
+	struct dc_dp_mst_stream_allocation_table proposed_table = {0};
+	struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+	int i;
+	bool mst_mode = (link->type == dc_connection_mst_branch);
+	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+	const struct dc_link_settings empty_link_settings = {0};
+	DC_LOGGER_INIT(link->ctx->logger);
+
+	if (link->dc->debug.temp_mst_deallocation_sequence)
+		return deallocate_mst_payload_with_temp_drm_wa(pipe_ctx);
+
+	/* deallocate_mst_payload is called before the link is disabled. On a
+	 * mode change or a monitor disable/enable, a new stream is created
+	 * that is not in link->stream[] yet; its payload has not been
+	 * allocated, so no de-allocation should be done for it. For a new
+	 * mode set, map_resources will get an engine for the new stream, so
+	 * stream_enc->id should be valid by this point.
+	 */
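The teardown below happens in a fixed order; summarized (simplified, step names informal):

    /*
     * 1. Throttle the VC payload to zero time slots at the source.
     * 2. Remove the stream from the payload table - via the MST manager
     *    while the link is still in MST mode, or with local table logic
     *    once the hub is gone.
     * 3. Program the updated allocation table into the link hardware.
     * 4. Poll for ACT so the downstream branch acknowledges the change.
     * 5. Send the payload-allocation message to the branch device.
     */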
+
+	/* slot X.Y */
+	if (link_hwss->ext.set_throttled_vcp_size)
+		link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+	if (link_hwss->ext.set_hblank_min_symbol_width)
+		link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+				&empty_link_settings,
+				avg_time_slots_per_mtp);
+
+	if (mst_mode) {
+		/* when the link is in mst mode, rely on the mst manager to
+		 * remove the payload
+		 */
+		if (dm_helpers_dp_mst_write_payload_allocation_table(
+				stream->ctx,
+				stream,
+				&proposed_table,
+				false))
+			update_mst_stream_alloc_table(
+					link,
+					pipe_ctx->stream_res.stream_enc,
+					pipe_ctx->stream_res.hpo_dp_stream_enc,
+					&proposed_table);
+		else
+			DC_LOG_WARNING("Failed to update "
+					"MST allocation table for "
+					"pipe idx:%d\n",
+					pipe_ctx->pipe_idx);
+	} else {
+		/* when the link is no longer in mst mode (mst hub unplugged),
+		 * remove the payload with default dc logic
+		 */
+		remove_stream_from_alloc_table(link, pipe_ctx->stream_res.stream_enc,
+				pipe_ctx->stream_res.hpo_dp_stream_enc);
+	}
+
+	DC_LOG_MST("%s "
+			"stream_count: %d: ",
+			__func__,
+			link->mst_stream_alloc_table.stream_count);
+
+	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+		DC_LOG_MST("stream_enc[%d]: %p "
+				"stream[%d].hpo_dp_stream_enc: %p "
+				"stream[%d].vcp_id: %d "
+				"stream[%d].slot_count: %d\n",
+				i,
+				(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+				i,
+				(void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+				i,
+				link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+				i,
+				link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+	}
+
+	/* update mst stream allocation table hardware state */
+	if (link_hwss->ext.update_stream_allocation_table == NULL ||
+			link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+		DC_LOG_DEBUG("Unknown encoding format\n");
+		return DC_ERROR_UNEXPECTED;
+	}
+
+	link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
+			&link->mst_stream_alloc_table);
+
+	if (mst_mode) {
+		dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+				stream->ctx,
+				stream);
+
+		dm_helpers_dp_mst_send_payload_allocation(
+				stream->ctx,
+				stream,
+				false);
+	}
+
+	return DC_OK;
+}
+
+/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
+ * because stream_encoder is not exposed to dm
+ */
+static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->link;
+	struct dc_dp_mst_stream_allocation_table proposed_table = {0};
+	struct fixed31_32 avg_time_slots_per_mtp;
+	struct fixed31_32 pbn;
+	struct fixed31_32 pbn_per_slot;
+	int i;
+	enum act_return_status ret;
+	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+	DC_LOGGER_INIT(link->ctx->logger);
+
+	/* enable_link_dp_mst already checks link->enabled_stream_count
+	 * and that the stream is in link->stream[]. This is called during
+	 * set mode, so stream_enc is available.
+	 */
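The slot math performed near the end of this function is easier to follow with concrete numbers. Below is a plain-integer restatement of get_pbn_per_slot() and get_pbn_from_bw_in_kbps() (the real code uses fixed-point), worked for HBR2 x4 and a 1080p60 24bpp stream:

    #include <stdint.h>

    static void pbn_example(void)
    {
        /* Link payload bandwidth after 8b/10b: 5.4 Gbps * 4 lanes * 8/10
         * = 17280000 kbps. 17280000 / 8000 = 2160 MB/s; /54 = 40 PBN/slot
         * (64 slots * 40 = 2560 PBN total, the familiar HBR2 x4 figure). */
        uint64_t link_kbps = 17280000;
        uint64_t pbn_per_slot = (link_kbps / 8000) / 54; /* == 40 */

        /* Stream: 148.5 MHz * 24 bpp = 3564000 kbps. Apply the 1.006 margin
         * and the 64/54 PBN unit conversion; scaled by 1000 to keep the
         * fraction visible: ~531168, i.e. PBN ~= 531.2. */
        uint64_t stream_kbps = 3564000;
        uint64_t pbn_x1000 = stream_kbps * 64 * 1006 / (54 * 8 * 1000);

        /* avg_time_slots_per_mtp = pbn / pbn_per_slot ~= 531.2 / 40 ~= 13.3,
         * so this stream occupies 14 of the 64 time slots. */
        (void)pbn_per_slot;
        (void)pbn_x1000;
    }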
+	 */
+
+	/* calculate the VC payload for the stream: stream_alloc */
+	if (dm_helpers_dp_mst_write_payload_allocation_table(
+			stream->ctx,
+			stream,
+			&proposed_table,
+			true))
+		update_mst_stream_alloc_table(
+				link,
+				pipe_ctx->stream_res.stream_enc,
+				pipe_ctx->stream_res.hpo_dp_stream_enc,
+				&proposed_table);
+	else
+		DC_LOG_WARNING("Failed to update MST allocation table for pipe idx: %d\n",
+				pipe_ctx->pipe_idx);
+
+	DC_LOG_MST("%s stream_count: %d\n",
+			__func__,
+			link->mst_stream_alloc_table.stream_count);
+
+	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+		DC_LOG_MST("stream_enc[%d]: %p "
+				"stream[%d].hpo_dp_stream_enc: %p "
+				"stream[%d].vcp_id: %d "
+				"stream[%d].slot_count: %d\n",
+				i,
+				(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+				i,
+				(void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+				i,
+				link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+				i,
+				link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+	}
+
+	ASSERT(proposed_table.stream_count > 0);
+
+	/* program DP source TX for payload */
+	if (link_hwss->ext.update_stream_allocation_table == NULL ||
+			link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+		DC_LOG_ERROR("Failure: unknown encoding format\n");
+		return DC_ERROR_UNEXPECTED;
+	}
+
+	link_hwss->ext.update_stream_allocation_table(link,
+			&pipe_ctx->link_res,
+			&link->mst_stream_alloc_table);
+
+	/* send down message */
+	ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+			stream->ctx,
+			stream);
+
+	if (ret != ACT_LINK_LOST) {
+		dm_helpers_dp_mst_send_payload_allocation(
+				stream->ctx,
+				stream,
+				true);
+	}
+
+	/* slot X.Y for only current stream */
+	pbn_per_slot = get_pbn_per_slot(stream);
+	if (pbn_per_slot.value == 0) {
+		DC_LOG_ERROR("Failure: pbn_per_slot==0 not allowed. 
Cannot continue, returning DC_UNSUPPORTED_VALUE.\n"); + return DC_UNSUPPORTED_VALUE; + } + pbn = get_pbn_from_timing(pipe_ctx); + avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); + + dc_log_vcp_x_y(link, avg_time_slots_per_mtp); + + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->ext.set_hblank_min_symbol_width) + link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, + &link->cur_link_settings, + avg_time_slots_per_mtp); + + return DC_OK; +} + +struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp( + const struct dc_stream_state *stream, + const struct dc_link *link) +{ + struct fixed31_32 link_bw_effective = + dc_fixpt_from_int( + dc_link_bandwidth_kbps(link, &link->cur_link_settings)); + struct fixed31_32 timeslot_bw_effective = + dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT); + struct fixed31_32 timing_bw = + dc_fixpt_from_int( + dc_bandwidth_in_kbps_from_timing(&stream->timing)); + struct fixed31_32 avg_time_slots_per_mtp = + dc_fixpt_div(timing_bw, timeslot_bw_effective); + + return avg_time_slots_per_mtp; +} + + +static bool write_128b_132b_sst_payload_allocation_table( + const struct dc_stream_state *stream, + struct dc_link *link, + struct link_mst_stream_allocation_table *proposed_table, + bool allocate) +{ + const uint8_t vc_id = 1; /// VC ID always 1 for SST + const uint8_t start_time_slot = 0; /// Always start at time slot 0 for SST + bool result = false; + uint8_t req_slot_count = 0; + struct fixed31_32 avg_time_slots_per_mtp = { 0 }; + union payload_table_update_status update_status = { 0 }; + const uint32_t max_retries = 30; + uint32_t retries = 0; + DC_LOGGER_INIT(link->ctx->logger); + + if (allocate) { + avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, link); + req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); + /// Validation should filter out modes that exceed link BW + ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT); + if (req_slot_count > MAX_MTP_SLOT_COUNT) + return false; + } else { + /// Leave req_slot_count = 0 if allocate is false. 
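+		/// Worked example with illustrative numbers: each MTP carries
+		/// MAX_MTP_SLOT_COUNT (64) time slots, so one slot is worth
+		/// link_bw / 64 of the effective link bandwidth. A stream that
+		/// needs 40% of the link gets
+		/// avg_time_slots_per_mtp = 0.4 * 64 = 25.6, and the DPCD
+		/// request is rounded up to req_slot_count = 26 whole slots.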
+ } + + proposed_table->stream_count = 1; /// Always 1 stream for SST + proposed_table->stream_allocations[0].slot_count = req_slot_count; + proposed_table->stream_allocations[0].vcp_id = vc_id; + + if (link->aux_access_disabled) + return true; + + /// Write DPCD 2C0 = 1 to start updating + update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1; + core_link_write_dpcd( + link, + DP_PAYLOAD_TABLE_UPDATE_STATUS, + &update_status.raw, + 1); + + /// Program the changes in DPCD 1C0 - 1C2 + ASSERT(vc_id == 1); + core_link_write_dpcd( + link, + DP_PAYLOAD_ALLOCATE_SET, + &vc_id, + 1); + + ASSERT(start_time_slot == 0); + core_link_write_dpcd( + link, + DP_PAYLOAD_ALLOCATE_START_TIME_SLOT, + &start_time_slot, + 1); + + core_link_write_dpcd( + link, + DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT, + &req_slot_count, + 1); + + /// Poll till DPCD 2C0 read 1 + /// Try for at least 150ms (30 retries, with 5ms delay after each attempt) + + while (retries < max_retries) { + if (core_link_read_dpcd( + link, + DP_PAYLOAD_TABLE_UPDATE_STATUS, + &update_status.raw, + 1) == DC_OK) { + if (update_status.bits.VC_PAYLOAD_TABLE_UPDATED == 1) { + DC_LOG_DP2("SST Update Payload: downstream payload table updated."); + result = true; + break; + } + } else { + union dpcd_rev dpcdRev; + + if (core_link_read_dpcd( + link, + DP_DPCD_REV, + &dpcdRev.raw, + 1) != DC_OK) { + DC_LOG_ERROR("SST Update Payload: Unable to read DPCD revision " + "of sink while polling payload table " + "updated status bit."); + break; + } + } + retries++; + msleep(5); + } + + if (!result && retries == max_retries) { + DC_LOG_ERROR("SST Update Payload: Payload table not updated after retries, " + "continue on. Something is wrong with the branch."); + // TODO - DP2.0 Payload: Read and log the payload table from downstream branch + } + + return result; +} + +/* + * Payload allocation/deallocation for SST introduced in DP2.0 + */ +static enum dc_status update_sst_payload(struct pipe_ctx *pipe_ctx, + bool allocate) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + struct link_mst_stream_allocation_table proposed_table = {0}; + struct fixed31_32 avg_time_slots_per_mtp; + const struct dc_link_settings empty_link_settings = {0}; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + DC_LOGGER_INIT(link->ctx->logger); + + /* slot X.Y for SST payload deallocate */ + if (!allocate) { + avg_time_slots_per_mtp = dc_fixpt_from_int(0); + + dc_log_vcp_x_y(link, avg_time_slots_per_mtp); + + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, + avg_time_slots_per_mtp); + if (link_hwss->ext.set_hblank_min_symbol_width) + link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, + &empty_link_settings, + avg_time_slots_per_mtp); + } + + /* calculate VC payload and update branch with new payload allocation table*/ + if (!write_128b_132b_sst_payload_allocation_table( + stream, + link, + &proposed_table, + allocate)) { + DC_LOG_ERROR("SST Update Payload: Failed to update " + "allocation table for " + "pipe idx: %d\n", + pipe_ctx->pipe_idx); + return DC_FAIL_DP_PAYLOAD_ALLOCATION; + } + + proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; + + ASSERT(proposed_table.stream_count == 1); + + //TODO - DP2.0 Logging: Instead of hpo_dp_stream_enc pointer, log instance id + DC_LOG_DP2("SST Update Payload: hpo_dp_stream_enc: %p " + "vcp_id: %d " + "slot_count: %d\n", + (void *) proposed_table.stream_allocations[0].hpo_dp_stream_enc, + 
proposed_table.stream_allocations[0].vcp_id,
+			proposed_table.stream_allocations[0].slot_count);
+
+	/* program DP source TX for payload */
+	link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
+			&proposed_table);
+
+	/* poll for ACT handled */
+	if (!poll_for_allocation_change_trigger(link)) {
+		// Failures will result in blackscreen and errors logged
+		BREAK_TO_DEBUGGER();
+	}
+
+	/* slot X.Y for SST payload allocate */
+	if (allocate && link_dp_get_encoding_format(&link->cur_link_settings) ==
+			DP_128b_132b_ENCODING) {
+		avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, link);
+
+		dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
+
+		if (link_hwss->ext.set_throttled_vcp_size)
+			link_hwss->ext.set_throttled_vcp_size(pipe_ctx,
+					avg_time_slots_per_mtp);
+		if (link_hwss->ext.set_hblank_min_symbol_width)
+			link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+					&link->cur_link_settings,
+					avg_time_slots_per_mtp);
+	}
+
+	/* Always return DC_OK.
+	 * If part of the sequence fails, log the failure(s) and show a black
+	 * screen.
+	 */
+	return DC_OK;
+}
+
+enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps)
+{
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->link;
+	struct fixed31_32 avg_time_slots_per_mtp;
+	struct fixed31_32 pbn;
+	struct fixed31_32 pbn_per_slot;
+	struct dc_dp_mst_stream_allocation_table proposed_table = {0};
+	uint8_t i;
+	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+	DC_LOGGER_INIT(link->ctx->logger);
+
+	/* decrease throttled vcp size */
+	pbn_per_slot = get_pbn_per_slot(stream);
+	pbn = get_pbn_from_bw_in_kbps(bw_in_kbps);
+	avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+
+	if (link_hwss->ext.set_throttled_vcp_size)
+		link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+	if (link_hwss->ext.set_hblank_min_symbol_width)
+		link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+				&link->cur_link_settings,
+				avg_time_slots_per_mtp);
+
+	/* send ALLOCATE_PAYLOAD sideband message with updated pbn */
+	dm_helpers_dp_mst_send_payload_allocation(
+			stream->ctx,
+			stream,
+			true);
+
+	/* notify immediate branch device table update */
+	if (dm_helpers_dp_mst_write_payload_allocation_table(
+			stream->ctx,
+			stream,
+			&proposed_table,
+			true)) {
+		/* update mst stream allocation table software state */
+		update_mst_stream_alloc_table(
+				link,
+				pipe_ctx->stream_res.stream_enc,
+				pipe_ctx->stream_res.hpo_dp_stream_enc,
+				&proposed_table);
+	} else {
+		DC_LOG_WARNING("Failed to update MST allocation table for pipe idx: %d\n",
+				pipe_ctx->pipe_idx);
+	}
+
+	DC_LOG_MST("%s stream_count: %d\n",
+			__func__,
+			link->mst_stream_alloc_table.stream_count);
+
+	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+		DC_LOG_MST("stream_enc[%d]: %p "
+				"stream[%d].hpo_dp_stream_enc: %p "
+				"stream[%d].vcp_id: %d "
+				"stream[%d].slot_count: %d\n",
+				i,
+				(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+				i,
+				(void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+				i,
+				link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+				i,
+				link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+	}
+
+	ASSERT(proposed_table.stream_count > 0);
+
+	/* update mst stream allocation table hardware state */
+	if (link_hwss->ext.update_stream_allocation_table == NULL ||
+			link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+		DC_LOG_ERROR("Failure: unknown encoding 
format\n"); + return DC_ERROR_UNEXPECTED; + } + + link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, + &link->mst_stream_alloc_table); + + /* poll for immediate branch device ACT handled */ + dm_helpers_dp_mst_poll_for_allocation_change_trigger( + stream->ctx, + stream); + + return DC_OK; +} + +enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + struct fixed31_32 avg_time_slots_per_mtp; + struct fixed31_32 pbn; + struct fixed31_32 pbn_per_slot; + struct dc_dp_mst_stream_allocation_table proposed_table = {0}; + uint8_t i; + enum act_return_status ret; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + DC_LOGGER_INIT(link->ctx->logger); + + /* notify immediate branch device table update */ + if (dm_helpers_dp_mst_write_payload_allocation_table( + stream->ctx, + stream, + &proposed_table, + true)) { + /* update mst stream allocation table software state */ + update_mst_stream_alloc_table( + link, + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc, + &proposed_table); + } + + DC_LOG_MST("%s " + "stream_count: %d: \n ", + __func__, + link->mst_stream_alloc_table.stream_count); + + for (i = 0; i < MAX_CONTROLLER_NUM; i++) { + DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].hpo_dp_stream_enc: %p " + "stream[%d].vcp_id: %d " + "stream[%d].slot_count: %d\n", + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, + i, + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, + i, + link->mst_stream_alloc_table.stream_allocations[i].slot_count); + } + + ASSERT(proposed_table.stream_count > 0); + + /* update mst stream allocation table hardware state */ + if (link_hwss->ext.update_stream_allocation_table == NULL || + link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { + DC_LOG_ERROR("Failure: unknown encoding format\n"); + return DC_ERROR_UNEXPECTED; + } + + link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, + &link->mst_stream_alloc_table); + + /* poll for immediate branch device ACT handled */ + ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( + stream->ctx, + stream); + + if (ret != ACT_LINK_LOST) { + /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ + dm_helpers_dp_mst_send_payload_allocation( + stream->ctx, + stream, + true); + } + + /* increase throttled vcp size */ + pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); + pbn_per_slot = get_pbn_per_slot(stream); + avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); + + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->ext.set_hblank_min_symbol_width) + link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, + &link->cur_link_settings, + avg_time_slots_per_mtp); + + return DC_OK; +} + +static void disable_link_dp(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + struct dc_link_settings link_settings = link->cur_link_settings; + + if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST && + link->mst_stream_alloc_table.stream_count > 0) + /* disable MST link only when last vc payload is deallocated */ + return; + + dp_disable_link_phy(link, link_res, signal); + + if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + /* set the sink to SST mode after disabling 
the link */ + enable_mst_on_sink(link, false); + + if (link_dp_get_encoding_format(&link_settings) == + DP_8b_10b_ENCODING) { + dp_set_fec_enable(link, false); + dp_set_fec_ready(link, link_res, false); + } +} + +static void disable_link(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + if (dc_is_dp_signal(signal)) { + disable_link_dp(link, link_res, signal); + } else if (signal != SIGNAL_TYPE_VIRTUAL) { + link->dc->hwss.disable_link_output(link, link_res, signal); + } + + if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + /* MST disable link only when no stream use the link */ + if (link->mst_stream_alloc_table.stream_count <= 0) + link->link_status.link_active = false; + } else { + link->link_status.link_active = false; + } +} + +static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + enum dc_color_depth display_color_depth; + enum engine_id eng_id; + struct ext_hdmi_settings settings = {0}; + bool is_over_340mhz = false; + bool is_vga_mode = (stream->timing.h_addressable == 640) + && (stream->timing.v_addressable == 480); + struct dc *dc = pipe_ctx->stream->ctx->dc; + + if (stream->phy_pix_clk == 0) + stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; + if (stream->phy_pix_clk > 340000) + is_over_340mhz = true; + + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { + unsigned short masked_chip_caps = pipe_ctx->stream->link->chip_caps & + EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK; + if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) { + /* DP159, Retimer settings */ + eng_id = pipe_ctx->stream_res.stream_enc->id; + + if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) { + write_i2c_retimer_setting(pipe_ctx, + is_vga_mode, is_over_340mhz, &settings); + } else { + write_i2c_default_retimer_setting(pipe_ctx, + is_vga_mode, is_over_340mhz); + } + } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) { + /* PI3EQX1204, Redriver settings */ + write_i2c_redriver_setting(pipe_ctx, is_over_340mhz); + } + } + + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) + write_scdc_data( + stream->link->ddc, + stream->phy_pix_clk, + stream->timing.flags.LTE_340MCSC_SCRAMBLE); + + memset(&stream->link->cur_link_settings, 0, + sizeof(struct dc_link_settings)); + + display_color_depth = stream->timing.display_color_depth; + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) + display_color_depth = COLOR_DEPTH_888; + + dc->hwss.enable_tmds_link_output( + link, + &pipe_ctx->link_res, + pipe_ctx->stream->signal, + pipe_ctx->clock_source->id, + display_color_depth, + stream->phy_pix_clk); + + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) + read_scdc_data(link->ddc); +} + +static enum dc_status enable_link_dp(struct dc_state *state, + struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + enum dc_status status; + bool skip_video_pattern; + struct dc_link *link = stream->link; + const struct dc_link_settings *link_settings = + &pipe_ctx->link_config.dp_link_settings; + bool fec_enable; + int i; + bool apply_seamless_boot_optimization = false; + uint32_t bl_oled_enable_delay = 50; // in ms + uint32_t post_oui_delay = 30; // 30ms + /* Reduce link bandwidth between failed link training attempts. 
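+	 * Fallback progressively relaxes the link settings (lower link rate
+	 * and/or lane count) on each retry inside
+	 * perform_link_training_with_retries().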
*/ + bool do_fallback = false; + + // check for seamless boot + for (i = 0; i < state->stream_count; i++) { + if (state->streams[i]->apply_seamless_boot_optimization) { + apply_seamless_boot_optimization = true; + break; + } + } + + /* Train with fallback when enabling DPIA link. Conventional links are + * trained with fallback during sink detection. + */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + do_fallback = true; + + /* + * Temporary w/a to get DP2.0 link rates to work with SST. + * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved. + */ + if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING && + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + link->dc->debug.set_mst_en_for_sst) { + enable_mst_on_sink(link, true); + } + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { + /*in case it is not on*/ + if (!link->dc->config.edp_no_power_sequencing) + link->dc->hwss.edp_power_control(link, true); + link->dc->hwss.edp_wait_for_hpd_ready(link, true); + } + + if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) { + /* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */ + } else { + pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = + link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; + if (state->clk_mgr && !apply_seamless_boot_optimization) + state->clk_mgr->funcs->update_clocks(state->clk_mgr, + state, false); + } + + // during mode switch we do DP_SET_POWER off then on, and OUI is lost + dpcd_set_source_specific_data(link); + if (link->dpcd_sink_ext_caps.raw != 0) { + post_oui_delay += link->panel_config.pps.extra_post_OUI_ms; + msleep(post_oui_delay); + } + + // similarly, mode switch can cause loss of cable ID + dpcd_write_cable_id_to_dprx(link); + + skip_video_pattern = true; + + if (link_settings->link_rate == LINK_RATE_LOW) + skip_video_pattern = false; + + if (perform_link_training_with_retries(link_settings, + skip_video_pattern, + LINK_TRAINING_ATTEMPTS, + pipe_ctx, + pipe_ctx->stream->signal, + do_fallback)) { + status = DC_OK; + } else { + status = DC_FAIL_DP_LINK_TRAINING; + } + + if (link->preferred_training_settings.fec_enable) + fec_enable = *link->preferred_training_settings.fec_enable; + else + fec_enable = true; + + if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) + dp_set_fec_enable(link, fec_enable); + + // during mode set we do DP_SET_POWER off then on, aux writes are lost + if (link->dpcd_sink_ext_caps.bits.oled == 1 || + link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 || + link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) { + set_default_brightness_aux(link); // TODO: use cached if known + if (link->dpcd_sink_ext_caps.bits.oled == 1) + msleep(bl_oled_enable_delay); + link_backlight_enable_aux(link, true); + } + + return status; +} + +static enum dc_status enable_link_edp( + struct dc_state *state, + struct pipe_ctx *pipe_ctx) +{ + return enable_link_dp(state, pipe_ctx); +} + +static void enable_link_lvds(struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + struct dc *dc = stream->ctx->dc; + + if (stream->phy_pix_clk == 0) + stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; + + memset(&stream->link->cur_link_settings, 0, + sizeof(struct dc_link_settings)); + dc->hwss.enable_lvds_link_output( + link, + &pipe_ctx->link_res, + pipe_ctx->clock_source->id, + stream->phy_pix_clk); + +} + +static enum dc_status enable_link_dp_mst( + struct dc_state *state, + 
struct pipe_ctx *pipe_ctx)
+{
+	struct dc_link *link = pipe_ctx->stream->link;
+
+	/* The sink signal type behind an MST branch is MST. Multiple MST
+	 * sinks share one link; the link DP PHY is enabled and trained only
+	 * once.
+	 */
+	if (link->link_status.link_active)
+		return DC_OK;
+
+	/* clear payload table */
+	dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
+
+	/* to make sure the pending down reply can be processed
+	 * before enabling the link
+	 */
+	dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link);
+
+	/* set the sink to MST mode before enabling the link */
+	enable_mst_on_sink(link, true);
+
+	return enable_link_dp(state, pipe_ctx);
+}
+
+static enum dc_status enable_link(
+		struct dc_state *state,
+		struct pipe_ctx *pipe_ctx)
+{
+	enum dc_status status = DC_ERROR_UNEXPECTED;
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->link;
+
+	/* There are scenarios where the driver is unloaded with a display
+	 * still enabled. When the driver is reloaded, a display may fail to
+	 * light up if there is a mismatch between the old and new link
+	 * settings, so disable first before enabling with the new link
+	 * settings.
+	 */
+	if (link->link_status.link_active) {
+		disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
+	}
+
+	switch (pipe_ctx->stream->signal) {
+	case SIGNAL_TYPE_DISPLAY_PORT:
+		status = enable_link_dp(state, pipe_ctx);
+		break;
+	case SIGNAL_TYPE_EDP:
+		status = enable_link_edp(state, pipe_ctx);
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		status = enable_link_dp_mst(state, pipe_ctx);
+		msleep(200);
+		break;
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		enable_link_hdmi(pipe_ctx);
+		status = DC_OK;
+		break;
+	case SIGNAL_TYPE_LVDS:
+		enable_link_lvds(pipe_ctx);
+		status = DC_OK;
+		break;
+	case SIGNAL_TYPE_VIRTUAL:
+		status = DC_OK;
+		break;
+	default:
+		break;
+	}
+
+	if (status == DC_OK) {
+		pipe_ctx->stream->link->link_status.link_active = true;
+	}
+
+	return status;
+}
+
+void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
+{
+	struct dc *dc = pipe_ctx->stream->ctx->dc;
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->sink->link;
+	struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+
+	ASSERT(is_master_pipe_for_link(link, pipe_ctx));
+
+	if (link_is_dp_128b_132b_signal(pipe_ctx))
+		vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
+
+	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+	if (pipe_ctx->stream->sink) {
+		if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+			pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
+			DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
+			pipe_ctx->stream->sink->edid_caps.display_name,
+			pipe_ctx->stream->signal);
+		}
+	}
+
+	if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
+			dc_is_virtual_signal(pipe_ctx->stream->signal))
+		return;
+
+	if (!pipe_ctx->stream->sink->edid_caps.panel_patch.skip_avmute) {
+		if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+			set_avmute(pipe_ctx, true);
+	}
+
+	dc->hwss.disable_audio_stream(pipe_ctx);
+
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+	update_psp_stream_config(pipe_ctx, true);
+#endif
+	dc->hwss.blank_stream(pipe_ctx);
+
+	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+		deallocate_mst_payload(pipe_ctx);
+	else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+			link_is_dp_128b_132b_signal(pipe_ctx))
+		update_sst_payload(pipe_ctx, false);
+
+	if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) 
{ + struct ext_hdmi_settings settings = {0}; + enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id; + + unsigned short masked_chip_caps = link->chip_caps & + EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK; + //Need to inform that sink is going to use legacy HDMI mode. + write_scdc_data( + link->ddc, + 165000,//vbios only handles 165Mhz. + false); + if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) { + /* DP159, Retimer settings */ + if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) + write_i2c_retimer_setting(pipe_ctx, + false, false, &settings); + else + write_i2c_default_retimer_setting(pipe_ctx, + false, false); + } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) { + /* PI3EQX1204, Redriver settings */ + write_i2c_redriver_setting(pipe_ctx, false); + } + } + + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + !link_is_dp_128b_132b_signal(pipe_ctx)) { + + /* In DP1.x SST mode, our encoder will go to TPS1 + * when link is on but stream is off. + * Disabling link before stream will avoid exposing TPS1 pattern + * during the disable sequence as it will confuse some receivers + * state machine. + * In DP2 or MST mode, our encoder will stay video active + */ + disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); + dc->hwss.disable_stream(pipe_ctx); + } else { + dc->hwss.disable_stream(pipe_ctx); + disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); + } + + if (pipe_ctx->stream->timing.flags.DSC) { + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + link_set_dsc_enable(pipe_ctx, false); + } + if (link_is_dp_128b_132b_signal(pipe_ctx)) { + if (pipe_ctx->stream_res.tg->funcs->set_out_mux) + pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO); + } + + if (vpg && vpg->funcs->vpg_powerdown) + vpg->funcs->vpg_powerdown(vpg); +} + +void link_set_dpms_on( + struct dc_state *state, + struct pipe_ctx *pipe_ctx) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->sink->link; + enum dc_status status; + struct link_encoder *link_enc; + enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; + struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + + ASSERT(is_master_pipe_for_link(link, pipe_ctx)); + + if (link_is_dp_128b_132b_signal(pipe_ctx)) + vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; + + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + + if (pipe_ctx->stream->sink) { + if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && + pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) { + DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__, + pipe_ctx->stream->sink->edid_caps.display_name, + pipe_ctx->stream->signal); + } + } + + if (!IS_DIAG_DC(dc->ctx->dce_environment) && + dc_is_virtual_signal(pipe_ctx->stream->signal)) + return; + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); + + if (!dc_is_virtual_signal(pipe_ctx->stream->signal) + && !link_is_dp_128b_132b_signal(pipe_ctx)) { + if (link_enc) + link_enc->funcs->setup( + link_enc, + pipe_ctx->stream->signal); + } + + pipe_ctx->stream->link->link_state_valid = true; + + if (pipe_ctx->stream_res.tg->funcs->set_out_mux) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) + otg_out_dest = OUT_MUX_HPO_DP; + else + otg_out_dest = OUT_MUX_DIO; + pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, 
otg_out_dest); + } + + link_hwss->setup_stream_attribute(pipe_ctx); + + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + bool apply_edp_fast_boot_optimization = + pipe_ctx->stream->apply_edp_fast_boot_optimization; + + pipe_ctx->stream->apply_edp_fast_boot_optimization = false; + + // Enable VPG before building infoframe + if (vpg && vpg->funcs->vpg_poweron) + vpg->funcs->vpg_poweron(vpg); + + resource_build_info_frame(pipe_ctx); + dc->hwss.update_info_frame(pipe_ctx); + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); + + /* Do not touch link on seamless boot optimization. */ + if (pipe_ctx->stream->apply_seamless_boot_optimization) { + pipe_ctx->stream->dpms_off = false; + + /* Still enable stream features & audio on seamless boot for DP external displays */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { + enable_stream_features(pipe_ctx); + dc->hwss.enable_audio_stream(pipe_ctx); + } + +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, false); +#endif + return; + } + + /* eDP lit up by bios already, no need to enable again. */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + apply_edp_fast_boot_optimization && + !pipe_ctx->stream->timing.flags.DSC && + !pipe_ctx->next_odm_pipe) { + pipe_ctx->stream->dpms_off = false; +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, false); +#endif + return; + } + + if (pipe_ctx->stream->dpms_off) + return; + + /* Have to setup DSC before DIG FE and BE are connected (which happens before the + * link training). This is to make sure the bandwidth sent to DIG BE won't be + * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag + * will be automatically set at a later time when the video is enabled + * (DP_VID_STREAM_EN = 1). + */ + if (pipe_ctx->stream->timing.flags.DSC) { + if (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal)) + link_set_dsc_enable(pipe_ctx, true); + + } + + status = enable_link(state, pipe_ctx); + + if (status != DC_OK) { + DC_LOG_WARNING("enabling link %u failed: %d\n", + pipe_ctx->stream->link->link_index, + status); + + /* Abort stream enable *unless* the failure was due to + * DP link training - some DP monitors will recover and + * show the stream anyway. But MST displays can't proceed + * without link training. + */ + if (status != DC_FAIL_DP_LINK_TRAINING || + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + if (false == stream->link->link_status.link_active) + disable_link(stream->link, &pipe_ctx->link_res, + pipe_ctx->stream->signal); + BREAK_TO_DEBUGGER(); + return; + } + } + + /* turn off otg test pattern if enable */ + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) + pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + COLOR_DEPTH_UNDEFINED); + + /* This second call is needed to reconfigure the DIG + * as a workaround for the incorrect value being applied + * from transmitter control. 
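+	 * The same guards as the first call apply: virtual and 128b/132b
+	 * (HPO-routed) signals do not use the DIO encoder, so they are
+	 * skipped below.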
+	 */
+	if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) ||
+			link_is_dp_128b_132b_signal(pipe_ctx)))
+		if (link_enc)
+			link_enc->funcs->setup(
+				link_enc,
+				pipe_ctx->stream->signal);
+
+	dc->hwss.enable_stream(pipe_ctx);
+
+	/* Set DSC PPS SDP (AKA "info frames") */
+	if (pipe_ctx->stream->timing.flags.DSC) {
+		if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+				dc_is_virtual_signal(pipe_ctx->stream->signal)) {
+			dp_set_dsc_on_rx(pipe_ctx, true);
+			link_set_dsc_pps_packet(pipe_ctx, true, true);
+		}
+	}
+
+	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+		allocate_mst_payload(pipe_ctx);
+	else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+			link_is_dp_128b_132b_signal(pipe_ctx))
+		update_sst_payload(pipe_ctx, true);
+
+	dc->hwss.unblank_stream(pipe_ctx,
+		&pipe_ctx->stream->link->cur_link_settings);
+
+	if (stream->sink_patches.delay_ignore_msa > 0)
+		msleep(stream->sink_patches.delay_ignore_msa);
+
+	if (dc_is_dp_signal(pipe_ctx->stream->signal))
+		enable_stream_features(pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+	update_psp_stream_config(pipe_ctx, false);
+#endif
+
+	dc->hwss.enable_audio_stream(pipe_ctx);
+
+	} else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+		if (link_is_dp_128b_132b_signal(pipe_ctx))
+			dp_fpga_hpo_enable_link_and_stream(state, pipe_ctx);
+		if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+				dc_is_virtual_signal(pipe_ctx->stream->signal))
+			link_set_dsc_enable(pipe_ctx, true);
+	}
+
+	if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
+		set_avmute(pipe_ctx, false);
+	}
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
new file mode 100644
index 00000000000000..33d312dabdb8b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DPMS_H__ +#define __DC_LINK_DPMS_H__ + +#include "link.h" +bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, + bool enable, bool immediate_update); +struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp( + const struct dc_stream_state *stream, + const struct dc_link *link); +void link_set_all_streams_dpms_off_for_link(struct dc_link *link); +void link_get_master_pipes_with_dpms_on(const struct dc_link *link, + struct dc_state *state, + uint8_t *count, + struct pipe_ctx *pipes[MAX_PIPES]); +#endif /* __DC_LINK_DPMS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c new file mode 100644 index 00000000000000..aeb26a4d539e9f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -0,0 +1,577 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file owns the creation/destruction of link structure. + */ +#include "link_factory.h" +#include "protocols/link_ddc.h" +#include "protocols/link_edp_panel_control.h" +#include "protocols/link_hpd.h" +#include "gpio_service_interface.h" +#include "atomfirmware.h" + +#define DC_LOGGER_INIT(logger) + +#define LINK_INFO(...) 
\ + DC_LOG_HW_HOTPLUG( \ + __VA_ARGS__) + +static enum transmitter translate_encoder_to_transmitter(struct graphics_object_id encoder) +{ + switch (encoder.id) { + case ENCODER_ID_INTERNAL_UNIPHY: + switch (encoder.enum_id) { + case ENUM_ID_1: + return TRANSMITTER_UNIPHY_A; + case ENUM_ID_2: + return TRANSMITTER_UNIPHY_B; + default: + return TRANSMITTER_UNKNOWN; + } + break; + case ENCODER_ID_INTERNAL_UNIPHY1: + switch (encoder.enum_id) { + case ENUM_ID_1: + return TRANSMITTER_UNIPHY_C; + case ENUM_ID_2: + return TRANSMITTER_UNIPHY_D; + default: + return TRANSMITTER_UNKNOWN; + } + break; + case ENCODER_ID_INTERNAL_UNIPHY2: + switch (encoder.enum_id) { + case ENUM_ID_1: + return TRANSMITTER_UNIPHY_E; + case ENUM_ID_2: + return TRANSMITTER_UNIPHY_F; + default: + return TRANSMITTER_UNKNOWN; + } + break; + case ENCODER_ID_INTERNAL_UNIPHY3: + switch (encoder.enum_id) { + case ENUM_ID_1: + return TRANSMITTER_UNIPHY_G; + default: + return TRANSMITTER_UNKNOWN; + } + break; + case ENCODER_ID_EXTERNAL_NUTMEG: + switch (encoder.enum_id) { + case ENUM_ID_1: + return TRANSMITTER_NUTMEG_CRT; + default: + return TRANSMITTER_UNKNOWN; + } + break; + case ENCODER_ID_EXTERNAL_TRAVIS: + switch (encoder.enum_id) { + case ENUM_ID_1: + return TRANSMITTER_TRAVIS_CRT; + case ENUM_ID_2: + return TRANSMITTER_TRAVIS_LCD; + default: + return TRANSMITTER_UNKNOWN; + } + break; + default: + return TRANSMITTER_UNKNOWN; + } +} + +static void link_destruct(struct dc_link *link) +{ + int i; + + if (link->hpd_gpio) { + dal_gpio_destroy_irq(&link->hpd_gpio); + link->hpd_gpio = NULL; + } + + if (link->ddc) + link_destroy_ddc_service(&link->ddc); + + if (link->panel_cntl) + link->panel_cntl->funcs->destroy(&link->panel_cntl); + + if (link->link_enc) { + /* Update link encoder resource tracking variables. These are used for + * the dynamic assignment of link encoders to streams. Virtual links + * are not assigned encoder resources on creation. 
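+		 * The link_encoders[] table is indexed by engine id relative to
+		 * ENGINE_ID_DIGA, mirroring the assignment made in
+		 * dc_link_construct_phy().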
+ */ + if (link->link_id.id != CONNECTOR_ID_VIRTUAL) { + link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = NULL; + link->dc->res_pool->dig_link_enc_count--; + } + link->link_enc->funcs->destroy(&link->link_enc); + } + + if (link->local_sink) + dc_sink_release(link->local_sink); + + for (i = 0; i < link->sink_count; ++i) + dc_sink_release(link->remote_sinks[i]); +} + +static enum channel_id get_ddc_line(struct dc_link *link) +{ + struct ddc *ddc; + enum channel_id channel; + + channel = CHANNEL_ID_UNKNOWN; + + ddc = get_ddc_pin(link->ddc); + + if (ddc) { + switch (dal_ddc_get_line(ddc)) { + case GPIO_DDC_LINE_DDC1: + channel = CHANNEL_ID_DDC1; + break; + case GPIO_DDC_LINE_DDC2: + channel = CHANNEL_ID_DDC2; + break; + case GPIO_DDC_LINE_DDC3: + channel = CHANNEL_ID_DDC3; + break; + case GPIO_DDC_LINE_DDC4: + channel = CHANNEL_ID_DDC4; + break; + case GPIO_DDC_LINE_DDC5: + channel = CHANNEL_ID_DDC5; + break; + case GPIO_DDC_LINE_DDC6: + channel = CHANNEL_ID_DDC6; + break; + case GPIO_DDC_LINE_DDC_VGA: + channel = CHANNEL_ID_DDC_VGA; + break; + case GPIO_DDC_LINE_I2C_PAD: + channel = CHANNEL_ID_I2C_PAD; + break; + default: + BREAK_TO_DEBUGGER(); + break; + } + } + + return channel; +} + +static bool dc_link_construct_phy(struct dc_link *link, + const struct link_init_data *init_params) +{ + uint8_t i; + struct ddc_service_init_data ddc_service_init_data = { 0 }; + struct dc_context *dc_ctx = init_params->ctx; + struct encoder_init_data enc_init_data = { 0 }; + struct panel_cntl_init_data panel_cntl_init_data = { 0 }; + struct integrated_info info = { 0 }; + struct dc_bios *bios = init_params->dc->ctx->dc_bios; + const struct dc_vbios_funcs *bp_funcs = bios->funcs; + struct bp_disp_connector_caps_info disp_connect_caps_info = { 0 }; + + DC_LOGGER_INIT(dc_ctx->logger); + + link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; + link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID; + link->link_status.dpcd_caps = &link->dpcd_caps; + + link->dc = init_params->dc; + link->ctx = dc_ctx; + link->link_index = init_params->link_index; + + memset(&link->preferred_training_settings, 0, + sizeof(struct dc_link_training_overrides)); + memset(&link->preferred_link_setting, 0, + sizeof(struct dc_link_settings)); + + link->link_id = + bios->funcs->get_connector_id(bios, init_params->connector_index); + + link->ep_type = DISPLAY_ENDPOINT_PHY; + + DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id); + + if (bios->funcs->get_disp_connector_caps_info) { + bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info); + link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY; + DC_LOG_DC("BIOS object table - is_internal_display: %d", link->is_internal_display); + } + + if (link->link_id.type != OBJECT_TYPE_CONNECTOR) { + dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! 
type %d expected %d\n", + __func__, init_params->connector_index, + link->link_id.type, OBJECT_TYPE_CONNECTOR); + goto create_fail; + } + + if (link->dc->res_pool->funcs->link_init) + link->dc->res_pool->funcs->link_init(link); + + link->hpd_gpio = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); + + if (link->hpd_gpio) { + dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT); + dal_gpio_unlock_pin(link->hpd_gpio); + link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio); + + DC_LOG_DC("BIOS object table - hpd_gpio id: %d", link->hpd_gpio->id); + DC_LOG_DC("BIOS object table - hpd_gpio en: %d", link->hpd_gpio->en); + } + + switch (link->link_id.id) { + case CONNECTOR_ID_HDMI_TYPE_A: + link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A; + + break; + case CONNECTOR_ID_SINGLE_LINK_DVID: + case CONNECTOR_ID_SINGLE_LINK_DVII: + link->connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + break; + case CONNECTOR_ID_DUAL_LINK_DVID: + case CONNECTOR_ID_DUAL_LINK_DVII: + link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK; + break; + case CONNECTOR_ID_DISPLAY_PORT: + case CONNECTOR_ID_USBC: + link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT; + + if (link->hpd_gpio) + link->irq_source_hpd_rx = + dal_irq_get_rx_source(link->hpd_gpio); + + break; + case CONNECTOR_ID_EDP: + link->connector_signal = SIGNAL_TYPE_EDP; + + if (link->hpd_gpio) { + if (!link->dc->config.allow_edp_hotplug_detection) + link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; + + switch (link->dc->config.allow_edp_hotplug_detection) { + case 1: // only the 1st eDP handles hotplug + if (link->link_index == 0) + link->irq_source_hpd_rx = + dal_irq_get_rx_source(link->hpd_gpio); + else + link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; + break; + case 2: // only the 2nd eDP handles hotplug + if (link->link_index == 1) + link->irq_source_hpd_rx = + dal_irq_get_rx_source(link->hpd_gpio); + else + link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; + break; + default: + break; + } + } + + break; + case CONNECTOR_ID_LVDS: + link->connector_signal = SIGNAL_TYPE_LVDS; + break; + default: + DC_LOG_WARNING("Unsupported Connector type:%d!\n", + link->link_id.id); + goto create_fail; + } + + /* TODO: #DAL3 Implement id to str function.*/ + LINK_INFO("Connector[%d] description:" + "signal %d\n", + init_params->connector_index, + link->connector_signal); + + ddc_service_init_data.ctx = link->ctx; + ddc_service_init_data.id = link->link_id; + ddc_service_init_data.link = link; + link->ddc = link_create_ddc_service(&ddc_service_init_data); + + if (!link->ddc) { + DC_ERROR("Failed to create ddc_service!\n"); + goto ddc_create_fail; + } + + if (!link->ddc->ddc_pin) { + DC_ERROR("Failed to get I2C info for connector!\n"); + goto ddc_create_fail; + } + + link->ddc_hw_inst = + dal_ddc_get_line(get_ddc_pin(link->ddc)); + + + if (link->dc->res_pool->funcs->panel_cntl_create && + (link->link_id.id == CONNECTOR_ID_EDP || + link->link_id.id == CONNECTOR_ID_LVDS)) { + panel_cntl_init_data.ctx = dc_ctx; + panel_cntl_init_data.inst = + panel_cntl_init_data.ctx->dc_edp_id_count; + link->panel_cntl = + link->dc->res_pool->funcs->panel_cntl_create( + &panel_cntl_init_data); + panel_cntl_init_data.ctx->dc_edp_id_count++; + + if (link->panel_cntl == NULL) { + DC_ERROR("Failed to create link panel_cntl!\n"); + goto panel_cntl_create_fail; + } + } + + enc_init_data.ctx = dc_ctx; + bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, + &enc_init_data.encoder); + enc_init_data.connector = link->link_id; + enc_init_data.channel = get_ddc_line(link); + 
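	/* The DDC channel and HPD source resolved below tie the encoder to
+	 * the physical connector pins described by the BIOS object table. */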
enc_init_data.hpd_source = get_hpd_line(link);
+
+	link->hpd_src = enc_init_data.hpd_source;
+
+	enc_init_data.transmitter =
+			translate_encoder_to_transmitter(enc_init_data.encoder);
+	link->link_enc =
+			link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);
+
+	if (!link->link_enc) {
+		DC_ERROR("Failed to create link encoder!\n");
+		goto link_enc_create_fail;
+	}
+
+	/* dereference link_enc only after the NULL check above */
+	DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
+	DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
+
+	/* Update link encoder tracking variables. These are used for the dynamic
+	 * assignment of link encoders to streams.
+	 */
+	link->eng_id = link->link_enc->preferred_engine;
+	link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = link->link_enc;
+	link->dc->res_pool->dig_link_enc_count++;
+
+	link->link_enc_hw_inst = link->link_enc->transmitter;
+	for (i = 0; i < 4; i++) {
+		if (bp_funcs->get_device_tag(dc_ctx->dc_bios,
+				link->link_id, i,
+				&link->device_tag) != BP_RESULT_OK) {
+			DC_ERROR("Failed to find device tag!\n");
+			goto device_tag_fail;
+		}
+
+		/* Look for a device tag that matches the connector signal:
+		 * CRT for RGB, LCD for other supported signal types
+		 */
+		if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios,
+				link->device_tag.dev_id))
+			continue;
+		if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT &&
+				link->connector_signal != SIGNAL_TYPE_RGB)
+			continue;
+		if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD &&
+				link->connector_signal == SIGNAL_TYPE_RGB)
+			continue;
+
+		DC_LOG_DC("BIOS object table - device_tag.acpi_device: %d", link->device_tag.acpi_device);
+		DC_LOG_DC("BIOS object table - device_tag.dev_id.device_type: %d", link->device_tag.dev_id.device_type);
+		DC_LOG_DC("BIOS object table - device_tag.dev_id.enum_id: %d", link->device_tag.dev_id.enum_id);
+		break;
+	}
+
+	if (bios->integrated_info)
+		info = *bios->integrated_info;
+
+	/* Look for channel mapping corresponding to connector and device tag */
+	for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
+		struct external_display_path *path =
+				&info.ext_disp_conn_info.path[i];
+
+		if (path->device_connector_id.enum_id == link->link_id.enum_id &&
+				path->device_connector_id.id == link->link_id.id &&
+				path->device_connector_id.type == link->link_id.type) {
+			if (link->device_tag.acpi_device != 0 &&
+					path->device_acpi_enum == link->device_tag.acpi_device) {
+				link->ddi_channel_mapping = path->channel_mapping;
+				link->chip_caps = path->caps;
+				DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
+				DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
+			} else if (path->device_tag ==
+					link->device_tag.dev_id.raw_device_tag) {
+				link->ddi_channel_mapping = path->channel_mapping;
+				link->chip_caps = path->caps;
+				DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
+				DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
+			}
+
+			if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
+				link->bios_forced_drive_settings.VOLTAGE_SWING =
+						(info.ext_disp_conn_info.fixdpvoltageswing & 0x3);
+				link->bios_forced_drive_settings.PRE_EMPHASIS =
+						((info.ext_disp_conn_info.fixdpvoltageswing >> 2) & 0x3);
+			}
+
+			break;
+		}
+	}
+
+	if (bios->funcs->get_atom_dc_golden_table)
+		bios->funcs->get_atom_dc_golden_table(bios);
+
+	/*
+	 * TODO check if GPIO programmed correctly
+	 *
+	 * If GPIO isn't programmed
correctly, HPD might not rise or drain
+	 * fast enough, leading to bounces.
+	 */
+	program_hpd_filter(link);
+
+	link->psr_settings.psr_vtotal_control_support = false;
+	link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
+	DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
+	return true;
+device_tag_fail:
+	link->link_enc->funcs->destroy(&link->link_enc);
+link_enc_create_fail:
+	if (link->panel_cntl != NULL)
+		link->panel_cntl->funcs->destroy(&link->panel_cntl);
+panel_cntl_create_fail:
+	link_destroy_ddc_service(&link->ddc);
+ddc_create_fail:
+create_fail:
+
+	if (link->hpd_gpio) {
+		dal_gpio_destroy_irq(&link->hpd_gpio);
+		link->hpd_gpio = NULL;
+	}
+
+	DC_LOG_DC("BIOS object table - %s failed.\n", __func__);
+	return false;
+}
+
+static bool dc_link_construct_dpia(struct dc_link *link,
+			const struct link_init_data *init_params)
+{
+	struct ddc_service_init_data ddc_service_init_data = { 0 };
+	struct dc_context *dc_ctx = init_params->ctx;
+
+	DC_LOGGER_INIT(dc_ctx->logger);
+
+	/* Initialize irq sources for hpd and hpd rx */
+	link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+	link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+	link->link_status.dpcd_caps = &link->dpcd_caps;
+
+	link->dc = init_params->dc;
+	link->ctx = dc_ctx;
+	link->link_index = init_params->link_index;
+
+	memset(&link->preferred_training_settings, 0,
+			sizeof(struct dc_link_training_overrides));
+	memset(&link->preferred_link_setting, 0,
+			sizeof(struct dc_link_settings));
+
+	/* Dummy init for link id */
+	link->link_id.type = OBJECT_TYPE_CONNECTOR;
+	link->link_id.id = CONNECTOR_ID_DISPLAY_PORT;
+	link->link_id.enum_id = ENUM_ID_1 + init_params->connector_index;
+	link->is_internal_display = false;
+	link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
+	LINK_INFO("Connector[%d] description: signal %d\n",
+			init_params->connector_index,
+			link->connector_signal);
+
+	link->ep_type = DISPLAY_ENDPOINT_USB4_DPIA;
+	link->is_dig_mapping_flexible = true;
+
+	/* TODO: Initialize link : funcs->link_init */
+
+	ddc_service_init_data.ctx = link->ctx;
+	ddc_service_init_data.id = link->link_id;
+	ddc_service_init_data.link = link;
+	/* Set indicator for dpia link so that ddc won't be created */
+	ddc_service_init_data.is_dpia_link = true;
+
+	link->ddc = link_create_ddc_service(&ddc_service_init_data);
+	if (!link->ddc) {
+		DC_ERROR("Failed to create ddc_service!\n");
+		goto ddc_create_fail;
+	}
+
+	/* Set dpia port index: 0 to number of dpia ports */
+	link->ddc_hw_inst = init_params->connector_index;
+
+	/* TODO: Create link encoder */
+
+	link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
+	/* Some docks seem to NAK I2C writes to segment pointer with mot=0.
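+	 * The flag below is consumed by the DDC code when reading EDID
+	 * extension blocks, so that E-DDC segment pointer writes are issued
+	 * in a form such docks accept.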
*/ + link->wa_flags.dp_mot_reset_segment = true; + + return true; + +ddc_create_fail: + return false; +} + +static bool link_construct(struct dc_link *link, + const struct link_init_data *init_params) +{ + /* Handle dpia case */ + if (init_params->is_dpia_link == true) + return dc_link_construct_dpia(link, init_params); + else + return dc_link_construct_phy(link, init_params); +} + +struct dc_link *link_create(const struct link_init_data *init_params) +{ + struct dc_link *link = + kzalloc(sizeof(*link), GFP_KERNEL); + + if (NULL == link) + goto alloc_fail; + + if (false == link_construct(link, init_params)) + goto construct_fail; + + return link; + +construct_fail: + kfree(link); + +alloc_fail: + return NULL; +} + +void link_destroy(struct dc_link **link) +{ + link_destruct(*link); + kfree(*link); + *link = NULL; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.h b/drivers/gpu/drm/amd/display/dc/link/link_factory.h new file mode 100644 index 00000000000000..5b846147c4a64b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.h @@ -0,0 +1,29 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_FACTORY_H__ +#define __LINK_FACTORY_H__ +#include "link.h" + +#endif /* __LINK_FACTORY_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.c b/drivers/gpu/drm/amd/display/dc/link/link_resource.c new file mode 100644 index 00000000000000..bd42bb273c0ce8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.c @@ -0,0 +1,114 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +/* FILE POLICY AND INTENDED USAGE: + * This file implements accessors to link resource. + */ + +#include "link_resource.h" +#include "protocols/link_dp_capability.h" + +void link_get_cur_link_res(const struct dc_link *link, + struct link_resource *link_res) +{ + int i; + struct pipe_ctx *pipe = NULL; + + memset(link_res, 0, sizeof(*link_res)); + + for (i = 0; i < MAX_PIPES; i++) { + pipe = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) { + if (pipe->stream->link == link) { + *link_res = pipe->link_res; + break; + } + } + } + +} + +void link_get_cur_res_map(const struct dc *dc, uint32_t *map) +{ + struct dc_link *link; + uint32_t i; + uint32_t hpo_dp_recycle_map = 0; + + *map = 0; + + if (dc->caps.dp_hpo) { + for (i = 0; i < dc->caps.max_links; i++) { + link = dc->links[i]; + if (link->link_status.link_active && + link_dp_get_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING && + link_dp_get_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING) + /* hpo dp link encoder is considered as recycled, when RX reports 128b/132b encoding capability + * but current link doesn't use it. + */ + hpo_dp_recycle_map |= (1 << i); + } + *map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT); + } +} + +void link_restore_res_map(const struct dc *dc, uint32_t *map) +{ + struct dc_link *link; + uint32_t i; + unsigned int available_hpo_dp_count; + uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK) + >> LINK_RES_HPO_DP_REC_MAP__SHIFT; + + if (dc->caps.dp_hpo) { + available_hpo_dp_count = dc->res_pool->hpo_dp_link_enc_count; + /* remove excess 128b/132b encoding support for not recycled links */ + for (i = 0; i < dc->caps.max_links; i++) { + if ((hpo_dp_recycle_map & (1 << i)) == 0) { + link = dc->links[i]; + if (link->type != dc_connection_none && + link_dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { + if (available_hpo_dp_count > 0) + available_hpo_dp_count--; + else + /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */ + link->verified_link_cap.link_rate = LINK_RATE_HIGH3; + } + } + } + /* remove excess 128b/132b encoding support for recycled links */ + for (i = 0; i < dc->caps.max_links; i++) { + if ((hpo_dp_recycle_map & (1 << i)) != 0) { + link = dc->links[i]; + if (link->type != dc_connection_none && + link_dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { + if (available_hpo_dp_count > 0) + available_hpo_dp_count--; + else + /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */ + link->verified_link_cap.link_rate = LINK_RATE_HIGH3; + } + } + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.h b/drivers/gpu/drm/amd/display/dc/link/link_resource.h new file mode 100644 index 00000000000000..45554d30adf092 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.h @@ -0,0 +1,31 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_RESOURCE_H__ +#define __LINK_RESOURCE_H__ +#include "link.h" +void link_get_cur_link_res(const struct dc_link *link, + struct link_resource *link_res); + +#endif /* __LINK_RESOURCE_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c new file mode 100644 index 00000000000000..d4f6ee6ca948cb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -0,0 +1,398 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file owns timing validation against various link limitations. (ex. + * link bandwidth, receiver capability or our hardware capability) It also + * provides helper functions exposing bandwidth formulas used in validation. 
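+ * For example, link_validate_mode_timing() below compares the bandwidth a
+ * timing requires (dc_bandwidth_in_kbps_from_timing()) against what the
+ * current link settings can carry (dc_link_bandwidth_kbps()) and fails
+ * the mode with DC_NO_DP_LINK_BANDWIDTH when it does not fit.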
+ */ +#include "link_validation.h" +#include "resource.h" + +#define DC_LOGGER_INIT(logger) + +static uint32_t get_tmds_output_pixel_clock_100hz(const struct dc_crtc_timing *timing) +{ + + uint32_t pxl_clk = timing->pix_clk_100hz; + + if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) + pxl_clk /= 2; + else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) + pxl_clk = pxl_clk * 2 / 3; + + if (timing->display_color_depth == COLOR_DEPTH_101010) + pxl_clk = pxl_clk * 10 / 8; + else if (timing->display_color_depth == COLOR_DEPTH_121212) + pxl_clk = pxl_clk * 12 / 8; + + return pxl_clk; +} + +static bool dp_active_dongle_validate_timing( + const struct dc_crtc_timing *timing, + const struct dpcd_caps *dpcd_caps) +{ + const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps; + + switch (dpcd_caps->dongle_type) { + case DISPLAY_DONGLE_DP_VGA_CONVERTER: + case DISPLAY_DONGLE_DP_DVI_CONVERTER: + case DISPLAY_DONGLE_DP_DVI_DONGLE: + if (timing->pixel_encoding == PIXEL_ENCODING_RGB) + return true; + else + return false; + default: + break; + } + + if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER && + dongle_caps->extendedCapValid == true) { + /* Check Pixel Encoding */ + switch (timing->pixel_encoding) { + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + break; + case PIXEL_ENCODING_YCBCR422: + if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through) + return false; + break; + case PIXEL_ENCODING_YCBCR420: + if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through) + return false; + break; + default: + /* Invalid Pixel Encoding*/ + return false; + } + + switch (timing->display_color_depth) { + case COLOR_DEPTH_666: + case COLOR_DEPTH_888: + /*888 and 666 should always be supported*/ + break; + case COLOR_DEPTH_101010: + if (dongle_caps->dp_hdmi_max_bpc < 10) + return false; + break; + case COLOR_DEPTH_121212: + if (dongle_caps->dp_hdmi_max_bpc < 12) + return false; + break; + case COLOR_DEPTH_141414: + case COLOR_DEPTH_161616: + default: + /* These color depths are currently not supported */ + return false; + } + + /* Check 3D format */ + switch (timing->timing_3d_format) { + case TIMING_3D_FORMAT_NONE: + case TIMING_3D_FORMAT_FRAME_ALTERNATE: + /*Only frame alternate 3D is supported on active dongle*/ + break; + default: + /*other 3D formats are not supported due to bad infoframe translation */ + return false; + } + + if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter + struct dc_crtc_timing outputTiming = *timing; + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (timing->flags.DSC && !timing->dsc_cfg.is_frl) + /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ + outputTiming.flags.DSC = 0; +#endif + if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) + return false; + } else { // DP to HDMI TMDS converter + if (get_tmds_output_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) + return false; + } + } + + if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 && + dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT == 0 && + dongle_caps->dfp_cap_ext.supported) { + + if (dongle_caps->dfp_cap_ext.max_pixel_rate_in_mps < (timing->pix_clk_100hz / 10000)) + return false; + + if (dongle_caps->dfp_cap_ext.max_video_h_active_width < timing->h_addressable) + return false; + + if (dongle_caps->dfp_cap_ext.max_video_v_active_height < timing->v_addressable) + return false; + + if (timing->pixel_encoding == 
PIXEL_ENCODING_RGB) {
+			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+				return false;
+			if (timing->display_color_depth == COLOR_DEPTH_666 &&
+					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_6bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_888 &&
+					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_8bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_10bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_12bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_16bpc)
+				return false;
+		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) {
+			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_ycbcr444)
+				return false;
+			if (timing->display_color_depth == COLOR_DEPTH_888 &&
+					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_8bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_10bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_12bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_16bpc)
+				return false;
+		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_ycbcr422)
+				return false;
+			if (timing->display_color_depth == COLOR_DEPTH_888 &&
+					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_8bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_10bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_12bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_16bpc)
+				return false;
+		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_ycbcr420)
+				return false;
+			if (timing->display_color_depth == COLOR_DEPTH_888 &&
+					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_8bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_10bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_12bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_16bpc)
+				return false;
+		}
+	}
+
+	return true;
+}
+
+uint32_t dp_link_bandwidth_kbps(
+	const struct dc_link *link,
+	const struct dc_link_settings *link_settings)
+{
+	uint32_t total_data_bw_efficiency_x10000 = 0;
+	uint32_t link_rate_per_lane_kbps = 0;
+
+	switch (link_dp_get_encoding_format(link_settings)) {
+	case DP_8b_10b_ENCODING:
+		/* For 8b/10b encoding:
+		 * link rate is defined in the unit of LINK_RATE_REF_FREQ_IN_KHZ per DP byte per lane.
+		 * data bandwidth efficiency is 80% with an additional 3% overhead if FEC is supported.
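+		 * As a worked example (assuming the 27 MHz reference, i.e.
+		 * LINK_RATE_REF_FREQ_IN_KHZ == 27000): HBR2 has link_rate 0x14
+		 * (20), so the raw per-lane rate is 20 * 27000 kHz * 10 bits =
+		 * 5,400,000 kbps; a 4-lane HBR2 link at 80% efficiency then
+		 * carries 5,400,000 * 4 * 8000 / 10000 = 17,280,000 kbps.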
+		 */
+		link_rate_per_lane_kbps = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE;
+		total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000;
+		if (dc_link_should_enable_fec(link)) {
+			total_data_bw_efficiency_x10000 /= 100;
+			total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100;
+		}
+		break;
+	case DP_128b_132b_ENCODING:
+		/* For 128b/132b encoding:
+		 * link rate is defined in the unit of 10 Mbps per lane.
+		 * total data bandwidth efficiency is always 96.71%.
+		 */
+		link_rate_per_lane_kbps = link_settings->link_rate * 10000;
+		total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000;
+		break;
+	default:
+		break;
+	}
+
+	/* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */
+	return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000;
+}
+
+uint32_t link_timing_bandwidth_kbps(
+	const struct dc_crtc_timing *timing)
+{
+	uint32_t bits_per_channel = 0;
+	uint32_t kbps;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (timing->flags.DSC)
+		return dc_dsc_stream_bandwidth_in_kbps(timing,
+				timing->dsc_cfg.bits_per_pixel,
+				timing->dsc_cfg.num_slices_h,
+				timing->dsc_cfg.is_dp);
+#endif /* CONFIG_DRM_AMD_DC_DCN */
+
+	switch (timing->display_color_depth) {
+	case COLOR_DEPTH_666:
+		bits_per_channel = 6;
+		break;
+	case COLOR_DEPTH_888:
+		bits_per_channel = 8;
+		break;
+	case COLOR_DEPTH_101010:
+		bits_per_channel = 10;
+		break;
+	case COLOR_DEPTH_121212:
+		bits_per_channel = 12;
+		break;
+	case COLOR_DEPTH_141414:
+		bits_per_channel = 14;
+		break;
+	case COLOR_DEPTH_161616:
+		bits_per_channel = 16;
+		break;
+	default:
+		ASSERT(bits_per_channel != 0);
+		bits_per_channel = 8;
+		break;
+	}
+
+	kbps = timing->pix_clk_100hz / 10;
+	kbps *= bits_per_channel;
+
+	if (timing->flags.Y_ONLY != 1) {
+		/* Y-Only carries a single channel and thus needs only 1/3 the
+		 * bandwidth of RGB; every other encoding gets the 3x multiplier
+		 * before any chroma subsampling adjustment.
+		 */
+		kbps *= 3;
+		if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+			kbps /= 2;
+		else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+			kbps = kbps * 2 / 3;
+	}
+
+	return kbps;
+}
+
+static bool dp_validate_mode_timing(
+	struct dc_link *link,
+	const struct dc_crtc_timing *timing)
+{
+	uint32_t req_bw;
+	uint32_t max_bw;
+
+	const struct dc_link_settings *link_setting;
+
+	/* According to spec, VSC SDP should be used if pixel format is YCbCr420 */
+	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 &&
+			!link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED &&
+			dal_graphics_object_id_get_connector_id(link->link_id) != CONNECTOR_ID_VIRTUAL)
+		return false;
+
+	/* always allow the DP fail-safe mode (640x480 @ 25.175 MHz) */
+	if ((timing->pix_clk_100hz / 10) == (uint32_t) 25175 &&
+			timing->h_addressable == (uint32_t) 640 &&
+			timing->v_addressable == (uint32_t) 480)
+		return true;
+
+	link_setting = dc_link_get_link_cap(link);
+
+	/* TODO: DYNAMIC_VALIDATION needs to be implemented */
+	/*if (flags.DYNAMIC_VALIDATION == 1 &&
+		link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN)
+		link_setting = &link->verified_link_cap;
+	*/
+
+	req_bw = dc_bandwidth_in_kbps_from_timing(timing);
+	max_bw = dc_link_bandwidth_kbps(link, link_setting);
+
+	if (req_bw <= max_bw) {
+		/* remember the biggest mode here; during initial link training
+		 * (to get verified_link_cap) the link service notifies the
+		 * upper layer when it cannot train at the reported cap, and
+		 * the upper layer re-enumerates modes.
+		 * This is not necessary if the lower verified_link_cap is
+		 * enough to drive all the modes.
+		 */
+
+		/* TODO: DYNAMIC_VALIDATION needs to be implemented */
+		/* if (flags.DYNAMIC_VALIDATION == 1)
+			dpsst->max_req_bw_for_verified_linkcap = dal_max(
+				dpsst->max_req_bw_for_verified_linkcap, req_bw); */
+		return true;
+	} else
+		return false;
+}
+
+enum dc_status link_validate_mode_timing(
+		const struct dc_stream_state *stream,
+		struct dc_link *link,
+		const struct dc_crtc_timing *timing)
+{
+	uint32_t max_pix_clk = stream->link->dongle_max_pix_clk * 10;
+	struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
+
+	/* A workaround to avoid failing any modes for the EDID override
+	 * feature on a topology change, such as a lower-quality DP cable
+	 * or a different dongle.
+	 */
+	if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL)
+		return DC_OK;
+
+	/* Passive dongle */
+	if (max_pix_clk != 0 && get_tmds_output_pixel_clock_100hz(timing) > max_pix_clk)
+		return DC_EXCEED_DONGLE_CAP;
+
+	/* Active dongle */
+	if (!dp_active_dongle_validate_timing(timing, dpcd_caps))
+		return DC_EXCEED_DONGLE_CAP;
+
+	switch (stream->signal) {
+	case SIGNAL_TYPE_EDP:
+	case SIGNAL_TYPE_DISPLAY_PORT:
+		if (!dp_validate_mode_timing(
+				link,
+				timing))
+			return DC_NO_DP_LINK_BANDWIDTH;
+		break;
+
+	default:
+		break;
+	}
+
+	return DC_OK;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
new file mode 100644
index 00000000000000..ab6a44f5003263
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * + * Authors: AMD + * + */ +#ifndef __LINK_VALIDATION_H__ +#define __LINK_VALIDATION_H__ +#include "link.h" +#endif /* __LINK_VALIDATION_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c similarity index 57% rename from drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c rename to drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c index ce8d6a54ca54b0..5269125bc2a470 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c @@ -23,20 +23,20 @@ * */ -#include "dm_services.h" -#include "dm_helpers.h" -#include "gpio_service_interface.h" -#include "include/ddc_service_types.h" -#include "include/grph_object_id.h" -#include "include/dpcd_defs.h" -#include "include/logger_interface.h" -#include "include/vector.h" -#include "core_types.h" -#include "dc_link_ddc.h" +/* FILE POLICY AND INTENDED USAGE: + * + * This file implements generic display communication protocols such as i2c, aux + * and scdc. The file should not contain any specific applications of these + * protocols such as display capability query, detection, or handshaking such as + * link training. + */ +#include "link_ddc.h" +#include "vector.h" #include "dce/dce_aux.h" -#include "dmub/inc/dmub_cmd.h" +#include "dal_asic_id.h" #include "link_dpcd.h" -#include "include/dal_asic_id.h" +#include "dm_helpers.h" +#include "atomfirmware.h" #define DC_LOGGER_INIT(logger) @@ -45,87 +45,6 @@ static const uint8_t DP_VGA_DONGLE_BRANCH_DEV_NAME[] = "DpVga"; static const uint8_t DP_DVI_CONVERTER_ID_4[] = "m2DVIa"; static const uint8_t DP_DVI_CONVERTER_ID_5[] = "3393N2"; -#define AUX_POWER_UP_WA_DELAY 500 -#define I2C_OVER_AUX_DEFER_WA_DELAY 70 -#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40 -#define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1 - -/* CV smart dongle slave address for retrieving supported HDTV modes*/ -#define CV_SMART_DONGLE_ADDRESS 0x20 -/* DVI-HDMI dongle slave address for retrieving dongle signature*/ -#define DVI_HDMI_DONGLE_ADDRESS 0x68 -struct dvi_hdmi_dongle_signature_data { - int8_t vendor[3];/* "AMD" */ - uint8_t version[2]; - uint8_t size; - int8_t id[11];/* "6140063500G"*/ -}; -/* DP-HDMI dongle slave address for retrieving dongle signature*/ -#define DP_HDMI_DONGLE_ADDRESS 0x40 -static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR"; -#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04 - -struct dp_hdmi_dongle_signature_data { - int8_t id[15];/* "DP-HDMI ADAPTOR"*/ - uint8_t eot;/* end of transmition '\x4' */ -}; - -/* SCDC Address defines (HDMI 2.0)*/ -#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3 -#define HDMI_SCDC_ADDRESS 0x54 -#define HDMI_SCDC_SINK_VERSION 0x01 -#define HDMI_SCDC_SOURCE_VERSION 0x02 -#define HDMI_SCDC_UPDATE_0 0x10 -#define HDMI_SCDC_TMDS_CONFIG 0x20 -#define HDMI_SCDC_SCRAMBLER_STATUS 0x21 -#define HDMI_SCDC_CONFIG_0 0x30 -#define HDMI_SCDC_STATUS_FLAGS 0x40 -#define HDMI_SCDC_ERR_DETECT 0x50 -#define HDMI_SCDC_TEST_CONFIG 0xC0 -#define HDMI_SCDC_DEVICE_ID 0xD3 - -union hdmi_scdc_update_read_data { - uint8_t byte[2]; - struct { - uint8_t STATUS_UPDATE:1; - uint8_t CED_UPDATE:1; - uint8_t RR_TEST:1; - uint8_t RESERVED:5; - uint8_t RESERVED2:8; - } fields; -}; - -union hdmi_scdc_status_flags_data { - uint8_t byte; - struct { - uint8_t CLOCK_DETECTED:1; - uint8_t CH0_LOCKED:1; - uint8_t CH1_LOCKED:1; - uint8_t CH2_LOCKED:1; - uint8_t RESERVED:4; - } fields; -}; - -union hdmi_scdc_ced_data { - uint8_t byte[7]; - struct { - uint8_t CH0_8LOW:8; - uint8_t 
CH0_7HIGH:7; - uint8_t CH0_VALID:1; - uint8_t CH1_8LOW:8; - uint8_t CH1_7HIGH:7; - uint8_t CH1_VALID:1; - uint8_t CH2_8LOW:8; - uint8_t CH2_7HIGH:7; - uint8_t CH2_VALID:1; - uint8_t CHECKSUM:8; - uint8_t RESERVED:8; - uint8_t RESERVED2:8; - uint8_t RESERVED3:8; - uint8_t RESERVED4:4; - } fields; -}; - struct i2c_payloads { struct vector payloads; }; @@ -158,7 +77,7 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p) #define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b)) -void dal_ddc_i2c_payloads_add( +static void i2c_payloads_add( struct i2c_payloads *payloads, uint32_t address, uint32_t len, @@ -226,7 +145,7 @@ static void ddc_service_construct( ddc_service->wa.raw = 0; } -struct ddc_service *dal_ddc_service_create( +struct ddc_service *link_create_ddc_service( struct ddc_service_init_data *init_data) { struct ddc_service *ddc_service; @@ -246,7 +165,7 @@ static void ddc_service_destruct(struct ddc_service *ddc) dal_gpio_destroy_ddc(&ddc->ddc_pin); } -void dal_ddc_service_destroy(struct ddc_service **ddc) +void link_destroy_ddc_service(struct ddc_service **ddc) { if (!ddc || !*ddc) { BREAK_TO_DEBUGGER(); @@ -257,19 +176,14 @@ void dal_ddc_service_destroy(struct ddc_service **ddc) *ddc = NULL; } -enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc) -{ - return DDC_SERVICE_TYPE_CONNECTOR; -} - -void dal_ddc_service_set_transaction_type( +void set_ddc_transaction_type( struct ddc_service *ddc, enum ddc_transaction_type type) { ddc->transaction_type = type; } -bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc) +bool link_is_in_aux_transaction_mode(struct ddc_service *ddc) { switch (ddc->transaction_type) { case DDC_TRANSACTION_TYPE_I2C_OVER_AUX: @@ -282,7 +196,7 @@ bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc) return false; } -void ddc_service_set_dongle_type(struct ddc_service *ddc, +void set_dongle_type(struct ddc_service *ddc, enum display_dongle_type dongle_type) { ddc->dongle_type = dongle_type; @@ -324,7 +238,7 @@ static uint32_t defer_delay_converter_wa( #define DP_TRANSLATOR_DELAY 5 -uint32_t get_defer_delay(struct ddc_service *ddc) +uint32_t link_get_aux_defer_delay(struct ddc_service *ddc) { uint32_t defer_delay = 0; @@ -352,175 +266,45 @@ uint32_t get_defer_delay(struct ddc_service *ddc) return defer_delay; } -static bool i2c_read( - struct ddc_service *ddc, - uint32_t address, - uint8_t *buffer, - uint32_t len) -{ - uint8_t offs_data = 0; - struct i2c_payload payloads[2] = { - { - .write = true, - .address = address, - .length = 1, - .data = &offs_data }, - { - .write = false, - .address = address, - .length = len, - .data = buffer } }; - - struct i2c_command command = { - .payloads = payloads, - .number_of_payloads = 2, - .engine = DDC_I2C_COMMAND_ENGINE, - .speed = ddc->ctx->dc->caps.i2c_speed_in_khz }; - - return dm_helpers_submit_i2c( - ddc->ctx, - ddc->link, - &command); -} - -void dal_ddc_service_i2c_query_dp_dual_mode_adaptor( - struct ddc_service *ddc, - struct display_sink_capability *sink_cap) +static bool submit_aux_command(struct ddc_service *ddc, + struct aux_payload *payload) { - uint8_t i; - bool is_valid_hdmi_signature; - enum display_dongle_type *dongle = &sink_cap->dongle_type; - uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE]; - bool is_type2_dongle = false; - int retry_count = 2; - struct dp_hdmi_dongle_signature_data *dongle_signature; - - /* Assume we have no valid DP passive dongle connected */ - *dongle = DISPLAY_DONGLE_NONE; - sink_cap->max_hdmi_pixel_clock = 
DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK; - - /* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/ - if (!i2c_read( - ddc, - DP_HDMI_DONGLE_ADDRESS, - type2_dongle_buf, - sizeof(type2_dongle_buf))) { - /* Passive HDMI dongles can sometimes fail here without retrying*/ - while (retry_count > 0) { - if (i2c_read(ddc, - DP_HDMI_DONGLE_ADDRESS, - type2_dongle_buf, - sizeof(type2_dongle_buf))) - break; - retry_count--; - } - if (retry_count == 0) { - *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; - sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK; - - CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), - "DP-DVI passive dongle %dMhz: ", - DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); - return; - } - } - - /* Check if Type 2 dongle.*/ - if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID) - is_type2_dongle = true; - - dongle_signature = - (struct dp_hdmi_dongle_signature_data *)type2_dongle_buf; + uint32_t retrieved = 0; + bool ret = false; - is_valid_hdmi_signature = true; + if (!ddc) + return false; - /* Check EOT */ - if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) { - is_valid_hdmi_signature = false; - } + if (!payload) + return false; - /* Check signature */ - for (i = 0; i < sizeof(dongle_signature->id); ++i) { - /* If its not the right signature, - * skip mismatch in subversion byte.*/ - if (dongle_signature->id[i] != - dp_hdmi_dongle_signature_str[i] && i != 3) { + do { + struct aux_payload current_payload; + bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >= + payload->length; + uint32_t payload_length = is_end_of_payload ? + payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; - if (is_type2_dongle) { - is_valid_hdmi_signature = false; - break; - } + current_payload.address = payload->address; + current_payload.data = &payload->data[retrieved]; + current_payload.defer_delay = payload->defer_delay; + current_payload.i2c_over_aux = payload->i2c_over_aux; + current_payload.length = payload_length; + /* set mot (middle of transaction) to false if it is the last payload */ + current_payload.mot = is_end_of_payload ? payload->mot:true; + current_payload.write_status_update = false; + current_payload.reply = payload->reply; + current_payload.write = payload->write; - } - } + ret = link_aux_transfer_with_retries_no_mutex(ddc, ¤t_payload); - if (is_type2_dongle) { - uint32_t max_tmds_clk = - type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK]; - - max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2; - - if (0 == max_tmds_clk || - max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK || - max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) { - *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; - - CONN_DATA_DETECT(ddc->link, type2_dongle_buf, - sizeof(type2_dongle_buf), - "DP-DVI passive dongle %dMhz: ", - DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); - } else { - if (is_valid_hdmi_signature == true) { - *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE; - - CONN_DATA_DETECT(ddc->link, type2_dongle_buf, - sizeof(type2_dongle_buf), - "Type 2 DP-HDMI passive dongle %dMhz: ", - max_tmds_clk); - } else { - *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE; - - CONN_DATA_DETECT(ddc->link, type2_dongle_buf, - sizeof(type2_dongle_buf), - "Type 2 DP-HDMI passive dongle (no signature) %dMhz: ", - max_tmds_clk); - - } - - /* Multiply by 1000 to convert to kHz. 
*/ - sink_cap->max_hdmi_pixel_clock = - max_tmds_clk * 1000; - } - sink_cap->is_dongle_type_one = false; - - } else { - if (is_valid_hdmi_signature == true) { - *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE; - - CONN_DATA_DETECT(ddc->link, type2_dongle_buf, - sizeof(type2_dongle_buf), - "Type 1 DP-HDMI passive dongle %dMhz: ", - sink_cap->max_hdmi_pixel_clock / 1000); - } else { - *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE; - - CONN_DATA_DETECT(ddc->link, type2_dongle_buf, - sizeof(type2_dongle_buf), - "Type 1 DP-HDMI passive dongle (no signature) %dMhz: ", - sink_cap->max_hdmi_pixel_clock / 1000); - } - sink_cap->is_dongle_type_one = true; - } + retrieved += payload_length; + } while (retrieved < payload->length && ret == true); - return; + return ret; } -enum { - DP_SINK_CAP_SIZE = - DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1 -}; - -bool dal_ddc_service_query_ddc_data( +bool link_query_ddc_data( struct ddc_service *ddc, uint32_t address, uint8_t *write_buf, @@ -530,7 +314,7 @@ bool dal_ddc_service_query_ddc_data( { bool success = true; uint32_t payload_size = - dal_ddc_service_is_in_aux_transaction_mode(ddc) ? + link_is_in_aux_transaction_mode(ddc) ? DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE; uint32_t write_payloads = @@ -544,13 +328,13 @@ bool dal_ddc_service_query_ddc_data( if (!payloads_num) return false; - if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) { + if (link_is_in_aux_transaction_mode(ddc)) { struct aux_payload payload; payload.i2c_over_aux = true; payload.address = address; payload.reply = NULL; - payload.defer_delay = get_defer_delay(ddc); + payload.defer_delay = link_get_aux_defer_delay(ddc); payload.write_status_update = false; if (write_size != 0) { @@ -562,7 +346,7 @@ bool dal_ddc_service_query_ddc_data( payload.length = write_size; payload.data = write_buf; - success = dal_ddc_submit_aux_command(ddc, &payload); + success = submit_aux_command(ddc, &payload); } if (read_size != 0 && success) { @@ -574,7 +358,7 @@ bool dal_ddc_service_query_ddc_data( payload.length = read_size; payload.data = read_buf; - success = dal_ddc_submit_aux_command(ddc, &payload); + success = submit_aux_command(ddc, &payload); } } else { struct i2c_command command = {0}; @@ -588,10 +372,10 @@ bool dal_ddc_service_query_ddc_data( command.engine = DDC_I2C_COMMAND_ENGINE; command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz; - dal_ddc_i2c_payloads_add( + i2c_payloads_add( &payloads, address, write_size, write_buf, true); - dal_ddc_i2c_payloads_add( + i2c_payloads_add( &payloads, address, read_size, read_buf, false); command.number_of_payloads = @@ -608,51 +392,6 @@ bool dal_ddc_service_query_ddc_data( return success; } -bool dal_ddc_submit_aux_command(struct ddc_service *ddc, - struct aux_payload *payload) -{ - uint32_t retrieved = 0; - bool ret = false; - - if (!ddc) - return false; - - if (!payload) - return false; - - do { - struct aux_payload current_payload; - bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >= - payload->length; - uint32_t payload_length = is_end_of_payload ? - payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; - - current_payload.address = payload->address; - current_payload.data = &payload->data[retrieved]; - current_payload.defer_delay = payload->defer_delay; - current_payload.i2c_over_aux = payload->i2c_over_aux; - current_payload.length = payload_length; - /* set mot (middle of transaction) to false if it is the last payload */ - current_payload.mot = is_end_of_payload ? 
payload->mot:true; - current_payload.write_status_update = false; - current_payload.reply = payload->reply; - current_payload.write = payload->write; - - ret = dc_link_aux_transfer_with_retries(ddc, ¤t_payload); - - retrieved += payload_length; - } while (retrieved < payload->length && ret == true); - - return ret; -} - -/* dc_link_aux_transfer_raw() - Attempt to transfer - * the given aux payload. This function does not perform - * retries or handle error states. The reply is returned - * in the payload->reply and the result through - * *operation_result. Returns the number of bytes transferred, - * or -1 on a failure. - */ int dc_link_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *payload, enum aux_return_code_type *operation_result) @@ -665,22 +404,14 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc, } } -/* dc_link_aux_transfer_with_retries() - Attempt to submit an - * aux payload, retrying on timeouts, defers, and busy states - * as outlined in the DP spec. Returns true if the request - * was successful. - * - * Unless you want to implement your own retry semantics, this - * is probably the one you want. - */ -bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, +bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc, struct aux_payload *payload) { return dce_aux_transfer_with_retries(ddc, payload); } -bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc, +bool try_to_configure_aux_timeout(struct ddc_service *ddc, uint32_t timeout) { bool result = false; @@ -713,20 +444,12 @@ bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc, return result; } -/*test only function*/ -void dal_ddc_service_set_ddc_pin( - struct ddc_service *ddc_service, - struct ddc *ddc) -{ - ddc_service->ddc_pin = ddc; -} - -struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service) +struct ddc *get_ddc_pin(struct ddc_service *ddc_service) { return ddc_service->ddc_pin; } -void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service, +void write_scdc_data(struct ddc_service *ddc_service, uint32_t pix_clk, bool lte_340_scramble) { @@ -741,13 +464,13 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service, ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite) return; - dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset, + link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &sink_version, sizeof(sink_version)); if (sink_version == 1) { /*Source Version = 1*/ write_buffer[0] = HDMI_SCDC_SOURCE_VERSION; write_buffer[1] = 1; - dal_ddc_service_query_ddc_data(ddc_service, slave_address, + link_query_ddc_data(ddc_service, slave_address, write_buffer, sizeof(write_buffer), NULL, 0); /*Read Request from SCDC caps*/ } @@ -760,11 +483,11 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service, } else { write_buffer[1] = 0; } - dal_ddc_service_query_ddc_data(ddc_service, slave_address, write_buffer, + link_query_ddc_data(ddc_service, slave_address, write_buffer, sizeof(write_buffer), NULL, 0); } -void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service) +void read_scdc_data(struct ddc_service *ddc_service) { uint8_t slave_address = HDMI_SCDC_ADDRESS; uint8_t offset = HDMI_SCDC_TMDS_CONFIG; @@ -774,20 +497,19 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service) ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite) return; - dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset, + 
link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &tmds_config, sizeof(tmds_config)); if (tmds_config & 0x1) { union hdmi_scdc_status_flags_data status_data = {0}; uint8_t scramble_status = 0; offset = HDMI_SCDC_SCRAMBLER_STATUS; - dal_ddc_service_query_ddc_data(ddc_service, slave_address, + link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &scramble_status, sizeof(scramble_status)); offset = HDMI_SCDC_STATUS_FLAGS; - dal_ddc_service_query_ddc_data(ddc_service, slave_address, + link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &status_data.byte, sizeof(status_data.byte)); } } - diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h similarity index 52% rename from drivers/gpu/drm/amd/display/include/i2caux_interface.h rename to drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h index 418fbf8c5c3a61..86e9d2e886d6f3 100644 --- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h @@ -23,60 +23,38 @@ * */ -#ifndef __DAL_I2CAUX_INTERFACE_H__ -#define __DAL_I2CAUX_INTERFACE_H__ +#ifndef __DAL_DDC_SERVICE_H__ +#define __DAL_DDC_SERVICE_H__ -#include "dc_types.h" -#include "gpio_service_interface.h" +#include "link.h" +#define AUX_POWER_UP_WA_DELAY 500 +#define I2C_OVER_AUX_DEFER_WA_DELAY 70 +#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40 +#define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1 +#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/ -#define DEFAULT_AUX_MAX_DATA_SIZE 16 -#define AUX_MAX_DEFER_WRITE_RETRY 20 +#define EDID_SEGMENT_SIZE 256 -struct aux_payload { - /* set following flag to read/write I2C data, - * reset it to read/write DPCD data */ - bool i2c_over_aux; - /* set following flag to write data, - * reset it to read data */ - bool write; - bool mot; - bool write_status_update; +void set_ddc_transaction_type( + struct ddc_service *ddc, + enum ddc_transaction_type type); - uint32_t address; - uint32_t length; - uint8_t *data; - /* - * used to return the reply type of the transaction - * ignored if NULL - */ - uint8_t *reply; - /* expressed in milliseconds - * zero means "use default value" - */ - uint32_t defer_delay; +bool try_to_configure_aux_timeout(struct ddc_service *ddc, + uint32_t timeout); -}; +void write_scdc_data( + struct ddc_service *ddc_service, + uint32_t pix_clk, + bool lte_340_scramble); -struct aux_command { - struct aux_payload *payloads; - uint8_t number_of_payloads; +void read_scdc_data( + struct ddc_service *ddc_service); - /* expressed in milliseconds - * zero means "use default value" */ - uint32_t defer_delay; +void set_dongle_type(struct ddc_service *ddc, + enum display_dongle_type dongle_type); - /* zero means "use default value" */ - uint32_t max_defer_write_retry; +struct ddc *get_ddc_pin(struct ddc_service *ddc_service); - enum i2c_mot_mode mot; -}; +#endif /* __DAL_DDC_SERVICE_H__ */ -union aux_config { - struct { - uint32_t ALLOW_AUX_WHEN_HPD_LOW:1; - } bits; - uint32_t raw; -}; - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c new file mode 100644 index 00000000000000..4874d1bf1dcb01 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -0,0 +1,2246 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements the DP-specific link capability retrieval sequence.
+ * It is responsible for retrieving, parsing, overriding, and deciding the
+ * capability obtained from the DP link. Link capability consists of
+ * encoders, DPRXs, cables, retimers, USB and all other possible backend
+ * capabilities. Other components should include this header file in order
+ * to access link capability. Accessing link capability by dereferencing
+ * dc_link outside dp_link_capability is not a recommended method as it
+ * makes the component dependent on the underlying data structure used to
+ * represent link capability instead of function interfaces.
+ */
+
+#include "link_dp_capability.h"
+#include "link_ddc.h"
+#include "link_dpcd.h"
+#include "link_dp_dpia.h"
+#include "link_dp_phy.h"
+#include "link_edp_panel_control.h"
+#include "link_dp_irq_handler.h"
+#include "link/accessories/link_dp_trace.h"
+#include "link_dp_training.h"
+#include "atomfirmware.h"
+#include "resource.h"
+#include "link_enc_cfg.h"
+#include "dc_dmub_srv.h"
+#include "gpio_service_interface.h"
+
+#define DC_LOGGER \
+	link->ctx->logger
+#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
+#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
+
+struct dp_lt_fallback_entry {
+	enum dc_lane_count lane_count;
+	enum dc_link_rate link_rate;
+};
+
+static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = {
+	/* This link training fallback array is ordered by
+	 * link bandwidth from highest to lowest.
+	 * The DP spec makes it a normative policy to always
+	 * choose the next highest link bandwidth during
+	 * link training fallback.
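+	 * For example, a failure at 4-lane UHBR13.5 (54 Gbps raw) falls
+	 * back to 2-lane UHBR20 (40 Gbps raw). The ordering accounts for
+	 * encoding efficiency, which is why some 128b/132b entries rank
+	 * above nominally faster 8b/10b entries.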
+ */ + {LANE_COUNT_FOUR, LINK_RATE_UHBR20}, + {LANE_COUNT_FOUR, LINK_RATE_UHBR13_5}, + {LANE_COUNT_TWO, LINK_RATE_UHBR20}, + {LANE_COUNT_FOUR, LINK_RATE_UHBR10}, + {LANE_COUNT_TWO, LINK_RATE_UHBR13_5}, + {LANE_COUNT_FOUR, LINK_RATE_HIGH3}, + {LANE_COUNT_ONE, LINK_RATE_UHBR20}, + {LANE_COUNT_TWO, LINK_RATE_UHBR10}, + {LANE_COUNT_FOUR, LINK_RATE_HIGH2}, + {LANE_COUNT_ONE, LINK_RATE_UHBR13_5}, + {LANE_COUNT_TWO, LINK_RATE_HIGH3}, + {LANE_COUNT_ONE, LINK_RATE_UHBR10}, + {LANE_COUNT_TWO, LINK_RATE_HIGH2}, + {LANE_COUNT_FOUR, LINK_RATE_HIGH}, + {LANE_COUNT_ONE, LINK_RATE_HIGH3}, + {LANE_COUNT_FOUR, LINK_RATE_LOW}, + {LANE_COUNT_ONE, LINK_RATE_HIGH2}, + {LANE_COUNT_TWO, LINK_RATE_HIGH}, + {LANE_COUNT_TWO, LINK_RATE_LOW}, + {LANE_COUNT_ONE, LINK_RATE_HIGH}, + {LANE_COUNT_ONE, LINK_RATE_LOW}, +}; + +static const struct dc_link_settings fail_safe_link_settings = { + .lane_count = LANE_COUNT_ONE, + .link_rate = LINK_RATE_LOW, + .link_spread = LINK_SPREAD_DISABLED, +}; + +bool is_dp_active_dongle(const struct dc_link *link) +{ + return (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_VGA_CONVERTER) && + (link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_CONVERTER); +} + +bool is_dp_branch_device(const struct dc_link *link) +{ + return link->dpcd_caps.is_branch_dev; +} + +static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc) +{ + switch (bpc) { + case DOWN_STREAM_MAX_8BPC: + return 8; + case DOWN_STREAM_MAX_10BPC: + return 10; + case DOWN_STREAM_MAX_12BPC: + return 12; + case DOWN_STREAM_MAX_16BPC: + return 16; + default: + break; + } + + return -1; +} + +uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count) +{ + switch (lttpr_repeater_count) { + case 0x80: // 1 lttpr repeater + return 1; + case 0x40: // 2 lttpr repeaters + return 2; + case 0x20: // 3 lttpr repeaters + return 3; + case 0x10: // 4 lttpr repeaters + return 4; + case 0x08: // 5 lttpr repeaters + return 5; + case 0x04: // 6 lttpr repeaters + return 6; + case 0x02: // 7 lttpr repeaters + return 7; + case 0x01: // 8 lttpr repeaters + return 8; + default: + break; + } + return 0; // invalid value +} + +uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw) +{ + switch (bw) { + case 0b001: + return 9000000; + case 0b010: + return 18000000; + case 0b011: + return 24000000; + case 0b100: + return 32000000; + case 0b101: + return 40000000; + case 0b110: + return 48000000; + } + + return 0; +} + +static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz) +{ + enum dc_link_rate link_rate; + // LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation. 
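+	// For example, 2,700,000 kHz is 2.7 Gbps (HBR), and
+	// 2,700,000 / 270,000 = 10 = 0x0a = LINK_RATE_HIGH.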
+ switch (link_rate_in_khz) { + case 1620000: + link_rate = LINK_RATE_LOW; // Rate_1 (RBR) - 1.62 Gbps/Lane + break; + case 2160000: + link_rate = LINK_RATE_RATE_2; // Rate_2 - 2.16 Gbps/Lane + break; + case 2430000: + link_rate = LINK_RATE_RATE_3; // Rate_3 - 2.43 Gbps/Lane + break; + case 2700000: + link_rate = LINK_RATE_HIGH; // Rate_4 (HBR) - 2.70 Gbps/Lane + break; + case 3240000: + link_rate = LINK_RATE_RBR2; // Rate_5 (RBR2)- 3.24 Gbps/Lane + break; + case 4320000: + link_rate = LINK_RATE_RATE_6; // Rate_6 - 4.32 Gbps/Lane + break; + case 5400000: + link_rate = LINK_RATE_HIGH2; // Rate_7 (HBR2)- 5.40 Gbps/Lane + break; + case 8100000: + link_rate = LINK_RATE_HIGH3; // Rate_8 (HBR3)- 8.10 Gbps/Lane + break; + default: + link_rate = LINK_RATE_UNKNOWN; + break; + } + return link_rate; +} + +static union dp_cable_id intersect_cable_id( + union dp_cable_id *a, union dp_cable_id *b) +{ + union dp_cable_id out; + + out.bits.UHBR10_20_CAPABILITY = MIN(a->bits.UHBR10_20_CAPABILITY, + b->bits.UHBR10_20_CAPABILITY); + out.bits.UHBR13_5_CAPABILITY = MIN(a->bits.UHBR13_5_CAPABILITY, + b->bits.UHBR13_5_CAPABILITY); + out.bits.CABLE_TYPE = MAX(a->bits.CABLE_TYPE, b->bits.CABLE_TYPE); + + return out; +} + +/* + * Return PCON's post FRL link training supported BW if its non-zero, otherwise return max_supported_frl_bw. + */ +static uint32_t intersect_frl_link_bw_support( + const uint32_t max_supported_frl_bw_in_kbps, + const union hdmi_encoded_link_bw hdmi_encoded_link_bw) +{ + uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps; + + // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode) + if (hdmi_encoded_link_bw.bits.FRL_MODE) { + if (hdmi_encoded_link_bw.bits.BW_48Gbps) + supported_bw_in_kbps = 48000000; + else if (hdmi_encoded_link_bw.bits.BW_40Gbps) + supported_bw_in_kbps = 40000000; + else if (hdmi_encoded_link_bw.bits.BW_32Gbps) + supported_bw_in_kbps = 32000000; + else if (hdmi_encoded_link_bw.bits.BW_24Gbps) + supported_bw_in_kbps = 24000000; + else if (hdmi_encoded_link_bw.bits.BW_18Gbps) + supported_bw_in_kbps = 18000000; + else if (hdmi_encoded_link_bw.bits.BW_9Gbps) + supported_bw_in_kbps = 9000000; + } + + return supported_bw_in_kbps; +} + +static enum clock_source_id get_clock_source_id(struct dc_link *link) +{ + enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_UNDEFINED; + struct clock_source *dp_cs = link->dc->res_pool->dp_clock_source; + + if (dp_cs != NULL) { + dp_cs_id = dp_cs->id; + } else { + /* + * dp clock source is not initialized for some reason. + * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used + */ + ASSERT(dp_cs); + } + + return dp_cs_id; +} + +static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, + int length) +{ + int retry = 0; + + if (!link->dpcd_caps.dpcd_rev.raw) { + do { + dc_link_dp_receiver_power_ctrl(link, true); + core_link_read_dpcd(link, DP_DPCD_REV, + dpcd_data, length); + link->dpcd_caps.dpcd_rev.raw = dpcd_data[ + DP_DPCD_REV - + DP_DPCD_REV]; + } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw); + } + + if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) { + switch (link->dpcd_caps.branch_dev_id) { + /* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down + * all internal circuits including AUX communication preventing + * reading DPCD table and EDID (spec violation). 
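+	 * For example, the DP_DPCD_REV read above can plausibly return 0 on
+	 * the first attempts on such a dongle, which is why it is retried
+	 * several times before giving up.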
+ * Encoder will skip DP RX power down on disable_output to + * keep receiver powered all the time.*/ + case DP_BRANCH_DEVICE_ID_0010FA: + case DP_BRANCH_DEVICE_ID_0080E1: + case DP_BRANCH_DEVICE_ID_00E04C: + link->wa_flags.dp_keep_receiver_powered = true; + break; + + /* TODO: May need work around for other dongles. */ + default: + link->wa_flags.dp_keep_receiver_powered = false; + break; + } + } else + link->wa_flags.dp_keep_receiver_powered = false; +} + +bool dc_link_is_fec_supported(const struct dc_link *link) +{ + /* TODO - use asic cap instead of link_enc->features + * we no longer know which link enc to use for this link before commit + */ + struct link_encoder *link_enc = NULL; + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); + + return (dc_is_dp_signal(link->connector_signal) && link_enc && + link_enc->features.fec_supported && + link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && + !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)); +} + +bool dc_link_should_enable_fec(const struct dc_link *link) +{ + bool force_disable = false; + + if (link->fec_state == dc_link_fec_enabled) + force_disable = false; + else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST && + link->local_sink && + link->local_sink->edid_caps.panel_patch.disable_fec) + force_disable = true; + else if (link->connector_signal == SIGNAL_TYPE_EDP + && (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields. + dsc_support.DSC_SUPPORT == false + || link->panel_config.dsc.disable_dsc_edp + || !link->dc->caps.edp_dsc_support)) + force_disable = true; + + return !force_disable && dc_link_is_fec_supported(link); +} + +bool link_is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) +{ + /* If this assert is hit then we have a link encoder dynamic management issue */ + ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true); + return (pipe_ctx->stream_res.hpo_dp_stream_enc && + pipe_ctx->link_res.hpo_dp_link_enc && + dc_is_dp_signal(pipe_ctx->stream->signal)); +} + +bool dp_is_lttpr_present(struct dc_link *link) +{ + return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 && + link->dpcd_caps.lttpr_caps.max_lane_count > 0 && + link->dpcd_caps.lttpr_caps.max_lane_count <= 4 && + link->dpcd_caps.lttpr_caps.revision.raw >= 0x14); +} + +/* in DP compliance test, DPR-120 may have + * a random value in its MAX_LINK_BW dpcd field. + * We map it to the maximum supported link rate that + * is smaller than MAX_LINK_BW in this case. 
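+ * For example, a raw MAX_LINK_BW of 0x12 falls between LINK_RATE_HIGH
+ * (0x0a) and LINK_RATE_HIGH2 (0x14), so it is mapped down to
+ * LINK_RATE_HIGH.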
+ */
+static enum dc_link_rate get_link_rate_from_max_link_bw(
+		uint8_t max_link_bw)
+{
+	enum dc_link_rate link_rate;
+
+	if (max_link_bw >= LINK_RATE_HIGH3) {
+		link_rate = LINK_RATE_HIGH3;
+	} else if (max_link_bw < LINK_RATE_HIGH3
+			&& max_link_bw >= LINK_RATE_HIGH2) {
+		link_rate = LINK_RATE_HIGH2;
+	} else if (max_link_bw < LINK_RATE_HIGH2
+			&& max_link_bw >= LINK_RATE_HIGH) {
+		link_rate = LINK_RATE_HIGH;
+	} else if (max_link_bw < LINK_RATE_HIGH
+			&& max_link_bw >= LINK_RATE_LOW) {
+		link_rate = LINK_RATE_LOW;
+	} else {
+		link_rate = LINK_RATE_UNKNOWN;
+	}
+
+	return link_rate;
+}
+
+static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
+{
+	enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+
+	if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
+		lttpr_max_link_rate = LINK_RATE_UHBR20;
+	else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
+		lttpr_max_link_rate = LINK_RATE_UHBR13_5;
+	else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10)
+		lttpr_max_link_rate = LINK_RATE_UHBR10;
+
+	return lttpr_max_link_rate;
+}
+
+static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link)
+{
+	enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN;
+
+	if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20)
+		cable_max_link_rate = LINK_RATE_UHBR20;
+	else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY)
+		cable_max_link_rate = LINK_RATE_UHBR13_5;
+	else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10)
+		cable_max_link_rate = LINK_RATE_UHBR10;
+
+	return cable_max_link_rate;
+}
+
+static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count)
+{
+	return lane_count <= LANE_COUNT_ONE;
+}
+
+static inline bool reached_minimum_link_rate(enum dc_link_rate link_rate)
+{
+	return link_rate <= LINK_RATE_LOW;
+}
+
+static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count)
+{
+	switch (lane_count) {
+	case LANE_COUNT_FOUR:
+		return LANE_COUNT_TWO;
+	case LANE_COUNT_TWO:
+		return LANE_COUNT_ONE;
+	case LANE_COUNT_ONE:
+		return LANE_COUNT_UNKNOWN;
+	default:
+		return LANE_COUNT_UNKNOWN;
+	}
+}
+
+static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate)
+{
+	switch (link_rate) {
+	case LINK_RATE_UHBR20:
+		return LINK_RATE_UHBR13_5;
+	case LINK_RATE_UHBR13_5:
+		return LINK_RATE_UHBR10;
+	case LINK_RATE_UHBR10:
+		return LINK_RATE_HIGH3;
+	case LINK_RATE_HIGH3:
+		return LINK_RATE_HIGH2;
+	case LINK_RATE_HIGH2:
+		return LINK_RATE_HIGH;
+	case LINK_RATE_HIGH:
+		return LINK_RATE_LOW;
+	case LINK_RATE_LOW:
+		return LINK_RATE_UNKNOWN;
+	default:
+		return LINK_RATE_UNKNOWN;
+	}
+}
+
+static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count)
+{
+	switch (lane_count) {
+	case LANE_COUNT_ONE:
+		return LANE_COUNT_TWO;
+	case LANE_COUNT_TWO:
+		return LANE_COUNT_FOUR;
+	default:
+		return LANE_COUNT_UNKNOWN;
+	}
+}
+
+static enum dc_link_rate increase_link_rate(struct dc_link *link,
+		enum dc_link_rate link_rate)
+{
+	switch (link_rate) {
+	case LINK_RATE_LOW:
+		return LINK_RATE_HIGH;
+	case LINK_RATE_HIGH:
+		return LINK_RATE_HIGH2;
+	case LINK_RATE_HIGH2:
+		return LINK_RATE_HIGH3;
+	case LINK_RATE_HIGH3:
+		return LINK_RATE_UHBR10;
+	case LINK_RATE_UHBR10:
+		/* Up to the DP 2.x specs, UHBR13.5 is the only link rate that
+		 * may not be supported by a DPRX even when a higher link rate
+		 * is supported, so we treat it as a special case for code
+		 * simplicity.
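+		 * For example, a DPRX that reports UHBR20 but not UHBR13.5
+		 * steps directly from UHBR10 to UHBR20 here.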
+		 * When we have new specs with more link rates like this, we
+		 * should consider a more generic solution to handle discrete
+		 * link rate capabilities.
+		 */
+		return link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 ?
+				LINK_RATE_UHBR13_5 : LINK_RATE_UHBR20;
+	case LINK_RATE_UHBR13_5:
+		return LINK_RATE_UHBR20;
+	default:
+		return LINK_RATE_UNKNOWN;
+	}
+}
+
+static bool decide_fallback_link_setting_max_bw_policy(
+		struct dc_link *link,
+		const struct dc_link_settings *max,
+		struct dc_link_settings *cur,
+		enum link_training_result training_result)
+{
+	uint8_t cur_idx = 0, next_idx;
+	bool found = false;
+
+	if (training_result == LINK_TRAINING_ABORT)
+		return false;
+
+	while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks))
+		/* find current index */
+		if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count &&
+				dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate)
+			break;
+		else
+			cur_idx++;
+
+	next_idx = cur_idx + 1;
+
+	while (next_idx < ARRAY_SIZE(dp_lt_fallbacks))
+		/* find next index */
+		if (dp_lt_fallbacks[next_idx].lane_count > max->lane_count ||
+				dp_lt_fallbacks[next_idx].link_rate > max->link_rate)
+			next_idx++;
+		else if (dp_lt_fallbacks[next_idx].link_rate == LINK_RATE_UHBR13_5 &&
+				link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 == 0)
+			/* Up to the DP 2.x specs, UHBR13.5 is the only link
+			 * rate that may not be supported by a DPRX even when
+			 * a higher link rate is supported, so we treat it as
+			 * a special case for code simplicity. When we have
+			 * new specs with more link rates like this, we should
+			 * consider a more generic solution to handle discrete
+			 * link rate capabilities.
+			 */
+			next_idx++;
+		else
+			break;
+
+	if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) {
+		cur->lane_count = dp_lt_fallbacks[next_idx].lane_count;
+		cur->link_rate = dp_lt_fallbacks[next_idx].link_rate;
+		found = true;
+	}
+
+	return found;
+}
+
+/*
+ * function: set link rate and lane count fallback based
+ * on the current link setting and the last link training result
+ * return value:
+ * true - link setting could be set
+ * false - has reached the minimum setting
+ * and no further fallback could be done
+ */
+bool decide_fallback_link_setting(
+		struct dc_link *link,
+		struct dc_link_settings *max,
+		struct dc_link_settings *cur,
+		enum link_training_result training_result)
+{
+	if (link_dp_get_encoding_format(max) == DP_128b_132b_ENCODING ||
+			link->dc->debug.force_dp2_lt_fallback_method)
+		return decide_fallback_link_setting_max_bw_policy(link, max,
+				cur, training_result);
+
+	switch (training_result) {
+	case LINK_TRAINING_CR_FAIL_LANE0:
+	case LINK_TRAINING_CR_FAIL_LANE1:
+	case LINK_TRAINING_CR_FAIL_LANE23:
+	case LINK_TRAINING_LQA_FAIL:
+	{
+		if (!reached_minimum_link_rate(cur->link_rate)) {
+			cur->link_rate = reduce_link_rate(cur->link_rate);
+		} else if (!reached_minimum_lane_count(cur->lane_count)) {
+			cur->link_rate = max->link_rate;
+			if (training_result == LINK_TRAINING_CR_FAIL_LANE0)
+				return false;
+			else if (training_result == LINK_TRAINING_CR_FAIL_LANE1)
+				cur->lane_count = LANE_COUNT_ONE;
+			else if (training_result == LINK_TRAINING_CR_FAIL_LANE23)
+				cur->lane_count = LANE_COUNT_TWO;
+			else
+				cur->lane_count = reduce_lane_count(cur->lane_count);
+		} else {
+			return false;
+		}
+		break;
+	}
+	case LINK_TRAINING_EQ_FAIL_EQ:
+	case LINK_TRAINING_EQ_FAIL_CR_PARTIAL:
+	{
+		if (!reached_minimum_lane_count(cur->lane_count)) {
+			cur->lane_count = reduce_lane_count(cur->lane_count);
+		} else if (!reached_minimum_link_rate(cur->link_rate)) {
+			cur->link_rate =
reduce_link_rate(cur->link_rate); + /* Reduce max link rate to avoid potential infinite loop. + * Needed so that any subsequent CR_FAIL fallback can't + * re-set the link rate higher than the link rate from + * the latest EQ_FAIL fallback. + */ + max->link_rate = cur->link_rate; + cur->lane_count = max->lane_count; + } else { + return false; + } + break; + } + case LINK_TRAINING_EQ_FAIL_CR: + { + if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(cur->link_rate); + /* Reduce max link rate to avoid potential infinite loop. + * Needed so that any subsequent CR_FAIL fallback can't + * re-set the link rate higher than the link rate from + * the latest EQ_FAIL fallback. + */ + max->link_rate = cur->link_rate; + cur->lane_count = max->lane_count; + } else { + return false; + } + break; + } + default: + return false; + } + return true; +} +static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw) +{ + struct dc_link_settings initial_link_setting = { + LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0}; + struct dc_link_settings current_link_setting = + initial_link_setting; + uint32_t link_bw; + + if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) + return false; + + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2. could support the b/w requested by the timing + */ + while (current_link_setting.link_rate <= + link->verified_link_cap.link_rate) { + link_bw = dc_link_bandwidth_kbps( + link, + ¤t_link_setting); + if (req_bw <= link_bw) { + *link_setting = current_link_setting; + return true; + } + + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + } else { + current_link_setting.link_rate = + increase_link_rate(link, + current_link_setting.link_rate); + current_link_setting.lane_count = + initial_link_setting.lane_count; + } + } + + return false; +} + +bool dc_link_decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw) +{ + struct dc_link_settings initial_link_setting; + struct dc_link_settings current_link_setting; + uint32_t link_bw; + + /* + * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. + * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" + */ + if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 || + link->dpcd_caps.edp_supported_link_rates_count == 0) { + *link_setting = link->verified_link_cap; + return true; + } + + memset(&initial_link_setting, 0, sizeof(initial_link_setting)); + initial_link_setting.lane_count = LANE_COUNT_ONE; + initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0]; + initial_link_setting.link_spread = LINK_SPREAD_DISABLED; + initial_link_setting.use_link_rate_set = true; + initial_link_setting.link_rate_set = 0; + current_link_setting = initial_link_setting; + + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2. 
+	while (current_link_setting.link_rate <=
+			link->verified_link_cap.link_rate) {
+		link_bw = dc_link_bandwidth_kbps(
+				link,
+				&current_link_setting);
+		if (req_bw <= link_bw) {
+			*link_setting = current_link_setting;
+			return true;
+		}
+
+		if (current_link_setting.lane_count <
+				link->verified_link_cap.lane_count) {
+			current_link_setting.lane_count =
+					increase_lane_count(
+							current_link_setting.lane_count);
+		} else {
+			if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+				current_link_setting.link_rate_set++;
+				current_link_setting.link_rate =
+					link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+				current_link_setting.lane_count =
+					initial_link_setting.lane_count;
+			} else
+				break;
+		}
+	}
+	return false;
+}
+
+bool decide_edp_link_settings_with_dsc(struct dc_link *link,
+		struct dc_link_settings *link_setting,
+		uint32_t req_bw,
+		enum dc_link_rate max_link_rate)
+{
+	struct dc_link_settings initial_link_setting;
+	struct dc_link_settings current_link_setting;
+	uint32_t link_bw;
+
+	unsigned int policy = 0;
+
+	policy = link->panel_config.dsc.force_dsc_edp_policy;
+	if (max_link_rate == LINK_RATE_UNKNOWN)
+		max_link_rate = link->verified_link_cap.link_rate;
+	/*
+	 * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+	 * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+	 */
+	if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
+			link->dpcd_caps.edp_supported_link_rates_count == 0)) {
+		/* for DSC enabled case, we search for minimum lane count */
+		memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+		initial_link_setting.lane_count = LANE_COUNT_ONE;
+		initial_link_setting.link_rate = LINK_RATE_LOW;
+		initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+		initial_link_setting.use_link_rate_set = false;
+		initial_link_setting.link_rate_set = 0;
+		current_link_setting = initial_link_setting;
+		if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+			return false;
+
+		/* search for the minimum link setting that:
+		 * 1. is supported according to the link training result
+		 * 2. could support the b/w requested by the timing
+		 */
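+		/* 'policy' mirrors panel_config.dsc.force_dsc_edp_policy:
+		 * non-zero minimizes the lane count (grow link rate first),
+		 * zero minimizes the link rate (grow lane count first).
+		 * Either walk stops at the first setting that fits req_bw.
+		 */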
+		while (current_link_setting.link_rate <=
+				max_link_rate) {
+			link_bw = dc_link_bandwidth_kbps(
+					link,
+					&current_link_setting);
+			if (req_bw <= link_bw) {
+				*link_setting = current_link_setting;
+				return true;
+			}
+			if (policy) {
+				/* minimize lane */
+				if (current_link_setting.link_rate < max_link_rate) {
+					current_link_setting.link_rate =
+							increase_link_rate(link,
+									current_link_setting.link_rate);
+				} else {
+					if (current_link_setting.lane_count <
+							link->verified_link_cap.lane_count) {
+						current_link_setting.lane_count =
+								increase_lane_count(
+										current_link_setting.lane_count);
+						current_link_setting.link_rate = initial_link_setting.link_rate;
+					} else
+						break;
+				}
+			} else {
+				/* minimize link rate */
+				if (current_link_setting.lane_count <
+						link->verified_link_cap.lane_count) {
+					current_link_setting.lane_count =
+							increase_lane_count(
+									current_link_setting.lane_count);
+				} else {
+					current_link_setting.link_rate =
+							increase_link_rate(link,
+									current_link_setting.link_rate);
+					current_link_setting.lane_count =
+							initial_link_setting.lane_count;
+				}
+			}
+		}
+		return false;
+	}
+
+	/* if the optimized eDP link rate (ILR) table is supported */
+	memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+	initial_link_setting.lane_count = LANE_COUNT_ONE;
+	initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
+	initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+	initial_link_setting.use_link_rate_set = true;
+	initial_link_setting.link_rate_set = 0;
+	current_link_setting = initial_link_setting;
+
+	/* search for the minimum link setting that:
+	 * 1. is supported according to the link training result
+	 * 2. could support the b/w requested by the timing
+	 */
+	while (current_link_setting.link_rate <=
+			max_link_rate) {
+		link_bw = dc_link_bandwidth_kbps(
+				link,
+				&current_link_setting);
+		if (req_bw <= link_bw) {
+			*link_setting = current_link_setting;
+			return true;
+		}
+		if (policy) {
+			/* minimize lane */
+			if (current_link_setting.link_rate_set <
+					link->dpcd_caps.edp_supported_link_rates_count
+					&& current_link_setting.link_rate < max_link_rate) {
+				current_link_setting.link_rate_set++;
+				current_link_setting.link_rate =
+					link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+			} else {
+				if (current_link_setting.lane_count < link->verified_link_cap.lane_count) {
+					current_link_setting.lane_count =
+							increase_lane_count(
+									current_link_setting.lane_count);
+					current_link_setting.link_rate_set = initial_link_setting.link_rate_set;
+					current_link_setting.link_rate =
+						link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+				} else
+					break;
+			}
+		} else {
+			/* minimize link rate */
+			if (current_link_setting.lane_count <
+					link->verified_link_cap.lane_count) {
+				current_link_setting.lane_count =
+						increase_lane_count(
+								current_link_setting.lane_count);
+			} else {
+				if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+					current_link_setting.link_rate_set++;
+					current_link_setting.link_rate =
+						link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+					current_link_setting.lane_count =
+						initial_link_setting.lane_count;
+				} else
+					break;
+			}
+		}
+	}
+	return false;
+}
+
+static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting)
+{
+	*link_setting = link->verified_link_cap;
+	return true;
+}
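+/* Decision precedence implemented below: an explicitly preferred setting
+ * wins outright; otherwise MST links reuse the verified cap, eDP sinks go
+ * through the ILR-based search (with a DSC-aware variant), and all other
+ * DP sinks use the generic minimal-bandwidth search.
+ */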
+bool link_decide_link_settings(struct dc_stream_state *stream,
+		struct dc_link_settings *link_setting)
+{
+	struct dc_link *link = stream->link;
+	uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+
+	memset(link_setting, 0, sizeof(*link_setting));
+
+	/* if a preferred setting is specified through AMDDP, use it if it is
+	 * enough to drive the mode
+	 */
+	if (link->preferred_link_setting.lane_count !=
+			LANE_COUNT_UNKNOWN &&
+			link->preferred_link_setting.link_rate !=
+					LINK_RATE_UNKNOWN) {
+		*link_setting = link->preferred_link_setting;
+		return true;
+	}
+
+	/* MST doesn't perform link training for now
+	 * TODO: add MST specific link training routine
+	 */
+	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+		decide_mst_link_settings(link, link_setting);
+	} else if (link->connector_signal == SIGNAL_TYPE_EDP) {
+		/* enable edp link optimization for DSC eDP case */
+		if (stream->timing.flags.DSC) {
+			enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN;
+
+			if (link->panel_config.dsc.force_dsc_edp_policy) {
+				/* calculate the max link rate cap */
+				struct dc_link_settings tmp_link_setting;
+				struct dc_crtc_timing tmp_timing = stream->timing;
+				uint32_t orig_req_bw;
+
+				tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
+				tmp_timing.flags.DSC = 0;
+				orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing);
+				dc_link_decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw);
+				max_link_rate = tmp_link_setting.link_rate;
+			}
+			decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate);
+		} else {
+			dc_link_decide_edp_link_settings(link, link_setting, req_bw);
+		}
+	} else {
+		decide_dp_link_settings(link, link_setting, req_bw);
+	}
+
+	return link_setting->lane_count != LANE_COUNT_UNKNOWN &&
+			link_setting->link_rate != LINK_RATE_UNKNOWN;
+}
+
+enum dp_link_encoding link_dp_get_encoding_format(const struct dc_link_settings *link_settings)
+{
+	if ((link_settings->link_rate >= LINK_RATE_LOW) &&
+			(link_settings->link_rate <= LINK_RATE_HIGH3))
+		return DP_8b_10b_ENCODING;
+	else if ((link_settings->link_rate >= LINK_RATE_UHBR10) &&
+			(link_settings->link_rate <= LINK_RATE_UHBR20))
+		return DP_128b_132b_ENCODING;
+	return DP_UNKNOWN_ENCODING;
+}
+
+enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link)
+{
+	struct dc_link_settings link_settings = {0};
+
+	if (!dc_is_dp_signal(link->connector_signal))
+		return DP_UNKNOWN_ENCODING;
+
+	if (link->preferred_link_setting.lane_count !=
+			LANE_COUNT_UNKNOWN &&
+			link->preferred_link_setting.link_rate !=
+					LINK_RATE_UNKNOWN) {
+		link_settings = link->preferred_link_setting;
+	} else {
+		decide_mst_link_settings(link, &link_settings);
+	}
+
+	return link_dp_get_encoding_format(&link_settings);
+}
+
+static void read_dp_device_vendor_id(struct dc_link *link)
+{
+	struct dp_device_vendor_id dp_id;
+
+	/* read IEEE branch device id */
+	core_link_read_dpcd(
+		link,
+		DP_BRANCH_OUI,
+		(uint8_t *)&dp_id,
+		sizeof(dp_id));
+
+	link->dpcd_caps.branch_dev_id =
+		(dp_id.ieee_oui[0] << 16) +
+		(dp_id.ieee_oui[1] << 8) +
+		dp_id.ieee_oui[2];
+
+	memmove(
+		link->dpcd_caps.branch_dev_name,
+		dp_id.ieee_device_id,
+		sizeof(dp_id.ieee_device_id));
+}
+
+static enum dc_status wake_up_aux_channel(struct dc_link *link)
+{
+	enum dc_status status = DC_ERROR_UNEXPECTED;
+	uint32_t aux_channel_retry_cnt = 0;
+	uint8_t dpcd_power_state = '\0';
+
+	while (status != DC_OK && aux_channel_retry_cnt < 10) {
+		status = core_link_read_dpcd(link, DP_SET_POWER,
+				&dpcd_power_state, sizeof(dpcd_power_state));
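+		/* Each pass re-reads DP_SET_POWER (DPCD 600h); the 1 ms
+		 * back-off below only runs when the read fails or the sink
+		 * reports D3, bounding the wake-up wait to roughly 10 ms.
+		 */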
+		/* Delay 1 ms if the AUX CH is in a power-down state. Based on
+		 * spec section 2.3.1.2, the AUX CH may be powered down due to
+		 * a write of 2 to DPCD 600h. The sink AUX CH is then monitoring
+		 * the differential signal and may need up to 1 ms before being
+		 * able to reply.
+		 */
+		if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) {
+			udelay(1000);
+			aux_channel_retry_cnt++;
+		}
+	}
+
+	if (status != DC_OK) {
+		dpcd_power_state = DP_SET_POWER_D0;
+		status = core_link_write_dpcd(
+				link,
+				DP_SET_POWER,
+				&dpcd_power_state,
+				sizeof(dpcd_power_state));
+
+		dpcd_power_state = DP_SET_POWER_D3;
+		status = core_link_write_dpcd(
+				link,
+				DP_SET_POWER,
+				&dpcd_power_state,
+				sizeof(dpcd_power_state));
+		return DC_ERROR_UNEXPECTED;
+	}
+
+	return DC_OK;
+}
+
+static void get_active_converter_info(
+	uint8_t data, struct dc_link *link)
+{
+	union dp_downstream_port_present ds_port = { .byte = data };
+	memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps));
+
+	/* decode converter info */
+	if (!ds_port.fields.PORT_PRESENT) {
+		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+		set_dongle_type(link->ddc,
+				link->dpcd_caps.dongle_type);
+		link->dpcd_caps.is_branch_dev = false;
+		return;
+	}
+
+	/* DPCD 0x5 bit 0 = 1 indicates it's a branch device */
+	link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
+
+	switch (ds_port.fields.PORT_TYPE) {
+	case DOWNSTREAM_VGA:
+		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
+		break;
+	case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS:
+		/* At this point we don't know if it is DVI, HDMI or DP++;
+		 * assume DVI.
+		 */
+		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
+		break;
+	default:
+		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+		break;
+	}
+
+	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
+		uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008Fh.*/
+		union dwnstream_port_caps_byte0 *port_caps =
+				(union dwnstream_port_caps_byte0 *)det_caps;
+		if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
+				det_caps, sizeof(det_caps)) == DC_OK) {
+
+			switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
+			/* Handle DP case as DONGLE_NONE */
+			case DOWN_STREAM_DETAILED_DP:
+				link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+				break;
+			case DOWN_STREAM_DETAILED_VGA:
+				link->dpcd_caps.dongle_type =
+						DISPLAY_DONGLE_DP_VGA_CONVERTER;
+				break;
+			case DOWN_STREAM_DETAILED_DVI:
+				link->dpcd_caps.dongle_type =
+						DISPLAY_DONGLE_DP_DVI_CONVERTER;
+				break;
+			case DOWN_STREAM_DETAILED_HDMI:
+			case DOWN_STREAM_DETAILED_DP_PLUS_PLUS:
+				/* Handle the DP++ active converter case; process
+				 * the DP++ case as the HDMI case according to the
+				 * DP 1.4 spec.
+				 */
+				link->dpcd_caps.dongle_type =
+						DISPLAY_DONGLE_DP_HDMI_CONVERTER;
+
+				link->dpcd_caps.dongle_caps.dongle_type = link->dpcd_caps.dongle_type;
+				if (ds_port.fields.DETAILED_CAPS) {
+
+					union dwnstream_port_caps_byte3_hdmi
+						hdmi_caps = {.raw = det_caps[3] };
+					union dwnstream_port_caps_byte2
+						hdmi_color_caps = {.raw = det_caps[2] };
+					link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz =
+						det_caps[1] * 2500;
+
+					link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
+						hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
+					/* YCBCR capability only for HDMI case */
+					if (port_caps->bits.DWN_STRM_PORTX_TYPE
+							== DOWN_STREAM_DETAILED_HDMI) {
+						link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
+								hdmi_caps.bits.YCrCr422_PASS_THROUGH;
+						link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
+								hdmi_caps.bits.YCrCr420_PASS_THROUGH;
+						link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
hdmi_caps.bits.YCrCr422_CONVERSION; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = + hdmi_caps.bits.YCrCr420_CONVERSION; + } + + link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc = + translate_dpcd_max_bpc( + hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT); + + if (link->dc->caps.dp_hdmi21_pcon_support) { + union hdmi_encoded_link_bw hdmi_encoded_link_bw; + + link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = + dc_link_bw_kbps_from_raw_frl_link_rate_data( + hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT); + + // Intersect reported max link bw support with the supported link rate post FRL link training + if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS, + &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) { + link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support( + link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps, + hdmi_encoded_link_bw); + } + + if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0) + link->dpcd_caps.dongle_caps.extendedCapValid = true; + } + + if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0) + link->dpcd_caps.dongle_caps.extendedCapValid = true; + } + + break; + } + } + } + + set_dongle_type(link->ddc, link->dpcd_caps.dongle_type); + + { + struct dp_sink_hw_fw_revision dp_hw_fw_revision; + + core_link_read_dpcd( + link, + DP_BRANCH_REVISION_START, + (uint8_t *)&dp_hw_fw_revision, + sizeof(dp_hw_fw_revision)); + + link->dpcd_caps.branch_hw_revision = + dp_hw_fw_revision.ieee_hw_rev; + + memmove( + link->dpcd_caps.branch_fw_revision, + dp_hw_fw_revision.ieee_fw_rev, + sizeof(dp_hw_fw_revision.ieee_fw_rev)); + } + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && + link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { + union dp_dfp_cap_ext dfp_cap_ext; + memset(&dfp_cap_ext, '\0', sizeof (dfp_cap_ext)); + core_link_read_dpcd( + link, + DP_DFP_CAPABILITY_EXTENSION_SUPPORT, + dfp_cap_ext.raw, + sizeof(dfp_cap_ext.raw)); + link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported; + link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps = + dfp_cap_ext.fields.max_pixel_rate_in_mps[0] + + (dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8); + link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width = + dfp_cap_ext.fields.max_video_h_active_width[0] + + (dfp_cap_ext.fields.max_video_h_active_width[1] << 8); + link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height = + dfp_cap_ext.fields.max_video_v_active_height[0] + + (dfp_cap_ext.fields.max_video_v_active_height[1] << 8); + link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps = + dfp_cap_ext.fields.encoding_format_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps = + dfp_cap_ext.fields.rgb_color_depth_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps = + dfp_cap_ext.fields.ycbcr444_color_depth_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps = + dfp_cap_ext.fields.ycbcr422_color_depth_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps = + dfp_cap_ext.fields.ycbcr420_color_depth_caps; + DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index); + DC_LOG_DP2("\tdfp_cap_ext.supported = %s", link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? 
"true" : "false"); + DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps); + DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width); + DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height); + } +} + +static void apply_usbc_combo_phy_reset_wa(struct dc_link *link, + struct dc_link_settings *link_settings) +{ + /* Temporary Renoir-specific workaround PHY will sometimes be in bad + * state on hotplugging display from certain USB-C dongle, so add extra + * cycle of enabling and disabling the PHY before first link training. + */ + struct link_resource link_res = {0}; + enum clock_source_id dp_cs_id = get_clock_source_id(link); + + dp_enable_link_phy(link, &link_res, link->connector_signal, + dp_cs_id, link_settings); + dp_disable_link_phy(link, &link_res, link->connector_signal); +} + +static bool dp_overwrite_extended_receiver_cap(struct dc_link *link) +{ + uint8_t dpcd_data[16]; + uint32_t read_dpcd_retry_cnt = 3; + enum dc_status status = DC_ERROR_UNEXPECTED; + union dp_downstream_port_present ds_port = { 0 }; + union down_stream_port_count down_strm_port_count; + union edp_configuration_cap edp_config_cap; + + int i; + + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, + DP_DPCD_REV, + dpcd_data, + sizeof(dpcd_data)); + if (status == DC_OK) + break; + } + + link->dpcd_caps.dpcd_rev.raw = + dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; + + if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) + return false; + + ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - + DP_DPCD_REV]; + + get_active_converter_info(ds_port.byte, link); + + down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - + DP_DPCD_REV]; + + link->dpcd_caps.allow_invalid_MSA_timing_param = + down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; + + link->dpcd_caps.max_ln_count.raw = dpcd_data[ + DP_MAX_LANE_COUNT - DP_DPCD_REV]; + + link->dpcd_caps.max_down_spread.raw = dpcd_data[ + DP_MAX_DOWNSPREAD - DP_DPCD_REV]; + + link->reported_link_cap.lane_count = + link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; + link->reported_link_cap.link_rate = dpcd_data[ + DP_MAX_LINK_RATE - DP_DPCD_REV]; + link->reported_link_cap.link_spread = + link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? + LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; + + edp_config_cap.raw = dpcd_data[ + DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; + link->dpcd_caps.panel_mode_edp = + edp_config_cap.bits.ALT_SCRAMBLER_RESET; + link->dpcd_caps.dpcd_display_control_capable = + edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; + + return true; +} + +void dc_link_overwrite_extended_receiver_cap( + struct dc_link *link) +{ + dp_overwrite_extended_receiver_cap(link); +} + +void dpcd_set_source_specific_data(struct dc_link *link) +{ + if (!link->dc->vendor_signature.is_valid) { + enum dc_status result_write_min_hblank = DC_NOT_SUPPORTED; + struct dpcd_amd_signature amd_signature = {0}; + struct dpcd_amd_device_id amd_device_id = {0}; + + amd_device_id.device_id_byte1 = + (uint8_t)(link->ctx->asic_id.chip_id); + amd_device_id.device_id_byte2 = + (uint8_t)(link->ctx->asic_id.chip_id >> 8); + amd_device_id.dce_version = + (uint8_t)(link->ctx->dce_version); + amd_device_id.dal_version_byte1 = 0x0; // needed? where to get? + amd_device_id.dal_version_byte2 = 0x0; // needed? where to get? 
+ + core_link_read_dpcd(link, DP_SOURCE_OUI, + (uint8_t *)(&amd_signature), + sizeof(amd_signature)); + + if (!((amd_signature.AMD_IEEE_TxSignature_byte1 == 0x0) && + (amd_signature.AMD_IEEE_TxSignature_byte2 == 0x0) && + (amd_signature.AMD_IEEE_TxSignature_byte3 == 0x1A))) { + + amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0; + amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0; + amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A; + + core_link_write_dpcd(link, DP_SOURCE_OUI, + (uint8_t *)(&amd_signature), + sizeof(amd_signature)); + } + + core_link_write_dpcd(link, DP_SOURCE_OUI+0x03, + (uint8_t *)(&amd_device_id), + sizeof(amd_device_id)); + + if (link->ctx->dce_version >= DCN_VERSION_2_0 && + link->dc->caps.min_horizontal_blanking_period != 0) { + + uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period; + + result_write_min_hblank = core_link_write_dpcd(link, + DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size), + sizeof(hblank_size)); + } + DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, + WPP_BIT_FLAG_DC_DETECTION_DP_CAPS, + "result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'", + result_write_min_hblank, + link->link_index, + link->ctx->dce_version, + DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, + link->dc->caps.min_horizontal_blanking_period, + link->dpcd_caps.branch_dev_id, + link->dpcd_caps.branch_dev_name[0], + link->dpcd_caps.branch_dev_name[1], + link->dpcd_caps.branch_dev_name[2], + link->dpcd_caps.branch_dev_name[3], + link->dpcd_caps.branch_dev_name[4], + link->dpcd_caps.branch_dev_name[5]); + } else { + core_link_write_dpcd(link, DP_SOURCE_OUI, + link->dc->vendor_signature.data.raw, + sizeof(link->dc->vendor_signature.data.raw)); + } +} + +void dpcd_write_cable_id_to_dprx(struct dc_link *link) +{ + if (!link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED || + link->dpcd_caps.cable_id.raw == 0 || + link->dprx_states.cable_id_written) + return; + + core_link_write_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX, + &link->dpcd_caps.cable_id.raw, + sizeof(link->dpcd_caps.cable_id.raw)); + + link->dprx_states.cable_id_written = 1; +} + +static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id) +{ + union dmub_rb_cmd cmd; + + if (!link->ctx->dmub_srv || + link->ep_type != DISPLAY_ENDPOINT_PHY || + link->link_enc->features.flags.bits.DP_IS_USB_C == 0) + return false; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cable_id.header.type = DMUB_CMD_GET_USBC_CABLE_ID; + cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data); + cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx( + link->dc, link->link_enc->transmitter); + if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) && + cmd.cable_id.header.ret_status == 1) { + cable_id->raw = cmd.cable_id.data.output_raw; + DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw); + } + return cmd.cable_id.header.ret_status == 1; +} + +static void retrieve_cable_id(struct dc_link *link) +{ + union dp_cable_id usbc_cable_id; + + link->dpcd_caps.cable_id.raw = 0; + core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX, + &link->dpcd_caps.cable_id.raw, sizeof(uint8_t)); + + if (get_usbc_cable_id(link, &usbc_cable_id)) + link->dpcd_caps.cable_id = intersect_cable_id( + &link->dpcd_caps.cable_id, &usbc_cable_id); +} + +bool read_is_mst_supported(struct dc_link *link) +{ + bool mst = false; + enum dc_status st = DC_OK; + union dpcd_rev rev; + union mstm_cap cap; + + if 
(link->preferred_training_settings.mst_enable && + *link->preferred_training_settings.mst_enable == false) { + return false; + } + + rev.raw = 0; + cap.raw = 0; + + st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw, + sizeof(rev)); + + if (st == DC_OK && rev.raw >= DPCD_REV_12) { + + st = core_link_read_dpcd(link, DP_MSTM_CAP, + &cap.raw, sizeof(cap)); + if (st == DC_OK && cap.bits.MST_CAP == 1) + mst = true; + } + return mst; + +} + +/* Read additional sink caps defined in source specific DPCD area + * This function currently only reads from SinkCapability address (DP_SOURCE_SINK_CAP) + * TODO: Add FS caps and read from DP_SOURCE_SINK_FS_CAP as well + */ +static bool dpcd_read_sink_ext_caps(struct dc_link *link) +{ + uint8_t dpcd_data; + + if (!link) + return false; + + if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK) + return false; + + link->dpcd_sink_ext_caps.raw = dpcd_data; + return true; +} + +enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) +{ + uint8_t lttpr_dpcd_data[8]; + enum dc_status status; + bool is_lttpr_present; + + /* Logic to determine LTTPR support*/ + bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware; + + if (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support) + return DC_NOT_SUPPORTED; + + /* By reading LTTPR capability, RX assumes that we will enable + * LTTPR extended aux timeout if LTTPR is present. + */ + status = core_link_read_dpcd( + link, + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, + lttpr_dpcd_data, + sizeof(lttpr_dpcd_data)); + + link->dpcd_caps.lttpr_caps.revision.raw = + lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_link_rate = + lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.phy_repeater_cnt = + lttpr_dpcd_data[DP_PHY_REPEATER_CNT - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_lane_count = + lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.mode = + lttpr_dpcd_data[DP_PHY_REPEATER_MODE - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_ext_timeout = + lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw = + lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw = + lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + /* If this chip cap is set, at least one retimer must exist in the chain + * Override count to 1 if we receive a known bad count (0 or an invalid value) */ + if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) { + ASSERT(0); + link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80; + DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + } + + /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. 
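+	 * (dp_is_lttpr_present() below is expected to encode that policy;
+	 * here its result is only captured for the connectivity log.)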
*/ + is_lttpr_present = dp_is_lttpr_present(link); + + if (is_lttpr_present) + CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: "); + + DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present); + return status; +} + +static bool retrieve_link_cap(struct dc_link *link) +{ + /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, + * which means size 16 will be good for both of those DPCD register block reads + */ + uint8_t dpcd_data[16]; + /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST. + */ + uint8_t dpcd_dprx_data = '\0'; + + struct dp_device_vendor_id sink_id; + union down_stream_port_count down_strm_port_count; + union edp_configuration_cap edp_config_cap; + union dp_downstream_port_present ds_port = { 0 }; + enum dc_status status = DC_ERROR_UNEXPECTED; + uint32_t read_dpcd_retry_cnt = 3; + int i; + struct dp_sink_hw_fw_revision dp_hw_fw_revision; + const uint32_t post_oui_delay = 30; // 30ms + + memset(dpcd_data, '\0', sizeof(dpcd_data)); + memset(&down_strm_port_count, + '\0', sizeof(union down_stream_port_count)); + memset(&edp_config_cap, '\0', + sizeof(union edp_configuration_cap)); + + /* if extended timeout is supported in hardware, + * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer + * CTS 4.2.1.1 regression introduced by CTS specs requirement update. + */ + try_to_configure_aux_timeout(link->ddc, + LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); + + status = dp_retrieve_lttpr_cap(link); + + if (status != DC_OK) { + status = wake_up_aux_channel(link); + if (status == DC_OK) + dp_retrieve_lttpr_cap(link); + else + return false; + } + + if (dp_is_lttpr_present(link)) + configure_lttpr_mode_transparent(link); + + /* Read DP tunneling information. 
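+	 * A failure here is tolerated: the returned status is overwritten
+	 * by later reads, and the tunneling caps are primarily consumed by
+	 * USB4 DPIA endpoints.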
*/ + status = dpcd_get_tunneling_device_data(link); + + dpcd_set_source_specific_data(link); + /* Sink may need to configure internals based on vendor, so allow some + * time before proceeding with possibly vendor specific transactions + */ + msleep(post_oui_delay); + + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, + DP_DPCD_REV, + dpcd_data, + sizeof(dpcd_data)); + if (status == DC_OK) + break; + } + + + if (status != DC_OK) { + dm_error("%s: Read receiver caps dpcd data failed.\n", __func__); + return false; + } + + if (!dp_is_lttpr_present(link)) + try_to_configure_aux_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); + + + { + union training_aux_rd_interval aux_rd_interval; + + aux_rd_interval.raw = + dpcd_data[DP_TRAINING_AUX_RD_INTERVAL]; + + link->dpcd_caps.ext_receiver_cap_field_present = + aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1; + + if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) { + uint8_t ext_cap_data[16]; + + memset(ext_cap_data, '\0', sizeof(ext_cap_data)); + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, + DP_DP13_DPCD_REV, + ext_cap_data, + sizeof(ext_cap_data)); + if (status == DC_OK) { + memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data)); + break; + } + } + if (status != DC_OK) + dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__); + } + } + + link->dpcd_caps.dpcd_rev.raw = + dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; + + if (link->dpcd_caps.ext_receiver_cap_field_present) { + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, + DP_DPRX_FEATURE_ENUMERATION_LIST, + &dpcd_dprx_data, + sizeof(dpcd_dprx_data)); + if (status == DC_OK) + break; + } + + link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data; + + if (status != DC_OK) + dm_error("%s: Read DPRX caps data failed.\n", __func__); + + /* AdaptiveSyncCapability */ + dpcd_dprx_data = 0; + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1, + &dpcd_dprx_data, sizeof(dpcd_dprx_data)); + if (status == DC_OK) + break; + } + + link->dpcd_caps.adaptive_sync_caps.dp_adap_sync_caps.raw = dpcd_dprx_data; + + if (status != DC_OK) + dm_error("%s: Read DPRX caps data failed. Addr:%#x\n", + __func__, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1); + } + + else { + link->dpcd_caps.dprx_feature.raw = 0; + } + + + /* Error condition checking... + * It is impossible for Sink to report Max Lane Count = 0. + * It is possible for Sink to report Max Link Rate = 0, if it is + * an eDP device that is reporting specialized link rates in the + * SUPPORTED_LINK_RATE table. 
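+	 * Hence only MAX_LANE_COUNT == 0 aborts below; a zero max link rate
+	 * is recovered later from the eDP SUPPORTED_LINK_RATES table in
+	 * detect_edp_sink_caps().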
+ */ + if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) + return false; + + ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - + DP_DPCD_REV]; + + read_dp_device_vendor_id(link); + + /* TODO - decouple raw mst capability from policy decision */ + link->dpcd_caps.is_mst_capable = read_is_mst_supported(link); + + get_active_converter_info(ds_port.byte, link); + + dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data)); + + down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - + DP_DPCD_REV]; + + link->dpcd_caps.allow_invalid_MSA_timing_param = + down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; + + link->dpcd_caps.max_ln_count.raw = dpcd_data[ + DP_MAX_LANE_COUNT - DP_DPCD_REV]; + + link->dpcd_caps.max_down_spread.raw = dpcd_data[ + DP_MAX_DOWNSPREAD - DP_DPCD_REV]; + + link->reported_link_cap.lane_count = + link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; + link->reported_link_cap.link_rate = get_link_rate_from_max_link_bw( + dpcd_data[DP_MAX_LINK_RATE - DP_DPCD_REV]); + link->reported_link_cap.link_spread = + link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? + LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; + + edp_config_cap.raw = dpcd_data[ + DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; + link->dpcd_caps.panel_mode_edp = + edp_config_cap.bits.ALT_SCRAMBLER_RESET; + link->dpcd_caps.dpcd_display_control_capable = + edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; + link->dpcd_caps.channel_coding_cap.raw = + dpcd_data[DP_MAIN_LINK_CHANNEL_CODING - DP_DPCD_REV]; + link->test_pattern_enabled = false; + link->compliance_test_state.raw = 0; + + /* read sink count */ + core_link_read_dpcd(link, + DP_SINK_COUNT, + &link->dpcd_caps.sink_count.raw, + sizeof(link->dpcd_caps.sink_count.raw)); + + /* read sink ieee oui */ + core_link_read_dpcd(link, + DP_SINK_OUI, + (uint8_t *)(&sink_id), + sizeof(sink_id)); + + link->dpcd_caps.sink_dev_id = + (sink_id.ieee_oui[0] << 16) + + (sink_id.ieee_oui[1] << 8) + + (sink_id.ieee_oui[2]); + + memmove( + link->dpcd_caps.sink_dev_id_str, + sink_id.ieee_device_id, + sizeof(sink_id.ieee_device_id)); + + core_link_read_dpcd( + link, + DP_SINK_HW_REVISION_START, + (uint8_t *)&dp_hw_fw_revision, + sizeof(dp_hw_fw_revision)); + + link->dpcd_caps.sink_hw_revision = + dp_hw_fw_revision.ieee_hw_rev; + + memmove( + link->dpcd_caps.sink_fw_revision, + dp_hw_fw_revision.ieee_fw_rev, + sizeof(dp_hw_fw_revision.ieee_fw_rev)); + + /* Quirk for Retina panels: wrong DP_MAX_LINK_RATE */ + { + uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 }; + uint8_t fwrev_mbp_2018[] = { 7, 4 }; + uint8_t fwrev_mbp_2018_vega[] = { 8, 4 }; + + /* We also check for the firmware revision as 16,1 models have an + * identical device id and are incorrectly quirked otherwise. 
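+		 * (The quirk below clamps reported_link_cap to RBR2 for the
+		 * matching MBP 2018 panels, which misreport DP_MAX_LINK_RATE.)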
+ */ + if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && + !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018, + sizeof(str_mbp_2018)) && + (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018, + sizeof(fwrev_mbp_2018)) || + !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega, + sizeof(fwrev_mbp_2018_vega)))) { + link->reported_link_cap.link_rate = LINK_RATE_RBR2; + } + } + + memset(&link->dpcd_caps.dsc_caps, '\0', + sizeof(link->dpcd_caps.dsc_caps)); + memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); + /* Read DSC and FEC sink capabilities if DP revision is 1.4 and up */ + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) { + status = core_link_read_dpcd( + link, + DP_FEC_CAPABILITY, + &link->dpcd_caps.fec_cap.raw, + sizeof(link->dpcd_caps.fec_cap.raw)); + status = core_link_read_dpcd( + link, + DP_DSC_SUPPORT, + link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, + sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw)); + if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { + status = core_link_read_dpcd( + link, + DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, + sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); + DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index); + DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x", + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0); + DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 = 0x%02x", + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1); + DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x", + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH); + } + + /* Apply work around to disable FEC and DSC for USB4 tunneling in TBT3 compatibility mode + * only if required. + */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->dc->debug.dpia_debug.bits.enable_force_tbt3_work_around && + link->dpcd_caps.is_branch_dev && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 && + (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE || + link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)) { + /* A TBT3 device is expected to report no support for FEC or DSC to a USB4 DPIA. + * Clear FEC and DSC capabilities as a work around if that is not the case. 
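+			 * The dpia_forced_tbt3_mode flag set below records that
+			 * the clear was forced, so later code can distinguish it
+			 * from a sink that genuinely lacks FEC/DSC.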
+ */ + link->wa_flags.dpia_forced_tbt3_mode = true; + memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps)); + memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); + DC_LOG_DSC("Clear DSC SUPPORT for USB4 link(%d) in TBT3 compatibility mode", link->link_index); + } else + link->wa_flags.dpia_forced_tbt3_mode = false; + } + + if (!dpcd_read_sink_ext_caps(link)) + link->dpcd_sink_ext_caps.raw = 0; + + if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { + DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index); + + core_link_read_dpcd(link, + DP_128B132B_SUPPORTED_LINK_RATES, + &link->dpcd_caps.dp_128b_132b_supported_link_rates.raw, + sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw)); + if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20) + link->reported_link_cap.link_rate = LINK_RATE_UHBR20; + else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5) + link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5; + else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10) + link->reported_link_cap.link_rate = LINK_RATE_UHBR10; + else + dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__); + DC_LOG_DP2("128b/132b supported link rates is read at link %d", link->link_index); + DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz", + link->reported_link_cap.link_rate / 100, + link->reported_link_cap.link_rate % 100); + + core_link_read_dpcd(link, + DP_SINK_VIDEO_FALLBACK_FORMATS, + &link->dpcd_caps.fallback_formats.raw, + sizeof(link->dpcd_caps.fallback_formats.raw)); + DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index); + if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support) + DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported"); + if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support) + DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported"); + if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support) + DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported"); + if (link->dpcd_caps.fallback_formats.raw == 0) { + DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported"); + link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1; + } + + core_link_read_dpcd(link, + DP_FEC_CAPABILITY_1, + &link->dpcd_caps.fec_cap1.raw, + sizeof(link->dpcd_caps.fec_cap1.raw)); + DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index); + if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE) + DC_LOG_DP2("\tFEC aggregated error counters are supported"); + } + + retrieve_cable_id(link); + dpcd_write_cable_id_to_dprx(link); + + /* Connectivity log: detection */ + CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); + + return true; +} + +bool detect_dp_sink_caps(struct dc_link *link) +{ + return retrieve_link_cap(link); +} + +void detect_edp_sink_caps(struct dc_link *link) +{ + uint8_t supported_link_rates[16]; + uint32_t entry; + uint32_t link_rate_in_khz; + enum dc_link_rate link_rate = LINK_RATE_UNKNOWN; + uint8_t backlight_adj_cap; + uint8_t general_edp_cap; + + retrieve_link_cap(link); + link->dpcd_caps.edp_supported_link_rates_count = 0; + memset(supported_link_rates, 0, sizeof(supported_link_rates)); + + /* + * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. 
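+	 * (i.e. sinks exposing the SUPPORTED_LINK_RATES table at DPCD
+	 * 00010h-0001Fh, read just below when ILR optimization applies).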
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" + */ + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && + (link->panel_config.ilr.optimize_edp_link_rate || + link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) { + // Read DPCD 00010h - 0001Fh 16 bytes at one shot + core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, + supported_link_rates, sizeof(supported_link_rates)); + + for (entry = 0; entry < 16; entry += 2) { + // DPCD register reports per-lane link rate = 16-bit link rate capability + // value X 200 kHz. Need multiplier to find link rate in kHz. + link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + + supported_link_rates[entry]) * 200; + + if (link_rate_in_khz != 0) { + link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz); + link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate; + link->dpcd_caps.edp_supported_link_rates_count++; + + if (link->reported_link_cap.link_rate < link_rate) + link->reported_link_cap.link_rate = link_rate; + } + } + } + core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP, + &backlight_adj_cap, sizeof(backlight_adj_cap)); + + link->dpcd_caps.dynamic_backlight_capable_edp = + (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false; + + core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1, + &general_edp_cap, sizeof(general_edp_cap)); + + link->dpcd_caps.set_power_state_capable_edp = + (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false; + + set_default_brightness_aux(link); + + core_link_read_dpcd(link, DP_EDP_DPCD_REV, + &link->dpcd_caps.edp_rev, + sizeof(link->dpcd_caps.edp_rev)); + /* + * PSR is only valid for eDP v1.3 or higher. + */ + if (link->dpcd_caps.edp_rev >= DP_EDP_13) { + core_link_read_dpcd(link, DP_PSR_SUPPORT, + &link->dpcd_caps.psr_info.psr_version, + sizeof(link->dpcd_caps.psr_info.psr_version)); + if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8) + core_link_read_dpcd(link, DP_FORCE_PSRSU_CAPABILITY, + &link->dpcd_caps.psr_info.force_psrsu_cap, + sizeof(link->dpcd_caps.psr_info.force_psrsu_cap)); + core_link_read_dpcd(link, DP_PSR_CAPS, + &link->dpcd_caps.psr_info.psr_dpcd_caps.raw, + sizeof(link->dpcd_caps.psr_info.psr_dpcd_caps.raw)); + if (link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED) { + core_link_read_dpcd(link, DP_PSR2_SU_Y_GRANULARITY, + &link->dpcd_caps.psr_info.psr2_su_y_granularity_cap, + sizeof(link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)); + } + } + + /* + * ALPM is only valid for eDP v1.4 or higher. 
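+	 * The read below fills dpcd_caps.alpm_caps, typically consumed by
+	 * PSR2-style low power features.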
+ */ + if (link->dpcd_caps.dpcd_rev.raw >= DP_EDP_14) + core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP, + &link->dpcd_caps.alpm_caps.raw, + sizeof(link->dpcd_caps.alpm_caps.raw)); +} + +bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) +{ + struct link_encoder *link_enc = NULL; + + if (!max_link_enc_cap) { + DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__); + return false; + } + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); + + if (link_enc && link_enc->funcs->get_max_link_cap) { + link_enc->funcs->get_max_link_cap(link_enc, max_link_enc_cap); + return true; + } + + DC_LOG_ERROR("%s: Max link encoder caps unknown", __func__); + max_link_enc_cap->lane_count = 1; + max_link_enc_cap->link_rate = 6; + return false; +} + +const struct dc_link_settings *dc_link_get_link_cap( + const struct dc_link *link) +{ + if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN && + link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) + return &link->preferred_link_setting; + return &link->verified_link_cap; +} + +struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) +{ + struct dc_link_settings max_link_cap = {0}; + enum dc_link_rate lttpr_max_link_rate; + enum dc_link_rate cable_max_link_rate; + struct link_encoder *link_enc = NULL; + + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); + + /* get max link encoder capability */ + if (link_enc) + link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap); + + /* Lower link settings based on sink's link cap */ + if (link->reported_link_cap.lane_count < max_link_cap.lane_count) + max_link_cap.lane_count = + link->reported_link_cap.lane_count; + if (link->reported_link_cap.link_rate < max_link_cap.link_rate) + max_link_cap.link_rate = + link->reported_link_cap.link_rate; + if (link->reported_link_cap.link_spread < + max_link_cap.link_spread) + max_link_cap.link_spread = + link->reported_link_cap.link_spread; + + /* Lower link settings based on cable attributes + * Cable ID is a DP2 feature to identify max certified link rate that + * a cable can carry. The cable identification method requires both + * cable and display hardware support. Since the specs comes late, it is + * anticipated that the first round of DP2 cables and displays may not + * be fully compatible to reliably return cable ID data. Therefore the + * decision of our cable id policy is that if the cable can return non + * zero cable id data, we will take cable's link rate capability into + * account. However if we get zero data, the cable link rate capability + * is considered inconclusive. In this case, we will not take cable's + * capability into account to avoid of over limiting hardware capability + * from users. The max overall link rate capability is still determined + * after actual dp pre-training. Cable id is considered as an auxiliary + * method of determining max link bandwidth capability. + */ + cable_max_link_rate = get_cable_max_link_rate(link); + + if (!link->dc->debug.ignore_cable_id && + cable_max_link_rate != LINK_RATE_UNKNOWN && + cable_max_link_rate < max_link_cap.link_rate) + max_link_cap.link_rate = cable_max_link_rate; + + /* account for lttpr repeaters cap + * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3). 
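+	 * The limits read from the LTTPR field data structure are therefore
+	 * folded in here on top of the sink- and cable-reported caps.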
+ */ + if (dp_is_lttpr_present(link)) { + if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) + max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; + lttpr_max_link_rate = get_lttpr_max_link_rate(link); + + if (lttpr_max_link_rate < max_link_cap.link_rate) + max_link_cap.link_rate = lttpr_max_link_rate; + + DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n", + __func__, + max_link_cap.lane_count, + max_link_cap.link_rate); + } + + if (link_dp_get_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING && + link->dc->debug.disable_uhbr) + max_link_cap.link_rate = LINK_RATE_HIGH3; + + return max_link_cap; +} + +static bool dp_verify_link_cap( + struct dc_link *link, + struct dc_link_settings *known_limit_link_setting, + int *fail_count) +{ + struct dc_link_settings cur_link_settings = {0}; + struct dc_link_settings max_link_settings = *known_limit_link_setting; + bool success = false; + bool skip_video_pattern; + enum clock_source_id dp_cs_id = get_clock_source_id(link); + enum link_training_result status = LINK_TRAINING_SUCCESS; + union hpd_irq_data irq_data; + struct link_resource link_res; + + memset(&irq_data, 0, sizeof(irq_data)); + cur_link_settings = max_link_settings; + + /* Grant extended timeout request */ + if (dp_is_lttpr_present(link) && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) { + uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80; + + core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant)); + } + + do { + if (!get_temp_dp_link_res(link, &link_res, &cur_link_settings)) + continue; + + skip_video_pattern = cur_link_settings.link_rate != LINK_RATE_LOW; + dp_enable_link_phy( + link, + &link_res, + link->connector_signal, + dp_cs_id, + &cur_link_settings); + + status = dp_perform_link_training( + link, + &link_res, + &cur_link_settings, + skip_video_pattern); + + if (status == LINK_TRAINING_SUCCESS) { + success = true; + udelay(1000); + if (dc_link_dp_read_hpd_rx_irq_data(link, &irq_data) == DC_OK && + dc_link_check_link_loss_status( + link, + &irq_data)) + (*fail_count)++; + + } else { + (*fail_count)++; + } + dp_trace_lt_total_count_increment(link, true); + dp_trace_lt_result_update(link, status, true); + dp_disable_link_phy(link, &link_res, link->connector_signal); + } while (!success && decide_fallback_link_setting(link, + &max_link_settings, &cur_link_settings, status)); + + link->verified_link_cap = success ? 
+			cur_link_settings : fail_safe_link_settings;
+	return success;
+}
+
+bool dp_verify_link_cap_with_retries(
+	struct dc_link *link,
+	struct dc_link_settings *known_limit_link_setting,
+	int attempts)
+{
+	int i = 0;
+	bool success = false;
+	int fail_count = 0;
+
+	dp_trace_detect_lt_init(link);
+
+	if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
+			link->dc->debug.usbc_combo_phy_reset_wa)
+		apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting);
+
+	dp_trace_set_lt_start_timestamp(link, false);
+	for (i = 0; i < attempts; i++) {
+		enum dc_connection_type type = dc_connection_none;
+
+		memset(&link->verified_link_cap, 0,
+				sizeof(struct dc_link_settings));
+		if (!dc_link_detect_connection_type(link, &type) || type == dc_connection_none) {
+			link->verified_link_cap = fail_safe_link_settings;
+			break;
+		} else if (dp_verify_link_cap(link, known_limit_link_setting,
+				&fail_count) && fail_count == 0) {
+			success = true;
+			break;
+		}
+		msleep(10);
+	}
+
+	dp_trace_lt_fail_count_update(link, fail_count, true);
+	dp_trace_set_lt_end_timestamp(link, true);
+
+	return success;
+}
+
+/**
+ * dc_link_is_dp_sink_present() - Check if there is a native DP
+ * or passive DP-HDMI dongle connected
+ */
+bool dc_link_is_dp_sink_present(struct dc_link *link)
+{
+	enum gpio_result gpio_result;
+	uint32_t clock_pin = 0;
+	uint8_t retry = 0;
+	struct ddc *ddc;
+
+	enum connector_id connector_id =
+		dal_graphics_object_id_get_connector_id(link->link_id);
+
+	bool present =
+		((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
+		(connector_id == CONNECTOR_ID_EDP) ||
+		(connector_id == CONNECTOR_ID_USBC));
+
+	ddc = get_ddc_pin(link->ddc);
+
+	if (!ddc) {
+		BREAK_TO_DEBUGGER();
+		return present;
+	}
+
+	/* Open GPIO and set it to I2C mode */
+	/* Note: this GpioMode_Input will be converted
+	 * to GpioConfigType_I2cAuxDualMode in GPIO component,
+	 * which indicates we need additional delay
+	 */
+
+	if (dal_ddc_open(ddc, GPIO_MODE_INPUT,
+			 GPIO_DDC_CONFIG_TYPE_MODE_I2C) != GPIO_RESULT_OK) {
+		dal_ddc_close(ddc);
+
+		return present;
+	}
+
+	/*
+	 * Read GPIO: DP sink is present if both clock and data pins are zero
+	 *
+	 * [W/A] When plugging/unplugging the DP cable, some customer boards
+	 * see one short pulse on clk_pin (1V, < 1 ms); DP is then configured
+	 * as HDMI/DVI and the monitor can't be lit up, so retry 3 times.
+	 * A real passive dongle, however, needs an additional 3 ms to detect.
+	 */
+	do {
+		gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
+		ASSERT(gpio_result == GPIO_RESULT_OK);
+		if (clock_pin)
+			udelay(1000);
+		else
+			break;
+	} while (retry++ < 3);
+
+	present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
+
+	dal_ddc_close(ddc);
+
+	return present;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
new file mode 100644
index 00000000000000..f79e4a4a9db628
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DP_CAPABILITY_H__ +#define __DC_LINK_DP_CAPABILITY_H__ + +#include "link.h" + +bool detect_dp_sink_caps(struct dc_link *link); + +void detect_edp_sink_caps(struct dc_link *link); + +struct dc_link_settings dp_get_max_link_cap(struct dc_link *link); + + +enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link); + +/* Convert PHY repeater count read from DPCD uint8_t. */ +uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count); + +bool dp_is_lttpr_present(struct dc_link *link); + +bool is_dp_active_dongle(const struct dc_link *link); + +bool is_dp_branch_device(const struct dc_link *link); + +void dpcd_write_cable_id_to_dprx(struct dc_link *link); + +/* Initialize output parameter lt_settings. */ +void dp_decide_training_settings( + struct dc_link *link, + const struct dc_link_settings *link_setting, + struct link_training_settings *lt_settings); + + +bool decide_edp_link_settings_with_dsc(struct dc_link *link, + struct dc_link_settings *link_setting, + uint32_t req_bw, + enum dc_link_rate max_link_rate); + +void dpcd_set_source_specific_data(struct dc_link *link); + +/*query dpcd for version and mst cap addresses*/ +bool read_is_mst_supported(struct dc_link *link); + +bool decide_fallback_link_setting( + struct dc_link *link, + struct dc_link_settings *max, + struct dc_link_settings *cur, + enum link_training_result training_result); + +bool dp_verify_link_cap_with_retries( + struct dc_link *link, + struct dc_link_settings *known_limit_link_setting, + int attempts); + +#endif /* __DC_LINK_DP_CAPABILITY_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c new file mode 100644 index 00000000000000..32f48a48e9ddef --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dc.h" +#include "inc/core_status.h" +#include "dc_link.h" +#include "dpcd_defs.h" + +#include "link_dp_dpia.h" +#include "link_hwss.h" +#include "dm_helpers.h" +#include "dmub/inc/dmub_cmd.h" +#include "link_dpcd.h" +#include "link_dp_training.h" +#include "dc_dmub_srv.h" + +#define DC_LOGGER \ + link->ctx->logger + +/** @note Can remove once DP tunneling registers in upstream include/drm/drm_dp_helper.h */ +/* DPCD DP Tunneling over USB4 */ +#define DP_TUNNELING_CAPABILITIES_SUPPORT 0xe000d +#define DP_IN_ADAPTER_INFO 0xe000e +#define DP_USB4_DRIVER_ID 0xe000f +#define DP_USB4_ROUTER_TOPOLOGY_ID 0xe001b + +enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) +{ + enum dc_status status = DC_OK; + uint8_t dpcd_dp_tun_data[3] = {0}; + uint8_t dpcd_topology_data[DPCD_USB4_TOPOLOGY_ID_LEN] = {0}; + uint8_t i = 0; + + status = core_link_read_dpcd( + link, + DP_TUNNELING_CAPABILITIES_SUPPORT, + dpcd_dp_tun_data, + sizeof(dpcd_dp_tun_data)); + + status = core_link_read_dpcd( + link, + DP_USB4_ROUTER_TOPOLOGY_ID, + dpcd_topology_data, + sizeof(dpcd_topology_data)); + + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = + dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT]; + link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw = + dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT]; + link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id = + dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT]; + + for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++) + link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i]; + + return status; +} + +bool dc_link_dpia_query_hpd_status(struct dc_link *link) +{ + union dmub_rb_cmd cmd = {0}; + struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv; + bool is_hpd_high = false; + + /* prepare QUERY_HPD command */ + cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE; + cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1; + cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA; + + /* Return HPD status reported by DMUB if query successfully executed. 
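+	 * Any DMUB transport failure leaves is_hpd_high at its false
+	 * default, so an unanswered query reads back as HPD low.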
*/ + if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS) + is_hpd_high = cmd.query_hpd.data.result; + + DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n", + __func__, + link->link_index, + link->link_id.enum_id - ENUM_ID_1, + cmd.query_hpd.data.status, + cmd.query_hpd.data.result); + + return is_hpd_high; +} + diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h similarity index 52% rename from drivers/gpu/drm/tdfx/tdfx_drv.h rename to drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h index 84204ec1b046b9..98935cc10bb781 100644 --- a/drivers/gpu/drm/tdfx/tdfx_drv.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h @@ -1,9 +1,6 @@ -/* tdfx.h -- 3dfx DRM template customization -*- linux-c -*- - * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com - */ +/* SPDX-License-Identifier: MIT */ /* - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. + * Copyright 2021 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -12,36 +9,35 @@ * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * - * Authors: - * Gareth Hughes + * Authors: AMD + * */ -#ifndef __TDFX_H__ -#define __TDFX_H__ +#ifndef __DC_LINK_DPIA_H__ +#define __DC_LINK_DPIA_H__ -/* General customization: - */ +#include "link.h" -#define DRIVER_AUTHOR "VA Linux Systems Inc." +/* Read tunneling device capability from DPCD and update link capability + * accordingly. + */ +enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link); -#define DRIVER_NAME "tdfx" -#define DRIVER_DESC "3dfx Banshee/Voodoo3+" -#define DRIVER_DATE "20010216" +/* Query hot plug status of USB4 DP tunnel. + * Returns true if HPD high. + */ +bool dc_link_dpia_query_hpd_status(struct dc_link *link); -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 0 -#endif +#endif /* __DC_LINK_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c new file mode 100644 index 00000000000000..f69e681b3b5bfc --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c @@ -0,0 +1,441 @@ + +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+/*********************************************************************/
+// USB4 DPIA BANDWIDTH ALLOCATION LOGIC
+/*********************************************************************/
+#include "dc.h"
+#include "dc_link.h"
+#include "link_dp_dpia_bw.h"
+#include "drm_dp_helper_dc.h"
+#include "link_dpcd.h"
+
+#define Kbps_TO_Gbps (1000 * 1000) /* number of kbps in one Gbps */
+
+// ------------------------------------------------------------------
+// PRIVATE FUNCTIONS
+// ------------------------------------------------------------------
+/*
+ * Always check the following:
+ * - Is it a USB4 link?
+ * - Is HPD HIGH?
+ * - Is BW Allocation Support Mode enabled on DP-Tx?
+ */
+static bool get_bw_alloc_proceed_flag(struct dc_link *link)
+{
+	return (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA
+			&& link->hpd_status
+			&& link->dpia_bw_alloc_config.bw_alloc_enabled);
+}
+static void reset_bw_alloc_struct(struct dc_link *link)
+{
+	link->dpia_bw_alloc_config.bw_alloc_enabled = false;
+	link->dpia_bw_alloc_config.sink_verified_bw = 0;
+	link->dpia_bw_alloc_config.sink_max_bw = 0;
+	link->dpia_bw_alloc_config.estimated_bw = 0;
+	link->dpia_bw_alloc_config.bw_granularity = 0;
+	link->dpia_bw_alloc_config.response_ready = false;
+}
+static uint8_t get_bw_granularity(struct dc_link *link)
+{
+	uint8_t bw_granularity = 0;
+
+	core_link_read_dpcd(
+			link,
+			DP_BW_GRANULALITY,
+			&bw_granularity,
+			sizeof(uint8_t));
+
+	/* Map the DPCD granularity code to a divisor of 1 Gbps:
+	 * 4 -> 0.25 Gbps units, 2 -> 0.5 Gbps units.
+	 */
+	switch (bw_granularity & 0x3) {
+	case 0:
+		bw_granularity = 4;
+		break;
+	case 1:
+	default:
+		bw_granularity = 2;
+		break;
+	}
+
+	return bw_granularity;
+}
+static int get_estimated_bw(struct dc_link *link)
+{
+	uint8_t bw_estimated_bw = 0;
+
+	if (core_link_read_dpcd(
+			link,
+			ESTIMATED_BW,
+			&bw_estimated_bw,
+			sizeof(uint8_t)) != DC_OK)
+		dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, ESTIMATED_BW);
+
+	return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+}
+static bool allocate_usb4_bw(int *stream_allocated_bw, int bw_needed, struct dc_link *link)
+{
+	if (bw_needed > 0)
+		*stream_allocated_bw += bw_needed;
+
+	return true;
+}
+static bool deallocate_usb4_bw(int *stream_allocated_bw, int bw_to_dealloc, struct dc_link *link)
+{
+	bool ret = true;
+
+	if (*stream_allocated_bw > 0)
+		*stream_allocated_bw -= bw_to_dealloc;
+
+	// Unplugged, so reset the allocation struct
+	if (!link->hpd_status)
+		reset_bw_alloc_struct(link);
+
+	return ret;
+}
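+/*
+ * Worked example of the unit conversion above: DPCD granularity code 0
+ * makes get_bw_granularity() return 4, i.e. one DPCD BW unit is
+ * 1 Gbps / 4 = 0.25 Gbps. An ESTIMATED_BW byte of 40 then converts to
+ * 40 * (Kbps_TO_Gbps / 4) = 40 * 250000 = 10000000 kbps (10 Gbps).
+ */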
+/*
+ * Read the new BW allocation configuration (e.g. estimated_bw, allocated_bw,
+ * granularity, Driver_ID, CM_Group) and populate the BW allocation structs
+ * for the host router and DPIA
+ */
+static void init_usb4_bw_struct(struct dc_link *link)
+{
+	// Init the known values
+	link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
+	link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+}
+static uint8_t get_lowest_dpia_index(struct dc_link *link)
+{
+	const struct dc *dc_struct = link->dc;
+	uint8_t idx = 0xFF;
+
+	for (int i = 0; i < MAX_PIPES * 2; ++i) {
+
+		if (!dc_struct->links[i] ||
+				dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+			continue;
+
+		if (idx > dc_struct->links[i]->link_index)
+			idx = dc_struct->links[i]->link_index;
+	}
+
+	return idx;
+}
+/*
+ * Get the total estimated or allocated BW for a Host Router
+ *
+ * @link: pointer to the dc_link struct instance
+ * @type: HOST_ROUTER_BW_ESTIMATED or HOST_ROUTER_BW_ALLOCATED
+ *
+ * return: BW of the given type, summed over all plugged DPIAs that share
+ * this link's host router, in kbps
+ */
+static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
+{
+	const struct dc *dc_struct = link->dc;
+	uint8_t lowest_dpia_index = get_lowest_dpia_index(link);
+	/* Two DPIAs per host router, so DPIAs pair up by link index */
+	uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0;
+	struct dc_link *link_temp;
+	int total_bw = 0;
+
+	for (int i = 0; i < MAX_PIPES * 2; ++i) {
+
+		if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+			continue;
+
+		link_temp = dc_struct->links[i];
+		if (!link_temp || !link_temp->hpd_status)
+			continue;
+
+		idx_temp = (link_temp->link_index - lowest_dpia_index) / 2;
+
+		if (idx_temp == idx) {
+
+			if (type == HOST_ROUTER_BW_ESTIMATED)
+				total_bw += link_temp->dpia_bw_alloc_config.estimated_bw;
+			else if (type == HOST_ROUTER_BW_ALLOCATED)
+				total_bw += link_temp->dpia_bw_alloc_config.sink_allocated_bw;
+		}
+	}
+
+	return total_bw;
+}
+/*
+ * Cleanup function called when the DPIA is unplugged, to reset the BW
+ * allocation struct and de-allocate any outstanding BW
+ *
+ * @link: pointer to the dc_link struct instance
+ *
+ * return: true if the de-allocation succeeded (or there was nothing to do)
+ */
+static bool dpia_bw_alloc_unplug(struct dc_link *link)
+{
+	if (!link)
+		return true;
+
+	return deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
+			link->dpia_bw_alloc_config.sink_allocated_bw, link);
+}
+static void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw)
+{
+	uint8_t requested_bw;
+	uint32_t temp;
+
+	// Corner case 1: never request more than the CM's last estimated BW
+	if (req_bw > link->dpia_bw_alloc_config.estimated_bw)
+		req_bw = link->dpia_bw_alloc_config.estimated_bw;
+
+	temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
+	requested_bw = temp / Kbps_TO_Gbps;
+
+	// Round up so integer truncation never under-requests
+	if (temp % Kbps_TO_Gbps)
+		++requested_bw;
+
+	// Corner case 2: skip the request if the quantized BW is already allocated
+	req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+	if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw)
+		return;
+
+	if (core_link_write_dpcd(
+			link,
+			REQUESTED_BW,
+			&requested_bw,
+			sizeof(uint8_t)) != DC_OK)
+		dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, REQUESTED_BW);
+	else
+		link->dpia_bw_alloc_config.response_ready = false; // Reset flag
+}
+/*
+ * Return the response_ready flag from dc_link struct
+ *
+ * @link: pointer to the dc_link struct instance
+ *
+ * return: response_ready flag from dc_link struct
+ */
+static bool get_cm_response_ready_flag(struct dc_link *link)
+{
+	return link->dpia_bw_alloc_config.response_ready;
+}
+// ------------------------------------------------------------------
+// PUBLIC FUNCTIONS
+// ------------------------------------------------------------------
+bool set_dptx_usb4_bw_alloc_support(struct dc_link *link)
+{
+	bool ret = false;
+	uint8_t response = 0,
+			bw_support_dpia = 0,
+			bw_support_cm = 0;
+
+	if (!(link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->hpd_status))
+		goto out;
+
+	if (core_link_read_dpcd(
+			link,
+			DP_TUNNELING_CAPABILITIES,
+			&response,
+			sizeof(uint8_t)) != DC_OK)
+		dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, DP_TUNNELING_CAPABILITIES);
+
+	bw_support_dpia = (response >> 7) & 1;
+
+	if (core_link_read_dpcd(
+			link,
+			USB4_DRIVER_BW_CAPABILITY,
+			&response,
+			sizeof(uint8_t)) != DC_OK)
+		dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, USB4_DRIVER_BW_CAPABILITY);
+
+	bw_support_cm = (response >> 7) & 1;
+
+	/* Send acknowledgment to the CM to turn ON DPTX BW allocation support */
+	if (bw_support_cm && bw_support_dpia) {
+
+		response = 0x80; // Set bit 7 to enable BW allocation mode
+		if (core_link_write_dpcd(
+				link,
+				DPTX_BW_ALLOCATION_MODE_CONTROL,
+				&response,
+				sizeof(uint8_t)) != DC_OK)
+			dm_output_to_console("%s: AUX W/R ERROR @ 0x%x, failed to enable DPtx BW Allocation Mode Support\n",
+					__func__, DPTX_BW_ALLOCATION_MODE_CONTROL);
+		else {
+
+			// SUCCESS Enabled DPtx BW Allocation Mode Support
+			link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+			dm_output_to_console("**** SUCCESS Enabling DPtx BW Allocation Mode Support ***\n");
+
+			ret = true;
+			init_usb4_bw_struct(link);
+		}
+	}
+
+out:
+	return ret;
+}
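+/*
+ * Summary of the request/response handshake handled below:
+ * 1. dc_link_set_usb4_req_bw_req() (above) writes the quantized request to
+ *    REQUESTED_BW and clears response_ready.
+ * 2. The CM's reply arrives as a callback into dc_link_get_usb4_req_bw_resp()
+ *    with DPIA_BW_REQ_SUCCESS, DPIA_BW_REQ_FAILED, DPIA_EST_BW_CHANGED or
+ *    DPIA_BW_ALLOC_CAPS_CHANGED.
+ * 3. dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link() polls
+ *    response_ready to wait for that reply.
+ */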
+void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t result)
+{
+	int needed = 0;
+	int available = 0;
+	int estimated = 0;
+	int host_router_total_estimated_bw = 0;
+
+	if (!get_bw_alloc_proceed_flag(link))
+		return;
+
+	switch (result) {
+
+	case DPIA_BW_REQ_FAILED:
+
+		dm_output_to_console("%s: *** *** BW REQ FAILURE for DP-TX Request *** ***\n", __func__);
+
+		// Update to the new Estimated BW value reported by the CM
+		link->dpia_bw_alloc_config.estimated_bw =
+				bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+
+		dc_link_set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.estimated_bw);
+		link->dpia_bw_alloc_config.response_ready = false;
+
+		/*
+		 * A FAIL means either:
+		 * 1. DP-Tx tried to allocate more than is available, i.e. it failed
+		 *    locally => get the estimated BW and allocate that
+		 * 2. DP-Tx tried to allocate the ESTIMATED BW and failed, in which
+		 *    case the CM will update 0xE0023 with a new ESTIMATED BW value.
+		 */
+		break;
+
+	case DPIA_BW_REQ_SUCCESS:
+
+		dm_output_to_console("%s: *** BW REQ SUCCESS for DP-TX Request ***\n", __func__);
+
+		// SUCCESS can mean:
+		// 1. first-time success before any pruning is done
+		// 2. success after a previous FAIL, before any pruning is done
+		// 3. success after pruning is done but before enabling the link
+
+		needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+
+		// 1.
+		if (!link->dpia_bw_alloc_config.sink_allocated_bw) {
+
+			allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, needed, link);
+			link->dpia_bw_alloc_config.sink_verified_bw =
+					link->dpia_bw_alloc_config.sink_allocated_bw;
+
+			// SUCCESS from first attempt
+			if (link->dpia_bw_alloc_config.sink_allocated_bw >
+					link->dpia_bw_alloc_config.sink_max_bw)
+				link->dpia_bw_alloc_config.sink_verified_bw =
+						link->dpia_bw_alloc_config.sink_max_bw;
+		}
+		// 3.
+		else if (link->dpia_bw_alloc_config.sink_allocated_bw) {
+
+			// Find out how much we need to de-allocate
+			if (link->dpia_bw_alloc_config.sink_allocated_bw > needed)
+				deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
+						link->dpia_bw_alloc_config.sink_allocated_bw - needed, link);
+			else
+				allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
+						needed - link->dpia_bw_alloc_config.sink_allocated_bw, link);
+		}
+
+		// 4. If this is the 2nd sink, any unused BW will be reallocated to the
+		//    master DPIA => check whether estimated_bw changed
+
+		link->dpia_bw_alloc_config.response_ready = true;
+		break;
+
+	case DPIA_EST_BW_CHANGED:
+
+		dm_output_to_console("%s: *** ESTIMATED BW CHANGED for DP-TX Request ***\n", __func__);
+
+		estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+		host_router_total_estimated_bw = get_host_router_total_bw(link, HOST_ROUTER_BW_ESTIMATED);
+
+		// 1. Change due to unplug of another sink
+		if (estimated == host_router_total_estimated_bw) {
+
+			// First update the estimated & max_bw fields
+			if (link->dpia_bw_alloc_config.estimated_bw < estimated) {
+				available = estimated - link->dpia_bw_alloc_config.estimated_bw;
+				link->dpia_bw_alloc_config.estimated_bw = estimated;
+			}
+		}
+		// 2. Re-allocation between two DPIAs due to a plug event, or
+		//    re-allocation of unused BW
+		else {
+
+			// We took BW from another unplugged/problematic sink
+			if (link->dpia_bw_alloc_config.estimated_bw < estimated)
+				available = estimated - link->dpia_bw_alloc_config.estimated_bw;
+
+			// We lost estimated BW, usually due to a plug event on another DPIA
+			link->dpia_bw_alloc_config.estimated_bw = estimated;
+		}
+		break;
+
+	case DPIA_BW_ALLOC_CAPS_CHANGED:
+
+		dm_output_to_console("%s: *** BW ALLOC CAPABILITY CHANGED for DP-TX Request ***\n", __func__);
+		link->dpia_bw_alloc_config.bw_alloc_enabled = false;
+		break;
+	}
+}
+int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
+{
+	int ret = 0;
+	uint8_t timeout = 10;
+
+	if (!(link && DISPLAY_ENDPOINT_USB4_DPIA == link->ep_type
+			&& link->dpia_bw_alloc_config.bw_alloc_enabled))
+		goto out;
+
+	// 1. Hot Plug
+	if (link->hpd_status && peak_bw > 0) {
+
+		// If DP over USB4 then we need to check BW allocation
+		link->dpia_bw_alloc_config.sink_max_bw = peak_bw;
+		dc_link_set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
+
+		do {
+			if (timeout > 0)
+				timeout--;
+			else
+				break;
+			udelay(10 * 1000);
+		} while (!get_cm_response_ready_flag(link));
+
+		if (!timeout)
+			ret = 0; // ERROR: timed out waiting for the BW allocation response
+		else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
+			ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
+	}
+	//2.
Cold Unplug + else if (!link->hpd_status) + dpia_bw_alloc_unplug(link); + +out: + return ret; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h similarity index 59% rename from drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.h rename to drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h index 669e995f825f28..c2c3049adcd143 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h @@ -26,13 +26,13 @@ #ifndef DC_INC_LINK_DP_DPIA_BW_H_ #define DC_INC_LINK_DP_DPIA_BW_H_ -// XXX: TODO: Re-add for Phase 2 -/* Number of Host Routers per motherboard is 2 and 2 DPIA per host router */ -#define MAX_HR_NUM 2 - -struct dc_host_router_bw_alloc { - int max_bw[MAX_HR_NUM]; // The Max BW that each Host Router has available to be shared btw DPIAs - int total_estimated_bw[MAX_HR_NUM]; // The Total Verified and available BW that Host Router has +/* + * Host Router BW type + */ +enum bw_type { + HOST_ROUTER_BW_ESTIMATED, + HOST_ROUTER_BW_ALLOCATED, + HOST_ROUTER_BW_INVALID, }; /* @@ -44,26 +44,4 @@ struct dc_host_router_bw_alloc { */ bool set_dptx_usb4_bw_alloc_support(struct dc_link *link); -/* - * Send a request from DP-Tx requesting to allocate BW remotely after - * allocating it locally. This will get processed by CM and a CB function - * will be called. - * - * @link: pointer to the dc_link struct instance - * @req_bw: The requested bw in Kbyte to allocated - * - * return: none - */ -void set_usb4_req_bw_req(struct dc_link *link, int req_bw); - -/* - * CB function for when the status of the Req above is complete. We will - * find out the result of allocating on CM and update structs accordingly - * - * @link: pointer to the dc_link struct instance - * - * return: none - */ -void get_usb4_req_bw_resp(struct dc_link *link); - #endif /* DC_INC_LINK_DP_DPIA_BW_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c new file mode 100644 index 00000000000000..9d80427520cf40 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -0,0 +1,389 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements DP HPD short pulse handling sequence according to DP + * specifications + * + */ + +#include "link_dp_irq_handler.h" +#include "link_dpcd.h" +#include "link_dp_training.h" +#include "link_dp_capability.h" +#include "link/accessories/link_dp_trace.h" +#include "link/link_dpms.h" +#include "dm_helpers.h" + +#define DC_LOGGER_INIT(logger) + +bool dc_link_check_link_loss_status( + struct dc_link *link, + union hpd_irq_data *hpd_irq_dpcd_data) +{ + uint8_t irq_reg_rx_power_state = 0; + enum dc_status dpcd_result = DC_ERROR_UNEXPECTED; + union lane_status lane_status; + uint32_t lane; + bool sink_status_changed; + bool return_code; + + sink_status_changed = false; + return_code = false; + + if (link->cur_link_settings.lane_count == 0) + return return_code; + + /*1. Check that Link Status changed, before re-training.*/ + + /*parse lane status*/ + for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { + /* check status of lanes 0,1 + * changed DpcdAddress_Lane01Status (0x202) + */ + lane_status.raw = dp_get_nibble_at_index( + &hpd_irq_dpcd_data->bytes.lane01_status.raw, + lane); + + if (!lane_status.bits.CHANNEL_EQ_DONE_0 || + !lane_status.bits.CR_DONE_0 || + !lane_status.bits.SYMBOL_LOCKED_0) { + /* if one of the channel equalization, clock + * recovery or symbol lock is dropped + * consider it as (link has been + * dropped) dp sink status has changed + */ + sink_status_changed = true; + break; + } + } + + /* Check interlane align.*/ + if (sink_status_changed || + !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) { + + DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__); + + return_code = true; + + /*2. Check that we can handle interrupt: Not in FS DOS, + * Not in "Display Timeout" state, Link is trained. 
+		 */
+		dpcd_result = core_link_read_dpcd(link,
+			DP_SET_POWER,
+			&irq_reg_rx_power_state,
+			sizeof(irq_reg_rx_power_state));
+
+		if (dpcd_result != DC_OK) {
+			DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
+				__func__);
+		} else {
+			if (irq_reg_rx_power_state != DP_SET_POWER_D0)
+				return_code = false;
+		}
+	}
+
+	return return_code;
+}
+
+static bool handle_hpd_irq_psr_sink(struct dc_link *link)
+{
+	union dpcd_psr_configuration psr_configuration;
+
+	if (!link->psr_settings.psr_feature_enabled)
+		return false;
+
+	dm_helpers_dp_read_dpcd(
+		link->ctx,
+		link,
+		368, /*DpcdAddress_PSR_Enable_Cfg*/
+		&psr_configuration.raw,
+		sizeof(psr_configuration.raw));
+
+	if (psr_configuration.bits.ENABLE) {
+		unsigned char dpcdbuf[3] = {0};
+		union psr_error_status psr_error_status;
+		union psr_sink_psr_status psr_sink_psr_status;
+
+		dm_helpers_dp_read_dpcd(
+			link->ctx,
+			link,
+			0x2006, /*DpcdAddress_PSR_Error_Status*/
+			(unsigned char *) dpcdbuf,
+			sizeof(dpcdbuf));
+
+		/*DPCD 2006h ERROR STATUS*/
+		psr_error_status.raw = dpcdbuf[0];
+		/*DPCD 2008h SINK PANEL SELF REFRESH STATUS*/
+		psr_sink_psr_status.raw = dpcdbuf[2];
+
+		if (psr_error_status.bits.LINK_CRC_ERROR ||
+				psr_error_status.bits.RFB_STORAGE_ERROR ||
+				psr_error_status.bits.VSC_SDP_ERROR) {
+			bool allow_active;
+
+			/* Acknowledge and clear error bits */
+			dm_helpers_dp_write_dpcd(
+				link->ctx,
+				link,
+				8198, /*DpcdAddress_PSR_Error_Status*/
+				&psr_error_status.raw,
+				sizeof(psr_error_status.raw));
+
+			/* PSR error, disable and re-enable PSR */
+			if (link->psr_settings.psr_allow_active) {
+				allow_active = false;
+				dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+				allow_active = true;
+				dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+			}
+
+			return true;
+		} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
+				PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB) {
+			/* No error is detected, PSR is active.
+			 * We should return with IRQ_HPD handled without
+			 * checking for loss of sync since PSR would have
+			 * powered down main link.
+			 */
+			return true;
+		}
+	}
+	return false;
+}
+
+void dc_link_dp_handle_link_loss(struct dc_link *link)
+{
+	struct pipe_ctx *pipes[MAX_PIPES];
+	struct dc_state *state = link->dc->current_state;
+	uint8_t count;
+	int i;
+
+	link_get_master_pipes_with_dpms_on(link, state, &count, pipes);
+
+	for (i = 0; i < count; i++)
+		link_set_dpms_off(pipes[i]);
+
+	for (i = count - 1; i >= 0; i--) {
+		// Always use max settings here for DP 1.4a LL Compliance CTS
+		if (link->is_automated) {
+			pipes[i]->link_config.dp_link_settings.lane_count =
+					link->verified_link_cap.lane_count;
+			pipes[i]->link_config.dp_link_settings.link_rate =
+					link->verified_link_cap.link_rate;
+			pipes[i]->link_config.dp_link_settings.link_spread =
+					link->verified_link_cap.link_spread;
+		}
+		link_set_dpms_on(link->dc->current_state, pipes[i]);
+	}
+}
+
+enum dc_status dc_link_dp_read_hpd_rx_irq_data(
+	struct dc_link *link,
+	union hpd_irq_data *irq_data)
+{
+	enum dc_status retval;
+
+	/* The HW reads 16 bytes from 200h on HPD,
+	 * but if we get an AUX_DEFER, the HW cannot retry
+	 * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
+	 * fail, so we now explicitly read 6 bytes which is
+	 * the requirement from the above-mentioned test cases.
+	 *
+	 * For DP 1.4 we need to read those from 2002h range.
+ */ + if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14) + retval = core_link_read_dpcd( + link, + DP_SINK_COUNT, + irq_data->raw, + sizeof(union hpd_irq_data)); + else { + /* Read 14 bytes in a single read and then copy only the required fields. + * This is more efficient than doing it in two separate AUX reads. */ + + uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1]; + + retval = core_link_read_dpcd( + link, + DP_SINK_COUNT_ESI, + tmp, + sizeof(tmp)); + + if (retval != DC_OK) + return retval; + + irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI]; + irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI]; + irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI]; + irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI]; + irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI]; + irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI]; + } + + return retval; +} + +/*************************Short Pulse IRQ***************************/ +bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link) +{ + /* + * Don't handle RX IRQ unless one of following is met: + * 1) The link is established (cur_link_settings != unknown) + * 2) We know we're dealing with a branch device, SST or MST + */ + + if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || + is_dp_branch_device(link)) + return true; + + return false; +} + +bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss, + bool defer_handling, bool *has_left_work) +{ + union hpd_irq_data hpd_irq_dpcd_data = {0}; + union device_service_irq device_service_clear = {0}; + enum dc_status result; + bool status = false; + + if (out_link_loss) + *out_link_loss = false; + + if (has_left_work) + *has_left_work = false; + /* For use cases related to down stream connection status change, + * PSR and device auto test, refer to function handle_sst_hpd_irq + * in DAL2.1*/ + + DC_LOG_HW_HPD_IRQ("%s: Got short pulse HPD on link %d\n", + __func__, link->link_index); + + + /* All the "handle_hpd_irq_xxx()" methods + * should be called only after + * dal_dpsst_ls_read_hpd_irq_data + * Order of calls is important too + */ + result = dc_link_dp_read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data); + if (out_hpd_irq_dpcd_data) + *out_hpd_irq_dpcd_data = hpd_irq_dpcd_data; + + if (result != DC_OK) { + DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain irq data\n", + __func__); + return false; + } + + if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { + // Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC + link->is_automated = true; + device_service_clear.bits.AUTOMATED_TEST = 1; + core_link_write_dpcd( + link, + DP_DEVICE_SERVICE_IRQ_VECTOR, + &device_service_clear.raw, + sizeof(device_service_clear.raw)); + device_service_clear.raw = 0; + if (defer_handling && has_left_work) + *has_left_work = true; + else + dc_link_dp_handle_automated_test(link); + return false; + } + + if (!dc_link_dp_allow_hpd_rx_irq(link)) { + DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n", + __func__, link->link_index); + return false; + } + + if (handle_hpd_irq_psr_sink(link)) + /* PSR-related error was detected and handled */ + return true; + + /* If PSR-related error handled, Main link may be off, + * so do not handle as a normal sink status change interrupt. 
+ */ + + if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) { + if (defer_handling && has_left_work) + *has_left_work = true; + return true; + } + + /* check if we have MST msg and return since we poll for it */ + if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { + if (defer_handling && has_left_work) + *has_left_work = true; + return false; + } + + /* For now we only handle 'Downstream port status' case. + * If we got sink count changed it means + * Downstream port status changed, + * then DM should call DC to do the detection. + * NOTE: Do not handle link loss on eDP since it is internal link*/ + if ((link->connector_signal != SIGNAL_TYPE_EDP) && + dc_link_check_link_loss_status( + link, + &hpd_irq_dpcd_data)) { + /* Connectivity log: link loss */ + CONN_DATA_LINK_LOSS(link, + hpd_irq_dpcd_data.raw, + sizeof(hpd_irq_dpcd_data), + "Status: "); + + if (defer_handling && has_left_work) + *has_left_work = true; + else + dc_link_dp_handle_link_loss(link); + + status = false; + if (out_link_loss) + *out_link_loss = true; + + dp_trace_link_loss_increment(link); + } + + if (link->type == dc_connection_sst_branch && + hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT + != link->dpcd_sink_count) + status = true; + + /* reasons for HPD RX: + * 1. Link Loss - ie Re-train the Link + * 2. MST sideband message + * 3. Automated Test - ie. Internal Commit + * 4. CP (copy protection) - (not interesting for DM???) + * 5. DRR + * 6. Downstream Port status changed + * -ie. Detect - this the only one + * which is interesting for DM because + * it must call dc_link_detect. + */ + return status; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h similarity index 86% rename from drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.c rename to drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h index 801a95b34e8c0b..39b2e51ea79da8 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h @@ -1,4 +1,3 @@ - /* * Copyright 2022 Advanced Micro Devices, Inc. * @@ -23,6 +22,10 @@ * Authors: AMD * */ -/*********************************************************************/ -// USB4 DPIA BANDWIDTH ALLOCATION LOGIC -/*********************************************************************/ + +#ifndef __DC_LINK_DP_IRQ_HANDLER_H__ +#define __DC_LINK_DP_IRQ_HANDLER_H__ + +#include "link.h" + +#endif /* __DC_LINK_DP_IRQ_HANDLER_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c new file mode 100644 index 00000000000000..cd9fb8126bcf1a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c @@ -0,0 +1,208 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements basic dp phy functionality such as enable/disable phy + * output and set lane/drive settings. This file is responsible for maintaining + * and update software state representing current phy status such as current + * link settings. + */ + +#include "link_dp_phy.h" +#include "link_dpcd.h" +#include "link_dp_training.h" +#include "link_dp_capability.h" +#include "clk_mgr.h" +#include "resource.h" +#include "link_enc_cfg.h" +#define DC_LOGGER \ + link->ctx->logger + +void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on) +{ + uint8_t state; + + state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3; + + if (link->sync_lt_in_progress) + return; + + core_link_write_dpcd(link, DP_SET_POWER, &state, + sizeof(state)); + +} + +void dp_enable_link_phy( + struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings) +{ + link->cur_link_settings = *link_settings; + link->dc->hwss.enable_dp_link_output(link, link_res, signal, + clock_source, link_settings); + dc_link_dp_receiver_power_ctrl(link, true); +} + +void dp_disable_link_phy(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + struct dc *dc = link->ctx->dc; + + if (!link->wa_flags.dp_keep_receiver_powered) + dc_link_dp_receiver_power_ctrl(link, false); + + dc->hwss.disable_link_output(link, link_res, signal); + /* Clear current link setting.*/ + memset(&link->cur_link_settings, 0, + sizeof(link->cur_link_settings)); + + if (dc->clk_mgr->funcs->notify_link_rate_change) + dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); +} + +static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset) +{ + return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == + offset); +} + +void dp_set_hw_lane_settings( + struct dc_link *link, + const struct link_resource *link_res, + const struct link_training_settings *link_settings, + uint32_t offset) +{ + const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + + if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && + !is_immediate_downstream(link, offset)) + return; + + if (link_hwss->ext.set_dp_lane_settings) + link_hwss->ext.set_dp_lane_settings(link, link_res, + &link_settings->link_settings, + link_settings->hw_lane_settings); + + memmove(link->cur_lane_setting, + link_settings->hw_lane_settings, + sizeof(link->cur_lane_setting)); +} + +void dp_set_drive_settings( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + /* program ASIC PHY settings*/ + dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); + + dp_hw_to_dpcd_lane_settings(lt_settings, + lt_settings->hw_lane_settings, + lt_settings->dpcd_lane_settings); + + /* Notify DP sink the PHY settings from source */ + dpcd_set_lane_settings(link, lt_settings, DPRX); +} + 
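+/*
+ * For reference: dp_set_drive_settings() above mirrors the HW lane settings
+ * into DPCD TRAINING_LANEx_SET form via dp_hw_to_dpcd_lane_settings(). In
+ * 8b/10b mode each lane byte packs, per the DP spec, VOLTAGE_SWING_SET
+ * (bits 1:0), MAX_SWING_REACHED (bit 2), PRE-EMPHASIS_SET (bits 4:3) and
+ * MAX_PRE-EMPHASIS_REACHED (bit 5); in 128b/132b mode the byte carries the
+ * FFE preset level instead.
+ */
+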
+enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready)
+{
+	/* FEC has to be "set ready" before the link training.
+	 * The policy is to always train with FEC
+	 * if the sink supports it and leave it enabled on link.
+	 * If FEC is not supported, disable it.
+	 */
+	struct link_encoder *link_enc = NULL;
+	enum dc_status status = DC_OK;
+	uint8_t fec_config = 0;
+
+	link_enc = link_enc_cfg_get_link_enc(link);
+	ASSERT(link_enc);
+
+	if (!dc_link_should_enable_fec(link))
+		return status;
+
+	if (link_enc->funcs->fec_set_ready &&
+			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+		if (ready) {
+			fec_config = 1;
+			status = core_link_write_dpcd(link,
+					DP_FEC_CONFIGURATION,
+					&fec_config,
+					sizeof(fec_config));
+			if (status == DC_OK) {
+				link_enc->funcs->fec_set_ready(link_enc, true);
+				link->fec_state = dc_link_fec_ready;
+			} else {
+				link_enc->funcs->fec_set_ready(link_enc, false);
+				link->fec_state = dc_link_fec_not_ready;
+				dm_error("dpcd write failed to set fec_ready");
+			}
+		} else if (link->fec_state == dc_link_fec_ready) {
+			fec_config = 0;
+			status = core_link_write_dpcd(link,
+					DP_FEC_CONFIGURATION,
+					&fec_config,
+					sizeof(fec_config));
+			link_enc->funcs->fec_set_ready(link_enc, false);
+			link->fec_state = dc_link_fec_not_ready;
+		}
+	}
+
+	return status;
+}
+
+void dp_set_fec_enable(struct dc_link *link, bool enable)
+{
+	struct link_encoder *link_enc = NULL;
+
+	link_enc = link_enc_cfg_get_link_enc(link);
+	ASSERT(link_enc);
+
+	if (!dc_link_should_enable_fec(link))
+		return;
+
+	if (link_enc->funcs->fec_set_enable &&
+			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+		if (link->fec_state == dc_link_fec_ready && enable) {
+			/* According to the DP spec, the FEC enable sequence
+			 * can first be transmitted anytime after 1000 LL codes
+			 * have been transmitted on the link following link
+			 * training completion. One lane at RBR takes the
+			 * longest to transmit 1000 LL codes:
+			 * 1000 codes * 10 bits / 1.62 Gbps = 6.173 us.
+			 * So delay 7 microseconds to be safe.
+			 */
+			udelay(7);
+			link_enc->funcs->fec_set_enable(link_enc, true);
+			link->fec_state = dc_link_fec_enabled;
+		} else if (link->fec_state == dc_link_fec_enabled && !enable) {
+			link_enc->funcs->fec_set_enable(link_enc, false);
+			link->fec_state = dc_link_fec_ready;
+		}
+	}
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
new file mode 100644
index 00000000000000..dba1f29df319b1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DP_PHY_H__ +#define __DC_LINK_DP_PHY_H__ + +#include "link.h" +void dp_enable_link_phy( + struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings); + +void dp_disable_link_phy(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal); + +void dp_set_hw_lane_settings( + struct dc_link *link, + const struct link_resource *link_res, + const struct link_training_settings *link_settings, + uint32_t offset); + +void dp_set_drive_settings( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings); + +enum dc_status dp_set_fec_ready(struct dc_link *link, + const struct link_resource *link_res, bool ready); +void dp_set_fec_enable(struct dc_link *link, bool enable); + +#endif /* __DC_LINK_DP_PHY_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c new file mode 100644 index 00000000000000..b48d4d82299116 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -0,0 +1,1701 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements all generic dp link training helper functions and top + * level generic training sequence. All variations of dp link training sequence + * should be called inside the top level training functions in this file to + * ensure the integrity of our overall training procedure across different types + * of link encoding and back end hardware. 
+ */ +#include "link_dp_training.h" +#include "link_dp_training_8b_10b.h" +#include "link_dp_training_128b_132b.h" +#include "link_dp_training_auxless.h" +#include "link_dp_training_dpia.h" +#include "link_dp_training_fixed_vs_pe_retimer.h" +#include "link_dpcd.h" +#include "link/accessories/link_dp_trace.h" +#include "link_dp_phy.h" +#include "link_dp_capability.h" +#include "link_edp_panel_control.h" +#include "atomfirmware.h" +#include "link_enc_cfg.h" +#include "resource.h" +#include "dm_helpers.h" + +#define DC_LOGGER \ + link->ctx->logger + +#define POST_LT_ADJ_REQ_LIMIT 6 +#define POST_LT_ADJ_REQ_TIMEOUT 200 +#define LINK_TRAINING_RETRY_DELAY 50 /* ms */ + +void dp_log_training_result( + struct dc_link *link, + const struct link_training_settings *lt_settings, + enum link_training_result status) +{ + char *link_rate = "Unknown"; + char *lt_result = "Unknown"; + char *lt_spread = "Disabled"; + + switch (lt_settings->link_settings.link_rate) { + case LINK_RATE_LOW: + link_rate = "RBR"; + break; + case LINK_RATE_RATE_2: + link_rate = "R2"; + break; + case LINK_RATE_RATE_3: + link_rate = "R3"; + break; + case LINK_RATE_HIGH: + link_rate = "HBR"; + break; + case LINK_RATE_RBR2: + link_rate = "RBR2"; + break; + case LINK_RATE_RATE_6: + link_rate = "R6"; + break; + case LINK_RATE_HIGH2: + link_rate = "HBR2"; + break; + case LINK_RATE_HIGH3: + link_rate = "HBR3"; + break; + case LINK_RATE_UHBR10: + link_rate = "UHBR10"; + break; + case LINK_RATE_UHBR13_5: + link_rate = "UHBR13.5"; + break; + case LINK_RATE_UHBR20: + link_rate = "UHBR20"; + break; + default: + break; + } + + switch (status) { + case LINK_TRAINING_SUCCESS: + lt_result = "pass"; + break; + case LINK_TRAINING_CR_FAIL_LANE0: + lt_result = "CR failed lane0"; + break; + case LINK_TRAINING_CR_FAIL_LANE1: + lt_result = "CR failed lane1"; + break; + case LINK_TRAINING_CR_FAIL_LANE23: + lt_result = "CR failed lane23"; + break; + case LINK_TRAINING_EQ_FAIL_CR: + lt_result = "CR failed in EQ"; + break; + case LINK_TRAINING_EQ_FAIL_CR_PARTIAL: + lt_result = "CR failed in EQ partially"; + break; + case LINK_TRAINING_EQ_FAIL_EQ: + lt_result = "EQ failed"; + break; + case LINK_TRAINING_LQA_FAIL: + lt_result = "LQA failed"; + break; + case LINK_TRAINING_LINK_LOSS: + lt_result = "Link loss"; + break; + case DP_128b_132b_LT_FAILED: + lt_result = "LT_FAILED received"; + break; + case DP_128b_132b_MAX_LOOP_COUNT_REACHED: + lt_result = "max loop count reached"; + break; + case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT: + lt_result = "channel EQ timeout"; + break; + case DP_128b_132b_CDS_DONE_TIMEOUT: + lt_result = "CDS timeout"; + break; + default: + break; + } + + switch (lt_settings->link_settings.link_spread) { + case LINK_SPREAD_DISABLED: + lt_spread = "Disabled"; + break; + case LINK_SPREAD_05_DOWNSPREAD_30KHZ: + lt_spread = "0.5% 30KHz"; + break; + case LINK_SPREAD_05_DOWNSPREAD_33KHZ: + lt_spread = "0.5% 33KHz"; + break; + default: + break; + } + + /* Connectivity log: link training */ + + /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */ + + CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s", + link_rate, + lt_settings->link_settings.lane_count, + lt_result, + lt_settings->hw_lane_settings[0].VOLTAGE_SWING, + lt_settings->hw_lane_settings[0].PRE_EMPHASIS, + lt_spread); +} + +uint8_t dp_initialize_scrambling_data_symbols( + struct dc_link *link, + enum dc_dp_training_pattern pattern) +{ + uint8_t disable_scrabled_data_symbols = 0; + + switch (pattern) { + case DP_TRAINING_PATTERN_SEQUENCE_1: + case DP_TRAINING_PATTERN_SEQUENCE_2: + case 
DP_TRAINING_PATTERN_SEQUENCE_3: + disable_scrabled_data_symbols = 1; + break; + case DP_TRAINING_PATTERN_SEQUENCE_4: + case DP_128b_132b_TPS1: + case DP_128b_132b_TPS2: + disable_scrabled_data_symbols = 0; + break; + default: + ASSERT(0); + DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", + __func__, pattern); + break; + } + return disable_scrabled_data_symbols; +} + +enum dpcd_training_patterns + dp_training_pattern_to_dpcd_training_pattern( + struct dc_link *link, + enum dc_dp_training_pattern pattern) +{ + enum dpcd_training_patterns dpcd_tr_pattern = + DPCD_TRAINING_PATTERN_VIDEOIDLE; + + switch (pattern) { + case DP_TRAINING_PATTERN_SEQUENCE_1: + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1; + break; + case DP_TRAINING_PATTERN_SEQUENCE_2: + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2; + break; + case DP_TRAINING_PATTERN_SEQUENCE_3: + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3; + break; + case DP_TRAINING_PATTERN_SEQUENCE_4: + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4; + break; + case DP_128b_132b_TPS1: + dpcd_tr_pattern = DPCD_128b_132b_TPS1; + break; + case DP_128b_132b_TPS2: + dpcd_tr_pattern = DPCD_128b_132b_TPS2; + break; + case DP_128b_132b_TPS2_CDS: + dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS; + break; + case DP_TRAINING_PATTERN_VIDEOIDLE: + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE; + break; + default: + ASSERT(0); + DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", + __func__, pattern); + break; + } + + return dpcd_tr_pattern; +} + +uint8_t dp_get_nibble_at_index(const uint8_t *buf, + uint32_t index) +{ + uint8_t nibble; + nibble = buf[index / 2]; + + if (index % 2) + nibble >>= 4; + else + nibble &= 0x0F; + + return nibble; +} + +void dp_wait_for_training_aux_rd_interval( + struct dc_link *link, + uint32_t wait_in_micro_secs) +{ + if (wait_in_micro_secs > 1000) + msleep(wait_in_micro_secs/1000); + else + udelay(wait_in_micro_secs); + + DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n", + __func__, + wait_in_micro_secs); +} + +/* maximum pre emphasis level allowed for each voltage swing level*/ +static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = { + PRE_EMPHASIS_LEVEL3, + PRE_EMPHASIS_LEVEL2, + PRE_EMPHASIS_LEVEL1, + PRE_EMPHASIS_DISABLED }; + +static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing( + enum dc_voltage_swing voltage) +{ + enum dc_pre_emphasis pre_emphasis; + pre_emphasis = PRE_EMPHASIS_MAX_LEVEL; + + if (voltage <= VOLTAGE_SWING_MAX_LEVEL) + pre_emphasis = voltage_swing_to_pre_emphasis[voltage]; + + return pre_emphasis; + +} + +static void maximize_lane_settings(const struct link_training_settings *lt_settings, + struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ + uint32_t lane; + struct dc_lane_settings max_requested; + + max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING; + max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS; + max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET; + + /* Determine what the maximum of the requested settings are*/ + for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) { + if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING) + max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING; + + if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS) + max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS; + if (lane_settings[lane].FFE_PRESET.settings.level > + max_requested.FFE_PRESET.settings.level) + max_requested.FFE_PRESET.settings.level = + lane_settings[lane].FFE_PRESET.settings.level; + } 
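+
+	/*
+	 * Example (illustrative): if lane 0 requests VS1/PE2 and lane 1
+	 * requests VS2/PE0, max_requested becomes VS2/PE2 here, which the
+	 * clamps below then cap to the supported maximums.
+	 */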
+ + /* make sure the requested settings are + * not higher than maximum settings*/ + if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL) + max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL; + + if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL) + max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL; + if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL) + max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL; + + /* make sure the pre-emphasis matches the voltage swing*/ + if (max_requested.PRE_EMPHASIS > + get_max_pre_emphasis_for_voltage_swing( + max_requested.VOLTAGE_SWING)) + max_requested.PRE_EMPHASIS = + get_max_pre_emphasis_for_voltage_swing( + max_requested.VOLTAGE_SWING); + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING; + lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS; + lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET; + } +} + +void dp_hw_to_dpcd_lane_settings( + const struct link_training_settings *lt_settings, + const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]) +{ + uint8_t lane = 0; + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_8b_10b_ENCODING) { + dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = + (uint8_t)(hw_lane_settings[lane].VOLTAGE_SWING); + dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET = + (uint8_t)(hw_lane_settings[lane].PRE_EMPHASIS); + dpcd_lane_settings[lane].bits.MAX_SWING_REACHED = + (hw_lane_settings[lane].VOLTAGE_SWING == + VOLTAGE_SWING_MAX_LEVEL ? 1 : 0); + dpcd_lane_settings[lane].bits.MAX_PRE_EMPHASIS_REACHED = + (hw_lane_settings[lane].PRE_EMPHASIS == + PRE_EMPHASIS_MAX_LEVEL ? 
1 : 0); + } else if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_128b_132b_ENCODING) { + dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE = + hw_lane_settings[lane].FFE_PRESET.settings.level; + } + } +} + +uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings) +{ + uint8_t link_rate = 0; + enum dp_link_encoding encoding = link_dp_get_encoding_format(link_settings); + + if (encoding == DP_128b_132b_ENCODING) + switch (link_settings->link_rate) { + case LINK_RATE_UHBR10: + link_rate = 0x1; + break; + case LINK_RATE_UHBR20: + link_rate = 0x2; + break; + case LINK_RATE_UHBR13_5: + link_rate = 0x4; + break; + default: + link_rate = 0; + break; + } + else if (encoding == DP_8b_10b_ENCODING) + link_rate = (uint8_t) link_settings->link_rate; + else + link_rate = 0; + + return link_rate; +} + +/* Only used for channel equalization */ +uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval) +{ + unsigned int aux_rd_interval_us = 400; + + switch (dpcd_aux_read_interval) { + case 0x01: + aux_rd_interval_us = 4000; + break; + case 0x02: + aux_rd_interval_us = 8000; + break; + case 0x03: + aux_rd_interval_us = 12000; + break; + case 0x04: + aux_rd_interval_us = 16000; + break; + case 0x05: + aux_rd_interval_us = 32000; + break; + case 0x06: + aux_rd_interval_us = 64000; + break; + default: + break; + } + + return aux_rd_interval_us; +} + +enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status) +{ + enum link_training_result result = LINK_TRAINING_SUCCESS; + + if (ln_count >= LANE_COUNT_ONE && !dpcd_lane_status[0].bits.CR_DONE_0) + result = LINK_TRAINING_CR_FAIL_LANE0; + else if (ln_count >= LANE_COUNT_TWO && !dpcd_lane_status[1].bits.CR_DONE_0) + result = LINK_TRAINING_CR_FAIL_LANE1; + else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[2].bits.CR_DONE_0) + result = LINK_TRAINING_CR_FAIL_LANE23; + else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[3].bits.CR_DONE_0) + result = LINK_TRAINING_CR_FAIL_LANE23; + return result; +} + +bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset) +{ + return (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0); +} + +bool dp_is_max_vs_reached( + const struct link_training_settings *lt_settings) +{ + uint32_t lane; + for (lane = 0; lane < + (uint32_t)(lt_settings->link_settings.lane_count); + lane++) { + if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET + == VOLTAGE_SWING_MAX_LEVEL) + return true; + } + return false; + +} + +bool dp_is_cr_done(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status) +{ + bool done = true; + uint32_t lane; + /*LANEx_CR_DONE bits All 1's?*/ + for (lane = 0; lane < (uint32_t)(ln_count); lane++) { + if (!dpcd_lane_status[lane].bits.CR_DONE_0) + done = false; + } + return done; + +} + +bool dp_is_ch_eq_done(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status) +{ + bool done = true; + uint32_t lane; + for (lane = 0; lane < (uint32_t)(ln_count); lane++) + if (!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0) + done = false; + return done; +} + +bool dp_is_symbol_locked(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status) +{ + bool locked = true; + uint32_t lane; + for (lane = 0; lane < (uint32_t)(ln_count); lane++) + if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0) + locked = false; + return locked; +} + +bool dp_is_interlane_aligned(union lane_align_status_updated align_status) +{ + return 
align_status.bits.INTERLANE_ALIGN_DONE == 1; +} + +enum link_training_result dp_check_link_loss_status( + struct dc_link *link, + const struct link_training_settings *link_training_setting) +{ + enum link_training_result status = LINK_TRAINING_SUCCESS; + union lane_status lane_status; + uint8_t dpcd_buf[6] = {0}; + uint32_t lane; + + core_link_read_dpcd( + link, + DP_SINK_COUNT, + (uint8_t *)(dpcd_buf), + sizeof(dpcd_buf)); + + /*parse lane status*/ + for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { + /* + * check lanes status + */ + lane_status.raw = dp_get_nibble_at_index(&dpcd_buf[2], lane); + + if (!lane_status.bits.CHANNEL_EQ_DONE_0 || + !lane_status.bits.CR_DONE_0 || + !lane_status.bits.SYMBOL_LOCKED_0) { + /* if one of the channel equalization, clock + * recovery or symbol lock is dropped + * consider it as (link has been + * dropped) dp sink status has changed + */ + status = LINK_TRAINING_LINK_LOSS; + break; + } + } + + return status; +} + +enum dc_status dp_get_lane_status_and_lane_adjust( + struct dc_link *link, + const struct link_training_settings *link_training_setting, + union lane_status ln_status[LANE_COUNT_DP_MAX], + union lane_align_status_updated *ln_align, + union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], + uint32_t offset) +{ + unsigned int lane01_status_address = DP_LANE0_1_STATUS; + uint8_t lane_adjust_offset = 4; + unsigned int lane01_adjust_address; + uint8_t dpcd_buf[6] = {0}; + uint32_t lane; + enum dc_status status; + + if (is_repeater(link_training_setting, offset)) { + lane01_status_address = + DP_LANE0_1_STATUS_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + lane_adjust_offset = 3; + } + + status = core_link_read_dpcd( + link, + lane01_status_address, + (uint8_t *)(dpcd_buf), + sizeof(dpcd_buf)); + + if (status != DC_OK) { + DC_LOG_HW_LINK_TRAINING("%s:\n Failed to read from address 0x%X," + " keep current lane status and lane adjust unchanged", + __func__, + lane01_status_address); + return status; + } + + for (lane = 0; lane < + (uint32_t)(link_training_setting->link_settings.lane_count); + lane++) { + + ln_status[lane].raw = + dp_get_nibble_at_index(&dpcd_buf[0], lane); + ln_adjust[lane].raw = + dp_get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane); + } + + ln_align->raw = dpcd_buf[2]; + + if (is_repeater(link_training_setting, offset)) { + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", + __func__, + offset, + lane01_status_address, dpcd_buf[0], + lane01_status_address + 1, dpcd_buf[1]); + + lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", + __func__, + offset, + lane01_adjust_address, + dpcd_buf[lane_adjust_offset], + lane01_adjust_address + 1, + dpcd_buf[lane_adjust_offset + 1]); + } else { + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", + __func__, + lane01_status_address, dpcd_buf[0], + lane01_status_address + 1, dpcd_buf[1]); + + lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1; + + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", + __func__, + lane01_adjust_address, + dpcd_buf[lane_adjust_offset], + lane01_adjust_address + 1, + dpcd_buf[lane_adjust_offset + 1]); + } + + return status; +} + +static void override_lane_settings(const 
struct link_training_settings *lt_settings, + struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ + uint32_t lane; + + if (lt_settings->voltage_swing == NULL && + lt_settings->pre_emphasis == NULL && + lt_settings->ffe_preset == NULL && + lt_settings->post_cursor2 == NULL) + + return; + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + if (lt_settings->voltage_swing) + lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing; + if (lt_settings->pre_emphasis) + lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis; + if (lt_settings->post_cursor2) + lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2; + if (lt_settings->ffe_preset) + lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset; + } +} + +void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override) +{ + if (!dp_is_lttpr_present(link)) + return; + + if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_TRANSPARENT) { + *override = LTTPR_MODE_TRANSPARENT; + } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_TRANSPARENT) { + *override = LTTPR_MODE_NON_TRANSPARENT; + } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) { + *override = LTTPR_MODE_NON_LTTPR; + } + DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override)); +} + +void override_training_settings( + struct dc_link *link, + const struct dc_link_training_overrides *overrides, + struct link_training_settings *lt_settings) +{ + uint32_t lane; + + /* Override link spread */ + if (!link->dp_ss_off && overrides->downspread != NULL) + lt_settings->link_settings.link_spread = *overrides->downspread ? + LINK_SPREAD_05_DOWNSPREAD_30KHZ + : LINK_SPREAD_DISABLED; + + /* Override lane settings */ + if (overrides->voltage_swing != NULL) + lt_settings->voltage_swing = overrides->voltage_swing; + if (overrides->pre_emphasis != NULL) + lt_settings->pre_emphasis = overrides->pre_emphasis; + if (overrides->post_cursor2 != NULL) + lt_settings->post_cursor2 = overrides->post_cursor2; + if (overrides->ffe_preset != NULL) + lt_settings->ffe_preset = overrides->ffe_preset; + /* Override HW lane settings with BIOS forced values if present */ + if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) { + lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING; + lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS; + lt_settings->always_match_dpcd_with_hw_lane_settings = false; + } + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = + lt_settings->voltage_swing != NULL ? + *lt_settings->voltage_swing : + VOLTAGE_SWING_LEVEL0; + lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = + lt_settings->pre_emphasis != NULL ? + *lt_settings->pre_emphasis + : PRE_EMPHASIS_DISABLED; + lt_settings->hw_lane_settings[lane].POST_CURSOR2 = + lt_settings->post_cursor2 != NULL ? 
+ *lt_settings->post_cursor2 + : POST_CURSOR2_DISABLED; + } + + if (lt_settings->always_match_dpcd_with_hw_lane_settings) + dp_hw_to_dpcd_lane_settings(lt_settings, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + + /* Override training timings */ + if (overrides->cr_pattern_time != NULL) + lt_settings->cr_pattern_time = *overrides->cr_pattern_time; + if (overrides->eq_pattern_time != NULL) + lt_settings->eq_pattern_time = *overrides->eq_pattern_time; + if (overrides->pattern_for_cr != NULL) + lt_settings->pattern_for_cr = *overrides->pattern_for_cr; + if (overrides->pattern_for_eq != NULL) + lt_settings->pattern_for_eq = *overrides->pattern_for_eq; + if (overrides->enhanced_framing != NULL) + lt_settings->enhanced_framing = *overrides->enhanced_framing; + if (link->preferred_training_settings.fec_enable != NULL) + lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable; + +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* Check DP tunnel LTTPR mode debug option. */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr) + lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR; + +#endif + dp_get_lttpr_mode_override(link, <_settings->lttpr_mode); + +} + +enum dc_dp_training_pattern decide_cr_training_pattern( + const struct dc_link_settings *link_settings) +{ + switch (link_dp_get_encoding_format(link_settings)) { + case DP_8b_10b_ENCODING: + default: + return DP_TRAINING_PATTERN_SEQUENCE_1; + case DP_128b_132b_ENCODING: + return DP_128b_132b_TPS1; + } +} + +enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link, + const struct dc_link_settings *link_settings) +{ + struct link_encoder *link_enc; + struct encoder_feature_support *enc_caps; + struct dpcd_caps *rx_caps = &link->dpcd_caps; + enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2; + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); + enc_caps = &link_enc->features; + + switch (link_dp_get_encoding_format(link_settings)) { + case DP_8b_10b_ENCODING: + if (enc_caps->flags.bits.IS_TPS4_CAPABLE && + rx_caps->max_down_spread.bits.TPS4_SUPPORTED) + pattern = DP_TRAINING_PATTERN_SEQUENCE_4; + else if (enc_caps->flags.bits.IS_TPS3_CAPABLE && + rx_caps->max_ln_count.bits.TPS3_SUPPORTED) + pattern = DP_TRAINING_PATTERN_SEQUENCE_3; + else + pattern = DP_TRAINING_PATTERN_SEQUENCE_2; + break; + case DP_128b_132b_ENCODING: + pattern = DP_128b_132b_TPS2; + break; + default: + pattern = DP_TRAINING_PATTERN_SEQUENCE_2; + break; + } + return pattern; +} + +enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link, + struct dc_link_settings *link_setting) +{ + enum dp_link_encoding encoding = link_dp_get_encoding_format(link_setting); + + if (encoding == DP_8b_10b_ENCODING) + return dp_decide_8b_10b_lttpr_mode(link); + else if (encoding == DP_128b_132b_ENCODING) + return dp_decide_128b_132b_lttpr_mode(link); + + ASSERT(0); + return LTTPR_MODE_NON_LTTPR; +} + +void dp_decide_lane_settings( + const struct link_training_settings *lt_settings, + const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], + struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]) +{ + uint32_t lane; + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_8b_10b_ENCODING) { + hw_lane_settings[lane].VOLTAGE_SWING = + (enum dc_voltage_swing)(ln_adjust[lane].bits. 
+ VOLTAGE_SWING_LANE); + hw_lane_settings[lane].PRE_EMPHASIS = + (enum dc_pre_emphasis)(ln_adjust[lane].bits. + PRE_EMPHASIS_LANE); + } else if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_128b_132b_ENCODING) { + hw_lane_settings[lane].FFE_PRESET.raw = + ln_adjust[lane].tx_ffe.PRESET_VALUE; + } + } + dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); + + if (lt_settings->disallow_per_lane_settings) { + /* we find the maximum of the requested settings across all lanes*/ + /* and set this maximum for all lanes*/ + maximize_lane_settings(lt_settings, hw_lane_settings); + override_lane_settings(lt_settings, hw_lane_settings); + + if (lt_settings->always_match_dpcd_with_hw_lane_settings) + dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); + } + +} + +void dp_decide_training_settings( + struct dc_link *link, + const struct dc_link_settings *link_settings, + struct link_training_settings *lt_settings) +{ + if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) + decide_8b_10b_training_settings(link, link_settings, lt_settings); + else if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) + decide_128b_132b_training_settings(link, link_settings, lt_settings); +} + + +enum dc_status configure_lttpr_mode_transparent(struct dc_link *link) +{ + uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; + + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); + return core_link_write_dpcd(link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&repeater_mode, + sizeof(repeater_mode)); +} + +static enum dc_status configure_lttpr_mode_non_transparent( + struct dc_link *link, + const struct link_training_settings *lt_settings) +{ + /* aux timeout is already set to extended */ + /* RESET/SET lttpr mode to enable non transparent mode */ + uint8_t repeater_cnt; + uint32_t aux_interval_address; + uint8_t repeater_id; + enum dc_status result = DC_ERROR_UNEXPECTED; + uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; + + enum dp_link_encoding encoding = link_dp_get_encoding_format(<_settings->link_settings); + + if (encoding == DP_8b_10b_ENCODING) { + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); + result = core_link_write_dpcd(link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&repeater_mode, + sizeof(repeater_mode)); + + } + + if (result == DC_OK) { + link->dpcd_caps.lttpr_caps.mode = repeater_mode; + } + + if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__); + + repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT; + result = core_link_write_dpcd(link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&repeater_mode, + sizeof(repeater_mode)); + + if (result == DC_OK) { + link->dpcd_caps.lttpr_caps.mode = repeater_mode; + } + + if (encoding == DP_8b_10b_ENCODING) { + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + /* Driver does not need to train the first hop. Skip DPCD read and clear + * AUX_RD_INTERVAL for DPTX-to-DPIA hop. 
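+			 * The loop below then caches each remaining hop's
+			 * TRAINING_AUX_RD_INTERVAL_PHY_REPEATERx value; only the
+			 * seven LSBs of the DPCD byte encode the interval, hence
+			 * the 0x7F mask after the read.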
+ */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0; + + for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { + aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1)); + core_link_read_dpcd( + link, + aux_interval_address, + (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1], + sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1])); + link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F; + } + } + } + + return result; +} + +enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_training_settings *lt_settings) +{ + enum dc_status status = DC_OK; + + if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) + status = configure_lttpr_mode_transparent(link); + + else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) + status = configure_lttpr_mode_non_transparent(link, lt_settings); + + return status; +} + +void repeater_training_done(struct dc_link *link, uint32_t offset) +{ + union dpcd_training_pattern dpcd_pattern = {0}; + + const uint32_t dpcd_base_lt_offset = + DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + /* Set training not in progress*/ + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE; + + core_link_write_dpcd( + link, + dpcd_base_lt_offset, + &dpcd_pattern.raw, + 1); + + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +} + +static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding) +{ + uint8_t sink_status = 0; + uint8_t i; + + /* clear training pattern set */ + dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); + + if (encoding == DP_128b_132b_ENCODING) { + /* poll for intra-hop disable */ + for (i = 0; i < 10; i++) { + if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && + (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0) + break; + udelay(1000); + } + } +} + +enum dc_status dpcd_configure_channel_coding(struct dc_link *link, + struct link_training_settings *lt_settings) +{ + enum dp_link_encoding encoding = + link_dp_get_encoding_format( + <_settings->link_settings); + enum dc_status status; + + status = core_link_write_dpcd( + link, + DP_MAIN_LINK_CHANNEL_CODING_SET, + (uint8_t *) &encoding, + 1); + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X MAIN_LINK_CHANNEL_CODING_SET = %x\n", + __func__, + DP_MAIN_LINK_CHANNEL_CODING_SET, + encoding); + + return status; +} + +void dpcd_set_training_pattern( + struct dc_link *link, + enum dc_dp_training_pattern training_pattern) +{ + union dpcd_training_pattern dpcd_pattern = {0}; + + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = + dp_training_pattern_to_dpcd_training_pattern( + link, training_pattern); + + core_link_write_dpcd( + link, + DP_TRAINING_PATTERN_SET, + &dpcd_pattern.raw, + 1); + + DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n", + __func__, + DP_TRAINING_PATTERN_SET, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +} + +enum dc_status dpcd_set_link_settings( + struct dc_link *link, + const struct link_training_settings *lt_settings) +{ + uint8_t rate; + enum dc_status status; + + union down_spread_ctrl downspread = {0}; + union lane_count_set lane_count_set = {0}; + + downspread.raw = (uint8_t) + (lt_settings->link_settings.link_spread); + + 
lane_count_set.bits.LANE_COUNT_SET = + lt_settings->link_settings.lane_count; + + lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; + + + if (link->ep_type == DISPLAY_ENDPOINT_PHY && + lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = + link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; + } + + status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, + &downspread.raw, sizeof(downspread)); + + status = core_link_write_dpcd(link, DP_LANE_COUNT_SET, + &lane_count_set.raw, 1); + + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && + lt_settings->link_settings.use_link_rate_set == true) { + rate = 0; + /* WA for some MUX chips that will power down with eDP and lose supported + * link rate set for eDP 1.4. Source reads DPCD 0x010 again to ensure + * MUX chip gets link rate set back before link training. + */ + if (link->connector_signal == SIGNAL_TYPE_EDP) { + uint8_t supported_link_rates[16]; + + core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, + supported_link_rates, sizeof(supported_link_rates)); + } + status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + status = core_link_write_dpcd(link, DP_LINK_RATE_SET, + <_settings->link_settings.link_rate_set, 1); + } else { + rate = get_dpcd_link_rate(<_settings->link_settings); + + status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + } + + if (rate) { + DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", + __func__, + DP_LINK_BW_SET, + lt_settings->link_settings.link_rate, + DP_LANE_COUNT_SET, + lt_settings->link_settings.lane_count, + lt_settings->enhanced_framing, + DP_DOWNSPREAD_CTRL, + lt_settings->link_settings.link_spread); + } else { + DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x framing = %x\n %x spread = %x\n", + __func__, + DP_LINK_RATE_SET, + lt_settings->link_settings.link_rate_set, + DP_LANE_COUNT_SET, + lt_settings->link_settings.lane_count, + lt_settings->enhanced_framing, + DP_DOWNSPREAD_CTRL, + lt_settings->link_settings.link_spread); + } + + return status; +} + +enum dc_status dpcd_set_lane_settings( + struct dc_link *link, + const struct link_training_settings *link_training_setting, + uint32_t offset) +{ + unsigned int lane0_set_address; + enum dc_status status; + lane0_set_address = DP_TRAINING_LANE0_SET; + + if (is_repeater(link_training_setting, offset)) + lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + status = core_link_write_dpcd(link, + lane0_set_address, + (uint8_t *)(link_training_setting->dpcd_lane_settings), + link_training_setting->link_settings.lane_count); + + if (is_repeater(link_training_setting, offset)) { + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n" + " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + offset, + lane0_set_address, + link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); + + } else { + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + lane0_set_address, + link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + 
link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); + } + + return status; +} + +void dpcd_set_lt_pattern_and_lane_settings( + struct dc_link *link, + const struct link_training_settings *lt_settings, + enum dc_dp_training_pattern pattern, + uint32_t offset) +{ + uint32_t dpcd_base_lt_offset; + uint8_t dpcd_lt_buffer[5] = {0}; + union dpcd_training_pattern dpcd_pattern = {0}; + uint32_t size_in_bytes; + bool edp_workaround = false; /* TODO link_prop.INTERNAL */ + dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET; + + if (is_repeater(lt_settings, offset)) + dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + /***************************************************************** + * DpcdAddress_TrainingPatternSet + *****************************************************************/ + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = + dp_training_pattern_to_dpcd_training_pattern(link, pattern); + + dpcd_pattern.v1_4.SCRAMBLING_DISABLE = + dp_initialize_scrambling_data_symbols(link, pattern); + + dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] + = dpcd_pattern.raw; + + if (is_repeater(lt_settings, offset)) { + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + } else { + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", + __func__, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + } + + /* concatenate everything into one buffer*/ + size_in_bytes = lt_settings->link_settings.lane_count * + sizeof(lt_settings->dpcd_lane_settings[0]); + + // 0x00103 - 0x00102 + memmove( + &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET], + lt_settings->dpcd_lane_settings, + size_in_bytes); + + if (is_repeater(lt_settings, offset)) { + if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_128b_132b_ENCODING) + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X TX_FFE_PRESET_VALUE = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); + else if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_8b_10b_ENCODING) + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); + } else { + if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_128b_132b_ENCODING) + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", + __func__, + dpcd_base_lt_offset, + lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); + else if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_8b_10b_ENCODING) + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + dpcd_base_lt_offset, + lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + 
lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); + } + if (edp_workaround) { + /* for eDP write in 2 parts because the 5-byte burst is + * causing issues on some eDP panels (EPR#366724) + */ + core_link_write_dpcd( + link, + DP_TRAINING_PATTERN_SET, + &dpcd_pattern.raw, + sizeof(dpcd_pattern.raw)); + + core_link_write_dpcd( + link, + DP_TRAINING_LANE0_SET, + (uint8_t *)(lt_settings->dpcd_lane_settings), + size_in_bytes); + + } else if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_128b_132b_ENCODING) { + core_link_write_dpcd( + link, + dpcd_base_lt_offset, + dpcd_lt_buffer, + sizeof(dpcd_lt_buffer)); + } else + /* write it all in (1 + number-of-lanes)-byte burst*/ + core_link_write_dpcd( + link, + dpcd_base_lt_offset, + dpcd_lt_buffer, + size_in_bytes + sizeof(dpcd_pattern.raw)); +} + +void start_clock_recovery_pattern_early(struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t offset) +{ + DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n", + __func__); + dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); + dp_set_hw_lane_settings(link, link_res, lt_settings, offset); + udelay(400); +} + +void dp_set_hw_test_pattern( + struct dc_link *link, + const struct link_resource *link_res, + enum dp_test_pattern test_pattern, + uint8_t *custom_pattern, + uint32_t custom_pattern_size) +{ + const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + struct encoder_set_dp_phy_pattern_param pattern_param = {0}; + + pattern_param.dp_phy_pattern = test_pattern; + pattern_param.custom_pattern = custom_pattern; + pattern_param.custom_pattern_size = custom_pattern_size; + pattern_param.dp_panel_mode = dp_get_panel_mode(link); + + if (link_hwss->ext.set_dp_link_test_pattern) + link_hwss->ext.set_dp_link_test_pattern(link, link_res, &pattern_param); +} + +bool dp_set_hw_training_pattern( + struct dc_link *link, + const struct link_resource *link_res, + enum dc_dp_training_pattern pattern, + uint32_t offset) +{ + enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; + + switch (pattern) { + case DP_TRAINING_PATTERN_SEQUENCE_1: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1; + break; + case DP_TRAINING_PATTERN_SEQUENCE_2: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2; + break; + case DP_TRAINING_PATTERN_SEQUENCE_3: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3; + break; + case DP_TRAINING_PATTERN_SEQUENCE_4: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; + break; + case DP_128b_132b_TPS1: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE; + break; + case DP_128b_132b_TPS2: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE; + break; + default: + break; + } + + dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0); + + return true; +} + +static bool perform_post_lt_adj_req_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + enum dc_lane_count lane_count = + lt_settings->link_settings.lane_count; + + uint32_t adj_req_count; + uint32_t adj_req_timer; + bool req_drv_setting_changed; + uint32_t lane; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + + req_drv_setting_changed = false; + for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT; + adj_req_count++) { + + req_drv_setting_changed = 
false; + + for (adj_req_timer = 0; + adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT; + adj_req_timer++) { + + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + DPRX); + + if (dpcd_lane_status_updated.bits. + POST_LT_ADJ_REQ_IN_PROGRESS == 0) + return true; + + if (!dp_is_cr_done(lane_count, dpcd_lane_status)) + return false; + + if (!dp_is_ch_eq_done(lane_count, dpcd_lane_status) || + !dp_is_symbol_locked(lane_count, dpcd_lane_status) || + !dp_is_interlane_aligned(dpcd_lane_status_updated)) + return false; + + for (lane = 0; lane < (uint32_t)(lane_count); lane++) { + + if (lt_settings-> + dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET != + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE || + lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET != + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) { + + req_drv_setting_changed = true; + break; + } + } + + if (req_drv_setting_changed) { + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + + dp_set_drive_settings(link, + link_res, + lt_settings); + break; + } + + msleep(1); + } + + if (!req_drv_setting_changed) { + DC_LOG_WARNING("%s: Post Link Training Adjust Request Timed out\n", + __func__); + + ASSERT(0); + return true; + } + } + DC_LOG_WARNING("%s: Post Link Training Adjust Request limit reached\n", + __func__); + + ASSERT(0); + return true; + +} + +static enum link_training_result dp_transition_to_video_idle( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + enum link_training_result status) +{ + union lane_count_set lane_count_set = {0}; + + /* 4. mainlink output idle pattern*/ + dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); + + /* + * 5. post training adjust if required + * If the upstream DPTX and downstream DPRX both support TPS4, + * TPS4 must be used instead of POST_LT_ADJ_REQ. + */ + if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 || + lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) { + /* delay 5ms after Main Link output idle pattern and then check + * DPCD 0202h. 
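+		 * (LANE0_1_STATUS) to confirm that clock recovery, channel
+		 * equalization and symbol lock still hold on every active lane.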
+ */ + if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) { + msleep(5); + status = dp_check_link_loss_status(link, lt_settings); + } + return status; + } + + if (status == LINK_TRAINING_SUCCESS && + perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false) + status = LINK_TRAINING_LQA_FAIL; + + lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count; + lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; + + core_link_write_dpcd( + link, + DP_LANE_COUNT_SET, + &lane_count_set.raw, + sizeof(lane_count_set)); + + return status; +} + +enum link_training_result dp_perform_link_training( + struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + bool skip_video_pattern) +{ + enum link_training_result status = LINK_TRAINING_SUCCESS; + struct link_training_settings lt_settings = {0}; + enum dp_link_encoding encoding = + link_dp_get_encoding_format(link_settings); + + /* decide training settings */ + dp_decide_training_settings( + link, + link_settings, + <_settings); + + override_training_settings( + link, + &link->preferred_training_settings, + <_settings); + + /* reset previous training states */ + dpcd_exit_training_mode(link, encoding); + + /* configure link prior to entering training mode */ + dpcd_configure_lttpr_mode(link, <_settings); + dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready); + dpcd_configure_channel_coding(link, <_settings); + + /* enter training mode: + * Per DP specs starting from here, DPTX device shall not issue + * Non-LT AUX transactions inside training mode. + */ + if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING) + status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, <_settings); + else if (encoding == DP_8b_10b_ENCODING) + status = dp_perform_8b_10b_link_training(link, link_res, <_settings); + else if (encoding == DP_128b_132b_ENCODING) + status = dp_perform_128b_132b_link_training(link, link_res, <_settings); + else + ASSERT(0); + + /* exit training mode */ + dpcd_exit_training_mode(link, encoding); + + /* switch to video idle */ + if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) + status = dp_transition_to_video_idle(link, + link_res, + <_settings, + status); + + /* dump debug data */ + dp_log_training_result(link, <_settings, status); + if (status != LINK_TRAINING_SUCCESS) + link->ctx->dc->debug_data.ltFailCount++; + return status; +} + +bool perform_link_training_with_retries( + const struct dc_link_settings *link_setting, + bool skip_video_pattern, + int attempts, + struct pipe_ctx *pipe_ctx, + enum signal_type signal, + bool do_fallback) +{ + int j; + uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + enum dp_panel_mode panel_mode = dp_get_panel_mode(link); + enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0; + struct dc_link_settings cur_link_settings = *link_setting; + struct dc_link_settings max_link_settings = *link_setting; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + int fail_count = 0; + bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */ + bool is_link_bw_min = /* RBR x 1 */ + (cur_link_settings.link_rate <= LINK_RATE_LOW) && + (cur_link_settings.lane_count <= LANE_COUNT_ONE); + + dp_trace_commit_lt_init(link); + + 
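+	/* Retry policy: the loop below stops after 'attempts' tries at the
+	 * requested settings, or attempts * 10 accumulated failures. On a
+	 * plain retry the delay grows by LINK_TRAINING_RETRY_DELAY; with
+	 * do_fallback set, decide_fallback_link_setting() lowers the link
+	 * settings before the next try instead.
+	 */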
+	if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
+		/* We need to do this before the link training to ensure the idle
+		 * pattern in SST mode will be sent right after the link training
+		 */
+		link_hwss->setup_stream_encoder(pipe_ctx);
+
+	dp_trace_set_lt_start_timestamp(link, false);
+	j = 0;
+	while (j < attempts && fail_count < (attempts * 10)) {
+
+		DC_LOG_HW_LINK_TRAINING("%s: Beginning link(%d) training attempt %u of %d @ rate(%d) x lane(%d)\n",
+			__func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+			cur_link_settings.lane_count);
+
+		dp_enable_link_phy(
+			link,
+			&pipe_ctx->link_res,
+			signal,
+			pipe_ctx->clock_source->id,
+			&cur_link_settings);
+
+		if (stream->sink_patches.dppowerup_delay > 0) {
+			int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
+
+			msleep(delay_dp_power_up_in_ms);
+		}
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+		if (panel_mode == DP_PANEL_MODE_EDP) {
+			struct cp_psp *cp_psp = &stream->ctx->cp_psp;
+
+			if (cp_psp && cp_psp->funcs.enable_assr) {
+				/* ASSR is bound to fail with unsigned PSP
+				 * verstage used during the development phase.
+				 * Report and continue with eDP panel mode to
+				 * perform eDP link training with the right
+				 * settings.
+				 */
+				bool result;
+				result = cp_psp->funcs.enable_assr(cp_psp->handle, link);
+			}
+		}
+#endif
+
+		dp_set_panel_mode(link, panel_mode);
+
+		if (link->aux_access_disabled) {
+			dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
+			return true;
+		} else {
+			/** @todo Consolidate USB4 DP and DPx.x training. */
+			if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+				status = dc_link_dpia_perform_link_training(
+					link,
+					&pipe_ctx->link_res,
+					&cur_link_settings,
+					skip_video_pattern);
+
+				/* Transmit idle pattern once training successful. */
+				if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) {
+					dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+					/* Update verified link settings to the current ones,
+					 * because DPIA LT might fall back to a lower link setting.
+					 */
+					if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+						link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
+						link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
+						dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link);
+					}
+				}
+			} else {
+				status = dp_perform_link_training(
+					link,
+					&pipe_ctx->link_res,
+					&cur_link_settings,
+					skip_video_pattern);
+			}
+
+			dp_trace_lt_total_count_increment(link, false);
+			dp_trace_lt_result_update(link, status, false);
+			dp_trace_set_lt_end_timestamp(link, false);
+			if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
+				return true;
+		}
+
+		fail_count++;
+		dp_trace_lt_fail_count_update(link, fail_count, false);
+		if (link->ep_type == DISPLAY_ENDPOINT_PHY) {
+			/* If the latest attempt failed or training was aborted,
+			 * skip the delay and keep the PHY on.
+			 */
+			if (j == (attempts - 1) || (status == LINK_TRAINING_ABORT))
+				break;
+		}
+
+		DC_LOG_WARNING("%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) : fail reason:(%d)\n",
+			__func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+			cur_link_settings.lane_count, status);
+
+		dp_disable_link_phy(link, &pipe_ctx->link_res, signal);
+
+		/* Abort link training if failure due to sink being unplugged.
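+		 * A connection-type re-detect below confirms the sink is
+		 * actually gone before training is abandoned.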
*/ + if (status == LINK_TRAINING_ABORT) { + enum dc_connection_type type = dc_connection_none; + + dc_link_detect_connection_type(link, &type); + if (type == dc_connection_none) { + DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__); + break; + } + } + + /* Try to train again at original settings if: + * - not falling back between training attempts; + * - aborted previous attempt due to reasons other than sink unplug; + * - successfully trained but at a link rate lower than that required by stream; + * - reached minimum link bandwidth. + */ + if (!do_fallback || (status == LINK_TRAINING_ABORT) || + (status == LINK_TRAINING_SUCCESS && is_link_bw_low) || + is_link_bw_min) { + j++; + cur_link_settings = *link_setting; + delay_between_attempts += LINK_TRAINING_RETRY_DELAY; + is_link_bw_low = false; + is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) && + (cur_link_settings.lane_count <= LANE_COUNT_ONE); + + } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */ + uint32_t req_bw; + uint32_t link_bw; + + decide_fallback_link_setting(link, &max_link_settings, + &cur_link_settings, status); + /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to + * minimum link bandwidth. + */ + req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); + link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings); + is_link_bw_low = (req_bw > link_bw); + is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && + (cur_link_settings.lane_count <= LANE_COUNT_ONE)); + + if (is_link_bw_low) + DC_LOG_WARNING( + "%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n", + __func__, link->link_index, req_bw, link_bw); + } + + msleep(delay_between_attempts); + } + + return false; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h new file mode 100644 index 00000000000000..a04948635369f6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h @@ -0,0 +1,182 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_H__ +#define __DC_LINK_DP_TRAINING_H__ +#include "link.h" + +bool perform_link_training_with_retries( + const struct dc_link_settings *link_setting, + bool skip_video_pattern, + int attempts, + struct pipe_ctx *pipe_ctx, + enum signal_type signal, + bool do_fallback); + +enum link_training_result dp_perform_link_training( + struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + bool skip_video_pattern); + +bool dp_set_hw_training_pattern( + struct dc_link *link, + const struct link_resource *link_res, + enum dc_dp_training_pattern pattern, + uint32_t offset); + +void dp_set_hw_test_pattern( + struct dc_link *link, + const struct link_resource *link_res, + enum dp_test_pattern test_pattern, + uint8_t *custom_pattern, + uint32_t custom_pattern_size); + +void dpcd_set_training_pattern( + struct dc_link *link, + enum dc_dp_training_pattern training_pattern); + +/* Write DPCD drive settings. */ +enum dc_status dpcd_set_lane_settings( + struct dc_link *link, + const struct link_training_settings *link_training_setting, + uint32_t offset); + +/* Write DPCD link configuration data. */ +enum dc_status dpcd_set_link_settings( + struct dc_link *link, + const struct link_training_settings *lt_settings); + +void dpcd_set_lt_pattern_and_lane_settings( + struct dc_link *link, + const struct link_training_settings *lt_settings, + enum dc_dp_training_pattern pattern, + uint32_t offset); + +/* Read training status and adjustment requests from DPCD. */ +enum dc_status dp_get_lane_status_and_lane_adjust( + struct dc_link *link, + const struct link_training_settings *link_training_setting, + union lane_status ln_status[LANE_COUNT_DP_MAX], + union lane_align_status_updated *ln_align, + union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], + uint32_t offset); + +enum dc_status dpcd_configure_lttpr_mode( + struct dc_link *link, + struct link_training_settings *lt_settings); + +enum dc_status configure_lttpr_mode_transparent(struct dc_link *link); + +enum dc_status dpcd_configure_channel_coding( + struct dc_link *link, + struct link_training_settings *lt_settings); + +void repeater_training_done(struct dc_link *link, uint32_t offset); + +void start_clock_recovery_pattern_early(struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t offset); + +void dp_decide_training_settings( + struct dc_link *link, + const struct dc_link_settings *link_settings, + struct link_training_settings *lt_settings); + +void dp_decide_lane_settings( + const struct link_training_settings *lt_settings, + const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], + struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]); + +enum dc_dp_training_pattern decide_cr_training_pattern( + const struct dc_link_settings *link_settings); + +enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link, + const struct dc_link_settings *link_settings); + +void dp_get_lttpr_mode_override(struct dc_link *link, + enum lttpr_mode *override); + +void override_training_settings( + struct dc_link *link, + const struct dc_link_training_overrides *overrides, + struct link_training_settings *lt_settings); + +/* Check DPCD training status registers to detect link loss. 
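+ * Returns LINK_TRAINING_LINK_LOSS if any active lane has dropped clock
+ * recovery, channel equalization or symbol lock, LINK_TRAINING_SUCCESS
+ * otherwise.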
*/ +enum link_training_result dp_check_link_loss_status( + struct dc_link *link, + const struct link_training_settings *link_training_setting); + +bool dp_is_cr_done(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status); + +bool dp_is_ch_eq_done(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status); +bool dp_is_symbol_locked(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status); +bool dp_is_interlane_aligned(union lane_align_status_updated align_status); + +bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset); + +bool dp_is_max_vs_reached( + const struct link_training_settings *lt_settings); + +uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings); + +enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status); + +void dp_hw_to_dpcd_lane_settings( + const struct link_training_settings *lt_settings, + const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]); + +void dp_wait_for_training_aux_rd_interval( + struct dc_link *link, + uint32_t wait_in_micro_secs); + +enum dpcd_training_patterns + dp_training_pattern_to_dpcd_training_pattern( + struct dc_link *link, + enum dc_dp_training_pattern pattern); + +uint8_t dp_initialize_scrambling_data_symbols( + struct dc_link *link, + enum dc_dp_training_pattern pattern); + +void dp_log_training_result( + struct dc_link *link, + const struct link_training_settings *lt_settings, + enum link_training_result status); + +uint32_t dp_translate_training_aux_read_interval( + uint32_t dpcd_aux_read_interval); + +uint8_t dp_get_nibble_at_index(const uint8_t *buf, + uint32_t index); +#endif /* __DC_LINK_DP_TRAINING_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c new file mode 100644 index 00000000000000..23d380f09a21c8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c @@ -0,0 +1,259 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements dp 128b/132b link training software policies and + * sequences. 
+ */ +#include "link_dp_training_128b_132b.h" +#include "link_dp_training_8b_10b.h" +#include "link_dpcd.h" +#include "link_dp_phy.h" +#include "link_dp_capability.h" + +#define DC_LOGGER \ + link->ctx->logger + +static enum dc_status dpcd_128b_132b_set_lane_settings( + struct dc_link *link, + const struct link_training_settings *link_training_setting) +{ + enum dc_status status = core_link_write_dpcd(link, + DP_TRAINING_LANE0_SET, + (uint8_t *)(link_training_setting->dpcd_lane_settings), + sizeof(link_training_setting->dpcd_lane_settings)); + + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", + __func__, + DP_TRAINING_LANE0_SET, + link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); + return status; +} + +static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link, + uint32_t *interval_in_us) +{ + union dp_128b_132b_training_aux_rd_interval dpcd_interval; + uint32_t interval_unit = 0; + + dpcd_interval.raw = 0; + core_link_read_dpcd(link, DP_128B132B_TRAINING_AUX_RD_INTERVAL, + &dpcd_interval.raw, sizeof(dpcd_interval.raw)); + interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */ + /* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) * + * INTERVAL_UNIT. The maximum is 256 ms + */ + *interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000; +} + +static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + uint8_t loop_count; + uint32_t aux_rd_interval = 0; + uint32_t wait_time = 0; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + enum dc_status status = DC_OK; + enum link_training_result result = LINK_TRAINING_SUCCESS; + + /* Transmit 128b/132b_TPS1 over Main-Link */ + dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX); + + /* Set TRAINING_PATTERN_SET to 01h */ + dpcd_set_training_pattern(link, lt_settings->pattern_for_cr); + + /* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */ + dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); + dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); + dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX); + + /* Set loop counter to start from 1 */ + loop_count = 1; + + /* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */ + dpcd_set_lt_pattern_and_lane_settings(link, lt_settings, + lt_settings->pattern_for_eq, DPRX); + + /* poll for channel EQ done */ + while (result == LINK_TRAINING_SUCCESS) { + dp_wait_for_training_aux_rd_interval(link, aux_rd_interval); + wait_time += aux_rd_interval; + status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + } else if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count, + 
dpcd_lane_status)) { + /* pass */ + break; + } else if (loop_count >= lt_settings->eq_loop_count_limit) { + result = DP_128b_132b_MAX_LOOP_COUNT_REACHED; + } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { + result = DP_128b_132b_LT_FAILED; + } else { + dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); + dpcd_128b_132b_set_lane_settings(link, lt_settings); + } + loop_count++; + } + + /* poll for EQ interlane align done */ + while (result == LINK_TRAINING_SUCCESS) { + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + } else if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) { + /* pass */ + break; + } else if (wait_time >= lt_settings->eq_wait_time_limit) { + result = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT; + } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { + result = DP_128b_132b_LT_FAILED; + } else { + dp_wait_for_training_aux_rd_interval(link, + lt_settings->eq_pattern_time); + wait_time += lt_settings->eq_pattern_time; + status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + } + } + + return result; +} + +static enum link_training_result dp_perform_128b_132b_cds_done_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + /* Assumption: assume hardware has transmitted eq pattern */ + enum dc_status status = DC_OK; + enum link_training_result result = LINK_TRAINING_SUCCESS; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + uint32_t wait_time = 0; + + /* initiate CDS done sequence */ + dpcd_set_training_pattern(link, lt_settings->pattern_for_cds); + + /* poll for CDS interlane align done and symbol lock */ + while (result == LINK_TRAINING_SUCCESS) { + dp_wait_for_training_aux_rd_interval(link, + lt_settings->cds_pattern_time); + wait_time += lt_settings->cds_pattern_time; + status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + } else if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) && + dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) { + /* pass */ + break; + } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { + result = DP_128b_132b_LT_FAILED; + } else if (wait_time >= lt_settings->cds_wait_time_limit) { + result = DP_128b_132b_CDS_DONE_TIMEOUT; + } + } + + return result; +} + +enum link_training_result dp_perform_128b_132b_link_training( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + enum link_training_result result = LINK_TRAINING_SUCCESS; + + /* TODO - DP2.0 Link: remove legacy_dp2_lt logic */ + if (link->dc->debug.legacy_dp2_lt) { + struct link_training_settings legacy_settings; + + decide_8b_10b_training_settings(link, + <_settings->link_settings, + &legacy_settings); + return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings); + } + + dpcd_set_link_settings(link, lt_settings); + + if (result == LINK_TRAINING_SUCCESS) + result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings); + + if (result == LINK_TRAINING_SUCCESS) + result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings); + + return result; +} + +void 
decide_128b_132b_training_settings(struct dc_link *link, + const struct dc_link_settings *link_settings, + struct link_training_settings *lt_settings) +{ + memset(lt_settings, 0, sizeof(*lt_settings)); + + lt_settings->link_settings = *link_settings; + /* TODO: should decide link spread when populating link_settings */ + lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED : + LINK_SPREAD_05_DOWNSPREAD_30KHZ; + + lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings); + lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings); + lt_settings->eq_pattern_time = 2500; + lt_settings->eq_wait_time_limit = 400000; + lt_settings->eq_loop_count_limit = 20; + lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS; + lt_settings->cds_pattern_time = 2500; + lt_settings->cds_wait_time_limit = (dp_parse_lttpr_repeater_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000; + lt_settings->disallow_per_lane_settings = true; + lt_settings->lttpr_mode = dp_decide_128b_132b_lttpr_mode(link); + dp_hw_to_dpcd_lane_settings(lt_settings, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); +} + +enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link) +{ + enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR; + + if (dp_is_lttpr_present(link)) + mode = LTTPR_MODE_NON_TRANSPARENT; + + DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode); + return mode; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h new file mode 100644 index 00000000000000..2147f24efc8bf3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h @@ -0,0 +1,42 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_128B_132B_H__ +#define __DC_LINK_DP_TRAINING_128B_132B_H__ +#include "link_dp_training.h" + +enum link_training_result dp_perform_128b_132b_link_training( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings); + +void decide_128b_132b_training_settings(struct dc_link *link, + const struct dc_link_settings *link_settings, + struct link_training_settings *lt_settings); + +enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link); + +#endif /* __DC_LINK_DP_TRAINING_128B_132B_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c new file mode 100644 index 00000000000000..14b98e096d3926 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c @@ -0,0 +1,414 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements dp 8b/10b link training software policies and + * sequences. 
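+ *
+ * An 8b/10b sequence runs a clock recovery (CR) phase followed by a
+ * channel equalization (EQ) phase; in LTTPR non-transparent mode the
+ * sequence is repeated per repeater hop. EQ wait times derive from
+ * DPCD TRAINING_AUX_RD_INTERVAL, e.g. a register value of 2 maps to
+ * an 8000 us wait.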
+ */ +#include "link_dp_training_8b_10b.h" +#include "link_dpcd.h" +#include "link_dp_phy.h" +#include "link_dp_capability.h" + +#define DC_LOGGER \ + link->ctx->logger + +static int32_t get_cr_training_aux_rd_interval(struct dc_link *link, + const struct dc_link_settings *link_settings) +{ + union training_aux_rd_interval training_rd_interval; + uint32_t wait_in_micro_secs = 100; + + memset(&training_rd_interval, 0, sizeof(training_rd_interval)); + if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING && + link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { + core_link_read_dpcd( + link, + DP_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) + wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; + } + return wait_in_micro_secs; +} + +static uint32_t get_eq_training_aux_rd_interval( + struct dc_link *link, + const struct dc_link_settings *link_settings) +{ + union training_aux_rd_interval training_rd_interval; + + memset(&training_rd_interval, 0, sizeof(training_rd_interval)); + if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) { + core_link_read_dpcd( + link, + DP_128B132B_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + } else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING && + link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { + core_link_read_dpcd( + link, + DP_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + } + + switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) { + case 0: return 400; + case 1: return 4000; + case 2: return 8000; + case 3: return 12000; + case 4: return 16000; + case 5: return 32000; + case 6: return 64000; + default: return 400; + } +} + +void decide_8b_10b_training_settings( + struct dc_link *link, + const struct dc_link_settings *link_setting, + struct link_training_settings *lt_settings) +{ + memset(lt_settings, '\0', sizeof(struct link_training_settings)); + + /* Initialize link settings */ + lt_settings->link_settings.use_link_rate_set = link_setting->use_link_rate_set; + lt_settings->link_settings.link_rate_set = link_setting->link_rate_set; + lt_settings->link_settings.link_rate = link_setting->link_rate; + lt_settings->link_settings.lane_count = link_setting->lane_count; + /* TODO hard coded to SS for now + * lt_settings.link_settings.link_spread = + * dal_display_path_is_ss_supported( + * path_mode->display_path) ? + * LINK_SPREAD_05_DOWNSPREAD_30KHZ : + * LINK_SPREAD_DISABLED; + */ + lt_settings->link_settings.link_spread = link->dp_ss_off ? 
+ LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+ lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
+ lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
+ lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
+ lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
+ lt_settings->enhanced_framing = 1;
+ lt_settings->should_set_fec_ready = true;
+ lt_settings->disallow_per_lane_settings = true;
+ lt_settings->always_match_dpcd_with_hw_lane_settings = true;
+ lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
+ dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+}
+
+enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
+{
+ bool is_lttpr_present = dp_is_lttpr_present(link);
+ bool vbios_lttpr_force_non_transparent = link->dc->caps.vbios_lttpr_enable;
+ bool vbios_lttpr_aware = link->dc->caps.vbios_lttpr_aware;
+
+ if (!is_lttpr_present)
+ return LTTPR_MODE_NON_LTTPR;
+
+ if (vbios_lttpr_aware) {
+ if (vbios_lttpr_force_non_transparent) {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
+ return LTTPR_MODE_NON_TRANSPARENT;
+ } else {
+ DC_LOG_DC("chose LTTPR_MODE_TRANSPARENT by default since VBIOS did not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE to 1.\n");
+ return LTTPR_MODE_TRANSPARENT;
+ }
+ }
+
+ if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
+ link->dc->caps.extended_aux_timeout_support) {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT since dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A is set to 1 and extended AUX timeout is supported.\n");
+ return LTTPR_MODE_NON_TRANSPARENT;
+ }
+
+ DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n");
+ return LTTPR_MODE_NON_LTTPR;
+}
+
+enum link_training_result perform_8b_10b_clock_recovery_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t offset)
+{
+ uint32_t retries_cr;
+ uint32_t retry_count;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated dpcd_lane_status_updated;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ retries_cr = 0;
+ retry_count = 0;
+
+ memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
+ memset(&dpcd_lane_status_updated, '\0',
+ sizeof(dpcd_lane_status_updated));
+
+ if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);
+
+ /* The Synaptics MST hub can put link training into an infinite loop
+ * by switching the VS between level 0 and level 1 continuously, so
+ * try for CR lock for at most LINK_TRAINING_MAX_CR_RETRY iterations.
+ */
+ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
+ /* 1. call HWSS to set lane settings */
+ dp_set_hw_lane_settings(
+ link,
+ link_res,
+ lt_settings,
+ offset);
+
+ /* 2. update DPCD of the receiver */
+ if (!retry_count)
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the first iteration. */
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ lt_settings->pattern_for_cr,
+ offset);
+ else
+ dpcd_set_lane_settings(
+ link,
+ lt_settings,
+ offset);
+
+ /* 3. wait for the receiver to lock on */
+ wait_time_microsec = lt_settings->cr_pattern_time;
+
+ dp_wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
+
+ /* 4. Read lane status and requested drive
+ * settings as set by the sink
+ */
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ offset);
+
+ /* 5. check CR done */
+ if (dp_is_cr_done(lane_count, dpcd_lane_status))
+ return LINK_TRAINING_SUCCESS;
+
+ /* 6. max VS reached */
+ if ((link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING) &&
+ dp_is_max_vs_reached(lt_settings))
+ break;
+
+ /* 7. same lane settings
+ * Note: settings are the same for all lanes,
+ * so comparing the first lane is sufficient.
+ */
+ if ((link_dp_get_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) &&
+ lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ retries_cr++;
+ else if ((link_dp_get_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) &&
+ lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE ==
+ dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE)
+ retries_cr++;
+ else
+ retries_cr = 0;
+
+ /* 8. update VS/PE/PC2 in lt_settings */
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ retry_count++;
+ }
+
+ if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
+ ASSERT(0);
+ DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
+ __func__,
+ LINK_TRAINING_MAX_CR_RETRY);
+ }
+
+ return dp_get_cr_failure(lane_count, dpcd_lane_status);
+}
+
+enum link_training_result perform_8b_10b_channel_equalization_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t offset)
+{
+ enum dc_dp_training_pattern tr_pattern;
+ uint32_t retries_ch_eq;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ /* Note: also check that TPS4 is a supported feature */
+ tr_pattern = lt_settings->pattern_for_eq;
+
+ if (is_repeater(lt_settings, offset) && link_dp_get_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
+ tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+
+ dp_set_hw_training_pattern(link, link_res, tr_pattern, offset);
+
+ for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
+ retries_ch_eq++) {
+
+ dp_set_hw_lane_settings(link, link_res, lt_settings, offset);
+
+ /* 2. update DPCD */
+ if (!retries_ch_eq)
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the first iteration
+ */
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ tr_pattern, offset);
+ else
+ dpcd_set_lane_settings(link, lt_settings, offset);
+
+ /* 3. wait for the receiver to lock on */
+ wait_time_microsec = lt_settings->eq_pattern_time;
+
+ if (is_repeater(lt_settings, offset))
+ wait_time_microsec =
+ dp_translate_training_aux_read_interval(
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
+
+ dp_wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
+
+ /* 4.
Read lane status and requested + * drive settings as set by the sink*/ + + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + offset); + + /* 5. check CR done*/ + if (!dp_is_cr_done(lane_count, dpcd_lane_status)) + return dpcd_lane_status[0].bits.CR_DONE_0 ? + LINK_TRAINING_EQ_FAIL_CR_PARTIAL : + LINK_TRAINING_EQ_FAIL_CR; + + /* 6. check CHEQ done*/ + if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && + dp_is_symbol_locked(lane_count, dpcd_lane_status) && + dp_is_interlane_aligned(dpcd_lane_status_updated)) + return LINK_TRAINING_SUCCESS; + + /* 7. update VS/PE/PC2 in lt_settings*/ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + } + + return LINK_TRAINING_EQ_FAIL_EQ; + +} + +enum link_training_result dp_perform_8b_10b_link_training( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + enum link_training_result status = LINK_TRAINING_SUCCESS; + + uint8_t repeater_cnt; + uint8_t repeater_id; + uint8_t lane = 0; + + if (link->ctx->dc->work_arounds.lt_early_cr_pattern) + start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); + + /* 1. set link rate, lane count and spread. */ + dpcd_set_link_settings(link, lt_settings); + + if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + + /* 2. perform link training (set link training done + * to false is done as well) + */ + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); + repeater_id--) { + status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); + + if (status != LINK_TRAINING_SUCCESS) { + repeater_training_done(link, repeater_id); + break; + } + + status = perform_8b_10b_channel_equalization_sequence(link, + link_res, + lt_settings, + repeater_id); + + repeater_training_done(link, repeater_id); + + if (status != LINK_TRAINING_SUCCESS) + break; + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + lt_settings->dpcd_lane_settings[lane].raw = 0; + lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; + lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; + } + } + } + + if (status == LINK_TRAINING_SUCCESS) { + status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, DPRX); + if (status == LINK_TRAINING_SUCCESS) { + status = perform_8b_10b_channel_equalization_sequence(link, + link_res, + lt_settings, + DPRX); + } + } + + return status; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h new file mode 100644 index 00000000000000..d26de15ce954e1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h @@ -0,0 +1,61 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_8B_10B_H__ +#define __DC_LINK_DP_TRAINING_8B_10B_H__ +#include "link_dp_training.h" + +/* to avoid infinite loop where-in the receiver + * switches between different VS + */ +#define LINK_TRAINING_MAX_CR_RETRY 100 +#define LINK_TRAINING_MAX_RETRY_COUNT 5 + +enum link_training_result dp_perform_8b_10b_link_training( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings); + +enum link_training_result perform_8b_10b_clock_recovery_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t offset); + +enum link_training_result perform_8b_10b_channel_equalization_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t offset); + +enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link); + +void decide_8b_10b_training_settings( + struct dc_link *link, + const struct dc_link_settings *link_setting, + struct link_training_settings *lt_settings); + +#endif /* __DC_LINK_DP_TRAINING_8B_10B_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c new file mode 100644 index 00000000000000..e50ec5012559bd --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c @@ -0,0 +1,79 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements a link training sequence that skips the AUX-based
+ * handshake: training patterns are driven for fixed intervals with no
+ * DPCD status reads, and the result is assumed (and logged) as
+ * successful. Intended for links where AUX cannot be used for training.
+ */
+#include "link_dp_training_auxless.h"
+#include "link_dp_phy.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+
+bool dc_link_dp_perform_link_training_skip_aux(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_setting)
+{
+ struct link_training_settings lt_settings = {0};
+
+ dp_decide_training_settings(
+ link,
+ link_setting,
+ &lt_settings);
+ override_training_settings(
+ link,
+ &link->preferred_training_settings,
+ &lt_settings);
+
+ /* 1. Perform_clock_recovery_sequence. */
+
+ /* transmit training pattern for clock recovery */
+ dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_cr, DPRX);
+
+ /* call HWSS to set lane settings */
+ dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);
+
+ /* wait for the receiver to lock on */
+ dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
+
+ /* 2. Perform_channel_equalization_sequence. */
+
+ /* transmit training pattern for channel equalization */
+ dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_eq, DPRX);
+
+ /* call HWSS to set lane settings */
+ dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);
+
+ /* wait for the receiver to lock on */
+ dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
+
+ /* 3. Perform_link_training_int. */
+
+ /* Mainlink output idle pattern. */
+ dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+
+ dp_log_training_result(link, &lt_settings, LINK_TRAINING_SUCCESS);
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h
new file mode 100644
index 00000000000000..413999cd03c4c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_AUXLESS_H__ +#define __DC_LINK_DP_TRAINING_AUXLESS_H__ +#include "link_dp_training.h" + +bool dc_link_dp_perform_link_training_skip_aux( + struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_setting); +#endif /* __DC_LINK_DP_TRAINING_AUXLESS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c similarity index 82% rename from drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c rename to drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c index d130d58ac08e70..e60da0532c539c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c @@ -1,6 +1,5 @@ -// SPDX-License-Identifier: MIT /* - * Copyright 2021 Advanced Micro Devices, Inc. + * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,76 +23,72 @@ * */ +/* FILE POLICY AND INTENDED USAGE: + * This module implements functionality for training DPIA links. + */ +#include "link_dp_training_dpia.h" #include "dc.h" -#include "dc_link_dpia.h" #include "inc/core_status.h" #include "dc_link.h" -#include "dc_link_dp.h" #include "dpcd_defs.h" + +#include "link_dp_dpia.h" #include "link_hwss.h" #include "dm_helpers.h" #include "dmub/inc/dmub_cmd.h" -#include "inc/link_dpcd.h" +#include "link_dpcd.h" +#include "link_dp_phy.h" +#include "link_dp_training_8b_10b.h" +#include "link_dp_capability.h" #include "dc_dmub_srv.h" - #define DC_LOGGER \ link->ctx->logger -enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) -{ - enum dc_status status = DC_OK; - uint8_t dpcd_dp_tun_data[3] = {0}; - uint8_t dpcd_topology_data[DPCD_USB4_TOPOLOGY_ID_LEN] = {0}; - uint8_t i = 0; - - status = core_link_read_dpcd(link, - DP_TUNNELING_CAPABILITIES_SUPPORT, - dpcd_dp_tun_data, - sizeof(dpcd_dp_tun_data)); - - status = core_link_read_dpcd(link, - DP_USB4_ROUTER_TOPOLOGY_ID, - dpcd_topology_data, - sizeof(dpcd_topology_data)); - - link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = - dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - - DP_TUNNELING_CAPABILITIES_SUPPORT]; - link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw = - dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT]; - link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id = - dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT]; - - for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++) - link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i]; - - return status; -} - -bool dc_link_dpia_query_hpd_status(struct dc_link *link) -{ - union dmub_rb_cmd cmd = {0}; - struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv; - bool is_hpd_high = false; - - /* prepare QUERY_HPD command */ - cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE; - cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1; - cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA; - - /* Return HPD status reported by DMUB if query successfully executed. */ - if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS) - is_hpd_high = cmd.query_hpd.data.result; +/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. 
*/ +#define DPIA_CLK_SYNC_DELAY 16000 + +/* Extend interval between training status checks for manual testing. */ +#define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000 + +#define TRAINING_AUX_RD_INTERVAL 100 //us + +/* SET_CONFIG message types sent by driver. */ +enum dpia_set_config_type { + DPIA_SET_CFG_SET_LINK = 0x01, + DPIA_SET_CFG_SET_PHY_TEST_MODE = 0x05, + DPIA_SET_CFG_SET_TRAINING = 0x18, + DPIA_SET_CFG_SET_VSPE = 0x19 +}; + +/* Training stages (TS) in SET_CONFIG(SET_TRAINING) message. */ +enum dpia_set_config_ts { + DPIA_TS_DPRX_DONE = 0x00, /* Done training DPRX. */ + DPIA_TS_TPS1 = 0x01, + DPIA_TS_TPS2 = 0x02, + DPIA_TS_TPS3 = 0x03, + DPIA_TS_TPS4 = 0x07, + DPIA_TS_UFP_DONE = 0xff /* Done training DPTX-to-DPIA hop. */ +}; + +/* SET_CONFIG message data associated with messages sent by driver. */ +union dpia_set_config_data { + struct { + uint8_t mode : 1; + uint8_t reserved : 7; + } set_link; + struct { + uint8_t stage; + } set_training; + struct { + uint8_t swing : 2; + uint8_t max_swing_reached : 1; + uint8_t pre_emph : 2; + uint8_t max_pre_emph_reached : 1; + uint8_t reserved : 2; + } set_vspe; + uint8_t raw; +}; - DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n", - __func__, - link->link_index, - link->link_id.enum_id - ENUM_ID_1, - cmd.query_hpd.data.status, - cmd.query_hpd.data.result); - - return is_hpd_high; -} /* Configure link as prescribed in link_setting; set LTTPR mode; and * Initialize link training settings. @@ -113,11 +108,12 @@ static enum link_training_result dpia_configure_link( bool fec_enable; DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) configuring\n - LTTPR mode(%d)\n", - __func__, - link->link_id.enum_id - ENUM_ID_1, - lt_settings->lttpr_mode); + __func__, + link->link_id.enum_id - ENUM_ID_1, + lt_settings->lttpr_mode); - dp_decide_training_settings(link, + dp_decide_training_settings( + link, link_setting, lt_settings); @@ -137,7 +133,7 @@ static enum link_training_result dpia_configure_link( if (status != DC_OK && link->is_hpd_pending) return LINK_TRAINING_ABORT; - if (link->preferred_training_settings.fec_enable) + if (link->preferred_training_settings.fec_enable != NULL) fec_enable = *link->preferred_training_settings.fec_enable; else fec_enable = true; @@ -148,7 +144,8 @@ static enum link_training_result dpia_configure_link( return LINK_TRAINING_SUCCESS; } -static enum dc_status core_link_send_set_config(struct dc_link *link, +static enum dc_status core_link_send_set_config( + struct dc_link *link, uint8_t msg_type, uint8_t msg_data) { @@ -160,8 +157,8 @@ static enum dc_status core_link_send_set_config(struct dc_link *link, payload.msg_data = msg_data; if (!link->ddc->ddc_pin && !link->aux_access_disabled && - (dm_helpers_dmub_set_config_sync(link->ctx, link, - &payload, &set_config_result) == -1)) { + (dm_helpers_dmub_set_config_sync(link->ctx, + link, &payload, &set_config_result) == -1)) { return DC_ERROR_UNEXPECTED; } @@ -170,7 +167,8 @@ static enum dc_status core_link_send_set_config(struct dc_link *link, } /* Build SET_CONFIG message data payload for specified message type. 
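+ *
+ * For example, building the SET_VSPE byte for voltage swing level 1 and
+ * pre-emphasis level 2 with neither maximum reached (illustrative sketch
+ * using the union above; values are made up for the example):
+ *
+ *     union dpia_set_config_data data = {0};
+ *
+ *     data.set_vspe.swing = 1;
+ *     data.set_vspe.pre_emph = 2;
+ *     data.set_vspe.max_swing_reached = 0;
+ *     data.set_vspe.max_pre_emph_reached = 0;
+ *     return data.raw; /* passed as msg_data to core_link_send_set_config() */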
*/ -static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type, +static uint8_t dpia_build_set_config_data( + enum dpia_set_config_type type, struct dc_link *link, struct link_training_settings *lt_settings) { @@ -189,11 +187,9 @@ static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type, data.set_vspe.swing = lt_settings->hw_lane_settings[0].VOLTAGE_SWING; data.set_vspe.pre_emph = lt_settings->hw_lane_settings[0].PRE_EMPHASIS; data.set_vspe.max_swing_reached = - lt_settings->hw_lane_settings[0].VOLTAGE_SWING == - VOLTAGE_SWING_MAX_LEVEL ? 1 : 0; + lt_settings->hw_lane_settings[0].VOLTAGE_SWING == VOLTAGE_SWING_MAX_LEVEL ? 1 : 0; data.set_vspe.max_pre_emph_reached = - lt_settings->hw_lane_settings[0].PRE_EMPHASIS == - PRE_EMPHASIS_MAX_LEVEL ? 1 : 0; + lt_settings->hw_lane_settings[0].PRE_EMPHASIS == PRE_EMPHASIS_MAX_LEVEL ? 1 : 0; break; default: ASSERT(false); /* Message type not supported by helper function. */ @@ -235,7 +231,8 @@ static enum dc_status convert_trng_ptn_to_trng_stg(enum dc_dp_training_pattern t } /* Write training pattern to DPCD. */ -static enum dc_status dpcd_set_lt_pattern(struct dc_link *link, +static enum dc_status dpcd_set_lt_pattern( + struct dc_link *link, enum dc_dp_training_pattern pattern, uint32_t hop) { @@ -249,28 +246,29 @@ static enum dc_status dpcd_set_lt_pattern(struct dc_link *link, /* DpcdAddress_TrainingPatternSet */ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = - dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern); + dp_training_pattern_to_dpcd_training_pattern(link, pattern); dpcd_pattern.v1_4.SCRAMBLING_DISABLE = - dc_dp_initialize_scrambling_data_symbols(link, pattern); + dp_initialize_scrambling_data_symbols(link, pattern); if (hop != DPRX) { DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", - __func__, - hop, - dpcd_tps_offset, - dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + __func__, + hop, + dpcd_tps_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); } else { DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", - __func__, - dpcd_tps_offset, - dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + __func__, + dpcd_tps_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); } - status = core_link_write_dpcd(link, - dpcd_tps_offset, - &dpcd_pattern.raw, - sizeof(dpcd_pattern.raw)); + status = core_link_write_dpcd( + link, + dpcd_tps_offset, + &dpcd_pattern.raw, + sizeof(dpcd_pattern.raw)); return status; } @@ -284,7 +282,7 @@ static enum dc_status dpcd_set_lt_pattern(struct dc_link *link, * * @param link DPIA link being trained. * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0. */ static enum link_training_result dpia_training_cr_non_transparent( struct dc_link *link, @@ -297,8 +295,7 @@ static enum link_training_result dpia_training_cr_non_transparent( enum dc_status status; uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */ uint32_t retry_count = 0; - /* From DP spec, CR read interval is always 100us. */ - uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; + uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; /* From DP spec, CR read interval is always 100us. 
*/ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; union lane_align_status_updated dpcd_lane_status_updated = {0}; @@ -306,7 +303,7 @@ static enum link_training_result dpia_training_cr_non_transparent( uint8_t set_cfg_data; enum dpia_set_config_ts ts; - repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); /* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery. * Fix inherited from perform_clock_recovery_sequence() - @@ -316,17 +313,20 @@ static enum link_training_result dpia_training_cr_non_transparent( * continuously. */ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && - (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + /* DPTX-to-DPIA */ if (hop == repeater_cnt) { /* Send SET_CONFIG(SET_LINK:LC,LR,LTTPR) to notify DPOA that * non-transparent link training has started. * This also enables the transmission of clk_sync packets. */ - set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_LINK, + set_cfg_data = dpia_build_set_config_data( + DPIA_SET_CFG_SET_LINK, link, lt_settings); - status = core_link_send_set_config(link, + status = core_link_send_set_config( + link, DPIA_SET_CFG_SET_LINK, set_cfg_data); /* CR for this hop is considered successful as long as @@ -347,6 +347,14 @@ static enum link_training_result dpia_training_cr_non_transparent( result = LINK_TRAINING_ABORT; break; } + status = core_link_send_set_config( + link, + DPIA_SET_CFG_SET_TRAINING, + ts); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } status = dpcd_set_lt_pattern(link, lt_settings->pattern_for_cr, hop); if (status != DC_OK) { result = LINK_TRAINING_ABORT; @@ -358,10 +366,12 @@ static enum link_training_result dpia_training_cr_non_transparent( * drive settings for hops immediately downstream. */ if (hop == repeater_cnt - 1) { - set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_VSPE, + set_cfg_data = dpia_build_set_config_data( + DPIA_SET_CFG_SET_VSPE, link, lt_settings); - status = core_link_send_set_config(link, + status = core_link_send_set_config( + link, DPIA_SET_CFG_SET_VSPE, set_cfg_data); if (status != DC_OK) { @@ -468,7 +478,8 @@ static enum link_training_result dpia_training_cr_transparent( * continuously. */ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && - (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + /* Write TPS1 (not VS or PE) to DPCD to start CR phase. * DPIA sends SET_CONFIG(SET_LINK) to notify DPOA to * start link training. @@ -529,8 +540,7 @@ static enum link_training_result dpia_training_cr_transparent( if (link->is_hpd_pending) result = LINK_TRAINING_ABORT; - DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n" - " -hop(%d)\n - result(%d)\n - retries(%d)\n", + DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n -hop(%d)\n - result(%d)\n - retries(%d)\n", __func__, link->link_id.enum_id - ENUM_ID_1, DPRX, @@ -545,7 +555,7 @@ static enum link_training_result dpia_training_cr_transparent( * * @param link DPIA link being trained. * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0. 
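+ *
+ * Hop indexing used throughout this file, assuming the DPIA reports
+ * itself in phy_repeater_cnt (illustration for repeater_cnt == 2):
+ *
+ *     hop 2 (== repeater_cnt): DPTX-to-DPIA hop, trained via SET_CONFIG
+ *     hop 1: LTTPR downstream of the DPIA
+ *     hop 0 (DPRX): final hop to the sink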
*/ static enum link_training_result dpia_training_cr_phase( struct dc_link *link, @@ -564,7 +574,8 @@ static enum link_training_result dpia_training_cr_phase( } /* Return status read interval during equalization phase. */ -static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link, +static uint32_t dpia_get_eq_aux_rd_interval( + const struct dc_link *link, const struct link_training_settings *lt_settings, uint32_t hop) { @@ -590,12 +601,11 @@ static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link, * - TPSx is transmitted for any hops downstream of DPOA. * - Drive (VS/PE) only transmitted for the hop immediately downstream of DPOA. * - EQ for the first hop (DPTX-to-DPIA) is assumed to be successful. - * - DPRX EQ only reported successful when both DPRX and DPIA requirements - * (clk sync packets sent) fulfilled. + * - DPRX EQ only reported successful when both DPRX and DPIA requirements (clk sync packets sent) fulfilled. * * @param link DPIA link being trained. * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0. */ static enum link_training_result dpia_training_eq_non_transparent( struct dc_link *link, @@ -624,9 +634,10 @@ static enum link_training_result dpia_training_eq_non_transparent( else tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; - repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) { + /* DPTX-to-DPIA equalization always successful. */ if (hop == repeater_cnt) { result = LINK_TRAINING_SUCCESS; @@ -640,7 +651,8 @@ static enum link_training_result dpia_training_eq_non_transparent( result = LINK_TRAINING_ABORT; break; } - status = core_link_send_set_config(link, + status = core_link_send_set_config( + link, DPIA_SET_CFG_SET_TRAINING, ts); if (status != DC_OK) { @@ -658,12 +670,14 @@ static enum link_training_result dpia_training_eq_non_transparent( * drive settings for hop immediately downstream. */ if (hop == repeater_cnt - 1) { - set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_VSPE, - link, - lt_settings); - status = core_link_send_set_config(link, - DPIA_SET_CFG_SET_VSPE, - set_cfg_data); + set_cfg_data = dpia_build_set_config_data( + DPIA_SET_CFG_SET_VSPE, + link, + lt_settings); + status = core_link_send_set_config( + link, + DPIA_SET_CFG_SET_VSPE, + set_cfg_data); if (status != DC_OK) { result = LINK_TRAINING_ABORT; break; @@ -679,7 +693,7 @@ static enum link_training_result dpia_training_eq_non_transparent( * ensure clock sync packets have been sent. 
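+ *
+ * In other words, on the second DPRX iteration the wait below is clamped
+ * from below so the DPOA has had time to send the clock sync packets:
+ *
+ *     wait_time_microsec = max(wait_time_microsec,
+ *                              (uint32_t)DPIA_CLK_SYNC_DELAY); // >= 16 ms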
*/ if (hop == DPRX && retries_eq == 1) - wait_time_microsec = max(wait_time_microsec, (uint32_t)DPIA_CLK_SYNC_DELAY); + wait_time_microsec = max(wait_time_microsec, (uint32_t) DPIA_CLK_SYNC_DELAY); else wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, hop); @@ -705,8 +719,8 @@ static enum link_training_result dpia_training_eq_non_transparent( } if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && - dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) && - dp_is_interlane_aligned(dpcd_lane_status_updated)) { + dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) && + dp_is_interlane_aligned(dpcd_lane_status_updated)) { result = LINK_TRAINING_SUCCESS; break; } @@ -741,7 +755,7 @@ static enum link_training_result dpia_training_eq_non_transparent( * * @param link DPIA link being trained. * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0. */ static enum link_training_result dpia_training_eq_transparent( struct dc_link *link, @@ -761,6 +775,7 @@ static enum link_training_result dpia_training_eq_transparent( wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, DPRX); for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) { + if (retries_eq == 0) { status = dpcd_set_lt_pattern(link, tr_pattern, DPRX); if (status != DC_OK) { @@ -810,8 +825,7 @@ static enum link_training_result dpia_training_eq_transparent( if (link->is_hpd_pending) result = LINK_TRAINING_ABORT; - DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n" - " - hop(%d)\n - result(%d)\n - retries(%d)\n", + DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n - hop(%d)\n - result(%d)\n - retries(%d)\n", __func__, link->link_id.enum_id - ENUM_ID_1, DPRX, @@ -826,7 +840,7 @@ static enum link_training_result dpia_training_eq_transparent( * * @param link DPIA link being trained. * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0. */ static enum link_training_result dpia_training_eq_phase( struct dc_link *link, @@ -845,7 +859,9 @@ static enum link_training_result dpia_training_eq_phase( } /* End training of specified hop in display path. */ -static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop) +static enum dc_status dpcd_clear_lt_pattern( + struct dc_link *link, + uint32_t hop) { union dpcd_training_pattern dpcd_pattern = {0}; uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET; @@ -855,7 +871,8 @@ static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop) dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1)); - status = core_link_write_dpcd(link, + status = core_link_write_dpcd( + link, dpcd_tps_offset, &dpcd_pattern.raw, sizeof(dpcd_pattern.raw)); @@ -873,9 +890,10 @@ static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop) * (DPTX-to-DPIA) and last hop (DPRX). * * @param link DPIA link being trained. - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0. 
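+ *
+ * Dispatch mirrors the clock recovery phase; roughly (sketch of the
+ * function body, which this patch leaves unchanged):
+ *
+ *     if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ *         result = dpia_training_eq_non_transparent(link, link_res,
+ *                                                   lt_settings, hop);
+ *     else
+ *         result = dpia_training_eq_transparent(link, link_res, lt_settings);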
*/ -static enum link_training_result dpia_training_end(struct dc_link *link, +static enum link_training_result dpia_training_end( + struct dc_link *link, struct link_training_settings *lt_settings, uint32_t hop) { @@ -884,13 +902,15 @@ static enum link_training_result dpia_training_end(struct dc_link *link, enum dc_status status; if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { - repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); if (hop == repeater_cnt) { /* DPTX-to-DPIA */ /* Send SET_CONFIG(SET_TRAINING:0xff) to notify DPOA that * DPTX-to-DPIA hop trained. No DPCD write needed for first hop. */ - status = core_link_send_set_config(link, + status = core_link_send_set_config( + link, DPIA_SET_CFG_SET_TRAINING, DPIA_TS_UFP_DONE); if (status != DC_OK) @@ -904,7 +924,8 @@ static enum link_training_result dpia_training_end(struct dc_link *link, /* Notify DPOA that non-transparent link training of DPRX done. */ if (hop == DPRX && result != LINK_TRAINING_ABORT) { - status = core_link_send_set_config(link, + status = core_link_send_set_config( + link, DPIA_SET_CFG_SET_TRAINING, DPIA_TS_DPRX_DONE); if (status != DC_OK) @@ -912,18 +933,20 @@ static enum link_training_result dpia_training_end(struct dc_link *link, } } else { /* non-LTTPR or transparent LTTPR. */ + /* Write 0x0 to TRAINING_PATTERN_SET */ status = dpcd_clear_lt_pattern(link, hop); if (status != DC_OK) result = LINK_TRAINING_ABORT; + } DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) end\n - hop(%d)\n - result(%d)\n - LTTPR mode(%d)\n", - __func__, - link->link_id.enum_id - ENUM_ID_1, - hop, - result, - lt_settings->lttpr_mode); + __func__, + link->link_id.enum_id - ENUM_ID_1, + hop, + result, + lt_settings->lttpr_mode); return result; } @@ -933,20 +956,21 @@ static enum link_training_result dpia_training_end(struct dc_link *link, * - Sending SET_CONFIG(SET_LINK) with lane count and link rate set to 0. * * @param link DPIA link being trained. - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0. */ -static void dpia_training_abort(struct dc_link *link, - struct link_training_settings *lt_settings, - uint32_t hop) +static void dpia_training_abort( + struct dc_link *link, + struct link_training_settings *lt_settings, + uint32_t hop) { uint8_t data = 0; uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET; DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) aborting\n - LTTPR mode(%d)\n - HPD(%d)\n", - __func__, - link->link_id.enum_id - ENUM_ID_1, - lt_settings->lttpr_mode, - link->is_hpd_pending); + __func__, + link->link_id.enum_id - ENUM_ID_1, + lt_settings->lttpr_mode, + link->is_hpd_pending); /* Abandon clean-up if sink unplugged. */ if (link->is_hpd_pending) @@ -975,7 +999,7 @@ enum link_training_result dc_link_dpia_perform_link_training( struct dc_link_settings link_settings = *link_setting; // non-const copy to pass in - lt_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link_settings); + lt_settings.lttpr_mode = dc_link_decide_lttpr_mode(link, &link_settings); /* Configure link as prescribed in link_setting and set LTTPR mode. 
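+ *
+ * After configuration, hops are trained outside-in; roughly (sketch of
+ * the training loop that follows, assuming a signed repeater_id so the
+ * DPRX (0) iteration terminates the loop):
+ *
+ *     for (repeater_id = repeater_cnt; repeater_id >= 0; repeater_id--) {
+ *         result = dpia_training_cr_phase(link, link_res, &lt_settings, repeater_id);
+ *         if (result != LINK_TRAINING_SUCCESS)
+ *             break;
+ *         result = dpia_training_eq_phase(link, link_res, &lt_settings, repeater_id);
+ *         if (result != LINK_TRAINING_SUCCESS)
+ *             break;
+ *         result = dpia_training_end(link, &lt_settings, repeater_id);
+ *         if (result != LINK_TRAINING_SUCCESS)
+ *             break;
+ *     }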
 */
 result = dpia_configure_link(link, link_res, link_setting, &lt_settings);
@@ -983,7 +1007,7 @@ enum link_training_result dc_link_dpia_perform_link_training(
 return result;
 
 if (lt_settings.lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
- repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
 
 /* Train each hop in turn starting with the one closest to DPTX.
 * In transparent or non-LTTPR mode, train only the final hop (DPRX).
@@ -1014,10 +1038,10 @@ enum link_training_result dc_link_dpia_perform_link_training(
 msleep(5);
 if (!link->is_automated)
 result = dp_check_link_loss_status(link, &lt_settings);
- } else if (result == LINK_TRAINING_ABORT) {
+ } else if (result == LINK_TRAINING_ABORT)
 dpia_training_abort(link, &lt_settings, repeater_id);
- } else {
+ else
 dpia_training_end(link, &lt_settings, repeater_id);
- }
+
 return result;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
new file mode 100644
index 00000000000000..0150f29164215a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DC_LINK_DP_TRAINING_DPIA_H__
+#define __DC_LINK_DP_TRAINING_DPIA_H__
+#include "link_dp_training.h"
+
+/* Train DP tunneling link for USB4 DPIA display endpoint.
+ * DPIA equivalent of dc_link_dp_perform_link_training.
+ * Aborts link training upon detection of sink unplug.
+ */
+enum link_training_result dc_link_dpia_perform_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+
+#endif /* __DC_LINK_DP_TRAINING_DPIA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
new file mode 100644
index 00000000000000..a4071d2959a033
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -0,0 +1,579 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements 8b/10b link training specially modified to support an + * embedded retimer chip. This retimer chip is referred as fixed vs pe retimer. + * Unlike native dp connection this chip requires a modified link training + * protocol based on 8b/10b link training. Since this is a non standard sequence + * and we must support this hardware, we decided to isolate it in its own + * training sequence inside its own file. + */ +#include "link_dp_training_fixed_vs_pe_retimer.h" +#include "link_dp_training_8b_10b.h" +#include "link_dpcd.h" +#include "link_dp_phy.h" +#include "link_dp_capability.h" + +#define DC_LOGGER \ + link->ctx->logger + +void dp_fixed_vs_pe_read_lane_adjust( + struct dc_link *link, + union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]) +{ + const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63}; + const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63}; + const uint8_t offset = dp_parse_lttpr_repeater_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + uint32_t vendor_lttpr_write_address = 0xF004F; + uint32_t vendor_lttpr_read_address = 0xF0053; + uint8_t dprx_vs = 0; + uint8_t dprx_pe = 0; + uint8_t lane; + + if (offset != 0xFF) { + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + vendor_lttpr_read_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + } + + /* W/A to read lane settings requested by DPRX */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_vs[0], + sizeof(vendor_lttpr_write_data_vs)); + core_link_read_dpcd( + link, + vendor_lttpr_read_address, + &dprx_vs, + 1); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_pe[0], + sizeof(vendor_lttpr_write_data_pe)); + core_link_read_dpcd( + link, + vendor_lttpr_read_address, + &dprx_pe, + 1); + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = (dprx_vs >> (2 * lane)) & 0x3; + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = (dprx_pe >> (2 * lane)) & 0x3; + } +} + + +void dp_fixed_vs_pe_set_retimer_lane_settings( + struct dc_link *link, + const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], + uint8_t lane_count) +{ + const uint8_t offset = dp_parse_lttpr_repeater_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + 
const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+ uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
+ uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
+ uint8_t lane = 0;
+
+ if (offset != 0xFF) {
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ }
+
+ for (lane = 0; lane < lane_count; lane++) {
+ vendor_lttpr_write_data_vs[3] |=
+ dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
+ vendor_lttpr_write_data_pe[3] |=
+ dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
+ }
+
+ /* Force LTTPR to output desired VS and PE */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_reset[0],
+ sizeof(vendor_lttpr_write_data_reset));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+}
+
+static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ uint8_t lane = 0;
+ uint8_t toggle_rate = 0x6;
+ uint8_t target_rate = 0x6;
+ bool apply_toggle_rate_wa = false;
+ uint8_t repeater_cnt;
+ uint8_t repeater_id;
+
+ /* Fixed VS/PE specific: Force CR AUX RD Interval to at least 16ms */
+ if (lt_settings->cr_pattern_time < 16000)
+ lt_settings->cr_pattern_time = 16000;
+
+ /* Fixed VS/PE specific: Toggle link rate */
+ apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate);
+ target_rate = get_dpcd_link_rate(&lt_settings->link_settings);
+ toggle_rate = (target_rate == 0x6) ? 0xA : 0x6;
+
+ if (apply_toggle_rate_wa)
+ lt_settings->link_settings.link_rate = toggle_rate;
+
+ if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
+
+ /* 1. set link rate, lane count and spread. */
+ dpcd_set_link_settings(link, lt_settings);
+
+ /* Fixed VS/PE specific: Toggle link rate back */
+ if (apply_toggle_rate_wa) {
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+ &target_rate,
+ 1);
+ }
+
+ link->vendor_specific_lttpr_link_rate_wa = target_rate;
+
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+
+ /* 2.
perform link training; this also sets link training
+ * done to false
+ */
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
+ repeater_id--) {
+ status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, repeater_id);
+
+ if (status != LINK_TRAINING_SUCCESS) {
+ repeater_training_done(link, repeater_id);
+ break;
+ }
+
+ status = perform_8b_10b_channel_equalization_sequence(link,
+ link_res,
+ lt_settings,
+ repeater_id);
+
+ repeater_training_done(link, repeater_id);
+
+ if (status != LINK_TRAINING_SUCCESS)
+ break;
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ lt_settings->dpcd_lane_settings[lane].raw = 0;
+ lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0;
+ lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0;
+ }
+ }
+ }
+
+ if (status == LINK_TRAINING_SUCCESS) {
+ status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, DPRX);
+ if (status == LINK_TRAINING_SUCCESS) {
+ status = perform_8b_10b_channel_equalization_sequence(link,
+ link_res,
+ lt_settings,
+ DPRX);
+ }
+ }
+
+ return status;
+}
+
+enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
+ const uint8_t offset = dp_parse_lttpr_repeater_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
+ const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68};
+ uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
+ uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
+ uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ uint8_t lane = 0;
+ union down_spread_ctrl downspread = {0};
+ union lane_count_set lane_count_set = {0};
+ uint8_t toggle_rate;
+ uint8_t rate;
+
+ /* Only 8b/10b is supported */
+ ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING);
+
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings);
+ return status;
+ }
+
+ if (offset != 0xFF) {
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ /* Certain display and cable configurations require extra delay */
+ if (offset > 2)
+ pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
+ }
+
+ /* Vendor specific: Reset lane settings */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_reset[0],
+ sizeof(vendor_lttpr_write_data_reset));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+
+ /* Vendor specific: Enable intercept */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_intercept_en[0],
+ sizeof(vendor_lttpr_write_data_intercept_en));
+
+ /* 1. set link rate, lane count and spread.
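+ *
+ * DPCD link bandwidth codes involved here, per the DP spec: 0x06 = RBR
+ * (1.62 Gbps per lane), 0x0A = HBR (2.7), 0x14 = HBR2 (5.4), 0x1E = HBR3
+ * (8.1). The vendor workaround below briefly toggles between 0x6 and 0xA
+ * before committing the real rate, e.g.:
+ *
+ *     rate = get_dpcd_link_rate(&lt_settings->link_settings);
+ *     toggle_rate = (rate == 0x6) ? 0xA : 0x6;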
*/ + + downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread); + + lane_count_set.bits.LANE_COUNT_SET = + lt_settings->link_settings.lane_count; + + lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; + + + if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = + link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; + } + + core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, + &downspread.raw, sizeof(downspread)); + + core_link_write_dpcd(link, DP_LANE_COUNT_SET, + &lane_count_set.raw, 1); + + rate = get_dpcd_link_rate(<_settings->link_settings); + + /* Vendor specific: Toggle link rate */ + toggle_rate = (rate == 0x6) ? 0xA : 0x6; + + if (link->vendor_specific_lttpr_link_rate_wa == rate) { + core_link_write_dpcd( + link, + DP_LINK_BW_SET, + &toggle_rate, + 1); + } + + link->vendor_specific_lttpr_link_rate_wa = rate; + + core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + + DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", + __func__, + DP_LINK_BW_SET, + lt_settings->link_settings.link_rate, + DP_LANE_COUNT_SET, + lt_settings->link_settings.lane_count, + lt_settings->enhanced_framing, + DP_DOWNSPREAD_CTRL, + lt_settings->link_settings.link_spread); + + /* 2. Perform link training */ + + /* Perform Clock Recovery Sequence */ + if (status == LINK_TRAINING_SUCCESS) { + const uint8_t max_vendor_dpcd_retries = 10; + uint32_t retries_cr; + uint32_t retry_count; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; + union lane_align_status_updated dpcd_lane_status_updated; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + enum dc_status dpcd_status = DC_OK; + uint8_t i = 0; + + retries_cr = 0; + retry_count = 0; + + memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); + memset(&dpcd_lane_status_updated, '\0', + sizeof(dpcd_lane_status_updated)); + + while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && + (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + + + /* 1. call HWSS to set lane settings */ + dp_set_hw_lane_settings( + link, + link_res, + lt_settings, + 0); + + /* 2. update DPCD of the receiver */ + if (!retry_count) { + /* EPR #361076 - write as a 5-byte burst, + * but only for the 1-st iteration. 
+ */ + dpcd_set_lt_pattern_and_lane_settings( + link, + lt_settings, + lt_settings->pattern_for_cr, + 0); + /* Vendor specific: Disable intercept */ + for (i = 0; i < max_vendor_dpcd_retries; i++) { + msleep(pre_disable_intercept_delay_ms); + dpcd_status = core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_intercept_dis[0], + sizeof(vendor_lttpr_write_data_intercept_dis)); + + if (dpcd_status == DC_OK) + break; + + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_intercept_en[0], + sizeof(vendor_lttpr_write_data_intercept_en)); + } + } else { + vendor_lttpr_write_data_vs[3] = 0; + vendor_lttpr_write_data_pe[3] = 0; + + for (lane = 0; lane < lane_count; lane++) { + vendor_lttpr_write_data_vs[3] |= + lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); + vendor_lttpr_write_data_pe[3] |= + lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); + } + + /* Vendor specific: Update VS and PE to DPRX requested value */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_vs[0], + sizeof(vendor_lttpr_write_data_vs)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_pe[0], + sizeof(vendor_lttpr_write_data_pe)); + + dpcd_set_lane_settings( + link, + lt_settings, + 0); + } + + /* 3. wait receiver to lock-on*/ + wait_time_microsec = lt_settings->cr_pattern_time; + + dp_wait_for_training_aux_rd_interval( + link, + wait_time_microsec); + + /* 4. Read lane status and requested drive + * settings as set by the sink + */ + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + 0); + + /* 5. check CR done*/ + if (dp_is_cr_done(lane_count, dpcd_lane_status)) { + status = LINK_TRAINING_SUCCESS; + break; + } + + /* 6. max VS reached*/ + if (dp_is_max_vs_reached(lt_settings)) + break; + + /* 7. same lane settings */ + /* Note: settings are the same for all lanes, + * so comparing first lane is sufficient + */ + if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == + dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) + retries_cr++; + else + retries_cr = 0; + + /* 8. update VS/PE/PC2 in lt_settings*/ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + retry_count++; + } + + if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { + ASSERT(0); + DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. 
Possibly voltage swing issue", + __func__, + LINK_TRAINING_MAX_CR_RETRY); + + } + + status = dp_get_cr_failure(lane_count, dpcd_lane_status); + } + + /* Perform Channel EQ Sequence */ + if (status == LINK_TRAINING_SUCCESS) { + enum dc_dp_training_pattern tr_pattern; + uint32_t retries_ch_eq; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + + /* Note: also check that TPS4 is a supported feature*/ + tr_pattern = lt_settings->pattern_for_eq; + + dp_set_hw_training_pattern(link, link_res, tr_pattern, 0); + + status = LINK_TRAINING_EQ_FAIL_EQ; + + for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; + retries_ch_eq++) { + + dp_set_hw_lane_settings(link, link_res, lt_settings, 0); + + vendor_lttpr_write_data_vs[3] = 0; + vendor_lttpr_write_data_pe[3] = 0; + + for (lane = 0; lane < lane_count; lane++) { + vendor_lttpr_write_data_vs[3] |= + lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); + vendor_lttpr_write_data_pe[3] |= + lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); + } + + /* Vendor specific: Update VS and PE to DPRX requested value */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_vs[0], + sizeof(vendor_lttpr_write_data_vs)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_pe[0], + sizeof(vendor_lttpr_write_data_pe)); + + /* 2. update DPCD*/ + if (!retries_ch_eq) + /* EPR #361076 - write as a 5-byte burst, + * but only for the 1-st iteration + */ + + dpcd_set_lt_pattern_and_lane_settings( + link, + lt_settings, + tr_pattern, 0); + else + dpcd_set_lane_settings(link, lt_settings, 0); + + /* 3. wait for receiver to lock-on*/ + wait_time_microsec = lt_settings->eq_pattern_time; + + dp_wait_for_training_aux_rd_interval( + link, + wait_time_microsec); + + /* 4. Read lane status and requested + * drive settings as set by the sink + */ + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + 0); + + /* 5. check CR done*/ + if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { + status = LINK_TRAINING_EQ_FAIL_CR; + break; + } + + /* 6. check CHEQ done*/ + if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && + dp_is_symbol_locked(lane_count, dpcd_lane_status) && + dp_is_interlane_aligned(dpcd_lane_status_updated)) { + status = LINK_TRAINING_SUCCESS; + break; + } + + /* 7. update VS/PE/PC2 in lt_settings*/ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + } + } + + return status; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h new file mode 100644 index 00000000000000..e61970e27661d9 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h @@ -0,0 +1,45 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__ +#define __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__ +#include "link_dp_training.h" + +enum link_training_result dp_perform_fixed_vs_pe_training_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings); + +void dp_fixed_vs_pe_set_retimer_lane_settings( + struct dc_link *link, + const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], + uint8_t lane_count); + +void dp_fixed_vs_pe_read_lane_adjust( + struct dc_link *link, + union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]); + +#endif /* __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c similarity index 97% rename from drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c rename to drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c index af110bf9470fab..5c9a30211c109f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c @@ -23,11 +23,14 @@ * */ -#include -#include -#include -#include -#include +/* FILE POLICY AND INTENDED USAGE: + * + * This file implements basic dpcd read/write functionality. It also does basic + * dpcd range check to ensure that every dpcd request is compliant with specs + * range requirements. 
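+ * Reads and writes that would cross a DPCD address partition (for
+ * example the LTTPR register space) are split into separate compliant
+ * transactions before being issued over AUX.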
+ */ + +#include "link_dpcd.h" #include #include "dm_helpers.h" diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h similarity index 95% rename from drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h rename to drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h index d561f86d503ce1..08d787a1e4517e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h @@ -25,9 +25,8 @@ #ifndef __LINK_DPCD_H__ #define __LINK_DPCD_H__ -#include -#include -#include +#include "link.h" +#include "dpcd_defs.h" enum dc_status core_link_read_dpcd( struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c new file mode 100644 index 00000000000000..97e02b5b21ae3d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -0,0 +1,833 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements retrieval and configuration of eDP panel features such + * as PSR and ABM and it also manages specs defined eDP panel power sequences. 
+ */ + +#include "link_edp_panel_control.h" +#include "link_dpcd.h" +#include "link_dp_capability.h" +#include "dm_helpers.h" +#include "dal_asic_id.h" +#include "dce/dmub_psr.h" +#include "abm.h" +#define DC_LOGGER_INIT(logger) + +void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode) +{ + union dpcd_edp_config edp_config_set; + bool panel_mode_edp = false; + + memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config)); + + if (panel_mode != DP_PANEL_MODE_DEFAULT) { + + switch (panel_mode) { + case DP_PANEL_MODE_EDP: + case DP_PANEL_MODE_SPECIAL: + panel_mode_edp = true; + break; + + default: + break; + } + + /*set edp panel mode in receiver*/ + core_link_read_dpcd( + link, + DP_EDP_CONFIGURATION_SET, + &edp_config_set.raw, + sizeof(edp_config_set.raw)); + + if (edp_config_set.bits.PANEL_MODE_EDP + != panel_mode_edp) { + enum dc_status result; + + edp_config_set.bits.PANEL_MODE_EDP = + panel_mode_edp; + result = core_link_write_dpcd( + link, + DP_EDP_CONFIGURATION_SET, + &edp_config_set.raw, + sizeof(edp_config_set.raw)); + + ASSERT(result == DC_OK); + } + } + DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d " + "eDP panel mode enabled: %d \n", + link->link_index, + link->dpcd_caps.panel_mode_edp, + panel_mode_edp); +} + +enum dp_panel_mode dp_get_panel_mode(struct dc_link *link) +{ + /* We need to explicitly check that connector + * is not DP. Some Travis_VGA get reported + * by video bios as DP. + */ + if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) { + + switch (link->dpcd_caps.branch_dev_id) { + case DP_BRANCH_DEVICE_ID_0022B9: + /* alternate scrambler reset is required for Travis + * for the case when external chip does not + * provide sink device id, alternate scrambler + * scheme will be overriden later by querying + * Encoder features + */ + if (strncmp( + link->dpcd_caps.branch_dev_name, + DP_VGA_LVDS_CONVERTER_ID_2, + sizeof( + link->dpcd_caps. + branch_dev_name)) == 0) { + return DP_PANEL_MODE_SPECIAL; + } + break; + case DP_BRANCH_DEVICE_ID_00001A: + /* alternate scrambler reset is required for Travis + * for the case when external chip does not provide + * sink device id, alternate scrambler scheme will + * be overriden later by querying Encoder feature + */ + if (strncmp(link->dpcd_caps.branch_dev_name, + DP_VGA_LVDS_CONVERTER_ID_3, + sizeof( + link->dpcd_caps. + branch_dev_name)) == 0) { + return DP_PANEL_MODE_SPECIAL; + } + break; + default: + break; + } + } + + if (link->dpcd_caps.panel_mode_edp && + (link->connector_signal == SIGNAL_TYPE_EDP || + (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && + link->is_internal_display))) { + return DP_PANEL_MODE_EDP; + } + + return DP_PANEL_MODE_DEFAULT; +} + +bool dc_link_set_backlight_level_nits(struct dc_link *link, + bool isHDR, + uint32_t backlight_millinits, + uint32_t transition_time_in_ms) +{ + struct dpcd_source_backlight_set dpcd_backlight_set; + uint8_t backlight_control = isHDR ? 
1 : 0; + + if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && + link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) + return false; + + // OLEDs have no PWM, they can only use AUX + if (link->dpcd_sink_ext_caps.bits.oled == 1) + backlight_control = 1; + + *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; + *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; + + + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + (uint8_t *)(&dpcd_backlight_set), + sizeof(dpcd_backlight_set)) != DC_OK) + return false; + + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, + &backlight_control, 1) != DC_OK) + return false; + + return true; +} + +bool dc_link_get_backlight_level_nits(struct dc_link *link, + uint32_t *backlight_millinits_avg, + uint32_t *backlight_millinits_peak) +{ + union dpcd_source_backlight_get dpcd_backlight_get; + + memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get)); + + if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && + link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) + return false; + + if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK, + dpcd_backlight_get.raw, + sizeof(union dpcd_source_backlight_get))) + return false; + + *backlight_millinits_avg = + dpcd_backlight_get.bytes.backlight_millinits_avg; + *backlight_millinits_peak = + dpcd_backlight_get.bytes.backlight_millinits_peak; + + /* On non-supported panels dpcd_read usually succeeds with 0 returned */ + if (*backlight_millinits_avg == 0 || + *backlight_millinits_avg > *backlight_millinits_peak) + return false; + + return true; +} + +bool link_backlight_enable_aux(struct dc_link *link, bool enable) +{ + uint8_t backlight_enable = enable ? 
1 : 0; + + if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && + link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) + return false; + + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE, + &backlight_enable, 1) != DC_OK) + return false; + + return true; +} + +// we read the default from 0x320 because we expect the BIOS wrote it there +// regular get_backlight_nit reads from the panel setting at 0x326 +static bool read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits) +{ + if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && + link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) + return false; + + if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + (uint8_t *) backlight_millinits, + sizeof(uint32_t))) + return false; + + return true; +} + +bool set_default_brightness_aux(struct dc_link *link) +{ + uint32_t default_backlight; + + if (link && link->dpcd_sink_ext_caps.bits.oled == 1) { + if (!read_default_bl_aux(link, &default_backlight)) + default_backlight = 150000; + // if < 5 nits or > 5000 nits, it is likely a wrong readback + if (default_backlight < 5000 || default_backlight > 5000000) + default_backlight = 150000; // 150 nits + + return dc_link_set_backlight_level_nits(link, true, + default_backlight, 0); + } + return false; +} + +bool link_is_edp_ilr_optimization_required(struct dc_link *link, + struct dc_crtc_timing *crtc_timing) +{ + struct dc_link_settings link_setting; + uint8_t link_bw_set; + uint8_t link_rate_set; + uint32_t req_bw; + union lane_count_set lane_count_set = {0}; + + ASSERT(link && crtc_timing); // invalid input + + if (link->dpcd_caps.edp_supported_link_rates_count == 0 || + !link->panel_config.ilr.optimize_edp_link_rate) + return false; + + + // Read DPCD 00100h to find if standard link rates are set + core_link_read_dpcd(link, DP_LINK_BW_SET, + &link_bw_set, sizeof(link_bw_set)); + + if (link_bw_set) { + DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS used link_bw_set\n"); + return true; + } + + // Read DPCD 00115h to find the edp link rate set used + core_link_read_dpcd(link, DP_LINK_RATE_SET, + &link_rate_set, sizeof(link_rate_set)); + + // Read DPCD 00101h to find out the number of lanes currently set + core_link_read_dpcd(link, DP_LANE_COUNT_SET, + &lane_count_set.raw, sizeof(lane_count_set)); + + req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing); + + if (!crtc_timing->flags.DSC) + dc_link_decide_edp_link_settings(link, &link_setting, req_bw); + else + decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN); + + if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate || + lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) { + DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS link_rate_set not optimal\n"); + return true; + } + + DC_LOG_EVENT_LINK_TRAINING("eDP ILR: No optimization required, VBIOS set optimal link_rate_set\n"); + return false; +} + +void dc_link_edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd) +{ + if (link->connector_signal != SIGNAL_TYPE_EDP) + return; + + link->dc->hwss.edp_power_control(link, true); + if (wait_for_hpd) + link->dc->hwss.edp_wait_for_hpd_ready(link, true); + if (link->dc->hwss.edp_backlight_control) + link->dc->hwss.edp_backlight_control(link, true); +} + +bool dc_link_wait_for_t12(struct dc_link *link) +{ + if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) { + link->dc->hwss.edp_wait_for_T12(link); + + return true; + } + + return false; +} + +void
link_edp_add_delay_for_T9(struct dc_link *link) +{ + if (link && link->panel_config.pps.extra_delay_backlight_off > 0) + udelay(link->panel_config.pps.extra_delay_backlight_off * 1000); +} + +bool link_edp_receiver_ready_T9(struct dc_link *link) +{ + unsigned int tries = 0; + unsigned char sinkstatus = 0; + unsigned char edpRev = 0; + enum dc_status result = DC_OK; + + result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); + + /* starting from eDP version 1.2, SINK_STATUS indicates the sink is ready */ + if (result == DC_OK && edpRev >= DP_EDP_12) { + do { + sinkstatus = 1; + result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); + if (sinkstatus == 0) + break; + if (result != DC_OK) + break; + udelay(100); // max T9 + } while (++tries < 50); + } + + return result; +} + +bool link_edp_receiver_ready_T7(struct dc_link *link) +{ + unsigned char sinkstatus = 0; + unsigned char edpRev = 0; + enum dc_status result = DC_OK; + + /* use an absolute time stamp to constrain max T7 */ + unsigned long long enter_timestamp = 0; + unsigned long long finish_timestamp = 0; + unsigned long long time_taken_in_ns = 0; + + result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); + + if (result == DC_OK && edpRev >= DP_EDP_12) { + /* starting from eDP version 1.2, SINK_STATUS indicates the sink is ready */ + enter_timestamp = dm_get_timestamp(link->ctx); + do { + sinkstatus = 0; + result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); + if (sinkstatus == 1) + break; + if (result != DC_OK) + break; + udelay(25); + finish_timestamp = dm_get_timestamp(link->ctx); + time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp); + } while (time_taken_in_ns < 50 * 1000000); // max T7 is 50ms + } + + if (link && link->panel_config.pps.extra_t7_ms > 0) + udelay(link->panel_config.pps.extra_t7_ms * 1000); + + return result; +} + +bool link_power_alpm_dpcd_enable(struct dc_link *link, bool enable) +{ + bool ret = false; + union dpcd_alpm_configuration alpm_config; + + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { + memset(&alpm_config, 0, sizeof(alpm_config)); + + alpm_config.bits.ENABLE = (enable ?
true : false); + ret = dm_helpers_dp_write_dpcd(link->ctx, link, + DP_RECEIVER_ALPM_CONFIG, &alpm_config.raw, + sizeof(alpm_config.raw)); + } + return ret; +} + +static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link) +{ + int i; + struct dc *dc = link->ctx->dc; + struct pipe_ctx *pipe_ctx = NULL; + + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) { + pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + break; + } + } + } + + return pipe_ctx; +} + +bool dc_link_set_backlight_level(const struct dc_link *link, + uint32_t backlight_pwm_u16_16, + uint32_t frame_ramp) +{ + struct dc *dc = link->ctx->dc; + + DC_LOGGER_INIT(link->ctx->logger); + DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", + backlight_pwm_u16_16, backlight_pwm_u16_16); + + if (dc_is_embedded_signal(link->connector_signal)) { + struct pipe_ctx *pipe_ctx = get_pipe_from_link(link); + + if (pipe_ctx) { + /* Disable brightness ramping when the display is blanked + * as it can hang the DMCU + */ + if (pipe_ctx->plane_state == NULL) + frame_ramp = 0; + } else { + return false; + } + + dc->hwss.set_backlight_level( + pipe_ctx, + backlight_pwm_u16_16, + frame_ramp); + } + return true; +} + +bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active, + bool wait, bool force_static, const unsigned int *power_opts) +{ + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; + struct dmub_psr *psr = dc->res_pool->psr; + unsigned int panel_inst; + + if (psr == NULL && force_static) + return false; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + if ((allow_active != NULL) && (*allow_active == true) && (link->type == dc_connection_none)) { + // Don't enter PSR if panel is not connected + return false; + } + + /* Set power optimization flag */ + if (power_opts && link->psr_settings.psr_power_opt != *power_opts) { + link->psr_settings.psr_power_opt = *power_opts; + + if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt) + psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst); + } + + if (psr != NULL && link->psr_settings.psr_feature_enabled && + force_static && psr->funcs->psr_force_static) + psr->funcs->psr_force_static(psr, panel_inst); + + /* Enable or Disable PSR */ + if (allow_active && link->psr_settings.psr_allow_active != *allow_active) { + link->psr_settings.psr_allow_active = *allow_active; + + if (!link->psr_settings.psr_allow_active) + dc_z10_restore(dc); + + if (psr != NULL && link->psr_settings.psr_feature_enabled) { + psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst); + } else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && + link->psr_settings.psr_feature_enabled) + dmcu->funcs->set_psr_enable(dmcu, link->psr_settings.psr_allow_active, wait); + else + return false; + } + return true; +} + +bool dc_link_get_psr_state(const struct dc_link *link, enum dc_psr_state *state) +{ + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; + struct dmub_psr *psr = dc->res_pool->psr; + unsigned int panel_inst; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + if (psr != NULL && link->psr_settings.psr_feature_enabled) + psr->funcs->psr_get_state(psr, state, panel_inst); + else if (dmcu != NULL && link->psr_settings.psr_feature_enabled) + dmcu->funcs->get_psr_state(dmcu, state); + + 
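/* if neither DMUB PSR nor the legacy DMCU is available, *state is
+ * left untouched but we still report success
+ */
+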
return true; +} + +static inline enum physical_phy_id +transmitter_to_phy_id(struct dc_link *link) +{ + struct dc_context *dc_ctx = link->ctx; + enum transmitter transmitter_value = link->link_enc->transmitter; + + switch (transmitter_value) { + case TRANSMITTER_UNIPHY_A: + return PHYLD_0; + case TRANSMITTER_UNIPHY_B: + return PHYLD_1; + case TRANSMITTER_UNIPHY_C: + return PHYLD_2; + case TRANSMITTER_UNIPHY_D: + return PHYLD_3; + case TRANSMITTER_UNIPHY_E: + return PHYLD_4; + case TRANSMITTER_UNIPHY_F: + return PHYLD_5; + case TRANSMITTER_NUTMEG_CRT: + return PHYLD_6; + case TRANSMITTER_TRAVIS_CRT: + return PHYLD_7; + case TRANSMITTER_TRAVIS_LCD: + return PHYLD_8; + case TRANSMITTER_UNIPHY_G: + return PHYLD_9; + case TRANSMITTER_COUNT: + return PHYLD_COUNT; + case TRANSMITTER_UNKNOWN: + return PHYLD_UNKNOWN; + default: + DC_ERROR("Unknown transmitter value %d\n", transmitter_value); + return PHYLD_UNKNOWN; + } +} + +bool dc_link_setup_psr(struct dc_link *link, + const struct dc_stream_state *stream, struct psr_config *psr_config, + struct psr_context *psr_context) +{ + struct dc *dc; + struct dmcu *dmcu; + struct dmub_psr *psr; + int i; + unsigned int panel_inst; + /* updateSinkPsrDpcdConfig*/ + union dpcd_psr_configuration psr_configuration; + union dpcd_sink_active_vtotal_control_mode vtotal_control = {0}; + + psr_context->controllerId = CONTROLLER_ID_UNDEFINED; + + if (!link) + return false; + + dc = link->ctx->dc; + dmcu = dc->res_pool->dmcu; + psr = dc->res_pool->psr; + + if (!dmcu && !psr) + return false; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + + memset(&psr_configuration, 0, sizeof(psr_configuration)); + + psr_configuration.bits.ENABLE = 1; + psr_configuration.bits.CRC_VERIFICATION = 1; + psr_configuration.bits.FRAME_CAPTURE_INDICATION = + psr_config->psr_frame_capture_indication_req; + + /* Check for PSR v2*/ + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { + /* For PSR v2 selective update. + * Indicates whether sink should start capturing + * immediately following active scan line, + * or starting with the 2nd active scan line. + */ + psr_configuration.bits.LINE_CAPTURE_INDICATION = 0; + /*For PSR v2, determines whether Sink should generate + * IRQ_HPD when CRC mismatch is detected. + */ + psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1; + /* For PSR v2, set the bit when the Source device will + * be enabling PSR2 operation. + */ + psr_configuration.bits.ENABLE_PSR2 = 1; + /* For PSR v2, the Sink device must be able to receive + * SU region updates early in the frame time. 
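+ * The EARLY_TRANSPORT_ENABLE bit set below advertises this
+ * requirement to the sink.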
+ */ + psr_configuration.bits.EARLY_TRANSPORT_ENABLE = 1; + } + + dm_helpers_dp_write_dpcd( + link->ctx, + link, + 368, + &psr_configuration.raw, + sizeof(psr_configuration.raw)); + + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { + link_power_alpm_dpcd_enable(link, true); + psr_context->su_granularity_required = + psr_config->su_granularity_required; + psr_context->su_y_granularity = + psr_config->su_y_granularity; + psr_context->line_time_in_us = psr_config->line_time_in_us; + + /* linux must be able to expose AMD Source DPCD definition + * in order to support FreeSync PSR + */ + if (link->psr_settings.psr_vtotal_control_support) { + psr_context->rate_control_caps = psr_config->rate_control_caps; + vtotal_control.bits.ENABLE = true; + core_link_write_dpcd(link, DP_SINK_PSR_ACTIVE_VTOTAL_CONTROL_MODE, + &vtotal_control.raw, sizeof(vtotal_control.raw)); + } + } + + psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel; + psr_context->transmitterId = link->link_enc->transmitter; + psr_context->engineId = link->link_enc->preferred_engine; + + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { + /* dmcu -1 for all controller id values, + * therefore +1 here + */ + psr_context->controllerId = + dc->current_state->res_ctx. + pipe_ctx[i].stream_res.tg->inst + 1; + break; + } + } + + /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/ + psr_context->phyType = PHY_TYPE_UNIPHY; + /*PhyId is associated with the transmitter id*/ + psr_context->smuPhyId = transmitter_to_phy_id(link); + + psr_context->crtcTimingVerticalTotal = stream->timing.v_total; + psr_context->vsync_rate_hz = div64_u64(div64_u64((stream-> + timing.pix_clk_100hz * 100), + stream->timing.v_total), + stream->timing.h_total); + + psr_context->psrSupportedDisplayConfig = true; + psr_context->psrExitLinkTrainingRequired = + psr_config->psr_exit_link_training_required; + psr_context->sdpTransmitLineNumDeadline = + psr_config->psr_sdp_transmit_line_num_deadline; + psr_context->psrFrameCaptureIndicationReq = + psr_config->psr_frame_capture_indication_req; + + psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */ + + psr_context->numberOfControllers = + link->dc->res_pool->timing_generator_count; + + psr_context->rfb_update_auto_en = true; + + /* 2 frames before enter PSR. */ + psr_context->timehyst_frames = 2; + /* half a frame + * (units in 100 lines, i.e. a value of 1 represents 100 lines) + */ + psr_context->hyst_lines = stream->timing.v_total / 2 / 100; + psr_context->aux_repeats = 10; + + psr_context->psr_level.u32all = 0; + + /*skip power down the single pipe since it blocks the cstate*/ +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (link->ctx->asic_id.chip_family >= FAMILY_RV) { + switch (link->ctx->asic_id.chip_family) { + case FAMILY_YELLOW_CARP: + case AMDGPU_FAMILY_GC_10_3_6: + case AMDGPU_FAMILY_GC_11_0_1: + if (dc->debug.disable_z10 || dc->debug.psr_skip_crtc_disable) + psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; + break; + default: + psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; + break; + } + } +#else + if (link->ctx->asic_id.chip_family >= FAMILY_RV) + psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; +#endif + + /* SMU will perform additional powerdown sequence. + * For unsupported ASICs, set psr_level flag to skip PSR + * static screen notification to SMU. 
+ * (Always set for DAL2, did not check ASIC) + */ + psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations; + psr_context->allow_multi_disp_optimizations = psr_config->allow_multi_disp_optimizations; + + /* Complete PSR entry before aborting to prevent intermittent + * freezes on certain eDPs + */ + psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1; + + /* Disable ALPM first for compatible non-ALPM panel now */ + psr_context->psr_level.bits.DISABLE_ALPM = 0; + psr_context->psr_level.bits.ALPM_DEFAULT_PD_MODE = 1; + + /* Controls additional delay after remote frame capture before + * continuing power down, default = 0 + */ + psr_context->frame_delay = 0; + + psr_context->dsc_slice_height = psr_config->dsc_slice_height; + + if (psr) { + link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr, + link, psr_context, panel_inst); + link->psr_settings.psr_power_opt = 0; + link->psr_settings.psr_allow_active = 0; + } else { + link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); + } + + /* psr_enabled == 0 indicates setup_psr did not succeed, but this + * should not happen since firmware should be running at this point + */ + if (link->psr_settings.psr_feature_enabled == 0) + ASSERT(0); + + return true; + +} + +void link_get_psr_residency(const struct dc_link *link, uint32_t *residency) +{ + struct dc *dc = link->ctx->dc; + struct dmub_psr *psr = dc->res_pool->psr; + unsigned int panel_inst; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return; + + // PSR residency measurements only supported on DMCUB + if (psr != NULL && link->psr_settings.psr_feature_enabled) + psr->funcs->psr_get_residency(psr, residency, panel_inst); + else + *residency = 0; +} +bool link_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su) +{ + struct dc *dc = link->ctx->dc; + struct dmub_psr *psr = dc->res_pool->psr; + + if (psr == NULL || !link->psr_settings.psr_feature_enabled || !link->psr_settings.psr_vtotal_control_support) + return false; + + psr->funcs->psr_set_sink_vtotal_in_psr_active(psr, psr_vtotal_idle, psr_vtotal_su); + + return true; +} + +static struct abm *get_abm_from_stream_res(const struct dc_link *link) +{ + int i; + struct dc *dc = link->ctx->dc; + struct abm *abm = NULL; + + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i]; + struct dc_stream_state *stream = pipe_ctx.stream; + + if (stream && stream->link == link) { + abm = pipe_ctx.stream_res.abm; + break; + } + } + return abm; +} + +int dc_link_get_backlight_level(const struct dc_link *link) +{ + struct abm *abm = get_abm_from_stream_res(link); + struct panel_cntl *panel_cntl = link->panel_cntl; + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; + bool fw_set_brightness = true; + + if (dmcu) + fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); + + if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight) + return panel_cntl->funcs->get_current_backlight(panel_cntl); + else if (abm != NULL && abm->funcs->get_current_backlight != NULL) + return (int) abm->funcs->get_current_backlight(abm); + else + return DC_ERROR_UNEXPECTED; +} + +int dc_link_get_target_backlight_pwm(const struct dc_link *link) +{ + struct abm *abm = get_abm_from_stream_res(link); + + if (abm == NULL || abm->funcs->get_target_backlight == NULL) + return DC_ERROR_UNEXPECTED; + + return (int) abm->funcs->get_target_backlight(abm); +} diff 
--git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h new file mode 100644 index 00000000000000..7f91a564b0893e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -0,0 +1,33 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_EDP_PANEL_CONTROL_H__ +#define __DC_LINK_EDP_PANEL_CONTROL_H__ +#include "link.h" + +enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); +void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); +bool set_default_brightness_aux(struct dc_link *link); +#endif /* __DC_LINK_EDP_PANEL_CONTROL_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c new file mode 100644 index 00000000000000..5f39dfe06e9a7b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c @@ -0,0 +1,240 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * + * This file implements functions that manage basic HPD components such as the + * HPD GPIO. It also provides wrapper functions to execute HPD-related + * programming. This + * file only manages basic HPD functionality.
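That covers obtaining the HPD GPIO handle, raw pin state queries and the HPD line filter.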
It doesn't manage detection, nor + * feature- or signal-specific HPD behaviors. + */ +#include "link_hpd.h" +#include "gpio_service_interface.h" + +bool dc_link_get_hpd_state(struct dc_link *dc_link) +{ + uint32_t state; + + dal_gpio_lock_pin(dc_link->hpd_gpio); + dal_gpio_get_value(dc_link->hpd_gpio, &state); + dal_gpio_unlock_pin(dc_link->hpd_gpio); + + return state; +} + +void dc_link_enable_hpd(const struct dc_link *link) +{ + struct link_encoder *encoder = link->link_enc; + + if (encoder != NULL && encoder->funcs->enable_hpd != NULL) + encoder->funcs->enable_hpd(encoder); +} + +void dc_link_disable_hpd(const struct dc_link *link) +{ + struct link_encoder *encoder = link->link_enc; + + if (encoder != NULL && encoder->funcs->disable_hpd != NULL) + encoder->funcs->disable_hpd(encoder); +} + +void dc_link_enable_hpd_filter(struct dc_link *link, bool enable) +{ + struct gpio *hpd; + + if (enable) { + link->is_hpd_filter_disabled = false; + program_hpd_filter(link); + } else { + link->is_hpd_filter_disabled = true; + /* Obtain HPD handle */ + hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); + + if (!hpd) + return; + + /* Setup HPD filtering */ + if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) { + struct gpio_hpd_config config; + + config.delay_on_connect = 0; + config.delay_on_disconnect = 0; + + dal_irq_setup_hpd_filter(hpd, &config); + + dal_gpio_close(hpd); + } else { + ASSERT_CRITICAL(false); + } + /* Release HPD handle */ + dal_gpio_destroy_irq(&hpd); + } +} + +struct gpio *link_get_hpd_gpio(struct dc_bios *dcb, + struct graphics_object_id link_id, + struct gpio_service *gpio_service) +{ + enum bp_result bp_result; + struct graphics_object_hpd_info hpd_info; + struct gpio_pin_info pin_info; + + if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK) + return NULL; + + bp_result = dcb->funcs->get_gpio_pin_info(dcb, + hpd_info.hpd_int_gpio_uid, &pin_info); + + if (bp_result != BP_RESULT_OK) { + ASSERT(bp_result == BP_RESULT_NORECORD); + return NULL; + } + + return dal_gpio_service_create_irq(gpio_service, + pin_info.offset, + pin_info.mask); +} + +bool query_hpd_status(struct dc_link *link, uint32_t *is_hpd_high) +{ + struct gpio *hpd_pin = link_get_hpd_gpio( + link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); + if (!hpd_pin) + return false; + + dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT); + dal_gpio_get_value(hpd_pin, is_hpd_high); + dal_gpio_close(hpd_pin); + dal_gpio_destroy_irq(&hpd_pin); + return true; +} + +enum hpd_source_id get_hpd_line(struct dc_link *link) +{ + struct gpio *hpd; + enum hpd_source_id hpd_id; + + hpd_id = HPD_SOURCEID_UNKNOWN; + + hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); + + if (hpd) { + switch (dal_irq_get_source(hpd)) { + case DC_IRQ_SOURCE_HPD1: + hpd_id = HPD_SOURCEID1; + break; + case DC_IRQ_SOURCE_HPD2: + hpd_id = HPD_SOURCEID2; + break; + case DC_IRQ_SOURCE_HPD3: + hpd_id = HPD_SOURCEID3; + break; + case DC_IRQ_SOURCE_HPD4: + hpd_id = HPD_SOURCEID4; + break; + case DC_IRQ_SOURCE_HPD5: + hpd_id = HPD_SOURCEID5; + break; + case DC_IRQ_SOURCE_HPD6: + hpd_id = HPD_SOURCEID6; + break; + default: + BREAK_TO_DEBUGGER(); + break; + } + + dal_gpio_destroy_irq(&hpd); + } + + return hpd_id; +} + +bool program_hpd_filter(const struct dc_link *link) +{ + bool result = false; + struct gpio *hpd; + int delay_on_connect_in_ms = 0; + int delay_on_disconnect_in_ms = 0; + + if (link->is_hpd_filter_disabled) + return false; + /* Verify feature is supported
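(an HPD filter only applies to external DVI/HDMI/DP connectors; LVDS and eDP never program one)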
*/ + switch (link->connector_signal) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + /* Program hpd filter */ + delay_on_connect_in_ms = 500; + delay_on_disconnect_in_ms = 100; + break; + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_DISPLAY_PORT_MST: + /* Program hpd filter to allow DP signal to settle */ + /* 500: not able to detect MST <-> SST switch as HPD is low for + * only 100ms on DELL U2413 + * 0: some passive dongle still show aux mode instead of i2c + * 20-50: not enough to hide bouncing HPD with passive dongle. + * also see intermittent i2c read issues. + */ + delay_on_connect_in_ms = 80; + delay_on_disconnect_in_ms = 0; + break; + case SIGNAL_TYPE_LVDS: + case SIGNAL_TYPE_EDP: + default: + /* Don't program hpd filter */ + return false; + } + + /* Obtain HPD handle */ + hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); + + if (!hpd) + return result; + + /* Setup HPD filtering */ + if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) { + struct gpio_hpd_config config; + + config.delay_on_connect = delay_on_connect_in_ms; + config.delay_on_disconnect = delay_on_disconnect_in_ms; + + dal_irq_setup_hpd_filter(hpd, &config); + + dal_gpio_close(hpd); + + result = true; + } else { + ASSERT_CRITICAL(false); + } + + /* Release HPD handle */ + dal_gpio_destroy_irq(&hpd); + + return result; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h new file mode 100644 index 00000000000000..3d122def0c887b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h @@ -0,0 +1,47 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_HPD_H__ +#define __DC_LINK_HPD_H__ +#include "link.h" + +enum hpd_source_id get_hpd_line(struct dc_link *link); +/* + * Function: program_hpd_filter + * + * @brief + * Programs HPD filter on associated HPD line to default values. + * + * @return + * true on success, false otherwise + */ +bool program_hpd_filter(const struct dc_link *link); +/* Query hot plug status of USB4 DP tunnel. + * Returns true if HPD high. 
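+ * A DPIA exposes no HPD GPIO, so unlike query_hpd_status() the status
+ * is retrieved from DMUB firmware.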
+ */ +bool dpia_query_hpd_status(struct dc_link *link); +bool query_hpd_status(struct dc_link *link, uint32_t *is_hpd_high); +#endif /* __DC_LINK_HPD_H__ */ diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index eb5b7eb292ef30..a391b939d709bd 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -126,9 +126,21 @@ enum dmub_notification_type { DMUB_NOTIFICATION_HPD, DMUB_NOTIFICATION_HPD_IRQ, DMUB_NOTIFICATION_SET_CONFIG_REPLY, + DMUB_NOTIFICATION_DPIA_NOTIFICATION, DMUB_NOTIFICATION_MAX }; +/** + * DPIA NOTIFICATION Response Type + */ +enum dpia_notify_bw_alloc_status { + + DPIA_BW_REQ_FAILED = 0, + DPIA_BW_REQ_SUCCESS, + DPIA_EST_BW_CHANGED, + DPIA_BW_ALLOC_CAPS_CHANGED +}; + /** * struct dmub_region - dmub hw memory region * @base: base address for region, must be 256 byte aligned @@ -453,6 +465,7 @@ struct dmub_srv { * @pending_notification: Indicates there are other pending notifications * @aux_reply: aux reply * @hpd_status: hpd status + * @bw_alloc_reply: BW Allocation reply from CM/DPIA */ struct dmub_notification { enum dmub_notification_type type; @@ -463,6 +476,10 @@ struct dmub_notification { struct aux_reply_data aux_reply; enum dp_hpd_status hpd_status; enum set_config_status sc_status; + /** + * DPIA notification command. + */ + struct dmub_rb_cmd_dpia_notification dpia_notification; }; }; diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 33907feefebbdc..007d6bdc3e3959 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -162,6 +162,7 @@ extern "C" { #define dmub_udelay(microseconds) udelay(microseconds) #endif +#pragma pack(push, 1) /** * union dmub_addr - DMUB physical/virtual 64-bit address. */ @@ -172,6 +173,7 @@ union dmub_addr { } u; /*<< Low/high bit access */ uint64_t quad_part; /*<< 64 bit address */ }; +#pragma pack(pop) /** * Dirty rect definition. @@ -457,6 +459,10 @@ enum dmub_cmd_vbios_type { * Query DP alt status on a transmitter. */ DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT = 26, + /** + * Controls domain power gating + */ + DMUB_CMD__VBIOS_DOMAIN_CONTROL = 28, }; //============================================================================== @@ -770,6 +776,10 @@ enum dmub_out_cmd_type { * Command type used for SET_CONFIG Reply notification */ DMUB_OUT_CMD__SET_CONFIG_REPLY = 3, + /** + * Command type used for USB4 DPIA notification + */ + DMUB_OUT_CMD__DPIA_NOTIFICATION = 5, }; /* DMUB_CMD__DPIA command sub-types. */ @@ -779,6 +789,11 @@ enum dmub_cmd_dpia_type { DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2, }; +/* DMUB_OUT_CMD__DPIA_NOTIFICATION command types. 
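Currently only bandwidth allocation events are defined.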
*/ +enum dmub_cmd_dpia_notification_type { + DPIA_NOTIFY__BW_ALLOCATION = 0, +}; + #pragma pack(push, 1) /** @@ -1204,6 +1219,23 @@ struct dmub_rb_cmd_dig1_transmitter_control { union dmub_cmd_dig1_transmitter_control_data transmitter_control; /**< payload */ }; +/** + * struct dmub_rb_cmd_domain_control_data - Data for DOMAIN power control + */ +struct dmub_rb_cmd_domain_control_data { + uint8_t inst : 6; /**< DOMAIN instance to control */ + uint8_t power_gate : 1; /**< 1=power gate, 0=power up */ + uint8_t reserved[3]; /**< Reserved for future use */ +}; + +/** + * struct dmub_rb_cmd_domain_control - Controls DOMAIN power gating + */ +struct dmub_rb_cmd_domain_control { + struct dmub_cmd_header header; /**< header */ + struct dmub_rb_cmd_domain_control_data data; /**< payload */ +}; + /** * DPIA tunnel command parameters. */ @@ -1557,6 +1589,79 @@ struct dmub_rb_cmd_dp_set_config_reply { struct set_config_reply_control_data set_config_reply_control; }; +/** + * Definition of a DPIA notification header + */ +struct dpia_notification_header { + uint8_t instance; /**< DPIA Instance */ + uint8_t reserved[3]; + enum dmub_cmd_dpia_notification_type type; /**< DPIA notification type */ +}; + +/** + * Definition of the common data struct of DPIA notification + */ +struct dpia_notification_common { + uint8_t cmd_buffer[DMUB_RB_CMD_SIZE - sizeof(struct dmub_cmd_header) + - sizeof(struct dpia_notification_header)]; +}; + +/** + * Definition of DPIA BW allocation notification data + */ +struct dpia_bw_allocation_notify_data { + union { + struct { + uint16_t cm_bw_alloc_support: 1; /**< USB4 CM BW Allocation mode support */ + uint16_t bw_request_failed: 1; /**< BW_Request_Failed */ + uint16_t bw_request_succeeded: 1; /**< BW_Request_Succeeded */ + uint16_t est_bw_changed: 1; /**< Estimated_BW changed */ + uint16_t bw_alloc_cap_changed: 1; /**< BW_Allocation_Capability_Changed */ + uint16_t reserved: 11; /**< Reserved */ + } bits; + + uint16_t flags; + }; + + uint8_t cm_id; /**< CM ID */ + uint8_t group_id; /**< Group ID */ + uint8_t granularity; /**< BW Allocation Granularity */ + uint8_t estimated_bw; /**< Estimated_BW */ + uint8_t allocated_bw; /**< Allocated_BW */ + uint8_t reserved; +}; + +/** + * union dpia_notification_data - DPIA Notification in Outbox command + */ +union dpia_notification_data { + /** + * DPIA Notification for common data struct + */ + struct dpia_notification_common common_data; + + /** + * DPIA Notification for DP BW Allocation support + */ + struct dpia_bw_allocation_notify_data dpia_bw_alloc; +}; + +/** + * Definition of a DPIA notification payload + */ +struct dpia_notification_payload { + struct dpia_notification_header header; + union dpia_notification_data data; /**< DPIA notification payload data */ +}; + +/** + * Definition of a DMUB_OUT_CMD__DPIA_NOTIFICATION command. + */ +struct dmub_rb_cmd_dpia_notification { + struct dmub_cmd_header header; /**< DPIA notification header */ + struct dpia_notification_payload payload; /**< DPIA notification payload */ +}; + /** * Data passed from driver to FW in a DMUB_CMD__QUERY_HPD_STATE command. */ @@ -1886,6 +1991,14 @@ struct dmub_cmd_psr_copy_settings_data { * Explicit padding to 2 byte boundary. */ uint8_t pad3; + /** + * DSC Slice height. + */ + uint16_t dsc_slice_height; + /** + * Explicit padding to 4 byte boundary.
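+ * (dsc_slice_height above is two bytes, so two more bytes keep the
+ * structure aligned for the DMUB firmware interface.)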
+ */ + uint16_t pad; }; /** @@ -3029,7 +3142,8 @@ struct dmub_rb_cmd_panel_cntl { */ struct dmub_cmd_lvtma_control_data { uint8_t uc_pwr_action; /**< LVTMA_ACTION */ - uint8_t reserved_0[3]; /**< For future use */ + uint8_t bypass_panel_control_wait; + uint8_t reserved_0[2]; /**< For future use */ uint8_t panel_inst; /**< LVTMA control instance */ uint8_t reserved_1[3]; /**< For future use */ }; @@ -3231,6 +3345,10 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL command. */ struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control; + /** + * Definition of a DMUB_CMD__VBIOS_DOMAIN_CONTROL command. + */ + struct dmub_rb_cmd_domain_control domain_control; /** * Definition of a DMUB_CMD__PSR_SET_VERSION command. */ @@ -3422,6 +3540,10 @@ union dmub_rb_out_cmd { * SET_CONFIG reply command. */ struct dmub_rb_cmd_dp_set_config_reply set_config_reply; + /** + * DPIA notification command. + */ + struct dmub_rb_cmd_dpia_notification dpia_notification; }; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c index 44502ec919a2c4..74189102eaecc1 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c @@ -92,6 +92,27 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub, notify->link_index = cmd.set_config_reply.set_config_reply_control.instance; notify->sc_status = cmd.set_config_reply.set_config_reply_control.status; break; + case DMUB_OUT_CMD__DPIA_NOTIFICATION: + notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION; + notify->link_index = cmd.dpia_notification.payload.header.instance; + + if (cmd.dpia_notification.payload.header.type == DPIA_NOTIFY__BW_ALLOCATION) { + + notify->dpia_notification.payload.data.dpia_bw_alloc.estimated_bw = + cmd.dpia_notification.payload.data.dpia_bw_alloc.estimated_bw; + notify->dpia_notification.payload.data.dpia_bw_alloc.allocated_bw = + cmd.dpia_notification.payload.data.dpia_bw_alloc.allocated_bw; + + if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_failed) + notify->result = DPIA_BW_REQ_FAILED; + else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_succeeded) + notify->result = DPIA_BW_REQ_SUCCESS; + else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.est_bw_changed) + notify->result = DPIA_EST_BW_CHANGED; + else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_alloc_cap_changed) + notify->result = DPIA_BW_ALLOC_CAPS_CHANGED; + } + break; default: notify->type = DMUB_NOTIFICATION_NO_DATA; break; diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index a7ba5bd8dc16ad..31a12ce79a8e0a 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -35,6 +35,7 @@ #define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C #define DP_BRANCH_DEVICE_ID_006037 0x006037 #define DP_BRANCH_DEVICE_ID_001CF8 0x001CF8 +#define DP_BRANCH_DEVICE_ID_0060AD 0x0060AD #define DP_BRANCH_HW_REV_10 0x10 #define DP_BRANCH_HW_REV_20 0x20 @@ -133,6 +134,11 @@ static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5}; static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u"; +/*Travis*/ +static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT"; +/*Nutmeg*/ +static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA"; + /*MST Dock*/ static const uint8_t 
SYNAPTICS_DEVICE_ID[] = "SYNA"; diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index b2df07f9e91c93..c062a44db0785d 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -88,7 +88,10 @@ enum dpcd_phy_test_patterns { PHY_TEST_PATTERN_PRBS23 = 0x30, PHY_TEST_PATTERN_PRBS31 = 0x38, PHY_TEST_PATTERN_264BIT_CUSTOM = 0x40, - PHY_TEST_PATTERN_SQUARE_PULSE = 0x48, + PHY_TEST_PATTERN_SQUARE = 0x48, + PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED = 0x49, + PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED = 0x4A, + PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED = 0x4B, }; enum dpcd_test_dyn_range { diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index d1e91d31d1519a..18b9173d5a962b 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -165,7 +165,12 @@ enum dp_test_pattern { DP_TEST_PATTERN_PRBS23, DP_TEST_PATTERN_PRBS31, DP_TEST_PATTERN_264BIT_CUSTOM, - DP_TEST_PATTERN_SQUARE_PULSE, + DP_TEST_PATTERN_SQUARE_BEGIN, + DP_TEST_PATTERN_SQUARE = DP_TEST_PATTERN_SQUARE_BEGIN, + DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED, + DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED, + DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED, + DP_TEST_PATTERN_SQUARE_END = DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED, /* Link Training Patterns */ DP_TEST_PATTERN_TRAINING_PATTERN1, diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index f6034213c700d0..67a062af3ab03e 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1715,8 +1715,8 @@ static bool map_regamma_hw_to_x_user( const struct pwl_float_data_ex *rgb_regamma, uint32_t hw_points_num, struct dc_transfer_func_distributed_points *tf_pts, - bool mapUserRamp, - bool doClamping) + bool map_user_ramp, + bool do_clamping) { /* setup to spare calculated ideal regamma values */ @@ -1724,7 +1724,7 @@ static bool map_regamma_hw_to_x_user( struct hw_x_point *coords = coords_x; const struct pwl_float_data_ex *regamma = rgb_regamma; - if (ramp && mapUserRamp) { + if (ramp && map_user_ramp) { copy_rgb_regamma_to_coordinates_x(coords, hw_points_num, rgb_regamma); @@ -1744,7 +1744,7 @@ static bool map_regamma_hw_to_x_user( } } - if (doClamping) { + if (do_clamping) { /* this should be named differently, all it does is clamp to 0-1 */ build_new_custom_resulted_curve(hw_points_num, tf_pts); } @@ -1875,7 +1875,7 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf, bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps, struct dc_transfer_func *input_tf, - const struct dc_gamma *ramp, bool mapUserRamp) + const struct dc_gamma *ramp, bool map_user_ramp) { struct dc_transfer_func_distributed_points *tf_pts = &input_tf->tf_pts; struct dividers dividers; @@ -1883,7 +1883,7 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps, struct pwl_float_data_ex *curve = NULL; struct gamma_pixel *axis_x = NULL; struct pixel_gamma_point *coeff = NULL; - enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; + enum dc_transfer_func_predefined tf; uint32_t i; bool ret = false; @@ -1891,12 +1891,12 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps, return false; /* we can use hardcoded 
curve for plain SRGB TF - * If linear, it's bypass if on user ramp + * If linear, it's bypass if no user ramp */ if (input_tf->type == TF_TYPE_PREDEFINED) { if ((input_tf->tf == TRANSFER_FUNCTION_SRGB || input_tf->tf == TRANSFER_FUNCTION_LINEAR) && - !mapUserRamp) + !map_user_ramp) return true; if (dc_caps != NULL && @@ -1919,7 +1919,7 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps, input_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - if (mapUserRamp && ramp && ramp->type == GAMMA_RGB_256) { + if (map_user_ramp && ramp && ramp->type == GAMMA_RGB_256) { rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*rgb_user), GFP_KERNEL); @@ -2007,7 +2007,7 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps, map_regamma_hw_to_x_user(ramp, coeff, rgb_user, coordinates_x, axis_x, curve, MAX_HW_POINTS, tf_pts, - mapUserRamp && ramp && ramp->type == GAMMA_RGB_256, + map_user_ramp && ramp && ramp->type == GAMMA_RGB_256, true); } @@ -2112,9 +2112,11 @@ static bool calculate_curve(enum dc_transfer_func_predefined trans, } bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, - const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed, - const struct hdr_tm_params *fs_params, - struct calculate_buffer *cal_buffer) + const struct dc_gamma *ramp, + bool map_user_ramp, + bool can_rom_be_used, + const struct hdr_tm_params *fs_params, + struct calculate_buffer *cal_buffer) { struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; struct dividers dividers; @@ -2123,27 +2125,27 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, struct pwl_float_data_ex *rgb_regamma = NULL; struct gamma_pixel *axis_x = NULL; struct pixel_gamma_point *coeff = NULL; - enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; - bool doClamping = true; + enum dc_transfer_func_predefined tf; + bool do_clamping = true; bool ret = false; if (output_tf->type == TF_TYPE_BYPASS) return false; /* we can use hardcoded curve for plain SRGB TF */ - if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && + if (output_tf->type == TF_TYPE_PREDEFINED && can_rom_be_used == true && output_tf->tf == TRANSFER_FUNCTION_SRGB) { if (ramp == NULL) return true; if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) || - (!mapUserRamp && ramp->type == GAMMA_RGB_256)) + (!map_user_ramp && ramp->type == GAMMA_RGB_256)) return true; } output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; if (ramp && ramp->type != GAMMA_CS_TFM_1D && - (mapUserRamp || ramp->type != GAMMA_RGB_256)) { + (map_user_ramp || ramp->type != GAMMA_RGB_256)) { rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*rgb_user), GFP_KERNEL); @@ -2164,7 +2166,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, ramp->num_entries, dividers); - if (ramp->type == GAMMA_RGB_256 && mapUserRamp) + if (ramp->type == GAMMA_RGB_256 && map_user_ramp) scale_gamma(rgb_user, ramp, dividers); else if (ramp->type == GAMMA_RGB_FLOAT_1024) scale_gamma_dx(rgb_user, ramp, dividers); @@ -2191,15 +2193,15 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, cal_buffer); if (ret) { - doClamping = !(output_tf->tf == TRANSFER_FUNCTION_GAMMA22 && - fs_params != NULL && fs_params->skip_tm == 0); + do_clamping = !(output_tf->tf == TRANSFER_FUNCTION_GAMMA22 && + fs_params != NULL && fs_params->skip_tm == 0); map_regamma_hw_to_x_user(ramp, coeff, rgb_user, - coordinates_x, axis_x, rgb_regamma, - MAX_HW_POINTS, tf_pts, - 
(mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) && - (ramp && ramp->type != GAMMA_CS_TFM_1D), - doClamping); + coordinates_x, axis_x, rgb_regamma, + MAX_HW_POINTS, tf_pts, + (map_user_ramp || (ramp && ramp->type != GAMMA_RGB_256)) && + (ramp && ramp->type != GAMMA_CS_TFM_1D), + do_clamping); if (ramp && ramp->type == GAMMA_CS_TFM_1D) apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); @@ -2215,89 +2217,3 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, rgb_user_alloc_fail: return ret; } - -bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, - struct dc_transfer_func_distributed_points *points) -{ - uint32_t i; - bool ret = false; - struct pwl_float_data_ex *rgb_degamma = NULL; - - if (trans == TRANSFER_FUNCTION_UNITY || - trans == TRANSFER_FUNCTION_LINEAR) { - - for (i = 0; i <= MAX_HW_POINTS ; i++) { - points->red[i] = coordinates_x[i].x; - points->green[i] = coordinates_x[i].x; - points->blue[i] = coordinates_x[i].x; - } - ret = true; - } else if (trans == TRANSFER_FUNCTION_PQ) { - rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_degamma), - GFP_KERNEL); - if (!rgb_degamma) - goto rgb_degamma_alloc_fail; - - - build_de_pq(rgb_degamma, - MAX_HW_POINTS, - coordinates_x); - for (i = 0; i <= MAX_HW_POINTS ; i++) { - points->red[i] = rgb_degamma[i].r; - points->green[i] = rgb_degamma[i].g; - points->blue[i] = rgb_degamma[i].b; - } - ret = true; - - kvfree(rgb_degamma); - } else if (trans == TRANSFER_FUNCTION_SRGB || - trans == TRANSFER_FUNCTION_BT709 || - trans == TRANSFER_FUNCTION_GAMMA22 || - trans == TRANSFER_FUNCTION_GAMMA24 || - trans == TRANSFER_FUNCTION_GAMMA26) { - rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_degamma), - GFP_KERNEL); - if (!rgb_degamma) - goto rgb_degamma_alloc_fail; - - build_degamma(rgb_degamma, - MAX_HW_POINTS, - coordinates_x, - trans); - for (i = 0; i <= MAX_HW_POINTS ; i++) { - points->red[i] = rgb_degamma[i].r; - points->green[i] = rgb_degamma[i].g; - points->blue[i] = rgb_degamma[i].b; - } - ret = true; - - kvfree(rgb_degamma); - } else if (trans == TRANSFER_FUNCTION_HLG) { - rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_degamma), - GFP_KERNEL); - if (!rgb_degamma) - goto rgb_degamma_alloc_fail; - - build_hlg_degamma(rgb_degamma, - MAX_HW_POINTS, - coordinates_x, - 80, 1000); - for (i = 0; i <= MAX_HW_POINTS ; i++) { - points->red[i] = rgb_degamma[i].r; - points->green[i] = rgb_degamma[i].g; - points->blue[i] = rgb_degamma[i].b; - } - ret = true; - kvfree(rgb_degamma); - } - points->end_exponent = 0; - points->x_point_at_y1_red = 1; - points->x_point_at_y1_green = 1; - points->x_point_at_y1_blue = 1; - -rgb_degamma_alloc_fail: - return ret; -} diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h index 2893abf4820844..ee5c466613de7b 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h @@ -115,9 +115,6 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps, struct dc_transfer_func *output_tf, const struct dc_gamma *ramp, bool mapUserRamp); -bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, - struct dc_transfer_func_distributed_points *points); - bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf, const struct regamma_lut *regamma, struct calculate_buffer *cal_buffer, diff --git 
a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index c2e00f7b8381ef..2be45b3149220d 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -616,7 +616,8 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr, } static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr, - struct dc_info_packet *infopacket) + struct dc_info_packet *infopacket, + bool freesync_on_desktop) { unsigned int min_refresh; unsigned int max_refresh; @@ -649,9 +650,15 @@ static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr, infopacket->sb[6] |= 0x02; /* PB6 = [Bit 2 = FreeSync Active] */ - if (vrr->state == VRR_STATE_ACTIVE_VARIABLE || + if (freesync_on_desktop) { + if (vrr->state != VRR_STATE_DISABLED && + vrr->state != VRR_STATE_UNSUPPORTED) + infopacket->sb[6] |= 0x04; + } else { + if (vrr->state == VRR_STATE_ACTIVE_VARIABLE || vrr->state == VRR_STATE_ACTIVE_FIXED) - infopacket->sb[6] |= 0x04; + infopacket->sb[6] |= 0x04; + } min_refresh = (vrr->min_refresh_in_uhz + 500000) / 1000000; max_refresh = (vrr->max_refresh_in_uhz + 500000) / 1000000; @@ -898,52 +905,20 @@ static void build_vrr_infopacket_v2(enum signal_type signal, infopacket->valid = true; } -#ifndef TRIM_FSFT -static void build_vrr_infopacket_fast_transport_data( - bool ftActive, - unsigned int ftOutputRate, - struct dc_info_packet *infopacket) -{ - /* PB9 : bit7 - fast transport Active*/ - unsigned char activeBit = (ftActive) ? 1 << 7 : 0; - - infopacket->sb[1] &= ~activeBit; //clear bit - infopacket->sb[1] |= activeBit; //set bit - - /* PB13 : Target Output Pixel Rate [kHz] - bits 7:0 */ - infopacket->sb[13] = ftOutputRate & 0xFF; - - /* PB14 : Target Output Pixel Rate [kHz] - bits 15:8 */ - infopacket->sb[14] = (ftOutputRate >> 8) & 0xFF; - - /* PB15 : Target Output Pixel Rate [kHz] - bits 23:16 */ - infopacket->sb[15] = (ftOutputRate >> 16) & 0xFF; - -} -#endif static void build_vrr_infopacket_v3(enum signal_type signal, const struct mod_vrr_params *vrr, -#ifndef TRIM_FSFT - bool ftActive, unsigned int ftOutputRate, -#endif enum color_transfer_func app_tf, - struct dc_info_packet *infopacket) + struct dc_info_packet *infopacket, + bool freesync_on_desktop) { unsigned int payload_size = 0; build_vrr_infopacket_header_v3(signal, infopacket, &payload_size); - build_vrr_infopacket_data_v3(vrr, infopacket); + build_vrr_infopacket_data_v3(vrr, infopacket, freesync_on_desktop); build_vrr_infopacket_fs2_data(app_tf, infopacket); -#ifndef TRIM_FSFT - build_vrr_infopacket_fast_transport_data( - ftActive, - ftOutputRate, - infopacket); -#endif - build_vrr_infopacket_checksum(&payload_size, infopacket); infopacket->valid = true; @@ -980,31 +955,26 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, * Check if Freesync is supported. Return if false. If true, * set the corresponding bit in the info packet */ + bool freesync_on_desktop; + bool fams_enable; + + fams_enable = stream->ctx->dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching; + freesync_on_desktop = stream->freesync_on_desktop && fams_enable; + if (!vrr->send_info_frame) return; switch (packet_type) { case PACKET_TYPE_FS_V3: -#ifndef TRIM_FSFT - // always populate with pixel rate. - build_vrr_infopacket_v3( - stream->signal, vrr, - stream->timing.flags.FAST_TRANSPORT, - (stream->timing.flags.FAST_TRANSPORT) ? 
- stream->timing.fast_transport_output_rate_100hz : - stream->timing.pix_clk_100hz, - app_tf, infopacket); -#else - build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket); -#endif + build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket, freesync_on_desktop); break; case PACKET_TYPE_FS_V2: - build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket, stream->freesync_on_desktop); + build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket, freesync_on_desktop); break; case PACKET_TYPE_VRR: case PACKET_TYPE_FS_V1: default: - build_vrr_infopacket_v1(stream->signal, vrr, infopacket, stream->freesync_on_desktop); + build_vrr_infopacket_v1(stream->signal, vrr, infopacket, freesync_on_desktop); } if (true == pack_sdp_v1_3 && diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h index edf5845f6a1f77..66dc9a19aebe5d 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h @@ -41,4 +41,40 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream, struct dc_info_packet *info_packet); +enum adaptive_sync_type { + ADAPTIVE_SYNC_TYPE_NONE = 0, + ADAPTIVE_SYNC_TYPE_DP = 1, + FREESYNC_TYPE_PCON_IN_WHITELIST = 2, + FREESYNC_TYPE_PCON_NOT_IN_WHITELIST = 3, + ADAPTIVE_SYNC_TYPE_EDP = 4, +}; + +enum adaptive_sync_sdp_version { + AS_SDP_VER_0 = 0x0, + AS_SDP_VER_1 = 0x1, + AS_SDP_VER_2 = 0x2, +}; + +#define AS_DP_SDP_LENGTH (9) + +struct frame_duration_op { + bool support; + unsigned char frame_duration_hex; +}; + +struct AS_Df_params { + bool supportMode; + struct frame_duration_op increase; + struct frame_duration_op decrease; +}; + +void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream, + enum adaptive_sync_type asType, const struct AS_Df_params *param, + struct dc_info_packet *info_packet); + +void mod_build_adaptive_sync_infopacket_v2(const struct dc_stream_state *stream, + const struct AS_Df_params *param, struct dc_info_packet *info_packet); + +void mod_build_adaptive_sync_infopacket_v1(struct dc_info_packet *info_packet); + #endif diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index 69691058ab8981..ec64f19e178660 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -519,3 +519,58 @@ void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream, info_packet->valid = true; } +void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream, + enum adaptive_sync_type asType, + const struct AS_Df_params *param, + struct dc_info_packet *info_packet) +{ + info_packet->valid = false; + + memset(info_packet, 0, sizeof(struct dc_info_packet)); + + switch (asType) { + case ADAPTIVE_SYNC_TYPE_DP: + if (stream != NULL) + mod_build_adaptive_sync_infopacket_v2(stream, param, info_packet); + break; + case FREESYNC_TYPE_PCON_IN_WHITELIST: + mod_build_adaptive_sync_infopacket_v1(info_packet); + break; + case ADAPTIVE_SYNC_TYPE_NONE: + case FREESYNC_TYPE_PCON_NOT_IN_WHITELIST: + default: + break; + } +} + +void mod_build_adaptive_sync_infopacket_v1(struct dc_info_packet *info_packet) +{ + info_packet->valid = true; + // HEADER {HB0, HB1, HB2, HB3} = {00, Type, Version, Length} + info_packet->hb0 = 0x00; + info_packet->hb1 = 0x22; 
+ info_packet->hb2 = AS_SDP_VER_1; + info_packet->hb3 = 0x00; +} + +void mod_build_adaptive_sync_infopacket_v2(const struct dc_stream_state *stream, + const struct AS_Df_params *param, + struct dc_info_packet *info_packet) +{ + info_packet->valid = true; + // HEADER {HB0, HB1, HB2, HB3} = {00, Type, Version, Length} + info_packet->hb0 = 0x00; + info_packet->hb1 = 0x22; + info_packet->hb2 = AS_SDP_VER_2; + info_packet->hb3 = AS_DP_SDP_LENGTH; + + //Payload + info_packet->sb[0] = param->supportMode; //1: AVT; 0: FAVT + info_packet->sb[1] = (stream->timing.v_total & 0x00FF); + info_packet->sb[2] = (stream->timing.v_total & 0xFF00) >> 8; + //info_packet->sb[3] = 0x00; Target RR, not used for AVT + info_packet->sb[4] = (param->increase.support << 6 | param->decrease.support << 7); + info_packet->sb[5] = param->increase.frame_duration_hex; + info_packet->sb[6] = param->decrease.frame_duration_hex; +} + diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 9b5d9b2c9a6a79..e39b133d05af4e 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -916,3 +916,34 @@ bool mod_power_only_edp(const struct dc_state *context, const struct dc_stream_s { return context && context->stream_count == 1 && dc_is_embedded_signal(stream->signal); } + +bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, + struct dc_stream_state *stream, + struct psr_config *config) +{ + uint16_t pic_height; + uint16_t slice_height; + + config->dsc_slice_height = 0; + if ((link->connector_signal & SIGNAL_TYPE_EDP) && + (!dc->caps.edp_dsc_support || + link->panel_config.dsc.disable_dsc_edp || + !link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT || + !stream->timing.dsc_cfg.num_slices_v)) + return true; + + pic_height = stream->timing.v_addressable + + stream->timing.v_border_top + stream->timing.v_border_bottom; + slice_height = pic_height / stream->timing.dsc_cfg.num_slices_v; + config->dsc_slice_height = slice_height; + + if (slice_height) { + if (config->su_y_granularity && + (slice_height % config->su_y_granularity)) { + ASSERT(0); + return false; + } + } + + return true; +} diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index 316452e9dbc91d..1d3079e56799f3 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -59,4 +59,7 @@ void mod_power_calc_psr_configs(struct psr_config *psr_config, const struct dc_stream_state *stream); bool mod_power_only_edp(const struct dc_state *context, const struct dc_stream_state *stream); +bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, + struct dc_stream_state *stream, + struct psr_config *config); #endif /* MODULES_POWER_POWER_HELPERS_H_ */ diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index f175e65b853a00..e4a22c68517d1d 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -240,6 +240,7 @@ enum DC_FEATURE_MASK { DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default + DC_ENABLE_SUBVP_DRR = (1 << 9), // 0x200, disabled by default }; enum DC_DEBUG_MASK { diff --git 
a/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h new file mode 100644 index 00000000000000..fbb18e44ec5226 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _df_4_3_OFFSET_HEADER +#define _df_4_3_OFFSET_HEADER + +#define regDF_CS_UMC_AON0_HardwareAssertMaskLow 0x0e3e +#define regDF_CS_UMC_AON0_HardwareAssertMaskLow_BASE_IDX 4 +#define regDF_NCS_PG0_HardwareAssertMaskHigh 0x0e3f +#define regDF_NCS_PG0_HardwareAssertMaskHigh_BASE_IDX 4 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h new file mode 100644 index 00000000000000..9c8f19ded4ebd6 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h @@ -0,0 +1,157 @@ +/* + * Copyright (C) 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _df_4_3_SH_MASK_HEADER +#define _df_4_3_SH_MASK_HEADER + +//DF_CS_UMC_AON0_HardwareAssertMaskLow +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0__SHIFT 0x0 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk1__SHIFT 0x1 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk2__SHIFT 0x2 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk3__SHIFT 0x3 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk4__SHIFT 0x4 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk5__SHIFT 0x5 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk6__SHIFT 0x6 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk7__SHIFT 0x7 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk8__SHIFT 0x8 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk9__SHIFT 0x9 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk10__SHIFT 0xa +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk11__SHIFT 0xb +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk12__SHIFT 0xc +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk13__SHIFT 0xd +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk14__SHIFT 0xe +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk15__SHIFT 0xf +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk16__SHIFT 0x10 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk17__SHIFT 0x11 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk18__SHIFT 0x12 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk19__SHIFT 0x13 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk20__SHIFT 0x14 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk21__SHIFT 0x15 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk22__SHIFT 0x16 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk23__SHIFT 0x17 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk24__SHIFT 0x18 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk25__SHIFT 0x19 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk26__SHIFT 0x1a +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk27__SHIFT 0x1b +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk28__SHIFT 0x1c +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk29__SHIFT 0x1d +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk30__SHIFT 0x1e +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk31__SHIFT 0x1f +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0_MASK 0x00000001L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk1_MASK 0x00000002L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk2_MASK 0x00000004L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk3_MASK 0x00000008L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk4_MASK 0x00000010L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk5_MASK 0x00000020L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk6_MASK 0x00000040L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk7_MASK 0x00000080L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk8_MASK 0x00000100L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk9_MASK 0x00000200L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk10_MASK 0x00000400L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk11_MASK 0x00000800L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk12_MASK 0x00001000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk13_MASK 0x00002000L +#define 
DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk14_MASK 0x00004000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk15_MASK 0x00008000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk16_MASK 0x00010000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk17_MASK 0x00020000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk18_MASK 0x00040000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk19_MASK 0x00080000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk20_MASK 0x00100000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk21_MASK 0x00200000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk22_MASK 0x00400000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk23_MASK 0x00800000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk24_MASK 0x01000000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk25_MASK 0x02000000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk26_MASK 0x04000000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk27_MASK 0x08000000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk28_MASK 0x10000000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk29_MASK 0x20000000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk30_MASK 0x40000000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk31_MASK 0x80000000L + +//DF_NCS_PG0_HardwareAssertMaskHigh +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk0__SHIFT 0x0 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk1__SHIFT 0x1 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk2__SHIFT 0x2 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk3__SHIFT 0x3 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk4__SHIFT 0x4 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk5__SHIFT 0x5 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk6__SHIFT 0x6 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk7__SHIFT 0x7 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk8__SHIFT 0x8 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk9__SHIFT 0x9 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk10__SHIFT 0xa +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk11__SHIFT 0xb +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk12__SHIFT 0xc +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk13__SHIFT 0xd +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk14__SHIFT 0xe +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk15__SHIFT 0xf +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk16__SHIFT 0x10 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk17__SHIFT 0x11 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk18__SHIFT 0x12 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk19__SHIFT 0x13 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk20__SHIFT 0x14 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk21__SHIFT 0x15 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk22__SHIFT 0x16 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk23__SHIFT 0x17 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk24__SHIFT 0x18 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk25__SHIFT 0x19 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk26__SHIFT 0x1a +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk27__SHIFT 0x1b +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk28__SHIFT 0x1c +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk29__SHIFT 0x1d +#define 
DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk30__SHIFT 0x1e +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk31__SHIFT 0x1f +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk0_MASK 0x00000001L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk1_MASK 0x00000002L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk2_MASK 0x00000004L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk3_MASK 0x00000008L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk4_MASK 0x00000010L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk5_MASK 0x00000020L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk6_MASK 0x00000040L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk7_MASK 0x00000080L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk8_MASK 0x00000100L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk9_MASK 0x00000200L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk10_MASK 0x00000400L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk11_MASK 0x00000800L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk12_MASK 0x00001000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk13_MASK 0x00002000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk14_MASK 0x00004000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk15_MASK 0x00008000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk16_MASK 0x00010000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk17_MASK 0x00020000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk18_MASK 0x00040000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk19_MASK 0x00080000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk20_MASK 0x00100000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk21_MASK 0x00200000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk22_MASK 0x00400000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk23_MASK 0x00800000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk24_MASK 0x01000000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk25_MASK 0x02000000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk26_MASK 0x04000000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk27_MASK 0x08000000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk28_MASK 0x10000000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk29_MASK 0x20000000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk30_MASK 0x40000000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk31_MASK 0x80000000L + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h index 3b95a59b196c2e..56e00252bff84c 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h @@ -3593,6 +3593,14 @@ #define regGCL2TLB_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +// addressBlock: gc_rlcsdec +// base address: 0x3b980 +#define regRLC_RLCS_FED_STATUS_0 0x4eff +#define regRLC_RLCS_FED_STATUS_0_BASE_IDX 1 +#define regRLC_RLCS_FED_STATUS_1 0x4f00 +#define regRLC_RLCS_FED_STATUS_1_BASE_IDX 1 + + // addressBlock: gc_gcvml2pspdec // base address: 0x3f900 #define regGCUTCL2_TRANSLATION_BYPASS_BY_VMID 0x5e41 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h index ae3ef8a9e7026e..658e88a8e2ac9a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h +++ 
b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h @@ -37642,6 +37642,56 @@ #define RLC_RLCG_DOORBELL_RANGE__LOWER_ADDR_MASK 0x00000FFCL #define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_RESERVED_MASK 0x00030000L #define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_MASK 0x0FFC0000L +//RLC_RLCS_FED_STATUS_0 +#define RLC_RLCS_FED_STATUS_0__RLC_FED_ERR__SHIFT 0x0 +#define RLC_RLCS_FED_STATUS_0__UTCL2_FED_ERR__SHIFT 0x1 +#define RLC_RLCS_FED_STATUS_0__GE_FED_ERR__SHIFT 0x2 +#define RLC_RLCS_FED_STATUS_0__CPC_FED_ERR__SHIFT 0x3 +#define RLC_RLCS_FED_STATUS_0__CPF_FED_ERR__SHIFT 0x4 +#define RLC_RLCS_FED_STATUS_0__CPG_FED_ERR__SHIFT 0x5 +#define RLC_RLCS_FED_STATUS_0__SDMA0_FED_ERR__SHIFT 0x6 +#define RLC_RLCS_FED_STATUS_0__SDMA1_FED_ERR__SHIFT 0x7 +#define RLC_RLCS_FED_STATUS_0__RLC_FED_ERR_MASK 0x00000001L +#define RLC_RLCS_FED_STATUS_0__UTCL2_FED_ERR_MASK 0x00000002L +#define RLC_RLCS_FED_STATUS_0__GE_FED_ERR_MASK 0x00000004L +#define RLC_RLCS_FED_STATUS_0__CPC_FED_ERR_MASK 0x00000008L +#define RLC_RLCS_FED_STATUS_0__CPF_FED_ERR_MASK 0x00000010L +#define RLC_RLCS_FED_STATUS_0__CPG_FED_ERR_MASK 0x00000020L +#define RLC_RLCS_FED_STATUS_0__SDMA0_FED_ERR_MASK 0x00000040L +#define RLC_RLCS_FED_STATUS_0__SDMA1_FED_ERR_MASK 0x00000080L +//RLC_RLCS_FED_STATUS_1 +#define RLC_RLCS_FED_STATUS_1__GL2C0_FED_ERR__SHIFT 0x0 +#define RLC_RLCS_FED_STATUS_1__GL2C1_FED_ERR__SHIFT 0x1 +#define RLC_RLCS_FED_STATUS_1__GL2C2_FED_ERR__SHIFT 0x2 +#define RLC_RLCS_FED_STATUS_1__GL2C3_FED_ERR__SHIFT 0x3 +#define RLC_RLCS_FED_STATUS_1__GL2C4_FED_ERR__SHIFT 0x4 +#define RLC_RLCS_FED_STATUS_1__GL2C5_FED_ERR__SHIFT 0x5 +#define RLC_RLCS_FED_STATUS_1__GL2C6_FED_ERR__SHIFT 0x6 +#define RLC_RLCS_FED_STATUS_1__GL2C7_FED_ERR__SHIFT 0x7 +#define RLC_RLCS_FED_STATUS_1__GL2C8_FED_ERR__SHIFT 0x8 +#define RLC_RLCS_FED_STATUS_1__GL2C9_FED_ERR__SHIFT 0x9 +#define RLC_RLCS_FED_STATUS_1__GL2C10_FED_ERR__SHIFT 0xa +#define RLC_RLCS_FED_STATUS_1__GL2C11_FED_ERR__SHIFT 0xb +#define RLC_RLCS_FED_STATUS_1__GL2C12_FED_ERR__SHIFT 0xc +#define RLC_RLCS_FED_STATUS_1__GL2C13_FED_ERR__SHIFT 0xd +#define RLC_RLCS_FED_STATUS_1__GL2C14_FED_ERR__SHIFT 0xe +#define RLC_RLCS_FED_STATUS_1__GL2C15_FED_ERR__SHIFT 0xf +#define RLC_RLCS_FED_STATUS_1__GL2C0_FED_ERR_MASK 0x00000001L +#define RLC_RLCS_FED_STATUS_1__GL2C1_FED_ERR_MASK 0x00000002L +#define RLC_RLCS_FED_STATUS_1__GL2C2_FED_ERR_MASK 0x00000004L +#define RLC_RLCS_FED_STATUS_1__GL2C3_FED_ERR_MASK 0x00000008L +#define RLC_RLCS_FED_STATUS_1__GL2C4_FED_ERR_MASK 0x00000010L +#define RLC_RLCS_FED_STATUS_1__GL2C5_FED_ERR_MASK 0x00000020L +#define RLC_RLCS_FED_STATUS_1__GL2C6_FED_ERR_MASK 0x00000040L +#define RLC_RLCS_FED_STATUS_1__GL2C7_FED_ERR_MASK 0x00000080L +#define RLC_RLCS_FED_STATUS_1__GL2C8_FED_ERR_MASK 0x00000100L +#define RLC_RLCS_FED_STATUS_1__GL2C9_FED_ERR_MASK 0x00000200L +#define RLC_RLCS_FED_STATUS_1__GL2C10_FED_ERR_MASK 0x00000400L +#define RLC_RLCS_FED_STATUS_1__GL2C11_FED_ERR_MASK 0x00000800L +#define RLC_RLCS_FED_STATUS_1__GL2C12_FED_ERR_MASK 0x00001000L +#define RLC_RLCS_FED_STATUS_1__GL2C13_FED_ERR_MASK 0x00002000L +#define RLC_RLCS_FED_STATUS_1__GL2C14_FED_ERR_MASK 0x00004000L +#define RLC_RLCS_FED_STATUS_1__GL2C15_FED_ERR_MASK 0x00008000L //RLC_CGTT_MGCG_OVERRIDE #define RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE__SHIFT 0x0 #define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE__SHIFT 0x1 diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_6_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_6_1_0_sh_mask.h new file mode 100644 index 
00000000000000..c6c0cf1376a6e8 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_6_1_0_sh_mask.h @@ -0,0 +1,87 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _xgmi_6_1_0_SH_MASK_HEADER +#define _xgmi_6_1_0_SH_MASK_HEADER + +//PCS_XGMI3X16_PCS_ERROR_STATUS +#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__FlowCtrlAckErr__SHIFT 0x2 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxFifoUnderflowErr__SHIFT 0x3 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxFifoOverflowErr__SHIFT 0x4 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__TxVcidDataErr__SHIFT 0x7 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc +#define PCS_XGMI3X16_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd +#define PCS_XGMI3X16_PCS_ERROR_STATUS__FlowCtrlCRCErr__SHIFT 0xe +#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf +#define PCS_XGMI3X16_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayAttemptErr__SHIFT 0x16 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__SyncHdrErr__SHIFT 0x17 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__TxReplayTimeoutErr__SHIFT 0x18 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxReplayTimeoutErr__SHIFT 0x19 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__LinkSubTxTimeoutErr__SHIFT 0x1a +#define PCS_XGMI3X16_PCS_ERROR_STATUS__LinkSubRxTimeoutErr__SHIFT 0x1b +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxCMDPktErr__SHIFT 0x1c +#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L +#define 
PCS_XGMI3X16_PCS_ERROR_STATUS__FlowCtrlAckErr_MASK 0x00000004L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxFifoUnderflowErr_MASK 0x00000008L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxFifoOverflowErr_MASK 0x00000010L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__TxVcidDataErr_MASK 0x00000080L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__FlowCtrlCRCErr_MASK 0x00004000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayAttemptErr_MASK 0x00400000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__SyncHdrErr_MASK 0x00800000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__TxReplayTimeoutErr_MASK 0x01000000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxReplayTimeoutErr_MASK 0x02000000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__LinkSubTxTimeoutErr_MASK 0x04000000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__LinkSubRxTimeoutErr_MASK 0x08000000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxCMDPktErr_MASK 0x10000000L + +#endif diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h index 9e8ed9f4bb1525..3a4670bc4449d8 100644 --- a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h +++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h @@ -49,6 +49,8 @@ #define GFX_11_0_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 65 // 0x41 GPF(Sem incomplete timeout) #define GFX_11_0_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 66 // 0x42 Semaphore wait fail timeout +#define GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT 128 // 0x80 FED Interrupt (for data poisoning) + #define GFX_11_0_0__SRCID__CP_GENERIC_INT 177 // 0xB1 CP_GENERIC int #define GFX_11_0_0__SRCID__CP_PM4_PKT_RSVD_BIT_ERROR 180 // 0xB4 PM4 Pkt Rsvd Bits Error #define GFX_11_0_0__SRCID__CP_EOP_INTERRUPT 181 // 0xB5 End-of-Pipe Interrupt diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index d18162e9ed1da1..75f18791cdb9c8 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -139,6 +139,8 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_MIN_FAN_RPM, AMDGPU_PP_SENSOR_MAX_FAN_RPM, AMDGPU_PP_SENSOR_VCN_POWER_STATE, + AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK, + AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK, }; enum amd_pp_task { @@ -395,6 +397,7 @@ struct amd_pm_funcs { int (*get_ppfeature_status)(void *handle, char *buf); int (*set_ppfeature_status)(void *handle, uint64_t ppfeature_masks); int 
(*asic_reset_mode_2)(void *handle); + int (*asic_reset_enable_gfx_features)(void *handle); int (*set_df_cstate)(void *handle, enum pp_df_cstate state); int (*set_xgmi_pstate)(void *handle, uint32_t pstate); ssize_t (*get_gpu_metrics)(void *handle, void **table); diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 1b300c569faf50..6e79d3352d0bb1 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -227,6 +227,24 @@ int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) return ret; } +int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + void *pp_handle = adev->powerplay.pp_handle; + int ret = 0; + + if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features) + return -ENOENT; + + mutex_lock(&adev->pm.mutex); + + ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle); + + mutex_unlock(&adev->pm.mutex); + + return ret; +} + int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 2f3e239e623dcf..bf6d63673b5aa9 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -3063,7 +3063,7 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, * * hwmon interfaces for GPU power: * - * - power1_average: average power used by the GPU in microWatts + * - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU. * * - power1_cap_min: minimum cap supported in microWatts * diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index cb5b9df78b4db3..16addceca68ff0 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -386,6 +386,7 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, int amdgpu_dpm_baco_reset(struct amdgpu_device *adev); int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev); +int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev); bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index 49c398ec0aaf60..d6d9e3b1b2c0e4 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -7714,20 +7714,13 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev) } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name); - err = request_firmware(&adev->pm.fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->pm.fw); - -out: + err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name); if (err) { DRM_ERROR("si_smc: Failed to load firmware. 
err = %d\"%s\"\n", err, fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; + amdgpu_ucode_release(&adev->pm.fw); } return err; - } static int si_dpm_sw_init(void *handle) diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 304190d5c9d26d..11b7b4cffaae09 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -111,8 +111,7 @@ static int pp_sw_fini(void *handle) hwmgr_sw_fini(hwmgr); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; + amdgpu_ucode_release(&adev->pm.fw); return 0; } @@ -769,10 +768,16 @@ static int pp_dpm_read_sensor(void *handle, int idx, switch (idx) { case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK: - *((uint32_t *)value) = hwmgr->pstate_sclk; + *((uint32_t *)value) = hwmgr->pstate_sclk * 100; return 0; case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK: - *((uint32_t *)value) = hwmgr->pstate_mclk; + *((uint32_t *)value) = hwmgr->pstate_mclk * 100; + return 0; + case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK: + *((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100; + return 0; + case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK: + *((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100; return 0; case AMDGPU_PP_SENSOR_MIN_FAN_RPM: *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index ede71de2343dcf..86d6e88c738628 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -375,6 +375,17 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr) return 0; } +static void smu10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr) +{ + hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK; + hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK; + + smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetMaxGfxclkFrequency, + &hwmgr->pstate_sclk_peak); + hwmgr->pstate_mclk_peak = SMU10_UMD_PSTATE_PEAK_FCLK; +} + static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = hwmgr->adev; @@ -398,6 +409,8 @@ static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) return ret; } + smu10_populate_umdpstate_clocks(hwmgr); + return 0; } @@ -574,9 +587,6 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; - hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100; - hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100; - /* enable the pp_od_clk_voltage sysfs file */ hwmgr->od_enabled = 1; /* disabled fine grain tuning function by default */ diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index 7ef7e81525a306..e10cc5e7928e68 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -22,7 +22,6 @@ */ #include "pp_debug.h" #include -#include #include #include #include @@ -1501,6 +1500,67 @@ static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr) return ret; } +static void smu7_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table; + int32_t tmp_sclk, count, percentage; + + if (golden_dpm_table->mclk_table.count == 1) { + percentage = 70; + hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[0].value; + } else { + percentage = 
100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value / + golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; + hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value; + } + + tmp_sclk = hwmgr->pstate_mclk * percentage / 100; + + if (hwmgr->pp_table_version == PP_TABLE_V0) { + struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk = + hwmgr->dyn_state.vddc_dependency_on_sclk; + + for (count = vddc_dependency_on_sclk->count - 1; count >= 0; count--) { + if (tmp_sclk >= vddc_dependency_on_sclk->entries[count].clk) { + hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[count].clk; + break; + } + } + if (count < 0) + hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[0].clk; + + hwmgr->pstate_sclk_peak = + vddc_dependency_on_sclk->entries[vddc_dependency_on_sclk->count - 1].clk; + } else if (hwmgr->pp_table_version == PP_TABLE_V1) { + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk = + table_info->vdd_dep_on_sclk; + + for (count = vdd_dep_on_sclk->count - 1; count >= 0; count--) { + if (tmp_sclk >= vdd_dep_on_sclk->entries[count].clk) { + hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[count].clk; + break; + } + } + if (count < 0) + hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[0].clk; + + hwmgr->pstate_sclk_peak = + vdd_dep_on_sclk->entries[vdd_dep_on_sclk->count - 1].clk; + } + + hwmgr->pstate_mclk_peak = + golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; + + /* make sure the output is in MHz */ + hwmgr->pstate_sclk /= 100; + hwmgr->pstate_mclk /= 100; + hwmgr->pstate_sclk_peak /= 100; + hwmgr->pstate_mclk_peak /= 100; +} + static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) { int tmp_result = 0; @@ -1625,6 +1685,8 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "pcie performance request failed!", result = tmp_result); + smu7_populate_umdpstate_clocks(hwmgr); + return 0; } @@ -3143,15 +3205,12 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; count >= 0; count--) { if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) { - tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk; *sclk_mask = count; break; } } - if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { + if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) *sclk_mask = 0; - tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk; - } if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; @@ -3161,15 +3220,12 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) { if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) { - tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk; *sclk_mask = count; break; } } - if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { + if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) *sclk_mask = 0; - tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; - } if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; @@ -3181,8 +3237,6 @@ static 
int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le *mclk_mask = golden_dpm_table->mclk_table.count - 1; *pcie_mask = data->dpm_table.pcie_speed_table.count - 1; - hwmgr->pstate_sclk = tmp_sclk; - hwmgr->pstate_mclk = tmp_mclk; return 0; } @@ -3195,9 +3249,6 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, uint32_t mclk_mask = 0; uint32_t pcie_mask = 0; - if (hwmgr->pstate_sclk == 0) - smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); - switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: ret = smu7_force_dpm_highest(hwmgr); @@ -4153,7 +4204,7 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->sclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to freeze SCLK DPM when DPM is disabled", ); @@ -4210,7 +4261,7 @@ static int smu7_populate_and_upload_sclk_mclk_dpm_levels( } if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { + (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) { result = smum_populate_all_graphic_levels(hwmgr); PP_ASSERT_WITH_CODE((0 == result), "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", @@ -4218,7 +4269,7 @@ static int smu7_populate_and_upload_sclk_mclk_dpm_levels( } if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { + (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) { /*populate MCLK dpm table to SMU7 */ result = smum_populate_all_memory_levels(hwmgr); PP_ASSERT_WITH_CODE((0 == result), @@ -4309,7 +4360,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->sclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to Unfreeze SCLK DPM when DPM is disabled", diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c index b50fd4a4a3d1ae..b015a601b385ae 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c @@ -1016,6 +1016,18 @@ static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr) data->acp_boot_level = 0xff; } +static void smu8_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr) +{ + struct phm_clock_voltage_dependency_table *table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + + hwmgr->pstate_sclk = table->entries[0].clk / 100; + hwmgr->pstate_mclk = 0; + + hwmgr->pstate_sclk_peak = table->entries[table->count - 1].clk / 100; + hwmgr->pstate_mclk_peak = 0; +} + static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr) { smu8_program_voting_clients(hwmgr); @@ -1024,6 +1036,8 @@ static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr) smu8_program_bootup_state(hwmgr); smu8_reset_acp_boot_level(hwmgr); + smu8_populate_umdpstate_clocks(hwmgr); + return 0; } @@ -1167,8 +1181,6 @@ static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr) data->sclk_dpm.soft_min_clk = table->entries[0].clk; data->sclk_dpm.hard_min_clk = table->entries[0].clk; - hwmgr->pstate_sclk = table->entries[0].clk; - hwmgr->pstate_mclk = 0; level = smu8_get_max_sclk_level(hwmgr) - 1; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c 
b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index c8c9fb827bda11..99cd2e63afdd43 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -22,7 +22,6 @@ */ #include -#include #include #include #include @@ -3008,6 +3007,30 @@ static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool return 0; } +static void vega10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + + if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL && + table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) { + hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk; + hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk; + } else { + hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; + hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[0].clk; + } + + hwmgr->pstate_sclk_peak = table_info->vdd_dep_on_sclk->entries[table_info->vdd_dep_on_sclk->count - 1].clk; + hwmgr->pstate_mclk_peak = table_info->vdd_dep_on_mclk->entries[table_info->vdd_dep_on_mclk->count - 1].clk; + + /* make sure the output is in MHz */ + hwmgr->pstate_sclk /= 100; + hwmgr->pstate_mclk /= 100; + hwmgr->pstate_sclk_peak /= 100; + hwmgr->pstate_mclk_peak /= 100; +} + static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) { struct vega10_hwmgr *data = hwmgr->backend; @@ -3082,6 +3105,8 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) result = tmp_result); } + vega10_populate_umdpstate_clocks(hwmgr); + return result; } @@ -4169,8 +4194,6 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo *sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL; *soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL; *mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL; - hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk; - hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk; } if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { @@ -4281,9 +4304,6 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, uint32_t mclk_mask = 0; uint32_t soc_mask = 0; - if (hwmgr->pstate_sclk == 0) - vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask); - switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: ret = vega10_force_dpm_highest(hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c index 95b988823f50f1..bb90d8abf79b1e 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c @@ -23,7 +23,6 @@ #include #include #include -#include #include "vega10_processpptables.h" #include "ppatomfwctrl.h" diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index a2f4d6773d458c..e9db137cd1c6cb 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -22,7 +22,6 @@ */ #include -#include #include #include @@ -1026,6 +1025,25 @@ static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr) return 0; } +static void vega12_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr) +{ + struct vega12_hwmgr *data = (struct 
vega12_hwmgr *)(hwmgr->backend); + struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table); + struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table); + + if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL && + mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) { + hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value; + hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value; + } else { + hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[0].value; + hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[0].value; + } + + hwmgr->pstate_sclk_peak = gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value; + hwmgr->pstate_mclk_peak = mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value; +} + static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) { int tmp_result, result = 0; @@ -1077,6 +1095,9 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!result, "Failed to setup default DPM tables!", return result); + + vega12_populate_umdpstate_clocks(hwmgr); + return result; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c index bd54fbd393b977..89148f73b514a4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c @@ -22,7 +22,6 @@ */ #include #include -#include #include "vega12/smu9_driver_if.h" #include "vega12_processpptables.h" diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index b30684c84e20e4..0d4d4811527c64 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -22,7 +22,6 @@ */ #include -#include #include #include @@ -1555,26 +1554,23 @@ static int vega20_set_mclk_od( return 0; } -static int vega20_populate_umdpstate_clocks( - struct pp_hwmgr *hwmgr) +static void vega20_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table); struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table); - hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value; - hwmgr->pstate_mclk = mem_table->dpm_levels[0].value; - if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL && mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) { hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value; hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value; + } else { + hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value; + hwmgr->pstate_mclk = mem_table->dpm_levels[0].value; } - hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100; - hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100; - - return 0; + hwmgr->pstate_sclk_peak = gfx_table->dpm_levels[gfx_table->count - 1].value; + hwmgr->pstate_mclk_peak = mem_table->dpm_levels[mem_table->count - 1].value; } static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr, @@ -1753,10 +1749,7 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "[EnableDPMTasks] Failed to initialize odn settings!", return result); - result = vega20_populate_umdpstate_clocks(hwmgr); - PP_ASSERT_WITH_CODE(!result, - "[EnableDPMTasks] Failed to populate umdpstate clocks!", - return result); + vega20_populate_umdpstate_clocks(hwmgr); result = 
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit, POWER_SOURCE_AC << 16, &hwmgr->default_power_limit); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c index 1f90825394575d..79c817752a3315 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c @@ -22,7 +22,6 @@ */ #include #include -#include #include "smu11_driver_if.h" #include "vega20_processpptables.h" diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h index 27f8d0e0e6a8c0..5ce433e2c16a56 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h @@ -809,6 +809,8 @@ struct pp_hwmgr { uint32_t workload_prority[Workload_Policy_Max]; uint32_t workload_setting[Workload_Policy_Max]; bool gfxoff_state_changed_by_workload; + uint32_t pstate_sclk_peak; + uint32_t pstate_mclk_peak; }; int hwmgr_early_init(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h index fdc6b7a57bc912..c2efc70ef28881 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h @@ -358,6 +358,7 @@ typedef struct { QuadraticInt_t SsCurve; } DpmDescriptor_t; +#pragma pack(push, 1) typedef struct { uint32_t Version; @@ -609,6 +610,7 @@ typedef struct { uint32_t MmHubPadding[8]; } PPTable_t; +#pragma pack(pop) typedef struct { diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h index 2818c98ff5ca90..faae4b918d90a4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h @@ -122,6 +122,7 @@ typedef struct { uint16_t Vid; /* min voltage in SVI2 VID */ } DisplayClockTable_t; +#pragma pack(push, 1) typedef struct { /* PowerTune */ uint16_t SocketPowerLimit; /* Watts */ @@ -323,6 +324,7 @@ typedef struct { uint32_t MmHubPadding[3]; /* SMU internal use */ } PPTable_t; +#pragma pack(pop) typedef struct { uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz) diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h index b6ffd08784e7ff..6456bea5d2d52f 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h @@ -245,6 +245,7 @@ typedef struct { QuadraticInt_t SsCurve; } DpmDescriptor_t; +#pragma pack(push, 1) typedef struct { uint32_t Version; @@ -508,6 +509,7 @@ typedef struct { uint32_t MmHubPadding[7]; } PPTable_t; +#pragma pack(pop) typedef struct { diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c index 5ca3c422f7d4f0..4bc8db1be738a6 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c @@ -22,7 +22,6 @@ */ #include #include -#include #include "linux/delay.h" #include #include @@ -2203,7 +2202,7 @@ static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + (DPMTABLE_OD_UPDATE_SCLK | 
DPMTABLE_OD_UPDATE_MCLK)) return ci_program_memory_timing_parameters(hwmgr); return 0; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c index 03df35dee8ba8d..060fc140c5744c 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c @@ -2165,7 +2165,7 @@ static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK)) return iceland_program_memory_timing_parameters(hwmgr); return 0; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c index 88a5641465dcf5..7eeab84d421ac3 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c @@ -250,9 +250,8 @@ static int smu10_smu_init(struct pp_hwmgr *hwmgr) /* allocate space for watermarks table */ r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, - sizeof(Watermarks_t), - PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, + sizeof(Watermarks_t), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT, &priv->smu_tables.entry[SMU10_WMTABLE].handle, &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr, &priv->smu_tables.entry[SMU10_WMTABLE].table); @@ -266,9 +265,8 @@ static int smu10_smu_init(struct pp_hwmgr *hwmgr) /* allocate space for watermarks table */ r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, - sizeof(DpmClocks_t), - PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, + sizeof(DpmClocks_t), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT, &priv->smu_tables.entry[SMU10_CLOCKTABLE].handle, &priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr, &priv->smu_tables.entry[SMU10_CLOCKTABLE].table); diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c index 04b561f5d932bc..acbe41174d7e61 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c @@ -2554,7 +2554,7 @@ static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK)) return tonga_program_memory_timing_parameters(hwmgr); return 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 6ab15502359265..834d146c4991fa 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -623,6 +623,7 @@ static int smu_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu; + int r; smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL); if (!smu) @@ -640,7 +641,10 @@ static int smu_early_init(void *handle) adev->powerplay.pp_handle = smu; adev->powerplay.pp_funcs = &swsmu_pm_funcs; - return smu_set_funcs(adev); + r = smu_set_funcs(adev); + if (r) + return r; + return smu_init_microcode(smu); } static int smu_set_default_dpm_table(struct smu_context *smu) @@ -900,9 +904,8 @@ static int smu_alloc_dummy_read_table(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; 
int ret = 0; - dummy_read_1_table->size = 0x40000; - dummy_read_1_table->align = PAGE_SIZE; - dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM; + if (!dummy_read_1_table->size) + return 0; ret = amdgpu_bo_create_kernel(adev, dummy_read_1_table->size, @@ -1067,12 +1070,6 @@ static int smu_sw_init(void *handle) smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; - ret = smu_init_microcode(smu); - if (ret) { - dev_err(adev->dev, "Failed to load smu firmware!\n"); - return ret; - } - ret = smu_smc_table_sw_init(smu); if (ret) { dev_err(adev->dev, "Failed to sw init smc table!\n"); @@ -2487,6 +2484,14 @@ static int smu_read_sensor(void *handle, *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100; *size = 4; break; + case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK: + *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK: + *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100; + *size = 4; + break; case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data); *size = 8; @@ -2853,6 +2858,23 @@ static int smu_mode2_reset(void *handle) return ret; } +static int smu_enable_gfx_features(void *handle) +{ + struct smu_context *smu = handle; + int ret = 0; + + if (!smu->pm_enabled) + return -EOPNOTSUPP; + + if (smu->ppt_funcs->enable_gfx_features) + ret = smu->ppt_funcs->enable_gfx_features(smu); + + if (ret) + dev_err(smu->adev->dev, "enable gfx features failed!\n"); + + return ret; +} + static int smu_get_max_sustainable_clocks_by_dc(void *handle, struct pp_smu_nv_clock_table *max_clocks) { @@ -3037,6 +3059,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = { .get_ppfeature_status = smu_sys_get_pp_feature_mask, .set_ppfeature_status = smu_sys_set_pp_feature_mask, .asic_reset_mode_2 = smu_mode2_reset, + .asic_reset_enable_gfx_features = smu_enable_gfx_features, .set_df_cstate = smu_set_df_cstate, .set_xgmi_pstate = smu_set_xgmi_pstate, .get_gpu_metrics = smu_sys_get_gpu_metrics, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 3bc4128a22ac2d..2a03d85bf4e2d1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -1201,6 +1201,8 @@ struct pptable_funcs { * IPs reset varies by asic. 
*/ int (*mode2_reset)(struct smu_context *smu); + /* for gfx feature enablement after mode2 reset */ + int (*enable_gfx_features)(struct smu_context *smu); /** * @get_dpm_ultimate_freq: Get the hard frequency range of a clock diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h index 43d43d6addc005..d518dee18e1be7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h @@ -464,6 +464,7 @@ typedef struct { uint16_t Padding16; } DpmDescriptor_t; +#pragma pack(push, 1) typedef struct { uint32_t Version; @@ -733,6 +734,7 @@ typedef struct { uint32_t MmHubPadding[8]; // SMU internal use } PPTable_t; +#pragma pack(pop) typedef struct { // Time constant parameters for clock averages in ms diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h index 04752ade10165f..c5c1943fb6a1c7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h @@ -515,6 +515,7 @@ typedef struct { uint32_t BoardLevelEnergyAccumulator; } OutOfBandMonitor_t; +#pragma pack(push, 1) typedef struct { uint32_t Version; @@ -814,6 +815,7 @@ typedef struct { uint32_t MmHubPadding[8]; // SMU internal use } PPTable_t; +#pragma pack(pop) typedef struct { // Time constant parameters for clock averages in ms diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h index 351a4af429b32b..aa6d29de400250 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h @@ -599,6 +599,7 @@ typedef struct { uint16_t Fmax; } UclkDpmChangeRange_t; +#pragma pack(push, 1) typedef struct { // MAJOR SECTION: SKU PARAMETERS @@ -957,6 +958,7 @@ typedef struct { uint32_t MmHubPadding[8]; // SMU internal use } PPTable_t; +#pragma pack(pop) typedef struct { // MAJOR SECTION: SKU PARAMETERS diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h index 7a6075daa7b2f6..90200f31ff5269 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h @@ -267,6 +267,7 @@ typedef struct { QuadraticInt_t SsCurve; // Slow-slow curve (GHz->V) } DpmDescriptor_t; +#pragma pack(push, 1) typedef struct { uint32_t Version; @@ -448,6 +449,7 @@ typedef struct { uint32_t reserved[14]; } PPTable_t; +#pragma pack(pop) typedef struct { // Time constant parameters for clock averages in ms diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h index 4bc7aee4d44f82..b686fb68a6e765 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h @@ -1347,10 +1347,12 @@ typedef struct { uint32_t MmHubPadding[8]; } BoardTable_t; +#pragma pack(push, 1) typedef struct { SkuTable_t SkuTable; BoardTable_t BoardTable; } PPTable_t; +#pragma pack(pop) typedef struct { // Time constant parameters for clock averages in ms diff --git 
a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h index 48a3a3952ceb35..4c46a039245122 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h @@ -1380,10 +1380,12 @@ typedef struct { uint32_t MmHubPadding[8]; } BoardTable_t; +#pragma pack(push, 1) typedef struct { SkuTable_t SkuTable; BoardTable_t BoardTable; } PPTable_t; +#pragma pack(pop) typedef struct { // Time constant parameters for clock averages in ms diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h index 8b8266890a1002..10cff75b44d5cf 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h @@ -94,6 +94,7 @@ //Resets #define PPSMC_MSG_PrepareMp1ForUnload 0x2E #define PPSMC_MSG_Mode1Reset 0x2F +#define PPSMC_MSG_Mode2Reset 0x4F //Set SystemVirtual DramAddrHigh #define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x30 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index 4180c71d930f1b..96f6c2db955b54 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -242,7 +242,8 @@ __SMU_DUMMY_MAP(LogGfxOffResidency), \ __SMU_DUMMY_MAP(SetNumBadMemoryPagesRetired), \ __SMU_DUMMY_MAP(SetBadMemoryPagesRetiredFlagsPerChannel), \ - __SMU_DUMMY_MAP(AllowGpo), + __SMU_DUMMY_MAP(AllowGpo), \ + __SMU_DUMMY_MAP(Mode2Reset), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index 992163e66f7b40..1c0ae2cb757b8d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -244,11 +244,6 @@ int smu_v13_0_set_single_dpm_table(struct smu_context *smu, enum smu_clk_type clk_type, struct smu_13_0_dpm_table *single_dpm_table); -int smu_v13_0_get_dpm_level_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t *min_value, - uint32_t *max_value); - int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu); int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 0bcd4fe0ef1776..95da6dd1cc656d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -494,6 +494,8 @@ static int navi10_tables_init(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; + struct smu_table *dummy_read_1_table = + &smu_table->dummy_read_1_table; SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); @@ -513,6 +515,10 @@ static int navi10_tables_init(struct smu_context *smu) SMU_TABLE_INIT(tables, SMU_TABLE_DRIVER_SMU_CONFIG, sizeof(DriverSmuConfig_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + dummy_read_1_table->size = 0x40000; + dummy_read_1_table->align = PAGE_SIZE; + dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM; + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_NV1X_t), GFP_KERNEL); if (!smu_table->metrics_table) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index ad66d57aa102e5..6492d69e2e60fa 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -93,7 +93,7 @@ static void smu_v11_0_poll_baco_exit(struct smu_context *smu) int smu_v11_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - const char *chip_name; + char ucode_prefix[30]; char fw_name[SMU_FW_NAME_LEN]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; @@ -105,43 +105,11 @@ int smu_v11_0_init_microcode(struct smu_context *smu) (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)))) return 0; - switch (adev->ip_versions[MP1_HWIP][0]) { - case IP_VERSION(11, 0, 0): - chip_name = "navi10"; - break; - case IP_VERSION(11, 0, 5): - chip_name = "navi14"; - break; - case IP_VERSION(11, 0, 9): - chip_name = "navi12"; - break; - case IP_VERSION(11, 0, 7): - chip_name = "sienna_cichlid"; - break; - case IP_VERSION(11, 0, 11): - chip_name = "navy_flounder"; - break; - case IP_VERSION(11, 0, 12): - chip_name = "dimgrey_cavefish"; - break; - case IP_VERSION(11, 0, 13): - chip_name = "beige_goby"; - break; - case IP_VERSION(11, 0, 2): - chip_name = "arcturus"; - break; - default: - dev_err(adev->dev, "Unsupported IP version 0x%x\n", - adev->ip_versions[MP1_HWIP][0]); - return -EINVAL; - } + amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); - err = request_firmware(&adev->pm.fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->pm.fw); + err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name); if (err) goto out; @@ -159,12 +127,8 @@ int smu_v11_0_init_microcode(struct smu_context *smu) } out: - if (err) { - DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n", - fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - } + if (err) + amdgpu_ucode_release(&adev->pm.fw); return err; } @@ -172,8 +136,7 @@ void smu_v11_0_fini_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; + amdgpu_ucode_release(&adev->pm.fw); adev->pm.fw_version = 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index b4373b6568ae6c..78945e79dbee15 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -88,7 +88,6 @@ static const int link_speed[] = {25, 50, 80, 160}; int smu_v13_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - const char *chip_name; char fw_name[30]; char ucode_prefix[30]; int err = 0; @@ -100,21 +99,11 @@ int smu_v13_0_init_microcode(struct smu_context *smu) if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->ip_versions[MP1_HWIP][0]) { - case IP_VERSION(13, 0, 2): - chip_name = "aldebaran_smc"; - break; - default: - amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - chip_name = ucode_prefix; - } + amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); - err = request_firmware(&adev->pm.fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->pm.fw); + err = amdgpu_ucode_request(adev, 
&adev->pm.fw, fw_name); if (err) goto out; @@ -132,12 +121,8 @@ int smu_v13_0_init_microcode(struct smu_context *smu) } out: - if (err) { - DRM_ERROR("smu_v13_0: Failed to load firmware \"%s\"\n", - fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - } + if (err) + amdgpu_ucode_release(&adev->pm.fw); return err; } @@ -145,8 +130,7 @@ void smu_v13_0_fini_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; + amdgpu_ucode_release(&adev->pm.fw); adev->pm.fw_version = 0; } @@ -2064,45 +2048,6 @@ int smu_v13_0_set_single_dpm_table(struct smu_context *smu, return 0; } -int smu_v13_0_get_dpm_level_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t *min_value, - uint32_t *max_value) -{ - uint32_t level_count = 0; - int ret = 0; - - if (!min_value && !max_value) - return -EINVAL; - - if (min_value) { - /* by default, level 0 clock value as min value */ - ret = smu_v13_0_get_dpm_freq_by_index(smu, - clk_type, - 0, - min_value); - if (ret) - return ret; - } - - if (max_value) { - ret = smu_v13_0_get_dpm_level_count(smu, - clk_type, - &level_count); - if (ret) - return ret; - - ret = smu_v13_0_get_dpm_freq_by_index(smu, - clk_type, - level_count - 1, - max_value); - if (ret) - return ret; - } - - return ret; -} - int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 508e392547d7ad..7c906ab3ddd2f6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -138,6 +138,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), + MSG_MAP(Mode2Reset, PPSMC_MSG_Mode2Reset, 0), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0), MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), @@ -242,6 +243,7 @@ static struct cmn2asic_mapping smu_v13_0_0_workload_map[PP_SMC_POWER_PROFILE_COU WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT), }; static const uint8_t smu_v13_0_0_throttler_map[] = { @@ -1563,7 +1565,7 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu, title[0], title[1], title[2], title[3], title[4], title[5], title[6], title[7], title[8], title[9]); - for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { + for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ workload_type = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_WORKLOAD, @@ -1625,7 +1627,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, smu->power_profile_mode = input[size]; - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { + if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); return -EINVAL; } @@ -1910,15 +1912,51 @@ static int smu_v13_0_0_set_df_cstate(struct smu_context *smu, NULL); } +static void 
smu_v13_0_0_set_mode1_reset_param(struct smu_context *smu, + uint32_t supported_version, + uint32_t *param) +{ + uint32_t smu_version; + struct amdgpu_device *adev = smu->adev; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + smu_cmn_get_smc_version(smu, NULL, &smu_version); + + if ((smu_version >= supported_version) && + ras && atomic_read(&ras->in_recovery)) + /* Set RAS fatal error reset flag */ + *param = 1 << 16; + else + *param = 0; +} + static int smu_v13_0_0_mode1_reset(struct smu_context *smu) { int ret; + uint32_t param; struct amdgpu_device *adev = smu->adev; - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) - ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset); - else + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(13, 0, 0): + /* SMU 13_0_0 PMFW supports RAS fatal error reset from 78.77 */ + smu_v13_0_0_set_mode1_reset_param(smu, 0x004e4d00, &param); + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_Mode1Reset, param, NULL); + break; + + case IP_VERSION(13, 0, 10): + /* SMU 13_0_10 PMFW supports RAS fatal error reset from 80.28 */ + smu_v13_0_0_set_mode1_reset_param(smu, 0x00501c00, &param); + + ret = smu_cmn_send_debug_smc_msg_with_param(smu, + DEBUGSMC_MSG_Mode1Reset, param); + break; + + default: ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL); + break; + } if (!ret) msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); @@ -1926,6 +1964,30 @@ static int smu_v13_0_0_mode1_reset(struct smu_context *smu) return ret; } +static int smu_v13_0_0_mode2_reset(struct smu_context *smu) +{ + int ret; + struct amdgpu_device *adev = smu->adev; + + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode2Reset, NULL); + else + return -EOPNOTSUPP; + + return ret; +} + +static int smu_v13_0_0_enable_gfx_features(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures, + FEATURE_PWR_GFX, NULL); + else + return -EOPNOTSUPP; +} + static void smu_v13_0_0_set_smu_mailbox_registers(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -2041,6 +2103,8 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .baco_exit = smu_v13_0_0_baco_exit, .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported, .mode1_reset = smu_v13_0_0_mode1_reset, + .mode2_reset = smu_v13_0_0_mode2_reset, + .enable_gfx_features = smu_v13_0_0_enable_gfx_features, .set_mp1_state = smu_v13_0_0_set_mp1_state, .set_df_cstate = smu_v13_0_0_set_df_cstate, .send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 768b6e7dbd7719..d5abafc5a68201 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -404,6 +404,12 @@ int smu_cmn_send_debug_smc_msg(struct smu_context *smu, return __smu_cmn_send_debug_msg(smu, msg, 0); } +int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu, + uint32_t msg, uint32_t param) +{ + return __smu_cmn_send_debug_msg(smu, msg, param); +} + int smu_cmn_to_asic_specific_index(struct smu_context *smu, enum smu_cmn2asic_mapping_type type, uint32_t index) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index f82cf76dd3a474..d7cd358a53bdcd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -45,6 +45,9 @@ int smu_cmn_send_smc_msg(struct smu_context *smu, int smu_cmn_send_debug_smc_msg(struct smu_context *smu, uint32_t msg); +int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu, + uint32_t msg, uint32_t param); + int smu_cmn_wait_for_response(struct smu_context *smu); int smu_cmn_to_asic_specific_index(struct smu_context *smu, diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index 4cc07d6bb9d82e..cea3fd5772b574 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -10,7 +10,6 @@ #include #include -#include #include #include diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c index 3f4e719eebd857..28f76e07dd9584 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c @@ -6,6 +6,7 @@ */ #include #include +#include #include #include #include diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h index 7339339ef6b87c..3a872c29209123 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index 7043d1c9ed8f23..e3507dd6f82a7b 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -195,8 +195,8 @@ static int hdlcd_setup_mode_config(struct drm_device *drm) #ifdef CONFIG_DEBUG_FS static int hdlcd_show_underrun_count(struct seq_file *m, void *arg) { - struct drm_info_node *node = (struct drm_info_node *)m->private; - struct drm_device *drm = node->minor->dev; + struct drm_debugfs_entry *entry = m->private; + struct drm_device *drm = entry->dev; struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm); seq_printf(m, "underrun : %d\n", atomic_read(&hdlcd->buffer_underrun_count)); @@ -208,8 +208,8 @@ static int hdlcd_show_underrun_count(struct seq_file *m, void *arg) static int hdlcd_show_pxlclock(struct seq_file *m, void *arg) { - struct drm_info_node *node = (struct drm_info_node *)m->private; - struct drm_device *drm = node->minor->dev; + struct drm_debugfs_entry *entry = m->private; + struct drm_device *drm = entry->dev; struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm); unsigned long clkrate = clk_get_rate(hdlcd->clk); unsigned long mode_clock = hdlcd->crtc.mode.crtc_clock * 1000; @@ -219,17 +219,10 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg) return 0; } -static struct drm_info_list hdlcd_debugfs_list[] = { +static struct drm_debugfs_info hdlcd_debugfs_list[] = { { "interrupt_count", hdlcd_show_underrun_count, 0 }, { "clocks", hdlcd_show_pxlclock, 0 }, }; - -static void hdlcd_debugfs_init(struct drm_minor *minor) -{ - drm_debugfs_create_files(hdlcd_debugfs_list, - ARRAY_SIZE(hdlcd_debugfs_list), - minor->debugfs_root, minor); -} #endif DEFINE_DRM_GEM_DMA_FOPS(fops); @@ -237,9 +230,6 @@ DEFINE_DRM_GEM_DMA_FOPS(fops); static const struct drm_driver hdlcd_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, -#ifdef CONFIG_DEBUG_FS - .debugfs_init = hdlcd_debugfs_init, -#endif .fops = &fops, .name = "hdlcd", .desc = "ARM HDLCD Controller DRM", @@ -303,6 +293,10 @@ static int hdlcd_drm_bind(struct device *dev) drm_mode_config_reset(drm); 
drm_kms_helper_poll_init(drm); +#ifdef CONFIG_DEBUG_FS + drm_debugfs_add_files(drm, hdlcd_debugfs_list, ARRAY_SIZE(hdlcd_debugfs_list)); +#endif + ret = drm_dev_register(drm, 0); if (ret) goto err_register; diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index 584cee123bd8ef..0e44f53e9fa409 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -129,7 +129,7 @@ int armada_fbdev_init(struct drm_device *dev) priv->fbdev = fbh; - drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs); + drm_fb_helper_prepare(dev, fbh, 32, &armada_fb_helper_funcs); ret = drm_fb_helper_init(dev, fbh); if (ret) { @@ -137,7 +137,7 @@ int armada_fbdev_init(struct drm_device *dev) goto err_fb_helper; } - ret = drm_fb_helper_initial_config(fbh, 32); + ret = drm_fb_helper_initial_config(fbh); if (ret) { DRM_ERROR("failed to set initial config\n"); goto err_fb_setup; @@ -147,6 +147,7 @@ int armada_fbdev_init(struct drm_device *dev) err_fb_setup: drm_fb_helper_fini(fbh); err_fb_helper: + drm_fb_helper_unprepare(fbh); priv->fbdev = NULL; return ret; } @@ -164,6 +165,8 @@ void armada_fbdev_fini(struct drm_device *dev) if (fbh->fb) fbh->fb->funcs->destroy(fbh->fb); + drm_fb_helper_unprepare(fbh); + priv->fbdev = NULL; } } diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c index 55a3444a51d8a8..7877a57b8e2657 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c @@ -5,7 +5,6 @@ #include #include -#include #include #include #include diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 718119e168a698..ecfb060d2557bf 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -14,7 +14,6 @@ #include #include -#include #include #include #include diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c index 4f2187025a215a..78775e0c853fd0 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c @@ -3,7 +3,6 @@ #include #include -#include #include #include diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig index d367a90cd3de0d..563fa7a3b546c8 100644 --- a/drivers/gpu/drm/ast/Kconfig +++ b/drivers/gpu/drm/ast/Kconfig @@ -4,6 +4,8 @@ config DRM_AST depends on DRM && PCI && MMU select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER + select I2C + select I2C_ALGOBIT help Say yes for experimental AST GPU driver. 
Do not enable this driver without having a working -modesetting, diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 420fc75c240e4d..d78852c7cf5b14 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -31,7 +31,6 @@ #include #include -#include #include #include #include diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index bffa310a04319e..f83ce77127cb48 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -29,7 +29,6 @@ #include #include -#include #include #include #include diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 66a4a41c3fe944..984ec590a7e7d6 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include #include @@ -636,7 +635,7 @@ static void ast_handle_damage(struct ast_plane *ast_plane, struct iosys_map *src struct drm_framebuffer *fb, const struct drm_rect *clip) { - struct iosys_map dst = IOSYS_MAP_INIT_VADDR(ast_plane->vaddr); + struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane->vaddr); iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip)); drm_fb_memcpy(&dst, fb->pitches, src, fb, clip); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index a2bb5b91623525..4e806b06d35d13 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -784,7 +784,6 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP static int atmel_hlcdc_dc_drm_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); @@ -815,10 +814,10 @@ static int atmel_hlcdc_dc_drm_resume(struct device *dev) return drm_atomic_helper_resume(drm_dev, dc->suspend.state); } -#endif -static SIMPLE_DEV_PM_OPS(atmel_hlcdc_dc_drm_pm_ops, - atmel_hlcdc_dc_drm_suspend, atmel_hlcdc_dc_drm_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(atmel_hlcdc_dc_drm_pm_ops, + atmel_hlcdc_dc_drm_suspend, + atmel_hlcdc_dc_drm_resume); static const struct of_device_id atmel_hlcdc_dc_of_match[] = { { .compatible = "atmel,hlcdc-display-controller" }, @@ -830,7 +829,7 @@ static struct platform_driver atmel_hlcdc_dc_platform_driver = { .remove = atmel_hlcdc_dc_drm_remove, .driver = { .name = "atmel-hlcdc-display-controller", - .pm = &atmel_hlcdc_dc_drm_pm_ops, + .pm = pm_sleep_ptr(&atmel_hlcdc_dc_drm_pm_ops), .of_match_table = atmel_hlcdc_dc_of_match, }, }; diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 57946d80b02dbb..8b2226f72b2459 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -15,17 +15,6 @@ config DRM_PANEL_BRIDGE menu "Display Interface Bridges" depends on DRM && DRM_BRIDGE -config DRM_CDNS_DSI - tristate "Cadence DPI/DSI bridge" - select DRM_KMS_HELPER - select DRM_MIPI_DSI - select DRM_PANEL_BRIDGE - select GENERIC_PHY_MIPI_DPHY - depends on OF - help - Support Cadence DPI to DSI bridge. This is an internal - bridge and is meant to be directly embedded in a SoC. 
- config DRM_CHIPONE_ICN6211 tristate "Chipone ICN6211 MIPI-DSI/RGB Converter bridge" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 1884803c6860dc..52f6e8b4a82178 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index e7a6e456ed0d07..ddceafa7b63741 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -1185,8 +1185,9 @@ static int adv7511_parse_dt(struct device_node *np, return 0; } -static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) +static int adv7511_probe(struct i2c_client *i2c) { + const struct i2c_device_id *id = i2c_client_get_device_id(i2c); struct adv7511_link_config link_config; struct adv7511 *adv7511; struct device *dev = &i2c->dev; @@ -1392,7 +1393,7 @@ static struct i2c_driver adv7511_driver = { .of_match_table = adv7511_of_ids, }, .id_table = adv7511_i2c_ids, - .probe = adv7511_probe, + .probe_new = adv7511_probe, .remove = adv7511_remove, }; diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c index 660a548579299c..3577c532abb4e3 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c +++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -692,8 +691,7 @@ static bool anx6345_get_chip_id(struct anx6345 *anx6345) return false; } -static int anx6345_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int anx6345_i2c_probe(struct i2c_client *client) { struct anx6345 *anx6345; struct device *dev; @@ -817,7 +815,7 @@ static struct i2c_driver anx6345_driver = { .name = "anx6345", .of_match_table = of_match_ptr(anx6345_match_table), }, - .probe = anx6345_i2c_probe, + .probe_new = anx6345_i2c_probe, .remove = anx6345_i2c_remove, .id_table = anx6345_id, }; diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c index 5997049fde5be3..a3a38bbe27860c 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c +++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c @@ -1214,8 +1214,7 @@ static const u16 anx78xx_chipid_list[] = { 0x7818, }; -static int anx78xx_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int anx78xx_i2c_probe(struct i2c_client *client) { struct anx78xx *anx78xx; struct anx78xx_platform_data *pdata; @@ -1390,7 +1389,7 @@ static struct i2c_driver anx78xx_driver = { .name = "anx7814", .of_match_table = of_match_ptr(anx78xx_match_table), }, - .probe = anx78xx_i2c_probe, + .probe_new = anx78xx_i2c_probe, .remove = anx78xx_i2c_remove, .id_table = anx78xx_id, }; diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index b0ff1ecb80a50b..6846199a2ee14a 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -1403,7 +1402,6 @@ static void anx7625_stop_dp_work(struct anx7625_data *ctx) { 
ctx->hpd_status = 0; ctx->hpd_high_cnt = 0; - ctx->display_timing_valid = 0; } static void anx7625_start_dp_work(struct anx7625_data *ctx) @@ -2562,8 +2560,7 @@ static void anx7625_runtime_disable(void *data) pm_runtime_disable(data); } -static int anx7625_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int anx7625_i2c_probe(struct i2c_client *client) { struct anx7625_data *platform; struct anx7625_platform_data *pdata; @@ -2756,7 +2753,7 @@ static struct i2c_driver anx7625_driver = { .of_match_table = anx_match_table, .pm = &anx7625_pm_ops, }, - .probe = anx7625_i2c_probe, + .probe_new = anx7625_i2c_probe, .remove = anx7625_i2c_remove, .id_table = anx7625_id, diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig index 1d06182bea7109..ec35215a20034c 100644 --- a/drivers/gpu/drm/bridge/cadence/Kconfig +++ b/drivers/gpu/drm/bridge/cadence/Kconfig @@ -1,4 +1,25 @@ # SPDX-License-Identifier: GPL-2.0-only +config DRM_CDNS_DSI + tristate "Cadence DPI/DSI bridge" + select DRM_KMS_HELPER + select DRM_MIPI_DSI + select DRM_PANEL_BRIDGE + select GENERIC_PHY_MIPI_DPHY + depends on OF + help + Support Cadence DPI to DSI bridge. This is an internal + bridge and is meant to be directly embedded in a SoC. + +if DRM_CDNS_DSI + +config DRM_CDNS_DSI_J721E + bool "J721E Cadence DSI wrapper support" + default y + help + Support J721E Cadence DSI wrapper. The wrapper manages + the routing of the DSS DPI signal to the Cadence DSI. +endif + config DRM_CDNS_MHDP8546 tristate "Cadence DPI/DP bridge" select DRM_DISPLAY_DP_HELPER diff --git a/drivers/gpu/drm/bridge/cadence/Makefile b/drivers/gpu/drm/bridge/cadence/Makefile index 4d2db8df1bc6e9..c95fd5b81d137a 100644 --- a/drivers/gpu/drm/bridge/cadence/Makefile +++ b/drivers/gpu/drm/bridge/cadence/Makefile @@ -1,4 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o +cdns-dsi-y := cdns-dsi-core.o +cdns-dsi-$(CONFIG_DRM_CDNS_DSI_J721E) += cdns-dsi-j721e.o obj-$(CONFIG_DRM_CDNS_MHDP8546) += cdns-mhdp8546.o cdns-mhdp8546-y := cdns-mhdp8546-core.o cdns-mhdp8546-hdcp.o cdns-mhdp8546-$(CONFIG_DRM_CDNS_MHDP8546_J721E) += cdns-mhdp8546-j721e.o diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c similarity index 97% rename from drivers/gpu/drm/bridge/cdns-dsi.c rename to drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c index 20bece84ff8cc7..5dbfc7226b31ee 100644 --- a/drivers/gpu/drm/bridge/cdns-dsi.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c @@ -6,10 +6,7 @@ */ #include -#include #include -#include -#include #include #include
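
A note on the #pragma pack(push, 1)/#pragma pack(pop) pairs added around the PPTable_t definitions in the driver interface headers above: these tables are shared byte-for-byte with the SMU firmware, so the driver-side struct layout must contain no compiler-inserted padding. The following is a minimal, self-contained user-space sketch of the effect; the ExampleUnpacked_t/ExamplePacked_t types and their fields are hypothetical stand-ins for illustration, not actual driver structures.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: without packing, most ABIs align the uint32_t member
 * to 4 bytes, inserting 2 bytes of padding after 'vid'. */
typedef struct {
	uint16_t vid;
	uint32_t power_limit;
} ExampleUnpacked_t;

#pragma pack(push, 1)
typedef struct {
	uint16_t vid;
	uint32_t power_limit;	/* offset is exactly 2, no padding */
} ExamplePacked_t;
#pragma pack(pop)

int main(void)
{
	/* On common ABIs this prints "8 6": packing removes the padding,
	 * keeping every field offset identical to the firmware's byte
	 * layout, which is what the hunks above rely on. */
	printf("%zu %zu\n", sizeof(ExampleUnpacked_t), sizeof(ExamplePacked_t));
	return 0;
}
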