Merge tag 'drm-intel-next-2026-03-30' of https://gitlab.freedesktop.org/drm/i915/kernel into drm-next

drm/i915 feature pull #2 for v7.1:

Refactoring and cleanups:
- Refactor LT PHY PLL handling to use the DPLL framework (Mika)
- Implement display register polling and waits in display code (Ville)
- Move PCH clock gating into the display PCH file (Luca)
- Add shared stepping info header for i915 and display (Jani)
- Clean up GVT I2C command decoding (Jonathan)
- NV12 plane unlinking cleanups (Ville)
- Clean up NV12 DDB/watermark handling for pre-ICL platforms (Ville)

Fixes:
- An assortment of DSI fixes (Ville)
- Handle PORT_NONE in assert_port_valid() (Jonathan)
- Fix link failure without FBDEV emulation (Arnd Bergmann)
- Quirk disable panel replay on certain Dell XPS models (Jouni)
- Check if VESA DPCD AUX backlight is possible (Suraj)

Other:
- Mailmap update for Christoph (Christoph)

Signed-off-by: Dave Airlie <airlied@redhat.com>

# Conflicts:
#	drivers/gpu/drm/i915/display/intel_plane.c
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patch.msgid.link/ac9dfdb745d5a67c519ea150a6f36f8f74b8760e@intel.com
This commit is contained in:
Dave Airlie 2026-03-31 16:38:49 +10:00
commit 28899037b8
42 changed files with 1007 additions and 671 deletions

View File

@ -196,6 +196,7 @@ Christophe Leroy <chleroy@kernel.org> <christophe.leroy2@cs-soprasteria.com>
Christophe Ricard <christophe.ricard@gmail.com>
Christopher Obbard <christopher.obbard@linaro.org> <chris.obbard@collabora.com>
Christoph Hellwig <hch@lst.de>
Christoph Manszewski <c.manszewski@gmail.com> <christoph.manszewski@intel.com>
Chuck Lever <chuck.lever@oracle.com> <cel@kernel.org>
Chuck Lever <chuck.lever@oracle.com> <cel@netapp.com>
Chuck Lever <chuck.lever@oracle.com> <cel@citi.umich.edu>

View File

@ -254,6 +254,7 @@ i915-y += \
display/intel_crtc_state_dump.o \
display/intel_cursor.o \
display/intel_dbuf_bw.o \
display/intel_de.o \
display/intel_display.o \
display/intel_display_conversion.o \
display/intel_display_driver.o \

View File

@ -711,7 +711,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
tmp = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans));
if (intel_dsi->eotp_pkt)
if (intel_dsi->eot_pkt)
tmp &= ~EOTP_DISABLED;
else
tmp |= EOTP_DISABLED;
@ -729,6 +729,12 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
else
tmp |= CLK_HS_CONTINUOUS;
if (DISPLAY_VER(display) >= 12 &&
intel_dsi->lp_clock_during_lpm)
tmp |= LP_CLK_DURING_LPM;
else
tmp &= ~LP_CLK_DURING_LPM;
/* configure buffer threshold limit to minimum */
tmp &= ~PIX_BUF_THRESHOLD_MASK;
tmp |= PIX_BUF_THRESHOLD_1_4;
@ -765,10 +771,11 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
}
}
if (DISPLAY_VER(display) >= 12) {
if (is_vid_mode(intel_dsi))
tmp |= BLANKING_PACKET_ENABLE;
}
if (DISPLAY_VER(display) >= 12 &&
is_vid_mode(intel_dsi) && intel_dsi->blanking_pkt)
tmp |= BLANKING_PACKET_ENABLE;
else
tmp &= ~BLANKING_PACKET_ENABLE;
/* program DSI operation mode */
if (is_vid_mode(intel_dsi)) {
@ -888,7 +895,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
* non-compressed link speeds, and simplifies down to the ratio between
* compressed and non-compressed bpp.
*/
if (crtc_state->dsc.compression_enable) {
if (is_vid_mode(intel_dsi) && crtc_state->dsc.compression_enable) {
mul = fxp_q4_to_int(crtc_state->dsc.compressed_bpp_x16);
div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
}
@ -1502,7 +1509,7 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
if (pipe_config->dsc.compressed_bpp_x16) {
if (is_vid_mode(intel_dsi) && pipe_config->dsc.compressed_bpp_x16) {
int div = fxp_q4_to_int(pipe_config->dsc.compressed_bpp_x16);
int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);

View File

@ -227,12 +227,13 @@
#define CLK_ENTER_LP_AFTER_DATA (0x0 << 8)
#define CLK_HS_OR_LP (0x2 << 8)
#define CLK_HS_CONTINUOUS (0x3 << 8)
#define LP_CLK_DURING_LPM (1 << 7) /* tgl+ */
#define LINK_CALIBRATION_MASK (0x3 << 4)
#define LINK_CALIBRATION_SHIFT 4
#define CALIBRATION_DISABLED (0x0 << 4)
#define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4)
#define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4)
#define BLANKING_PACKET_ENABLE (1 << 2)
#define BLANKING_PACKET_ENABLE (1 << 2) /* tgl+ */
#define S3D_ORIENTATION_LANDSCAPE (1 << 1)
#define EOTP_DISABLED (1 << 0)

View File

@ -2180,7 +2180,7 @@ static int intel_c10pll_calc_state(const struct intel_crtc_state *crtc_state,
return 0;
}
static int readout_enabled_lane_count(struct intel_encoder *encoder)
int intel_readout_lane_count(struct intel_encoder *encoder, int lane0, int lane1)
{
struct intel_display *display = to_intel_display(encoder);
u8 enabled_tx_lane_count = 0;
@ -2212,7 +2212,7 @@ static int readout_enabled_lane_count(struct intel_encoder *encoder)
max_tx_lane_count = round_up(max_tx_lane_count, 2);
for (tx_lane = 0; tx_lane < max_tx_lane_count; tx_lane++) {
u8 phy_lane_mask = tx_lane < 2 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1;
u8 phy_lane_mask = tx_lane < 2 ? lane0 : lane1;
int tx = tx_lane % 2 + 1;
u8 val;
@ -2252,7 +2252,8 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
*/
intel_c10_msgbus_access_begin(encoder, lane);
cx0pll_state->lane_count = readout_enabled_lane_count(encoder);
cx0pll_state->lane_count = intel_readout_lane_count(encoder, INTEL_CX0_LANE0,
INTEL_CX0_LANE1);
for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
pll_state->pll[i] = intel_cx0_read(encoder, lane, PHY_C10_VDR_PLL(i));
@ -2707,7 +2708,8 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
wakeref = intel_cx0_phy_transaction_begin(encoder);
cx0pll_state->lane_count = readout_enabled_lane_count(encoder);
cx0pll_state->lane_count = intel_readout_lane_count(encoder, INTEL_CX0_LANE0,
INTEL_CX0_LANE1);
/* 1. Read VDR params and current context selection */
intel_c20_readout_vdr_params(encoder, &pll_state->vdr, &cntx);

View File

@ -28,6 +28,7 @@ struct intel_hdmi;
void intel_cx0_clear_response_ready_flag(struct intel_encoder *encoder,
int lane);
bool intel_encoder_is_c10phy(struct intel_encoder *encoder);
int intel_readout_lane_count(struct intel_encoder *encoder, int lane0, int lane1);
void intel_mtl_pll_enable(struct intel_encoder *encoder,
struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state);

View File

@ -4243,21 +4243,6 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
&crtc_state->dpll_hw_state);
}
static void xe3plpd_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
intel_lt_phy_pll_readout_hw_state(encoder, crtc_state, &crtc_state->dpll_hw_state.ltpll);
if (crtc_state->dpll_hw_state.ltpll.tbt_mode)
crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
else
crtc_state->port_clock =
intel_lt_phy_calc_port_clock(display, &crtc_state->dpll_hw_state.ltpll);
intel_ddi_get_config(encoder, crtc_state);
}
static bool icl_ddi_tc_pll_is_tbt(const struct intel_dpll *pll)
{
return pll->info->id == DPLL_ID_ICL_TBTPLL;
@ -5298,10 +5283,13 @@ void intel_ddi_init(struct intel_display *display,
encoder->pipe_mask = ~0;
if (HAS_LT_PHY(display)) {
encoder->enable_clock = intel_xe3plpd_pll_enable;
encoder->disable_clock = intel_xe3plpd_pll_disable;
encoder->port_pll_type = intel_mtl_port_pll_type;
encoder->get_config = xe3plpd_ddi_get_config;
encoder->enable_clock = intel_mtl_pll_enable_clock;
encoder->disable_clock = intel_mtl_pll_disable_clock;
encoder->port_pll_type = icl_ddi_tc_port_pll_type;
if (intel_encoder_is_tc(encoder))
encoder->get_config = mtl_ddi_tc_phy_get_config;
else
encoder->get_config = mtl_ddi_non_tc_phy_get_config;
} else if (DISPLAY_VER(display) >= 14) {
encoder->enable_clock = intel_mtl_pll_enable_clock;
encoder->disable_clock = intel_mtl_pll_disable_clock;

View File

@ -0,0 +1,178 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2026 Intel Corporation
*/
#include <linux/delay.h>
#include <drm/drm_print.h>
#include "intel_de.h"
/*
 * Poll a display register until (reg & mask) == value or the timeout expires.
 *
 * Uses exponential backoff: the per-iteration wait starts at 10 us (1 us for
 * very short timeouts) and doubles each round, capped at 1000 us.  The final
 * register value is stored through @out_val (if non-NULL) even on timeout.
 *
 * Returns 0 on success, -ETIMEDOUT if the condition was not met in time.
 */
static int __intel_de_wait_for_register(struct intel_display *display,
					i915_reg_t reg, u32 mask, u32 value,
					unsigned int timeout_us,
					u32 (*read)(struct intel_display *display, i915_reg_t reg),
					u32 *out_val, bool is_atomic)
{
	const ktime_t end = ktime_add_us(ktime_get_raw(), timeout_us);
	int wait_max = 1000;	/* backoff cap, in us */
	int wait = 10;		/* initial backoff interval, in us */
	u32 reg_value;
	int ret;

	/* Sleeping waits are only legal outside atomic context. */
	might_sleep_if(!is_atomic);

	/* Very short timeouts are busy-waited at 1 us granularity. */
	if (timeout_us <= 10) {
		is_atomic = true;
		wait = 1;
	}

	for (;;) {
		bool expired = ktime_after(ktime_get_raw(), end);

		/* guarantee the condition is evaluated after timeout expired */
		barrier();

		/* One final read happens even after expiry, before giving up. */
		reg_value = read(display, reg);
		if ((reg_value & mask) == value) {
			ret = 0;
			break;
		}

		if (expired) {
			ret = -ETIMEDOUT;
			break;
		}

		if (is_atomic)
			udelay(wait);
		else
			usleep_range(wait, wait << 1);

		if (wait < wait_max)
			wait <<= 1;	/* exponential backoff */
	}

	if (out_val)
		*out_val = reg_value;

	return ret;
}
/*
 * Two-phase register wait: an optional short "fast" poll followed by an
 * optional longer "slow" poll.  Returns 0 on success, -ETIMEDOUT if both
 * phases expired, or -EINVAL when no timeout at all was given.
 */
static int intel_de_wait_for_register(struct intel_display *display,
				      i915_reg_t reg, u32 mask, u32 value,
				      unsigned int fast_timeout_us,
				      unsigned int slow_timeout_us,
				      u32 (*read)(struct intel_display *display, i915_reg_t reg),
				      u32 *out_value, bool is_atomic)
{
	int err = -EINVAL;

	if (fast_timeout_us)
		err = __intel_de_wait_for_register(display, reg, mask, value,
						   fast_timeout_us, read,
						   out_value, is_atomic);

	/* The fast phase succeeded; no need for the slow one. */
	if (!err)
		return 0;

	if (slow_timeout_us)
		err = __intel_de_wait_for_register(display, reg, mask, value,
						   slow_timeout_us, read,
						   out_value, is_atomic);

	return err;
}
/*
 * Wait on a display register with a microsecond timeout, holding the DMC
 * wakelock across the poll.  Returns 0 on success, negative error otherwise.
 */
int intel_de_wait_us(struct intel_display *display, i915_reg_t reg,
		     u32 mask, u32 value, unsigned int timeout_us,
		     u32 *out_value)
{
	int err;

	/* Keep the DMC wakelock held for the full duration of the poll. */
	intel_dmc_wl_get(display, reg);
	err = intel_de_wait_for_register(display, reg, mask, value,
					 timeout_us, 0,
					 intel_de_read,
					 out_value, false);
	intel_dmc_wl_put(display, reg);

	return err;
}
/*
 * Wait on a display register with a millisecond timeout (preceded by a 2 us
 * fast poll), holding the DMC wakelock across the poll.
 */
int intel_de_wait_ms(struct intel_display *display, i915_reg_t reg,
		     u32 mask, u32 value, unsigned int timeout_ms,
		     u32 *out_value)
{
	int err;

	/* Keep the DMC wakelock held for the full duration of the poll. */
	intel_dmc_wl_get(display, reg);
	err = intel_de_wait_for_register(display, reg, mask, value,
					 2, timeout_ms * 1000,
					 intel_de_read,
					 out_value, false);
	intel_dmc_wl_put(display, reg);

	return err;
}
/*
 * Millisecond register wait using intel_de_read_fw(), without taking the
 * DMC wakelock (unlike intel_de_wait_ms()).  A 2 us fast poll runs first.
 */
int intel_de_wait_fw_ms(struct intel_display *display, i915_reg_t reg,
			u32 mask, u32 value, unsigned int timeout_ms,
			u32 *out_value)
{
	return intel_de_wait_for_register(display, reg, mask, value,
					  2, timeout_ms * 1000,
					  intel_de_read_fw,
					  out_value, false);
}
/*
 * Atomic-context register wait: polls with intel_de_read_fw() and busy-waits
 * (is_atomic = true), without taking the DMC wakelock.  Safe to call where
 * sleeping is not allowed.
 */
int intel_de_wait_fw_us_atomic(struct intel_display *display, i915_reg_t reg,
			       u32 mask, u32 value, unsigned int timeout_us,
			       u32 *out_value)
{
	return intel_de_wait_for_register(display, reg, mask, value,
					  timeout_us, 0,
					  intel_de_read_fw,
					  out_value, true);
}
/* Wait (us timeout) for all bits in @mask to become set. */
int intel_de_wait_for_set_us(struct intel_display *display, i915_reg_t reg,
			     u32 mask, unsigned int timeout_us)
{
	return intel_de_wait_us(display, reg, mask, mask, timeout_us, NULL);
}
/* Wait (us timeout) for all bits in @mask to become clear. */
int intel_de_wait_for_clear_us(struct intel_display *display, i915_reg_t reg,
			       u32 mask, unsigned int timeout_us)
{
	return intel_de_wait_us(display, reg, mask, 0, timeout_us, NULL);
}
/* Wait (ms timeout) for all bits in @mask to become set. */
int intel_de_wait_for_set_ms(struct intel_display *display, i915_reg_t reg,
			     u32 mask, unsigned int timeout_ms)
{
	return intel_de_wait_ms(display, reg, mask, mask, timeout_ms, NULL);
}
/* Wait (ms timeout) for all bits in @mask to become clear. */
int intel_de_wait_for_clear_ms(struct intel_display *display, i915_reg_t reg,
			       u32 mask, unsigned int timeout_ms)
{
	return intel_de_wait_ms(display, reg, mask, 0, timeout_ms, NULL);
}
/* 8-bit display register read; warns on platforms where this is unexpected. */
u8 intel_de_read8(struct intel_display *display, i915_reg_t reg)
{
	/* this is only used on VGA registers (possible on pre-g4x) */
	drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 5 || display->platform.g4x);

	return intel_uncore_read8(__to_uncore(display), reg);
}
/* 8-bit display register write; same platform restriction as intel_de_read8(). */
void intel_de_write8(struct intel_display *display, i915_reg_t reg, u8 val)
{
	drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 5 || display->platform.g4x);

	intel_uncore_write8(__to_uncore(display), reg, val);
}

View File

@ -6,8 +6,6 @@
#ifndef __INTEL_DE_H__
#define __INTEL_DE_H__
#include <drm/drm_print.h>
#include "intel_display_core.h"
#include "intel_dmc_wl.h"
#include "intel_dsb.h"
@ -19,6 +17,9 @@ static inline struct intel_uncore *__to_uncore(struct intel_display *display)
return to_intel_uncore(display->drm);
}
u8 intel_de_read8(struct intel_display *display, i915_reg_t reg);
void intel_de_write8(struct intel_display *display, i915_reg_t reg, u8 val);
static inline u32
intel_de_read(struct intel_display *display, i915_reg_t reg)
{
@ -33,23 +34,6 @@ intel_de_read(struct intel_display *display, i915_reg_t reg)
return val;
}
static inline u8
intel_de_read8(struct intel_display *display, i915_reg_t reg)
{
/* this is only used on VGA registers (possible on pre-g4x) */
drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 5 || display->platform.g4x);
return intel_uncore_read8(__to_uncore(display), reg);
}
static inline void
intel_de_write8(struct intel_display *display, i915_reg_t reg, u8 val)
{
drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 5 || display->platform.g4x);
intel_uncore_write8(__to_uncore(display), reg, val);
}
static inline u64
intel_de_read64_2x32(struct intel_display *display,
i915_reg_t lower_reg, i915_reg_t upper_reg)
@ -102,85 +86,26 @@ intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set)
return val;
}
static inline int
intel_de_wait_us(struct intel_display *display, i915_reg_t reg,
u32 mask, u32 value, unsigned int timeout_us,
u32 *out_value)
{
int ret;
intel_dmc_wl_get(display, reg);
ret = __intel_wait_for_register(__to_uncore(display), reg, mask,
value, timeout_us, 0, out_value);
intel_dmc_wl_put(display, reg);
return ret;
}
static inline int
intel_de_wait_ms(struct intel_display *display, i915_reg_t reg,
u32 mask, u32 value, unsigned int timeout_ms,
u32 *out_value)
{
int ret;
intel_dmc_wl_get(display, reg);
ret = __intel_wait_for_register(__to_uncore(display), reg, mask,
value, 2, timeout_ms, out_value);
intel_dmc_wl_put(display, reg);
return ret;
}
static inline int
intel_de_wait_fw_ms(struct intel_display *display, i915_reg_t reg,
u32 mask, u32 value, unsigned int timeout_ms,
u32 *out_value)
{
return __intel_wait_for_register_fw(__to_uncore(display), reg, mask,
value, 2, timeout_ms, out_value);
}
static inline int
intel_de_wait_fw_us_atomic(struct intel_display *display, i915_reg_t reg,
u32 mask, u32 value, unsigned int timeout_us,
u32 *out_value)
{
return __intel_wait_for_register_fw(__to_uncore(display), reg, mask,
value, timeout_us, 0, out_value);
}
static inline int
intel_de_wait_for_set_us(struct intel_display *display, i915_reg_t reg,
u32 mask, unsigned int timeout_us)
{
return intel_de_wait_us(display, reg, mask, mask, timeout_us, NULL);
}
static inline int
intel_de_wait_for_clear_us(struct intel_display *display, i915_reg_t reg,
u32 mask, unsigned int timeout_us)
{
return intel_de_wait_us(display, reg, mask, 0, timeout_us, NULL);
}
static inline int
intel_de_wait_for_set_ms(struct intel_display *display, i915_reg_t reg,
u32 mask, unsigned int timeout_ms)
{
return intel_de_wait_ms(display, reg, mask, mask, timeout_ms, NULL);
}
static inline int
intel_de_wait_for_clear_ms(struct intel_display *display, i915_reg_t reg,
u32 mask, unsigned int timeout_ms)
{
return intel_de_wait_ms(display, reg, mask, 0, timeout_ms, NULL);
}
int intel_de_wait_us(struct intel_display *display, i915_reg_t reg,
u32 mask, u32 value, unsigned int timeout_us,
u32 *out_value);
int intel_de_wait_ms(struct intel_display *display, i915_reg_t reg,
u32 mask, u32 value, unsigned int timeout_ms,
u32 *out_value);
int intel_de_wait_fw_ms(struct intel_display *display, i915_reg_t reg,
u32 mask, u32 value, unsigned int timeout_ms,
u32 *out_value);
int intel_de_wait_fw_us_atomic(struct intel_display *display, i915_reg_t reg,
u32 mask, u32 value, unsigned int timeout_us,
u32 *out_value);
int intel_de_wait_for_set_us(struct intel_display *display, i915_reg_t reg,
u32 mask, unsigned int timeout_us);
int intel_de_wait_for_clear_us(struct intel_display *display, i915_reg_t reg,
u32 mask, unsigned int timeout_us);
int intel_de_wait_for_set_ms(struct intel_display *display, i915_reg_t reg,
u32 mask, unsigned int timeout_ms);
int intel_de_wait_for_clear_ms(struct intel_display *display, i915_reg_t reg,
u32 mask, unsigned int timeout_ms);
/*
* Unlocked mmio-accessors, think carefully before using these.

View File

@ -5063,24 +5063,6 @@ static bool allow_vblank_delay_fastset(const struct intel_crtc_state *old_crtc_s
!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI);
}
static void
pipe_config_lt_phy_pll_mismatch(struct drm_printer *p, bool fastset,
const struct intel_crtc *crtc,
const char *name,
const struct intel_lt_phy_pll_state *a,
const struct intel_lt_phy_pll_state *b)
{
struct intel_display *display = to_intel_display(crtc);
char *chipname = "LTPHY";
pipe_config_mismatch(p, fastset, crtc, name, chipname);
drm_printf(p, "expected:\n");
intel_lt_phy_dump_hw_state(display, a);
drm_printf(p, "found:\n");
intel_lt_phy_dump_hw_state(display, b);
}
bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
@ -5195,16 +5177,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
#define PIPE_CONF_CHECK_PLL_LT(name) do { \
if (!intel_lt_phy_pll_compare_hw_state(&current_config->name, \
&pipe_config->name)) { \
pipe_config_lt_phy_pll_mismatch(&p, fastset, crtc, __stringify(name), \
&current_config->name, \
&pipe_config->name); \
ret = false; \
} \
} while (0)
#define PIPE_CONF_CHECK_TIMINGS(name) do { \
PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
PIPE_CONF_CHECK_I(name.crtc_htotal); \
@ -5431,10 +5403,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (display->dpll.mgr || HAS_GMCH(display))
PIPE_CONF_CHECK_PLL(dpll_hw_state);
/* FIXME convert MTL+ platforms over to dpll_mgr */
if (HAS_LT_PHY(display))
PIPE_CONF_CHECK_PLL_LT(dpll_hw_state.ltpll);
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
PIPE_CONF_CHECK_X(dsi_pll.div);
@ -7896,7 +7864,8 @@ static bool intel_ddi_crt_present(struct intel_display *display)
bool assert_port_valid(struct intel_display *display, enum port port)
{
return !drm_WARN(display->drm, !(DISPLAY_RUNTIME_INFO(display)->port_mask & BIT(port)),
return !drm_WARN(display->drm,
!(port >= 0 && DISPLAY_RUNTIME_INFO(display)->port_mask & BIT(port)),
"Platform does not support port %c\n", port_name(port));
}

View File

@ -1654,6 +1654,28 @@ static void display_platforms_or(struct intel_display_platforms *dst,
bitmap_or(dst->bitmap, dst->bitmap, src->bitmap, display_platforms_num_bits());
}
/* Expands each step into a string-table entry indexed by its enum value. */
#define __STEP_NAME(name) [STEP_##name] = #name,

/*
 * Record the symbolic stepping in the runtime info and cache its printable
 * name.  step_names entries are 2 chars + NUL, hence the [3] element size;
 * strscpy() here is the kernel's 2-argument form that infers the destination
 * size from the array type.
 */
static void initialize_step(struct intel_display *display, enum intel_step step)
{
	static const char step_names[][3] = {
		STEP_NAME_LIST(__STEP_NAME)
	};

	DISPLAY_RUNTIME_INFO(display)->step = step;

	/* Step name will remain an empty string if not applicable */
	if (step >= 0 && step < ARRAY_SIZE(step_names))
		strscpy(DISPLAY_RUNTIME_INFO(display)->step_name, step_names[step]);
}

#undef __STEP_NAME
/* Printable stepping name; "N/A" when no name was cached by initialize_step(). */
static const char *step_name(const struct intel_display_runtime_info *runtime)
{
	return runtime->step_name[0] != '\0' ? runtime->step_name : "N/A";
}
struct intel_display *intel_display_device_probe(struct pci_dev *pdev,
const struct intel_display_parent_interface *parent)
{
@ -1731,14 +1753,14 @@ struct intel_display *intel_display_device_probe(struct pci_dev *pdev,
subdesc ? &subdesc->step_info : NULL);
}
DISPLAY_RUNTIME_INFO(display)->step = step;
initialize_step(display, step);
drm_info(display->drm, "Found %s%s%s (device ID %04x) %s display version %u.%02u stepping %s\n",
desc->name, subdesc ? "/" : "", subdesc ? subdesc->name : "",
pdev->device, display->platform.dgfx ? "discrete" : "integrated",
DISPLAY_RUNTIME_INFO(display)->ip.ver,
DISPLAY_RUNTIME_INFO(display)->ip.rel,
step != STEP_NONE ? intel_step_name(step) : "N/A");
step_name(DISPLAY_RUNTIME_INFO(display)));
return display;
@ -1954,7 +1976,7 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf
drm_printf(p, "display version: %u\n",
runtime->ip.ver);
drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step));
drm_printf(p, "display stepping: %s\n", step_name(runtime));
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->name))
DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);

View File

@ -287,6 +287,7 @@ struct intel_display_runtime_info {
u16 step; /* hardware */
} ip;
int step; /* symbolic */
char step_name[3]; /* empty string if not applicable */
u32 rawclk_freq;

View File

@ -835,6 +835,7 @@ struct intel_pipe_wm {
struct skl_wm_level {
u16 min_ddb_alloc;
u16 min_ddb_alloc_uv; /* for pre-icl */
u16 blocks;
u8 lines;
bool enable;
@ -845,13 +846,11 @@ struct skl_wm_level {
struct skl_plane_wm {
struct skl_wm_level wm[8];
struct skl_wm_level uv_wm[8];
struct skl_wm_level trans_wm;
struct {
struct skl_wm_level wm0;
struct skl_wm_level trans_wm;
} sagv;
bool is_planar;
};
struct skl_pipe_wm {

View File

@ -39,7 +39,6 @@
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_flipq.h"
#include "intel_step.h"
/**
* DOC: DMC Firmware Support
@ -418,15 +417,12 @@ bool intel_dmc_has_payload(struct intel_display *display)
return has_dmc_id_fw(display, DMC_FW_MAIN);
}
static const struct stepping_info *
intel_get_stepping_info(struct intel_display *display,
struct stepping_info *si)
static void initialize_stepping_info(struct intel_display *display, struct stepping_info *si)
{
const char *step_name = intel_step_name(INTEL_DISPLAY_STEP(display));
const char *step_name = DISPLAY_RUNTIME_INFO(display)->step_name;
si->stepping = step_name[0];
si->substepping = step_name[1];
return si;
si->stepping = step_name[0] ?: '*';
si->substepping = step_name[1] ?: '*';
}
static void gen9_set_dc_state_debugmask(struct intel_display *display)
@ -1274,8 +1270,7 @@ static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header_base *dmc_header;
struct stepping_info display_info = { '*', '*'};
const struct stepping_info *si = intel_get_stepping_info(display, &display_info);
struct stepping_info si = {};
enum intel_dmc_id dmc_id;
u32 readcount = 0;
u32 r, offset;
@ -1283,6 +1278,8 @@ static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
if (!fw)
return -EINVAL;
initialize_stepping_info(display, &si);
/* Extract CSS Header information */
css_header = (struct intel_css_header *)fw->data;
r = parse_dmc_fw_css(dmc, css_header, fw->size);
@ -1293,7 +1290,7 @@ static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
/* Extract Package Header information */
package_header = (struct intel_package_header *)&fw->data[readcount];
r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount);
r = parse_dmc_fw_package(dmc, package_header, &si, fw->size - readcount);
if (!r)
return -EINVAL;

View File

@ -609,6 +609,34 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
return 0;
}
/*
 * Sanity-check the panel's PWMGEN bit count capabilities before committing
 * to VESA AUX brightness control.  A panel advertising AUX set capability
 * with a zero minimum or an inverted min/max range cannot be driven.
 */
static bool
check_if_vesa_backlight_possible(struct intel_dp *intel_dp)
{
	u8 cap_min, cap_max;

	/* Without AUX brightness control there is nothing to validate. */
	if (!(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP))
		return true;

	if (drm_dp_dpcd_read_byte(&intel_dp->aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &cap_min) < 0)
		return false;

	cap_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
	if (cap_min < 1)
		return false;

	if (drm_dp_dpcd_read_byte(&intel_dp->aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &cap_max) < 0)
		return false;

	cap_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;

	return cap_max >= cap_min;
}
static bool
intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
{
@ -625,12 +653,14 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
return true;
}
if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
if (drm_edp_backlight_supported(intel_dp->edp_dpcd) &&
check_if_vesa_backlight_possible(intel_dp)) {
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] AUX Backlight Control Supported!\n",
connector->base.base.id, connector->base.name);
return true;
}
return false;
}

View File

@ -1212,29 +1212,6 @@ static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
static int xe3plpd_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
struct intel_display *display = to_intel_display(encoder);
int ret;
ret = intel_lt_phy_pll_calc_state(crtc_state, encoder);
if (ret)
return ret;
/* TODO: Do the readback via intel_compute_shared_dplls() */
crtc_state->port_clock =
intel_lt_phy_calc_port_clock(display, &crtc_state->dpll_hw_state.ltpll);
crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
return 0;
}
static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@ -1695,7 +1672,8 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
}
static const struct intel_dpll_global_funcs xe3plpd_dpll_funcs = {
.crtc_compute_clock = xe3plpd_crtc_compute_clock,
.crtc_compute_clock = hsw_crtc_compute_clock,
.crtc_get_dpll = hsw_crtc_get_dpll,
};
static const struct intel_dpll_global_funcs mtl_dpll_funcs = {

View File

@ -4571,6 +4571,170 @@ static const struct intel_dpll_mgr mtl_pll_mgr = {
.compare_hw_state = mtl_compare_hw_state,
};
/*
 * Read out the LT PHY PLL hardware state for @pll.  Fails when no encoder
 * is currently attached to the PLL, since the readout goes through the
 * encoder's PHY.
 */
static bool xe3plpd_pll_get_hw_state(struct intel_display *display,
				     struct intel_dpll *pll,
				     struct intel_dpll_hw_state *dpll_hw_state)
{
	struct intel_encoder *encoder = get_intel_encoder(display, pll);

	return encoder &&
		intel_lt_phy_pll_readout_hw_state(encoder, &dpll_hw_state->ltpll);
}
/*
 * Compute the port clock from the LT PHY PLL state.  The encoder lookup is
 * only a sanity check; the frequency itself is derived from @dpll_hw_state.
 */
static int xe3plpd_pll_get_freq(struct intel_display *display,
				const struct intel_dpll *pll,
				const struct intel_dpll_hw_state *dpll_hw_state)
{
	struct intel_encoder *encoder = get_intel_encoder(display, pll);

	if (drm_WARN_ON(display->drm, !encoder))
		return -EINVAL;

	return intel_lt_phy_calc_port_clock(display, &dpll_hw_state->ltpll);
}
/* Enable the PLL via the encoder's LT PHY; no-op (with a warning) if no encoder. */
static void xe3plpd_pll_enable(struct intel_display *display,
			       struct intel_dpll *pll,
			       const struct intel_dpll_hw_state *dpll_hw_state)
{
	struct intel_encoder *encoder = get_intel_encoder(display, pll);

	if (drm_WARN_ON(display->drm, !encoder))
		return;

	intel_xe3plpd_pll_enable(encoder, pll, dpll_hw_state);
}
/* Disable the PLL via the encoder's LT PHY; no-op (with a warning) if no encoder. */
static void xe3plpd_pll_disable(struct intel_display *display,
				struct intel_dpll *pll)
{
	struct intel_encoder *encoder = get_intel_encoder(display, pll);

	if (drm_WARN_ON(display->drm, !encoder))
		return;

	intel_xe3plpd_pll_disable(encoder);
}
/* TBT PLL ops reuse the MTL Thunderbolt helpers. */
static const struct intel_dpll_funcs xe3plpd_tbt_pll_funcs = {
	.enable = mtl_tbt_pll_enable,
	.disable = mtl_tbt_pll_disable,
	.get_hw_state = intel_lt_phy_tbt_pll_readout_hw_state,
	.get_freq = mtl_tbt_pll_get_freq,
};

/* Per-port LT PHY PLL ops. */
static const struct intel_dpll_funcs xe3plpd_pll_funcs = {
	.enable = xe3plpd_pll_enable,
	.disable = xe3plpd_pll_disable,
	.get_hw_state = xe3plpd_pll_get_hw_state,
	.get_freq = xe3plpd_pll_get_freq,
};

/*
 * PLL table for xe3plpd: two combo DPLLs, the always-on shared TBT PLL,
 * and four TC port PLLs.  The ICL DPLL ids are reused as the framework ids.
 */
static const struct dpll_info xe3plpd_plls[] = {
	{ .name = "DPLL 0", .funcs = &xe3plpd_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &xe3plpd_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &xe3plpd_tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, .always_on = true },
	{ .name = "TC PLL 1", .funcs = &xe3plpd_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "TC PLL 2", .funcs = &xe3plpd_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "TC PLL 3", .funcs = &xe3plpd_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "TC PLL 4", .funcs = &xe3plpd_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{}
};
/*
 * Compute the LT PHY PLL state for a non-TC (combo) port and derive the
 * port clock from it.  Only the DEFAULT port DPLL slot is used here.
 */
static int xe3plpd_compute_non_tc_phy_dpll(struct intel_atomic_state *state,
					   struct intel_crtc *crtc,
					   struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	int ret;

	ret = intel_lt_phy_pll_calc_state(crtc_state, encoder, &port_dpll->hw_state);
	if (ret)
		return ret;

	/* this is mainly for the fastset check */
	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);

	crtc_state->port_clock = intel_lt_phy_calc_port_clock(display, &port_dpll->hw_state.ltpll);

	return 0;
}
/*
 * Compute both candidate PLL states for a TC port: the TBT PLL state goes
 * into the DEFAULT slot, the per-port PHY PLL state into the MG_PHY slot.
 * The active slot is pre-selected to match the PLL the old state was using,
 * so a fastset compare sees a stable choice.  Note the port clock is always
 * derived from the MG_PHY (LT PHY) state.
 */
static int xe3plpd_compute_tc_phy_dplls(struct intel_atomic_state *state,
					struct intel_crtc *crtc,
					struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll;
	int ret;

	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_lt_phy_tbt_pll_calc_state(&port_dpll->hw_state);

	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	ret = intel_lt_phy_pll_calc_state(crtc_state, encoder, &port_dpll->hw_state);
	if (ret)
		return ret;

	/* this is mainly for the fastset check */
	if (old_crtc_state->intel_dpll &&
	    old_crtc_state->intel_dpll->info->id == DPLL_ID_ICL_TBTPLL)
		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
	else
		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);

	crtc_state->port_clock = intel_lt_phy_calc_port_clock(display, &port_dpll->hw_state.ltpll);

	return 0;
}
/* Dispatch PLL state computation by port type: TC ports get both TBT and
 * PHY candidates, non-TC ports only the PHY PLL. */
static int xe3plpd_compute_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	return intel_encoder_is_tc(encoder) ?
		xe3plpd_compute_tc_phy_dplls(state, crtc, encoder) :
		xe3plpd_compute_non_tc_phy_dpll(state, crtc, encoder);
}
/* Dump only the LT PHY PLL portion of the DPLL hw state. */
static void xe3plpd_dump_hw_state(struct drm_printer *p,
				  const struct intel_dpll_hw_state *dpll_hw_state)
{
	intel_lt_phy_dump_hw_state(p, &dpll_hw_state->ltpll);
}
/* Compare two DPLL hw states; only the LT PHY PLL portion matters here. */
static bool xe3plpd_compare_hw_state(const struct intel_dpll_hw_state *_a,
				     const struct intel_dpll_hw_state *_b)
{
	return intel_lt_phy_pll_compare_hw_state(&_a->ltpll, &_b->ltpll);
}
/*
 * DPLL manager for xe3plpd: LT-PHY-specific compute/dump/compare hooks,
 * reusing the MTL/ICL helpers for PLL assignment and refclk handling.
 */
static const struct intel_dpll_mgr xe3plpd_pll_mgr = {
	.dpll_info = xe3plpd_plls,
	.compute_dplls = xe3plpd_compute_dplls,
	.get_dplls = mtl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = xe3plpd_dump_hw_state,
	.compare_hw_state = xe3plpd_compare_hw_state,
};
/**
* intel_dpll_init - Initialize DPLLs
* @display: intel_display device
@ -4585,9 +4749,11 @@ void intel_dpll_init(struct intel_display *display)
mutex_init(&display->dpll.lock);
if (DISPLAY_VER(display) >= 35 || display->platform.dg2)
/* No shared DPLLs on NVL or DG2; port PLLs are part of the PHY */
if (display->platform.dg2)
/* No shared DPLLs on DG2; port PLLs are part of the PHY */
dpll_mgr = NULL;
else if (DISPLAY_VER(display) >= 35)
dpll_mgr = &xe3plpd_pll_mgr;
else if (DISPLAY_VER(display) >= 14)
dpll_mgr = &mtl_pll_mgr;
else if (display->platform.alderlake_p)
@ -4910,6 +5076,7 @@ verify_single_dpll_state(struct intel_display *display,
const struct intel_crtc_state *new_crtc_state)
{
struct intel_dpll_hw_state dpll_hw_state = {};
bool pll_mismatch = false;
u8 pipe_mask;
bool active;
@ -4951,9 +5118,18 @@ verify_single_dpll_state(struct intel_display *display,
"%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
pll->info->name, pipe_mask, pll->state.pipe_mask);
if (INTEL_DISPLAY_STATE_WARN(display,
pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
if (pll->on) {
const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
if (HAS_LT_PHY(display))
pll_mismatch = !dpll_mgr->compare_hw_state(&pll->state.hw_state,
&dpll_hw_state);
else
pll_mismatch = memcmp(&pll->state.hw_state, &dpll_hw_state,
sizeof(dpll_hw_state));
}
if (INTEL_DISPLAY_STATE_WARN(display, pll_mismatch,
"%s: pll hw state mismatch\n",
pll->info->name)) {
struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);

View File

@ -278,6 +278,7 @@ struct intel_lt_phy_pll_state {
u8 config[3];
bool ssc_enabled;
bool tbt_mode;
int lane_count;
};
struct intel_dpll_hw_state {

View File

@ -80,9 +80,10 @@ struct intel_dsi {
/* NON_BURST_SYNC_PULSE, NON_BURST_SYNC_EVENTS, or BURST_MODE */
int video_mode;
/* eot for MIPI_EOT_DISABLE register */
u8 eotp_pkt;
u8 clock_stop;
bool lp_clock_during_lpm;
bool blanking_pkt;
bool eot_pkt;
bool clock_stop;
u8 escape_clk_div;
u8 dual_link;

View File

@ -718,8 +718,10 @@ void intel_dsi_log_params(struct intel_dsi *intel_dsi)
"burst" : "<unknown>");
drm_printf(&p, "Burst mode ratio %d\n", intel_dsi->burst_mode_ratio);
drm_printf(&p, "Reset timer %d\n", intel_dsi->rst_timer_val);
drm_printf(&p, "Eot %s\n", str_enabled_disabled(intel_dsi->eotp_pkt));
drm_printf(&p, "Clockstop %s\n", str_enabled_disabled(!intel_dsi->clock_stop));
drm_printf(&p, "LP clock during LPM %s\n", str_enabled_disabled(intel_dsi->lp_clock_during_lpm));
drm_printf(&p, "Blanking packets during BLLP %s\n", str_enabled_disabled(intel_dsi->blanking_pkt));
drm_printf(&p, "EoT packet %s\n", str_enabled_disabled(intel_dsi->eot_pkt));
drm_printf(&p, "Clock stop during BLLP %s\n", str_enabled_disabled(intel_dsi->clock_stop));
drm_printf(&p, "Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
drm_printf(&p, "Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
@ -770,8 +772,10 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
drm_dbg_kms(display->drm, "\n");
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lp_clock_during_lpm = mipi_config->lp_clock_during_lpm;
intel_dsi->blanking_pkt = mipi_config->blanking_packets_during_bllp;
intel_dsi->eot_pkt = !mipi_config->eot_pkt_disabled;
intel_dsi->clock_stop = mipi_config->enable_clk_stop;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
intel_dsi->pixel_format =
vbt_to_dsi_pixel_format(mipi_config->videomode_color_format);

View File

@ -694,7 +694,7 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
goto clear_err;
}
/* Generate a STOP condition on the bus. Note that gmbus can't generata
/* Generate a STOP condition on the bus. Note that gmbus can't generate
* a STOP on the very first cycle. To simplify the code we
* unconditionally generate the STOP condition with an additional gmbus
* cycle. */

View File

@ -11,6 +11,7 @@
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dpll.h"
@ -32,6 +33,7 @@
INTEL_LT_PHY_LANE0)
#define MODE_DP 3
#define MODE_HDMI_20 4
#define MODE_HDMI_FRL 5
#define Q32_TO_INT(x) ((x) >> 32)
#define Q32_TO_FRAC(x) ((x) & 0xFFFFFFFF)
#define DCO_MIN_FREQ_MHZ 11850
@ -1176,9 +1178,30 @@ intel_lt_phy_lane_reset(struct intel_encoder *encoder,
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_phy_pulse_status, 0);
}
static bool intel_lt_phy_is_hdmi(const struct intel_lt_phy_pll_state *ltpll)
{
u8 mode = REG_FIELD_GET8(LT_PHY_VDR_MODE_ENCODING_MASK, ltpll->config[0]);
if (mode == MODE_HDMI_20 || mode == MODE_HDMI_FRL)
return true;
return false;
}
static bool intel_lt_phy_is_dp(const struct intel_lt_phy_pll_state *ltpll)
{
u8 mode = REG_FIELD_GET8(LT_PHY_VDR_MODE_ENCODING_MASK, ltpll->config[0]);
if (mode == MODE_DP)
return true;
return false;
}
static void
intel_lt_phy_program_port_clock_ctl(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct intel_lt_phy_pll_state *ltpll,
int port_clock,
bool lane_reversal)
{
struct intel_display *display = to_intel_display(encoder);
@ -1195,17 +1218,16 @@ intel_lt_phy_program_port_clock_ctl(struct intel_encoder *encoder,
* but since the register bits still remain the same we use
* the same definition
*/
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
intel_hdmi_is_frl(crtc_state->port_clock))
if (intel_lt_phy_is_hdmi(ltpll) && intel_hdmi_is_frl(port_clock))
val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
else
val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
/* DP2.0 10G and 20G rates enable MPLLA*/
if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000)
if (port_clock == 1000000 || port_clock == 2000000)
val |= XELPDP_SSC_ENABLE_PLLA;
else
val |= crtc_state->dpll_hw_state.ltpll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
val |= ltpll->ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
@ -1248,7 +1270,8 @@ static u32 intel_lt_phy_get_dp_clock(u8 rate)
static bool
intel_lt_phy_config_changed(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
const struct intel_lt_phy_pll_state *ltpll,
u32 port_clock)
{
u8 val, rate;
u32 clock;
@ -1262,9 +1285,9 @@ intel_lt_phy_config_changed(struct intel_encoder *encoder,
* using 1.62 Gbps clock since PHY PLL defaults to that
* otherwise we always need to reconfigure it.
*/
if (intel_crtc_has_dp_encoder(crtc_state)) {
if (intel_lt_phy_is_dp(ltpll)) {
clock = intel_lt_phy_get_dp_clock(rate);
if (crtc_state->port_clock == 1620000 && crtc_state->port_clock == clock)
if (port_clock == 1620000 && port_clock == clock)
return false;
}
@ -1723,12 +1746,15 @@ intel_lt_phy_calc_port_clock(struct intel_display *display,
int
intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
struct intel_encoder *encoder,
struct intel_dpll_hw_state *hw_state)
{
struct intel_display *display = to_intel_display(crtc_state);
const struct intel_lt_phy_pll_params *tables;
int i;
memset(hw_state, 0, sizeof(*hw_state));
tables = intel_lt_phy_pll_tables_get(crtc_state, encoder);
if (!tables)
return -EINVAL;
@ -1738,62 +1764,71 @@ intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state,
drm_WARN_ON(display->drm, !intel_dpll_clock_matches(clock, tables[i].clock_rate));
if (intel_dpll_clock_matches(crtc_state->port_clock, clock)) {
crtc_state->dpll_hw_state.ltpll = *tables[i].state;
hw_state->ltpll = *tables[i].state;
if (intel_crtc_has_dp_encoder(crtc_state)) {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
crtc_state->dpll_hw_state.ltpll.config[2] = 1;
hw_state->ltpll.config[2] = 1;
}
crtc_state->dpll_hw_state.ltpll.ssc_enabled =
hw_state->ltpll.ssc_enabled =
intel_lt_phy_pll_is_ssc_enabled(crtc_state, encoder);
hw_state->ltpll.lane_count = crtc_state->lane_count;
return 0;
}
}
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
return intel_lt_phy_calculate_hdmi_state(&crtc_state->dpll_hw_state.ltpll,
hw_state->ltpll.lane_count = crtc_state->lane_count;
return intel_lt_phy_calculate_hdmi_state(&hw_state->ltpll,
crtc_state->port_clock);
}
return -EINVAL;
}
void intel_lt_phy_tbt_pll_calc_state(struct intel_dpll_hw_state *hw_state)
{
memset(hw_state, 0, sizeof(*hw_state));
hw_state->ltpll.tbt_mode = true;
}
static void
intel_lt_phy_program_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
const struct intel_lt_phy_pll_state *ltpll)
{
u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
int i, j, k;
intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_VDR_0_CONFIG,
crtc_state->dpll_hw_state.ltpll.config[0], MB_WRITE_COMMITTED);
ltpll->config[0], MB_WRITE_COMMITTED);
intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_1_CONFIG,
crtc_state->dpll_hw_state.ltpll.config[1], MB_WRITE_COMMITTED);
ltpll->config[1], MB_WRITE_COMMITTED);
intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_VDR_2_CONFIG,
crtc_state->dpll_hw_state.ltpll.config[2], MB_WRITE_COMMITTED);
ltpll->config[2], MB_WRITE_COMMITTED);
for (i = 0; i <= 12; i++) {
intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_X_ADDR_MSB(i),
crtc_state->dpll_hw_state.ltpll.addr_msb[i],
ltpll->addr_msb[i],
MB_WRITE_COMMITTED);
intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_X_ADDR_LSB(i),
crtc_state->dpll_hw_state.ltpll.addr_lsb[i],
ltpll->addr_lsb[i],
MB_WRITE_COMMITTED);
for (j = 3, k = 0; j >= 0; j--, k++)
intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0,
LT_PHY_VDR_X_DATAY(i, j),
crtc_state->dpll_hw_state.ltpll.data[i][k],
ltpll->data[i][k],
MB_WRITE_COMMITTED);
}
}
static void
intel_lt_phy_enable_disable_tx(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
const struct intel_lt_phy_pll_state *ltpll)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool lane_reversal = dig_port->lane_reversal;
u8 lane_count = crtc_state->lane_count;
u8 lane_count = ltpll->lane_count;
bool is_dp_alt =
intel_tc_port_in_dp_alt_mode(dig_port);
enum intel_tc_pin_assignment tc_pin =
@ -1874,9 +1909,11 @@ intel_lt_phy_enable_disable_tx(struct intel_encoder *encoder,
}
void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
struct intel_display *display = to_intel_display(encoder);
int port_clock = intel_lt_phy_calc_port_clock(display, &dpll_hw_state->ltpll);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool lane_reversal = dig_port->lane_reversal;
u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
@ -1892,10 +1929,11 @@ void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
wakeref = intel_lt_phy_transaction_begin(encoder);
/* 1. Enable MacCLK at default 162 MHz frequency. */
intel_lt_phy_lane_reset(encoder, crtc_state->lane_count);
intel_lt_phy_lane_reset(encoder, dpll_hw_state->ltpll.lane_count);
/* 2. Program PORT_CLOCK_CTL register to configure clock muxes, gating, and SSC. */
intel_lt_phy_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
intel_lt_phy_program_port_clock_ctl(encoder, &dpll_hw_state->ltpll,
port_clock, lane_reversal);
/* 3. Change owned PHY lanes power to Ready state. */
intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask,
@ -1905,12 +1943,12 @@ void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
* 4. Read the PHY message bus VDR register PHY_VDR_0_Config check enabled PLL type,
* encoded rate and encoded mode.
*/
if (intel_lt_phy_config_changed(encoder, crtc_state)) {
if (intel_lt_phy_config_changed(encoder, &dpll_hw_state->ltpll, port_clock)) {
/*
* 5. Program the PHY internal PLL registers over PHY message bus for the desired
* frequency and protocol type
*/
intel_lt_phy_program_pll(encoder, crtc_state);
intel_lt_phy_program_pll(encoder, &dpll_hw_state->ltpll);
/* 6. Use the P2P transaction flow */
/*
@ -1942,8 +1980,7 @@ void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
* Change. We handle this step in bxt_set_cdclk().
*/
/* 10. Program DDI_CLK_VALFREQ to match intended DDI clock frequency. */
intel_de_write(display, DDI_CLK_VALFREQ(encoder->port),
crtc_state->port_clock);
intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), port_clock);
/* 11. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 1. */
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
@ -1990,7 +2027,7 @@ void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
lane_phy_pulse_status,
lane_phy_pulse_status);
} else {
intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), crtc_state->port_clock);
intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), port_clock);
}
/*
@ -2001,7 +2038,7 @@ void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask,
XELPDP_P0_STATE_ACTIVE);
intel_lt_phy_enable_disable_tx(encoder, crtc_state);
intel_lt_phy_enable_disable_tx(encoder, &dpll_hw_state->ltpll);
intel_lt_phy_transaction_end(encoder, wakeref);
}
@ -2136,21 +2173,23 @@ void intel_lt_phy_set_signal_levels(struct intel_encoder *encoder,
intel_lt_phy_transaction_end(encoder, wakeref);
}
void intel_lt_phy_dump_hw_state(struct intel_display *display,
void intel_lt_phy_dump_hw_state(struct drm_printer *p,
const struct intel_lt_phy_pll_state *hw_state)
{
int i, j;
drm_dbg_kms(display->drm, "lt_phy_pll_hw_state:\n");
drm_printf(p, "lt_phy_pll_hw_state: lane count: %d, ssc enabled: %d, tbt mode: %d\n",
hw_state->lane_count, hw_state->ssc_enabled, hw_state->tbt_mode);
for (i = 0; i < 3; i++) {
drm_dbg_kms(display->drm, "config[%d] = 0x%.4x,\n",
i, hw_state->config[i]);
drm_printf(p, "config[%d] = 0x%.4x,\n",
i, hw_state->config[i]);
}
for (i = 0; i <= 12; i++)
for (j = 3; j >= 0; j--)
drm_dbg_kms(display->drm, "vdr_data[%d][%d] = 0x%.4x,\n",
i, j, hw_state->data[i][j]);
drm_printf(p, "vdr_data[%d][%d] = 0x%.4x,\n",
i, j, hw_state->data[i][j]);
}
bool
@ -2174,8 +2213,26 @@ intel_lt_phy_pll_compare_hw_state(const struct intel_lt_phy_pll_state *a,
return false;
}
void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
static bool intel_lt_phy_pll_is_enabled(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
return intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)) &
XELPDP_LANE_PCLK_PLL_ACK(0);
}
bool intel_lt_phy_tbt_pll_readout_hw_state(struct intel_display *display,
struct intel_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
memset(hw_state, 0, sizeof(*hw_state));
hw_state->ltpll.tbt_mode = true;
return true;
}
bool intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_lt_phy_pll_state *pll_state)
{
u8 owned_lane_mask;
@ -2183,14 +2240,19 @@ void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
struct ref_tracker *wakeref;
int i, j, k;
if (!intel_lt_phy_pll_is_enabled(encoder))
return false;
pll_state->tbt_mode = intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder));
if (pll_state->tbt_mode)
return;
return false;
owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
lane = owned_lane_mask & INTEL_LT_PHY_LANE0 ? : INTEL_LT_PHY_LANE1;
wakeref = intel_lt_phy_transaction_begin(encoder);
pll_state->lane_count = intel_readout_lane_count(encoder, INTEL_LT_PHY_LANE0,
INTEL_LT_PHY_LANE1);
pll_state->config[0] = intel_lt_phy_read(encoder, lane, LT_PHY_VDR_0_CONFIG);
pll_state->config[1] = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_1_CONFIG);
pll_state->config[2] = intel_lt_phy_read(encoder, lane, LT_PHY_VDR_2_CONFIG);
@ -2203,56 +2265,15 @@ void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
}
intel_lt_phy_transaction_end(encoder, wakeref);
}
void intel_lt_phy_pll_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
struct intel_digital_port *dig_port;
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
struct intel_lt_phy_pll_state pll_hw_state = {};
const struct intel_lt_phy_pll_state *pll_sw_state = &new_crtc_state->dpll_hw_state.ltpll;
if (DISPLAY_VER(display) < 35)
return;
if (!new_crtc_state->hw.active)
return;
/* intel_get_crtc_new_encoder() only works for modeset/fastset commits */
if (!intel_crtc_needs_modeset(new_crtc_state) &&
!intel_crtc_needs_fastset(new_crtc_state))
return;
encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
intel_lt_phy_pll_readout_hw_state(encoder, new_crtc_state, &pll_hw_state);
dig_port = enc_to_dig_port(encoder);
if (intel_tc_port_in_tbt_alt_mode(dig_port))
return;
INTEL_DISPLAY_STATE_WARN(display, pll_hw_state.config[0] != pll_sw_state->config[0],
"[CRTC:%d:%s] mismatch in LT PHY PLL CONFIG 0: (expected 0x%04x, found 0x%04x)",
crtc->base.base.id, crtc->base.name,
pll_sw_state->config[0], pll_hw_state.config[0]);
INTEL_DISPLAY_STATE_WARN(display, pll_hw_state.config[2] != pll_sw_state->config[2],
"[CRTC:%d:%s] mismatch in LT PHY PLL CONFIG 2: (expected 0x%04x, found 0x%04x)",
crtc->base.base.id, crtc->base.name,
pll_sw_state->config[2], pll_hw_state.config[2]);
return true;
}
void intel_xe3plpd_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
if (intel_tc_port_in_tbt_alt_mode(dig_port))
intel_mtl_tbt_pll_enable_clock(encoder, crtc_state->port_clock);
else
intel_lt_phy_pll_enable(encoder, crtc_state);
intel_lt_phy_pll_enable(encoder, pll, dpll_hw_state);
}
void intel_xe3plpd_pll_disable(struct intel_encoder *encoder)
@ -2294,7 +2315,7 @@ static void intel_lt_phy_pll_verify_clock(struct intel_display *display,
drm_printf(&p, "PLL state %s (%s):\n",
pll_state_name,
is_precomputed_state ? "precomputed" : "computed");
intel_lt_phy_dump_hw_state(display, pll_state);
intel_lt_phy_dump_hw_state(&p, pll_state);
}
static void intel_lt_phy_pll_verify_params(struct intel_display *display,

View File

@ -8,38 +8,45 @@
#include <linux/types.h>
struct drm_printer;
struct intel_atomic_state;
struct intel_display;
struct intel_dpll;
struct intel_dpll_hw_state;
struct intel_encoder;
struct intel_crtc_state;
struct intel_crtc;
struct intel_lt_phy_pll_state;
void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state);
void intel_lt_phy_pll_disable(struct intel_encoder *encoder);
int
intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder);
struct intel_encoder *encoder,
struct intel_dpll_hw_state *hw_state);
void intel_lt_phy_tbt_pll_calc_state(struct intel_dpll_hw_state *hw_state);
int intel_lt_phy_calc_port_clock(struct intel_display *display,
const struct intel_lt_phy_pll_state *lt_state);
void intel_lt_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_lt_phy_dump_hw_state(struct intel_display *display,
void intel_lt_phy_dump_hw_state(struct drm_printer *p,
const struct intel_lt_phy_pll_state *hw_state);
bool
intel_lt_phy_pll_compare_hw_state(const struct intel_lt_phy_pll_state *a,
const struct intel_lt_phy_pll_state *b);
void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
bool intel_lt_phy_tbt_pll_readout_hw_state(struct intel_display *display,
struct intel_dpll *pll,
struct intel_dpll_hw_state *hw_state);
bool intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_lt_phy_pll_state *pll_state);
void intel_lt_phy_pll_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int
intel_lt_phy_calculate_hdmi_state(struct intel_lt_phy_pll_state *lt_state,
u32 frequency_khz);
void intel_xe3plpd_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state);
void intel_xe3plpd_pll_disable(struct intel_encoder *encoder);
void intel_lt_phy_verify_plls(struct intel_display *display);

View File

@ -246,7 +246,6 @@ void intel_modeset_verify_crtc(struct intel_atomic_state *state,
verify_crtc_state(state, crtc);
intel_dpll_state_verify(state, crtc);
intel_mpllb_state_verify(state, crtc);
intel_lt_phy_pll_state_verify(state, crtc);
}
void intel_modeset_verify_disabled(struct intel_atomic_state *state)

View File

@ -5,6 +5,9 @@
#include <drm/drm_print.h>
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_regs.h"
#include "intel_display_core.h"
#include "intel_display_utils.h"
#include "intel_pch.h"
@ -214,6 +217,96 @@ intel_pch_type(const struct intel_display *display, unsigned short id)
}
}
static void intel_pch_ibx_init_clock_gating(struct intel_display *display)
{
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
intel_de_write(display, SOUTH_DSPCLK_GATE_D,
PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
static void intel_pch_cpt_init_clock_gating(struct intel_display *display)
{
enum pipe pipe;
u32 val;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
intel_de_write(display, SOUTH_DSPCLK_GATE_D,
PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
PCH_DPLUNIT_CLOCK_GATE_DISABLE |
PCH_CPUNIT_CLOCK_GATE_DISABLE);
intel_de_rmw(display, SOUTH_CHICKEN2, 0, DPLS_EDP_PPS_FIX_DIS);
/* The below fixes the weird display corruption, a few pixels shifted
* downward, on (only) LVDS of some HP laptops with IVY.
*/
for_each_pipe(display, pipe) {
val = intel_de_read(display, TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
if (display->vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
intel_de_write(display, TRANS_CHICKEN2(pipe), val);
}
/* WADP0ClockGatingDisable */
for_each_pipe(display, pipe)
intel_de_write(display, TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
static void intel_pch_lpt_init_clock_gating(struct intel_display *display)
{
/*
* TODO: this bit should only be enabled when really needed, then
* disabled when not needed anymore in order to save power.
*/
if (HAS_PCH_LPT_LP(display))
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0,
PCH_LP_PARTITION_LEVEL_DISABLE);
/* WADPOClockGatingDisable:hsw */
intel_de_rmw(display, TRANS_CHICKEN1(PIPE_A), 0,
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
static void intel_pch_cnp_init_clock_gating(struct intel_display *display)
{
/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0,
CNP_PWM_CGE_GATING_DISABLE);
}
void intel_pch_init_clock_gating(struct intel_display *display)
{
switch (INTEL_PCH_TYPE(display)) {
case PCH_IBX:
intel_pch_ibx_init_clock_gating(display);
break;
case PCH_CPT:
intel_pch_cpt_init_clock_gating(display);
break;
case PCH_LPT_H:
case PCH_LPT_LP:
intel_pch_lpt_init_clock_gating(display);
break;
case PCH_CNP:
intel_pch_cnp_init_clock_gating(display);
break;
default:
break;
}
}
static bool intel_is_virt_pch(unsigned short id,
unsigned short svendor, unsigned short sdevice)
{

View File

@ -52,5 +52,6 @@ enum intel_pch {
#define HAS_PCH_SPLIT(display) (INTEL_PCH_TYPE(display) != PCH_NONE)
void intel_pch_detect(struct intel_display *display);
void intel_pch_init_clock_gating(struct intel_display *display);
#endif /* __INTEL_PCH__ */

View File

@ -437,7 +437,29 @@ void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,
}
static void unlink_nv12_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
if (!plane_state->planar_linked_plane)
return;
plane_state->planar_linked_plane = NULL;
if (!plane_state->is_y_plane)
return;
drm_WARN_ON(display->drm, plane_state->uapi.visible);
plane_state->is_y_plane = false;
crtc_state->enabled_planes &= ~BIT(plane->id);
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->update_planes |= BIT(plane->id);
crtc_state->data_rate[plane->id] = 0;
crtc_state->rel_data_rate[plane->id] = 0;
}
void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
@ -1512,31 +1534,6 @@ static void link_nv12_planes(struct intel_crtc_state *crtc_state,
icl_link_nv12_planes(uv_plane_state, y_plane_state);
}
static void unlink_nv12_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
if (!plane_state->planar_linked_plane)
return;
plane_state->planar_linked_plane = NULL;
if (!plane_state->is_y_plane)
return;
drm_WARN_ON(display->drm, plane_state->uapi.visible);
plane_state->is_y_plane = false;
crtc_state->enabled_planes &= ~BIT(plane->id);
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->update_planes |= BIT(plane->id);
crtc_state->data_rate[plane->id] = 0;
crtc_state->rel_data_rate[plane->id] = 0;
}
static int icl_check_nv12_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@ -1550,17 +1547,6 @@ static int icl_check_nv12_planes(struct intel_atomic_state *state,
if (DISPLAY_VER(display) < 11)
return 0;
/*
* Destroy all old plane links and make the Y plane invisible
* in the crtc_state->active_planes mask.
*/
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
if (plane->pipe != crtc->pipe)
continue;
unlink_nv12_plane(crtc_state, plane_state);
}
if (!crtc_state->nv12_planes)
return 0;

View File

@ -49,6 +49,7 @@
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_quirks.h"
#include "intel_snps_phy.h"
#include "intel_step.h"
#include "intel_vblank.h"
@ -609,6 +610,13 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp, struct intel_conn
if (intel_dp->mst_detect == DRM_DP_MST)
return;
if (intel_dp_is_edp(intel_dp) &&
intel_has_dpcd_quirk(intel_dp, QUIRK_DISABLE_EDP_PANEL_REPLAY)) {
drm_dbg_kms(display->drm,
"Panel Replay support not currently available for this setup\n");
return;
}
ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
&connector->dp.panel_replay_caps.dpcd,
sizeof(connector->dp.panel_replay_caps.dpcd));

View File

@ -86,6 +86,14 @@ static void quirk_edp_limit_rate_hbr2(struct intel_display *display)
drm_info(display->drm, "Applying eDP Limit rate to HBR2 quirk\n");
}
static void quirk_disable_edp_panel_replay(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
intel_set_dpcd_quirk(intel_dp, QUIRK_DISABLE_EDP_PANEL_REPLAY);
drm_info(display->drm, "Applying disable Panel Replay quirk\n");
}
struct intel_quirk {
int device;
int subsystem_vendor;
@ -108,6 +116,8 @@ struct intel_dpcd_quirk {
#define SINK_DEVICE_ID_ANY SINK_DEVICE_ID(0, 0, 0, 0, 0, 0)
#define DEVICE_ID_ANY 0
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
void (*hook)(struct intel_display *display);
@ -251,7 +261,14 @@ static const struct intel_dpcd_quirk intel_dpcd_quirks[] = {
.sink_oui = SINK_OUI(0x38, 0xec, 0x11),
.hook = quirk_fw_sync_len,
},
/* Dell XPS 14 DA14260 */
{
.device = DEVICE_ID_ANY,
.subsystem_vendor = 0x1028,
.subsystem_device = 0x0db9,
.sink_oui = SINK_OUI(0x00, 0x22, 0xb9),
.hook = quirk_disable_edp_panel_replay,
},
};
void intel_init_quirks(struct intel_display *display)
@ -262,7 +279,8 @@ void intel_init_quirks(struct intel_display *display)
for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
struct intel_quirk *q = &intel_quirks[i];
if (d->device == q->device &&
if ((d->device == q->device ||
q->device == DEVICE_ID_ANY) &&
(d->subsystem_vendor == q->subsystem_vendor ||
q->subsystem_vendor == PCI_ANY_ID) &&
(d->subsystem_device == q->subsystem_device ||
@ -285,7 +303,8 @@ void intel_init_dpcd_quirks(struct intel_dp *intel_dp,
for (i = 0; i < ARRAY_SIZE(intel_dpcd_quirks); i++) {
const struct intel_dpcd_quirk *q = &intel_dpcd_quirks[i];
if (d->device == q->device &&
if ((d->device == q->device ||
q->device == DEVICE_ID_ANY) &&
(d->subsystem_vendor == q->subsystem_vendor ||
q->subsystem_vendor == PCI_ANY_ID) &&
(d->subsystem_device == q->subsystem_device ||

View File

@ -21,6 +21,7 @@ enum intel_quirk_id {
QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK,
QUIRK_FW_SYNC_LEN,
QUIRK_EDP_LIMIT_RATE_HBR2,
QUIRK_DISABLE_EDP_PANEL_REPLAY,
};
void intel_init_quirks(struct intel_display *display);

View File

@ -63,7 +63,6 @@ static void skl_sagv_disable(struct intel_display *display);
struct skl_wm_params {
bool x_tiled, y_tiled;
bool rc_surface;
bool is_planar;
u32 width;
u8 cpp;
u32 plane_pixel_rate;
@ -1357,14 +1356,13 @@ skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
}
static void
skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
skl_check_wm_level_nv12(struct skl_wm_level *wm,
const struct skl_ddb_entry *ddb_y,
const struct skl_ddb_entry *ddb)
{
if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
wm->min_ddb_alloc_uv > skl_ddb_entry_size(ddb))
memset(wm, 0, sizeof(*wm));
memset(uv_wm, 0, sizeof(*uv_wm));
}
}
static bool skl_need_wm_copy_wa(struct intel_display *display, int level,
@ -1391,10 +1389,9 @@ struct skl_plane_ddb_iter {
};
static void
skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
struct skl_ddb_entry *ddb,
const struct skl_wm_level *wm,
u64 data_rate)
_skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
u16 min_ddb_alloc,
struct skl_ddb_entry *ddb, u64 data_rate)
{
u16 size, extra = 0;
@ -1411,12 +1408,30 @@ skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
* to avoid skl_ddb_add_affected_planes() adding them to
* the state when other planes change their allocations.
*/
size = wm->min_ddb_alloc + extra;
size = min_ddb_alloc + extra;
if (size)
iter->start = skl_ddb_entry_init(ddb, iter->start,
iter->start + size);
}
static void
skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
const struct skl_wm_level *wm,
struct skl_ddb_entry *ddb, u64 data_rate)
{
_skl_allocate_plane_ddb(iter, wm->min_ddb_alloc, ddb, data_rate);
}
static void
skl_allocate_plane_ddb_nv12(struct skl_plane_ddb_iter *iter,
const struct skl_wm_level *wm,
struct skl_ddb_entry *ddb_y, u64 data_rate_y,
struct skl_ddb_entry *ddb, u64 data_rate)
{
_skl_allocate_plane_ddb(iter, wm->min_ddb_alloc, ddb_y, data_rate_y);
_skl_allocate_plane_ddb(iter, wm->min_ddb_alloc_uv, ddb, data_rate);
}
static int
skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
struct intel_crtc *crtc)
@ -1482,7 +1497,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
}
blocks += wm->wm[level].min_ddb_alloc;
blocks += wm->uv_wm[level].min_ddb_alloc;
blocks += wm->wm[level].min_ddb_alloc_uv;
}
if (blocks <= iter.size) {
@ -1523,15 +1538,13 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
continue;
if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id)) {
skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
crtc_state->rel_data_rate_y[plane_id]);
skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
crtc_state->rel_data_rate[plane_id]);
} else {
skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
crtc_state->rel_data_rate[plane_id]);
}
crtc_state->nv12_planes & BIT(plane_id))
skl_allocate_plane_ddb_nv12(&iter, &wm->wm[level],
ddb_y, crtc_state->rel_data_rate_y[plane_id],
ddb, crtc_state->rel_data_rate[plane_id]);
else
skl_allocate_plane_ddb(&iter, &wm->wm[level],
ddb, crtc_state->rel_data_rate[plane_id]);
if (DISPLAY_VER(display) >= 30) {
*min_ddb = wm->wm[0].min_ddb_alloc;
@ -1557,9 +1570,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id))
skl_check_nv12_wm_level(&wm->wm[level],
&wm->uv_wm[level],
ddb_y, ddb);
skl_check_wm_level_nv12(&wm->wm[level], ddb_y, ddb);
else
skl_check_wm_level(&wm->wm[level], ddb);
@ -1675,10 +1686,9 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->y_tiled = modifier != I915_FORMAT_MOD_X_TILED &&
intel_fb_is_tiled_modifier(modifier);
wp->rc_surface = intel_fb_is_ccs_modifier(modifier);
wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
wp->width = width;
if (color_plane == 1 && wp->is_planar)
if (color_plane == 1 && intel_format_info_is_yuv_semiplanar(format, modifier))
wp->width /= 2;
wp->cpp = format->cpp[color_plane];
@ -2069,11 +2079,11 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
struct intel_plane *plane)
{
struct intel_display *display = to_intel_display(crtc_state);
struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
struct skl_wm_level uv_wm[ARRAY_SIZE(wm->wm)] = {};
struct skl_wm_params wm_params;
int ret;
wm->is_planar = true;
int ret, level;
/* uv plane watermarks must also be validated for NV12/Planar */
ret = skl_compute_plane_wm_params(crtc_state, plane_state,
@ -2081,7 +2091,14 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);
skl_compute_wm_levels(crtc_state, plane, &wm_params, uv_wm);
/*
* Only keep the min_ddb_alloc for UV as
* the hardware needs nothing else.
*/
for (level = 0; level < display->wm.num_levels; level++)
wm->wm[level].min_ddb_alloc_uv = uv_wm[level].min_ddb_alloc;
return 0;
}
@ -2304,7 +2321,6 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
* thing as bad via min_ddb_alloc=U16_MAX?
*/
wm->wm[level].enable = false;
wm->uv_wm[level].enable = false;
}
}
@ -2375,11 +2391,6 @@ static bool skl_plane_wm_equals(struct intel_display *display,
int level;
for (level = 0; level < display->wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
* ddb allocation.
*/
if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
return false;
}
@ -2590,14 +2601,30 @@ static char enast(bool enable)
return enable ? '*' : ' ';
}
static noinline_for_stack void
skl_print_plane_changes(struct intel_display *display,
struct intel_plane *plane,
const struct skl_plane_wm *old_wm,
const struct skl_plane_wm *new_wm)
static void
skl_print_plane_ddb_changes(struct intel_plane *plane,
const struct skl_ddb_entry *old,
const struct skl_ddb_entry *new,
const char *ddb_name)
{
struct intel_display *display = to_intel_display(plane);
drm_dbg_kms(display->drm,
"[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
"[PLANE:%d:%s] %5s (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
plane->base.base.id, plane->base.name, ddb_name,
old->start, old->end, new->start, new->end,
skl_ddb_entry_size(old), skl_ddb_entry_size(new));
}
static noinline_for_stack void
skl_print_plane_wm_changes(struct intel_plane *plane,
const struct skl_plane_wm *old_wm,
const struct skl_plane_wm *new_wm)
{
struct intel_display *display = to_intel_display(plane);
drm_dbg_kms(display->drm,
"[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
" -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
plane->base.base.id, plane->base.name,
enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
@ -2616,7 +2643,7 @@ skl_print_plane_changes(struct intel_display *display,
enast(new_wm->sagv.trans_wm.enable));
drm_dbg_kms(display->drm,
"[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
"[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
" -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
plane->base.base.id, plane->base.name,
enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
@ -2643,7 +2670,7 @@ skl_print_plane_changes(struct intel_display *display,
enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
drm_dbg_kms(display->drm,
"[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
"[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
" -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
plane->base.base.id, plane->base.name,
old_wm->wm[0].blocks, old_wm->wm[1].blocks,
@ -2662,7 +2689,7 @@ skl_print_plane_changes(struct intel_display *display,
new_wm->sagv.trans_wm.blocks);
drm_dbg_kms(display->drm,
"[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
"[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
" -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
plane->base.base.id, plane->base.name,
old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
@ -2679,6 +2706,28 @@ skl_print_plane_changes(struct intel_display *display,
new_wm->trans_wm.min_ddb_alloc,
new_wm->sagv.wm0.min_ddb_alloc,
new_wm->sagv.trans_wm.min_ddb_alloc);
if (DISPLAY_VER(display) >= 11)
return;
drm_dbg_kms(display->drm,
"[PLANE:%d:%s] min_ddb_uv %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
" -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
plane->base.base.id, plane->base.name,
old_wm->wm[0].min_ddb_alloc_uv, old_wm->wm[1].min_ddb_alloc_uv,
old_wm->wm[2].min_ddb_alloc_uv, old_wm->wm[3].min_ddb_alloc_uv,
old_wm->wm[4].min_ddb_alloc_uv, old_wm->wm[5].min_ddb_alloc_uv,
old_wm->wm[6].min_ddb_alloc_uv, old_wm->wm[7].min_ddb_alloc_uv,
old_wm->trans_wm.min_ddb_alloc_uv,
old_wm->sagv.wm0.min_ddb_alloc_uv,
old_wm->sagv.trans_wm.min_ddb_alloc_uv,
new_wm->wm[0].min_ddb_alloc_uv, new_wm->wm[1].min_ddb_alloc_uv,
new_wm->wm[2].min_ddb_alloc_uv, new_wm->wm[3].min_ddb_alloc_uv,
new_wm->wm[4].min_ddb_alloc_uv, new_wm->wm[5].min_ddb_alloc_uv,
new_wm->wm[6].min_ddb_alloc_uv, new_wm->wm[7].min_ddb_alloc_uv,
new_wm->trans_wm.min_ddb_alloc_uv,
new_wm->sagv.wm0.min_ddb_alloc_uv,
new_wm->sagv.trans_wm.min_ddb_alloc_uv);
}
static void
@ -2708,13 +2757,17 @@ skl_print_wm_changes(struct intel_atomic_state *state)
old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
if (skl_ddb_entry_equal(old, new))
if (!skl_ddb_entry_equal(old, new))
skl_print_plane_ddb_changes(plane, old, new, "ddb");
if (DISPLAY_VER(display) >= 11)
continue;
drm_dbg_kms(display->drm,
"[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
plane->base.base.id, plane->base.name,
old->start, old->end, new->start, new->end,
skl_ddb_entry_size(old), skl_ddb_entry_size(new));
old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
if (!skl_ddb_entry_equal(old, new))
skl_print_plane_ddb_changes(plane, old, new, "ddb_y");
}
for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
@ -2727,7 +2780,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_plane_wm_equals(display, old_wm, new_wm))
continue;
skl_print_plane_changes(display, plane, old_wm, new_wm);
skl_print_plane_wm_changes(plane, old_wm, new_wm);
}
}
}
@ -2740,11 +2793,6 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
int level;
for (level = 0; level < display->wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
* ddb allocation.
*/
if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
skl_plane_wm_level(new_pipe_wm, plane->id, level)))
return false;

View File

@ -1367,7 +1367,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
}
tmp = 0;
if (intel_dsi->eotp_pkt == 0)
if (!intel_dsi->eot_pkt)
tmp |= EOT_DISABLE;
if (intel_dsi->clock_stop)
tmp |= CLOCKSTOP;

View File

@ -535,16 +535,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
i2c_edid->edid_available = true;
}
}
} else if ((op & 0x1) == DP_AUX_I2C_WRITE) {
/* TODO
* We only support EDID reading from I2C_over_AUX. And
* we do not expect the index mode to be used. Right now
* the WRITE operation is ignored. It is good enough to
* support the gfx driver to do EDID access.
*/
} else {
if (drm_WARN_ON(&i915->drm, (op & 0x1) != DP_AUX_I2C_READ))
return;
} else if ((op & 0x1) == DP_AUX_I2C_READ) {
if (drm_WARN_ON(&i915->drm, msg_length != 4))
return;
if (i2c_edid->edid_available && i2c_edid->target_selected) {
@ -553,6 +544,13 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
aux_data_for_write = (val << 16);
} else
aux_data_for_write = (0xff << 16);
} else {
/* TODO
* We only support EDID reading from I2C_over_AUX. And
* we do not expect the index mode to be used. Right now
* the WRITE operation is ignored. It is good enough to
* support the gfx driver to do EDID access.
*/
}
/* write the return value in AUX_CH_DATA reg which includes:
* ACK of I2C_WRITE

View File

@ -115,7 +115,8 @@ initial_plane_vma(struct drm_i915_private *i915,
* important and we should probably use that space with FBC or other
* features.
*/
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) &&
IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
mem == i915->mm.stolen_region &&
!intel_fbdev_fb_prefer_stolen(&i915->drm, size)) {
drm_dbg_kms(&i915->drm, "Initial FB size exceeds half of stolen, discarding\n");

View File

@ -33,6 +33,7 @@
#include "display/intel_display.h"
#include "display/intel_display_core.h"
#include "display/intel_display_regs.h"
#include "display/intel_pch.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
@ -124,16 +125,6 @@ static void glk_init_clock_gating(struct drm_i915_private *i915)
PWM1_GATING_DIS | PWM2_GATING_DIS);
}
static void ibx_init_clock_gating(struct drm_i915_private *i915)
{
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
intel_uncore_write(&i915->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
struct intel_display *display = dev_priv->display;
@ -202,42 +193,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *i915)
g4x_disable_trickle_feed(i915);
ibx_init_clock_gating(i915);
}
static void cpt_init_clock_gating(struct drm_i915_private *i915)
{
struct intel_display *display = i915->display;
enum pipe pipe;
u32 val;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
intel_uncore_write(&i915->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
PCH_DPLUNIT_CLOCK_GATE_DISABLE |
PCH_CPUNIT_CLOCK_GATE_DISABLE);
intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN2, 0, DPLS_EDP_PPS_FIX_DIS);
/* The below fixes the weird display corruption, a few pixels shifted
* downward, on (only) LVDS of some HP laptops with IVY.
*/
for_each_pipe(display, pipe) {
val = intel_uncore_read(&i915->uncore, TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
if (display->vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
intel_uncore_write(&i915->uncore, TRANS_CHICKEN2(pipe), val);
}
/* WADP0ClockGatingDisable */
for_each_pipe(display, pipe) {
intel_uncore_write(&i915->uncore, TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
intel_pch_init_clock_gating(i915->display);
}
static void gen6_check_mch_setup(struct drm_i915_private *i915)
@ -305,28 +261,11 @@ static void gen6_init_clock_gating(struct drm_i915_private *i915)
g4x_disable_trickle_feed(i915);
cpt_init_clock_gating(i915);
intel_pch_init_clock_gating(i915->display);
gen6_check_mch_setup(i915);
}
static void lpt_init_clock_gating(struct drm_i915_private *i915)
{
struct intel_display *display = i915->display;
/*
* TODO: this bit should only be enabled when really needed, then
* disabled when not needed anymore in order to save power.
*/
if (HAS_PCH_LPT_LP(display))
intel_uncore_rmw(&i915->uncore, SOUTH_DSPCLK_GATE_D,
0, PCH_LP_PARTITION_LEVEL_DISABLE);
/* WADPOClockGatingDisable:hsw */
intel_uncore_rmw(&i915->uncore, TRANS_CHICKEN1(PIPE_A),
0, TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
static void gen8_set_l3sqc_credits(struct drm_i915_private *i915,
int general_prio_credits,
int high_prio_credits)
@ -360,20 +299,9 @@ static void dg2_init_clock_gating(struct drm_i915_private *i915)
SGSI_SIDECLK_DIS);
}
static void cnp_init_clock_gating(struct drm_i915_private *i915)
{
struct intel_display *display = i915->display;
if (!HAS_PCH_CNP(display))
return;
/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
intel_uncore_rmw(&i915->uncore, SOUTH_DSPCLK_GATE_D, 0, CNP_PWM_CGE_GATING_DISABLE);
}
static void cfl_init_clock_gating(struct drm_i915_private *i915)
{
cnp_init_clock_gating(i915);
intel_pch_init_clock_gating(i915->display);
gen9_init_clock_gating(i915);
/* WAC6entrylatency:cfl */
@ -466,7 +394,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_rmw(&i915->uncore, CHICKEN_PAR2_1,
0, KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
lpt_init_clock_gating(i915);
intel_pch_init_clock_gating(i915->display);
/* WaDisableDopClockGating:bdw
*
@ -500,7 +428,7 @@ static void hsw_init_clock_gating(struct drm_i915_private *i915)
/* WaSwitchSolVfFArbitrationPriority:hsw */
intel_uncore_rmw(&i915->uncore, GAM_ECOCHK, 0, HSW_ECOCHK_ARB_PRIO_SOL);
lpt_init_clock_gating(i915);
intel_pch_init_clock_gating(i915->display);
}
static void ivb_init_clock_gating(struct drm_i915_private *i915)
@ -545,7 +473,7 @@ static void ivb_init_clock_gating(struct drm_i915_private *i915)
GEN6_MBC_SNPCR_MED);
if (!HAS_PCH_NOP(display))
cpt_init_clock_gating(i915);
intel_pch_init_clock_gating(display);
gen6_check_mch_setup(i915);
}

View File

@ -8,6 +8,8 @@
#include <linux/types.h>
#include <drm/intel/step.h>
struct drm_i915_private;
struct intel_step_info {
@ -19,61 +21,6 @@ struct intel_step_info {
u8 media_step;
};
#define STEP_ENUM_VAL(name) STEP_##name,
#define STEP_NAME_LIST(func) \
func(A0) \
func(A1) \
func(A2) \
func(A3) \
func(B0) \
func(B1) \
func(B2) \
func(B3) \
func(C0) \
func(C1) \
func(C2) \
func(C3) \
func(D0) \
func(D1) \
func(D2) \
func(D3) \
func(E0) \
func(E1) \
func(E2) \
func(E3) \
func(F0) \
func(F1) \
func(F2) \
func(F3) \
func(G0) \
func(G1) \
func(G2) \
func(G3) \
func(H0) \
func(H1) \
func(H2) \
func(H3) \
func(I0) \
func(I1) \
func(I2) \
func(I3) \
func(J0) \
func(J1) \
func(J2) \
func(J3)
/*
* Symbolic steppings that do not match the hardware. These are valid both as gt
* and display steppings as symbolic names.
*/
enum intel_step {
STEP_NONE = 0,
STEP_NAME_LIST(STEP_ENUM_VAL)
STEP_FUTURE,
STEP_FOREVER,
};
void intel_step_init(struct drm_i915_private *i915);
const char *intel_step_name(enum intel_step step);

View File

@ -399,6 +399,35 @@ static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
__gen6_gt_wait_for_thread_c0(uncore);
}
static void
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
u32 fifodbg;
fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
if (unlikely(fifodbg)) {
drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x08%x\n", fifodbg);
__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
}
}
static void
fw_domains_get_normal_fifo(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
gen6_check_for_fifo_debug(uncore);
fw_domains_get_normal(uncore, fw_domains);
}
static void
fw_domains_get_with_thread_status_fifo(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
gen6_check_for_fifo_debug(uncore);
fw_domains_get_with_thread_status(uncore, fw_domains);
}
static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
@ -561,21 +590,6 @@ vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
return true;
}
static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
u32 fifodbg;
fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
if (unlikely(fifodbg)) {
drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x08%x\n", fifodbg);
__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
}
return fifodbg;
}
static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
@ -592,9 +606,6 @@ check_for_unclaimed_mmio(struct intel_uncore *uncore)
if (intel_uncore_has_dbg_unclaimed(uncore))
ret |= vlv_check_for_unclaimed_mmio(uncore);
if (intel_uncore_has_fifo(uncore))
ret |= gen6_check_for_fifo_debug(uncore);
return ret;
}
@ -611,6 +622,9 @@ static void forcewake_early_sanitize(struct intel_uncore *uncore,
GT_FIFO_CTL_RC6_POLICY_STALL);
}
if (intel_uncore_has_fifo(uncore))
gen6_check_for_fifo_debug(uncore);
iosf_mbi_punit_acquire();
intel_uncore_forcewake_reset(uncore);
if (restore_forcewake) {
@ -2155,6 +2169,14 @@ static const struct intel_uncore_fw_get uncore_get_thread_status = {
.force_wake_get = fw_domains_get_with_thread_status
};
static const struct intel_uncore_fw_get uncore_get_normal_fifo = {
.force_wake_get = fw_domains_get_normal_fifo,
};
static const struct intel_uncore_fw_get uncore_get_thread_status_fifo = {
.force_wake_get = fw_domains_get_with_thread_status_fifo
};
static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
struct drm_i915_private *i915 = uncore->i915;
@ -2218,13 +2240,19 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
uncore->fw_get_funcs = &uncore_get_normal;
if (intel_uncore_has_fifo(uncore))
uncore->fw_get_funcs = &uncore_get_normal_fifo;
else
uncore->fw_get_funcs = &uncore_get_normal;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
uncore->fw_get_funcs = &uncore_get_thread_status;
if (intel_uncore_has_fifo(uncore))
uncore->fw_get_funcs = &uncore_get_thread_status_fifo;
else
uncore->fw_get_funcs = &uncore_get_thread_status;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(i915)) {
@ -2239,7 +2267,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
* (correctly) interpreted by the test below as MT
* forcewake being disabled.
*/
uncore->fw_get_funcs = &uncore_get_thread_status;
uncore->fw_get_funcs = &uncore_get_thread_status_fifo;
/* We need to init first for ECOBUS access and then
* determine later if we want to reinit, in case of MT access is
@ -2270,7 +2298,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE, FORCEWAKE_ACK);
}
} else if (GRAPHICS_VER(i915) == 6) {
uncore->fw_get_funcs = &uncore_get_thread_status;
uncore->fw_get_funcs = &uncore_get_thread_status_fifo;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}

View File

@ -272,67 +272,6 @@ static int live_forcewake_ops(void *arg)
return err;
}
static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
struct intel_gt *gt = arg;
struct intel_uncore *uncore = gt->uncore;
struct drm_i915_private *i915 = gt->i915;
struct intel_display *display = i915->display;
unsigned long *valid;
u32 offset;
int err;
if (!HAS_FPGA_DBG_UNCLAIMED(display) &&
!IS_VALLEYVIEW(i915) &&
!IS_CHERRYVIEW(i915))
return 0;
/*
* This test may lockup the machine or cause GPU hangs afterwards.
*/
if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
return 0;
valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL);
if (!valid)
return -ENOMEM;
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
check_for_unclaimed_mmio(uncore);
for (offset = 0; offset < FW_RANGE; offset += 4) {
i915_reg_t reg = { offset };
intel_uncore_posting_read_fw(uncore, reg);
if (!check_for_unclaimed_mmio(uncore))
set_bit(offset, valid);
}
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
err = 0;
for_each_set_bit(offset, valid, FW_RANGE) {
i915_reg_t reg = { offset };
iosf_mbi_punit_acquire();
intel_uncore_forcewake_reset(uncore);
iosf_mbi_punit_release();
check_for_unclaimed_mmio(uncore);
intel_uncore_posting_read_fw(uncore, reg);
if (check_for_unclaimed_mmio(uncore)) {
pr_err("Unclaimed mmio read to register 0x%04x\n",
offset);
err = -EINVAL;
}
}
bitmap_free(valid);
return err;
}
static int live_fw_table(void *arg)
{
struct intel_gt *gt = arg;
@ -348,7 +287,6 @@ int intel_uncore_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(live_fw_table),
SUBTEST(live_forcewake_ops),
SUBTEST(live_forcewake_domains),
};
return intel_gt_live_subtests(tests, to_gt(i915));

View File

@ -251,6 +251,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_dbuf_bw.o \
i915-display/intel_ddi.o \
i915-display/intel_ddi_buf_trans.o \
i915-display/intel_de.o \
i915-display/intel_display.o \
i915-display/intel_display_conversion.o \
i915-display/intel_display_device.o \

View File

@ -6,9 +6,8 @@
#ifndef __INTEL_STEP_H__
#define __INTEL_STEP_H__
#include "xe_step.h"
#include "xe_step_types.h"
#define intel_step xe_step
#define intel_step_name xe_step_name
#endif /* __INTEL_STEP_H__ */

View File

@ -98,37 +98,6 @@ static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
return xe_mmio_rmw32(__compat_uncore_to_mmio(uncore), reg, clear, set);
}
static inline int
__intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
u32 mask, u32 value, unsigned int fast_timeout_us,
unsigned int slow_timeout_ms, u32 *out_value)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
bool atomic;
/*
* Replicate the behavior from i915 here, in which sleep is not
* performed if slow_timeout_ms == 0. This is necessary because
* of some paths in display code where waits are done in atomic
* context.
*/
atomic = !slow_timeout_ms && fast_timeout_us > 0;
return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
fast_timeout_us + 1000 * slow_timeout_ms,
out_value, atomic);
}
static inline int
__intel_wait_for_register_fw(struct intel_uncore *uncore, i915_reg_t i915_reg,
u32 mask, u32 value, unsigned int fast_timeout_us,
unsigned int slow_timeout_ms, u32 *out_value)
{
return __intel_wait_for_register(uncore, i915_reg, mask, value,
fast_timeout_us, slow_timeout_ms,
out_value);
}
static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{

62
include/drm/intel/step.h Normal file
View File

@ -0,0 +1,62 @@
/* SPDX-License-Identifier: MIT */
/* Copyright © 2026 Intel Corporation */
#ifndef __STEP_H__
#define __STEP_H__
#define STEP_ENUM_VAL(name) STEP_##name,
#define STEP_NAME_LIST(func) \
func(A0) \
func(A1) \
func(A2) \
func(A3) \
func(B0) \
func(B1) \
func(B2) \
func(B3) \
func(C0) \
func(C1) \
func(C2) \
func(C3) \
func(D0) \
func(D1) \
func(D2) \
func(D3) \
func(E0) \
func(E1) \
func(E2) \
func(E3) \
func(F0) \
func(F1) \
func(F2) \
func(F3) \
func(G0) \
func(G1) \
func(G2) \
func(G3) \
func(H0) \
func(H1) \
func(H2) \
func(H3) \
func(I0) \
func(I1) \
func(I2) \
func(I3) \
func(J0) \
func(J1) \
func(J2) \
func(J3)
/*
* Symbolic steppings that do not match the hardware. These are valid both as gt
* and display steppings as symbolic names.
*/
enum intel_step {
STEP_NONE = 0,
STEP_NAME_LIST(STEP_ENUM_VAL)
STEP_FUTURE,
STEP_FOREVER,
};
#endif /* __STEP_H__ */