drm next fixes for 7.1-rc1

amdgpu:
 - SMU 14 fixes
 - Partition fixes
 - SMUIO 15.x fix
 - SR-IOV fixes
 - JPEG fix
 - PSP 15.x fix
 - NBIF fix
 - Devcoredump fixes
 - DPC fix
 - RAS fixes
 - Aldebaran smu fix
 - IP discovery fix
 - SDMA 7.1 fix
 - Runtime pm fix
 - MES 12.1 fix
 - DML2 fixes
 - DCN 4.2 fixes
 - YCbCr fixes
 - Freesync fixes
 - ISM fixes
 - Overlay cursor fix
 - DC FP fixes
 - UserQ locking fixes
 - DC idle state manager fix
 - ASPM fix
 - GPUVM SVM fix
 - DCE 6 fix
 
 amdkfd:
 - Fix memory clear handling
 - num_of_nodes bounds check fix
 
 i915:
 - Fix uninitialized variable in the alignment loop [psr]
 
 rcar-du:
 - fix NULL-ptr crash
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmnrB3cACgkQDHTzWXnE
 hr4keA//XIGZt8aON+0S7gekYDzf3PQzWPitLIDTad4MEZFe9rxnsE3Jbi7DMSBL
 5d5ZKiJkec5NfTbKhOgv1mxJ/Q/fmTTk/o7909VBdah/ga+3QlbyrQTjPQLSO4CV
 LmysaVtY6yr30N3p/xaGGUl9gLbkobZkUK9UWul8JJc/CE2uJfUBDxZ/QG/50aXK
 1w+82X6JWyJmke7MNKkYE0r2lO5INVDYJxADSLORVZfg4mrAyjZ12olL5OG7UjgV
 a/rEA74GFCFHOMBPlqjrIYvVQ8skv7Ft+SeD0/rTBxccOafBstORChBx+LkaVYET
 l7wcYSl6Ie0fOmJ+LlBcqYH9Briq9+zLapuiqXLzg8wzGbTTrTMHFnMdTBT3aHjx
 EmNXPB+H7pqheT8GpdugFXpSDsiKdhoHqTULTHtfqL16g3sLr2xmKuD9tMioIqqm
 bZCV7T1sAr2aiDlcNtRa961yHix7M64g0IjzcUCciu7YFSYb0Pkg2gmHPyejCdbl
 nFdZnD+4u86WmzCuTafmz6ikPNyKuLHQK/sEuOBWckwUS/0ei5f0YHhK4EHWYK+O
 3lNUp2VjH2K9wNGpnlcrjsk9cVTc0Pwwn1jV4JjxN6rXjXgoJz0Oyeqmvhxrp2AU
 negIWKsio6qqV1/EF2pt8O78y2xeOcpFVxVvnNrTn87EeP2g4WA=
 =wEk4
 -----END PGP SIGNATURE-----

Merge tag 'drm-next-2026-04-24' of https://gitlab.freedesktop.org/drm/kernel

Pull drm next fixes from Dave Airlie:
 "This is the first of two fixes for the merge PRs, the other is based
  on 7.0 branch. This mostly AMD fixes, a couple of weeks of backlog
  built up and this weeks. The main complaint I've seen is some boot
  warnings around the FP code handling which this should fix. Otherwise
  a single rcar-du and a single i915 fix.

  amdgpu:
   - SMU 14 fixes
   - Partition fixes
   - SMUIO 15.x fix
   - SR-IOV fixes
   - JPEG fix
   - PSP 15.x fix
   - NBIF fix
   - Devcoredump fixes
   - DPC fix
   - RAS fixes
   - Aldebaran smu fix
   - IP discovery fix
   - SDMA 7.1 fix
   - Runtime pm fix
   - MES 12.1 fix
   - DML2 fixes
   - DCN 4.2 fixes
   - YCbCr fixes
   - Freesync fixes
   - ISM fixes
   - Overlay cursor fix
   - DC FP fixes
   - UserQ locking fixes
   - DC idle state manager fix
   - ASPM fix
   - GPUVM SVM fix
   - DCE 6 fix

  amdkfd:
   - Fix memory clear handling
   - num_of_nodes bounds check fix

  i915:
   - Fix uninitialized variable in the alignment loop [psr]

  rcar-du:
   - fix NULL-ptr crash"

* tag 'drm-next-2026-04-24' of https://gitlab.freedesktop.org/drm/kernel: (75 commits)
  drm/amdkfd: Add upper bound check for num_of_nodes
  drm: rcar-du: Fix crash when no CMM is available
  drm/amd/display: Disable 10-bit truncation and dithering on DCE 6.x
  drm/amdgpu: OR init_pte_flags into invalid leaf PTE updates
  drm/amd: Adjust ASPM support quirk to cover more Intel hosts
  drm/amd/display: Undo accidental fix revert in amdgpu_dm_ism.c
  drm/i915/psr: Init variable to avoid early exit from et alignment loop
  drm/amdgpu: drop userq fence driver refs out of fence process()
  drm/amdgpu/userq: unpin and unref doorbell and wptr outside mutex
  drm/amdgpu/userq: use pm_runtime_resume_and_get and fix err handling
  drm/amdgpu/userq: unmap_helper dont return the queue state
  drm/amdgpu/userq: unmap is to be called before freeing doorbell/wptr bo
  drm/amdgpu/userq: hold root bo lock in caller of input_va_validate
  drm/amdgpu/userq: caller to take reserv lock for vas_list_cleanup
  drm/amdgpu/userq: create_mqd does not need userq_mutex
  drm/amdgpu/userq: dont lock root bo with userq_mutex held
  drm/amdgpu/userq: fix kerneldoc for amdgpu_userq_ensure_ev_fence
  drm/amdgpu/userq: clean the VA mapping list for failed queue creation
  drm/amdgpu/userq: avoid uneccessary locking in amdgpu_userq_create
  drm/amd/display: Fix ISM teardown crash from NULL dc dereference
  ...
This commit is contained in:
Linus Torvalds 2026-04-24 11:33:23 -07:00
commit 92c4c9fdc8
98 changed files with 1380 additions and 535 deletions

View File

@ -1735,7 +1735,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
} else {
alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE |
AMDGPU_GEM_CREATE_VRAM_CLEARED;
alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;

View File

@ -866,6 +866,7 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
if (dret) {
amdgpu_connector->detected_by_load = false;
drm_edid_free(amdgpu_connector->edid);
amdgpu_connector->edid = NULL;
amdgpu_connector_get_edid(connector);
if (!amdgpu_connector->edid) {
@ -882,6 +883,7 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
*/
if (amdgpu_connector->use_digital && amdgpu_connector->shared_ddc) {
drm_edid_free(amdgpu_connector->edid);
amdgpu_connector->edid = NULL;
ret = connector_status_disconnected;
} else {
ret = connector_status_connected;
@ -977,6 +979,7 @@ static void amdgpu_connector_shared_ddc(enum drm_connector_status *status,
if (!amdgpu_display_hpd_sense(adev,
amdgpu_connector->hpd.hpd)) {
drm_edid_free(amdgpu_connector->edid);
amdgpu_connector->edid = NULL;
*status = connector_status_disconnected;
}
}
@ -1046,6 +1049,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
if (dret) {
amdgpu_connector->detected_by_load = false;
drm_edid_free(amdgpu_connector->edid);
amdgpu_connector->edid = NULL;
amdgpu_connector_get_edid(connector);
if (!amdgpu_connector->edid) {
@ -1062,6 +1066,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
*/
if ((!amdgpu_connector->use_digital) && amdgpu_connector->shared_ddc) {
drm_edid_free(amdgpu_connector->edid);
amdgpu_connector->edid = NULL;
ret = connector_status_disconnected;
} else {
ret = connector_status_connected;
@ -1412,6 +1417,7 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
}
drm_edid_free(amdgpu_connector->edid);
amdgpu_connector->edid = NULL;
if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {

View File

@ -32,6 +32,8 @@ static const guid_t BOOT = BOOT_TYPE;
static const guid_t CRASHDUMP = AMD_CRASHDUMP;
static const guid_t RUNTIME = AMD_GPU_NONSTANDARD_ERROR;
#define CPER_SIGNATURE_SZ (sizeof(((struct cper_hdr *)0)->signature))
static void __inc_entry_length(struct cper_hdr *hdr, uint32_t size)
{
hdr->record_length += size;
@ -425,23 +427,40 @@ int amdgpu_cper_generate_ce_records(struct amdgpu_device *adev,
static bool amdgpu_cper_is_hdr(struct amdgpu_ring *ring, u64 pos)
{
struct cper_hdr *chdr;
char signature[CPER_SIGNATURE_SZ];
chdr = (struct cper_hdr *)&(ring->ring[pos]);
return strcmp(chdr->signature, "CPER") ? false : true;
if ((pos << 2) >= ring->ring_size)
return false;
if ((pos << 2) + CPER_SIGNATURE_SZ <= ring->ring_size) {
memcpy(signature, &ring->ring[pos], CPER_SIGNATURE_SZ);
} else {
u32 chunk = ring->ring_size - (pos << 2);
memcpy(signature, &ring->ring[pos], chunk);
memcpy(signature + chunk, ring->ring, CPER_SIGNATURE_SZ - chunk);
}
return !memcmp(signature, "CPER", CPER_SIGNATURE_SZ);
}
static u32 amdgpu_cper_ring_get_ent_sz(struct amdgpu_ring *ring, u64 pos)
{
struct cper_hdr *chdr;
struct cper_hdr chdr;
u64 p;
u32 chunk, rec_len = 0;
chdr = (struct cper_hdr *)&(ring->ring[pos]);
chunk = ring->ring_size - (pos << 2);
if (!strcmp(chdr->signature, "CPER")) {
rec_len = chdr->record_length;
if (amdgpu_cper_is_hdr(ring, pos)) {
if (chunk >= sizeof(chdr)) {
memcpy(&chdr, &ring->ring[pos], sizeof(chdr));
} else {
memcpy(&chdr, &ring->ring[pos], chunk);
memcpy((u8 *)&chdr + chunk, ring->ring, sizeof(chdr) - chunk);
}
rec_len = chdr.record_length;
goto calc;
}
@ -450,8 +469,7 @@ static u32 amdgpu_cper_ring_get_ent_sz(struct amdgpu_ring *ring, u64 pos)
goto calc;
for (p = pos + 1; p <= ring->buf_mask; p++) {
chdr = (struct cper_hdr *)&(ring->ring[p]);
if (!strcmp(chdr->signature, "CPER")) {
if (amdgpu_cper_is_hdr(ring, p)) {
rec_len = (p - pos) << 2;
goto calc;
}

View File

@ -464,6 +464,9 @@ static void amdgpu_devcoredump_deferred_work(struct work_struct *work)
struct amdgpu_device *adev = container_of(work, typeof(*adev), coredump_work);
struct amdgpu_coredump_info *coredump = adev->coredump;
if (!coredump)
goto end;
/* Do a one-time preparation of the coredump output because
* repeatingly calling drm_coredump_printer is very slow.
*/
@ -499,7 +502,7 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
int i, off, idx;
/* No need to generate a new coredump if there's one in progress already. */
if (work_pending(&adev->coredump_work))
if (work_busy(&adev->coredump_work))
return;
if (job && job->pasid)
@ -511,7 +514,6 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
coredump->skip_vram_check = skip_vram_check;
coredump->reset_vram_lost = vram_lost;
coredump->pasid = job->pasid;
if (job && job->pasid) {
struct amdgpu_task_info *ti;
@ -521,6 +523,7 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
coredump->reset_task_info = *ti;
amdgpu_vm_put_task_info(ti);
}
coredump->pasid = job->pasid;
coredump->num_ibs = job->num_ibs;
for (i = 0; i < job->num_ibs; ++i) {
coredump->ibs[i].gpu_addr = job->ibs[i].gpu_addr;
@ -563,7 +566,7 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
coredump->rings[idx].offset = off;
memcpy(&coredump->rings_dw[off], ring->ring, ring->ring_size);
off += ring->ring_size;
off += ring->ring_size / 4;
idx++;
}
coredump->num_rings = idx;

View File

@ -1334,18 +1334,15 @@ static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
#if IS_ENABLED(CONFIG_X86)
struct cpuinfo_x86 *c = &cpu_data(0);
if (!(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 1)))
return false;
if (c->x86 == 6 &&
adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) {
if (c->x86_vendor == X86_VENDOR_INTEL) {
switch (c->x86_model) {
case VFM_MODEL(INTEL_ALDERLAKE):
case VFM_MODEL(INTEL_ALDERLAKE_L):
case VFM_MODEL(INTEL_RAPTORLAKE):
case VFM_MODEL(INTEL_RAPTORLAKE_P):
case VFM_MODEL(INTEL_RAPTORLAKE_S):
case VFM_MODEL(INTEL_TIGERLAKE):
case VFM_MODEL(INTEL_TIGERLAKE_L):
return true;
default:
return false;
@ -5518,8 +5515,6 @@ static void amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
list_add_tail(&tmp_adev->reset_list, device_list);
if (adev->shutdown)
tmp_adev->shutdown = true;
if (amdgpu_reset_in_dpc(adev))
tmp_adev->pcie_reset_ctx.in_link_reset = true;
}
if (!list_is_first(&adev->reset_list, device_list))
list_rotate_to_front(&adev->reset_list, device_list);
@ -6291,6 +6286,9 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
amdgpu_reset_set_dpc_status(adev, true);
mutex_lock(&hive->hive_lock);
} else {
if (amdgpu_device_bus_status_check(adev))
amdgpu_reset_set_dpc_status(adev, true);
}
memset(&reset_context, 0, sizeof(reset_context));
INIT_LIST_HEAD(&device_list);
@ -6411,6 +6409,7 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
tmp_adev->pcie_reset_ctx.in_link_reset = true;
} else {
adev->pcie_reset_ctx.in_link_reset = true;
set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
}
@ -6467,9 +6466,10 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
tmp_adev->pcie_reset_ctx.in_link_reset = false;
list_add_tail(&tmp_adev->reset_list, &device_list);
}
} else
} else {
adev->pcie_reset_ctx.in_link_reset = false;
list_add_tail(&adev->reset_list, &device_list);
}
amdgpu_device_sched_resume(&device_list, NULL, NULL);
amdgpu_device_gpu_resume(adev, &device_list, false);
amdgpu_device_recovery_put_reset_lock(adev, &device_list);

View File

@ -535,10 +535,11 @@ static int amdgpu_discovery_get_table_info(struct amdgpu_device *adev,
*info = &bhdrv2->table_list[table_id];
break;
case 1:
case 0:
*info = &bhdr->table_list[table_id];
break;
default:
dev_err(adev->dev, "Invalid ip discovery table version\n");
dev_err(adev->dev, "Invalid ip discovery table version %d\n",bhdr->version_major);
return -EINVAL;
}

View File

@ -2013,6 +2013,8 @@ static void amdgpu_gfx_sysfs_xcp_fini(struct amdgpu_device *adev)
(xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
device_remove_file(adev->dev, &dev_attr_current_compute_partition);
device_remove_file(adev->dev, &dev_attr_compute_partition_mem_alloc_mode);
if (xcp_switch_supported)
device_remove_file(adev->dev,
&dev_attr_available_compute_partition);

View File

@ -239,13 +239,12 @@ int amdgpu_userq_input_va_validate(struct amdgpu_device *adev,
u64 size;
int r = 0;
/* Caller must hold vm->root.bo reservation */
dma_resv_assert_held(queue->vm->root.bo->tbo.base.resv);
user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
r = amdgpu_bo_reserve(vm->root.bo, false);
if (r)
return r;
va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
if (!va_map) {
r = -EINVAL;
@ -255,13 +254,11 @@ int amdgpu_userq_input_va_validate(struct amdgpu_device *adev,
if (user_addr >= va_map->start &&
va_map->last - user_addr + 1 >= size) {
amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr);
amdgpu_bo_unreserve(vm->root.bo);
return 0;
}
r = -EINVAL;
out_err:
amdgpu_bo_unreserve(vm->root.bo);
return r;
}
@ -270,15 +267,13 @@ static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
struct amdgpu_bo_va_mapping *mapping;
bool r;
if (amdgpu_bo_reserve(vm->root.bo, false))
return false;
dma_resv_assert_held(vm->root.bo->tbo.base.resv);
mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
r = true;
else
r = false;
amdgpu_bo_unreserve(vm->root.bo);
return r;
}
@ -314,25 +309,21 @@ static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev,
{
struct amdgpu_userq_va_cursor *va_cursor, *tmp;
struct amdgpu_bo_va_mapping *mapping;
int r;
r = amdgpu_bo_reserve(queue->vm->root.bo, false);
if (r)
return r;
/* Caller must hold vm->root.bo reservation */
dma_resv_assert_held(queue->vm->root.bo->tbo.base.resv);
list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
if (!mapping) {
r = -EINVAL;
goto err;
return -EINVAL;
}
dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
queue, va_cursor->gpu_addr);
amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
}
err:
amdgpu_bo_unreserve(queue->vm->root.bo);
return r;
return 0;
}
static int amdgpu_userq_preempt_helper(struct amdgpu_usermode_queue *queue)
@ -446,8 +437,6 @@ static void amdgpu_userq_cleanup(struct amdgpu_usermode_queue *queue)
/* Wait for mode-1 reset to complete */
down_read(&adev->reset_domain->sem);
/* Drop the userq reference. */
amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
uq_funcs->mqd_destroy(queue);
/* Use interrupt-safe locking since IRQ handlers may access these XArrays */
xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
@ -455,11 +444,19 @@ static void amdgpu_userq_cleanup(struct amdgpu_usermode_queue *queue)
queue->fence_drv = NULL;
queue->userq_mgr = NULL;
list_del(&queue->userq_va_list);
kfree(queue);
up_read(&adev->reset_domain->sem);
}
/**
* amdgpu_userq_ensure_ev_fence - ensure a valid, unsignaled eviction fence exists
* @uq_mgr: the usermode queue manager for this process
* @evf_mgr: the eviction fence manager to check and rearm
*
* Ensures that a valid and not yet signaled eviction fence is attached to the
* usermode queue before any queue operations proceed. If it is signalled, then
* rearm a new eviction fence.
*/
void
amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_eviction_fence_mgr *evf_mgr)
@ -619,6 +616,9 @@ static int
amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue)
{
struct amdgpu_device *adev = uq_mgr->adev;
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
struct amdgpu_vm *vm = &fpriv->vm;
int r = 0;
cancel_delayed_work_sync(&uq_mgr->resume_work);
@ -626,38 +626,44 @@ amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_que
/* Cancel any pending hang detection work and cleanup */
cancel_delayed_work_sync(&queue->hang_detect_work);
r = amdgpu_bo_reserve(vm->root.bo, false);
if (r) {
drm_file_err(uq_mgr->file, "Failed to reserve root bo during userqueue destroy\n");
return r;
}
amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
amdgpu_bo_unreserve(vm->root.bo);
mutex_lock(&uq_mgr->userq_mutex);
queue->hang_detect_fence = NULL;
amdgpu_userq_wait_for_last_fence(queue);
r = amdgpu_bo_reserve(queue->db_obj.obj, true);
if (!r) {
amdgpu_bo_unpin(queue->db_obj.obj);
amdgpu_bo_unreserve(queue->db_obj.obj);
}
amdgpu_bo_unref(&queue->db_obj.obj);
r = amdgpu_bo_reserve(queue->wptr_obj.obj, true);
if (!r) {
amdgpu_bo_unpin(queue->wptr_obj.obj);
amdgpu_bo_unreserve(queue->wptr_obj.obj);
}
amdgpu_bo_unref(&queue->wptr_obj.obj);
atomic_dec(&uq_mgr->userq_count[queue->queue_type]);
#if defined(CONFIG_DEBUG_FS)
debugfs_remove_recursive(queue->debugfs_queue);
#endif
amdgpu_userq_detect_and_reset_queues(uq_mgr);
r = amdgpu_userq_unmap_helper(queue);
/*TODO: It requires a reset for userq hw unmap error*/
if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) {
if (r) {
drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a HW mapping userq\n");
queue->state = AMDGPU_USERQ_STATE_HUNG;
}
atomic_dec(&uq_mgr->userq_count[queue->queue_type]);
amdgpu_userq_cleanup(queue);
mutex_unlock(&uq_mgr->userq_mutex);
amdgpu_bo_reserve(queue->db_obj.obj, true);
amdgpu_bo_unpin(queue->db_obj.obj);
amdgpu_bo_unreserve(queue->db_obj.obj);
amdgpu_bo_unref(&queue->db_obj.obj);
amdgpu_bo_reserve(queue->wptr_obj.obj, true);
amdgpu_bo_unpin(queue->wptr_obj.obj);
amdgpu_bo_unreserve(queue->wptr_obj.obj);
amdgpu_bo_unref(&queue->wptr_obj.obj);
kfree(queue);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@ -730,35 +736,25 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
if (r)
return r;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
r = pm_runtime_resume_and_get(adev_to_drm(adev)->dev);
if (r < 0) {
drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
drm_file_err(uq_mgr->file, "pm_runtime_resume_and_get() failed for userqueue create\n");
return r;
}
/*
* There could be a situation that we are creating a new queue while
* the other queues under this UQ_mgr are suspended. So if there is any
* resume work pending, wait for it to get done.
*
* This will also make sure we have a valid eviction fence ready to be used.
*/
amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
uq_funcs = adev->userq_funcs[args->in.ip_type];
if (!uq_funcs) {
drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
args->in.ip_type);
r = -EINVAL;
goto unlock;
goto err_pm_runtime;
}
queue = kzalloc_obj(struct amdgpu_usermode_queue);
if (!queue) {
drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
r = -ENOMEM;
goto unlock;
goto err_pm_runtime;
}
INIT_LIST_HEAD(&queue->userq_va_list);
@ -773,20 +769,27 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
db_info.doorbell_offset = args->in.doorbell_offset;
queue->userq_mgr = uq_mgr;
/* Validate the userq virtual address.*/
r = amdgpu_bo_reserve(fpriv->vm.root.bo, false);
if (r)
goto free_queue;
if (amdgpu_userq_input_va_validate(adev, queue, args->in.queue_va, args->in.queue_size) ||
amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
r = -EINVAL;
goto free_queue;
amdgpu_bo_unreserve(fpriv->vm.root.bo);
goto clean_mapping;
}
amdgpu_bo_unreserve(fpriv->vm.root.bo);
/* Convert relative doorbell offset into absolute doorbell index */
index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
if (index == (uint64_t)-EINVAL) {
drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
r = -EINVAL;
goto free_queue;
goto clean_mapping;
}
queue->doorbell_index = index;
@ -794,7 +797,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = amdgpu_userq_fence_driver_alloc(adev, &queue->fence_drv);
if (r) {
drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
goto free_queue;
goto clean_mapping;
}
r = uq_funcs->mqd_create(queue, &args->in);
@ -803,6 +806,8 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
goto clean_fence_driver;
}
amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
/* don't map the queue if scheduling is halted */
if (adev->userq_halt_for_enforce_isolation &&
((queue->queue_type == AMDGPU_HW_IP_GFX) ||
@ -814,7 +819,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = amdgpu_userq_map_helper(queue);
if (r) {
drm_file_err(uq_mgr->file, "Failed to map Queue\n");
down_read(&adev->reset_domain->sem);
goto clean_mqd;
}
}
@ -830,9 +834,8 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
if (r) {
if (!skip_map_queue)
amdgpu_userq_unmap_helper(queue);
r = -ENOMEM;
goto clean_mqd;
goto clean_reset_domain;
}
r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
@ -840,8 +843,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
xa_erase(&uq_mgr->userq_xa, qid);
if (!skip_map_queue)
amdgpu_userq_unmap_helper(queue);
goto clean_mqd;
goto clean_reset_domain;
}
up_read(&adev->reset_domain->sem);
@ -853,16 +855,21 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
mutex_unlock(&uq_mgr->userq_mutex);
return 0;
clean_mqd:
uq_funcs->mqd_destroy(queue);
clean_reset_domain:
up_read(&adev->reset_domain->sem);
clean_mqd:
mutex_unlock(&uq_mgr->userq_mutex);
uq_funcs->mqd_destroy(queue);
clean_fence_driver:
amdgpu_userq_fence_driver_free(queue);
clean_mapping:
amdgpu_bo_reserve(fpriv->vm.root.bo, true);
amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
amdgpu_bo_unreserve(fpriv->vm.root.bo);
free_queue:
kfree(queue);
unlock:
mutex_unlock(&uq_mgr->userq_mutex);
err_pm_runtime:
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@ -992,10 +999,16 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
static int
amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_usermode_queue *queue;
unsigned long queue_id;
int ret = 0, r;
if (amdgpu_bo_reserve(vm->root.bo, false))
return false;
mutex_lock(&uq_mgr->userq_mutex);
/* Resume all the queues for this process */
xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
@ -1013,6 +1026,7 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
}
mutex_unlock(&uq_mgr->userq_mutex);
amdgpu_bo_unreserve(vm->root.bo);
if (ret)
drm_file_err(uq_mgr->file,

View File

@ -145,13 +145,22 @@ amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
amdgpu_userq_fence_driver_put(userq->fence_drv);
}
static void
amdgpu_userq_fence_put_fence_drv_array(struct amdgpu_userq_fence *userq_fence)
{
unsigned long i;
for (i = 0; i < userq_fence->fence_drv_array_count; i++)
amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
userq_fence->fence_drv_array_count = 0;
}
void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
{
struct amdgpu_userq_fence *userq_fence, *tmp;
LIST_HEAD(to_be_signaled);
struct dma_fence *fence;
unsigned long flags;
u64 rptr;
int i;
if (!fence_drv)
return;
@ -159,21 +168,26 @@ void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_d
spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
rptr = amdgpu_userq_fence_read(fence_drv);
list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
fence = &userq_fence->base;
if (rptr < fence->seqno)
list_for_each_entry(userq_fence, &fence_drv->fences, link) {
if (rptr < userq_fence->base.seqno)
break;
}
list_cut_before(&to_be_signaled, &fence_drv->fences,
&userq_fence->link);
spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
list_for_each_entry_safe(userq_fence, tmp, &to_be_signaled, link) {
fence = &userq_fence->base;
list_del_init(&userq_fence->link);
dma_fence_signal(fence);
for (i = 0; i < userq_fence->fence_drv_array_count; i++)
amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
list_del(&userq_fence->link);
/* Drop fence_drv_array outside fence_list_lock
* to avoid the recursion lock.
*/
amdgpu_userq_fence_put_fence_drv_array(userq_fence);
dma_fence_put(fence);
}
spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
}
void amdgpu_userq_fence_driver_destroy(struct kref *ref)
@ -228,6 +242,7 @@ static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
struct amdgpu_userq_fence_driver *fence_drv;
struct dma_fence *fence;
unsigned long flags;
bool signaled = false;
fence_drv = userq->fence_drv;
if (!fence_drv)
@ -274,13 +289,17 @@ static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
/* Check if hardware has already processed the job */
spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
if (!dma_fence_is_signaled(fence))
if (!dma_fence_is_signaled(fence)) {
list_add_tail(&userq_fence->link, &fence_drv->fences);
else
} else {
signaled = true;
dma_fence_put(fence);
}
spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
if (signaled)
amdgpu_userq_fence_put_fence_drv_array(userq_fence);
*f = fence;
return 0;

View File

@ -34,6 +34,7 @@
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "amdgpu_reset.h"
#include "soc15d.h"
/* Firmware Names */
@ -361,7 +362,7 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
/* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to
* restore fw data and clear buffer in amdgpu_vcn_resume() */
if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset)
if (in_ras_intr || amdgpu_reset_in_dpc(adev))
return 0;
return amdgpu_vcn_save_vcpu_bo_inst(adev, i);

View File

@ -21,6 +21,8 @@
*/
#include "amdgpu_vm.h"
#include "amdgpu.h"
#include "amdgpu_reset.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
@ -108,11 +110,19 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
struct dma_fence **fence)
{
struct amdgpu_device *adev = p->adev;
if (p->needs_flush)
atomic64_inc(&p->vm->tlb_seq);
mb();
amdgpu_device_flush_hdp(p->adev, NULL);
/* A reset flushed the HDP anyway, so that here can be skipped when a reset is ongoing */
if (!down_read_trylock(&adev->reset_domain->sem))
return 0;
amdgpu_device_flush_hdp(adev, NULL);
up_read(&adev->reset_domain->sem);
return 0;
}

View File

@ -693,8 +693,11 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
!(flags & AMDGPU_PTE_VALID) &&
!(flags & AMDGPU_PTE_PRT_FLAG(params->adev))) {
/* Workaround for fault priority problem on GMC9 */
flags |= AMDGPU_PTE_EXECUTABLE;
/* Workaround for fault priority problem on GMC9 and GFX12,
* EXECUTABLE for GMC9 fault priority and init_pte_flags
* (e.g. AMDGPU_PTE_IS_PTE on GFX12)
*/
flags |= AMDGPU_PTE_EXECUTABLE | adev->gmc.init_pte_flags;
}
/*

View File

@ -64,6 +64,11 @@
#define regPC_CONFIG_CNTL_1 0x194d
#define regPC_CONFIG_CNTL_1_BASE_IDX 1
#define regGOLDEN_TSC_COUNT_UPPER_smu_15_0_0 0x0030
#define regGOLDEN_TSC_COUNT_UPPER_smu_15_0_0_BASE_IDX 1
#define regGOLDEN_TSC_COUNT_LOWER_smu_15_0_0 0x0031
#define regGOLDEN_TSC_COUNT_LOWER_smu_15_0_0_BASE_IDX 1
#define regCP_GFX_MQD_CONTROL_DEFAULT 0x00000100
#define regCP_GFX_HQD_VMID_DEFAULT 0x00000000
#define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT 0x00000000
@ -5234,11 +5239,27 @@ static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
amdgpu_gfx_off_ctrl(adev, true);
} else {
preempt_disable();
clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
if (clock_counter_hi_pre != clock_counter_hi_after)
clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
if (amdgpu_ip_version(adev, SMUIO_HWIP, 0) < IP_VERSION(15, 0, 0)) {
clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0,
regGOLDEN_TSC_COUNT_UPPER);
clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0,
regGOLDEN_TSC_COUNT_LOWER);
clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0,
regGOLDEN_TSC_COUNT_UPPER);
if (clock_counter_hi_pre != clock_counter_hi_after)
clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0,
regGOLDEN_TSC_COUNT_LOWER);
} else {
clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0,
regGOLDEN_TSC_COUNT_UPPER_smu_15_0_0);
clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0,
regGOLDEN_TSC_COUNT_LOWER_smu_15_0_0);
clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0,
regGOLDEN_TSC_COUNT_UPPER_smu_15_0_0);
if (clock_counter_hi_pre != clock_counter_hi_after)
clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0,
regGOLDEN_TSC_COUNT_LOWER_smu_15_0_0);
}
preempt_enable();
}
clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);

View File

@ -736,15 +736,35 @@ static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring)
*/
void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
{
if (!amdgpu_sriov_vf(ring->adev)) {
struct amdgpu_device *adev = ring->adev;
if (!amdgpu_sriov_vf(adev)) {
int jpeg_inst = GET_INST(JPEG, ring->me);
uint32_t value = 0x80004000; /* default DS14 */
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
/* PCTL0__MMHUB_DEEPSLEEP_IB could be different on different mmhub version */
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(4, 1, 0):
amdgpu_ring_write(ring, 0x69004);
value = 0x80010000;
break;
case IP_VERSION(4, 2, 0):
amdgpu_ring_write(ring, 0x60804);
if (jpeg_inst & 1)
value = 0x80010000;
break;
default:
amdgpu_ring_write(ring, 0x62a04);
break;
}
amdgpu_ring_write(ring,
PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0,
0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x80004000);
amdgpu_ring_write(ring, value);
}
}
@ -757,15 +777,35 @@ void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
*/
void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
{
if (!amdgpu_sriov_vf(ring->adev)) {
struct amdgpu_device *adev = ring->adev;
if (!amdgpu_sriov_vf(adev)) {
int jpeg_inst = GET_INST(JPEG, ring->me);
uint32_t value = 0x00004000; /* default DS14 */
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x62a04);
/* PCTL0__MMHUB_DEEPSLEEP_IB could be different on different mmhub version */
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(4, 1, 0):
amdgpu_ring_write(ring, 0x69004);
value = 0x00010000;
break;
case IP_VERSION(4, 2, 0):
amdgpu_ring_write(ring, 0x60804);
if (jpeg_inst & 1)
value = 0x00010000;
break;
default:
amdgpu_ring_write(ring, 0x62a04);
break;
}
amdgpu_ring_write(ring,
PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0,
0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x00004000);
amdgpu_ring_write(ring, value);
}
}

View File

@ -322,8 +322,14 @@ static int mes_userq_mqd_create(struct amdgpu_usermode_queue *queue,
goto free_mqd;
}
r = amdgpu_bo_reserve(queue->vm->root.bo, false);
if (r) {
kfree(compute_mqd);
goto free_mqd;
}
r = amdgpu_userq_input_va_validate(adev, queue, compute_mqd->eop_va,
2048);
amdgpu_bo_unreserve(queue->vm->root.bo);
if (r) {
kfree(compute_mqd);
goto free_mqd;
@ -365,14 +371,22 @@ static int mes_userq_mqd_create(struct amdgpu_usermode_queue *queue,
userq_props->tmz_queue =
mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->shadow_va,
shadow_info.shadow_size);
r = amdgpu_bo_reserve(queue->vm->root.bo, false);
if (r) {
kfree(mqd_gfx_v11);
goto free_mqd;
}
r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->shadow_va,
shadow_info.shadow_size);
if (r) {
amdgpu_bo_unreserve(queue->vm->root.bo);
kfree(mqd_gfx_v11);
goto free_mqd;
}
r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->csa_va,
shadow_info.csa_size);
amdgpu_bo_unreserve(queue->vm->root.bo);
if (r) {
kfree(mqd_gfx_v11);
goto free_mqd;
@ -394,8 +408,15 @@ static int mes_userq_mqd_create(struct amdgpu_usermode_queue *queue,
r = -ENOMEM;
goto free_mqd;
}
r = amdgpu_bo_reserve(queue->vm->root.bo, false);
if (r) {
kfree(mqd_sdma_v11);
goto free_mqd;
}
r = amdgpu_userq_input_va_validate(adev, queue, mqd_sdma_v11->csa_va,
32);
amdgpu_bo_unreserve(queue->vm->root.bo);
if (r) {
kfree(mqd_sdma_v11);
goto free_mqd;

View File

@ -2028,7 +2028,7 @@ static int mes_v12_1_test_ring(struct amdgpu_device *adev, int xcc_id,
int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
int sdma_ring_align = 0x10, compute_ring_align = 0x100;
uint32_t tmp, xcc_offset;
int r = 0, i, wptr = 0;
int r = 0, i, j, wptr = 0;
if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
if (!adev->mes.enable_coop_mode) {
@ -2077,11 +2077,11 @@ static int mes_v12_1_test_ring(struct amdgpu_device *adev, int xcc_id,
tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
regSCRATCH_REG0);
} else {
for (i = 0; i < num_xcc; i++) {
if (xcc_id != adev->mes.master_xcc_ids[i])
for (j = 0; j < num_xcc; j++) {
if (xcc_id != adev->mes.master_xcc_ids[j])
continue;
tmp = RREG32_SOC15(GC, GET_INST(GC, i),
tmp = RREG32_SOC15(GC, GET_INST(GC, j),
regSCRATCH_REG0);
if (tmp != 0xDEADBEEF)
break;

View File

@ -54,6 +54,8 @@
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL_nbif_4_10_BASE_IDX 3
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL1_nbif_4_10 0x4f0af6
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL1_nbif_4_10_BASE_IDX 3
#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_nbif_4_10 0x0021
#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_nbif_4_10_BASE_IDX 2
static void nbif_v6_3_1_remap_hdp_registers(struct amdgpu_device *adev)
{
@ -65,7 +67,12 @@ static void nbif_v6_3_1_remap_hdp_registers(struct amdgpu_device *adev)
static u32 nbif_v6_3_1_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
u32 tmp;
if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 11, 4))
tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_nbif_4_10);
else
tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

View File

@ -32,6 +32,7 @@
#include "mp/mp_15_0_0_sh_mask.h"
MODULE_FIRMWARE("amdgpu/psp_15_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_15_0_0_ta.bin");
static int psp_v15_0_0_init_microcode(struct psp_context *psp)
{

View File

@ -1268,6 +1268,18 @@ static int sdma_v7_1_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int r;
switch (amdgpu_user_queue) {
case -1:
default:
adev->sdma.no_user_submission = true;
adev->sdma.disable_uq = true;
break;
case 0:
adev->sdma.no_user_submission = false;
adev->sdma.disable_uq = true;
break;
}
r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r) {
DRM_ERROR("Failed to init sdma firmware!\n");

View File

@ -776,6 +776,9 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
goto out_unlock;
}
if (args->num_of_nodes > kfd_topology_get_num_devices())
return -EINVAL;
/* Fill in process-aperture information for all available
* nodes, but not more than args->num_of_nodes as that is
* the amount of memory allocated by user

View File

@ -1191,6 +1191,7 @@ static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
return NULL;
}
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev);
uint32_t kfd_topology_get_num_devices(void);
int kfd_numa_node_to_apic_id(int numa_node_id);
uint32_t kfd_gpu_node_num(void);

View File

@ -2297,6 +2297,17 @@ int kfd_topology_remove_device(struct kfd_node *gpu)
return res;
}
uint32_t kfd_topology_get_num_devices(void)
{
uint32_t num_devices;
down_read(&topology_lock);
num_devices = sys_props.num_devices;
up_read(&topology_lock);
return num_devices;
}
/* kfd_topology_enum_kfd_devices - Enumerate through all devices in KFD
* topology. If GPU device is found @idx, then valid kfd_dev pointer is
* returned through @kdev

View File

@ -95,6 +95,7 @@
#include <drm/drm_utils.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_colorop.h>
#include <drm/drm_gem_atomic_helper.h>
#include <media/cec-notifier.h>
@ -2255,6 +2256,10 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
adev->dm.idle_workqueue = NULL;
}
/* Disable ISM before dc_destroy() invalidates dm->dc */
scoped_guard(mutex, &adev->dm.dc_lock)
amdgpu_dm_ism_disable(&adev->dm);
amdgpu_dm_destroy_drm_device(&adev->dm);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
@ -3994,7 +3999,7 @@ void amdgpu_dm_update_connector_after_detect(
if (sink) {
if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(connector, NULL);
amdgpu_dm_update_freesync_caps(connector, NULL, true);
/*
* retain and release below are used to
* bump up refcount for sink because the link doesn't point
@ -4006,9 +4011,9 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
amdgpu_dm_update_freesync_caps(connector,
aconnector->drm_edid);
aconnector->drm_edid, true);
} else {
amdgpu_dm_update_freesync_caps(connector, NULL);
amdgpu_dm_update_freesync_caps(connector, NULL, true);
if (!aconnector->dc_sink) {
aconnector->dc_sink = aconnector->dc_em_sink;
dc_sink_retain(aconnector->dc_sink);
@ -4052,7 +4057,7 @@ void amdgpu_dm_update_connector_after_detect(
* If yes, put it here.
*/
if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(connector, NULL);
amdgpu_dm_update_freesync_caps(connector, NULL, true);
dc_sink_release(aconnector->dc_sink);
}
@ -4085,13 +4090,13 @@ void amdgpu_dm_update_connector_after_detect(
"failed to create aconnector->requested_timing\n");
}
amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid);
amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid, true);
update_connector_ext_caps(aconnector);
dm_set_panel_type(aconnector);
} else {
hdmi_cec_unset_edid(aconnector);
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
amdgpu_dm_update_freesync_caps(connector, NULL);
amdgpu_dm_update_freesync_caps(connector, NULL, true);
aconnector->num_modes = 0;
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
@ -8855,7 +8860,7 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
* drm_edid_connector_add_modes() and need to be
* restored here.
*/
amdgpu_dm_update_freesync_caps(connector, drm_edid);
amdgpu_dm_update_freesync_caps(connector, drm_edid, false);
} else {
amdgpu_dm_connector->num_modes = 0;
}
@ -12329,6 +12334,38 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
* available.
*/
/**
* dm_plane_color_pipeline_active() - Check if a plane's color pipeline active.
* @state: DRM atomic state
* @plane: DRM plane to check
* @use_old: if true, inspect the old colorop states; otherwise the new ones
*
* A color pipeline may be selected (color_pipeline != NULL) but still is
* inactive if every colorop in the chain is bypassed. Only return
* true when at least one colorop has bypass == false, meaning the cursor
* would be subjected to the transformation in native mode.
*
* Return: true if the pipeline modifies pixels, false otherwise.
*/
static bool dm_plane_color_pipeline_active(struct drm_atomic_state *state,
struct drm_plane *plane,
bool use_old)
{
struct drm_colorop *colorop;
struct drm_colorop_state *old_colorop_state, *new_colorop_state;
int i;
for_each_oldnew_colorop_in_state(state, colorop, old_colorop_state, new_colorop_state, i) {
struct drm_colorop_state *cstate = use_old ? old_colorop_state : new_colorop_state;
if (cstate->colorop->plane != plane)
continue;
if (!cstate->bypass)
return true;
}
return false;
}
/**
* dm_crtc_get_cursor_mode() - Determine the required cursor mode on crtc
* @adev: amdgpu device
@ -12340,8 +12377,8 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
* the dm_crtc_state.
*
* The cursor should be enabled in overlay mode if there exists an underlying
* plane - on which the cursor may be blended - that is either YUV formatted, or
* scaled differently from the cursor.
* plane - on which the cursor may be blended - that is either YUV formatted,
* scaled differently from the cursor, or has a color pipeline active.
*
* Since zpos info is required, drm_atomic_normalize_zpos must be called before
* calling this function.
@ -12379,7 +12416,7 @@ static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
/*
* Cursor mode can change if a plane's format changes, scale changes, is
* enabled/disabled, or z-order changes.
* enabled/disabled, z-order changes, or color management properties change.
*/
for_each_oldnew_plane_in_state(state, plane, old_plane_state, plane_state, i) {
int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
@ -12404,6 +12441,12 @@ static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
consider_mode_change = true;
break;
}
if (dm_plane_color_pipeline_active(state, plane, true) !=
dm_plane_color_pipeline_active(state, plane, false)) {
consider_mode_change = true;
break;
}
}
if (!consider_mode_change && !crtc_state->zpos_changed)
@ -12444,6 +12487,12 @@ static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
return 0;
}
/* Underlying plane has an active color pipeline - cursor would be transformed */
if (dm_plane_color_pipeline_active(state, plane, false)) {
*cursor_mode = DM_CURSOR_OVERLAY_MODE;
return 0;
}
dm_get_plane_scale(plane_state,
&underlying_scale_w, &underlying_scale_h);
dm_get_plane_scale(cursor_state,
@ -12823,7 +12872,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
} else if (required_cursor_mode == DM_CURSOR_OVERLAY_MODE) {
drm_dbg_driver(crtc->dev,
"[CRTC:%d:%s] Cannot enable native cursor due to scaling or YUV restrictions\n",
"[CRTC:%d:%s] Cannot enable native cursor due to scaling, YUV, or color pipeline restrictions\n",
crtc->base.id, crtc->name);
ret = -EINVAL;
goto fail;
@ -13088,6 +13137,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
vsdb->freesync_mccs_vcp_code = output->amd_vsdb.freesync_mccs_vcp_code;
} else {
drm_warn(adev_to_drm(dm->adev), "Unknown EDID CEA parser results\n");
return false;
@ -13122,6 +13172,8 @@ static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
vsdb_info->amd_vsdb_version = version;
vsdb_info->min_refresh_rate_hz = min_rate;
vsdb_info->max_refresh_rate_hz = max_rate;
/* Not enabled on DMCU*/
vsdb_info->freesync_mccs_vcp_code = 0;
return true;
}
/* not amd vsdb */
@ -13260,6 +13312,10 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
*
* @connector: Connector to query.
* @drm_edid: DRM EDID from monitor
* @do_mccs: Controls whether MCCS (Monitor Control Command Set) over
* DDC (Display Data Channel) transactions are performed. When true,
* the driver queries the monitor to get or update additional FreeSync
* capability information. When false, these transactions are skipped.
*
* Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep
* track of some of the display information in the internal data struct used by
@ -13267,7 +13323,7 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
* FreeSync parameters.
*/
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
const struct drm_edid *drm_edid)
const struct drm_edid *drm_edid, bool do_mccs)
{
int i = 0;
struct amdgpu_dm_connector *amdgpu_dm_connector =
@ -13333,14 +13389,19 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
} else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
if (i >= 0 && vsdb_info.freesync_supported) {
amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
freesync_capable = true;
if (i >= 0) {
amdgpu_dm_connector->vsdb_info = vsdb_info;
sink->edid_caps.freesync_vcp_code = vsdb_info.freesync_mccs_vcp_code;
connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
if (vsdb_info.freesync_supported) {
amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
freesync_capable = true;
connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
}
}
}
@ -13349,22 +13410,38 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
amdgpu_dm_connector->pack_sdp_v1_3 = true;
amdgpu_dm_connector->as_type = as_type;
if (i >= 0) {
amdgpu_dm_connector->vsdb_info = vsdb_info;
sink->edid_caps.freesync_vcp_code = vsdb_info.freesync_mccs_vcp_code;
amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
freesync_capable = true;
if (vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
amdgpu_dm_connector->pack_sdp_v1_3 = true;
amdgpu_dm_connector->as_type = as_type;
connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
freesync_capable = true;
connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
}
}
}
/* Handle MCCS */
if (do_mccs)
dm_helpers_read_mccs_caps(adev->dm.dc->ctx, amdgpu_dm_connector->dc_link, sink);
if ((sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) &&
(!sink->edid_caps.freesync_vcp_code ||
(sink->edid_caps.freesync_vcp_code && !sink->mccs_caps.freesync_supported)))
freesync_capable = false;
if (do_mccs && sink->mccs_caps.freesync_supported && freesync_capable)
dm_helpers_mccs_vcp_set(adev->dm.dc->ctx, amdgpu_dm_connector->dc_link, sink);
update:
if (dm_con_state)
dm_con_state->freesync_capable = freesync_capable;

View File

@ -758,6 +758,11 @@ struct amdgpu_hdmi_vsdb_info {
*/
unsigned int max_refresh_rate_hz;
/**
* @freesync_mccs_vcp_code: MCCS VCP code for freesync state
*/
unsigned int freesync_mccs_vcp_code;
/**
* @replay_mode: Replay supported
*/
@ -1066,7 +1071,7 @@ void dm_restore_drm_connector_state(struct drm_device *dev,
struct drm_connector *connector);
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
const struct drm_edid *drm_edid);
const struct drm_edid *drm_edid, bool do_mccs);
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);

View File

@ -457,9 +457,12 @@ static struct drm_crtc_state *amdgpu_dm_crtc_duplicate_state(struct drm_crtc *cr
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
/*
* amdgpu_dm_ism_fini() is intentionally called in amdgpu_dm_fini().
* It must be called before dc_destroy() in amdgpu_dm_fini()
* to avoid ISM accessing an invalid dc handle once dc is released.
*/
amdgpu_dm_ism_fini(&acrtc->ism);
drm_crtc_cleanup(crtc);
kfree(crtc);
}

View File

@ -49,6 +49,45 @@
#include "ddc_service_types.h"
#include "clk_mgr.h"
#define MCCS_DEST_ADDR (0x6E >> 1)
#define MCCS_SRC_ADDR 0x51
#define MCCS_LENGTH_OFFSET 0x80
#define MCCS_MAX_DATA_SIZE 0x20
enum mccs_op_code {
MCCS_OP_CODE_VCP_REQUEST = 0x01,
MCCS_OP_CODE_VCP_REPLY = 0x02,
MCCS_OP_CODE_VCP_SET = 0x03,
MCCS_OP_CODE_VCP_RESET = 0x09,
MCCS_OP_CODE_CAP_REQUEST = 0xF3,
MCCS_OP_CODE_CAP_REPLY = 0xE3
};
enum mccs_op_buff_size {
MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST = 5,
MCCS_OP_BUFF_SIZE_RD_VCP_REQUEST = 11,
MCCS_OP_BUFF_SIZE_WR_VCP_SET = 7,
};
enum vcp_reply_mask {
FREESYNC_SUPPORTED = 0x1
};
union vcp_reply {
struct {
unsigned char src_addr;
unsigned char length; /* Length is offset by MccsLengthOffs = 0x80 */
unsigned char reply_op_code; /* Should return MCCS_OP_CODE_VCP_REPLY = 0x02 */
unsigned char result_code; /* 00h No Error, 01h Unsupported VCP Code */
unsigned char request_code; /* Should return mccs vcp code sent in the vcp request */
unsigned char type_code; /* VCP type code: 00h Set parameter, 01h Momentary */
unsigned char max_value[2]; /* 2 bytes returning max value current value */
unsigned char present_value[2]; /* NOTE: Byte0 is MSB, Byte1 is LSB */
unsigned char check_sum;
} bytes;
unsigned char raw[11];
};
static u32 edid_extract_panel_id(struct edid *edid)
{
return (u32)edid->mfg_id[0] << 24 |
@ -1400,6 +1439,8 @@ static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id)
case DP_BRANCH_DEVICE_ID_0060AD:
case DP_BRANCH_DEVICE_ID_00E04C:
case DP_BRANCH_DEVICE_ID_90CC24:
case DP_BRANCH_DEVICE_ID_001CF8:
case DP_BRANCH_DEVICE_ID_001FF2:
ret_val = true;
break;
default:
@ -1439,3 +1480,203 @@ bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream
// TODO
return false;
}
static int mccs_operation_vcp_request(unsigned int vcp_code, struct dc_link *link,
union vcp_reply *reply)
{
const unsigned char retry_interval_ms = 40;
unsigned char retry = 5;
struct amdgpu_dm_connector *aconnector = link->priv;
struct i2c_adapter *ddc;
struct i2c_msg msg = {0};
int ret = 0;
int idx;
unsigned char wr_data[MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST] = {
MCCS_SRC_ADDR, /* Byte0 - Src Addr */
MCCS_LENGTH_OFFSET + 2, /* Byte1 - Length */
MCCS_OP_CODE_VCP_REQUEST, /* Byte2 - MCCS Command */
(unsigned char) vcp_code, /* Byte3 - VCP Code */
MCCS_DEST_ADDR << 1 /* Byte4 - CheckSum */
};
/* calculate checksum */
for (idx = 0; idx < (MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST - 1); idx++)
wr_data[(MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST-1)] ^= wr_data[idx];
if (link->aux_mode)
ddc = &aconnector->dm_dp_aux.aux.ddc;
else
ddc = &aconnector->i2c->base;
do {
msg.addr = MCCS_DEST_ADDR;
msg.flags = 0;
msg.len = MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST;
msg.buf = wr_data;
ret = i2c_transfer(ddc, &msg, 1);
if (ret != 1)
goto mccs_retry;
msleep(retry_interval_ms);
msg.addr = MCCS_DEST_ADDR;
msg.flags = I2C_M_RD;
msg.len = MCCS_OP_BUFF_SIZE_RD_VCP_REQUEST;
msg.buf = reply->raw;
ret = i2c_transfer(ddc, &msg, 1);
/* sink might reply with null msg if it can't reply in time */
if (ret == 1 && reply->bytes.length > MCCS_LENGTH_OFFSET)
break;
mccs_retry:
retry--;
msleep(retry_interval_ms);
} while (retry);
if (!retry) {
drm_dbg_driver(aconnector->base.dev,
"%s: MCCS VCP request failed after retries", __func__);
return -EIO;
}
return 0;
}
void dm_helpers_read_mccs_caps(struct dc_context *ctx, struct dc_link *link,
struct dc_sink *sink)
{
bool mccs_op = false;
struct dpcd_caps *dpcd_caps;
struct drm_device *dev;
uint16_t freesync_vcp_value = 0;
union vcp_reply vcp_reply_value = {0};
if (!ctx)
return;
dev = adev_to_drm(ctx->driver_context);
if (!link || !sink) {
drm_dbg_driver(dev, "%s: link or sink is NULL", __func__);
return;
}
sink->mccs_caps.freesync_supported = false;
dpcd_caps = &link->dpcd_caps;
if (sink->edid_caps.freesync_vcp_code != 0) {
if (dc_is_dp_signal(link->connector_signal)) {
if ((dpcd_caps->dpcd_rev.raw >= DPCD_REV_14) &&
(dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) &&
dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id) &&
(dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true))
mccs_op = true;
if ((dpcd_caps->dongle_type != DISPLAY_DONGLE_NONE &&
dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)) {
if (mccs_op == false)
drm_dbg_driver(dev, "%s: Legacy Pcon support", __func__);
mccs_op = true;
}
if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
// Todo: Freesync over MST
mccs_op = false;
}
}
if (dc_is_hdmi_signal(link->connector_signal)) {
drm_dbg_driver(dev, "%s: Local HDMI sink", __func__);
mccs_op = true;
}
if (mccs_op == true) {
// MCCS VCP request to get VCP value
if (!mccs_operation_vcp_request(sink->edid_caps.freesync_vcp_code, link,
&vcp_reply_value)) {
freesync_vcp_value = vcp_reply_value.bytes.present_value[1];
freesync_vcp_value |= (uint16_t) vcp_reply_value.bytes.present_value[0] << 8;
}
// If VCP Value bit 0 is 1, freesyncSupport = true
sink->mccs_caps.freesync_supported =
(freesync_vcp_value & FREESYNC_SUPPORTED) ? true : false;
}
}
}
static int mccs_operation_vcp_set(unsigned int vcp_code, struct dc_link *link, uint16_t value)
{
const unsigned char retry_interval_ms = 40;
unsigned char retry = 5;
struct amdgpu_dm_connector *aconnector = link->priv;
struct i2c_adapter *ddc;
struct i2c_msg msg = {0};
int ret = 0;
int idx;
unsigned char wr_data[MCCS_OP_BUFF_SIZE_WR_VCP_SET] = {
MCCS_SRC_ADDR, /* Byte0 - Src Addr */
MCCS_LENGTH_OFFSET + 4, /* Byte1 - Length */
MCCS_OP_CODE_VCP_SET, /* Byte2 - MCCS Command */
(unsigned char)vcp_code, /* Byte3 - VCP Code */
(unsigned char)(value >> 8), /* Byte4 - Value High Byte */
(unsigned char)(value & 0xFF), /* Byte5 - Value Low Byte */
MCCS_DEST_ADDR << 1 /* Byte6 - CheckSum */
};
/* calculate checksum */
for (idx = 0; idx < (MCCS_OP_BUFF_SIZE_WR_VCP_SET - 1); idx++)
wr_data[MCCS_OP_BUFF_SIZE_WR_VCP_SET - 1] ^= wr_data[idx];
if (link->aux_mode)
ddc = &aconnector->dm_dp_aux.aux.ddc;
else
ddc = &aconnector->i2c->base;
do {
msg.addr = MCCS_DEST_ADDR;
msg.flags = 0;
msg.len = MCCS_OP_BUFF_SIZE_WR_VCP_SET;
msg.buf = wr_data;
ret = i2c_transfer(ddc, &msg, 1);
if (ret == 1)
break;
retry--;
msleep(retry_interval_ms);
} while (retry);
if (!retry)
return -EIO;
return 0;
}
void dm_helpers_mccs_vcp_set(struct dc_context *ctx, struct dc_link *link,
struct dc_sink *sink)
{
struct drm_device *dev;
const uint16_t enable = 0x0101;
if (!ctx)
return;
dev = adev_to_drm(ctx->driver_context);
if (!link || !sink) {
drm_dbg_driver(dev, "%s: link or sink is NULL", __func__);
return;
}
if (!sink->mccs_caps.freesync_supported) {
drm_dbg_driver(dev, "%s: MCCS freesync not supported on this sink", __func__);
return;
}
if (mccs_operation_vcp_set(sink->edid_caps.freesync_vcp_code, link, enable))
drm_dbg_driver(dev, "%s: Failed to set VCP code %d", __func__,
sink->edid_caps.freesync_vcp_code);
}

View File

@ -270,7 +270,6 @@ static void dm_ism_commit_idle_optimization_state(struct amdgpu_dm_ism *ism,
struct amdgpu_crtc *acrtc = ism_to_amdgpu_crtc(ism);
struct amdgpu_device *adev = drm_to_adev(acrtc->base.dev);
struct amdgpu_display_manager *dm = &adev->dm;
int r;
trace_amdgpu_dm_ism_commit(dm->active_vblank_irq_count,
vblank_enabled,
@ -323,16 +322,7 @@ static void dm_ism_commit_idle_optimization_state(struct amdgpu_dm_ism *ism,
*/
if (!vblank_enabled && dm->active_vblank_irq_count == 0) {
dc_post_update_surfaces_to_stream(dm->dc);
r = amdgpu_dpm_pause_power_profile(adev, true);
if (r)
dev_warn(adev->dev, "failed to set default power profile mode\n");
dc_allow_idle_optimizations(dm->dc, true);
r = amdgpu_dpm_pause_power_profile(adev, false);
if (r)
dev_warn(adev->dev, "failed to restore the power profile mode\n");
}
}
@ -472,6 +462,9 @@ void amdgpu_dm_ism_commit_event(struct amdgpu_dm_ism *ism,
/* ISM transitions must be called with mutex acquired */
ASSERT(mutex_is_locked(&dm->dc_lock));
/* ISM should not run after dc is destroyed */
ASSERT(dm->dc);
if (!acrtc_state) {
trace_amdgpu_dm_ism_event(acrtc->crtc_id, "NO_STATE",
"NO_STATE", "N/A");
@ -545,6 +538,8 @@ void amdgpu_dm_ism_disable(struct amdgpu_display_manager *dm)
struct amdgpu_crtc *acrtc;
struct amdgpu_dm_ism *ism;
ASSERT(mutex_is_locked(&dm->dc_lock));
drm_for_each_crtc(crtc, dm->ddev) {
acrtc = to_amdgpu_crtc(crtc);
ism = &acrtc->ism;

View File

@ -474,7 +474,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(
connector, aconnector->drm_edid);
connector, aconnector->drm_edid, true);
#if defined(CONFIG_DRM_AMD_DC_FP)
if (!validate_dsc_caps_on_connector(aconnector))

View File

@ -611,80 +611,6 @@ static struct clk_bw_params dcn42_bw_params = {
};
static struct wm_table ddr5_wm_table = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
}
};
static struct wm_table lpddr5_wm_table = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
}
};
struct dcn42_ss_info_table dcn42_ss_info_table = {
.ss_divider = 1000,
.ss_percentage = {0, 0, 375, 375, 375}
@ -1141,10 +1067,6 @@ void dcn42_clk_mgr_construct(
if (ctx->dc_bios->integrated_info) {
clk_mgr->base.base.dentist_vco_freq_khz = ctx->dc_bios->integrated_info->dentist_vco_freq;
if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType)
dcn42_bw_params.wm_table = lpddr5_wm_table;
else
dcn42_bw_params.wm_table = ddr5_wm_table;
dcn42_bw_params.vram_type = ctx->dc_bios->integrated_info->memory_type;
dcn42_bw_params.dram_channel_width_bytes = ctx->dc_bios->integrated_info->memory_type == 0x22 ? 8 : 4;
dcn42_bw_params.num_channels = ctx->dc_bios->integrated_info->ma_channel_number ? ctx->dc_bios->integrated_info->ma_channel_number : 1;

View File

@ -23,8 +23,6 @@
*
*/
#include <linux/array_size.h>
#include "dm_services.h"
#include "core_types.h"
#include "timing_generator.h"

View File

@ -5069,6 +5069,12 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
}
}
if (stream->ctx->dce_version < DCE_VERSION_8_0 &&
stream->timing.display_color_depth >= COLOR_DEPTH_101010) {
/* DCE 6.x doesn't support 10-bit truncation or dither options. */
option = DITHER_OPTION_DISABLE;
}
if (option == DITHER_OPTION_DISABLE)
return;

View File

@ -63,7 +63,7 @@ struct dcn_dsc_reg_state;
struct dcn_optc_reg_state;
struct dcn_dccg_reg_state;
#define DC_VER "3.2.376"
#define DC_VER "3.2.378"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@ -562,6 +562,7 @@ struct dc_config {
bool frame_update_cmd_version2;
struct spl_sharpness_range dcn_sharpness_range;
struct spl_sharpness_range dcn_override_sharpness_range;
bool no_native422_support;
};
enum visual_confirm {
@ -986,7 +987,6 @@ struct link_service;
* causing an issue or not.
*/
struct dc_debug_options {
bool native422_support;
bool disable_dsc;
enum visual_confirm visual_confirm;
int visual_confirm_rect_height;
@ -1061,9 +1061,11 @@ struct dc_debug_options {
bool hdmi20_disable;
bool skip_detection_link_training;
uint32_t edid_read_retry_times;
unsigned int force_odm_combine; //bit vector based on otg inst
unsigned int seamless_boot_odm_combine;
unsigned int force_odm_combine_4to1; //bit vector based on otg inst
uint8_t force_odm_combine; //bit vector based on otg inst
uint8_t seamless_boot_odm_combine;
uint8_t force_odm_combine_4to1; //bit vector based on otg inst
int minimum_z8_residency_time;
int minimum_z10_residency_time;
bool disable_z9_mpc;
@ -2725,6 +2727,7 @@ struct dc_sink {
struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
bool converter_disable_audio;
struct mccs_caps mccs_caps;
struct scdc_caps scdc_caps;
struct dc_sink_dsc_caps dsc_caps;
struct dc_sink_fec_caps fec_caps;

View File

@ -52,6 +52,7 @@ struct dc_dsc_policy {
uint32_t max_target_bpp;
uint32_t min_target_bpp;
bool enable_dsc_when_not_needed;
bool ycbcr422_simple;
};
struct dc_dsc_config_options {

View File

@ -162,13 +162,13 @@ struct test_pattern {
#define SUBVP_DRR_MARGIN_US 100 // 100us for DRR margin (SubVP + DRR)
struct dc_stream_debug_options {
char force_odm_combine_segments;
uint8_t force_odm_combine_segments;
/*
* When force_odm_combine_segments is non zero, allow dc to
* temporarily transition to ODM bypass when minimal transition state
* is required to prevent visual glitches showing on the screen
*/
char allow_transition_for_forced_odm;
uint8_t allow_transition_for_forced_odm;
};
#define LUMINANCE_DATA_TABLE_SIZE 10

View File

@ -205,6 +205,8 @@ struct dc_edid_caps {
uint32_t audio_latency;
uint32_t video_latency;
unsigned char freesync_vcp_code;
uint8_t qs_bit;
uint8_t qy_bit;
@ -1313,6 +1315,10 @@ struct dc_panel_config {
} rio;
};
struct mccs_caps {
bool freesync_supported;
};
#define MAX_SINKS_PER_LINK 4
/*

View File

@ -23,8 +23,6 @@
*
*/
#include <linux/array_size.h>
#include "dm_services.h"

View File

@ -181,6 +181,16 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct dc_link *link,
struct dc_sink *sink);
void dm_helpers_read_mccs_caps(
struct dc_context *ctx,
struct dc_link *link,
struct dc_sink *sink);
void dm_helpers_mccs_vcp_set(
struct dc_context *ctx,
struct dc_link *link,
struct dc_sink *sink);
bool dm_helpers_dp_handle_test_pattern_request(
struct dc_context *ctx,
const struct dc_link *link,

View File

@ -2399,7 +2399,7 @@ static struct _vcs_dpi_voltage_scaling_st construct_low_pstate_lvl(struct clk_li
return low_pstate_lvl;
}
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
void dcn21_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
{
struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);

View File

@ -78,7 +78,7 @@ int dcn21_populate_dml_pipes_from_context(struct dc *dc,
enum dc_validate_mode validate_mode);
bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, enum
dc_validate_mode, display_e2e_pipe_params_st *pipes);
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn21_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params);

View File

@ -587,7 +587,7 @@ void dcn31_calculate_wm_and_dlg_fp(
context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - total_det;
}
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
void dcn31_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
{
struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct clk_limit_table *clk_table = &bw_params->clk_table;
@ -665,7 +665,7 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31);
}
void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
void dcn315_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
{
struct clk_limit_table *clk_table = &bw_params->clk_table;
int i, max_dispclk_mhz = 0, max_dppclk_mhz = 0;
@ -726,7 +726,7 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315);
}
void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
void dcn316_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
{
struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct clk_limit_table *clk_table = &bw_params->clk_table;

View File

@ -44,9 +44,9 @@ void dcn31_calculate_wm_and_dlg_fp(
int pipe_cnt,
int vlevel);
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn31_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
void dcn315_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
void dcn316_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc);
int dcn_get_approx_det_segs_required_for_pstate(
struct _vcs_dpi_soc_bounding_box_st *soc,

View File

@ -1610,38 +1610,6 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
return false;
}
static void dcn20_adjust_freesync_v_startup(const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
{
struct dc_crtc_timing patched_crtc_timing;
uint32_t asic_blank_end = 0;
uint32_t asic_blank_start = 0;
uint32_t newVstartup = 0;
patched_crtc_timing = *dc_crtc_timing;
if (patched_crtc_timing.flags.INTERLACE == 1) {
if (patched_crtc_timing.v_front_porch < 2)
patched_crtc_timing.v_front_porch = 2;
} else {
if (patched_crtc_timing.v_front_porch < 1)
patched_crtc_timing.v_front_porch = 1;
}
/* blank_start = frame end - front porch */
asic_blank_start = patched_crtc_timing.v_total -
patched_crtc_timing.v_front_porch;
/* blank_end = blank_start - active */
asic_blank_end = asic_blank_start -
patched_crtc_timing.v_border_bottom -
patched_crtc_timing.v_addressable -
patched_crtc_timing.v_border_top;
newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
*vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
}
static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt, int vlevel)
@ -1756,11 +1724,6 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
}
}
if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
dcn20_adjust_freesync_v_startup(
&context->res_ctx.pipe_ctx[i].stream->timing,
&context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
pipe_idx++;
}
/* If DCN isn't making memory requests we can allow pstate change and lower clocks */

View File

@ -100,6 +100,7 @@ DML21 += src/dml2_mcg/dml2_mcg_factory.o
DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
DML21 += src/dml2_pmo/dml2_pmo_factory.o
DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
DML21 += src/dml2_pmo/dml2_pmo_dcn42.o
DML21 += src/dml2_standalone_libraries/lib_float_math.o
DML21 += dml21_translation_helper.o
DML21 += dml21_wrapper.o

View File

@ -1812,6 +1812,8 @@ static dml_float_t CalculateWriteBackDISPCLK(
dml_uint_t WritebackLineBufferSize,
dml_float_t DISPCLKDPPCLKVCOSpeed)
{
(void)WritebackPixelFormat;
(void)WritebackVRatio;
dml_float_t DISPCLK_H, DISPCLK_V, DISPCLK_HB;
DISPCLK_H = PixelClock * dml_ceil(WritebackHTaps / 8.0, 1) / WritebackHRatio;
@ -1830,6 +1832,8 @@ static dml_float_t CalculateWriteBackDelay(
dml_uint_t WritebackSourceHeight,
dml_uint_t HTotal)
{
(void)WritebackPixelFormat;
(void)WritebackHRatio;
dml_float_t CalculateWriteBackDelay;
dml_float_t Line_length;
dml_float_t Output_lines_last_notclamped;
@ -1977,6 +1981,7 @@ static void CalculateFlipSchedule(
dml_float_t *final_flip_bw,
dml_bool_t *ImmediateFlipSupportedForPipe)
{
(void)HostVMMinPageSize;
dml_float_t min_row_time = 0.0;
dml_uint_t HostVMDynamicLevelsTrips = 0;
dml_float_t TimeForFetchingMetaPTEImmediateFlip = 0;
@ -2118,6 +2123,11 @@ static void CalculateDCCConfiguration(
dml_uint_t *IndependentBlockLuma,
dml_uint_t *IndependentBlockChroma)
{
(void)SurfaceWidthChroma;
(void)SurfaceHeightChroma;
(void)TilingFormat;
(void)BytePerPixelDETY;
(void)BytePerPixelDETC;
dml_uint_t DETBufferSizeForDCC = nomDETInKByte * 1024;
dml_uint_t yuv420;
@ -2489,6 +2499,7 @@ static dml_uint_t CalculateVMAndRowBytes(
dml_uint_t *DPDE0BytesFrame,
dml_uint_t *MetaPTEBytesFrame)
{
(void)SourcePixelFormat;
dml_uint_t MPDEBytesFrame;
dml_uint_t DCCMetaSurfaceBytes;
dml_uint_t ExtraDPDEBytesFrame;
@ -3662,6 +3673,8 @@ static void CalculateVMGroupAndRequestTimes(
dml_float_t TimePerVMRequestVBlank[],
dml_float_t TimePerVMRequestFlip[])
{
(void)dpte_row_width_luma_ub;
(void)dpte_row_width_chroma_ub;
dml_uint_t num_group_per_lower_vm_stage;
dml_uint_t num_req_per_lower_vm_stage;
@ -3762,6 +3775,7 @@ static void CalculateVMGroupAndRequestTimes(
static void CalculateStutterEfficiency(struct display_mode_lib_scratch_st *scratch,
struct CalculateStutterEfficiency_params_st *p)
{
(void)scratch;
dml_float_t DETBufferingTimeY = 0;
dml_float_t SwathWidthYCriticalSurface = 0;
dml_float_t SwathHeightYCriticalSurface = 0;
@ -4085,6 +4099,7 @@ static void CalculateStutterEfficiency(struct display_mode_lib_scratch_st *scrat
static void CalculateSwathAndDETConfiguration(struct display_mode_lib_scratch_st *scratch,
struct CalculateSwathAndDETConfiguration_params_st *p)
{
(void)scratch;
dml_uint_t MaximumSwathHeightY[__DML_NUM_PLANES__];
dml_uint_t MaximumSwathHeightC[__DML_NUM_PLANES__];
dml_uint_t RoundedUpMaxSwathSizeBytesY[__DML_NUM_PLANES__];
@ -4331,6 +4346,7 @@ static void CalculateSwathWidth(
dml_uint_t swath_width_luma_ub[], // per-pipe
dml_uint_t swath_width_chroma_ub[]) // per-pipe
{
(void)BytePerPixY;
enum dml_odm_mode MainSurfaceODMMode;
dml_uint_t surface_width_ub_l;
dml_uint_t surface_height_ub_l;
@ -5029,6 +5045,7 @@ static void CalculateMaxDETAndMinCompressedBufferSize(
dml_uint_t *nomDETInKByte,
dml_uint_t *MinCompressedBufferSizeInKByte)
{
(void)ROBBufferSizeInKByte;
*MaxTotalDETInKByte = ConfigReturnBufferSizeInKByte - ConfigReturnBufferSegmentSizeInKByte;
*nomDETInKByte = (dml_uint_t)(dml_floor((dml_float_t) *MaxTotalDETInKByte / (dml_float_t) MaxNumDPP, ConfigReturnBufferSegmentSizeInKByte));
*MinCompressedBufferSizeInKByte = ConfigReturnBufferSizeInKByte - *MaxTotalDETInKByte;

View File

@ -178,6 +178,7 @@ dml_float_t dml_log2(dml_float_t x)
dml_float_t dml_round(dml_float_t val, dml_bool_t bankers_rounding)
{
(void)bankers_rounding;
// if (bankers_rounding)
// return (dml_float_t) lrint(val);
// else {
@ -217,6 +218,7 @@ dml_uint_t dml_round_to_multiple(dml_uint_t num, dml_uint_t multiple, dml_bool_t
void dml_print_data_rq_regs_st(const dml_display_plane_rq_regs_st *rq_regs)
{
(void)rq_regs;
dml_print("DML: ===================================== \n");
dml_print("DML: DISPLAY_PLANE_RQ_REGS_ST\n");
dml_print("DML: chunk_size = 0x%x\n", rq_regs->chunk_size);
@ -248,6 +250,7 @@ void dml_print_rq_regs_st(const dml_display_rq_regs_st *rq_regs)
void dml_print_dlg_regs_st(const dml_display_dlg_regs_st *dlg_regs)
{
(void)dlg_regs;
dml_print("DML: ===================================== \n");
dml_print("DML: DISPLAY_DLG_REGS_ST \n");
dml_print("DML: refcyc_h_blank_end = 0x%x\n", dlg_regs->refcyc_h_blank_end);
@ -299,6 +302,7 @@ void dml_print_dlg_regs_st(const dml_display_dlg_regs_st *dlg_regs)
void dml_print_ttu_regs_st(const dml_display_ttu_regs_st *ttu_regs)
{
(void)ttu_regs;
dml_print("DML: ===================================== \n");
dml_print("DML: DISPLAY_TTU_REGS_ST \n");
dml_print("DML: qos_level_low_wm = 0x%x\n", ttu_regs->qos_level_low_wm);
@ -326,6 +330,7 @@ void dml_print_ttu_regs_st(const dml_display_ttu_regs_st *ttu_regs)
void dml_print_dml_policy(const struct dml_mode_eval_policy_st *policy)
{
(void)policy;
dml_print("DML: ===================================== \n");
dml_print("DML: DML_MODE_EVAL_POLICY_ST\n");
dml_print("DML: Policy: UseUnboundedRequesting = 0x%x\n", policy->UseUnboundedRequesting);
@ -353,6 +358,8 @@ void dml_print_dml_policy(const struct dml_mode_eval_policy_st *policy)
void dml_print_mode_support(struct display_mode_lib_st *mode_lib, dml_uint_t j)
{
(void)j;
(void)mode_lib;
dml_print("DML: MODE SUPPORT: ===============================================\n");
dml_print("DML: MODE SUPPORT: Voltage State %d\n", j);
dml_print("DML: MODE SUPPORT: Mode Supported : %s\n", mode_lib->ms.support.ModeSupport[j] == true ? "Supported" : "NOT Supported");
@ -526,6 +533,7 @@ void dml_print_dml_mode_support_info(const struct dml_mode_support_info_st *supp
void dml_print_dml_display_cfg_timing(const struct dml_timing_cfg_st *timing, dml_uint_t num_plane)
{
(void)timing;
for (dml_uint_t i = 0; i < num_plane; i++) {
dml_print("DML: timing_cfg: plane=%d, HTotal = %d\n", i, timing->HTotal[i]);
dml_print("DML: timing_cfg: plane=%d, VTotal = %d\n", i, timing->VTotal[i]);
@ -542,6 +550,7 @@ void dml_print_dml_display_cfg_timing(const struct dml_timing_cfg_st *timing, dm
void dml_print_dml_display_cfg_plane(const struct dml_plane_cfg_st *plane, dml_uint_t num_plane)
{
(void)plane;
dml_print("DML: plane_cfg: num_plane = %d\n", num_plane);
dml_print("DML: plane_cfg: GPUVMEnable = %d\n", plane->GPUVMEnable);
dml_print("DML: plane_cfg: HostVMEnable = %d\n", plane->HostVMEnable);
@ -590,6 +599,7 @@ void dml_print_dml_display_cfg_plane(const struct dml_plane_cfg_st *plane, dml_u
void dml_print_dml_display_cfg_surface(const struct dml_surface_cfg_st *surface, dml_uint_t num_plane)
{
(void)surface;
for (dml_uint_t i = 0; i < num_plane; i++) {
dml_print("DML: surface_cfg: plane=%d, PitchY = %d\n", i, surface->PitchY[i]);
dml_print("DML: surface_cfg: plane=%d, SurfaceWidthY = %d\n", i, surface->SurfaceWidthY[i]);
@ -609,6 +619,7 @@ void dml_print_dml_display_cfg_surface(const struct dml_surface_cfg_st *surface,
void dml_print_dml_display_cfg_hw_resource(const struct dml_hw_resource_st *hw, dml_uint_t num_plane)
{
(void)hw;
for (dml_uint_t i = 0; i < num_plane; i++) {
dml_print("DML: hw_resource: plane=%d, ODMMode = %d\n", i, hw->ODMMode[i]);
dml_print("DML: hw_resource: plane=%d, DPPPerSurface = %d\n", i, hw->DPPPerSurface[i]);
@ -620,6 +631,7 @@ void dml_print_dml_display_cfg_hw_resource(const struct dml_hw_resource_st *hw,
__DML_DLL_EXPORT__ void dml_print_soc_state_bounding_box(const struct soc_state_bounding_box_st *state)
{
(void)state;
dml_print("DML: state_bbox: socclk_mhz = %f\n", state->socclk_mhz);
dml_print("DML: state_bbox: dscclk_mhz = %f\n", state->dscclk_mhz);
dml_print("DML: state_bbox: phyclk_mhz = %f\n", state->phyclk_mhz);
@ -649,6 +661,7 @@ __DML_DLL_EXPORT__ void dml_print_soc_state_bounding_box(const struct soc_state_
__DML_DLL_EXPORT__ void dml_print_soc_bounding_box(const struct soc_bounding_box_st *soc)
{
(void)soc;
dml_print("DML: soc_bbox: dprefclk_mhz = %f\n", soc->dprefclk_mhz);
dml_print("DML: soc_bbox: xtalclk_mhz = %f\n", soc->xtalclk_mhz);
dml_print("DML: soc_bbox: pcierefclk_mhz = %f\n", soc->pcierefclk_mhz);
@ -686,6 +699,7 @@ __DML_DLL_EXPORT__ void dml_print_soc_bounding_box(const struct soc_bounding_box
__DML_DLL_EXPORT__ void dml_print_clk_cfg(const struct dml_clk_cfg_st *clk_cfg)
{
(void)clk_cfg;
dml_print("DML: clk_cfg: 0-use_required, 1-use pipe.clks_cfg, 2-use state bbox\n");
dml_print("DML: clk_cfg: dcfclk_option = %d\n", clk_cfg->dcfclk_option);
dml_print("DML: clk_cfg: dispclk_option = %d\n", clk_cfg->dispclk_option);

View File

@ -389,7 +389,9 @@ static void populate_dml21_dummy_surface_cfg(struct dml2_surface_cfg *surface, c
surface->tiling = dml2_sw_64kb_2d;
}
static void populate_dml21_dummy_plane_cfg(struct dml2_plane_parameters *plane, const struct dc_stream_state *stream)
static void populate_dml21_dummy_plane_cfg(struct dml2_plane_parameters *plane,
const struct dc_stream_state *stream,
const struct dml2_soc_bb *soc_bb)
{
unsigned int width, height;
@ -433,7 +435,8 @@ static void populate_dml21_dummy_plane_cfg(struct dml2_plane_parameters *plane,
plane->pixel_format = dml2_444_32;
plane->dynamic_meta_data.enable = false;
plane->overrides.gpuvm_min_page_size_kbytes = 256;
plane->overrides.gpuvm_min_page_size_kbytes = soc_bb->gpuvm_min_page_size_kbytes;
plane->overrides.hostvm_min_page_size_kbytes = soc_bb->hostvm_min_page_size_kbytes;
}
static void populate_dml21_surface_config_from_plane_state(
@ -441,6 +444,7 @@ static void populate_dml21_surface_config_from_plane_state(
struct dml2_surface_cfg *surface,
const struct dc_plane_state *plane_state)
{
(void)in_dc;
surface->plane0.pitch = plane_state->plane_size.surface_pitch;
surface->plane1.pitch = plane_state->plane_size.chroma_pitch;
surface->plane0.height = plane_state->plane_size.surface_size.height;
@ -503,7 +507,7 @@ static const struct scaler_data *get_scaler_data_for_plane(
static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dml_ctx,
struct dml2_plane_parameters *plane, const struct dc_plane_state *plane_state,
const struct dc_state *context, unsigned int stream_index)
const struct dc_state *context, unsigned int stream_index, const struct dml2_soc_bb *soc_bb)
{
const struct scaler_data *scaler_data = get_scaler_data_for_plane(dml_ctx, plane_state, context);
struct dc_stream_state *stream = context->streams[stream_index];
@ -647,7 +651,8 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->composition.rotation_angle = (enum dml2_rotation_angle) plane_state->rotation;
plane->stream_index = stream_index;
plane->overrides.gpuvm_min_page_size_kbytes = 256;
plane->overrides.gpuvm_min_page_size_kbytes = soc_bb->gpuvm_min_page_size_kbytes;
plane->overrides.hostvm_min_page_size_kbytes = soc_bb->hostvm_min_page_size_kbytes;
plane->immediate_flip = plane_state->flip_immediate;
@ -785,7 +790,9 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
if (context->stream_status[stream_index].plane_count == 0) {
disp_cfg_plane_location = dml_dispcfg->num_planes++;
populate_dml21_dummy_surface_cfg(&dml_dispcfg->plane_descriptors[disp_cfg_plane_location].surface, context->streams[stream_index]);
populate_dml21_dummy_plane_cfg(&dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->streams[stream_index]);
populate_dml21_dummy_plane_cfg(
&dml_dispcfg->plane_descriptors[disp_cfg_plane_location],
context->streams[stream_index], &dml_ctx->v21.dml_init.soc_bb);
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
} else {
for (plane_index = 0; plane_index < context->stream_status[stream_index].plane_count; plane_index++) {
@ -797,7 +804,10 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml21_surface_config_from_plane_state(in_dc, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location].surface, context->stream_status[stream_index].plane_states[plane_index]);
populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index);
populate_dml21_plane_config_from_plane_state(
dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location],
context->stream_status[stream_index].plane_states[plane_index],
context, stream_index, &dml_ctx->v21.dml_init.soc_bb);
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
if (dml21_wrapper_get_plane_id(context, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
@ -873,6 +883,7 @@ static struct dml2_dchub_watermark_regs *wm_set_index_to_dc_wm_set(union dcn_wat
void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx)
{
(void)in_dc;
const struct dml2_display_cfg_programming *programming = in_ctx->v21.mode_programming.programming;
unsigned int wm_index;
@ -907,6 +918,7 @@ void dml21_get_pipe_mcache_config(
struct dml2_per_plane_programming *pln_prog,
struct dml2_pipe_configuration_descriptor *mcache_pipe_config)
{
(void)context;
mcache_pipe_config->plane0.viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x;
mcache_pipe_config->plane0.viewport_width = pipe_ctx->plane_res.scl_data.viewport.width;

View File

@ -88,6 +88,7 @@ int dml21_find_dc_pipes_for_plane(const struct dc *in_dc,
struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__],
int dml_plane_idx)
{
(void)in_dc;
unsigned int dml_stream_index;
unsigned int main_stream_id;
unsigned int dc_plane_index;
@ -282,6 +283,7 @@ static struct dc_plane_state *dml21_add_phantom_plane(struct dml2_context *dml_c
struct dc_plane_state *main_plane,
struct dml2_per_plane_programming *plane_programming)
{
(void)plane_programming;
struct dc_plane_state *phantom_plane;
phantom_plane = dml_ctx->config.svp_pstate.callbacks.create_phantom_plane(dc, context, main_plane);

View File

@ -58,8 +58,8 @@ bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const s
void dml21_destroy(struct dml2_context *dml2)
{
vfree(dml2->v21.dml_init.dml2_instance);
vfree(dml2->v21.mode_programming.programming);
DC_RUN_WITH_PREEMPTION_ENABLED(vfree(dml2->v21.dml_init.dml2_instance));
DC_RUN_WITH_PREEMPTION_ENABLED(vfree(dml2->v21.mode_programming.programming));
}
void dml21_copy(struct dml2_context *dst_dml_ctx,

View File

@ -51,6 +51,8 @@ void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const st
static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state,
struct dml2_context *in_ctx, unsigned int pipe_cnt)
{
(void)out_new_hw_state;
(void)pipe_cnt;
unsigned int dml_prog_idx = 0, dc_pipe_index = 0, num_dpps_required = 0;
struct dml2_per_plane_programming *pln_prog = NULL;
struct dml2_per_stream_programming *stream_prog = NULL;

View File

@ -68,6 +68,7 @@ static const struct dml2_soc_qos_parameters dml_dcn42_variant_a_soc_qos_params =
.qos_type = dml2_qos_param_type_dcn3,
};
/* Default SOC bounding box for DCN42 based on LPDDR5/LPCAMM2 latencies*/
static const struct dml2_soc_bb dml2_socbb_dcn42 = {
.clk_table = {
.wck_ratio = {
@ -185,12 +186,13 @@ static const struct dml2_soc_bb dml2_socbb_dcn42 = {
.qos_type = dml2_qos_param_type_dcn3,
},
/* DCN42 params for LPDDR5/LPCAMM2 */
.power_management_parameters = {
.dram_clk_change_blackout_us = 29,
.dram_clk_change_blackout_us = 36,
.fclk_change_blackout_us = 0,
.g7_ppt_blackout_us = 0,
.stutter_enter_plus_exit_latency_us = 11,
.stutter_exit_latency_us = 9,
.stutter_enter_plus_exit_latency_us = 14,
.stutter_exit_latency_us = 12,
.z8_stutter_enter_plus_exit_latency_us = 300,
.z8_stutter_exit_latency_us = 200,
},
@ -203,12 +205,12 @@ static const struct dml2_soc_bb dml2_socbb_dcn42 = {
.xtalclk_mhz = 24,
.pcie_refclk_mhz = 100,
.dchub_refclk_mhz = 50,
.mall_allocated_for_dcn_mbytes = 64,
.mall_allocated_for_dcn_mbytes = 0,
.max_outstanding_reqs = 256,
.fabric_datapath_to_dcn_data_return_bytes = 32,
.return_bus_width_bytes = 64,
.hostvm_min_page_size_kbytes = 4,
.gpuvm_min_page_size_kbytes = 256,
.gpuvm_min_page_size_kbytes = 4,
.gpuvm_max_page_table_levels = 1,
.hostvm_max_non_cached_page_table_levels = 2,
.phy_downspread_percent = 0.38,
@ -222,6 +224,17 @@ static const struct dml2_soc_bb dml2_socbb_dcn42 = {
.max_fclk_for_uclk_dpm_khz = 2200 * 1000,
};
/* DCN42 params for DDR5 */
struct dml2_soc_power_management_parameters dcn42_ddr5_power_management_parameters = {
.dram_clk_change_blackout_us = 36,
.fclk_change_blackout_us = 0,
.g7_ppt_blackout_us = 0,
.stutter_enter_plus_exit_latency_us = 23.5,
.stutter_exit_latency_us = 21.5,
.z8_stutter_enter_plus_exit_latency_us = 300,
.z8_stutter_exit_latency_us = 200,
};
static const struct dml2_ip_capabilities dml2_dcn42_max_ip_caps = {
.pipe_count = 4,
.otg_count = 4,
@ -234,7 +247,7 @@ static const struct dml2_ip_capabilities dml2_dcn42_max_ip_caps = {
.config_return_buffer_segment_size_in_kbytes = 64,
.meta_fifo_size_in_kentries = 32,
.compressed_buffer_segment_size_in_kbytes = 64,
.cursor_buffer_size = 24,
.cursor_buffer_size = 42,
.max_flip_time_us = 110,
.max_flip_time_lines = 50,
.hostvm_mode = 0,

View File

@ -26,20 +26,6 @@ enum dml2_swizzle_mode {
dml2_gfx11_sw_64kb_r_x,
dml2_gfx11_sw_256kb_d_x,
dml2_gfx11_sw_256kb_r_x,
dml2_sw_linear_256b, // GFX10 SW_LINEAR only accepts 256 byte aligned pitch
dml2_gfx10_sw_64kb_r_x,
dml2_gfx102_sw_64kb_s,
dml2_gfx102_sw_64kb_s_t,
dml2_gfx102_sw_64kb_s_x,
dml2_gfx102_sw_64kb_r_x,
dml2_linear_64elements, // GFX7 LINEAR_ALIGNED accepts pitch alignment of the maximum of 64 elements or 256 bytes
dml2_gfx7_1d_thin,
dml2_gfx7_2d_thin_gen_zero,
dml2_gfx7_2d_thin_gen_one,
dml2_gfx7_2d_thin_arlene,
dml2_gfx7_2d_thin_anubis
};
enum dml2_source_format_class {

View File

@ -135,7 +135,7 @@ struct dml2_core_ip_params core_dcn42_ip_caps_base = {
.cursor_64bpp_support = true,
.dynamic_metadata_vm_enabled = false,
.max_num_hdmi_frl_outputs = 0,
.max_num_hdmi_frl_outputs = 1,
.max_num_dp2p0_outputs = 2,
.max_num_dp2p0_streams = 4,
.imall_supported = 1,
@ -155,7 +155,7 @@ struct dml2_core_ip_params core_dcn42_ip_caps_base = {
.min_meta_chunk_size_bytes = 256,
.dchub_arb_to_ret_delay = 102,
.hostvm_mode = 1,
.hostvm_mode = 0,
};
static void patch_ip_caps_with_explicit_ip_params(struct dml2_ip_capabilities *ip_caps, const struct dml2_core_ip_params *ip_params)
@ -281,6 +281,7 @@ static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters
static void create_phantom_plane_from_main_plane(struct dml2_plane_parameters *phantom, const struct dml2_plane_parameters *main,
const struct dml2_stream_parameters *phantom_stream, int phantom_stream_index, const struct dml2_stream_parameters *main_stream)
{
(void)main_stream;
memcpy(phantom, main, sizeof(struct dml2_plane_parameters));
phantom->stream_index = phantom_stream_index;

View File

@ -840,6 +840,7 @@ static void CalculateSwathWidth(
unsigned int swath_width_luma_ub[], // per-pipe
unsigned int swath_width_chroma_ub[]) // per-pipe
{
(void)BytePerPixY;
enum dml2_odm_mode MainSurfaceODMMode;
double odm_hactive_factor = 1.0;
unsigned int req_width_horz_y;
@ -1283,6 +1284,8 @@ static double TruncToValidBPP(
// Output
unsigned int *RequiredSlots)
{
(void)DSCInputBitPerComponent;
(void)RequiredSlots;
double MaxLinkBPP;
unsigned int MinDSCBPP;
double MaxDSCBPP;
@ -1922,6 +1925,7 @@ static void CalculateRowBandwidth(
double *dpte_row_bw,
double *meta_row_bw)
{
(void)use_one_row_for_frame;
if (!DCCEnable || !mrq_present) {
*meta_row_bw = 0;
} else if (dml_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha) {
@ -2020,6 +2024,11 @@ static void CalculateDCCConfiguration(
unsigned int *IndependentBlockLuma,
unsigned int *IndependentBlockChroma)
{
(void)SurfaceWidthChroma;
(void)SurfaceHeightChroma;
(void)TilingFormat;
(void)BytePerPixelDETY;
(void)BytePerPixelDETC;
unsigned int DETBufferSizeForDCC = nomDETInKByte * 1024;
unsigned int segment_order_horz_contiguous_luma;
@ -2270,6 +2279,7 @@ static void calculate_mcache_row_bytes(
struct dml2_core_internal_scratch *scratch,
struct dml2_core_calcs_calculate_mcache_row_bytes_params *p)
{
(void)scratch;
unsigned int vmpg_bytes = 0;
unsigned int blk_bytes = 0;
float meta_per_mvmpg_per_channel = 0;
@ -3642,6 +3652,8 @@ static double CalculateWriteBackDelay(
unsigned int WritebackSourceHeight,
unsigned int HTotal)
{
(void)WritebackPixelFormat;
(void)WritebackHRatio;
double CalculateWriteBackDelay;
double Line_length;
double Output_lines_last_notclamped;
@ -3959,6 +3971,7 @@ static enum dml2_odm_mode DecideODMMode(unsigned int HActive,
double SurfaceRequiredDISPCLKWithODMCombineThreeToOne,
double SurfaceRequiredDISPCLKWithODMCombineFourToOne)
{
(void)SurfaceRequiredDISPCLKWithODMCombineFourToOne;
enum dml2_odm_mode MinimumRequiredODMModeForMaxDispClock;
enum dml2_odm_mode MinimumRequiredODMModeForMaxDSCHActive;
enum dml2_odm_mode MinimumRequiredODMModeForMax420HActive;
@ -4460,6 +4473,8 @@ static double CalculateWriteBackDISPCLK(
unsigned int HTotal,
unsigned int WritebackLineBufferSize)
{
(void)WritebackPixelFormat;
(void)WritebackVRatio;
double DISPCLK_H, DISPCLK_V, DISPCLK_HB;
DISPCLK_H = PixelClock * math_ceil2((double)WritebackHTaps / 8.0, 1) / WritebackHRatio;
@ -4561,6 +4576,10 @@ static void CalculateSurfaceSizeInMall(
unsigned int SurfaceSizeInMALL[],
bool *ExceededMALLSize)
{
(void)Read256BytesBlockWidthY;
(void)Read256BytesBlockWidthC;
(void)Read256BytesBlockHeightY;
(void)Read256BytesBlockHeightC;
unsigned int TotalSurfaceSizeInMALLForSS = 0;
unsigned int TotalSurfaceSizeInMALLForSubVP = 0;
unsigned int MALLAllocatedForDCNInBytes = MALLAllocatedForDCN * 1024 * 1024;
@ -4620,6 +4639,7 @@ static void calculate_tdlut_setting(
struct dml2_core_internal_scratch *scratch,
struct dml2_core_calcs_calculate_tdlut_setting_params *p)
{
(void)scratch;
// locals
unsigned int tdlut_bpe = 8;
unsigned int tdlut_width;
@ -6503,6 +6523,7 @@ static void CalculateFlipSchedule(
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe)
{
(void)use_one_row_for_frame_flip;
struct dml2_core_shared_CalculateFlipSchedule_locals *l = &s->CalculateFlipSchedule_locals;
l->dual_plane = dml_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha;
@ -7381,7 +7402,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter
s->tdlut_bytes_per_group,
s->HostVMInefficiencyFactor,
s->HostVMInefficiencyFactorPrefetch,
mode_lib->soc.hostvm_min_page_size_kbytes,
mode_lib->soc.hostvm_min_page_size_kbytes * 1024,
mode_lib->soc.qos_parameters.qos_type,
!(display_cfg->overrides.max_outstanding_when_urgent_expected_disable),
mode_lib->soc.max_outstanding_reqs,
@ -7477,12 +7498,11 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter
CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k];
CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes * 1024;
CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required;
CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes;
CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->ms.UrgLatency;
CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->ms.ExtraLatencyPrefetch;
CalculatePrefetchSchedule_params->TCalc = mode_lib->ms.TimeCalc;
CalculatePrefetchSchedule_params->vm_bytes = mode_lib->ms.vm_bytes[k];
@ -8965,7 +8985,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculateVMRowAndSwath_params->MALLAllocatedForDCN = mode_lib->soc.mall_allocated_for_dcn_mbytes;
CalculateVMRowAndSwath_params->SwathWidthY = mode_lib->ms.SwathWidthY;
CalculateVMRowAndSwath_params->SwathWidthC = mode_lib->ms.SwathWidthC;
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes * 1024;
CalculateVMRowAndSwath_params->DCCMetaBufferSizeBytes = mode_lib->ip.dcc_meta_buffer_size_bytes;
CalculateVMRowAndSwath_params->mrq_present = mode_lib->ip.dcn_mrq_present;
@ -9968,6 +9988,8 @@ static void CalculateVMGroupAndRequestTimes(
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[])
{
(void)dpte_row_width_luma_ub;
(void)dpte_row_width_chroma_ub;
unsigned int num_group_per_lower_vm_stage = 0;
unsigned int num_req_per_lower_vm_stage = 0;
unsigned int num_group_per_lower_vm_stage_flip;
@ -10755,7 +10777,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculateVMRowAndSwath_params->MALLAllocatedForDCN = mode_lib->soc.mall_allocated_for_dcn_mbytes;
CalculateVMRowAndSwath_params->SwathWidthY = mode_lib->mp.SwathWidthY;
CalculateVMRowAndSwath_params->SwathWidthC = mode_lib->mp.SwathWidthC;
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes * 1024;
CalculateVMRowAndSwath_params->DCCMetaBufferSizeBytes = mode_lib->ip.dcc_meta_buffer_size_bytes;
CalculateVMRowAndSwath_params->mrq_present = mode_lib->ip.dcn_mrq_present;
@ -10971,7 +10993,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->tdlut_bytes_per_group,
s->HostVMInefficiencyFactor,
s->HostVMInefficiencyFactorPrefetch,
mode_lib->soc.hostvm_min_page_size_kbytes,
mode_lib->soc.hostvm_min_page_size_kbytes * 1024,
mode_lib->soc.qos_parameters.qos_type,
!(display_cfg->overrides.max_outstanding_when_urgent_expected_disable),
mode_lib->soc.max_outstanding_reqs,
@ -11264,12 +11286,11 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
CalculatePrefetchSchedule_params->VStartup = s->MaxVStartupLines[k];
CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes * 1024;
CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required;
CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes;
CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->mp.UrgentLatency;
CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->mp.ExtraLatencyPrefetch;
CalculatePrefetchSchedule_params->TCalc = mode_lib->mp.TCalc;
CalculatePrefetchSchedule_params->vm_bytes = mode_lib->mp.vm_bytes[k];

View File

@ -269,6 +269,9 @@ struct dml2_core_internal_mode_support_info {
bool global_dram_clock_change_supported;
bool global_fclk_change_supported;
bool global_temp_read_or_ppt_supported;
bool fclk_pstate_schedule_admissible;
bool temp_read_pstate_schedule_admissible;
bool ppt_pstate_schedule_admissible;
bool USRRetrainingSupport;
bool AvgBandwidthSupport;
bool UrgVactiveBandwidthSupport;
@ -1063,6 +1066,8 @@ struct dml2_core_calcs_mode_support_locals {
bool dummy_boolean_array[2][DML2_MAX_PLANES];
double dummy_single[3];
double dummy_single_array[DML2_MAX_PLANES];
double dummy_double_array[3][DML2_MAX_PLANES];
enum dml2_pstate_method dummy_pstate_method_array[DML2_MAX_PLANES];
struct dml2_core_internal_watermarks dummy_watermark;
double dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
double surface_dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES];
@ -1721,30 +1726,30 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param
double ReturnBW;
bool SynchronizeTimings;
bool SynchronizeDRRDisplaysForUCLKPStateChange;
unsigned int *dpte_group_bytes;
const unsigned int *dpte_group_bytes;
struct dml2_core_internal_SOCParametersList mmSOCParameters;
unsigned int WritebackChunkSize;
double SOCCLK;
double DCFClkDeepSleep;
unsigned int *DETBufferSizeY;
unsigned int *DETBufferSizeC;
unsigned int *SwathHeightY;
unsigned int *SwathHeightC;
unsigned int *SwathWidthY;
unsigned int *SwathWidthC;
unsigned int *DPPPerSurface;
double *BytePerPixelDETY;
double *BytePerPixelDETC;
unsigned int *DSTXAfterScaler;
unsigned int *DSTYAfterScaler;
const unsigned int *DETBufferSizeY;
const unsigned int *DETBufferSizeC;
const unsigned int *SwathHeightY;
const unsigned int *SwathHeightC;
const unsigned int *SwathWidthY;
const unsigned int *SwathWidthC;
const unsigned int *DPPPerSurface;
const double *BytePerPixelDETY;
const double *BytePerPixelDETC;
const unsigned int *DSTXAfterScaler;
const unsigned int *DSTYAfterScaler;
bool UnboundedRequestEnabled;
unsigned int CompressedBufferSizeInkByte;
bool max_outstanding_when_urgent_expected;
unsigned int max_outstanding_requests;
unsigned int max_request_size_bytes;
unsigned int *meta_row_height_l;
unsigned int *meta_row_height_c;
enum dml2_pstate_method *uclk_pstate_switch_modes;
const unsigned int max_outstanding_requests;
const unsigned int max_request_size_bytes;
const unsigned int *meta_row_height_l;
const unsigned int *meta_row_height_c;
const enum dml2_pstate_method *uclk_pstate_switch_modes;
// Output
struct dml2_core_internal_watermarks *Watermark;
@ -1931,7 +1936,6 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
bool DynamicMetadataVMEnabled;
unsigned int DynamicMetadataLinesBeforeActiveRequired;
unsigned int DynamicMetadataTransmittedBytes;
double UrgentLatency;
double ExtraLatencyPrefetch;
double TCalc;
unsigned int vm_bytes;

View File

@ -428,10 +428,6 @@ bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_c
unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel)
{
if (dml2_core_utils_get_gfx_version(sw_mode) == 10 || dml2_core_utils_get_gfx_version(sw_mode) == 7) {
return dml2_core_utils_get_tile_block_size_bytes_backcompat(sw_mode, byte_per_pixel);
}
if (sw_mode == dml2_sw_linear)
return 256;
else if (sw_mode == dml2_sw_256b_2d)
@ -462,56 +458,14 @@ unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw
};
}
/*
 * Legacy (gfx10 / gfx7 era) tile block size lookup in bytes.
 * Fixed-size swizzle modes map to 256 B or 64 KiB blocks; the gfx7
 * "2d thin" variants scale with the pixel size.  Unknown modes assert
 * and fall back to 256 B.
 */
unsigned int dml2_core_utils_get_tile_block_size_bytes_backcompat(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel)
{
	switch (sw_mode) {
	case dml2_sw_linear_256b:
	case dml2_linear_64elements:
	case dml2_gfx7_1d_thin:
		return 256;
	case dml2_gfx10_sw_64kb_r_x:
	case dml2_gfx102_sw_64kb_s:
	case dml2_gfx102_sw_64kb_s_t:
	case dml2_gfx102_sw_64kb_s_x:
	case dml2_gfx102_sw_64kb_r_x:
		return 65536;
	case dml2_gfx7_2d_thin_gen_zero:
		return 128 * 64 * byte_per_pixel;
	case dml2_gfx7_2d_thin_gen_one:
	case dml2_gfx7_2d_thin_anubis:
		return 128 * 128 * byte_per_pixel;
	case dml2_gfx7_2d_thin_arlene:
		return 64 * 32 * byte_per_pixel;
	default:
		DML_ASSERT(0);
		return 256;
	}
}
/*
 * Horizontal contiguity query for a swizzle mode / bytes-per-pixel pair.
 * Legacy gfx10 and gfx7 swizzle modes defer to the backcompat helper;
 * for current modes only the 2 Bpp case is non-contiguous.
 */
bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel)
{
	if (dml2_core_utils_get_gfx_version(sw_mode) == 10 || dml2_core_utils_get_gfx_version(sw_mode) == 7)
		return dml2_core_utils_get_segment_horizontal_contiguous_backcompat(sw_mode, byte_per_pixel);

	return byte_per_pixel != 2;
}
/*
 * Legacy horizontal contiguity rule: only the 4 Bpp 64KB S/R swizzle
 * variants are non-contiguous; everything else is contiguous.
 *
 * Fix: the original body carried a second, unreachable
 * "return (byte_per_pixel != 2);" after the governing return (a merge
 * artifact).  Dead statement removed; behavior is unchanged since the
 * first return always executed.
 */
bool dml2_core_utils_get_segment_horizontal_contiguous_backcompat(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel)
{
	return !((byte_per_pixel == 4) &&
		((sw_mode == dml2_gfx10_sw_64kb_r_x) || (sw_mode == dml2_gfx102_sw_64kb_s) || (sw_mode == dml2_gfx102_sw_64kb_s_t) || (sw_mode == dml2_gfx102_sw_64kb_s_x)));
}
/*
 * True for any linear-family swizzle mode, including the legacy
 * dml2_sw_linear_256b and dml2_linear_64elements aliases.
 *
 * Fix: the original body had a second, unreachable
 * "return sw_mode == dml2_sw_linear;" after the governing return (a
 * merge artifact) plus a stray semicolon after the closing brace.  Both
 * removed; behavior is unchanged since the first return always executed.
 */
bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode)
{
	return (sw_mode == dml2_sw_linear || sw_mode == dml2_sw_linear_256b || sw_mode == dml2_linear_64elements);
}
@ -544,20 +498,6 @@ int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode)
sw_mode == dml2_gfx11_sw_256kb_d_x ||
sw_mode == dml2_gfx11_sw_256kb_r_x)
version = 11;
else if (sw_mode == dml2_sw_linear_256b ||
sw_mode == dml2_gfx10_sw_64kb_r_x ||
sw_mode == dml2_gfx102_sw_64kb_s ||
sw_mode == dml2_gfx102_sw_64kb_s_t ||
sw_mode == dml2_gfx102_sw_64kb_s_x ||
sw_mode == dml2_gfx102_sw_64kb_r_x)
version = 10;
else if (sw_mode == dml2_linear_64elements ||
sw_mode == dml2_gfx7_1d_thin ||
sw_mode == dml2_gfx7_2d_thin_gen_zero ||
sw_mode == dml2_gfx7_2d_thin_gen_one ||
sw_mode == dml2_gfx7_2d_thin_arlene ||
sw_mode == dml2_gfx7_2d_thin_anubis)
version = 7;
else {
DML_LOG_VERBOSE("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
DML_ASSERT(0);
@ -648,6 +588,7 @@ static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters
static void create_phantom_plane_from_main_plane(struct dml2_plane_parameters *phantom, const struct dml2_plane_parameters *main,
const struct dml2_stream_parameters *phantom_stream, int phantom_stream_index, const struct dml2_stream_parameters *main_stream)
{
(void)main_stream;
memcpy(phantom, main, sizeof(struct dml2_plane_parameters));
phantom->stream_index = phantom_stream_index;
@ -845,3 +786,11 @@ bool dml2_core_utils_is_odm_split(enum dml2_odm_mode odm_mode)
return false;
}
}
/*
 * Frame time in microseconds: (vblank_nom + v_active) OTG line periods,
 * where one line period is h_total / pixel_clock_khz * 1000 us.
 */
double dml2_core_utils_get_frame_time_us(const struct dml2_stream_parameters *stream)
{
	double line_time_us = (double)stream->timing.h_total / (double)stream->timing.pixel_clock_khz * 1000.0;
	double total_lines = stream->timing.vblank_nom + stream->timing.v_active;

	return total_lines * line_time_us;
}

View File

@ -22,8 +22,6 @@ void dml2_core_utils_pipe_plane_mapping(const struct core_display_cfg_support_in
bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg);
unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
unsigned int dml2_core_utils_get_tile_block_size_bytes_backcompat(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
bool dml2_core_utils_get_segment_horizontal_contiguous_backcompat(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan);
bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode);
int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode);
@ -41,5 +39,6 @@ bool dml2_core_utils_is_hpo_dp_encoder(const struct dml2_stream_parameters *stre
bool dml2_core_utils_is_dp_8b_10b_link_rate(enum dml2_output_link_dp_rate rate);
bool dml2_core_utils_is_dp_128b_132b_link_rate(enum dml2_output_link_dp_rate rate);
bool dml2_core_utils_is_odm_split(enum dml2_odm_mode odm_mode);
double dml2_core_utils_get_frame_time_us(const struct dml2_stream_parameters *stream);
#endif /* __DML2_CORE_UTILS_H__ */

View File

@ -552,6 +552,7 @@ static int get_displays_without_vactive_margin_mask(struct dml2_dpmm_map_mode_to
static int get_displays_with_fams_mask(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out, int latency_hiding_requirement_us)
{
(void)latency_hiding_requirement_us;
unsigned int i;
int displays_with_fams_mask = 0x0;

View File

@ -8,11 +8,13 @@
/*
 * Stub DPM mapping callback: unconditionally reports success without
 * touching the parameter bundle.  The (void) cast suppresses the
 * unused-parameter warning.
 */
static bool dummy_map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
	(void)in_out;

	return true;
}
/*
 * Stub watermark mapping callback: unconditionally reports success
 * without touching the parameter bundle.
 */
static bool dummy_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_out)
{
	(void)in_out;

	return true;
}

View File

@ -9,6 +9,7 @@
/*
 * Stub minimum-clock-table builder: unconditionally reports success
 * without building anything.
 */
static bool dummy_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out)
{
	(void)in_out;

	return true;
}

View File

@ -0,0 +1,192 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2026 Advanced Micro Devices, Inc.
#include "dml2_pmo_dcn42.h"
#include "lib_float_math.h"
#include "dml2_debug.h"
#include "dml2_pmo_dcn4_fams2.h"
/*
* DCN42 PMO Policy Implementation
* This implementation provides VBlank-only strategies for 1, 2, 3, and 4 display
* configurations, ensuring p-state watermark support in the blank period only.
*/
/*
 * DCN42 base p-state strategy tables, one per active-display count
 * (1 through 4 streams).  Every entry is VBlank-only: on this ASIC
 * p-state switching is admitted in the blanking period only.
 *
 * Improvement: element counts are computed as sizeof(a)/sizeof(a[0])
 * against the array itself instead of repeating the element type name,
 * so the count cannot drift if the element type ever changes.
 */
static const struct dml2_pmo_pstate_strategy dcn42_strategy_list_1_display[] = {
	/* VBlank only */
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},
};

static const int dcn42_strategy_list_1_display_size = sizeof(dcn42_strategy_list_1_display) / sizeof(dcn42_strategy_list_1_display[0]);

static const struct dml2_pmo_pstate_strategy dcn42_strategy_list_2_display[] = {
	/* VBlank only for both displays */
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},
};

static const int dcn42_strategy_list_2_display_size = sizeof(dcn42_strategy_list_2_display) / sizeof(dcn42_strategy_list_2_display[0]);

static const struct dml2_pmo_pstate_strategy dcn42_strategy_list_3_display[] = {
	/* VBlank only for all three displays */
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na },
		.allow_state_increase = true,
	},
};

static const int dcn42_strategy_list_3_display_size = sizeof(dcn42_strategy_list_3_display) / sizeof(dcn42_strategy_list_3_display[0]);

static const struct dml2_pmo_pstate_strategy dcn42_strategy_list_4_display[] = {
	/* VBlank only for all four displays */
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank },
		.allow_state_increase = true,
	},
};

static const int dcn42_strategy_list_4_display_size = sizeof(dcn42_strategy_list_4_display) / sizeof(dcn42_strategy_list_4_display[0]);
/*
 * Check whether the currently selected p-state strategy candidate is
 * admissible for the given display configuration.
 *
 * DCN42 strategies are VBlank-only: every stream must use the vblank
 * method and meet the vblank reserved-time requirement; any other
 * method rejects the candidate.  Returns true when all streams admit
 * the candidate (or all streams are blanked), false otherwise.
 */
bool pmo_dcn42_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out)
{
	const struct dml2_pmo_scratch *s = &in_out->instance->scratch;
	/* Minimum blank time (us) needed to hide a DRAM clock change. */
	const int REQUIRED_RESERVED_TIME =
		(int)in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us;
	bool p_state_supported = true;
	unsigned int stream_index;

	/* Nothing visible: any p-state change is trivially safe. */
	if (in_out->base_display_config->display_config.overrides.all_streams_blanked)
		return true;

	/* No candidate currently selected. */
	if (s->pmo_dcn4.cur_pstate_candidate < 0)
		return false;

	for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
		if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank) {
			/*
			 * NOTE(review): the second condition rejects streams whose
			 * vactive margin is positive (> 0), whereas the DCN4 FAMS2
			 * path requires a *minimum* margin instead — confirm this
			 * inversion is intended for DCN42.
			 */
			if (dcn4_get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < REQUIRED_RESERVED_TIME ||
				dcn4_get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > 0) {
				p_state_supported = false;
				break;
			}
		} else {
			/* Only the vblank method is supported on DCN42. */
			p_state_supported = false;
			break;
		}
	}

	return p_state_supported;
}
bool pmo_dcn42_initialize(struct dml2_pmo_initialize_in_out *in_out)
{
int i = 0;
struct dml2_pmo_instance *pmo = in_out->instance;
unsigned int base_list_size = 0;
const struct dml2_pmo_pstate_strategy *base_list = NULL;
unsigned int *expanded_list_size = NULL;
struct dml2_pmo_pstate_strategy *expanded_list = NULL;
DML_LOG_COMP_IF_ENTER();
pmo->soc_bb = in_out->soc_bb;
pmo->ip_caps = in_out->ip_caps;
pmo->mpc_combine_limit = 2;
pmo->odm_combine_limit = 4;
pmo->mcg_clock_table_size = in_out->mcg_clock_table_size;
/*
* DCN42 does not support FAMS features like SubVP and DRR.
* These parameters are initialized to safe values but won't be used
* since our strategies only use VBlank.
*/
pmo->fams_params.v2.subvp.refresh_rate_limit_max = 0;
pmo->fams_params.v2.subvp.refresh_rate_limit_min = 0;
pmo->fams_params.v2.drr.refresh_rate_limit_max = 0;
pmo->fams_params.v2.drr.refresh_rate_limit_min = 0;
pmo->options = in_out->options;
/* Generate permutations of p-state configs from base strategy list */
for (i = 0; i < PMO_DCN4_MAX_DISPLAYS; i++) {
switch (i+1) {
case 1:
if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
base_list = pmo->options->override_strategy_lists[i];
base_list_size = pmo->options->num_override_strategies_per_list[i];
} else {
base_list = dcn42_strategy_list_1_display;
base_list_size = dcn42_strategy_list_1_display_size;
}
expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display;
break;
case 2:
if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
base_list = pmo->options->override_strategy_lists[i];
base_list_size = pmo->options->num_override_strategies_per_list[i];
} else {
base_list = dcn42_strategy_list_2_display;
base_list_size = dcn42_strategy_list_2_display_size;
}
expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display;
break;
case 3:
if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
base_list = pmo->options->override_strategy_lists[i];
base_list_size = pmo->options->num_override_strategies_per_list[i];
} else {
base_list = dcn42_strategy_list_3_display;
base_list_size = dcn42_strategy_list_3_display_size;
}
expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display;
break;
case 4:
if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
base_list = pmo->options->override_strategy_lists[i];
base_list_size = pmo->options->num_override_strategies_per_list[i];
} else {
base_list = dcn42_strategy_list_4_display;
base_list_size = dcn42_strategy_list_4_display_size;
}
expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display;
break;
}
DML_ASSERT(base_list_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/*
* Populate list using DCN4 FAMS2 expansion function.
* Since our strategies only contain VBlank methods, the expansion
* will not introduce any FAMS-specific logic.
*/
pmo_dcn4_fams2_expand_base_pstate_strategies(
base_list,
base_list_size,
i + 1,
expanded_list,
expanded_list_size);
}
DML_LOG_DEBUG("%s exit with true\n", __func__);
DML_LOG_COMP_IF_EXIT();
return true;
}

View File

@ -0,0 +1,17 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2026 Advanced Micro Devices, Inc.
 */

#ifndef __DML2_PMO_DCN42_H__
#define __DML2_PMO_DCN42_H__

#include "dml2_internal_shared_types.h"

/* Forward declarations of the PMO entry-point parameter bundles. */
struct dml2_pmo_initialize_in_out;
struct dml2_pmo_test_for_pstate_support_in_out;

/* Set up the DCN42 PMO instance and expand its VBlank-only strategy lists. */
bool pmo_dcn42_initialize(struct dml2_pmo_initialize_in_out *in_out);

/* Check the current p-state strategy candidate against a display config. */
bool pmo_dcn42_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out);

#endif /* __DML2_PMO_DCN42_H__ */

View File

@ -428,6 +428,7 @@ static void insert_strategy_into_expanded_list(
struct dml2_pmo_pstate_strategy *expanded_strategy_list,
unsigned int *num_expanded_strategies)
{
(void)stream_count;
if (expanded_strategy_list && num_expanded_strategies) {
memcpy(&expanded_strategy_list[*num_expanded_strategies], per_stream_pstate_strategy, sizeof(struct dml2_pmo_pstate_strategy));
@ -520,6 +521,7 @@ static bool is_variant_method_valid(const struct dml2_pmo_pstate_strategy *base_
const unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS],
const unsigned int stream_count)
{
(void)variant_strategy;
bool valid = true;
unsigned int i;
@ -1180,6 +1182,7 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
static void insert_into_candidate_list(const struct dml2_pmo_pstate_strategy *pstate_strategy, int stream_count, struct dml2_pmo_scratch *scratch)
{
(void)stream_count;
scratch->pmo_dcn4.pstate_strategy_candidates[scratch->pmo_dcn4.num_pstate_candidates] = *pstate_strategy;
scratch->pmo_dcn4.num_pstate_candidates++;
}
@ -1659,7 +1662,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
return is_config_schedulable(pmo, display_cfg, pstate_strategy);
}
static int get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask)
int dcn4_get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask)
{
unsigned int i;
int min_vactive_margin_us = 0xFFFFFFF;
@ -1847,6 +1850,7 @@ static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo,
struct display_configuation_with_meta *display_config,
int stream_index)
{
(void)display_config;
struct dml2_implicit_svp_meta *stream_svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index];
struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index];
@ -1903,7 +1907,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
// Figure out which streams can do vactive, and also build up implicit SVP and FAMS2 meta
for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
if (get_vactive_pstate_margin(display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) >= (int)(MIN_VACTIVE_MARGIN_PCT * pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us))
if (dcn4_get_vactive_pstate_margin(display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) >= (int)(MIN_VACTIVE_MARGIN_PCT * pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us))
set_bit_in_bitfield(&s->pmo_dcn4.stream_vactive_capability_mask, stream_index);
/* FAMS2 meta */
@ -1990,6 +1994,7 @@ static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *
struct dml2_pmo_instance *pmo,
int plane_mask)
{
(void)pmo;
unsigned int plane_index;
struct dml2_plane_parameters *plane;
@ -2177,7 +2182,9 @@ static bool setup_display_config(struct display_configuation_with_meta *display_
return success;
}
static int get_minimum_reserved_time_us_for_planes(struct display_configuation_with_meta *display_config, int plane_mask)
int dcn4_get_minimum_reserved_time_us_for_planes(
const struct display_configuation_with_meta *display_config,
int plane_mask)
{
int min_time_us = 0xFFFFFF;
unsigned int plane_index = 0;
@ -2217,16 +2224,16 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp
if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) ||
if (dcn4_get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) ||
get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us) {
p_state_supported = false;
break;
}
} else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank ||
s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
if (get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) <
if (dcn4_get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) <
REQUIRED_RESERVED_TIME ||
get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_VBLANK) {
dcn4_get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_VBLANK) {
p_state_supported = false;
break;
}
@ -2238,7 +2245,7 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp
}
} else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
if (!all_planes_match_method(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pstate_method_fw_drr) ||
get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_DRR) {
dcn4_get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_DRR) {
p_state_supported = false;
break;
}

View File

@ -7,6 +7,16 @@
#include "dml2_internal_shared_types.h"
struct display_configuation_with_meta;
/*
 * Smallest vactive p-state margin across the planes selected by
 * plane_mask.  Exported so other PMO variants (e.g. DCN42) can reuse it.
 */
int dcn4_get_vactive_pstate_margin(
	const struct display_configuation_with_meta *display_cfg,
	int plane_mask);

/*
 * Smallest reserved time (us) across the planes selected by plane_mask.
 * Exported so other PMO variants (e.g. DCN42) can reuse it.
 */
int dcn4_get_minimum_reserved_time_us_for_planes(
	const struct display_configuation_with_meta *display_config,
	int plane_mask);
bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out);
bool pmo_dcn4_fams2_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out);

View File

@ -9,16 +9,19 @@
/*
 * Stub stutter-init callback: reports failure without doing any setup,
 * so the stutter optimization phase is skipped.
 */
static bool dummy_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in_out)
{
	(void)in_out;

	return false;
}
/*
 * Stub stutter-test callback: unconditionally reports the configuration
 * as acceptable.
 */
static bool dummy_test_for_stutter(struct dml2_pmo_test_for_stutter_in_out *in_out)
{
	(void)in_out;

	return true;
}
/*
 * Stub stutter-optimize callback: reports that no further optimization
 * is possible.
 */
static bool dummy_optimize_for_stutter(struct dml2_pmo_optimize_for_stutter_in_out *in_out)
{
	(void)in_out;

	return false;
}

View File

@ -23,7 +23,7 @@ double math_mod(const double arg1, const double arg2)
return arg2;
if (isNaN(arg2))
return arg1;
return arg1 - arg1 * ((int)(arg1 / arg2));
return arg1 - arg2 * ((int)(arg1 / arg2));
}
double math_min2(const double arg1, const double arg2)

View File

@ -17,6 +17,7 @@ static void setup_unoptimized_display_config_with_meta(const struct dml2_instanc
/*
 * Copy the caller's display config into the meta-wrapper and reset
 * min_clk_index_for_latency to 0 for the speculative (stage-1) pass.
 * The dml instance is currently unused.
 *
 * Improvement: sizeof is taken on the destination object instead of the
 * spelled-out type name, so the copy size tracks the field's type.
 */
static void setup_speculative_display_config_with_meta(const struct dml2_instance *dml, struct display_configuation_with_meta *out, const struct dml2_display_cfg *display_config)
{
	(void)dml;

	memcpy(&out->display_config, display_config, sizeof(out->display_config));
	out->stage1.min_clk_index_for_latency = 0;
}
@ -472,6 +473,7 @@ static unsigned int count_elements_in_span(int *array, unsigned int array_size,
static bool calculate_h_split_for_scaling_transform(int full_vp_width, int h_active, int num_pipes,
enum dml2_scaling_transform scaling_transform, int *pipe_vp_x_start, int *pipe_vp_x_end)
{
(void)h_active;
int i, slice_width;
const char MAX_SCL_VP_OVERLAP = 3;
bool success = false;

View File

@ -178,6 +178,10 @@ static unsigned int find_pipes_assigned_to_plane(struct dml2_context *ctx,
static bool validate_pipe_assignment(const struct dml2_context *ctx, const struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, const struct dml2_dml_to_dc_pipe_mapping *mapping)
{
(void)ctx;
(void)disp_cfg;
(void)mapping;
(void)state;
// int i, j, k;
//
// unsigned int plane_id;
@ -292,6 +296,7 @@ static unsigned int find_last_resort_pipe_candidates(const struct dc_state *exis
const unsigned int stream_id,
unsigned int *last_resort_pipe_candidates)
{
(void)stream_id;
unsigned int num_last_resort_candidates = 0;
int i;
@ -541,6 +546,7 @@ static void add_odm_slice_to_odm_tree(struct dml2_context *ctx,
struct dc_pipe_mapping_scratch *scratch,
unsigned int odm_slice_index)
{
(void)ctx;
struct pipe_ctx *pipe = NULL;
int i;
@ -567,6 +573,8 @@ static struct pipe_ctx *add_plane_to_blend_tree(struct dml2_context *ctx,
unsigned int odm_slice,
struct pipe_ctx *top_pipe)
{
(void)ctx;
(void)plane;
int i;
for (i = 0; i < pipe_pool->num_pipes_assigned_to_plane_for_mpcc_combine; i++) {
@ -722,6 +730,7 @@ static void free_unused_pipes_for_plane(struct dml2_context *ctx, struct dc_stat
static void remove_pipes_from_blend_trees(struct dml2_context *ctx, struct dc_state *state, struct dc_plane_pipe_pool *pipe_pool, unsigned int odm_slice)
{
(void)ctx;
struct pipe_ctx *pipe;
int i;

View File

@ -33,6 +33,7 @@
void dml2_init_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out)
{
(void)in_dc;
switch (dml2->v20.dml_core_ctx.project) {
case dml_project_dcn32:
case dml_project_dcn321:
@ -244,6 +245,7 @@ void dml2_init_ip_params(struct dml2_context *dml2, const struct dc *in_dc, stru
void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, struct soc_bounding_box_st *out)
{
(void)in_dc;
out->dprefclk_mhz = dml2->config.bbox_overrides.dprefclk_mhz;
out->xtalclk_mhz = dml2->config.bbox_overrides.xtalclk_mhz;
out->pcierefclk_mhz = 100;
@ -328,6 +330,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
const struct soc_bounding_box_st *in_bbox, struct soc_states_st *out)
{
(void)in_dc;
struct dml2_policy_build_synthetic_soc_states_scratch *s = &dml2->v20.scratch.create_scratch.build_synthetic_socbb_scratch;
struct dml2_policy_build_synthetic_soc_states_params *p = &dml2->v20.scratch.build_synthetic_socbb_params;
int dcfclk_stas_mhz[NUM_DCFCLK_STAS] = {0};
@ -782,6 +785,7 @@ static void populate_dml_timing_cfg_from_stream_state(struct dml_timing_cfg_st *
static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *out, unsigned int location,
const struct dc_stream_state *in, const struct pipe_ctx *pipe, struct dml2_context *dml2)
{
(void)pipe;
unsigned int output_bpc;
out->DSCEnable[location] = (enum dml_dsc_enable)in->timing.flags.DSC;
@ -1133,6 +1137,7 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml2,
const struct dc_stream_state *stream, const struct dml_display_cfg_st *dml_dispcfg)
{
(void)dml_dispcfg;
int i = 0;
int location = -1;
@ -1173,6 +1178,7 @@ static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *conte
static unsigned int map_plane_to_dml_display_cfg(const struct dml2_context *dml2, const struct dc_plane_state *plane,
const struct dc_state *context, const struct dml_display_cfg_st *dml_dispcfg, unsigned int stream_id, int plane_index)
{
(void)dml_dispcfg;
unsigned int plane_id;
unsigned int i = 0;
unsigned int location = UINT_MAX;

View File

@ -465,6 +465,7 @@ void dml2_initialize_det_scratch(struct dml2_context *in_ctx)
static unsigned int find_planes_per_stream_and_stream_count(struct dml2_context *in_ctx, struct dml_display_cfg_st *dml_dispcfg, int *num_of_planes_per_stream)
{
(void)in_ctx;
unsigned int plane_index, stream_index = 0, num_of_streams;
for (plane_index = 0; plane_index < dml_dispcfg->num_surfaces; plane_index++) {

View File

@ -108,6 +108,17 @@ bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options
return true;
}
/*
 * Tear down a DML2 context; passing NULL is a no-op.  DML2.1 contexts
 * run their own teardown before the backing memory is vfree'd (with
 * preemption enabled around the free).
 */
void dml2_destroy(struct dml2_context *dml2)
{
	if (dml2) {
		if (dml2->architecture == dml2_architecture_21)
			dml21_destroy(dml2);

		DC_RUN_WITH_PREEMPTION_ENABLED(vfree(dml2));
	}
}
void dml2_reinit(const struct dc *in_dc,
const struct dml2_configuration_options *config,
struct dml2_context **dml2)

View File

@ -548,16 +548,6 @@ void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *dml2)
}
}
/*
 * Tear down a DML2 context; passing NULL is a no-op.  DML2.1 contexts
 * run their own teardown before the backing memory is vfree'd.
 */
void dml2_destroy(struct dml2_context *dml2)
{
	if (dml2) {
		if (dml2->architecture == dml2_architecture_21)
			dml21_destroy(dml2);

		vfree(dml2);
	}
}
void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
unsigned int *fclk_change_support, unsigned int *dram_clk_change_support)
{

View File

@ -563,6 +563,7 @@ void dml_rq_dlg_get_dlg_reg(dml_display_dlg_regs_st *disp_dlg_regs,
void dml_rq_dlg_get_arb_params(struct display_mode_lib_st *mode_lib, dml_display_arb_params_st *arb_param)
{
(void)mode_lib;
memset(arb_param, 0, sizeof(*arb_param));
arb_param->max_req_outstanding = 256;
arb_param->min_req_outstanding = 256; // turn off the sat level feature if this set to max

View File

@ -680,9 +680,6 @@ static void get_dsc_enc_caps(
} else {
build_dsc_enc_caps(dsc, dsc_enc_caps);
}
if (dsc->ctx->dc->debug.native422_support)
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
}
/* Returns 'false' if no intersection was found for at least one capability.
@ -1100,13 +1097,14 @@ static bool setup_dsc_config(
branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_0_mps;
break;
case PIXEL_ENCODING_YCBCR422:
is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_NATIVE_422;
sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_1_mps;
branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_1_mps;
if (!is_dsc_possible) {
if (policy.ycbcr422_simple) {
is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_SIMPLE_422;
dsc_cfg->ycbcr422_simple = is_dsc_possible;
sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_0_mps;
} else {
is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_NATIVE_422;
sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_1_mps;
branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_1_mps;
}
break;
case PIXEL_ENCODING_YCBCR420:
@ -1406,6 +1404,7 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
policy->min_target_bpp = 8;
/* DP specs limits to 3 x bpc */
policy->max_target_bpp = 3 * bpc;
policy->ycbcr422_simple = true;
break;
case PIXEL_ENCODING_YCBCR420:
/* DP specs limits to 6 */

View File

@ -100,7 +100,7 @@ void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;

View File

@ -128,7 +128,7 @@ void dsc35_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int m
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;

View File

@ -78,7 +78,7 @@ static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsign
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;

View File

@ -781,10 +781,8 @@ static void restore_phy_clocks_for_destructive_link_verification(const struct dc
}
static void verify_link_capability_destructive(struct dc_link *link,
struct dc_sink *sink,
enum dc_detect_reason reason)
{
(void)sink;
bool should_prepare_phy_clocks =
should_prepare_phy_clocks_for_link_verification(link->dc, reason);
@ -857,11 +855,11 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
return destrictive;
}
static void verify_link_capability(struct dc_link *link, struct dc_sink *sink,
static void verify_link_capability(struct dc_link *link,
enum dc_detect_reason reason)
{
if (should_verify_link_capability_destructively(link, reason))
verify_link_capability_destructive(link, sink, reason);
verify_link_capability_destructive(link, reason);
else
verify_link_capability_non_destructive(link);
}
@ -1236,6 +1234,20 @@ static bool detect_link_and_local_sink(struct dc_link *link,
if (dc_is_hdmi_signal(link->connector_signal))
read_scdc_caps(link->ddc, link->local_sink);
/* When FreeSync is toggled through OSD,
* we see same EDID no matter what. Check MCCS caps
* to see if we should update FreeSync caps now.
*/
dm_helpers_read_mccs_caps(
link->ctx,
link,
sink);
if (prev_sink != NULL) {
if (memcmp(&sink->mccs_caps, &prev_sink->mccs_caps, sizeof(struct mccs_caps)))
same_edid = false;
}
if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
sink_caps.transaction_type ==
DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
@ -1455,8 +1467,9 @@ bool link_detect(struct dc_link *link, enum dc_detect_reason reason)
is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
if (is_local_sink_detect_success && link->local_sink)
verify_link_capability(link, link->local_sink, reason);
if (is_local_sink_detect_success && link->local_sink) {
verify_link_capability(link, reason);
}
DC_LOG_DC("%s: link_index=%d is_local_sink_detect_success=%d pre_link_type=%d link_type=%d\n", __func__,
link->link_index, is_local_sink_detect_success, pre_link_type, link->type);

View File

@ -181,7 +181,8 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
/* link can be also enabled by vbios. In this case it is not recorded
* in pipe_ctx. Disable link phy here to make sure it is completely off
*/
dp_disable_link_phy(link, &link_res, link->connector_signal);
if (dc_is_dp_signal(link->connector_signal))
dp_disable_link_phy(link, &link_res, link->connector_signal);
}
void link_resume(struct dc_link *link)

View File

@ -743,8 +743,6 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
{
struct dc_link_settings initial_link_setting = {
LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0};
if (link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
initial_link_setting.link_rate = link->preferred_link_setting.link_rate;
struct dc_link_settings current_link_setting =
initial_link_setting;
uint32_t link_bw;
@ -752,6 +750,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
if (req_bw > dp_link_bandwidth_kbps(link, &link->verified_link_cap))
return false;
if (link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
initial_link_setting.link_rate = link->preferred_link_setting.link_rate;
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing

View File

@ -1394,6 +1394,13 @@ static enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *pla
return dcn20_patch_unknown_plane_state(plane_state);
}
static void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
DC_FP_START();
dcn21_update_bw_bounding_box_fpu(dc, bw_params);
DC_FP_END();
}
static const struct resource_funcs dcn21_res_pool_funcs = {
.destroy = dcn21_destroy_resource_pool,
.link_enc_create = dcn21_link_encoder_create,

View File

@ -1854,6 +1854,13 @@ static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
static void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
DC_FP_START();
dcn31_update_bw_bounding_box_fpu(dc, bw_params);
DC_FP_END();
}
static struct resource_funcs dcn31_res_pool_funcs = {
.destroy = dcn31_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
@ -1996,6 +2003,8 @@ static bool dcn31_resource_construct(
dc->config.use_pipe_ctx_sync_logic = true;
dc->config.disable_hbr_audio_dp2 = true;
dc->config.no_native422_support = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {

View File

@ -1849,6 +1849,13 @@ static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
static void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
DC_FP_START();
dcn315_update_bw_bounding_box_fpu(dc, bw_params);
DC_FP_END();
}
static struct resource_funcs dcn315_res_pool_funcs = {
.destroy = dcn315_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
@ -1959,6 +1966,8 @@ static bool dcn315_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
dc->config.no_native422_support = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {

View File

@ -1725,6 +1725,13 @@ static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
static void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
DC_FP_START();
dcn316_update_bw_bounding_box_fpu(dc, bw_params);
DC_FP_END();
}
static struct resource_funcs dcn316_res_pool_funcs = {
.destroy = dcn316_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,

View File

@ -155,6 +155,10 @@ static void dcn42_update_soc_bb_with_values_from_clk_mgr(struct dml2_soc_bb *soc
dcn42_convert_dc_clock_table_to_soc_bb_clock_table(&soc_bb->clk_table, &soc_bb->vmin_limit,
dc->clk_mgr->bw_params);
}
if (dc->clk_mgr->bw_params->vram_type == Ddr5MemType) {
soc_bb->power_management_parameters = dcn42_ddr5_power_management_parameters;
}
}
static void apply_soc_bb_updates(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)

View File

@ -36,6 +36,7 @@
#define DP_BRANCH_DEVICE_ID_006037 0x006037
#define DP_BRANCH_DEVICE_ID_001CF8 0x001CF8
#define DP_BRANCH_DEVICE_ID_0060AD 0x0060AD
#define DP_BRANCH_DEVICE_ID_001FF2 0x001FF2
#define DP_BRANCH_HW_REV_10 0x10
#define DP_BRANCH_HW_REV_20 0x20

View File

@ -153,7 +153,7 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
* round down the vtotal value to avoid stretching vblank over
* panel's vtotal boundary.
*/
v_total = div64_u64(div64_u64(((unsigned long long)(
v_total = (unsigned int)div64_u64(div64_u64(((unsigned long long)(
frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
stream->timing.h_total), 1000000);
} else if (refresh_in_uhz >= stream->timing.max_refresh_in_uhz) {
@ -161,11 +161,11 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
* round up the vtotal value to prevent off-by-one error causing
* v_total_min to be below the panel's lower bound
*/
v_total = div64_u64(div64_u64(((unsigned long long)(
v_total = (unsigned int)div64_u64(div64_u64(((unsigned long long)(
frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
stream->timing.h_total) + (1000000 - 1), 1000000);
} else {
v_total = div64_u64(div64_u64(((unsigned long long)(
v_total = (unsigned int)div64_u64(div64_u64(((unsigned long long)(
frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
stream->timing.h_total) + 500000, 1000000);
}
@ -196,11 +196,11 @@ static unsigned int calc_v_total_from_duration(
uint32_t h_total_up_scaled;
h_total_up_scaled = stream->timing.h_total * 10000;
v_total = div_u64((unsigned long long)duration_in_us
v_total = (unsigned int)div_u64((unsigned long long)duration_in_us
* stream->timing.pix_clk_100hz + (h_total_up_scaled - 1),
h_total_up_scaled); //ceiling for MMax and MMin for MVRR
} else {
v_total = div64_u64(div64_u64(((unsigned long long)(
v_total = (unsigned int)div64_u64(div64_u64(((unsigned long long)(
duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
stream->timing.h_total), 1000);
}
@ -232,22 +232,28 @@ static void update_v_total_for_static_ramp(
target_duration_in_us;
/* Calculate ratio between new and current frame duration with 3 digit */
unsigned int frame_duration_ratio = div64_u64(1000000,
uint64_t frame_duration_ratio_u64 = div64_u64(1000000,
(1000 + div64_u64(((unsigned long long)(
STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME) *
current_duration_in_us),
1000000)));
ASSERT(frame_duration_ratio_u64 <= 0xFFFFFFFF);
unsigned int frame_duration_ratio = (unsigned int)frame_duration_ratio_u64;
/* Calculate delta between new and current frame duration in us */
unsigned int frame_duration_delta = div64_u64(((unsigned long long)(
uint64_t frame_duration_delta_u64 = div64_u64(((unsigned long long)(
current_duration_in_us) *
(1000 - frame_duration_ratio)), 1000);
ASSERT(frame_duration_delta_u64 <= 0xFFFFFFFF);
unsigned int frame_duration_delta = (unsigned int)frame_duration_delta_u64;
/* Adjust frame duration delta based on ratio between current and
* standard frame duration (frame duration at 60 Hz refresh rate).
*/
unsigned int ramp_rate_interpolated = div64_u64(((unsigned long long)(
uint64_t ramp_rate_interpolated_u64 = div64_u64(((unsigned long long)(
frame_duration_delta) * current_duration_in_us), 16666);
ASSERT(ramp_rate_interpolated_u64 <= 0xFFFFFFFF);
unsigned int ramp_rate_interpolated = (unsigned int)ramp_rate_interpolated_u64;
/* Going to a higher refresh rate (lower frame duration) */
if (ramp_direction_is_up) {
@ -277,7 +283,7 @@ static void update_v_total_for_static_ramp(
}
}
v_total = div64_u64(div64_u64(((unsigned long long)(
v_total = (unsigned int)div64_u64(div64_u64(((unsigned long long)(
current_duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
stream->timing.h_total), 1000);
@ -1058,8 +1064,12 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
else
in_out_vrr->fixed_refresh_in_uhz = 0;
refresh_range = div_u64(in_out_vrr->max_refresh_in_uhz + 500000, 1000000) -
div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000);
{
uint64_t rr_tmp = div_u64(in_out_vrr->max_refresh_in_uhz + 500000, 1000000) -
div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000);
ASSERT(rr_tmp <= 0xFFFFFFFF);
refresh_range = (unsigned int)rr_tmp;
}
in_out_vrr->supported = true;
}

View File

@ -250,10 +250,12 @@ static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
unsigned int lut_index;
table->backlight_thresholds[0] = 0;
table->backlight_offsets[0] = params.backlight_lut_array[0];
ASSERT(params.backlight_lut_array[0] <= 0xFFFF);
table->backlight_offsets[0] = (uint16_t)params.backlight_lut_array[0];
table->backlight_thresholds[num_entries-1] = 0xFFFF;
ASSERT(params.backlight_lut_array[params.backlight_lut_array_size - 1] <= 0xFFFF);
table->backlight_offsets[num_entries-1] =
params.backlight_lut_array[params.backlight_lut_array_size - 1];
(uint16_t)params.backlight_lut_array[params.backlight_lut_array_size - 1];
/* Setup all brightness levels between 0% and 100% exclusive
* Fills brightness-to-backlight transform table. Backlight custom curve
@ -265,12 +267,17 @@ static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
*/
for (i = 1; i+1 < num_entries; i++) {
lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
ASSERT(lut_index < params.backlight_lut_array_size);
table->backlight_thresholds[i] =
cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries));
table->backlight_offsets[i] =
cpu_to_be16(params.backlight_lut_array[lut_index]);
unsigned int threshold_val = DIV_ROUNDUP((i * 65536), num_entries);
unsigned int offset_val = params.backlight_lut_array[lut_index];
ASSERT(threshold_val <= 0xFFFF);
ASSERT(offset_val <= 0xFFFF);
table->backlight_thresholds[i] = cpu_to_be16((uint16_t)threshold_val);
table->backlight_offsets[i] = cpu_to_be16((uint16_t)offset_val);
}
}
@ -282,10 +289,12 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
unsigned int lut_index;
table->backlight_thresholds[0] = 0;
table->backlight_offsets[0] = params.backlight_lut_array[0];
ASSERT(params.backlight_lut_array[0] <= 0xFFFF);
table->backlight_offsets[0] = (uint16_t)params.backlight_lut_array[0];
table->backlight_thresholds[num_entries-1] = 0xFFFF;
ASSERT(params.backlight_lut_array[params.backlight_lut_array_size - 1] <= 0xFFFF);
table->backlight_offsets[num_entries-1] =
params.backlight_lut_array[params.backlight_lut_array_size - 1];
(uint16_t)params.backlight_lut_array[params.backlight_lut_array_size - 1];
/* Setup all brightness levels between 0% and 100% exclusive
* Fills brightness-to-backlight transform table. Backlight custom curve
@ -299,12 +308,16 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
lut_index = DIV_ROUNDUP((i * params.backlight_lut_array_size), num_entries);
ASSERT(lut_index < params.backlight_lut_array_size);
unsigned int threshold_val = DIV_ROUNDUP((i * 65536), num_entries);
unsigned int offset_val = params.backlight_lut_array[lut_index];
ASSERT(threshold_val <= 0xFFFF);
ASSERT(offset_val <= 0xFFFF);
table->backlight_thresholds[i] = (big_endian) ?
cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries)) :
cpu_to_le16(DIV_ROUNDUP((i * 65536), num_entries));
cpu_to_be16((uint16_t)threshold_val) : cpu_to_le16((uint16_t)threshold_val);
table->backlight_offsets[i] = (big_endian) ?
cpu_to_be16(params.backlight_lut_array[lut_index]) :
cpu_to_le16(params.backlight_lut_array[lut_index]);
cpu_to_be16((uint16_t)offset_val) : cpu_to_le16((uint16_t)offset_val);
}
}
@ -740,9 +753,12 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
}
if (params.backlight_ramping_override) {
ASSERT(params.backlight_ramping_reduction <= 0xFFFF);
ASSERT(params.backlight_ramping_start <= 0xFFFF);
for (i = 0; i < NUM_AGGR_LEVEL; i++) {
config.blRampReduction[i] = params.backlight_ramping_reduction;
config.blRampStart[i] = params.backlight_ramping_start;
config.blRampReduction[i] = (uint16_t)params.backlight_ramping_reduction;
config.blRampStart[i] = (uint16_t)params.backlight_ramping_start;
}
} else {
for (i = 0; i < NUM_AGGR_LEVEL; i++) {
@ -1060,6 +1076,7 @@ void calculate_replay_link_off_frame_count(struct dc_link *link,
bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_backlight_caps *caps)
{
unsigned int data_points_size;
uint64_t caps_size;
if (config_no >= ARRAY_SIZE(custom_backlight_profiles))
return false;
@ -1067,7 +1084,9 @@ bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_back
data_points_size = custom_backlight_profiles[config_no].num_data_points
* sizeof(custom_backlight_profiles[config_no].data_points[0]);
caps->size = sizeof(struct dm_acpi_atif_backlight_caps) - sizeof(caps->data_points) + data_points_size;
caps_size = sizeof(struct dm_acpi_atif_backlight_caps) - sizeof(caps->data_points) + data_points_size;
ASSERT(caps_size <= 0xFFFF);
caps->size = (uint16_t)caps_size;
caps->flags = 0;
caps->error_code = 0;
caps->ac_level_percentage = custom_backlight_profiles[config_no].ac_level_percentage;

View File

@ -57,7 +57,10 @@ static void clear_entry_from_vmid_table(struct core_vmid *core_vmid, unsigned in
static void evict_vmids(struct core_vmid *core_vmid)
{
int i;
uint16_t ord = dc_get_vmid_use_vector(core_vmid->dc);
int ord_int = dc_get_vmid_use_vector(core_vmid->dc);
ASSERT(ord_int >= 0 && ord_int <= 0xFFFF);
uint16_t ord = (uint16_t)ord_int;
// At this point any positions with value 0 are unused vmids, evict them
for (i = 1; i < core_vmid->num_vmid; i++) {
@ -120,7 +123,8 @@ uint8_t mod_vmid_get_for_ptb(struct mod_vmid *mod_vmid, uint64_t ptb)
ASSERT(0);
}
return vmid;
ASSERT(vmid >= 0 && vmid <= 0xFF);
return (uint8_t)vmid;
}
void mod_vmid_reset(struct mod_vmid *mod_vmid)

View File

@ -995,12 +995,15 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
return ret;
ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
if (ret)
return ret;
if (ret) {
size = ret;
goto out_pm_put;
}
if (size == 0)
size = sysfs_emit(buf, "\n");
out_pm_put:
amdgpu_pm_put_access(adev);
return size;
@ -3902,11 +3905,14 @@ static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
return ret;
ret = amdgpu_dpm_emit_clock_levels(adev, od_type, buf, &size);
if (ret)
return ret;
if (ret) {
size = ret;
goto out_pm_put;
}
if (size == 0)
size = sysfs_emit(buf, "\n");
out_pm_put:
amdgpu_pm_put_access(adev);
return size;

View File

@ -3062,9 +3062,6 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
smu7_set_private_data_based_on_pptable_v0(hwmgr);
}
if (result)
goto fail;
data->is_tlu_enabled = false;
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =

View File

@ -584,6 +584,7 @@ struct cmn2asic_mapping {
/* Message flags for smu_msg_args */
#define SMU_MSG_FLAG_ASYNC BIT(0) /* Async send - skip post-poll */
#define SMU_MSG_FLAG_LOCK_HELD BIT(1) /* Caller holds ctl->lock */
#define SMU_MSG_FLAG_FORCE_READ_ARG BIT(2) /* force read smu arg from pmfw */
/* smu_msg_ctl flags */
#define SMU_MSG_CTL_DEBUG_MAILBOX BIT(0) /* Debug mailbox supported */

View File

@ -1846,6 +1846,7 @@ static int aldebaran_mode2_reset(struct smu_context *smu)
amdgpu_device_load_pci_state(adev->pdev);
dev_dbg(adev->dev, "wait for reset ack\n");
ret = -ETIME;
while (ret == -ETIME && timeout) {
ret = smu_msg_wait_response(ctl, 0);
/* Wait a bit more time for getting ACK */
@ -1855,7 +1856,7 @@ static int aldebaran_mode2_reset(struct smu_context *smu)
continue;
}
if (ret != 1) {
if (ret != 0) {
dev_err(adev->dev, "failed to send mode2 message \tparam: 0x%08x response %#x\n",
SMU_RESET_MODE_2, ret);
goto out;
@ -1865,10 +1866,9 @@ static int aldebaran_mode2_reset(struct smu_context *smu)
} else {
dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n",
smu->smc_fw_version);
ret = -EOPNOTSUPP;
}
if (ret == 1)
ret = 0;
out:
mutex_unlock(&ctl->lock);

View File

@ -2214,17 +2214,61 @@ static void smu_v14_0_2_dump_od_table(struct smu_context *smu,
od_table->OverDriveTable.UclkFmax);
}
#define OD_ERROR_MSG_MAP(msg) \
[msg] = #msg
static const char *od_error_message[] = {
OD_ERROR_MSG_MAP(OD_REQUEST_ADVANCED_NOT_SUPPORTED),
OD_ERROR_MSG_MAP(OD_UNSUPPORTED_FEATURE),
OD_ERROR_MSG_MAP(OD_INVALID_FEATURE_COMBO_ERROR),
OD_ERROR_MSG_MAP(OD_GFXCLK_VF_CURVE_OFFSET_ERROR),
OD_ERROR_MSG_MAP(OD_VDD_GFX_VMAX_ERROR),
OD_ERROR_MSG_MAP(OD_VDD_SOC_VMAX_ERROR),
OD_ERROR_MSG_MAP(OD_PPT_ERROR),
OD_ERROR_MSG_MAP(OD_FAN_MIN_PWM_ERROR),
OD_ERROR_MSG_MAP(OD_FAN_ACOUSTIC_TARGET_ERROR),
OD_ERROR_MSG_MAP(OD_FAN_ACOUSTIC_LIMIT_ERROR),
OD_ERROR_MSG_MAP(OD_FAN_TARGET_TEMP_ERROR),
OD_ERROR_MSG_MAP(OD_FAN_ZERO_RPM_STOP_TEMP_ERROR),
OD_ERROR_MSG_MAP(OD_FAN_CURVE_PWM_ERROR),
OD_ERROR_MSG_MAP(OD_FAN_CURVE_TEMP_ERROR),
OD_ERROR_MSG_MAP(OD_FULL_CTRL_GFXCLK_ERROR),
OD_ERROR_MSG_MAP(OD_FULL_CTRL_UCLK_ERROR),
OD_ERROR_MSG_MAP(OD_FULL_CTRL_FCLK_ERROR),
OD_ERROR_MSG_MAP(OD_FULL_CTRL_VDD_GFX_ERROR),
OD_ERROR_MSG_MAP(OD_FULL_CTRL_VDD_SOC_ERROR),
OD_ERROR_MSG_MAP(OD_TDC_ERROR),
OD_ERROR_MSG_MAP(OD_GFXCLK_ERROR),
OD_ERROR_MSG_MAP(OD_UCLK_ERROR),
OD_ERROR_MSG_MAP(OD_FCLK_ERROR),
OD_ERROR_MSG_MAP(OD_OP_TEMP_ERROR),
OD_ERROR_MSG_MAP(OD_OP_GFX_EDC_ERROR),
OD_ERROR_MSG_MAP(OD_OP_GFX_PCC_ERROR),
OD_ERROR_MSG_MAP(OD_POWER_FEATURE_CTRL_ERROR),
};
static int smu_v14_0_2_upload_overdrive_table(struct smu_context *smu,
OverDriveTableExternal_t *od_table)
{
int ret;
ret = smu_cmn_update_table(smu,
SMU_TABLE_OVERDRIVE,
0,
(void *)od_table,
true);
if (ret)
dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
uint32_t read_arg = 0;
int ret, od_error_type;
ret = smu_cmn_update_table_read_arg(smu,
SMU_TABLE_OVERDRIVE,
0,
(void *)od_table,
&read_arg,
true);
if (ret) {
dev_err(smu->adev->dev, "Failed to upload overdrive table, ret:%d\n", ret);
if ((read_arg & 0xff) == TABLE_TRANSFER_FAILED) {
od_error_type = read_arg >> 16;
dev_err(smu->adev->dev, "Invalid overdrive table content: %s (%d)\n",
od_error_type < ARRAY_SIZE(od_error_message) ?
od_error_message[od_error_type] : "unknown",
od_error_type);
}
}
return ret;
}
@ -2374,6 +2418,7 @@ static int smu_v14_0_2_od_restore_table_single(struct smu_context *smu, long inp
}
od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
od_table->OverDriveTable.FeatureCtrlMask &= ~BIT(PP_OD_FEATURE_FAN_LEGACY_BIT);
break;
case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
od_table->OverDriveTable.FanZeroRpmEnable =
@ -2402,7 +2447,8 @@ static int smu_v14_0_2_od_restore_table_single(struct smu_context *smu, long inp
od_table->OverDriveTable.FanMinimumPwm =
boot_overdrive_table->OverDriveTable.FanMinimumPwm;
od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_LEGACY_BIT);
od_table->OverDriveTable.FeatureCtrlMask &= ~BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
break;
default:
dev_info(adev->dev, "Invalid table index: %ld\n", input);
@ -2572,6 +2618,7 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2];
od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR;
od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
od_table->OverDriveTable.FeatureCtrlMask &= ~BIT(PP_OD_FEATURE_FAN_LEGACY_BIT);
break;
case PP_OD_EDIT_ACOUSTIC_LIMIT:
@ -2641,7 +2688,7 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
break;
case PP_OD_EDIT_FAN_MINIMUM_PWM:
if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_LEGACY_BIT)) {
dev_warn(adev->dev, "Fan curve setting not supported!\n");
return -ENOTSUPP;
}
@ -2659,7 +2706,8 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
od_table->OverDriveTable.FanMinimumPwm = input[0];
od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_LEGACY_BIT);
od_table->OverDriveTable.FeatureCtrlMask &= ~BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
break;
case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:

View File

@ -496,7 +496,8 @@ static int smu_msg_v1_send_msg(struct smu_msg_ctl *ctl,
}
/* Read output args */
if (ret == 0 && args->num_out_args > 0) {
if ((ret == 0 || (args->flags & SMU_MSG_FLAG_FORCE_READ_ARG)) &&
args->num_out_args > 0) {
__smu_msg_v1_read_out_args(ctl, args);
dev_dbg(adev->dev, "smu send message: %s(%d) resp : 0x%08x",
smu_get_message_name(smu, args->msg), index, reg);
@ -1060,20 +1061,24 @@ int smu_cmn_check_fw_version(struct smu_context *smu)
return 0;
}
int smu_cmn_update_table(struct smu_context *smu,
enum smu_table_id table_index,
int argument,
void *table_data,
bool drv2smu)
int smu_cmn_update_table_read_arg(struct smu_context *smu,
enum smu_table_id table_index,
int argument,
void *table_data,
uint32_t *read_arg,
bool drv2smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct amdgpu_device *adev = smu->adev;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table;
struct smu_msg_ctl *ctl = &smu->msg_ctl;
struct smu_msg_args args;
int table_id = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_TABLE,
table_index);
uint32_t table_size;
int ret = 0;
if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
@ -1088,11 +1093,19 @@ int smu_cmn_update_table(struct smu_context *smu,
amdgpu_hdp_flush(adev, NULL);
}
ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
SMU_MSG_TransferTableDram2Smu :
SMU_MSG_TransferTableSmu2Dram,
table_id | ((argument & 0xFFFF) << 16),
NULL);
args.msg = drv2smu ? SMU_MSG_TransferTableDram2Smu : SMU_MSG_TransferTableSmu2Dram;
args.args[0] = ((argument & 0xFFFF) << 16) | (table_id & 0xffff);
args.num_args = 1;
args.out_args[0] = 0;
args.num_out_args = read_arg ? 1 : 0;
args.flags = read_arg ? SMU_MSG_FLAG_FORCE_READ_ARG : 0;
args.timeout = 0;
ret = ctl->ops->send_msg(ctl, &args);
if (read_arg)
*read_arg = args.out_args[0];
if (ret)
return ret;

View File

@ -102,6 +102,9 @@ int smu_msg_send_async_locked(struct smu_msg_ctl *ctl,
#define SMU_DPM_PCIE_GEN_IDX(gen) smu_cmn_dpm_pcie_gen_idx((gen))
#define SMU_DPM_PCIE_WIDTH_IDX(width) smu_cmn_dpm_pcie_width_idx((width))
#define smu_cmn_update_table(smu, table_index, argument, table_data, drv2smu) \
smu_cmn_update_table_read_arg((smu), (table_index), (argument), (table_data), NULL, (drv2smu))
extern const int link_speed[];
/* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
@ -168,11 +171,12 @@ int smu_cmn_get_smc_version(struct smu_context *smu,
uint32_t *if_version,
uint32_t *smu_version);
int smu_cmn_update_table(struct smu_context *smu,
enum smu_table_id table_index,
int argument,
void *table_data,
bool drv2smu);
int smu_cmn_update_table_read_arg(struct smu_context *smu,
enum smu_table_id table_index,
int argument,
void *table_data,
uint32_t *read_arg,
bool drv2smu);
int smu_cmn_vram_cpy(struct smu_context *smu, void *dst,
const void *src, size_t len);

View File

@ -517,14 +517,9 @@ int amdgpu_virt_ras_hw_fini(struct amdgpu_device *adev)
(struct amdgpu_virt_ras_cmd *)ras_mgr->virt_ras_cmd;
struct vram_blocks_ecc *blks_ecc = &virt_ras->blocks_ecc;
if (blks_ecc->shared_mem.cpu_addr) {
__set_cmd_auto_update(adev,
RAS_CMD__GET_ALL_BLOCK_ECC_STATUS,
blks_ecc->shared_mem.gpa,
blks_ecc->shared_mem.size, false);
if (blks_ecc->shared_mem.cpu_addr)
memset(blks_ecc->shared_mem.cpu_addr, 0, blks_ecc->shared_mem.size);
}
memset(blks_ecc, 0, sizeof(*blks_ecc));
return 0;

View File

@ -2981,7 +2981,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
return ret;
do {
bool cursor_in_su_area;
bool cursor_in_su_area = false;
/*
* Adjust su area to cover cursor fully as necessary

View File

@ -513,7 +513,7 @@ static void rcar_du_cmm_setup(struct drm_crtc *crtc)
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_cmm_config cmm_config = {};
if (!rcrtc->cmm->dev)
if (!rcrtc->cmm)
return;
if (drm_lut)
@ -667,7 +667,7 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_disable(rcrtc);
if (rcrtc->cmm->dev)
if (rcrtc->cmm)
rcar_cmm_disable(rcrtc->cmm->dev);
/*
@ -726,7 +726,7 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
struct rcar_du_device *rcdu = rcrtc->dev;
if (rcrtc->cmm->dev)
if (rcrtc->cmm)
rcar_cmm_enable(rcrtc->cmm->dev);
rcar_du_crtc_get(rcrtc);