From: Dave Airlie
Date: Thu, 20 Nov 2025 22:55:08 +0000 (+1000)
Subject: Merge tag 'v6.18-rc6' into drm-next
X-Git-Url: https://gentwo.org/gitweb/?a=commitdiff_plain;h=ce0478b02ed29465c1de3d03c3eea721355f9d2d;p=linux%2F.git

Merge tag 'v6.18-rc6' into drm-next

Linux 6.18-rc6

Backmerge in order to merge msm next

Signed-off-by: Dave Airlie
---

ce0478b02ed29465c1de3d03c3eea721355f9d2d
diff --cc drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index c1461317eb29,ed3bef1edfe4..e22cfa7c6d32
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@@ -81,17 -81,16 +81,29 @@@ static int amdgpu_dma_buf_attach(struc
  	struct drm_gem_object *obj = dmabuf->priv;
  	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
  	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 +	int r;
 +
 +	/*
 +	 * Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+.
 +	 * Such buffers cannot be safely accessed over P2P due to device-local
 +	 * compression metadata. Fallback to system-memory path instead.
 +	 * Device supports GFX12 (GC 12.x or newer)
 +	 * BO was created with the AMDGPU_GEM_CREATE_GFX12_DCC flag
 +	 *
 +	 */
 +	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0) &&
  	    bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
  		attach->peer2peer = false;
diff --cc drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 5e2813e9ae2f,62a39204fe0b..c21eb34b8cc3
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@@ -51,31 -51,7 +51,30 @@@
  #define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS)
 +/**
 + * define MAX_DIG_LINK_ENCODERS - maximum number of digital encoders
 + *
 + * Digital encoders are ENGINE_ID_DIGA...G, there are at most 7,
 + * although not every GPU may have that many.
 + */
  #define MAX_DIG_LINK_ENCODERS 7
 +
 +/**
 + * define MAX_DAC_LINK_ENCODERS - maximum number of analog link encoders
 + *
 + * Analog encoders are ENGINE_ID_DACA/B, there are at most 2,
 + * although not every GPU may have that many. Modern GPUs typically
 + * don't have analog encoders.
 + */
 +#define MAX_DAC_LINK_ENCODERS 2
 +
 +/**
 + * define MAX_LINK_ENCODERS - maximum number of link encoders in total
 + *
 + * This includes both analog and digital encoders.
 + */
 +#define MAX_LINK_ENCODERS (MAX_DIG_LINK_ENCODERS + MAX_DAC_LINK_ENCODERS)
 +
- #define MAX_DIG_LINK_ENCODERS 7
  #define MAX_DWB_PIPES 1
  #define MAX_HPO_DP2_ENCODERS 4
  #define MAX_HPO_DP2_LINK_ENCODERS 4
diff --cc drivers/gpu/drm/xe/xe_exec_queue.c
index 8724f8de67e2,cb5f204c08ed..12adfc3a0547
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@@ -387,10 -343,7 +387,13 @@@ void xe_exec_queue_destroy(struct kref
  {
  	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
  	struct xe_exec_queue *eq, *next;
 +	int i;
 +
 +	xe_assert(gt_to_xe(q->gt), atomic_read(&q->job_cnt) == 0);
 +
  	if (q->ufence_syncobj)
  		drm_syncobj_put(q->ufence_syncobj);
diff --cc drivers/gpu/drm/xe/xe_gt.c
index 6d479948bf21,6d3db5e55d98..dbb5e7a9bc6a
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@@ -875,13 -872,21 +875,12 @@@ static void gt_reset_worker(struct work
  err_out:
  	xe_force_wake_put(gt_to_fw(gt), fw_ref);
  	XE_WARN_ON(xe_uc_start(&gt->uc));
 +
  err_fail:
  	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
 -	xe_device_declare_wedged(gt_to_xe(gt));
 -
 -err_pm_put:
  	xe_pm_runtime_put(gt_to_xe(gt));
 -
 -	return err;
 -}
 -
 -static void gt_reset_worker(struct work_struct *w)
 -{
 -	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);
 -
 -	gt_reset(gt);
  }
  
  void xe_gt_reset_async(struct xe_gt *gt)