drm/amdgpu: avoid memory allocation in the critical code path v3
author     Christian König <christian.koenig@amd.com>
           Wed, 29 Oct 2025 14:36:32 +0000 (15:36 +0100)
committer  Alex Deucher <alexander.deucher@amd.com>
           Fri, 14 Nov 2025 16:27:46 +0000 (11:27 -0500)
When we run out of VMIDs we need to wait for some to become available.
Previously we were using a dma_fence_array for that, but that meant
allocating memory in the critical code path.
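
For illustration, a condensed sketch of the old scheme (a hypothetical helper,
not the driver's actual function; the name and parameters are made up): the
active fences of the VMIDs were collected and bundled into a dma_fence_array,
so both the scratch array and dma_fence_array_create() needed allocations that
can fail under memory pressure.

  #include <linux/dma-fence-array.h>
  #include <linux/slab.h>

  /*
   * Roughly what the removed code below does: bundle @count fences into
   * one dma_fence_array to wait on.  Both kmalloc_array() and
   * dma_fence_array_create() allocate memory and can therefore fail.
   */
  static struct dma_fence *example_build_wait_fence(struct dma_fence **src,
                                                    unsigned int count,
                                                    u64 context,
                                                    unsigned int seqno)
  {
          struct dma_fence_array *array;
          struct dma_fence **fences;
          unsigned int i;

          fences = kmalloc_array(count, sizeof(*fences), GFP_NOWAIT);
          if (!fences)
                  return NULL;

          for (i = 0; i < count; ++i)
                  fences[i] = dma_fence_get(src[i]);

          /* On success the array takes ownership of @fences. */
          array = dma_fence_array_create(count, fences, context, seqno, true);
          if (!array) {
                  for (i = 0; i < count; ++i)
                          dma_fence_put(fences[i]);
                  kfree(fences);
                  return NULL;
          }

          return &array->base;
  }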

Instead just wait for the first unsignaled fence of the least recently
used VMID to signal. That is not as efficient since we may end up in this
function multiple times, but allocating memory can easily fail or deadlock
if we have to wait for memory to become available.
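
A minimal sketch of how a caller might consume the fence that
amdgpu_vmid_grab_idle() now returns (the helper name, the parameter list and
the blocking dma_fence_wait() are assumptions for illustration; the driver's
real callers may treat the returned fence differently, e.g. as a scheduler
dependency):

  #include "amdgpu.h"
  #include "amdgpu_ids.h"

  /* Hypothetical retry loop: either we get an idle VMID, or we wait for
   * the single fence of the least recently used one and try again. */
  static int example_wait_for_vmid(struct amdgpu_ring *ring,
                                   struct amdgpu_vmid **idle)
  {
          struct dma_fence *fence = NULL;
          int r;

          for (;;) {
                  r = amdgpu_vmid_grab_idle(ring, idle, &fence);
                  if (r)
                          return r;
                  if (!fence)
                          return 0;       /* *idle points to a usable VMID */

                  /* No allocation: just wait on the one returned fence
                   * and drop the reference the callee took for us. */
                  r = dma_fence_wait(fence, true);
                  dma_fence_put(fence);
                  fence = NULL;
                  if (r)
                          return r;
          }
  }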

v2: remove now unused VM manager fields
v3: fix dma_fence reference

Signed-off-by: Christian König <christian.koenig@amd.com>
Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4258
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 3ef5bc95642cab58d882f109badaa5ddc8368be9..b2af2cc6826c440cd42eb4c3ade43d0fb46679cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -201,58 +201,34 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->vm_hub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-       struct dma_fence **fences;
-       unsigned i;
 
+       /* If anybody is waiting for a VMID let everybody wait for fairness */
        if (!dma_fence_is_signaled(ring->vmid_wait)) {
                *fence = dma_fence_get(ring->vmid_wait);
                return 0;
        }
 
-       fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_NOWAIT);
-       if (!fences)
-               return -ENOMEM;
-
        /* Check if we have an idle VMID */
-       i = 0;
-       list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
+       list_for_each_entry_reverse((*idle), &id_mgr->ids_lru, list) {
                /* Don't use per engine and per process VMID at the same time */
                struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
                        NULL : ring;
 
-               fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
-               if (!fences[i])
-                       break;
-               ++i;
+               *fence = amdgpu_sync_peek_fence(&(*idle)->active, r);
+               if (!(*fence))
+                       return 0;
        }
 
-       /* If we can't find a idle VMID to use, wait till one becomes available */
-       if (&(*idle)->list == &id_mgr->ids_lru) {
-               u64 fence_context = adev->vm_manager.fence_context + ring->idx;
-               unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
-               struct dma_fence_array *array;
-               unsigned j;
-
-               *idle = NULL;
-               for (j = 0; j < i; ++j)
-                       dma_fence_get(fences[j]);
-
-               array = dma_fence_array_create(i, fences, fence_context,
-                                              seqno, true);
-               if (!array) {
-                       for (j = 0; j < i; ++j)
-                               dma_fence_put(fences[j]);
-                       kfree(fences);
-                       return -ENOMEM;
-               }
-
-               *fence = dma_fence_get(&array->base);
-               dma_fence_put(ring->vmid_wait);
-               ring->vmid_wait = &array->base;
-               return 0;
-       }
-       kfree(fences);
+       /*
+        * If we can't find an idle VMID to use, wait on a fence from the least
+        * recently used in the hope that it will be available soon.
+        */
+       *idle = NULL;
+       dma_fence_put(ring->vmid_wait);
+       ring->vmid_wait = dma_fence_get(*fence);
 
+       /* This is the reference we return */
+       dma_fence_get(*fence);
        return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 700b4a776532caa1f31985b5bb89ae03440f07fe..7fc081e88b6a9e4f72eb878b62143013d046587e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2843,8 +2843,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
  */
 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 {
-       unsigned i;
-
        /* Concurrent flushes are only possible starting with Vega10 and
         * are broken on Navi10 and Navi14.
         */
@@ -2853,11 +2851,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
                                              adev->asic_type == CHIP_NAVI14);
        amdgpu_vmid_mgr_init(adev);
 
-       adev->vm_manager.fence_context =
-               dma_fence_context_alloc(AMDGPU_MAX_RINGS);
-       for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-               adev->vm_manager.seqno[i] = 0;
-
        spin_lock_init(&adev->vm_manager.prt_lock);
        atomic_set(&adev->vm_manager.num_prt_users, 0);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index cf0ec94e8a07547e6795baf19829161fe48ab0d5..15d757c016cbbce14c4284df4bc15ee1228e50cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -453,10 +453,6 @@ struct amdgpu_vm_manager {
        unsigned int                            first_kfd_vmid;
        bool                                    concurrent_flush;
 
-       /* Handling of VM fences */
-       u64                                     fence_context;
-       unsigned                                seqno[AMDGPU_MAX_RINGS];
-
        uint64_t                                max_pfn;
        uint32_t                                num_level;
        uint32_t                                block_size;