From aed9a1a4f7106ff99a882ad06318cebfa71016a2 Mon Sep 17 00:00:00 2001
From: Mohamed Ahmed <mohamedahmedegypt2001@gmail.com>
Date: Thu, 9 May 2024 23:43:52 +0300
Subject: [PATCH 1/4] drm/nouveau: use tile_mode and pte_kind for VM_BIND bo
 allocations

Allow PTE kind and tile mode on BO create with VM_BIND, and add a
GETPARAM to indicate this change. This is needed to support modifiers in
NVK and to ensure correctness when dealing with the nouveau GL driver.

The userspace modifiers implementation this is for can be found here:
https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24795

Fixes: b88baab82871 ("drm/nouveau: implement new VM_BIND uAPI")
Signed-off-by: Mohamed Ahmed <mohamedahmedegypt2001@gmail.com>
Reviewed-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240509204352.7597-1-mohamedahmedegypt2001@gmail.com
---
 drivers/gpu/drm/nouveau/nouveau_abi16.c |  3 ++
 drivers/gpu/drm/nouveau/nouveau_bo.c    | 44 +++++++++++--------
 include/uapi/drm/nouveau_drm.h          |  7 ++++
 3 files changed, 29 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 80f74ee0fc78..47e53e17b4e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -272,6 +272,9 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
 		break;
 	}
+	case NOUVEAU_GETPARAM_HAS_VMA_TILEMODE:
+		getparam->value = 1;
+		break;
 	default:
 		NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index db8cbf615112..186add400ea5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -241,28 +241,28 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
 	}
 	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
 
-	if (!nouveau_cli_uvmm(cli) || internal) {
-		/* for BO noVM allocs, don't assign kinds */
-		if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
-			nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
-			if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
-				kfree(nvbo);
-				return ERR_PTR(-EINVAL);
-			}
-			nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
-		} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-			nvbo->kind = (tile_flags & 0x00007f00) >> 8;
-			nvbo->comp = (tile_flags & 0x00030000) >> 16;
-			if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
-				kfree(nvbo);
-				return ERR_PTR(-EINVAL);
-			}
-		} else {
-			nvbo->zeta = (tile_flags & 0x00000007);
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+			kfree(nvbo);
+			return ERR_PTR(-EINVAL);
 		}
-		nvbo->mode = tile_mode;
+		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+	} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+		nvbo->comp = (tile_flags & 0x00030000) >> 16;
+		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+			kfree(nvbo);
+			return ERR_PTR(-EINVAL);
+		}
+	} else {
+		nvbo->zeta = (tile_flags & 0x00000007);
+	}
+	nvbo->mode = tile_mode;
+
+	if (!nouveau_cli_uvmm(cli) || internal) {
 		/* Determine the desirable target GPU page size for the buffer. */
 		for (i = 0; i < vmm->page_nr; i++) {
 			/* Because we cannot currently allow VMM maps to fail
@@ -304,12 +304,6 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
 		}
 		nvbo->page = vmm->page[pi].shift;
 	} else {
-		/* reject other tile flags when in VM mode. */
-		if (tile_mode)
-			return ERR_PTR(-EINVAL);
-		if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
-			return ERR_PTR(-EINVAL);
-
 		/* Determine the desirable target GPU page size for the buffer. */
 		for (i = 0; i < vmm->page_nr; i++) {
 			/* Because we cannot currently allow VMM maps to fail
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index cd84227f1b42..5402f77ee859 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -68,6 +68,13 @@ extern "C" {
  */
 #define NOUVEAU_GETPARAM_VRAM_USED 19
 
+/*
+ * NOUVEAU_GETPARAM_HAS_VMA_TILEMODE
+ *
+ * Query whether tile mode and PTE kind are accepted with VM allocs or not.
+ */
+#define NOUVEAU_GETPARAM_HAS_VMA_TILEMODE 20
+
 struct drm_nouveau_getparam {
 	__u64 param;
 	__u64 value;

From 117bbc0e43adc6f76a3fc39a98f75a811a853459 Mon Sep 17 00:00:00 2001
From: Matthew Auld <matthew.auld@intel.com>
Date: Thu, 29 Feb 2024 10:51:13 +0000
Subject: [PATCH 2/4] drm/buddy: stop using PAGE_SIZE
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The drm_buddy minimum page-size requirements should be distinct from
the CPU PAGE_SIZE. The only restriction is that the minimum page-size
is at least 4K.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20240229105112.250077-3-matthew.auld@intel.com
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/drm_buddy.c | 2 +-
 include/drm/drm_buddy.h     | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 5ebdd6f8f36e..f999568d69c1 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -102,7 +102,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
 	if (size < chunk_size)
 		return -EINVAL;
 
-	if (chunk_size < PAGE_SIZE)
+	if (chunk_size < SZ_4K)
 		return -EINVAL;
 
 	if (!is_power_of_2(chunk_size))
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index a5b39fc01003..19ed661a32f3 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -53,8 +53,8 @@ struct drm_buddy_block {
 	struct list_head tmp_link;
 };
 
-/* Order-zero must be at least PAGE_SIZE */
-#define DRM_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)
+/* Order-zero must be at least SZ_4K */
+#define DRM_BUDDY_MAX_ORDER (63 - 12)
 
 /*
  * Binary Buddy System.
@@ -82,7 +82,7 @@ struct drm_buddy {
 	unsigned int n_roots;
 	unsigned int max_order;
 
-	/* Must be at least PAGE_SIZE */
+	/* Must be at least SZ_4K */
 	u64 chunk_size;
 	u64 size;
 	u64 avail;

From 520fb7f183e9b4d0ad7a2f084f3c4987845425e2 Mon Sep 17 00:00:00 2001
From: Matthew Auld <matthew.auld@intel.com>
Date: Thu, 29 Feb 2024 10:51:14 +0000
Subject: [PATCH 3/4] drm/tests/buddy: stop using PAGE_SIZE
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Using PAGE_SIZE in the tests gives the wrong impression that the min
page-size has to be tied to the CPU PAGE_SIZE.
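To illustrate the decoupling, here is a minimal sketch of a driver
picking a GPU-side allocation granularity larger than the CPU page
size; the function name and sizes are hypothetical, and only the
drm_buddy calls exercised by the tests below are used (includes:
<drm/drm_buddy.h>, <linux/sizes.h>):

	static int vram_mgr_init_hypothetical(struct drm_buddy *mm)
	{
		LIST_HEAD(allocated);
		int err;

		/* Manage a 1 GiB range in 64K minimum chunks; chunk_size
		 * only has to be a power of two >= SZ_4K, independent of
		 * the CPU PAGE_SIZE.
		 */
		err = drm_buddy_init(mm, SZ_1G, SZ_64K);
		if (err)
			return err;

		/* Carve out 1 MiB anywhere in the range, default policy. */
		err = drm_buddy_alloc_blocks(mm, 0, SZ_1G, SZ_1M, SZ_64K,
					     &allocated, 0);
		if (err) {
			drm_buddy_fini(mm);
			return err;
		}

		drm_buddy_free_list(mm, &allocated);
		drm_buddy_fini(mm);
		return 0;
	}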
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240229105112.250077-4-matthew.auld@intel.com
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/tests/drm_buddy_test.c | 42 +++++++++++++-------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index e48863a44556..f2397696d252 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -329,8 +329,8 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
 	 * Eventually we will have a fully 50% fragmented mm.
 	 */
 
-	mm_size = PAGE_SIZE << max_order;
-	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+	mm_size = SZ_4K << max_order;
+	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
 			       "buddy_init failed\n");
 
 	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
@@ -344,7 +344,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
 		}
 
 		for (order = top; order--;) {
-			size = get_size(order, PAGE_SIZE);
+			size = get_size(order, mm.chunk_size);
 			KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
									    mm_size, size, size,
									    &tmp, flags),
@@ -358,7 +358,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
 		}
 
 		/* There should be one final page for this sub-allocation */
-		size = get_size(0, PAGE_SIZE);
+		size = get_size(0, mm.chunk_size);
 		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM for hole\n");
@@ -368,7 +368,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
 
 		list_move_tail(&block->link, &holes);
 
-		size = get_size(top, PAGE_SIZE);
+		size = get_size(top, mm.chunk_size);
 		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
@@ -379,7 +379,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
 
 	/* Nothing larger than blocks of chunk_size now available */
 	for (order = 1; order <= max_order; order++) {
-		size = get_size(order, PAGE_SIZE);
+		size = get_size(order, mm.chunk_size);
 		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
@@ -408,14 +408,14 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
 	 * page left.
 	 */
 
-	mm_size = PAGE_SIZE << max_order;
-	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+	mm_size = SZ_4K << max_order;
+	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
 			       "buddy_init failed\n");
 
 	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
 
 	for (order = 0; order < max_order; order++) {
-		size = get_size(order, PAGE_SIZE);
+		size = get_size(order, mm.chunk_size);
 		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
@@ -428,7 +428,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
 	}
 
 	/* And now the last remaining block available */
-	size = get_size(0, PAGE_SIZE);
+	size = get_size(0, mm.chunk_size);
 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc hit -ENOMEM on final alloc\n");
@@ -440,7 +440,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
 
 	/* Should be completely full! */
 	for (order = max_order; order--;) {
-		size = get_size(order, PAGE_SIZE);
+		size = get_size(order, mm.chunk_size);
 		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded, it should be full!");
@@ -456,7 +456,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
 		list_del(&block->link);
 		drm_buddy_free_block(&mm, block);
 
-		size = get_size(order, PAGE_SIZE);
+		size = get_size(order, mm.chunk_size);
 		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
@@ -471,7 +471,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
 	}
 
 	/* To confirm, now the whole mm should be available */
-	size = get_size(max_order, PAGE_SIZE);
+	size = get_size(max_order, mm.chunk_size);
 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
@@ -502,15 +502,15 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
 	 * try to allocate them all.
 	 */
 
-	mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);
+	mm_size = SZ_4K * ((1 << (max_order + 1)) - 1);
 
-	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
 			       "buddy_init failed\n");
 
 	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
 
 	for (order = 0; order <= max_order; order++) {
-		size = get_size(order, PAGE_SIZE);
+		size = get_size(order, mm.chunk_size);
 		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
@@ -523,7 +523,7 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
 	}
 
 	/* Should be completely full! */
-	size = get_size(0, PAGE_SIZE);
+	size = get_size(0, mm.chunk_size);
 	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							   size, size, &tmp, flags),
			      "buddy_alloc unexpectedly succeeded, it should be full!");
@@ -540,7 +540,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
 	LIST_HEAD(allocated);
 	struct drm_buddy mm;
 
-	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));
+	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K));
 
 	KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
 			    "mm.max_order(%d) != %d\n", mm.max_order,
@@ -548,7 +548,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
 
 	size = mm.chunk_size << mm.max_order;
 	KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
-							PAGE_SIZE, &allocated, flags));
+							mm.chunk_size, &allocated, flags));
 
 	block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
 	KUNIT_EXPECT_TRUE(test, block);
@@ -558,10 +558,10 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
 			    drm_buddy_block_order(block), mm.max_order);
 
 	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
-			    BIT_ULL(mm.max_order) * PAGE_SIZE,
+			    BIT_ULL(mm.max_order) * mm.chunk_size,
			    "block size(%llu) != %llu\n",
 			    drm_buddy_block_size(&mm, block),
-			    BIT_ULL(mm.max_order) * PAGE_SIZE);
+			    BIT_ULL(mm.max_order) * mm.chunk_size);
 
 	drm_buddy_free_list(&mm, &allocated);
 	drm_buddy_fini(&mm);

From 39bc27bd688066a63e56f7f64ad34fae03fbe3b8 Mon Sep 17 00:00:00 2001
From: "Wachowski, Karol" <karol.wachowski@intel.com>
Date: Mon, 20 May 2024 12:05:14 +0200
Subject: [PATCH 4/4] drm/shmem-helper: Fix BUG_ON() on mmap(PROT_WRITE,
 MAP_PRIVATE)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The lack of a check for copy-on-write (COW) mappings in
drm_gem_shmem_mmap() allows users to call mmap() with PROT_WRITE and
MAP_PRIVATE, causing a kernel panic due to the BUG_ON() in
vmf_insert_pfn_prot():

  BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

Return -EINVAL early if a COW mapping is detected.

This bug affects all drm drivers using the default shmem helpers.
It can be reproduced by this simple example:

  void *ptr = mmap(0, size, PROT_WRITE, MAP_PRIVATE, fd, mmap_offset);
  ptr[0] = 0;

Fixes: 2194a63a818d ("drm: Add library for shmem backed GEM objects")
Cc: Noralf Trønnes <noralf@tronnes.org>
Cc: Eric Anholt <eric@anholt.net>
Cc: Rob Herring <robh@kernel.org>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Maxime Ripard <mripard@kernel.org>
Cc: Thomas Zimmermann <tzimmermann@suse.de>
Cc: David Airlie <airlied@gmail.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: dri-devel@lists.freedesktop.org
Cc: <stable@vger.kernel.org> # v5.2+
Signed-off-by: Wachowski, Karol <karol.wachowski@intel.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20240520100514.925681-1-jacek.lawrynowicz@linux.intel.com
---
 drivers/gpu/drm/drm_gem_shmem_helper.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index e435f986cd13..1ff0678be7c7 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -610,6 +610,9 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
 		return ret;
 	}
 
+	if (is_cow_mapping(vma->vm_flags))
+		return -EINVAL;
+
 	dma_resv_lock(shmem->base.resv, NULL);
 	ret = drm_gem_shmem_get_pages(shmem);
 	dma_resv_unlock(shmem->base.resv);
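
With the check in place, the reproducer above fails cleanly at mmap()
time instead of panicking on the first write. As a quick sanity test, a
minimal userspace sketch (hypothetical helper; it assumes fd,
mmap_offset and size already describe a shmem-backed GEM buffer):

  #include <errno.h>
  #include <stddef.h>
  #include <sys/mman.h>

  /* Returns 0 if the kernel now rejects COW mappings of GEM buffers. */
  static int check_cow_rejected(int fd, off_t mmap_offset, size_t size)
  {
          void *ptr = mmap(NULL, size, PROT_WRITE, MAP_PRIVATE, fd,
                           mmap_offset);

          if (ptr != MAP_FAILED) {
                  /* Pre-fix kernel: the mmap() succeeds and the first
                   * write triggers the BUG_ON() instead.
                   */
                  munmap(ptr, size);
                  return -1;
          }
          return errno == EINVAL ? 0 : -1;
  }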