Merge tag 'drm-misc-next-2022-11-24' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 6.2:

Cross-subsystem Changes:
- fbdev: Make fb_modesetting_disabled() static
- udmabuf: Add vmap and vunmap methods to udmabuf_ops

Core Changes:
- doc: make drm-uapi igt-tests more readable
- fb-helper: Revert of the damage worker removal
- fourcc: Add missing big-endian XRGB1555 and RGB565 formats
- gem-shmem: Fix for resource leakage in __drm_gem_shmem_create()
- scheduler: Fix lockup in drm_sched_entity_kill()

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20221124074615.ahflw5q5ktfdsr7k@houat
@@ -402,19 +402,19 @@ It's possible to run the IGT-tests in a VM in two ways:
 	1. Use IGT inside a VM
 	2. Use IGT from the host machine and write the results in a shared directory.
 
-As follow, there is an example of using a VM with a shared directory with
-the host machine to run igt-tests. As an example it's used virtme::
+Following is an example of using a VM with a shared directory with
+the host machine to run igt-tests. This example uses virtme::
 
 	$ virtme-run --rwdir /path/for/shared_dir --kdir=path/for/kernel/directory --mods=auto
 
-Run the igt-tests in the guest machine, as example it's ran the 'kms_flip'
+Run the igt-tests in the guest machine. This example runs the 'kms_flip'
 tests::
 
 	$ /path/for/igt-gpu-tools/scripts/run-tests.sh -p -s -t "kms_flip.*" -v
 
-In this example, instead of build the igt_runner, Piglit is used
-(-p option); it's created html summary of the tests results and it's saved
-in the folder "igt-gpu-tools/results"; it's executed only the igt-tests
+In this example, instead of building the igt_runner, Piglit is used
+(-p option). It creates an HTML summary of the test results and saves
+them in the folder "igt-gpu-tools/results". It executes only the igt-tests
 matching the -t option.
 
 Display CRC Support
@@ -13,6 +13,8 @@
 #include <linux/slab.h>
 #include <linux/udmabuf.h>
 #include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
+#include <linux/iosys-map.h>
 
 static int list_limit = 1024;
 module_param(list_limit, int, 0644);
@@ -60,6 +62,30 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
 	return 0;
 }
 
+static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
+{
+	struct udmabuf *ubuf = buf->priv;
+	void *vaddr;
+
+	dma_resv_assert_held(buf->resv);
+
+	vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
+	if (!vaddr)
+		return -EINVAL;
+
+	iosys_map_set_vaddr(map, vaddr);
+	return 0;
+}
+
+static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
+{
+	struct udmabuf *ubuf = buf->priv;
+
+	dma_resv_assert_held(buf->resv);
+
+	vm_unmap_ram(map->vaddr, ubuf->pagecount);
+}
+
 static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
 				     enum dma_data_direction direction)
 {
@@ -162,6 +188,8 @@ static const struct dma_buf_ops udmabuf_ops = {
 	.unmap_dma_buf	   = unmap_udmabuf,
 	.release	   = release_udmabuf,
 	.mmap		   = mmap_udmabuf,
+	.vmap		   = vmap_udmabuf,
+	.vunmap		   = vunmap_udmabuf,
 	.begin_cpu_access  = begin_cpu_udmabuf,
 	.end_cpu_access    = end_cpu_udmabuf,
 };
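With .vmap and .vunmap wired up, in-kernel importers can obtain a kernel virtual mapping of a udmabuf through the ordinary dma-buf interface. As a rough sketch only (not part of this patch: the example_touch_dmabuf() wrapper is hypothetical, while dma_buf_vmap(), dma_buf_vunmap() and the iosys_map helpers are the existing APIs), an importer holding a reference to the buffer could do something like:

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/iosys-map.h>

/*
 * Illustrative sketch, not from the patch: ask for a kernel virtual
 * mapping of a udmabuf-backed dma-buf. With the new .vmap/.vunmap ops
 * this succeeds instead of failing. The caller is assumed to hold a
 * reference on @buf; the reservation lock is taken because the vmap
 * callbacks above assert it is held.
 */
static int example_touch_dmabuf(struct dma_buf *buf)
{
	struct iosys_map map;
	int ret;

	ret = dma_resv_lock(buf->resv, NULL);
	if (ret)
		return ret;

	ret = dma_buf_vmap(buf, &map);	/* ends up in vmap_udmabuf() */
	if (ret)
		goto out_unlock;

	/* Zero the whole buffer through the mapping, then drop it. */
	iosys_map_memset(&map, 0, 0, buf->size);

	dma_buf_vunmap(buf, &map);	/* ends up in vunmap_udmabuf() */

out_unlock:
	dma_resv_unlock(buf->resv);
	return ret;
}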
@@ -190,6 +190,10 @@ const struct drm_format_info *__drm_format_info(u32 format)
 		{ .format = DRM_FORMAT_BGRA5551,	.depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
 		{ .format = DRM_FORMAT_RGB565,		.depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
 		{ .format = DRM_FORMAT_BGR565,		.depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+#ifdef __BIG_ENDIAN
+		{ .format = DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+#endif
 		{ .format = DRM_FORMAT_RGB888,		.depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
 		{ .format = DRM_FORMAT_BGR888,		.depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
 		{ .format = DRM_FORMAT_XRGB8888,	.depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
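These table entries let drm_format_info() resolve the big-endian XRGB1555 and RGB565 variants on big-endian kernels, where the lookup previously returned NULL. A hypothetical helper (illustrative only, not from this patch) showing the effect of the new entries:

#include <drm/drm_fourcc.h>

/*
 * Illustrative only: look up big-endian RGB565. On a big-endian kernel
 * this now returns a valid descriptor instead of NULL, so depth/cpp
 * checks like the one below can succeed.
 */
static bool example_supports_be_rgb565(void)
{
	const struct drm_format_info *info;

	info = drm_format_info(DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN);

	return info && info->depth == 16 && info->cpp[0] == 2;
}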
@@ -169,6 +169,20 @@ void drm_gem_private_object_init(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
 
+/**
+ * drm_gem_private_object_fini - Finalize a failed drm_gem_object
+ * @obj: drm_gem_object
+ *
+ * Uninitialize an already allocated GEM object when it initialized failed
+ */
+void drm_gem_private_object_fini(struct drm_gem_object *obj)
+{
+	WARN_ON(obj->dma_buf);
+
+	dma_resv_fini(&obj->_resv);
+}
+EXPORT_SYMBOL(drm_gem_private_object_fini);
+
 /**
  * drm_gem_object_handle_free - release resources bound to userspace handles
  * @obj: GEM object to clean up.
@@ -930,12 +944,11 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
-	WARN_ON(obj->dma_buf);
-
 	if (obj->filp)
 		fput(obj->filp);
 
-	dma_resv_fini(&obj->_resv);
+	drm_gem_private_object_fini(obj);
+
 	drm_gem_free_mmap_offset(obj);
 	drm_gem_lru_remove(obj);
 }
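drm_gem_private_object_fini() gives callers of drm_gem_private_object_init()/drm_gem_object_init() a symmetric teardown for error paths that never reach drm_gem_object_release(); the shmem helper fix below uses it for exactly that. A hypothetical driver-side sketch (the foo_* names are placeholders, not from this patch):

#include <linux/err.h>
#include <linux/slab.h>
#include <drm/drm_gem.h>

struct foo_gem_object {
	struct drm_gem_object base;
	/* driver-private state would follow */
};

/*
 * Hypothetical sketch: initialize a private (non shmem-backed) GEM
 * object and unwind with drm_gem_private_object_fini() if a later
 * setup step fails, instead of open-coding dma_resv_fini().
 * Driver-specific setup (obj->funcs etc.) is omitted here.
 */
static struct foo_gem_object *foo_gem_create(struct drm_device *dev, size_t size)
{
	struct foo_gem_object *fobj;
	int ret;

	fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
	if (!fobj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(dev, &fobj->base, size);

	ret = drm_gem_create_mmap_offset(&fobj->base);
	if (ret) {
		drm_gem_private_object_fini(&fobj->base);
		kfree(fobj);
		return ERR_PTR(ret);
	}

	return fobj;
}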
@@ -79,8 +79,10 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
 	} else {
 		ret = drm_gem_object_init(dev, obj, size);
 	}
-	if (ret)
+	if (ret) {
+		drm_gem_private_object_fini(obj);
 		goto err_free;
+	}
 
 	ret = drm_gem_create_mmap_offset(obj);
 	if (ret)
@@ -399,6 +399,8 @@ static void drm_mode_config_init_release(struct drm_device *dev, void *ptr)
  */
 int drmm_mode_config_init(struct drm_device *dev)
 {
+	int ret;
+
 	mutex_init(&dev->mode_config.mutex);
 	drm_modeset_lock_init(&dev->mode_config.connection_mutex);
 	mutex_init(&dev->mode_config.idr_mutex);
@@ -420,7 +422,11 @@ int drmm_mode_config_init(struct drm_device *dev)
 	init_llist_head(&dev->mode_config.connector_free_list);
 	INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
 
-	drm_mode_create_standard_properties(dev);
+	ret = drm_mode_create_standard_properties(dev);
+	if (ret) {
+		drm_mode_config_cleanup(dev);
+		return ret;
+	}
 
 	/* Just to be sure */
 	dev->mode_config.num_fb = 0;
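Since drmm_mode_config_init() can now fail while creating the standard properties, callers must keep treating a non-zero return as fatal. An illustrative call site (foo_modeset_init() is hypothetical; the drmm_mode_config_init() signature is unchanged):

#include <drm/drm_drv.h>
#include <drm/drm_mode_config.h>

/*
 * Illustrative call site: drmm_mode_config_init() already returned an
 * int before this change; the fix means a standard-property allocation
 * failure is now reported here rather than silently ignored.
 */
static int foo_modeset_init(struct drm_device *dev)
{
	int ret;

	ret = drmm_mode_config_init(dev);
	if (ret)
		return ret;

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;

	return 0;
}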
@@ -81,7 +81,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 	init_completion(&entity->entity_idle);
 
 	/* We start in an idle state. */
-	complete(&entity->entity_idle);
+	complete_all(&entity->entity_idle);
 
 	spin_lock_init(&entity->rq_lock);
 	spsc_queue_init(&entity->job_queue);
@@ -987,7 +987,7 @@ static int drm_sched_main(void *param)
 		sched_job = drm_sched_entity_pop_job(entity);
 
 		if (!sched_job) {
-			complete(&entity->entity_idle);
+			complete_all(&entity->entity_idle);
 			continue;
 		}
 
@@ -998,7 +998,7 @@ static int drm_sched_main(void *param)
 
 		trace_drm_run_job(sched_job, entity);
 		fence = sched->ops->run_job(sched_job);
-		complete(&entity->entity_idle);
+		complete_all(&entity->entity_idle);
 		drm_sched_fence_scheduled(s_fence);
 
 		if (!IS_ERR_OR_NULL(fence)) {
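The switch from complete() to complete_all() matters because complete() only ever satisfies a single wait_for_completion(), so repeated waits on entity_idle while killing an entity could block forever; complete_all() leaves the completion signalled for every current and future waiter until it is reinitialized. A small, self-contained illustration of the difference (generic completion API usage, not scheduler code):

#include <linux/completion.h>

/*
 * Illustration of the semantic difference, not part of the patch:
 * after complete(), only one wait_for_completion() is satisfied;
 * after complete_all(), every current and future waiter returns
 * immediately until reinit_completion() is called.
 */
static void example_completion_semantics(void)
{
	DECLARE_COMPLETION_ONSTACK(done);

	complete(&done);
	wait_for_completion(&done);	/* returns: one "done" was queued */
	/* a second wait_for_completion(&done) here would block forever */

	reinit_completion(&done);
	complete_all(&done);
	wait_for_completion(&done);	/* returns immediately */
	wait_for_completion(&done);	/* still returns immediately */
}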
@@ -405,6 +405,7 @@ int drm_gem_object_init(struct drm_device *dev,
 			struct drm_gem_object *obj, size_t size);
 void drm_gem_private_object_init(struct drm_device *dev,
 				 struct drm_gem_object *obj, size_t size);
+void drm_gem_private_object_fini(struct drm_gem_object *obj);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,