Merge tag 'tee-cleanup-for-5.7' of https://git.linaro.org/people/jens.wiklander/linux-tee into arm/drivers

Cleanup shared memory handling in TEE subsystem
The highlights are:
- Removing redundant or unused fields in struct tee_shm
- Only assign userspace shm IDs for shared memory objects originating from
  user space

* tag 'tee-cleanup-for-5.7' of https://git.linaro.org/people/jens.wiklander/linux-tee:
  tee: tee_shm_op_mmap(): use TEE_SHM_USER_MAPPED
  tee: remove redundant teedev in struct tee_shm
  tee: don't assign shm id for private shms
  tee: remove unused tee_shm_priv_alloc()
  tee: remove linked list of struct tee_shm

Link: https://lore.kernel.org/r/20200228140925.GA12393@jade
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
This commit is contained in:
Arnd Bergmann
2020-03-26 14:30:44 +01:00
4 changed files with 27 additions and 81 deletions

View File

@@ -44,7 +44,6 @@ static struct tee_context *teedev_open(struct tee_device *teedev)
kref_init(&ctx->refcount); kref_init(&ctx->refcount);
ctx->teedev = teedev; ctx->teedev = teedev;
INIT_LIST_HEAD(&ctx->list_shm);
rc = teedev->desc->ops->open(ctx); rc = teedev->desc->ops->open(ctx);
if (rc) if (rc)
goto err; goto err;

View File

@@ -37,7 +37,8 @@ struct tee_shm_pool {
* @num_users: number of active users of this device * @num_users: number of active users of this device
* @c_no_user: completion used when unregistering the device * @c_no_user: completion used when unregistering the device
* @mutex: mutex protecting @num_users and @idr * @mutex: mutex protecting @num_users and @idr
* @idr: register of shared memory object allocated on this device * @idr: register of user space shared memory objects allocated or
* registered on this device
* @pool: shared memory pool * @pool: shared memory pool
*/ */
struct tee_device { struct tee_device {

View File

@@ -13,13 +13,13 @@
static void tee_shm_release(struct tee_shm *shm) static void tee_shm_release(struct tee_shm *shm)
{ {
struct tee_device *teedev = shm->teedev; struct tee_device *teedev = shm->ctx->teedev;
mutex_lock(&teedev->mutex); if (shm->flags & TEE_SHM_DMA_BUF) {
idr_remove(&teedev->idr, shm->id); mutex_lock(&teedev->mutex);
if (shm->ctx) idr_remove(&teedev->idr, shm->id);
list_del(&shm->link); mutex_unlock(&teedev->mutex);
mutex_unlock(&teedev->mutex); }
if (shm->flags & TEE_SHM_POOL) { if (shm->flags & TEE_SHM_POOL) {
struct tee_shm_pool_mgr *poolm; struct tee_shm_pool_mgr *poolm;
@@ -44,8 +44,7 @@ static void tee_shm_release(struct tee_shm *shm)
kfree(shm->pages); kfree(shm->pages);
} }
if (shm->ctx) teedev_ctx_put(shm->ctx);
teedev_ctx_put(shm->ctx);
kfree(shm); kfree(shm);
@@ -77,7 +76,7 @@ static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
size_t size = vma->vm_end - vma->vm_start; size_t size = vma->vm_end - vma->vm_start;
/* Refuse sharing shared memory provided by application */ /* Refuse sharing shared memory provided by application */
if (shm->flags & TEE_SHM_REGISTER) if (shm->flags & TEE_SHM_USER_MAPPED)
return -EINVAL; return -EINVAL;
return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
@@ -91,20 +90,14 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
.mmap = tee_shm_op_mmap, .mmap = tee_shm_op_mmap,
}; };
static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx, struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
struct tee_device *teedev,
size_t size, u32 flags)
{ {
struct tee_device *teedev = ctx->teedev;
struct tee_shm_pool_mgr *poolm = NULL; struct tee_shm_pool_mgr *poolm = NULL;
struct tee_shm *shm; struct tee_shm *shm;
void *ret; void *ret;
int rc; int rc;
if (ctx && ctx->teedev != teedev) {
dev_err(teedev->dev.parent, "ctx and teedev mismatch\n");
return ERR_PTR(-EINVAL);
}
if (!(flags & TEE_SHM_MAPPED)) { if (!(flags & TEE_SHM_MAPPED)) {
dev_err(teedev->dev.parent, dev_err(teedev->dev.parent,
"only mapped allocations supported\n"); "only mapped allocations supported\n");
@@ -132,7 +125,6 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
} }
shm->flags = flags | TEE_SHM_POOL; shm->flags = flags | TEE_SHM_POOL;
shm->teedev = teedev;
shm->ctx = ctx; shm->ctx = ctx;
if (flags & TEE_SHM_DMA_BUF) if (flags & TEE_SHM_DMA_BUF)
poolm = teedev->pool->dma_buf_mgr; poolm = teedev->pool->dma_buf_mgr;
@@ -145,17 +137,18 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
goto err_kfree; goto err_kfree;
} }
mutex_lock(&teedev->mutex);
shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
mutex_unlock(&teedev->mutex);
if (shm->id < 0) {
ret = ERR_PTR(shm->id);
goto err_pool_free;
}
if (flags & TEE_SHM_DMA_BUF) { if (flags & TEE_SHM_DMA_BUF) {
DEFINE_DMA_BUF_EXPORT_INFO(exp_info); DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
mutex_lock(&teedev->mutex);
shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
mutex_unlock(&teedev->mutex);
if (shm->id < 0) {
ret = ERR_PTR(shm->id);
goto err_pool_free;
}
exp_info.ops = &tee_shm_dma_buf_ops; exp_info.ops = &tee_shm_dma_buf_ops;
exp_info.size = shm->size; exp_info.size = shm->size;
exp_info.flags = O_RDWR; exp_info.flags = O_RDWR;
@@ -168,18 +161,16 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
} }
} }
if (ctx) { if (ctx)
teedev_ctx_get(ctx); teedev_ctx_get(ctx);
mutex_lock(&teedev->mutex);
list_add_tail(&shm->link, &ctx->list_shm);
mutex_unlock(&teedev->mutex);
}
return shm; return shm;
err_rem: err_rem:
mutex_lock(&teedev->mutex); if (flags & TEE_SHM_DMA_BUF) {
idr_remove(&teedev->idr, shm->id); mutex_lock(&teedev->mutex);
mutex_unlock(&teedev->mutex); idr_remove(&teedev->idr, shm->id);
mutex_unlock(&teedev->mutex);
}
err_pool_free: err_pool_free:
poolm->ops->free(poolm, shm); poolm->ops->free(poolm, shm);
err_kfree: err_kfree:
@@ -188,31 +179,8 @@ err_dev_put:
tee_device_put(teedev); tee_device_put(teedev);
return ret; return ret;
} }
/**
* tee_shm_alloc() - Allocate shared memory
* @ctx: Context that allocates the shared memory
* @size: Requested size of shared memory
* @flags: Flags setting properties for the requested shared memory.
*
* Memory allocated as global shared memory is automatically freed when the
* TEE file pointer is closed. The @flags field uses the bits defined by
* TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
* set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
* associated with a dma-buf handle, else driver private memory.
*/
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
return __tee_shm_alloc(ctx, ctx->teedev, size, flags);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc); EXPORT_SYMBOL_GPL(tee_shm_alloc);
struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size)
{
return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED);
}
EXPORT_SYMBOL_GPL(tee_shm_priv_alloc);
struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
size_t length, u32 flags) size_t length, u32 flags)
{ {
@@ -245,7 +213,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
} }
shm->flags = flags | TEE_SHM_REGISTER; shm->flags = flags | TEE_SHM_REGISTER;
shm->teedev = teedev;
shm->ctx = ctx; shm->ctx = ctx;
shm->id = -1; shm->id = -1;
addr = untagged_addr(addr); addr = untagged_addr(addr);
@@ -301,10 +268,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
} }
} }
mutex_lock(&teedev->mutex);
list_add_tail(&shm->link, &ctx->list_shm);
mutex_unlock(&teedev->mutex);
return shm; return shm;
err: err:
if (shm) { if (shm) {

View File

@@ -49,7 +49,6 @@ struct tee_shm_pool;
*/ */
struct tee_context { struct tee_context {
struct tee_device *teedev; struct tee_device *teedev;
struct list_head list_shm;
void *data; void *data;
struct kref refcount; struct kref refcount;
bool releasing; bool releasing;
@@ -168,9 +167,7 @@ void tee_device_unregister(struct tee_device *teedev);
/** /**
* struct tee_shm - shared memory object * struct tee_shm - shared memory object
* @teedev: device used to allocate the object * @ctx: context using the object
* @ctx: context using the object, if NULL the context is gone
* @link link element
* @paddr: physical address of the shared memory * @paddr: physical address of the shared memory
* @kaddr: virtual address of the shared memory * @kaddr: virtual address of the shared memory
* @size: size of shared memory * @size: size of shared memory
@@ -185,9 +182,7 @@ void tee_device_unregister(struct tee_device *teedev);
* subsystem and from drivers that implements their own shm pool manager. * subsystem and from drivers that implements their own shm pool manager.
*/ */
struct tee_shm { struct tee_shm {
struct tee_device *teedev;
struct tee_context *ctx; struct tee_context *ctx;
struct list_head link;
phys_addr_t paddr; phys_addr_t paddr;
void *kaddr; void *kaddr;
size_t size; size_t size;
@@ -318,18 +313,6 @@ void *tee_get_drvdata(struct tee_device *teedev);
*/ */
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
/**
* tee_shm_priv_alloc() - Allocate shared memory privately
* @dev: Device that allocates the shared memory
* @size: Requested size of shared memory
*
* Allocates shared memory buffer that is not associated with any client
* context. Such buffers are owned by TEE driver and used for internal calls.
*
* @returns a pointer to 'struct tee_shm'
*/
struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size);
/** /**
* tee_shm_register() - Register shared memory buffer * tee_shm_register() - Register shared memory buffer
* @ctx: Context that registers the shared memory * @ctx: Context that registers the shared memory