drm/msm: Add priv->mm_lock to protect active/inactive lists
Rather than relying on the big dev->struct_mutex hammer, introduce a
more specific lock for protecting the bo lists.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Jordan Crouse <jcrouse@codeaurora.org>
Reviewed-by: Kristian H. Kristensen <hoegsberg@google.com>
Signed-off-by: Rob Clark <robdclark@chromium.org>
This commit is contained in:
parent 2a86efb1bf
commit d984457b31
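The pattern in a nutshell — a minimal sketch of what the commit does, with illustrative demo_* names that are not the driver's own: a bo sits on exactly one of two lists at a time, and a dedicated mm_lock protects nothing but that list membership.

/*
 * Minimal sketch of the locking pattern this commit introduces; the
 * demo_* names are hypothetical, not part of the driver.  A bo is on
 * exactly one list at a time, and mm_lock guards only list membership.
 */
#include <linux/list.h>
#include <linux/mutex.h>

struct demo_bo {
        struct list_head mm_list;       /* links the bo into one list */
};

struct demo_dev {
        struct mutex mm_lock;           /* protects both lists below */
        struct list_head inactive_list;
        struct list_head active_list;
};

static void demo_move_to_active(struct demo_dev *dev, struct demo_bo *bo)
{
        mutex_lock(&dev->mm_lock);
        list_move_tail(&bo->mm_list, &dev->active_list);
        mutex_unlock(&dev->mm_lock);
}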
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -112,6 +112,11 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
 {
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gpu *gpu = priv->gpu;
+       int ret;
+
+       ret = mutex_lock_interruptible(&priv->mm_lock);
+       if (ret)
+               return ret;

        if (gpu) {
                seq_printf(m, "Active Objects (%s):\n", gpu->name);
@@ -121,6 +126,8 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
        seq_printf(m, "Inactive Objects:\n");
        msm_gem_describe_objects(&priv->inactive_list, m);

+       mutex_unlock(&priv->mm_lock);
+
        return 0;
 }
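Worth noting in the hunk above: the debugfs path takes the new lock with mutex_lock_interruptible(), since the read is user-triggered and should be abortable by a signal rather than sleeping uninterruptibly. A hedged sketch of that idiom, reusing the demo_dev type from the sketch above:

/* Sketch of the interruptible-lock idiom for user-triggered paths. */
static int demo_show(struct demo_dev *dev)
{
        int ret;

        ret = mutex_lock_interruptible(&dev->mm_lock);
        if (ret)
                return ret;     /* -EINTR: a signal arrived while waiting */

        /* ... describe the objects on the lists here ... */

        mutex_unlock(&dev->mm_lock);
        return 0;
}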
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -7,6 +7,7 @@

 #include <linux/dma-mapping.h>
 #include <linux/kthread.h>
+#include <linux/sched/mm.h>
 #include <linux/uaccess.h>
 #include <uapi/linux/sched/types.h>

@@ -441,6 +442,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
        init_llist_head(&priv->free_list);

        INIT_LIST_HEAD(&priv->inactive_list);
+       mutex_init(&priv->mm_lock);
+
+       /* Teach lockdep about lock ordering wrt. shrinker: */
+       fs_reclaim_acquire(GFP_KERNEL);
+       might_lock(&priv->mm_lock);
+       fs_reclaim_release(GFP_KERNEL);

        drm_mode_config_init(ddev);
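The fs_reclaim_acquire()/might_lock()/fs_reclaim_release() sequence in msm_drm_init() is a lockdep priming trick: at init time it records that mm_lock may be acquired in fs-reclaim context (the shrinker takes it), so lockdep can flag any later GFP_KERNEL allocation made while holding mm_lock immediately, without waiting for the shrinker to actually fire under memory pressure. A sketch of the same trick applied to a hypothetical lock:

/*
 * Sketch of the lockdep-priming trick above, on a hypothetical
 * demo_lock.  After this runs once, allocating with GFP_KERNEL while
 * holding demo_lock triggers a lockdep splat at the allocation site.
 */
#include <linux/lockdep.h>      /* might_lock() */
#include <linux/mutex.h>
#include <linux/sched/mm.h>     /* fs_reclaim_acquire()/release() */

static DEFINE_MUTEX(demo_lock);

static void demo_teach_lockdep(void)
{
        fs_reclaim_acquire(GFP_KERNEL); /* pretend we are in reclaim */
        might_lock(&demo_lock);         /* demo_lock nests inside reclaim */
        fs_reclaim_release(GFP_KERNEL);
}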
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -174,8 +174,19 @@ struct msm_drm_private {
        struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
        struct msm_perf_state *perf;

-       /* list of GEM objects: */
+       /*
+        * List of inactive GEM objects.  Every bo is either in the inactive_list
+        * or gpu->active_list (for the gpu it is active on[1])
+        *
+        * These lists are protected by mm_lock.  If struct_mutex is involved, it
+        * should be acquired prior to mm_lock.  One should *not* hold mm_lock in
+        * get_pages()/vmap()/etc paths, as they can trigger the shrinker.
+        *
+        * [1] if someone ever added support for the old 2d cores, there could be
+        *     more than one gpu object
+        */
        struct list_head inactive_list;
+       struct mutex mm_lock;

        /* worker for delayed free of objects: */
        struct work_struct free_work;
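The comment added above pins down two rules worth spelling out: where both locks are involved, struct_mutex is the outer lock and mm_lock the inner one; and mm_lock must never be held across get_pages()/vmap() style paths, because those allocate memory, an allocation can enter direct reclaim, and reclaim runs the shrinker, which itself takes mm_lock. An illustrative sketch of the correct nesting, reusing the hypothetical demo_* types from the earlier sketch (not driver code):

#include <drm/drm_device.h>     /* struct drm_device, ::struct_mutex */

static void demo_ordering(struct drm_device *ddev, struct demo_dev *dev,
                          struct demo_bo *bo)
{
        mutex_lock(&ddev->struct_mutex);        /* outer lock first... */
        mutex_lock(&dev->mm_lock);              /* ...inner list lock second */
        list_del_init(&bo->mm_list);
        mutex_unlock(&dev->mm_lock);
        mutex_unlock(&ddev->struct_mutex);

        /*
         * Allocate only with mm_lock dropped: kmalloc(..., GFP_KERNEL)
         * may recurse into the shrinker, which takes mm_lock -- a
         * deadlock if we still held it here.
         */
}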
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -767,13 +767,17 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
-       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+       struct msm_drm_private *priv = obj->dev->dev_private;
+
+       might_sleep();
        WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);

        if (!atomic_fetch_inc(&msm_obj->active_count)) {
+               mutex_lock(&priv->mm_lock);
                msm_obj->gpu = gpu;
                list_del_init(&msm_obj->mm_list);
                list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+               mutex_unlock(&priv->mm_lock);
        }
 }

@@ -782,12 +786,14 @@ void msm_gem_active_put(struct drm_gem_object *obj)
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_drm_private *priv = obj->dev->dev_private;

-       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+       might_sleep();

        if (!atomic_dec_return(&msm_obj->active_count)) {
+               mutex_lock(&priv->mm_lock);
                msm_obj->gpu = NULL;
                list_del_init(&msm_obj->mm_list);
                list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+               mutex_unlock(&priv->mm_lock);
        }
 }
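The two hunks above rely on the return-value semantics of the atomics: atomic_fetch_inc() returns the *old* count (0 means this get performed the idle-to-active transition) and atomic_dec_return() returns the *new* count (0 means this put was the last one). Only those edge transitions move the bo between lists, so nested get/put pairs never touch mm_lock. A sketch of the idiom, again with hypothetical demo_* names:

#include <linux/atomic.h>

struct demo_active_bo {
        struct list_head mm_list;
        atomic_t active_count;
};

static void demo_active_get(struct demo_dev *dev, struct demo_active_bo *bo)
{
        if (!atomic_fetch_inc(&bo->active_count)) {     /* 0 -> 1 */
                mutex_lock(&dev->mm_lock);
                list_move_tail(&bo->mm_list, &dev->active_list);
                mutex_unlock(&dev->mm_lock);
        }
}

static void demo_active_put(struct demo_dev *dev, struct demo_active_bo *bo)
{
        if (!atomic_dec_return(&bo->active_count)) {    /* 1 -> 0 */
                mutex_lock(&dev->mm_lock);
                list_move_tail(&bo->mm_list, &dev->inactive_list);
                mutex_unlock(&dev->mm_lock);
        }
}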
@@ -942,13 +948,16 @@ static void free_object(struct msm_gem_object *msm_obj)
 {
        struct drm_gem_object *obj = &msm_obj->base;
        struct drm_device *dev = obj->dev;
+       struct msm_drm_private *priv = dev->dev_private;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* object should not be on active list: */
        WARN_ON(is_active(msm_obj));

+       mutex_lock(&priv->mm_lock);
        list_del(&msm_obj->mm_list);
+       mutex_unlock(&priv->mm_lock);

        msm_gem_lock(obj);

@@ -1127,14 +1136,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
                mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
        }

-       if (struct_mutex_locked) {
-               WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-               list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-       } else {
-               mutex_lock(&dev->struct_mutex);
-               list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-               mutex_unlock(&dev->struct_mutex);
-       }
+       mutex_lock(&priv->mm_lock);
+       list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+       mutex_unlock(&priv->mm_lock);

        return obj;

@@ -1202,9 +1206,9 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,

        msm_gem_unlock(obj);

-       mutex_lock(&dev->struct_mutex);
+       mutex_lock(&priv->mm_lock);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&priv->mm_lock);

        return obj;
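A side effect visible in the _msm_gem_new() hunk: with a dedicated list lock, the allocation path no longer needs the struct_mutex_locked special case, since every caller can simply take mm_lock around the list insertion. A sketch of the unified publish step (demo_* names as before):

static void demo_publish_new_bo(struct demo_dev *dev, struct demo_bo *bo)
{
        INIT_LIST_HEAD(&bo->mm_list);

        /* Same code whether or not the caller holds any outer lock. */
        mutex_lock(&dev->mm_lock);
        list_add_tail(&bo->mm_list, &dev->inactive_list);
        mutex_unlock(&dev->mm_lock);
}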
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -51,6 +51,8 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
        if (!msm_gem_shrinker_lock(dev, &unlock))
                return 0;

+       mutex_lock(&priv->mm_lock);
+
        list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
                if (!msm_gem_trylock(&msm_obj->base))
                        continue;
@@ -59,6 +61,8 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
                msm_gem_unlock(&msm_obj->base);
        }

+       mutex_unlock(&priv->mm_lock);
+
        if (unlock)
                mutex_unlock(&dev->struct_mutex);

@@ -78,6 +82,8 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
        if (!msm_gem_shrinker_lock(dev, &unlock))
                return SHRINK_STOP;

+       mutex_lock(&priv->mm_lock);
+
        list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
                if (freed >= sc->nr_to_scan)
                        break;
@@ -90,6 +96,8 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
                msm_gem_unlock(&msm_obj->base);
        }

+       mutex_unlock(&priv->mm_lock);
+
        if (unlock)
                mutex_unlock(&dev->struct_mutex);

@@ -112,6 +120,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
        if (!msm_gem_shrinker_lock(dev, &unlock))
                return NOTIFY_DONE;

+       mutex_lock(&priv->mm_lock);
+
        list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
                if (!msm_gem_trylock(&msm_obj->base))
                        continue;
@@ -129,6 +139,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
                break;
        }

+       mutex_unlock(&priv->mm_lock);
+
        if (unlock)
                mutex_unlock(&dev->struct_mutex);
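The shrinker hunks above are where the no-allocations-under-mm_lock rule pays off: the shrinker runs from reclaim and takes mm_lock for the whole list walk, which is deadlock-free only because no other path allocates while holding that lock. Individual objects are taken with a trylock, so a contended bo is skipped rather than waited on. A sketch of the count-side pattern; demo_trylock()/demo_unlock() are hypothetical stand-ins for the per-object msm_gem_trylock()/msm_gem_unlock():

static bool demo_trylock(struct demo_bo *bo);   /* hypothetical per-bo lock */
static void demo_unlock(struct demo_bo *bo);

static unsigned long demo_shrink_count(struct demo_dev *dev)
{
        struct demo_bo *bo;
        unsigned long count = 0;

        mutex_lock(&dev->mm_lock);
        list_for_each_entry(bo, &dev->inactive_list, mm_list) {
                if (!demo_trylock(bo))  /* skip contended objects */
                        continue;
                count++;                /* e.g. count purgeable pages */
                demo_unlock(bo);
        }
        mutex_unlock(&dev->mm_lock);

        return count;
}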
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -94,7 +94,10 @@ struct msm_gpu {
        struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
        int nr_rings;

-       /* list of GEM active objects: */
+       /*
+        * List of GEM active objects on this gpu.  Protected by
+        * msm_drm_private::mm_lock
+        */
        struct list_head active_list;

        /* does gpu need hw_init? */