*/
if (!_xfs_mru_cache_migrate(mru, now)) {
mru->time_zero = now;
- if (!mru->next_reap)
- mru->next_reap = mru->grp_count * mru->grp_time;
+ if (!mru->queued) {
+ mru->queued = 1;
+ queue_delayed_work(xfs_mru_reap_wq, &mru->work,
+ mru->grp_count * mru->grp_time);
+ }
} else {
grp = (now - mru->time_zero) / mru->grp_time;
grp = (mru->lru_grp + grp) % mru->grp_count;
* list need to be deleted. For each element this involves removing it from the
* data store, removing it from the reap list, calling the client's free
* function and deleting the element from the element zone.
+ *
+ * We get called holding the mru->lock, which we drop and then reacquire.
+ * Sparse needs special help with this to tell it we know what we are doing.
*/
STATIC void
_xfs_mru_cache_clear_reap_list(
- xfs_mru_cache_t *mru)
+ xfs_mru_cache_t *mru) __releases(mru->lock) __acquires(mru->lock)
{
xfs_mru_cache_elem_t *elem, *next;
struct list_head tmp;
*/
list_move(&elem->list_node, &tmp);
}
- mutex_spinunlock(&mru->lock, 0);
+ spin_unlock(&mru->lock);
list_for_each_entry_safe(elem, next, &tmp, list_node) {
kmem_zone_free(xfs_mru_elem_zone, elem);
}
- mutex_spinlock(&mru->lock);
+ spin_lock(&mru->lock);
}
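To make the drop-and-reacquire shape above easier to read in isolation, here is a minimal, self-contained sketch of the same pattern; the example_cache/example_elem names are hypothetical stand-ins and not part of this patch:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_elem {
	struct list_head	list_node;
};

struct example_cache {
	spinlock_t		lock;
	struct list_head	reap_list;
};

/*
 * Splice the victims onto a private list while holding the lock, drop the
 * lock to do the (potentially expensive) freeing, then retake the lock.
 */
static void
example_clear_reap_list(
	struct example_cache	*cache) __releases(cache->lock) __acquires(cache->lock)
{
	struct example_elem	*elem, *next;
	struct list_head	tmp;

	INIT_LIST_HEAD(&tmp);
	list_for_each_entry_safe(elem, next, &cache->reap_list, list_node)
		list_move(&elem->list_node, &tmp);

	spin_unlock(&cache->lock);
	list_for_each_entry_safe(elem, next, &tmp, list_node) {
		list_del(&elem->list_node);
		kfree(elem);
	}
	spin_lock(&cache->lock);
}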
/*
struct work_struct *work)
{
xfs_mru_cache_t *mru = container_of(work, xfs_mru_cache_t, work.work);
- unsigned long now;
+ unsigned long now, next;
ASSERT(mru && mru->lists);
if (!mru || !mru->lists)
return;
- mutex_spinlock(&mru->lock);
- now = jiffies;
- if (mru->reap_all ||
- (mru->next_reap && time_after(now, mru->next_reap))) {
- if (mru->reap_all)
- now += mru->grp_count * mru->grp_time * 2;
- mru->next_reap = _xfs_mru_cache_migrate(mru, now);
- _xfs_mru_cache_clear_reap_list(mru);
+ spin_lock(&mru->lock);
+ next = _xfs_mru_cache_migrate(mru, jiffies);
+ _xfs_mru_cache_clear_reap_list(mru);
+
+ mru->queued = next;
+ if (mru->queued > 0) {
+ now = jiffies;
+ if (next <= now)
+ next = 0;
+ else
+ next -= now;
+ queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
}
- /*
- * the process that triggered the reap_all is responsible
- * for restating the periodic reap if it is required.
- */
- if (!mru->reap_all)
- queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
- mru->reap_all = 0;
- mutex_spinunlock(&mru->lock, 0);
+ spin_unlock(&mru->lock);
}
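The re-arming step above converts the absolute expiry returned by _xfs_mru_cache_migrate() into the relative delay that queue_delayed_work() expects. A hedged, stand-alone sketch of just that conversion (example_rearm() and its arguments are hypothetical):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

/*
 * Re-arm a delayed work item for an absolute jiffies deadline, clamping
 * the delay to zero if the deadline has already passed.
 */
static void
example_rearm(
	struct workqueue_struct	*wq,
	struct delayed_work	*dwork,
	unsigned long		deadline)
{
	unsigned long	now = jiffies;

	queue_delayed_work(wq, dwork,
			time_after(deadline, now) ? deadline - now : 0);
}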
int
xfs_mru_elem_zone = kmem_zone_init(sizeof(xfs_mru_cache_elem_t),
"xfs_mru_cache_elem");
if (!xfs_mru_elem_zone)
- return ENOMEM;
+ goto out;
xfs_mru_reap_wq = create_singlethread_workqueue("xfs_mru_cache");
- if (!xfs_mru_reap_wq) {
- kmem_zone_destroy(xfs_mru_elem_zone);
- return ENOMEM;
- }
+ if (!xfs_mru_reap_wq)
+ goto out_destroy_mru_elem_zone;
return 0;
+
+ out_destroy_mru_elem_zone:
+ kmem_zone_destroy(xfs_mru_elem_zone);
+ out:
+ return -ENOMEM;
}
void
/* An extra list is needed to avoid reaping up to a grp_time early. */
mru->grp_count = grp_count + 1;
- mru->lists = kmem_alloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
+ mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
if (!mru->lists) {
err = ENOMEM;
*/
INIT_RADIX_TREE(&mru->store, GFP_ATOMIC);
INIT_LIST_HEAD(&mru->reap_list);
- spinlock_init(&mru->lock, "xfs_mru_cache");
+ spin_lock_init(&mru->lock);
INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap);
mru->grp_time = grp_time;
mru->free_func = free_func;
- /* start up the reaper event */
- mru->next_reap = 0;
- mru->reap_all = 0;
- queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
-
*mrup = mru;
exit:
if (err && mru && mru->lists)
- kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
+ kmem_free(mru->lists);
if (err && mru)
- kmem_free(mru, sizeof(*mru));
+ kmem_free(mru);
return err;
}
* Call xfs_mru_cache_flush() to flush out all cached entries, calling their
* free functions as they're deleted. When this function returns, the caller is
* guaranteed that all the free functions for all the elements have finished
- * executing.
- *
- * While we are flushing, we stop the periodic reaper event from triggering.
- * Normally, we want to restart this periodic event, but if we are shutting
- * down the cache we do not want it restarted. hence the restart parameter
- * where 0 = do not restart reaper and 1 = restart reaper.
+ * executing and the reaper is not running.
*/
void
xfs_mru_cache_flush(
- xfs_mru_cache_t *mru,
- int restart)
+ xfs_mru_cache_t *mru)
{
if (!mru || !mru->lists)
return;
- cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
-
- mutex_spinlock(&mru->lock);
- mru->reap_all = 1;
- mutex_spinunlock(&mru->lock, 0);
+ spin_lock(&mru->lock);
+ if (mru->queued) {
+ spin_unlock(&mru->lock);
+ cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
+ spin_lock(&mru->lock);
+ }
- queue_work(xfs_mru_reap_wq, &mru->work.work);
- flush_workqueue(xfs_mru_reap_wq);
+ _xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
+ _xfs_mru_cache_clear_reap_list(mru);
- mutex_spinlock(&mru->lock);
- WARN_ON_ONCE(mru->reap_all != 0);
- mru->reap_all = 0;
- if (restart)
- queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
- mutex_spinunlock(&mru->lock, 0);
+ spin_unlock(&mru->lock);
}
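With the restart argument gone, a caller just makes the single call below; my_mru is a hypothetical cache pointer, not taken from this patch:

	/*
	 * Drain every cached entry; when this returns, all the client free
	 * functions have finished and the reaper work item has been cancelled.
	 */
	xfs_mru_cache_flush(my_mru);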
void
if (!mru || !mru->lists)
return;
- /* we don't want the reaper to restart here */
- xfs_mru_cache_flush(mru, 0);
+ xfs_mru_cache_flush(mru);
- kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
- kmem_free(mru, sizeof(*mru));
+ kmem_free(mru->lists);
+ kmem_free(mru);
}
/*
elem->key = key;
elem->value = value;
- mutex_spinlock(&mru->lock);
+ spin_lock(&mru->lock);
radix_tree_insert(&mru->store, key, elem);
radix_tree_preload_end();
_xfs_mru_cache_list_insert(mru, elem);
- mutex_spinunlock(&mru->lock, 0);
+ spin_unlock(&mru->lock);
return 0;
}
if (!mru || !mru->lists)
return NULL;
- mutex_spinlock(&mru->lock);
+ spin_lock(&mru->lock);
elem = radix_tree_delete(&mru->store, key);
if (elem) {
value = elem->value;
list_del(&elem->list_node);
}
- mutex_spinunlock(&mru->lock, 0);
+ spin_unlock(&mru->lock);
if (elem)
kmem_zone_free(xfs_mru_elem_zone, elem);
*
* If the element isn't found, this function returns NULL and the spinlock is
* released. xfs_mru_cache_done() should NOT be called when this occurs.
+ *
+ * Because sparse isn't smart enough to know about conditional lock return
+ * status, we need to help it get it right by annotating the path that does
+ * not release the lock.
*/
void *
xfs_mru_cache_lookup(
if (!mru || !mru->lists)
return NULL;
- mutex_spinlock(&mru->lock);
+ spin_lock(&mru->lock);
elem = radix_tree_lookup(&mru->store, key);
if (elem) {
list_del(&elem->list_node);
_xfs_mru_cache_list_insert(mru, elem);
- }
- else
- mutex_spinunlock(&mru->lock, 0);
+ __release(mru_lock); /* help sparse not be stupid */
+ } else
+ spin_unlock(&mru->lock);
return elem ? elem->value : NULL;
}
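A hedged caller-side sketch of the lookup/done pairing described above, assuming the unsigned long key type used by the radix tree store; my_mru, my_key and use_value() are hypothetical:

	void	*value;

	value = xfs_mru_cache_lookup(my_mru, my_key);
	if (value) {
		/* mru->lock is still held here, so keep this window short. */
		use_value(value);		/* hypothetical consumer */
		xfs_mru_cache_done(my_mru);	/* drops mru->lock */
	}
	/* On a miss, xfs_mru_cache_lookup() already released the lock. */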
if (!mru || !mru->lists)
return NULL;
- mutex_spinlock(&mru->lock);
+ spin_lock(&mru->lock);
elem = radix_tree_lookup(&mru->store, key);
if (!elem)
- mutex_spinunlock(&mru->lock, 0);
+ spin_unlock(&mru->lock);
+ else
+ __release(mru_lock); /* help sparse not be stupid */
return elem ? elem->value : NULL;
}
*/
void
xfs_mru_cache_done(
- xfs_mru_cache_t *mru)
+ xfs_mru_cache_t *mru) __releases(mru->lock)
{
- mutex_spinunlock(&mru->lock, 0);
+ spin_unlock(&mru->lock);
}