@@ ... @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 {
         int                 ret;
-        struct ib_pool_fmr *fmr, *next;
+        struct ib_pool_fmr *fmr;
         LIST_HEAD(unmap_list);
         LIST_HEAD(fmr_list);
@@ ... @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 #endif
         }
 
-        /*
-         * The free_list may hold FMRs that have been put there
-         * because they haven't reached the max_remap count.
-         * Invalidate their mapping as well.
-         */
-        list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
-                if (fmr->remap_count == 0)
-                        continue;
-                hlist_del_init(&fmr->cache_node);
-                fmr->remap_count = 0;
-                list_add_tail(&fmr->fmr->list, &fmr_list);
-                list_move(&fmr->list, &unmap_list);
-        }
-
-        list_splice(&pool->dirty_list, &unmap_list);
-        INIT_LIST_HEAD(&pool->dirty_list);
+        list_splice_init(&pool->dirty_list, &unmap_list);
         pool->dirty_len = 0;
 
         spin_unlock_irq(&pool->pool_lock);
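
Aside from dropping the free_list scan, the only change in the hunk above is collapsing the list_splice() + INIT_LIST_HEAD() pair into list_splice_init(), which splices the source list onto the destination and reinitializes the source head in one call. A minimal user-space sketch of that equivalence, using simplified stand-ins for the kernel's list helpers (the names mirror <linux/list.h>, but these reimplementations are for illustration only, not the real kernel code):

#include <stdio.h>

/* Simplified stand-in for the kernel's circular doubly linked list. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

/* Splice @src onto the front of @dst; @src is left pointing at the moved
 * nodes and must be reinitialized before it can be reused. */
static void list_splice(struct list_head *src, struct list_head *dst)
{
	if (!list_empty(src)) {
		struct list_head *first = src->next, *last = src->prev;

		first->prev = dst;
		last->next = dst->next;
		dst->next->prev = last;
		dst->next = first;
	}
}

/* The one-call form the patch switches to: splice, then reset @src. */
static void list_splice_init(struct list_head *src, struct list_head *dst)
{
	list_splice(src, dst);
	INIT_LIST_HEAD(src);
}

int main(void)
{
	struct list_head dirty_list, unmap_list, a, b;

	INIT_LIST_HEAD(&dirty_list);
	INIT_LIST_HEAD(&unmap_list);

	/* Hand-link two nodes onto dirty_list: head <-> a <-> b <-> head. */
	a.prev = &dirty_list; a.next = &b;
	b.prev = &a;          b.next = &dirty_list;
	dirty_list.next = &a; dirty_list.prev = &b;

	/* Equivalent to: list_splice(&dirty_list, &unmap_list);
	 *                INIT_LIST_HEAD(&dirty_list);            */
	list_splice_init(&dirty_list, &unmap_list);

	printf("dirty empty: %d, unmap has nodes: %d\n",
	       list_empty(&dirty_list), !list_empty(&unmap_list)); /* 1 1 */
	return 0;
}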
@@ ... @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
         i = 0;
         list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
+                if (fmr->remap_count) {
+                        INIT_LIST_HEAD(&fmr_list);
+                        list_add_tail(&fmr->fmr->list, &fmr_list);
+                        ib_unmap_fmr(&fmr_list);
+                }
                 ib_dealloc_fmr(fmr->fmr);
                 list_del(&fmr->list);
                 kfree(fmr);
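
For context on why a used FMR can still be sitting on the free_list at teardown: ib_fmr_pool_unmap() returns an FMR to the free_list until its remap count reaches the pool's limit, so its hardware mapping may still be live when the pool is destroyed. The added block above unmaps such FMRs (via a one-entry list, since ib_unmap_fmr() operates on a list of FMRs) before ib_dealloc_fmr(). A rough consumer-side sketch of that lifecycle; fmr_pool_lifecycle() is a hypothetical helper, the pd/page_list setup is assumed to exist elsewhere, and most error handling is omitted:

#include <linux/err.h>
#include <rdma/ib_fmr_pool.h>

static int fmr_pool_lifecycle(struct ib_pd *pd, u64 *page_list, int npages)
{
	struct ib_fmr_pool_param params = {
		.max_pages_per_fmr = 64,
		.page_shift        = PAGE_SHIFT,
		.access            = IB_ACCESS_LOCAL_WRITE,
		.pool_size         = 32,
		.dirty_watermark   = 8,
	};
	struct ib_fmr_pool *pool;
	struct ib_pool_fmr *fmr;

	pool = ib_create_fmr_pool(pd, &params);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	fmr = ib_fmr_pool_map_phys(pool, page_list, npages, 0);
	if (!IS_ERR(fmr))
		/* With remap_count now 1, below the pool's remap limit,
		 * this puts the FMR back on the free_list, mapping intact. */
		ib_fmr_pool_unmap(fmr);

	/* With the hunk above applied, this unmaps the still-mapped FMR
	 * before calling ib_dealloc_fmr() on it. */
	ib_destroy_fmr_pool(pool);
	return 0;
}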
@@ ... @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
  */
 int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
 {
-        int serial = atomic_inc_return(&pool->req_ser);
+        int serial;
+        struct ib_pool_fmr *fmr, *next;
+
+        /*
+         * The free_list holds FMRs that may have been used
+         * but have not been remapped enough times to be dirty.
+         * Put them on the dirty list now so that the cleanup
+         * thread will reap them too.
+         */
+        spin_lock_irq(&pool->pool_lock);
+        list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
+                if (fmr->remap_count > 0)
+                        list_move(&fmr->list, &pool->dirty_list);
+        }
+        spin_unlock_irq(&pool->pool_lock);
+
+        serial = atomic_inc_return(&pool->req_ser);
         wake_up_process(pool->thread);
 
         if (wait_event_interruptible(pool->force_wait,