err.no Git - varnish/commitdiff
First step in implementing early retirement of objects when the cache fills
author    des <des@d4fa192b-c00b-0410-8231-f00ffab90ce4>
          Mon, 25 Jun 2007 17:04:09 +0000 (17:04 +0000)
committer des <des@d4fa192b-c00b-0410-8231-f00ffab90ce4>
          Mon, 25 Jun 2007 17:04:09 +0000 (17:04 +0000)
First step in implementing early retirement of objects when the cache fills
up: implement a "sloppy" LRU list.  An object is placed on the list (or moved
to the head of the list if it's already on it and hasn't moved recently) by
calling LRU_Enter(), and removed by calling LRU_Remove().  LRU_DiscardSpace()
will iterate through the LRU list, starting at the back, and retire objects
(by adding them to the deathrow list) until the sum of the lengths of the
retired objects reaches the specified quota.  Similarly, LRU_DiscardTime() will
retire objects which haven't moved since a specified cutoff date.  In both
cases, vcl_discard() will be given a chance to inspect the object and veto
its retirement.
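
For illustration only, a minimal sketch of the intended call pattern; the
four LRU_* functions are the API introduced by this commit, but the
surrounding function and the concrete values are hypothetical:

#include <time.h>
#include "cache.h"

/* Hypothetical caller; sp and o would come from the cache core. */
static void
lru_usage_sketch(struct sess *sp, struct object *o)
{
	/* Touch on lookup/insert: stamp with the request time. */
	LRU_Enter(o, sp->t_req.tv_sec);

	/* Take the object off the list before it is destroyed. */
	LRU_Remove(o);

	/* Retire least-recently-used objects totalling ~16 MB. */
	LRU_DiscardSpace(sp, 16 * 1024 * 1024);

	/* Retire everything untouched for the past hour. */
	LRU_DiscardTime(sp, time(NULL) - 3600);
}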

Currently, LRU_Enter() and LRU_Remove() are called from HSH_Lookup() and
HSH_Deref() respectively.  There may be better alternatives.

Neither LRU_DiscardSpace() nor LRU_DiscardTime() is currently called from
anywhere.  There are a number of issues to consider: for instance, even if
LRU_DiscardSpace() is called when a high-water mark is reached, the cache
might still fill up before the call finishes and before the hangman has had
a chance to process the deathrow list.
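
One possible shape for such a trigger (entirely hypothetical; neither the
watcher below nor its hiwat/lowat parameters exist in this commit):

#include <stdint.h>
#include "cache.h"

/* Hypothetical watcher: when allocated space crosses a high-water mark,
 * ask LRU_DiscardSpace() to queue enough objects for retirement to get
 * back below a low-water mark.  Note the race described above: the
 * space is not actually reclaimed until the hangman has drained the
 * deathrow list. */
static void
lru_watch(struct sess *sp, uint64_t space_used, uint64_t hiwat, uint64_t lowat)
{
	if (space_used > hiwat)
		LRU_DiscardSpace(sp, space_used - lowat);
}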

git-svn-id: svn+ssh://projects.linpro.no/svn/varnish/trunk@1570 d4fa192b-c00b-0410-8231-f00ffab90ce4

varnish-cache/bin/varnishd/Makefile.am
varnish-cache/bin/varnishd/cache.h
varnish-cache/bin/varnishd/cache_expire.c
varnish-cache/bin/varnishd/cache_hash.c
varnish-cache/bin/varnishd/cache_lru.c [new file with mode: 0644]

index 4a3773e0d0d1ee4d150f8f04463e99f37b9d550e..07fffc8ad061cb23213f55f1e100fa1a55bc50ed 100644 (file)
@@ -19,6 +19,7 @@ varnishd_SOURCES = \
        cache_fetch.c \
        cache_hash.c \
        cache_http.c \
+       cache_lru.c \
        cache_main.c \
        cache_pool.c \
        cache_pipe.c \
index 00ce1d1206c750f92c8157f86c9fbec75cd645b2..52fe1dc8d3e4e90c4f9dd4478b83b2023554e392 100644 (file)
@@ -254,6 +254,9 @@ struct object {
        TAILQ_HEAD(, storage)   store;
 
        TAILQ_HEAD(, sess)      waitinglist;
+
+       time_t                  lru_stamp;
+       TAILQ_ENTRY(object)     lru;
 };
 
 struct objhead {
@@ -372,6 +375,7 @@ void CLI_Init(void);
 void EXP_Insert(struct object *o);
 void EXP_Init(void);
 void EXP_TTLchange(struct object *o);
+void EXP_Retire(struct object *o);
 
 /* cache_fetch.c */
 int Fetch(struct sess *sp);
@@ -473,6 +477,12 @@ void VCL_Refresh(struct VCL_conf **vcc);
 void VCL_Rel(struct VCL_conf **vcc);
 void VCL_Get(struct VCL_conf **vcc);
 
+/* cache_lru.c */
+void LRU_Enter(struct object *o, time_t stamp);
+void LRU_Remove(struct object *o);
+void LRU_DiscardSpace(struct sess *sp, uint64_t quota);
+void LRU_DiscardTime(struct sess *sp, time_t cutoff);
+
 #define VCL_RET_MAC(l,u,b,n)
 #define VCL_MET_MAC(l,u,b) void VCL_##l##_method(struct sess *);
 #include "vcl_returns.h"
index f77f80a30fc949f1d1eb4b8ed9a554e75beb4b4a..4ffe636ddf51fca5dc29da66736e30b8a3d0aea2 100644 (file)
@@ -74,6 +74,15 @@ EXP_TTLchange(struct object *o)
        UNLOCK(&exp_mtx);
 }
 
+void
+EXP_Retire(struct object *o)
+{
+       LOCK(&exp_mtx);
+       TAILQ_INSERT_TAIL(&exp_deathrow, o, deathrow);
+       VSL_stats->n_deathrow++;
+       UNLOCK(&exp_mtx);
+}
+
 /*--------------------------------------------------------------------
  * This thread monitors deathrow and kills objects when they time out.
  */
@@ -174,10 +183,7 @@ exp_prefetch(void *arg)
                VCL_timeout_method(sp);
 
                if (sp->handling == VCL_RET_DISCARD) {
-                       LOCK(&exp_mtx);
-                       TAILQ_INSERT_TAIL(&exp_deathrow, o, deathrow);
-                       VSL_stats->n_deathrow++;
-                       UNLOCK(&exp_mtx);
+                       EXP_Retire(o);
                        continue;
                }
                assert(sp->handling == VCL_RET_DISCARD);
index 080989b29e700be3e57f628abfcf36e4a1e34022..0fde92fad62065c7db4e36c7ca120be156437fd2 100644 (file)
@@ -166,6 +166,7 @@ VSLR(SLT_Debug, sp->fd, sp->hash_b, sp->hash_e);
        if (o != NULL) {
                UNLOCK(&oh->mtx);
                (void)hash->deref(oh);
+               LRU_Enter(o, sp->t_req.tv_sec);
                return (o);
        }
 
@@ -177,6 +178,7 @@ VSLR(SLT_Debug, sp->fd, sp->hash_b, sp->hash_e);
        /* NB: do not deref objhead the new object inherits our reference */
        UNLOCK(&oh->mtx);
        BAN_NewObj(o);
+       LRU_Enter(o, sp->t_req.tv_sec);
        return (o);
 }
 
@@ -258,6 +260,7 @@ HSH_Deref(struct object *o)
                free(o->vary);
 
        HSH_Freestore(o);
+       LRU_Remove(o);
        FREE_OBJ(o);
        VSL_stats->n_object--;
 
diff --git a/varnish-cache/bin/varnishd/cache_lru.c b/varnish-cache/bin/varnishd/cache_lru.c
new file mode 100644 (file)
index 0000000..00368b7
--- /dev/null
@@ -0,0 +1,147 @@
+/*-
+ * Copyright (c) 2007 Linpro AS
+ * All rights reserved.
+ *
+ * Author: Dag-Erling Smørgrav <des@linpro.no>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include "shmlog.h"
+#include "cache.h"
+#include "queue.h"
+
+/*
+ * For performance reasons, objects are only moved to the head of the LRU
+ * list when they've been in their current position for at least LRU_DELAY
+ * seconds, rather than on every access.  This should probably be a
+ * run-time parameter.
+ */
+#define LRU_DELAY 2
+
+static pthread_mutex_t lru_mtx = PTHREAD_MUTEX_INITIALIZER;
+static TAILQ_HEAD(lru_head, object) lru_list;
+
+/*
+ * Enter an object into the LRU list, or move it to the head of the list
+ * if it's already in it and hasn't moved in a while.
+ */
+void
+LRU_Enter(struct object *o, time_t stamp)
+{
+
+       CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
+       assert(stamp > 0);
+       if (o->lru_stamp < stamp - LRU_DELAY && o != lru_list.tqh_first) {
+               // VSL(SLT_LRU_enter, 0, "%u %u %u", o->xid, o->lru_stamp, stamp);
+               pthread_mutex_lock(&lru_mtx);
+               if (o->lru_stamp != 0)
+                       TAILQ_REMOVE(&lru_list, o, lru);
+               TAILQ_INSERT_HEAD(&lru_list, o, lru);
+               o->lru_stamp = stamp;
+               pthread_mutex_unlock(&lru_mtx);
+       }
+}
+
+/*
+ * Remove an object from the LRU list.
+ */
+void
+LRU_Remove(struct object *o)
+{
+
+       CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
+       if (o->lru_stamp != 0) {
+               // VSL(SLT_LRU_remove, 0, "%u", o->xid);
+               pthread_mutex_lock(&lru_mtx);
+               TAILQ_REMOVE(&lru_list, o, lru);
+               pthread_mutex_unlock(&lru_mtx);
+       }
+}
+
+/*
+ * Walk through the LRU list, starting at the back, and retire objects
+ * until our quota is reached or we run out of objects to retire.
+ */
+void
+LRU_DiscardSpace(struct sess *sp, uint64_t quota)
+{
+       struct object *o, *so;
+
+       pthread_mutex_lock(&lru_mtx);
+       while ((o = TAILQ_LAST(&lru_list, lru_head))) {
+               TAILQ_REMOVE(&lru_list, o, lru);
+               so = sp->obj;
+               sp->obj = o;
+               VCL_discard_method(sp);
+               sp->obj = so;
+               if (sp->handling == VCL_RET_DISCARD) {
+                       /* discard: place on deathrow */
+                       EXP_Retire(o);
+                       o->lru_stamp = 0;
+                       if (o->len > quota)
+                               break;
+                       quota -= o->len;
+               } else {
+                       /* keep: move to front of list */
+                       if ((so = TAILQ_FIRST(&lru_list)))
+                               o->lru_stamp = so->lru_stamp;
+                       TAILQ_INSERT_HEAD(&lru_list, o, lru);
+               }
+       }
+       pthread_mutex_unlock(&lru_mtx);
+}
+
+/*
+ * Walk through the LRU list, starting at the back, and retire objects
+ * that haven't been accessed since the specified cutoff date.
+ */
+void
+LRU_DiscardTime(struct sess *sp, time_t cutoff)
+{
+       struct object *o, *so;
+
+       pthread_mutex_lock(&lru_mtx);
+       while ((o = TAILQ_LAST(&lru_list, lru_head))) {
+               if (o->lru_stamp >= cutoff)
+                       break;
+               TAILQ_REMOVE(&lru_list, o, lru);
+               so = sp->obj;
+               sp->obj = o;
+               VCL_discard_method(sp);
+               sp->obj = so;
+               if (sp->handling == VCL_RET_DISCARD) {
+                       /* discard: place on deathrow */
+                       EXP_Retire(o);
+               } else {
+                       /* keep: move to front of list */
+                       if ((so = TAILQ_FIRST(&lru_list)) && so->lru_stamp > cutoff)
+                               o->lru_stamp = so->lru_stamp;
+                       else
+                               o->lru_stamp = cutoff;
+                       TAILQ_INSERT_HEAD(&lru_list, o, lru);
+               }
+       }
+       pthread_mutex_unlock(&lru_mtx);
+}
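
As a closing sketch, the time-based variant could be driven periodically
(again hypothetical; nothing in this commit actually schedules either
discard function):

#include <time.h>
#include <unistd.h>
#include "cache.h"

/* Hypothetical pruner thread: once a minute, retire objects whose LRU
 * stamp is more than an hour old.  vcl_discard() still gets to veto
 * each object, and the hangman performs the actual destruction. */
static void *
lru_prune_thread(void *arg)
{
	struct sess *sp = arg;

	for (;;) {
		LRU_DiscardTime(sp, time(NULL) - 3600);
		sleep(60);
	}
}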