mgt_vcc.c \
rfc2616.c \
shmlog.c \
+ stevedore.c \
storage_file.c \
storage_malloc.c \
tcp.c \
void EXP_Insert(struct object *o);
void EXP_Init(void);
void EXP_TTLchange(struct object *o);
-void EXP_Retire(struct object *o);
+void EXP_Terminate(struct object *o);
/* cache_fetch.c */
int Fetch(struct sess *sp);
void VCL_Get(struct VCL_conf **vcc);
/* cache_lru.c */
+// void LRU_Init(void);
void LRU_Enter(struct object *o, time_t stamp);
void LRU_Remove(struct object *o);
-void LRU_DiscardSpace(struct sess *sp, uint64_t quota);
-void LRU_DiscardTime(struct sess *sp, time_t cutoff);
+int LRU_DiscardOne(void);
+int LRU_DiscardSpace(int64_t quota);
+int LRU_DiscardTime(time_t cutoff);
#define VCL_RET_MAC(l,u,b,n)
#define VCL_MET_MAC(l,u,b) void VCL_##l##_method(struct sess *);
UNLOCK(&exp_mtx);
}
+/*
+ * Immediately destroy an object: remove it from the LRU list, the
+ * expiry binheap and death row, then log the kill and drop the hash
+ * reference.  Do not wait for it to expire or trickle through death
+ * row; yank it.
+ *
+ * NOTE(review): this path takes exp_mtx and may take lru_mtx (via
+ * LRU_Remove), while LRU_DiscardLocked() calls in here holding
+ * lru_mtx; the inversion is only avoided because that caller clears
+ * lru_stamp first -- confirm no other caller can race this way.
+ */
void
-EXP_Retire(struct object *o)
+EXP_Terminate(struct object *o)
{
	LOCK(&exp_mtx);
-	TAILQ_INSERT_TAIL(&exp_deathrow, o, deathrow);
-	VSL_stats->n_deathrow++;
+	if (o->lru_stamp)
+		LRU_Remove(o);
+	if (o->heap_idx)
+		binheap_delete(exp_heap, o->heap_idx);
+	/* BUG in original: testing tqe_next misses the last element on
+	 * death row (its tqe_next is NULL), leaving it linked after the
+	 * object is freed.  A TAILQ member always has a non-NULL
+	 * tqe_prev, so test that instead (assumes the entry is zeroed
+	 * while off-list -- confirm the object is calloc'd). */
+	if (o->deathrow.tqe_prev != NULL) {
+		TAILQ_REMOVE(&exp_deathrow, o, deathrow);
+		VSL_stats->n_deathrow--;
+	}
	UNLOCK(&exp_mtx);
+	VSL(SLT_Terminate, 0, "%u", o->xid);
+	HSH_Deref(o);
}
/*--------------------------------------------------------------------
VCL_timeout_method(sp);
if (sp->handling == VCL_RET_DISCARD) {
- EXP_Retire(o);
+ LOCK(&exp_mtx);
+ TAILQ_INSERT_TAIL(&exp_deathrow, o, deathrow);
+ VSL_stats->n_deathrow++;
+ UNLOCK(&exp_mtx);
continue;
}
assert(sp->handling == VCL_RET_DISCARD);
if (cl == 0)
return (0);
- st = stevedore->alloc(stevedore, cl);
- XXXAN(st->stevedore);
+ st = STV_alloc(cl);
TAILQ_INSERT_TAIL(&sp->obj->store, st, list);
st->len = cl;
sp->obj->len = cl;
/* Get some storage if we don't have any */
if (st == NULL || st->len == st->space) {
v = u;
- if (u < params->fetch_chunksize * 1024 &&
- stevedore->trim != NULL)
+ if (u < params->fetch_chunksize * 1024)
v = params->fetch_chunksize * 1024;
- st = stevedore->alloc(stevedore, v);
- XXXAN(st->stevedore);
+ st = STV_alloc(v);
TAILQ_INSERT_TAIL(&sp->obj->store, st, list);
}
v = st->space - st->len;
if (st != NULL && st->len == 0) {
TAILQ_REMOVE(&sp->obj->store, st, list);
- stevedore->free(st);
- } else if (st != NULL && stevedore->trim != NULL)
- stevedore->trim(st, st->len);
+ STV_free(st);
+ } else if (st != NULL)
+ STV_trim(st, st->len);
return (0);
}
st = NULL;
while (1) {
if (v == 0) {
- st = stevedore->alloc(stevedore,
- params->fetch_chunksize * 1024);
- XXXAN(st->stevedore);
+ st = STV_alloc(params->fetch_chunksize * 1024);
TAILQ_INSERT_TAIL(&sp->obj->store, st, list);
p = st->ptr + st->len;
v = st->space - st->len;
if (st->len == 0) {
TAILQ_REMOVE(&sp->obj->store, st, list);
- stevedore->free(st);
- } else if (stevedore->trim != NULL)
- stevedore->trim(st, st->len);
+ STV_free(st);
+ } else
+ STV_trim(st, st->len);
return (1);
}
while (!TAILQ_EMPTY(&sp->obj->store)) {
st = TAILQ_FIRST(&sp->obj->store);
TAILQ_REMOVE(&sp->obj->store, st, list);
- stevedore->free(st);
+ STV_free(st);
}
close(vc->fd);
VBE_ClosedFd(sp->wrk, vc, 1);
TAILQ_FOREACH_SAFE(st, &o->store, list, stn) {
CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
TAILQ_REMOVE(&o->store, st, list);
- st->stevedore->free(st);
+ STV_free(st);
}
}
free(o->vary);
HSH_Freestore(o);
- LRU_Remove(o);
FREE_OBJ(o);
VSL_stats->n_object--;
*/
#define LRU_DELAY 2
+TAILQ_HEAD(lru_head, object);
+
+static struct lru_head lru_list = TAILQ_HEAD_INITIALIZER(lru_list);
static pthread_mutex_t lru_mtx = PTHREAD_MUTEX_INITIALIZER;
-static TAILQ_HEAD(lru_head, object) lru_list;
+static struct sess *lru_session;
+static struct worker lru_worker;
+
+/*
+ * Initialize the LRU data structures.
+ *
+ * Lazily creates a private session and worker used to run the
+ * vcl_discard method; on subsequent calls it only refreshes the VCL
+ * reference.  Not self-locking: callers must serialize access
+ * (currently invoked with lru_mtx held from LRU_DiscardLocked()).
+ */
+static inline void
+LRU_Init(void)
+{
+	if (lru_session == NULL) {
+		/* First call: set up the session/worker pair. */
+		lru_session = SES_New(NULL, 0);
+		XXXAN(lru_session);
+		lru_session->wrk = &lru_worker;
+		lru_worker.magic = WORKER_MAGIC;
+		/* Point the worker's log cursor at its own buffer. */
+		lru_worker.wlp = lru_worker.wlog;
+		lru_worker.wle = lru_worker.wlog + sizeof lru_worker.wlog;
+		VCL_Get(&lru_session->vcl);
+	} else {
+		/* Pick up a new VCL if one has been loaded since. */
+		VCL_Refresh(&lru_session->vcl);
+	}
+}
/*
* Enter an object into the LRU list, or move it to the head of the list
assert(stamp > 0);
if (o->lru_stamp < stamp - LRU_DELAY && o != lru_list.tqh_first) {
// VSL(SLT_LRU_enter, 0, "%u %u %u", o->xid, o->lru_stamp, stamp);
- pthread_mutex_lock(&lru_mtx);
+ LOCK(&lru_mtx);
if (o->lru_stamp != 0)
TAILQ_REMOVE(&lru_list, o, lru);
TAILQ_INSERT_HEAD(&lru_list, o, lru);
o->lru_stamp = stamp;
- pthread_mutex_unlock(&lru_mtx);
+ UNLOCK(&lru_mtx);
}
}
CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
if (o->lru_stamp != 0) {
// VSL(SLT_LRU_remove, 0, "%u", o->xid);
- pthread_mutex_lock(&lru_mtx);
+ LOCK(&lru_mtx);
TAILQ_REMOVE(&lru_list, o, lru);
- pthread_mutex_unlock(&lru_mtx);
+ UNLOCK(&lru_mtx);
}
}
+/*
+ * With the LRU lock held, call VCL_discard().  Depending on the result,
+ * either insert the object at the head of the list or dereference it.
+ * Returns 1 if the object was discarded, 0 if it was kept.
+ *
+ * NOTE(review): a busy object is neither removed nor moved, so it
+ * stays at the tail of the list -- callers iterating from the tail
+ * must take care not to spin on it.
+ */
+static int
+LRU_DiscardLocked(struct object *o)
+{
+	struct object *so;
+
+	/* Objects still being fetched/served cannot be discarded. */
+	if (o->busy)
+		return (0);
+
+	/* XXX this is a really bad place to do this */
+	LRU_Init();
+
+	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
+	TAILQ_REMOVE(&lru_list, o, lru);
+
+	/* Run vcl_discard on the private LRU session. */
+	lru_session->obj = o;
+	VCL_discard_method(lru_session);
+
+	if (lru_session->handling == VCL_RET_DISCARD) {
+		/* discard: release object */
+		VSL(SLT_ExpKill, 0, "%u %d", o->xid, o->lru_stamp);
+		/* Clear the stamp first so EXP_Terminate() does not call
+		 * LRU_Remove() and deadlock on lru_mtx, which we hold. */
+		o->lru_stamp = 0;
+		EXP_Terminate(o);
+		return (1);
+	} else {
+		/* keep: move to front of list, inheriting the current
+		 * head's stamp so ordering stays consistent */
+		if ((so = TAILQ_FIRST(&lru_list)))
+			o->lru_stamp = so->lru_stamp;
+		TAILQ_INSERT_HEAD(&lru_list, o, lru);
+		return (0);
+	}
+}
+
+/*
+ * Walk through the LRU list, starting at the back, and check each object
+ * until we find one that can be retired.  Return the number of objects
+ * that were discarded (0 or 1).
+ */
+int
+LRU_DiscardOne(void)
+{
+	struct object *first;
+	struct object *o;
+	int count = 0;
+
+	LOCK(&lru_mtx);
+	/* Snapshot the head under the lock; the original read it before
+	 * locking, racing with concurrent LRU_Enter()/LRU_Remove(). */
+	first = TAILQ_FIRST(&lru_list);
+	while (!count && (o = TAILQ_LAST(&lru_list, lru_head))) {
+		if (LRU_DiscardLocked(o))
+			++count;
+		else if (o == TAILQ_LAST(&lru_list, lru_head)) {
+			/* Busy object left at the tail: no progress is
+			 * possible, bail out instead of spinning. */
+			break;
+		}
+		if (o == first) {
+			/* full circle */
+			break;
+		}
+	}
+	UNLOCK(&lru_mtx);
+	/* BUG in original: returned 0 unconditionally, contradicting the
+	 * documented contract; report the actual number discarded. */
+	return (count);
+}
+
/*
 * Walk through the LRU list, starting at the back, and retire objects
- * until our quota is reached or we run out of objects to retire.
+ * until our quota is reached or we run out of objects to retire.  Return
+ * the number of objects that were discarded.
 */
-void
-LRU_DiscardSpace(struct sess *sp, uint64_t quota)
+int
+LRU_DiscardSpace(int64_t quota)
{
-	struct object *o, *so;
+	struct object *first;
+	struct object *o;
+	unsigned int len;
+	int count = 0;
-	pthread_mutex_lock(&lru_mtx);
-	while ((o = TAILQ_LAST(&lru_list, lru_head))) {
-		TAILQ_REMOVE(&lru_list, o, lru);
-		so = sp->obj;
-		sp->obj = o;
-		VCL_discard_method(sp);
-		sp->obj = so;
-		if (sp->handling == VCL_RET_DISCARD) {
-			/* discard: place on deathrow */
-			EXP_Retire(o);
-			o->lru_stamp = 0;
-			if (o->len > quota)
-				break;
-			quota -= o->len;
-		} else {
-			/* keep: move to front of list */
-			if ((so = TAILQ_FIRST(&lru_list)))
-				o->lru_stamp = so->lru_stamp;
-			TAILQ_INSERT_HEAD(&lru_list, o, lru);
+	LOCK(&lru_mtx);
+	/* Snapshot the head under the lock; the original read it before
+	 * locking, racing with concurrent list updates. */
+	first = TAILQ_FIRST(&lru_list);
+	while (quota > 0 && (o = TAILQ_LAST(&lru_list, lru_head))) {
+		/* Save the length: a discarded object may be freed. */
+		len = o->len;
+		if (LRU_DiscardLocked(o)) {
+			quota -= len;
+			++count;
+		} else if (o == TAILQ_LAST(&lru_list, lru_head)) {
+			/* Busy object left at the tail: no progress is
+			 * possible, bail out instead of spinning. */
+			break;
+		}
+		if (o == first) {
+			/* full circle */
+			break;
		}
	}
-	pthread_mutex_unlock(&lru_mtx);
+	UNLOCK(&lru_mtx);
+	return (count);
/*
 * Walk through the LRU list, starting at the back, and retire objects
- * that haven't been accessed since the specified cutoff date.
+ * that haven't been accessed since the specified cutoff date.  Return the
+ * number of objects that were discarded.
 */
-void
-LRU_DiscardTime(struct sess *sp, time_t cutoff)
+int
+LRU_DiscardTime(time_t cutoff)
{
-	struct object *o, *so;
+	struct object *first;
+	struct object *o;
+	int count = 0;
-	pthread_mutex_lock(&lru_mtx);
-	while ((o = TAILQ_LAST(&lru_list, lru_head))) {
-		if (o->lru_stamp >= cutoff)
+	LOCK(&lru_mtx);
+	/* Snapshot the head under the lock; the original read it before
+	 * locking, racing with concurrent list updates. */
+	first = TAILQ_FIRST(&lru_list);
+	while ((o = TAILQ_LAST(&lru_list, lru_head)) && o->lru_stamp <= cutoff) {
+		if (LRU_DiscardLocked(o))
+			++count;
+		else if (o == TAILQ_LAST(&lru_list, lru_head))
+			/* Busy object left at the tail: no progress is
+			 * possible, bail out instead of spinning. */
+			break;
+		if (o == first) {
+			/* full circle */
			break;
-		TAILQ_REMOVE(&lru_list, o, lru);
-		so = sp->obj;
-		sp->obj = o;
-		VCL_discard_method(sp);
-		sp->obj = so;
-		if (sp->handling == VCL_RET_DISCARD) {
-			/* discard: place on deathrow */
-			EXP_Retire(o);
-		} else {
-			/* keep: move to front of list */
-			if ((so = TAILQ_FIRST(&lru_list)) && so->lru_stamp > cutoff)
-				o->lru_stamp = so->lru_stamp;
-			else
-				o->lru_stamp = cutoff;
-			TAILQ_INSERT_HEAD(&lru_list, o, lru);
		}
	}
-	pthread_mutex_unlock(&lru_mtx);
+	UNLOCK(&lru_mtx);
+	return (count);
}
/* allocate space for body */
/* XXX what if the object already has a body? */
- st = stevedore->alloc(stevedore, 1024);
+ st = STV_alloc(1024);
XXXAN(st->stevedore);
TAILQ_INSERT_TAIL(&sp->obj->store, st, list);
--- /dev/null
+/*-
+ * Copyright (c) 2007 Linpro AS
+ * All rights reserved.
+ *
+ * Author: Dag-Erling Smørgrav <des@linpro.no>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include "cache.h"
+
+/*
+ * Allocate a storage segment of the given size from the configured
+ * stevedore.  On allocation failure, evict an LRU object and retry
+ * until the allocation succeeds; never returns NULL.
+ *
+ * NOTE(review): if LRU_DiscardOne() cannot make progress (empty LRU
+ * list, or every object busy/kept) this loop never terminates --
+ * confirm some progress guarantee exists.
+ */
+struct storage *
+STV_alloc(size_t size)
+{
+	struct storage *st;
+
+	AN(stevedore);
+	AN(stevedore->alloc);
+	do {
+		if ((st = stevedore->alloc(stevedore, size)) == NULL)
+			LRU_DiscardOne();
+	} while (st == NULL);
+	return (st);
+}
+
+/*
+ * Shrink a storage segment to the given size, if the stevedore that
+ * allocated it supports trimming; otherwise leave it untouched.
+ */
+void
+STV_trim(struct storage *st, size_t size)
+{
+
+	AN(st->stevedore);
+	if (st->stevedore->trim)
+		st->stevedore->trim(st, size);
+}
+
+/*
+ * Release a storage segment through the stevedore that allocated it.
+ * The segment must already be off any object's store list.
+ */
+void
+STV_free(struct storage *st)
+{
+
+	AN(st->stevedore);
+	/* BUG in original: asserted the global stevedore's free method
+	 * but called st->stevedore->free; assert the one actually
+	 * called (they may differ once multiple stevedores exist). */
+	AN(st->stevedore->free);
+	st->stevedore->free(st);
+}
/* private fields */
void *priv;
};
+
+struct storage *STV_alloc(size_t size);
+void STV_trim(struct storage *st, size_t size);
+void STV_free(struct storage *st);
LOCK(&sc->mtx);
VSL_stats->sm_nreq++;
smf = alloc_smf(sc, size);
+ if (smf == NULL) {
+ UNLOCK(&sc->mtx);
+ return (NULL);
+ }
CHECK_OBJ_NOTNULL(smf, SMF_MAGIC);
VSL_stats->sm_nobj++;
VSL_stats->sm_balloc += smf->size;
VSL_stats->sm_nreq++;
sma = calloc(sizeof *sma, 1);
- XXXAN(sma);
+ if (sma == NULL)
+ return (NULL);
sma->s.priv = sma;
sma->s.ptr = malloc(size);
XXXAN(sma->s.ptr);
{
struct sma *sma;
+ CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
sma = s->priv;
VSL_stats->sm_nobj--;
VSL_stats->sm_balloc -= sma->s.space;
free(sma);
}
+/*
+ * Shrink a malloc-backed storage segment to the given size via
+ * realloc(), keeping the byte-accounting statistics in step.  If
+ * realloc() fails the segment is simply left at its old size.
+ *
+ * NOTE(review): nothing here rejects size > s->space or size == 0;
+ * the only caller (STV_trim with st->len > 0) never passes either --
+ * confirm before adding other callers.
+ */
+static void
+sma_trim(struct storage *s, size_t size)
+{
+	struct sma *sma;
+	void *p;
+
+	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
+	sma = s->priv;
+	if ((p = realloc(sma->s.ptr, size)) != NULL) {
+		/* Adjust accounting around the (possibly moved) block. */
+		VSL_stats->sm_balloc -= sma->s.space;
+		sma->s.ptr = p;
+		sma->s.space = size;
+		VSL_stats->sm_balloc += sma->s.space;
+	}
+}
+
struct stevedore sma_stevedore = {
	.name = "malloc",
	.alloc = sma_alloc,
-	.free = sma_free
+	.free = sma_free,
+	/* trim lets STV_trim() hand over-allocation back to malloc */
+	.trim = sma_trim,
};
SLTM(ExpPick)
SLTM(ExpKill)
SLTM(WorkThread)
+SLTM(Terminate)