From: phk Date: Fri, 14 Jul 2006 13:33:26 +0000 (+0000) Subject: When during a lookup we encounter a busy object, queue the session on X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c2f15afdcf0fec88ce9db44c5e79fb60a23062f7;p=varnish When during a lookup we encounter a busy object, queue the session on the object's waitinglist and disembark the worker thread so it can do something sensible in the meantime. This feature is unimportant in normal operation, but crucial to resource management if a popular URL suddenly takes a long time to reply from the backend. Without this bit of semi-nasty code, we would tie up one worker thread per client while waiting for the backend to come to its senses. git-svn-id: svn+ssh://projects.linpro.no/svn/varnish/trunk@478 d4fa192b-c00b-0410-8231-f00ffab90ce4 --- diff --git a/varnish-cache/bin/varnishd/cache.h b/varnish-cache/bin/varnishd/cache.h index abfb85f9..0d8b29d9 100644 --- a/varnish-cache/bin/varnishd/cache.h +++ b/varnish-cache/bin/varnishd/cache.h @@ -120,7 +120,6 @@ struct object { unsigned refcnt; unsigned xid; struct objhead *objhead; - pthread_cond_t cv; unsigned heap_idx; unsigned ban_seq; @@ -145,6 +144,8 @@ struct object { TAILQ_ENTRY(object) deathrow; TAILQ_HEAD(, storage) store; + + TAILQ_HEAD(, sess) waitinglist; }; struct objhead { @@ -257,7 +258,7 @@ int FetchBody(struct worker *w, struct sess *sp); int FetchHeaders(struct worker *w, struct sess *sp); /* cache_hash.c */ -struct object *HSH_Lookup(struct worker *w, struct http *h); +struct object *HSH_Lookup(struct sess *sp); void HSH_Unbusy(struct object *o); void HSH_Deref(struct object *o); void HSH_Init(void); diff --git a/varnish-cache/bin/varnishd/cache_acceptor.c b/varnish-cache/bin/varnishd/cache_acceptor.c index 79fb0e37..57a3ffbb 100644 --- a/varnish-cache/bin/varnishd/cache_acceptor.c +++ b/varnish-cache/bin/varnishd/cache_acceptor.c @@ -32,6 +32,7 @@ static struct event tick_e; static struct timeval tick_rate; static pthread_t 
vca_thread; +static unsigned xids; static struct event accept_e[2 * HERITAGE_NSOCKS]; static TAILQ_HEAD(,sess) sesshead = TAILQ_HEAD_INITIALIZER(sesshead); @@ -154,6 +155,9 @@ vca_callback(void *arg, int bad) return; } sp->step = STP_RECV; + VSL_stats->client_req++; + sp->xid = xids++; + VSL(SLT_XID, sp->fd, "%u", sp->xid); WRK_QueueSession(sp); } @@ -295,4 +299,6 @@ VCA_Init(void) tick_rate.tv_sec = 1; tick_rate.tv_usec = 0; AZ(pthread_create(&vca_thread, NULL, vca_main, NULL)); + srandomdev(); + xids = random(); } diff --git a/varnish-cache/bin/varnishd/cache_center.c b/varnish-cache/bin/varnishd/cache_center.c index d5e19114..1fc90c24 100644 --- a/varnish-cache/bin/varnishd/cache_center.c +++ b/varnish-cache/bin/varnishd/cache_center.c @@ -273,17 +273,20 @@ cnt_hit(struct sess *sp) /*-------------------------------------------------------------------- * Look up request in hash table * + * LOOKUP consists of two substates so that we can reenter if we + * encounter a busy object. + * DOT subgraph cluster_lookup { DOT lookup [ DOT shape=ellipse DOT label="find obj in cache" DOT ] DOT LOOKUP -> lookup [style=bold] -DOT lookup2 [ +DOT lookup3 [ DOT shape=ellipse DOT label="Insert new busy object" DOT ] -DOT lookup -> lookup2 [style=bold] +DOT lookup -> lookup3 [style=bold] DOT } DOT lookup -> HIT [label="hit", style=bold] DOT lookup2 -> MISS [label="miss", style=bold] @@ -292,22 +295,48 @@ DOT lookup2 -> MISS [label="miss", style=bold] static int cnt_lookup(struct sess *sp) { + sp->obj = NULL; + sp->step = STP_LOOKUP2; + return (0); +} - sp->obj = HSH_Lookup(sp->wrk, sp->http); +static int +cnt_lookup2(struct sess *sp) +{ + struct object *o; + + /* + * We don't assign to sp->obj directly because it is used + * to store state when we encounter a busy object. 
+ */ + o = HSH_Lookup(sp); + + /* If we encountered busy-object, disembark worker thread */ + if (o == NULL) { + VSL(SLT_Debug, sp->fd, + "on waiting list on obj %u", sp->obj->xid); + return (1); + } + + sp->obj = o; + + /* If we inserted a new object it's a miss */ if (sp->obj->busy) { VSL_stats->cache_miss++; sp->step = STP_MISS; return (0); } + + /* Account separately for pass and cache objects */ if (sp->obj->pass) { VSL_stats->cache_hitpass++; VSL(SLT_HitPass, sp->fd, "%u", sp->obj->xid); - sp->step = STP_HIT; - return (0); - } - VSL_stats->cache_hit++; - VSL(SLT_Hit, sp->fd, "%u", sp->obj->xid); + } else { + VSL_stats->cache_hit++; + VSL(SLT_Hit, sp->fd, "%u", sp->obj->xid); + } sp->step = STP_HIT; +HERE(); return (0); } diff --git a/varnish-cache/bin/varnishd/cache_hash.c b/varnish-cache/bin/varnishd/cache_hash.c index b345f021..ca9adc97 100644 --- a/varnish-cache/bin/varnishd/cache_hash.c +++ b/varnish-cache/bin/varnishd/cache_hash.c @@ -40,13 +40,18 @@ static struct hash_slinger *hash; struct object * -HSH_Lookup(struct worker *w, struct http *h) +HSH_Lookup(struct sess *sp) { + struct worker *w; + struct http *h; struct objhead *oh; struct object *o; char *b, *c; assert(hash != NULL); + w = sp->wrk; + h = sp->http; + /* Precreate an objhead and object in case we need them */ if (w->nobjhead == NULL) { w->nobjhead = calloc(sizeof *w->nobjhead, 1); @@ -61,21 +66,32 @@ HSH_Lookup(struct worker *w, struct http *h) w->nobj->busy = 1; w->nobj->refcnt = 1; TAILQ_INIT(&w->nobj->store); - AZ(pthread_cond_init(&w->nobj->cv, NULL)); + TAILQ_INIT(&w->nobj->waitinglist); VSL_stats->n_object++; } assert(http_GetURL(h, &b)); if (!http_GetHdr(h, "Host", &c)) c = b; + if (sp->obj != NULL) { + o = sp->obj; + oh = o->objhead; + AZ(pthread_mutex_lock(&oh->mtx)); + goto were_back; + } oh = hash->lookup(b, c, w->nobjhead); if (oh == w->nobjhead) w->nobjhead = NULL; AZ(pthread_mutex_lock(&oh->mtx)); TAILQ_FOREACH(o, &oh->objects, list) { o->refcnt++; - if (o->busy) - 
AZ(pthread_cond_wait(&o->cv, &oh->mtx)); + if (o->busy) { + TAILQ_INSERT_TAIL(&o->waitinglist, sp, list); + sp->obj = o; + AZ(pthread_mutex_unlock(&oh->mtx)); + return (NULL); + } + were_back: /* XXX: check ttl */ /* XXX: check Vary: */ if (!o->cacheable) { @@ -110,13 +126,22 @@ HSH_Lookup(struct worker *w, struct http *h) void HSH_Unbusy(struct object *o) { + struct sess *sp; + assert(o != NULL); + assert(o->refcnt > 0); if (o->cacheable) EXP_Insert(o); AZ(pthread_mutex_lock(&o->objhead->mtx)); o->busy = 0; AZ(pthread_mutex_unlock(&o->objhead->mtx)); - AZ(pthread_cond_broadcast(&o->cv)); + while (1) { + sp = TAILQ_FIRST(&o->waitinglist); + if (sp == NULL) + break; + TAILQ_REMOVE(&o->waitinglist, sp, list); + WRK_QueueSession(sp); + } } void @@ -143,7 +168,6 @@ HSH_Deref(struct object *o) free(o->header); VSL_stats->n_header--; } - AZ(pthread_cond_destroy(&o->cv)); TAILQ_FOREACH_SAFE(st, &o->store, list, stn) { TAILQ_REMOVE(&o->store, st, list); diff --git a/varnish-cache/bin/varnishd/cache_pool.c b/varnish-cache/bin/varnishd/cache_pool.c index d776b577..664bf73a 100644 --- a/varnish-cache/bin/varnishd/cache_pool.c +++ b/varnish-cache/bin/varnishd/cache_pool.c @@ -17,7 +17,6 @@ #include "cache.h" static pthread_mutex_t wrk_mtx; -static unsigned xids; /* Number of work requests queued in excess of worker threads available */ static unsigned wrk_overflow; @@ -105,15 +104,7 @@ WRK_QueueSession(struct sess *sp) sp->t_req = time(NULL); - /* - * No locking necessary, we're serialized in the acceptor thread - * XXX: still ? 
- */ - sp->xid = xids++; - VSL(SLT_XID, sp->fd, "%u", sp->xid); - sp->workreq.sess = sp; - VSL_stats->client_req++; AZ(pthread_mutex_lock(&wrk_mtx)); TAILQ_INSERT_TAIL(&wrk_reqhead, &sp->workreq, list); @@ -171,6 +162,4 @@ WRK_Init(void) AZ(pthread_create(&tp, NULL, wrk_thread, &i)); AZ(pthread_detach(tp)); } - srandomdev(); - xids = random(); } diff --git a/varnish-cache/bin/varnishd/steps.h b/varnish-cache/bin/varnishd/steps.h index e1c3b856..1e2a9bd7 100644 --- a/varnish-cache/bin/varnishd/steps.h +++ b/varnish-cache/bin/varnishd/steps.h @@ -5,6 +5,7 @@ STEP(pipe, PIPE) STEP(pass, PASS) STEP(passbody, PASSBODY) STEP(lookup, LOOKUP) +STEP(lookup2, LOOKUP2) STEP(miss, MISS) STEP(hit, HIT) STEP(fetch, FETCH)