* (if any) while we have the lock anyway.
*/
vc2 = NULL;
- AZ(pthread_mutex_lock(&vbemtx));
+ LOCK(&vbemtx);
vc = TAILQ_FIRST(&bp->connlist);
if (vc != NULL) {
assert(vc->fd >= 0);
TAILQ_REMOVE(&vbe_head, vc2, list);
}
}
- AZ(pthread_mutex_unlock(&vbemtx));
+ UNLOCK(&vbemtx);
if (vc == NULL)
break;
if (vc->fd < 0) {
assert(vc->backend == NULL);
vc->fd = vbe_connect(bp);
- AZ(pthread_mutex_lock(&vbemtx));
+ LOCK(&vbemtx);
if (vc->fd < 0) {
vc->backend = NULL;
TAILQ_INSERT_HEAD(&vbe_head, vc, list);
} else {
vc->backend = bp;
}
- AZ(pthread_mutex_unlock(&vbemtx));
+ UNLOCK(&vbemtx);
} else {
assert(vc->fd >= 0);
assert(vc->backend == bp);
AZ(close(vc->fd));
vc->fd = -1;
vc->backend = NULL;
- AZ(pthread_mutex_lock(&vbemtx));
+ LOCK(&vbemtx);
TAILQ_INSERT_HEAD(&vbe_head, vc, list);
VSL_stats->backend_unused++;
- AZ(pthread_mutex_unlock(&vbemtx));
+ UNLOCK(&vbemtx);
}
/* Recycle a connection ----------------------------------------------*/
assert(vc->backend != NULL);
VSL_stats->backend_recycle++;
VSL(SLT_BackendReuse, vc->fd, "%s", vc->backend->vcl_name);
- AZ(pthread_mutex_lock(&vbemtx));
+ LOCK(&vbemtx);
TAILQ_INSERT_HEAD(&vc->backend->connlist, vc, list);
- AZ(pthread_mutex_unlock(&vbemtx));
+ UNLOCK(&vbemtx);
}
/*--------------------------------------------------------------------*/
{
assert(o->heap_idx == 0);
- AZ(pthread_mutex_lock(&exp_mtx));
+ LOCK(&exp_mtx);
binheap_insert(exp_heap, o);
- AZ(pthread_mutex_unlock(&exp_mtx));
+ UNLOCK(&exp_mtx);
}
/*
 * EXP_TTLchange -- re-seat an object in the expiry binary heap after
 * its TTL has been changed.  The object must already be on the heap
 * (heap_idx != 0); it is deleted and re-inserted under exp_mtx so the
 * heap re-orders on the new TTL.  (Diff hunk: the lock/unlock pair is
 * being converted to the LOCK()/UNLOCK() macros.)
 */
void
EXP_TTLchange(struct object *o)
{
assert(o->heap_idx != 0);
- AZ(pthread_mutex_lock(&exp_mtx));
+ LOCK(&exp_mtx);
binheap_delete(exp_heap, o->heap_idx);
binheap_insert(exp_heap, o);
- AZ(pthread_mutex_unlock(&exp_mtx));
+ UNLOCK(&exp_mtx);
}
/*--------------------------------------------------------------------
while (1) {
t = time(NULL);
- AZ(pthread_mutex_lock(&exp_mtx));
+ LOCK(&exp_mtx);
TAILQ_FOREACH(o, &exp_deathrow, deathrow) {
CHECK_OBJ(o, OBJECT_MAGIC);
if (o->ttl >= t) {
break;
}
if (o == NULL) {
- AZ(pthread_mutex_unlock(&exp_mtx));
+ UNLOCK(&exp_mtx);
AZ(sleep(1));
continue;
}
TAILQ_REMOVE(&exp_deathrow, o, deathrow);
VSL_stats->n_deathrow--;
VSL_stats->n_expired++;
- AZ(pthread_mutex_unlock(&exp_mtx));
+ UNLOCK(&exp_mtx);
VSL(SLT_ExpKill, 0, "%u %d", o->xid, (int)(o->ttl - t));
HSH_Deref(o);
}
assert(sp != NULL);
while (1) {
t = time(NULL);
- AZ(pthread_mutex_lock(&exp_mtx));
+ LOCK(&exp_mtx);
o = binheap_root(exp_heap);
if (o != NULL)
CHECK_OBJ(o, OBJECT_MAGIC);
if (o == NULL || o->ttl > t + expearly) {
- AZ(pthread_mutex_unlock(&exp_mtx));
+ UNLOCK(&exp_mtx);
AZ(sleep(1));
continue;
}
if (o2 != NULL)
assert(o2->ttl >= o->ttl);
- AZ(pthread_mutex_unlock(&exp_mtx));
+ UNLOCK(&exp_mtx);
VSL(SLT_ExpPick, 0, "%u", o->xid);
sp->vcl = VCL_Get();
VCL_Rel(sp->vcl);
if (sp->handling == VCL_RET_DISCARD) {
- AZ(pthread_mutex_lock(&exp_mtx));
+ LOCK(&exp_mtx);
TAILQ_INSERT_TAIL(&exp_deathrow, o, deathrow);
VSL_stats->n_deathrow++;
- AZ(pthread_mutex_unlock(&exp_mtx));
+ UNLOCK(&exp_mtx);
continue;
}
assert(sp->handling == VCL_RET_DISCARD);
o = sp->obj;
oh = o->objhead;
CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
- AZ(pthread_mutex_lock(&oh->mtx));
+ LOCK(&oh->mtx);
goto were_back;
}
oh = hash->lookup(url, host, w->nobjhead);
CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
if (oh == w->nobjhead)
w->nobjhead = NULL;
- AZ(pthread_mutex_lock(&oh->mtx));
+ LOCK(&oh->mtx);
TAILQ_FOREACH(o, &oh->objects, list) {
o->refcnt++;
if (o->busy) {
TAILQ_INSERT_TAIL(&o->waitinglist, sp, list);
sp->obj = o;
- AZ(pthread_mutex_unlock(&oh->mtx));
+ UNLOCK(&oh->mtx);
return (NULL);
}
were_back:
o->refcnt--;
}
if (o != NULL) {
- AZ(pthread_mutex_unlock(&oh->mtx));
+ UNLOCK(&oh->mtx);
(void)hash->deref(oh);
return (o);
}
o->objhead = oh;
TAILQ_INSERT_TAIL(&oh->objects, o, list);
/* NB: do not deref objhead the new object inherits our reference */
- AZ(pthread_mutex_unlock(&oh->mtx));
+ UNLOCK(&oh->mtx);
BAN_NewObj(o);
return (o);
}
assert(o->refcnt > 0);
if (o->cacheable)
EXP_Insert(o);
- AZ(pthread_mutex_lock(&o->objhead->mtx));
+ LOCK(&o->objhead->mtx);
o->busy = 0;
- AZ(pthread_mutex_unlock(&o->objhead->mtx));
+ UNLOCK(&o->objhead->mtx);
while (1) {
sp = TAILQ_FIRST(&o->waitinglist);
if (sp == NULL)
CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
oh = o->objhead;
CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
- AZ(pthread_mutex_lock(&oh->mtx));
+ LOCK(&oh->mtx);
assert(o->refcnt > 0);
o->refcnt++;
- AZ(pthread_mutex_unlock(&oh->mtx));
+ UNLOCK(&oh->mtx);
}
void
CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
/* drop ref on object */
- AZ(pthread_mutex_lock(&oh->mtx));
+ LOCK(&oh->mtx);
assert(o->refcnt > 0);
r = --o->refcnt;
if (!r)
TAILQ_REMOVE(&oh->objects, o, list);
- AZ(pthread_mutex_unlock(&oh->mtx));
+ UNLOCK(&oh->mtx);
/* If still referenced, done */
if (r != 0)
VSL_stats->n_wrk_busy++;
TAILQ_REMOVE(&wrk_reqhead, wrq, list);
VSL_stats->n_wrk_queue--;
- AZ(pthread_mutex_unlock(&wrk_mtx));
+ UNLOCK(&wrk_mtx);
CHECK_OBJ_NOTNULL(wrq->sess, SESS_MAGIC);
wrq->sess->wrk = w;
w->wrq = wrq;
if (w->nobjhead != NULL)
CHECK_OBJ(w->nobjhead, OBJHEAD_MAGIC);
w->wrq = NULL;
- AZ(pthread_mutex_lock(&wrk_mtx));
+ LOCK(&wrk_mtx);
VSL_stats->n_wrk_busy--;
}
AZ(pthread_cond_init(&w->cv, NULL));
- AZ(pthread_mutex_lock(&wrk_mtx));
+ LOCK(&wrk_mtx);
w->nbr = VSL_stats->n_wrk;
VSL_stats->n_wrk_create++;
VSL(SLT_WorkThread, 0, "%u born", w->nbr);
if (pthread_cond_timedwait(&w->cv, &wrk_mtx, &ts)) {
VSL_stats->n_wrk--;
TAILQ_REMOVE(&wrk_idle, w, list);
- AZ(pthread_mutex_unlock(&wrk_mtx));
+ UNLOCK(&wrk_mtx);
VSL(SLT_WorkThread, 0, "%u suicide", w->nbr);
AZ(pthread_cond_destroy(&w->cv));
return (NULL);
sp->workreq.sess = sp;
- AZ(pthread_mutex_lock(&wrk_mtx));
+ LOCK(&wrk_mtx);
TAILQ_INSERT_TAIL(&wrk_reqhead, &sp->workreq, list);
VSL_stats->n_wrk_queue++;
AZ(pthread_cond_signal(&w->cv));
TAILQ_REMOVE(&wrk_idle, w, list);
TAILQ_INSERT_TAIL(&wrk_busy, w, list);
- AZ(pthread_mutex_unlock(&wrk_mtx));
+ UNLOCK(&wrk_mtx);
return;
}
/* Can we create more threads ? */
if (VSL_stats->n_wrk >= params->wthread_max) {
VSL_stats->n_wrk_max++;
- AZ(pthread_mutex_unlock(&wrk_mtx));
+ UNLOCK(&wrk_mtx);
return;
}
/* Try to create a thread */
VSL_stats->n_wrk++;
- AZ(pthread_mutex_unlock(&wrk_mtx));
+ UNLOCK(&wrk_mtx);
if (!pthread_create(&tp, NULL, wrk_thread, NULL)) {
AZ(pthread_detach(tp));
errno, strerror(errno));
/* Register overflow */
- AZ(pthread_mutex_lock(&wrk_mtx));
+ LOCK(&wrk_mtx);
VSL_stats->n_wrk--;
VSL_stats->n_wrk_failed++;
- AZ(pthread_mutex_unlock(&wrk_mtx));
+ UNLOCK(&wrk_mtx);
}
(void)av;
(void)priv;
struct worker *w;
- AZ(pthread_mutex_lock(&wrk_mtx));
+ LOCK(&wrk_mtx);
t = time(NULL);
TAILQ_FOREACH(w, &wrk_busy, list) {
cli_out(cli, "\n");
TAILQ_FOREACH(w, &wrk_idle, list)
u++;
cli_out(cli, "%u idle workers\n", u);
- AZ(pthread_mutex_unlock(&wrk_mtx));
+ UNLOCK(&wrk_mtx);
}
ch = &srcaddr_hash[v];
now = time(NULL);
- AZ(pthread_mutex_lock(&ses_mtx));
+ LOCK(&ses_mtx);
c3 = NULL;
TAILQ_FOREACH_SAFE(c, ch, list, c2) {
if (c->hash == u && !strcmp(c->addr, sp->addr)) {
VSL_stats->n_srcaddr--;
free(c3);
}
- AZ(pthread_mutex_unlock(&ses_mtx));
+ UNLOCK(&ses_mtx);
return;
}
if (c->nref > 0 || c->ttl > now)
TAILQ_INSERT_TAIL(ch, c3, list);
sp->srcaddr = c3;
}
- AZ(pthread_mutex_unlock(&ses_mtx));
+ UNLOCK(&ses_mtx);
}
static void
ses_sum_acct(&sp->acct, a);
- AZ(pthread_mutex_lock(&ses_mtx));
+ LOCK(&ses_mtx);
ses_sum_acct(b, a);
VSL(SLT_StatAddr, 0, "%s 0 %d %ju %ju %ju %ju %ju %ju %ju",
sp->srcaddr->addr, time(NULL) - b->first,
VSL_stats->s_fetch += a->fetch;
VSL_stats->s_hdrbytes += a->hdrbytes;
VSL_stats->s_bodybytes += a->bodybytes;
- AZ(pthread_mutex_unlock(&ses_mtx));
+ UNLOCK(&ses_mtx);
memset(a, 0, sizeof *a);
}
return;
}
assert(sp->srcaddr != NULL);
- AZ(pthread_mutex_lock(&ses_mtx));
+ LOCK(&ses_mtx);
assert(sp->srcaddr->nref > 0);
sp->srcaddr->nref--;
if (sp->srcaddr->nref == 0)
VSL_stats->n_srcaddr_act--;
sp->srcaddr = NULL;
- AZ(pthread_mutex_unlock(&ses_mtx));
+ UNLOCK(&ses_mtx);
}
/*--------------------------------------------------------------------*/
* If that queue is empty, flip queues holding the lock
* and try the new unlocked queue.
*/
- AZ(pthread_mutex_lock(&ses_mem_mtx));
+ LOCK(&ses_mem_mtx);
ses_qp = 1 - ses_qp;
- AZ(pthread_mutex_unlock(&ses_mem_mtx));
+ UNLOCK(&ses_mem_mtx);
sm = TAILQ_FIRST(&ses_free_mem[ses_qp]);
}
if (sm != NULL) {
VSL_stats->n_sess_mem--;
free(sm);
} else {
- AZ(pthread_mutex_lock(&ses_mem_mtx));
+ LOCK(&ses_mem_mtx);
TAILQ_INSERT_HEAD(&ses_free_mem[1 - ses_qp], sm, list);
- AZ(pthread_mutex_unlock(&ses_mem_mtx));
+ UNLOCK(&ses_mem_mtx);
}
}
{
struct VCL_conf *vc;
- AZ(pthread_mutex_lock(&vcl_mtx));
+ LOCK(&vcl_mtx);
assert(vcl_active != NULL);
vc = vcl_active->conf;
assert(vc != NULL);
vc->busy++;
- AZ(pthread_mutex_unlock(&vcl_mtx));
+ UNLOCK(&vcl_mtx);
return (vc);
}
{
struct vcls *vcl;
- AZ(pthread_mutex_lock(&vcl_mtx));
+ LOCK(&vcl_mtx);
assert(vc->busy > 0);
vc->busy--;
vcl = vc->priv; /* XXX miniobj */
} else {
vcl = NULL;
}
- AZ(pthread_mutex_unlock(&vcl_mtx));
+ UNLOCK(&vcl_mtx);
if (vcl != NULL) {
/* XXX: dispose of vcl */
}
vcl->name = strdup(name);
assert(vcl->name != NULL);
TAILQ_INSERT_TAIL(&vcl_head, vcl, list);
- AZ(pthread_mutex_lock(&vcl_mtx));
+ LOCK(&vcl_mtx);
if (vcl_active == NULL)
vcl_active = vcl;
- AZ(pthread_mutex_unlock(&vcl_mtx));
+ UNLOCK(&vcl_mtx);
if (cli == NULL)
fprintf(stderr, "Loaded \"%s\" as \"%s\"\n", fn , name);
else
cli_out(cli, "VCL %s already discarded", av[2]);
return;
}
- AZ(pthread_mutex_lock(&vcl_mtx));
+ LOCK(&vcl_mtx);
if (vcl == vcl_active) {
- AZ(pthread_mutex_unlock(&vcl_mtx));
+ UNLOCK(&vcl_mtx);
cli_result(cli, CLIS_PARAM);
cli_out(cli, "VCL %s is the active VCL", av[2]);
return;
TAILQ_REMOVE(&vcl_head, vcl, list);
else
vcl = NULL;
- AZ(pthread_mutex_unlock(&vcl_mtx));
+ UNLOCK(&vcl_mtx);
if (vcl != NULL) {
/* XXX dispose of vcl */
}
cli_result(cli, CLIS_PARAM);
return;
}
- AZ(pthread_mutex_lock(&vcl_mtx));
+ LOCK(&vcl_mtx);
vcl_active = vcl;
- AZ(pthread_mutex_unlock(&vcl_mtx));
+ UNLOCK(&vcl_mtx);
}
/*--------------------------------------------------------------------*/
he2 = NULL;
for (r = 0; r < 2; r++ ) {
- AZ(pthread_mutex_lock(&hp->mtx));
+ LOCK(&hp->mtx);
TAILQ_FOREACH(he, &hp->head, list) {
CHECK_OBJ_NOTNULL(he, HCL_ENTRY_MAGIC);
if (kl < he->klen)
continue;
he->refcnt++;
noh = he->oh;
- AZ(pthread_mutex_unlock(&hp->mtx));
+ UNLOCK(&hp->mtx);
if (he2 != NULL)
free(he2);
return (noh);
}
if (noh == NULL) {
- AZ(pthread_mutex_unlock(&hp->mtx));
+ UNLOCK(&hp->mtx);
return (NULL);
}
if (he2 != NULL) {
TAILQ_INSERT_TAIL(&hp->head, he2, list);
he2->refcnt++;
noh = he2->oh;
- AZ(pthread_mutex_unlock(&hp->mtx));
+ UNLOCK(&hp->mtx);
return (noh);
}
- AZ(pthread_mutex_unlock(&hp->mtx));
+ UNLOCK(&hp->mtx);
i = sizeof *he2 + kl;
he2 = calloc(i, 1);
assert(he->refcnt > 0);
assert(he->hash < hcl_nhash);
assert(hp == &hcl_head[he->hash]);
- AZ(pthread_mutex_lock(&hp->mtx));
+ LOCK(&hp->mtx);
if (--he->refcnt == 0)
TAILQ_REMOVE(&hp->head, he, list);
else
he = NULL;
- AZ(pthread_mutex_unlock(&hp->mtx));
+ UNLOCK(&hp->mtx);
if (he == NULL)
return (1);
free(he);
struct hsl_entry *he, *he2;
int i;
- AZ(pthread_mutex_lock(&hsl_mutex));
+ LOCK(&hsl_mutex);
TAILQ_FOREACH(he, &hsl_head, list) {
i = strcmp(key1, he->key1);
if (i < 0)
he->refcnt++;
nobj = he->obj;
nobj->hashpriv = he;
- AZ(pthread_mutex_unlock(&hsl_mutex));
+ UNLOCK(&hsl_mutex);
return (nobj);
}
if (nobj == NULL) {
- AZ(pthread_mutex_unlock(&hsl_mutex));
+ UNLOCK(&hsl_mutex);
return (NULL);
}
he2 = calloc(sizeof *he2, 1);
TAILQ_INSERT_BEFORE(he, he2, list);
else
TAILQ_INSERT_TAIL(&hsl_head, he2, list);
- AZ(pthread_mutex_unlock(&hsl_mutex));
+ UNLOCK(&hsl_mutex);
return (nobj);
}
assert(obj->hashpriv != NULL);
he = obj->hashpriv;
- AZ(pthread_mutex_lock(&hsl_mutex));
+ LOCK(&hsl_mutex);
if (--he->refcnt == 0) {
free(he->key1);
free(he->key2);
ret = 0;
} else
ret = 1;
- AZ(pthread_mutex_unlock(&hsl_mutex));
+ UNLOCK(&hsl_mutex);
return (ret);
}
}
/* Only hold the lock while we find our space */
- AZ(pthread_mutex_lock(&vsl_mtx));
+ LOCK(&vsl_mtx);
assert(loghead->ptr < loghead->size);
/* Wrap if necessary */
loghead->ptr += 5 + l;
p[5 + l] = SLT_ENDMARKER;
assert(loghead->ptr < loghead->size);
- AZ(pthread_mutex_unlock(&vsl_mtx));
+ UNLOCK(&vsl_mtx);
p[1] = l & 0xff;
p[2] = (id >> 8) & 0xff;
return;
}
- AZ(pthread_mutex_lock(&vsl_mtx));
+ LOCK(&vsl_mtx);
assert(loghead->ptr < loghead->size);
/* Wrap if we cannot fit a full size record */
loghead->ptr += 5 + n;
assert(loghead->ptr < loghead->size);
- AZ(pthread_mutex_unlock(&vsl_mtx));
+ UNLOCK(&vsl_mtx);
va_end(ap);
}
size += (sc->pagesize - 1);
size &= ~(sc->pagesize - 1);
- AZ(pthread_mutex_lock(&sc->mtx));
+ LOCK(&sc->mtx);
smf = alloc_smf(sc, size);
CHECK_OBJ_NOTNULL(smf, SMF_MAGIC);
- AZ(pthread_mutex_unlock(&sc->mtx));
+ UNLOCK(&sc->mtx);
assert(smf != NULL);
assert(smf->size == size);
smf->s.space = size;
size += (sc->pagesize - 1);
size &= ~(sc->pagesize - 1);
if (smf->size > size) {
- AZ(pthread_mutex_lock(&sc->mtx));
+ LOCK(&sc->mtx);
trim_smf(smf, size);
assert(smf->size == size);
- AZ(pthread_mutex_unlock(&sc->mtx));
+ UNLOCK(&sc->mtx);
smf->s.space = size;
}
}
CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
CAST_OBJ_NOTNULL(smf, s->priv, SMF_MAGIC);
sc = smf->sc;
- AZ(pthread_mutex_lock(&sc->mtx));
+ LOCK(&sc->mtx);
free_smf(smf);
- AZ(pthread_mutex_unlock(&sc->mtx));
+ UNLOCK(&sc->mtx);
}
/*--------------------------------------------------------------------*/
/*
 * Assertion convenience macros.  AN() asserts a pointer is non-NULL;
 * the XXX variants use xxxassert() instead of assert() (xxxassert is
 * defined elsewhere -- presumably it marks "error handling missing
 * here" rather than a true invariant; confirm against its definition).
 * The two added macros centralize the AZ(pthread_mutex_*) idiom used
 * throughout this patch: they take a pointer to a pthread mutex and,
 * via AZ() (defined elsewhere in this header), check that the pthread
 * call returned zero, i.e. succeeded.
 */
#define AN(foo) do { assert((foo) != NULL); } while (0)
#define XXXAZ(foo) do { xxxassert((foo) == 0); } while (0)
#define XXXAN(foo) do { xxxassert((foo) != NULL); } while (0)
+
+#define LOCK(foo) AZ(pthread_mutex_lock(foo))
+#define UNLOCK(foo) AZ(pthread_mutex_unlock(foo))