cache_http.c \
cache_httpconn.c \
cache_main.c \
+ cache_lck.c \
cache_panic.c \
cache_pipe.c \
cache_pool.c \
struct vrt_backend;
struct cli_proto;
struct ban;
+struct lock { void *priv; }; // Opaque: real mutex state lives behind priv, managed by cache_lck.c
/*--------------------------------------------------------------------*/
#define OBJHEAD_MAGIC 0x1b96615d
void *hashpriv;
- pthread_mutex_t mtx;
+ struct lock mtx;
VTAILQ_HEAD(,object) objects;
char *hash;
unsigned hashlen;
void THR_SetSession(const struct sess *sp);
const struct sess * THR_GetSession(void);
+/* cache_lck.c -- instrumented locking wrappers around pthread mutexes */
+
+/* Internal functions, call only through macros below */
+void Lck__Lock(struct lock *lck, const char *p, const char *f, int l);
+void Lck__Unlock(struct lock *lck, const char *p, const char *f, int l);
+int Lck__Trylock(struct lock *lck, const char *p, const char *f, int l);
+void Lck__New(struct lock *lck, const char *w);
+void Lck__Assert(struct lock *lck, int held);
+
+/* public interface: */
+void LCK_Init(void);
+void Lck_Delete(struct lock *lck);
+void Lck_CondWait(pthread_cond_t *cond, struct lock *lck);
+
+/*
+ * The macros pass caller location so contention/debug tracing can report
+ * where a lock operation happened.  NB: no trailing semicolon in the macro
+ * bodies -- call sites supply it, and a semicolon baked into the expansion
+ * would break unbraced "if (c) Lck_New(a); else ..." constructs.
+ */
+#define Lck_New(a) Lck__New(a, #a)
+#define Lck_Lock(a) Lck__Lock(a, __func__, __FILE__, __LINE__)
+#define Lck_Unlock(a) Lck__Unlock(a, __func__, __FILE__, __LINE__)
+#define Lck_Trylock(a) Lck__Trylock(a, __func__, __FILE__, __LINE__)
+#define Lck_AssertHeld(a) Lck__Assert(a, 1)
+#define Lck_AssertNotHeld(a) Lck__Assert(a, 0)
+
/* cache_panic.c */
void PAN_Init(void);
struct vsb *SMS_Makesynth(struct object *obj);
void SMS_Finish(struct object *obj);
-#define MTX pthread_mutex_t
-#define MTX_INIT(foo) AZ(pthread_mutex_init(foo, NULL))
-#define MTX_DESTROY(foo) AZ(pthread_mutex_destroy(foo))
-
-#ifdef __flexelint_v9__
-#define TRYLOCK(foo, r) \
-do { \
- (r) = pthread_mutex_trylock(foo); \
-} while (0)
-#define LOCK(foo) \
-do { \
- AZ(pthread_mutex_lock(foo)); \
-} while (0)
-#define UNLOCK(foo) \
-do { \
- AZ(pthread_mutex_unlock(foo)); \
-} while (0)
-
-#else
-#define TRYLOCK(foo, r) \
-do { \
- (r) = pthread_mutex_trylock(foo); \
- assert(r == 0 || r == EBUSY); \
- if (params->diag_bitmap & 0x8) { \
- VSL(SLT_Debug, 0, \
- "MTX_TRYLOCK(%s,%s,%d," #foo ") = %d", \
- __func__, __FILE__, __LINE__, (r)); \
- } \
-} while (0)
-#define LOCK(foo) \
-do { \
- if (!(params->diag_bitmap & 0x18)) { \
- AZ(pthread_mutex_lock(foo)); \
- } else { \
- int ixjd = pthread_mutex_trylock(foo); \
- assert(ixjd == 0 || ixjd == EBUSY); \
- if (ixjd) { \
- VSL(SLT_Debug, 0, \
- "MTX_CONTEST(%s,%s,%d," #foo ")", \
- __func__, __FILE__, __LINE__); \
- AZ(pthread_mutex_lock(foo)); \
- } else if (params->diag_bitmap & 0x8) { \
- VSL(SLT_Debug, 0, \
- "MTX_LOCK(%s,%s,%d," #foo ")", \
- __func__, __FILE__, __LINE__); \
- } \
- } \
-} while (0)
-#define UNLOCK(foo) \
-do { \
- AZ(pthread_mutex_unlock(foo)); \
- if (params->diag_bitmap & 0x8) \
- VSL(SLT_Debug, 0, \
- "MTX_UNLOCK(%s,%s,%d," #foo ")", \
- __func__, __FILE__, __LINE__); \
-} while (0)
-#endif
-
-#if defined(HAVE_PTHREAD_MUTEX_ISOWNED_NP)
-#define ALOCKED(mutex) AN(pthread_mutex_isowned_np((mutex)))
-#elif defined(DIAGNOSTICS)
-#define ALOCKED(mutex) AN(pthread_mutex_trylock((mutex)))
-#else
-#define ALOCKED(mutex) (void)(mutex)
-#endif
-
/*
* A normal pointer difference is signed, but we never want a negative value
* so this little tool will make sure we don't get that.
struct bereq *bereq;
volatile unsigned len;
- LOCK(&VBE_mtx);
+ Lck_Lock(&VBE_mtx);
bereq = VTAILQ_FIRST(&bereq_head);
if (bereq != NULL)
VTAILQ_REMOVE(&bereq_head, bereq, list);
- UNLOCK(&VBE_mtx);
+ Lck_Unlock(&VBE_mtx);
if (bereq != NULL) {
CHECK_OBJ(bereq, BEREQ_MAGIC);
} else {
CHECK_OBJ_NOTNULL(bereq, BEREQ_MAGIC);
WS_Reset(bereq->ws, NULL);
- LOCK(&VBE_mtx);
+ Lck_Lock(&VBE_mtx);
VTAILQ_INSERT_HEAD(&bereq_head, bereq, list);
- UNLOCK(&VBE_mtx);
+ Lck_Unlock(&VBE_mtx);
}
/*--------------------------------------------------------------------
vc = VTAILQ_FIRST(&vbe_conns);
if (vc != NULL) {
- LOCK(&VBE_mtx);
+ Lck_Lock(&VBE_mtx);
vc = VTAILQ_FIRST(&vbe_conns);
if (vc != NULL) {
VSL_stats->backend_unused--;
VTAILQ_REMOVE(&vbe_conns, vc, list);
}
- UNLOCK(&VBE_mtx);
+ Lck_Unlock(&VBE_mtx);
}
if (vc != NULL)
return (vc);
assert(vc->fd < 0);
if (params->cache_vbe_conns) {
- LOCK(&VBE_mtx);
+ Lck_Lock(&VBE_mtx);
VTAILQ_INSERT_HEAD(&vbe_conns, vc, list);
VSL_stats->backend_unused++;
- UNLOCK(&VBE_mtx);
+ Lck_Unlock(&VBE_mtx);
} else {
VSL_stats->n_vbe_conn--;
free(vc);
{
int s;
- LOCK(&bp->mtx);
+ Lck_Lock(&bp->mtx);
bp->refcount++;
bp->n_conn++; /* It mostly works */
- UNLOCK(&bp->mtx);
+ Lck_Unlock(&bp->mtx);
s = -1;
assert(bp->ipv6 != NULL || bp->ipv4 != NULL);
s = VBE_TryConnect(sp, PF_INET6, bp->ipv6, bp->ipv6len, bp);
if (s < 0) {
- LOCK(&bp->mtx);
+ Lck_Lock(&bp->mtx);
bp->n_conn--;
bp->refcount--; /* Only keep ref on success */
- UNLOCK(&bp->mtx);
+ Lck_Unlock(&bp->mtx);
}
return (s);
}
/* first look for vbe_conn's we can recycle */
while (1) {
- LOCK(&bp->mtx);
+ Lck_Lock(&bp->mtx);
vc = VTAILQ_FIRST(&bp->connlist);
if (vc != NULL) {
bp->refcount++;
assert(vc->fd >= 0);
VTAILQ_REMOVE(&bp->connlist, vc, list);
}
- UNLOCK(&bp->mtx);
+ Lck_Unlock(&bp->mtx);
if (vc == NULL)
break;
if (VBE_CheckFd(vc->fd)) {
bp = sp->vbe->backend;
WSL(sp->wrk, SLT_BackendReuse, sp->vbe->fd, "%s", bp->vcl_name);
- LOCK(&bp->mtx);
+ Lck_Lock(&bp->mtx);
VSL_stats->backend_recycle++;
VTAILQ_INSERT_HEAD(&bp->connlist, sp->vbe, list);
sp->vbe = NULL;
VTAILQ_ENTRY(backend) list;
int refcount;
- pthread_mutex_t mtx;
+ struct lock mtx;
struct sockaddr *ipv4;
socklen_t ipv4len;
struct vbe_conn *VBE_GetVbe(struct sess *sp, struct backend *bp);
/* cache_backend_cfg.c */
-extern MTX VBE_mtx;
+extern struct lock VBE_mtx;
void VBE_DropRefConn(struct backend *);
void VBE_DropRef(struct backend *);
void VBE_DropRefLocked(struct backend *b);
#include "cache_backend.h"
#include "cli_priv.h"
-MTX VBE_mtx;
+struct lock VBE_mtx;
/*
* The list of backends is not locked, it is only ever accessed from
assert(b->refcount > 0);
i = --b->refcount;
- UNLOCK(&b->mtx);
+ Lck_Unlock(&b->mtx);
if (i > 0)
return;
CHECK_OBJ_NOTNULL(b, BACKEND_MAGIC);
- LOCK(&b->mtx);
+ Lck_Lock(&b->mtx);
VBE_DropRefLocked(b);
}
CHECK_OBJ_NOTNULL(b, BACKEND_MAGIC);
- LOCK(&b->mtx);
+ Lck_Lock(&b->mtx);
assert(b->n_conn > 0);
b->n_conn--;
VBE_DropRefLocked(b);
/* Create new backend */
ALLOC_OBJ(b, BACKEND_MAGIC);
XXXAN(b);
- MTX_INIT(&b->mtx);
+ Lck_New(&b->mtx);
b->refcount = 1;
VTAILQ_INIT(&b->connlist);
VBE_Init(void)
{
- MTX_INIT(&VBE_mtx);
+ Lck_New(&VBE_mtx);
CLI_AddFuncs(DEBUG_CLI, debug_cmds);
}
};
static VTAILQ_HEAD(banhead,ban) ban_head = VTAILQ_HEAD_INITIALIZER(ban_head);
-static MTX ban_mtx;
+static struct lock ban_mtx;
/*
* We maintain ban_start as a pointer to the first element of the list
b->hash = hash;
b->ban = strdup(regexp);
AN(b->ban);
- LOCK(&ban_mtx);
+ Lck_Lock(&ban_mtx);
VTAILQ_INSERT_HEAD(&ban_head, b, list);
ban_start = b;
VSL_stats->n_purge++;
be->refcount++;
} else
be = NULL;
- UNLOCK(&ban_mtx);
+ Lck_Unlock(&ban_mtx);
if (be == NULL)
return (0);
bi->flags |= BAN_F_GONE;
pcount++;
}
- LOCK(&ban_mtx);
+ Lck_Lock(&ban_mtx);
be->refcount--;
/* XXX: We should check if the tail can be removed */
VSL_stats->n_purge_dups += pcount;
- UNLOCK(&ban_mtx);
+ Lck_Unlock(&ban_mtx);
return (0);
}
CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
AZ(o->ban);
- LOCK(&ban_mtx);
+ Lck_Lock(&ban_mtx);
o->ban = ban_start;
ban_start->refcount++;
- UNLOCK(&ban_mtx);
+ Lck_Unlock(&ban_mtx);
}
void
if (o->ban == NULL)
return;
CHECK_OBJ_NOTNULL(o->ban, BAN_MAGIC);
- LOCK(&ban_mtx);
+ Lck_Lock(&ban_mtx);
o->ban->refcount--;
o->ban = NULL;
} else {
b = NULL;
}
- UNLOCK(&ban_mtx);
+ Lck_Unlock(&ban_mtx);
if (b != NULL) {
free(b->ban);
regfree(&b->regexp);
break;
}
- LOCK(&ban_mtx);
+ Lck_Lock(&ban_mtx);
o->ban->refcount--;
if (b == o->ban) /* not banned */
b0->refcount++;
VSL_stats->n_purge_obj_test++;
VSL_stats->n_purge_re_test += tests;
- UNLOCK(&ban_mtx);
+ Lck_Unlock(&ban_mtx);
if (b == o->ban) { /* not banned */
o->ban = b0;
BAN_Init(void)
{
- MTX_INIT(&ban_mtx);
+ Lck_New(&ban_mtx);
CLI_AddFuncs(PUBLIC_CLI, ban_cmds);
/* Add an initial ban, since the list can never be empty */
(void)BAN_Add(NULL, ".", 0);
#include "vlu.h"
#include "vsb.h"
-pthread_t cli_thread;
-static MTX cli_mtx;
+pthread_t cli_thread;
+static struct lock cli_mtx;
/*
* The CLI commandlist is split in three:
case DEBUG_CLI: cp = &ccf_debug_cli; break;
default: INCOMPL();
}
- LOCK(&cli_mtx);
+ Lck_Lock(&cli_mtx);
c = cli_concat(*cp, p);
AN(c);
free(*cp);
*cp = c;
- UNLOCK(&cli_mtx);
+ Lck_Unlock(&cli_mtx);
}
/*--------------------------------------------------------------------
VCL_Poll();
VBE_Poll();
vsb_clear(cli->sb);
- LOCK(&cli_mtx);
+ Lck_Lock(&cli_mtx);
cli_dispatch(cli, ccf_master_cli, p);
if (cli->result == CLIS_UNKNOWN) {
vsb_clear(cli->sb);
cli->result = CLIS_OK;
cli_dispatch(cli, ccf_debug_cli, p);
}
- UNLOCK(&cli_mtx);
+ Lck_Unlock(&cli_mtx);
vsb_finish(cli->sb);
AZ(vsb_overflowed(cli->sb));
i = cli_writeres(heritage.cli_out, cli);
CLI_Init(void)
{
- MTX_INIT(&cli_mtx);
+ Lck_New(&cli_mtx);
cli_thread = pthread_self();
CLI_AddFuncs(MASTER_CLI, master_cmds);
static pthread_t exp_thread;
static struct binheap *exp_heap;
-static MTX exp_mtx;
+static struct lock exp_mtx;
static VTAILQ_HEAD(,objexp) lru = VTAILQ_HEAD_INITIALIZER(lru);
/*
assert(o->entered != 0 && !isnan(o->entered));
oe->lru_stamp = o->entered;
update_object_when(o);
- LOCK(&exp_mtx);
+ Lck_Lock(&exp_mtx);
binheap_insert(exp_heap, oe);
assert(oe->timer_idx != BINHEAP_NOIDX);
VTAILQ_INSERT_TAIL(&lru, oe, list);
oe->on_lru = 1;
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);
}
/*--------------------------------------------------------------------
void
EXP_Touch(const struct object *o, double now)
{
- int i;
struct objexp *oe;
CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
CHECK_OBJ_NOTNULL(oe, OBJEXP_MAGIC);
if (oe->lru_stamp + params->lru_timeout > now)
return;
- TRYLOCK(&exp_mtx, i);
- if (i)
+ if (Lck_Trylock(&exp_mtx))
return;
if (oe->on_lru) {
VTAILQ_REMOVE(&lru, oe, list);
oe->lru_stamp = now;
VSL_stats->n_lru_moved++;
}
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);
}
/*--------------------------------------------------------------------
return;
CHECK_OBJ_NOTNULL(oe, OBJEXP_MAGIC);
update_object_when(o);
- LOCK(&exp_mtx);
+ Lck_Lock(&exp_mtx);
assert(oe->timer_idx != BINHEAP_NOIDX);
binheap_delete(exp_heap, oe->timer_idx); /* XXX: binheap_shuffle() ? */
assert(oe->timer_idx == BINHEAP_NOIDX);
binheap_insert(exp_heap, oe);
assert(oe->timer_idx != BINHEAP_NOIDX);
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);
}
VCL_Get(&sp->vcl);
t = TIM_real();
while (1) {
- LOCK(&exp_mtx);
+ Lck_Lock(&exp_mtx);
oe = binheap_root(exp_heap);
CHECK_OBJ_ORNULL(oe, OBJEXP_MAGIC);
if (oe == NULL || oe->timer_when > t) { /* XXX: > or >= ? */
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);
WSL_Flush(&ww, 0);
AZ(sleep(1));
VCL_Refresh(&sp->vcl);
}
assert(oe->on_lru);
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);
WSL(&ww, SLT_ExpPick, 0, "%u %s", o->xid, oe->timer_what);
o->xid);
}
update_object_when(o);
- LOCK(&exp_mtx);
+ Lck_Lock(&exp_mtx);
binheap_insert(exp_heap, oe);
assert(oe->timer_idx != BINHEAP_NOIDX);
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);
} else {
assert(oe->timer_what == tmr_ttl);
sp->obj = o;
assert(sp->handling == VCL_RET_DISCARD);
WSL(&ww, SLT_ExpKill, 0,
"%u %d", o->xid, (int)(o->ttl - t));
- LOCK(&exp_mtx);
+ Lck_Lock(&exp_mtx);
VTAILQ_REMOVE(&lru, o->objexp, list);
oe->on_lru = 0;
VSL_stats->n_expired++;
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);
del_objexp(o);
HSH_Deref(o);
}
* NB: Checking refcount here is no guarantee that it does not gain
* another ref while we ponder its destiny without the lock held.
*/
- LOCK(&exp_mtx);
+ Lck_Lock(&exp_mtx);
VTAILQ_FOREACH(oe, &lru, list) {
CHECK_OBJ_NOTNULL(oe, OBJEXP_MAGIC);
if (oe->timer_idx == BINHEAP_NOIDX) /* exp_timer has it */
assert(oe->timer_idx == BINHEAP_NOIDX);
VSL_stats->n_lru_nuked++;
}
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);
if (oe == NULL)
return (-1);
assert(sp->handling == VCL_RET_KEEP);
/* Insert in binheap and lru again */
- LOCK(&exp_mtx);
+ Lck_Lock(&exp_mtx);
VSL_stats->n_lru_nuked--; /* It was premature */
VSL_stats->n_lru_saved++;
binheap_insert(exp_heap, oe);
assert(oe->timer_idx != BINHEAP_NOIDX);
VTAILQ_INSERT_TAIL(&lru, oe, list);
oe->on_lru = 1;
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);
return (0);
}
EXP_Init(void)
{
- MTX_INIT(&exp_mtx);
+ Lck_New(&exp_mtx);
exp_heap = binheap_new(NULL, object_cmp, object_update);
XXXAN(exp_heap);
AZ(pthread_create(&exp_thread, NULL, exp_timer, NULL));
w->nobjhead->magic = OBJHEAD_MAGIC;
VTAILQ_INIT(&w->nobjhead->objects);
VTAILQ_INIT(&w->nobjhead->waitinglist);
- MTX_INIT(&w->nobjhead->mtx);
+ Lck_New(&w->nobjhead->mtx);
VSL_stats->n_objecthead++;
} else
CHECK_OBJ_NOTNULL(w->nobjhead, OBJHEAD_MAGIC);
oh = sp->objhead;
sp->objhead = NULL;
CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
- LOCK(&oh->mtx);
+ Lck_Lock(&oh->mtx);
} else {
oh = hash->lookup(sp, w->nobjhead);
CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
if (oh == w->nobjhead)
w->nobjhead = NULL;
- LOCK(&oh->mtx);
+ Lck_Lock(&oh->mtx);
}
busy_o = NULL;
o->refcnt++;
if (o->hits < INT_MAX)
o->hits++;
- UNLOCK(&oh->mtx);
+ Lck_Unlock(&oh->mtx);
if (params->log_hash)
WSP(sp, SLT_Hash, "%s", oh->hash);
(void)hash->deref(oh);
if (sp->esis == 0)
VTAILQ_INSERT_TAIL(&oh->waitinglist, sp, list);
sp->objhead = oh;
- UNLOCK(&oh->mtx);
+ Lck_Unlock(&oh->mtx);
return (NULL);
}
o->parent = grace_o;
grace_o->refcnt++;
}
- UNLOCK(&oh->mtx);
+ Lck_Unlock(&oh->mtx);
if (params->log_hash)
WSP(sp, SLT_Hash, "%s", oh->hash);
/*
oh = o->objhead;
if (oh != NULL) {
CHECK_OBJ(oh, OBJHEAD_MAGIC);
- LOCK(&oh->mtx);
+ Lck_Lock(&oh->mtx);
}
o->busy = 0;
if (oh != NULL)
if (parent != NULL)
parent->child = NULL;
if (oh != NULL)
- UNLOCK(&oh->mtx);
+ Lck_Unlock(&oh->mtx);
if (parent != NULL)
HSH_Deref(parent);
}
oh = o->objhead;
if (oh != NULL) {
CHECK_OBJ(oh, OBJHEAD_MAGIC);
- LOCK(&oh->mtx);
+ Lck_Lock(&oh->mtx);
}
assert(o->refcnt > 0);
o->refcnt++;
if (oh != NULL)
- UNLOCK(&oh->mtx);
+ Lck_Unlock(&oh->mtx);
}
void
CHECK_OBJ(oh, OBJHEAD_MAGIC);
/* drop ref on object */
- LOCK(&oh->mtx);
+ Lck_Lock(&oh->mtx);
}
assert(o->refcnt > 0);
r = --o->refcnt;
if (oh != NULL) {
if (!r)
VTAILQ_REMOVE(&oh->objects, o, list);
- UNLOCK(&oh->mtx);
+ Lck_Unlock(&oh->mtx);
}
/* If still referenced, done */
if (hash->deref(oh))
return;
assert(VTAILQ_EMPTY(&oh->objects));
- MTX_DESTROY(&oh->mtx);
+ Lck_Delete(&oh->mtx);
VSL_stats->n_objecthead--;
free(oh->hash);
FREE_OBJ(oh);
THR_SetName("cache-main");
+ VSL_Init(); /* First, LCK needs it. */
+
+ LCK_Init(); /* Locking, must be first */
+
PAN_Init();
CLI_Init();
Fetch_Init();
VBE_Init();
VBP_Init();
- VSL_Init();
WRK_Init();
EXP_Init();
struct wq {
unsigned magic;
#define WQ_MAGIC 0x606658fa
- MTX mtx;
+ struct lock mtx;
struct workerhead idle;
VTAILQ_HEAD(, workreq) overflow;
unsigned nthr;
static unsigned nthr_max;
static pthread_cond_t herder_cond;
-static MTX herder_mtx;
+static struct lock herder_mtx;
/*--------------------------------------------------------------------
* Write data to fd
VSL(SLT_WorkThread, 0, "%p start", w);
- LOCK(&qp->mtx);
+ Lck_Lock(&qp->mtx);
qp->nthr++;
while (1) {
CHECK_OBJ_NOTNULL(w, WORKER_MAGIC);
if (isnan(w->lastused))
w->lastused = TIM_real();
VTAILQ_INSERT_HEAD(&qp->idle, w, list);
- AZ(pthread_cond_wait(&w->cond, &qp->mtx));
+ Lck_CondWait(&w->cond, &qp->mtx);
}
if (w->wrq == NULL)
break;
- UNLOCK(&qp->mtx);
+ Lck_Unlock(&qp->mtx);
AN(w->wrq);
AN(w->wrq->func);
w->lastused = NAN;
w->wrq->func(w, w->wrq->priv);
w->wrq = NULL;
- LOCK(&qp->mtx);
+ Lck_Lock(&qp->mtx);
}
qp->nthr--;
- UNLOCK(&qp->mtx);
+ Lck_Unlock(&qp->mtx);
VSL(SLT_WorkThread, 0, "%p end", w);
if (w->vcl != NULL)
if (w->srcaddr != NULL)
free(w->srcaddr);
if (w->nobjhead != NULL) {
- MTX_DESTROY(&w->nobjhead->mtx);
+ Lck_Delete(&w->nobjhead->mtx);
FREE_OBJ(w->nobjhead);
}
if (w->nobj!= NULL)
qp = wq[onq];
nq = onq;
- LOCK(&qp->mtx);
+ Lck_Lock(&qp->mtx);
/* If there are idle threads, we tickle the first one into action */
w = VTAILQ_FIRST(&qp->idle);
if (w != NULL) {
VTAILQ_REMOVE(&qp->idle, w, list);
- UNLOCK(&qp->mtx);
+ Lck_Unlock(&qp->mtx);
w->wrq = wrq;
AZ(pthread_cond_signal(&w->cond));
return (0);
/* If we have too much in the overflow already, refuse. */
if (qp->nqueue > ovfl_max) {
qp->ndrop++;
- UNLOCK(&qp->mtx);
+ Lck_Unlock(&qp->mtx);
return (-1);
}
VTAILQ_INSERT_TAIL(&qp->overflow, wrq, list);
qp->noverflow++;
qp->nqueue++;
- UNLOCK(&qp->mtx);
+ Lck_Unlock(&qp->mtx);
AZ(pthread_cond_signal(&herder_cond));
return (0);
}
wq[u] = calloc(sizeof *wq[u], 1);
XXXAN(wq[u]);
wq[u]->magic = WQ_MAGIC;
- MTX_INIT(&wq[u]->mtx);
+ Lck_New(&wq[u]->mtx);
VTAILQ_INIT(&wq[u]->overflow);
VTAILQ_INIT(&wq[u]->idle);
}
{
struct worker *w = NULL;
- LOCK(&qp->mtx);
+ Lck_Lock(&qp->mtx);
vs->n_wrk += qp->nthr;
vs->n_wrk_queue += qp->nqueue;
vs->n_wrk_drop += qp->ndrop;
else
w = NULL;
}
- UNLOCK(&qp->mtx);
+ Lck_Unlock(&qp->mtx);
/* And give it a kiss on the cheek... */
if (w != NULL) {
* We cannot avoid getting a mutex, so we have a
* bogo mutex just for POSIX_STUPIDITY
*/
- AZ(pthread_mutex_lock(&herder_mtx));
- AZ(pthread_cond_wait(&herder_cond, &herder_mtx));
- AZ(pthread_mutex_unlock(&herder_mtx));
+ Lck_Lock(&herder_mtx);
+ Lck_CondWait(&herder_cond, &herder_mtx);
+ Lck_Unlock(&herder_mtx);
wrk_breed_flock(wq[u]);
}
}
pthread_t tp;
AZ(pthread_cond_init(&herder_cond, NULL));
- AZ(pthread_mutex_init(&herder_mtx, NULL));
+ Lck_New(&herder_mtx);
wrk_addpools(params->wthread_pools);
AZ(pthread_create(&tp, NULL, wrk_herdtimer_thread, NULL));
};
static unsigned ses_qp;
-static MTX ses_mem_mtx;
+static struct lock ses_mem_mtx;
/*--------------------------------------------------------------------*/
unsigned magic;
#define SRCADDRHEAD_MAGIC 0x38231a8b
VTAILQ_HEAD(,srcaddr) head;
- MTX mtx;
+ struct lock mtx;
} *srchash;
static unsigned nsrchash;
-static MTX stat_mtx;
+static struct lock stat_mtx;
/*--------------------------------------------------------------------
* Assign a srcaddr to this session.
XXXAN(sp->wrk->srcaddr);
}
- LOCK(&ch->mtx);
+ Lck_Lock(&ch->mtx);
c3 = NULL;
VTAILQ_FOREACH_SAFE(c, &ch->head, list, c2) {
if (c->hash == u && !strcmp(c->addr, sp->addr)) {
VTAILQ_REMOVE(&ch->head, c3, list);
VSL_stats->n_srcaddr--;
}
- UNLOCK(&ch->mtx);
+ Lck_Unlock(&ch->mtx);
if (c3 != NULL)
free(c3);
return;
VSL_stats->n_srcaddr_act++;
VTAILQ_INSERT_TAIL(&ch->head, c3, list);
sp->srcaddr = c3;
- UNLOCK(&ch->mtx);
+ Lck_Unlock(&ch->mtx);
}
/*--------------------------------------------------------------------*/
CHECK_OBJ(sp->srcaddr, SRCADDR_MAGIC);
ch = sp->srcaddr->sah;
CHECK_OBJ(ch, SRCADDRHEAD_MAGIC);
- LOCK(&ch->mtx);
+ Lck_Lock(&ch->mtx);
assert(sp->srcaddr->nref > 0);
sp->srcaddr->nref--;
if (sp->srcaddr->nref == 0)
VSL_stats->n_srcaddr_act--;
sp->srcaddr = NULL;
- UNLOCK(&ch->mtx);
+ Lck_Unlock(&ch->mtx);
}
/*--------------------------------------------------------------------*/
if (sp->srcaddr != NULL) {
/* XXX: only report once per second ? */
CHECK_OBJ(sp->srcaddr, SRCADDR_MAGIC);
- LOCK(&sp->srcaddr->sah->mtx);
+ Lck_Lock(&sp->srcaddr->sah->mtx);
ses_sum_acct(&sp->srcaddr->acct, a);
b = sp->srcaddr->acct;
- UNLOCK(&sp->srcaddr->sah->mtx);
+ Lck_Unlock(&sp->srcaddr->sah->mtx);
WSL(sp->wrk, SLT_StatAddr, 0,
"%s 0 %.0f %ju %ju %ju %ju %ju %ju %ju",
sp->srcaddr->addr, sp->t_end - b.first,
b.sess, b.req, b.pipe, b.pass,
b.fetch, b.hdrbytes, b.bodybytes);
}
- LOCK(&stat_mtx);
+ Lck_Lock(&stat_mtx);
#define ACCT(foo) VSL_stats->s_##foo += a->foo;
#include "acct_fields.h"
#undef ACCT
- UNLOCK(&stat_mtx);
+ Lck_Unlock(&stat_mtx);
memset(a, 0, sizeof *a);
}
* If that queue is empty, flip queues holding the lock
* and try the new unlocked queue.
*/
- LOCK(&ses_mem_mtx);
+ Lck_Lock(&ses_mem_mtx);
ses_qp = 1 - ses_qp;
- UNLOCK(&ses_mem_mtx);
+ Lck_Unlock(&ses_mem_mtx);
sm = VTAILQ_FIRST(&ses_free_mem[ses_qp]);
}
if (sm != NULL) {
VSL_stats->n_sess_mem--;
free(sm);
} else {
- LOCK(&ses_mem_mtx);
+ Lck_Lock(&ses_mem_mtx);
VTAILQ_INSERT_HEAD(&ses_free_mem[1 - ses_qp], sm, list);
- UNLOCK(&ses_mem_mtx);
+ Lck_Unlock(&ses_mem_mtx);
}
}
for (i = 0; i < nsrchash; i++) {
srchash[i].magic = SRCADDRHEAD_MAGIC;
VTAILQ_INIT(&srchash[i].head);
- MTX_INIT(&srchash[i].mtx);
+ Lck_New(&srchash[i].mtx);
}
- MTX_INIT(&stat_mtx);
- MTX_INIT(&ses_mem_mtx);
+ Lck_New(&stat_mtx);
+ Lck_New(&ses_mem_mtx);
}
VTAILQ_HEAD_INITIALIZER(vcl_head);
-static MTX vcl_mtx;
+static struct lock vcl_mtx;
static struct vcls *vcl_active; /* protected by vcl_mtx */
/*--------------------------------------------------------------------*/
VCL_Get(struct VCL_conf **vcc)
{
- LOCK(&vcl_mtx);
+ Lck_Lock(&vcl_mtx);
AN(vcl_active);
*vcc = vcl_active->conf;
AN(*vcc);
AZ((*vcc)->discard);
(*vcc)->busy++;
- UNLOCK(&vcl_mtx);
+ Lck_Unlock(&vcl_mtx);
}
void
vc = *vcc;
*vcc = NULL;
- LOCK(&vcl_mtx);
+ Lck_Lock(&vcl_mtx);
assert(vc->busy > 0);
vc->busy--;
/*
* We do not garbage collect discarded VCL's here, that happens
* in VCL_Poll() which is called from the CLI thread.
*/
- UNLOCK(&vcl_mtx);
+ Lck_Unlock(&vcl_mtx);
}
/*--------------------------------------------------------------------*/
}
REPLACE(vcl->name, name);
VTAILQ_INSERT_TAIL(&vcl_head, vcl, list);
- LOCK(&vcl_mtx);
+ Lck_Lock(&vcl_mtx);
if (vcl_active == NULL)
vcl_active = vcl;
- UNLOCK(&vcl_mtx);
+ Lck_Unlock(&vcl_mtx);
cli_out(cli, "Loaded \"%s\" as \"%s\"", fn , name);
vcl->conf->init_func(cli);
VSL_stats->n_vcl++;
cli_out(cli, "VCL '%s' unknown", av[2]);
return;
}
- LOCK(&vcl_mtx);
+ Lck_Lock(&vcl_mtx);
if (vcl == vcl_active) {
- UNLOCK(&vcl_mtx);
+ Lck_Unlock(&vcl_mtx);
cli_result(cli, CLIS_PARAM);
cli_out(cli, "VCL %s is the active VCL", av[2]);
return;
VSL_stats->n_vcl_discard++;
VSL_stats->n_vcl_avail--;
vcl->conf->discard = 1;
- UNLOCK(&vcl_mtx);
+ Lck_Unlock(&vcl_mtx);
if (vcl->conf->busy == 0)
VCL_Nuke(vcl);
}
cli_result(cli, CLIS_PARAM);
return;
}
- LOCK(&vcl_mtx);
+ Lck_Lock(&vcl_mtx);
vcl_active = vcl;
- UNLOCK(&vcl_mtx);
+ Lck_Unlock(&vcl_mtx);
}
/*--------------------------------------------------------------------*/
{
CLI_AddFuncs(MASTER_CLI, vcl_cmds);
- MTX_INIT(&vcl_mtx);
+ Lck_New(&vcl_mtx);
}
unsigned magic;
#define HCL_HEAD_MAGIC 0x0f327016
VTAILQ_HEAD(, hcl_entry) head;
- MTX mtx;
+ struct lock mtx;
};
static unsigned hcl_nhash = 16383;
for (u = 0; u < hcl_nhash; u++) {
VTAILQ_INIT(&hcl_head[u].head);
- MTX_INIT(&hcl_head[u].mtx);
+ Lck_New(&hcl_head[u].mtx);
hcl_head[u].magic = HCL_HEAD_MAGIC;
}
}
he2 = NULL;
for (r = 0; r < 2; r++ ) {
- LOCK(&hp->mtx);
+ Lck_Lock(&hp->mtx);
VTAILQ_FOREACH(he, &hp->head, list) {
CHECK_OBJ_NOTNULL(he, HCL_ENTRY_MAGIC);
if (sp->lhashptr < he->oh->hashlen)
break;
he->refcnt++;
roh = he->oh;
- UNLOCK(&hp->mtx);
+ Lck_Unlock(&hp->mtx);
/*
* If we loose the race, we need to clean up
* the work we did for our second attempt.
return (roh);
}
if (noh == NULL) {
- UNLOCK(&hp->mtx);
+ Lck_Unlock(&hp->mtx);
return (NULL);
}
if (he2 != NULL) {
VTAILQ_INSERT_TAIL(&hp->head, he2, list);
he2->refcnt++;
noh = he2->oh;
- UNLOCK(&hp->mtx);
+ Lck_Unlock(&hp->mtx);
return (noh);
}
- UNLOCK(&hp->mtx);
+ Lck_Unlock(&hp->mtx);
he2 = calloc(sizeof *he2, 1);
XXXAN(he2);
assert(he->refcnt > 0);
assert(he->hash < hcl_nhash);
assert(hp == &hcl_head[he->hash]);
- LOCK(&hp->mtx);
+ Lck_Lock(&hp->mtx);
if (--he->refcnt == 0)
VTAILQ_REMOVE(&hp->head, he, list);
else
he = NULL;
- UNLOCK(&hp->mtx);
+ Lck_Unlock(&hp->mtx);
if (he == NULL)
return (1);
free(he);
};
static VTAILQ_HEAD(, hsl_entry) hsl_head = VTAILQ_HEAD_INITIALIZER(hsl_head);
-static MTX hsl_mutex;
+static struct lock hsl_mtx;
/*--------------------------------------------------------------------
* The ->init method is called during process start and allows
hsl_start(void)
{
- MTX_INIT(&hsl_mutex);
+ Lck_New(&hsl_mtx);
}
/*--------------------------------------------------------------------
struct hsl_entry *he, *he2;
int i;
- LOCK(&hsl_mutex);
+ Lck_Lock(&hsl_mtx);
VTAILQ_FOREACH(he, &hsl_head, list) {
i = HSH_Compare(sp, he->obj);
if (i < 0)
break;
he->refcnt++;
nobj = he->obj;
- UNLOCK(&hsl_mutex);
+ Lck_Unlock(&hsl_mtx);
return (nobj);
}
if (nobj != NULL) {
else
VTAILQ_INSERT_TAIL(&hsl_head, he2, list);
}
- UNLOCK(&hsl_mutex);
+ Lck_Unlock(&hsl_mtx);
return (nobj);
}
AN(obj->hashpriv);
he = obj->hashpriv;
- LOCK(&hsl_mutex);
+ Lck_Lock(&hsl_mtx);
if (--he->refcnt == 0) {
VTAILQ_REMOVE(&hsl_head, he, list);
free(he);
ret = 0;
} else
ret = 1;
- UNLOCK(&hsl_mutex);
+ Lck_Unlock(&hsl_mtx);
return (ret);
}
struct varnish_stats *VSL_stats;
static struct shmloghead *loghead;
static unsigned char *logstart;
-static MTX vsl_mtx;
+static pthread_mutex_t vsl_mtx;
static void
assert(loghead->hdrsize == sizeof *loghead);
/* XXX more check sanity of loghead ? */
logstart = (unsigned char *)loghead + loghead->start;
- MTX_INIT(&vsl_mtx);
+ AZ(pthread_mutex_init(&vsl_mtx, NULL));
loghead->starttime = TIM_real();
loghead->panicstr[0] = '\0';
memset(VSL_stats, 0, sizeof *VSL_stats);
struct smfhead order;
struct smfhead free[NBUCKET];
struct smfhead used;
- MTX mtx;
+ struct lock mtx;
};
/*--------------------------------------------------------------------*/
/* XXX */
if (sum < MINPAGES * (off_t)getpagesize())
exit (2);
- MTX_INIT(&sc->mtx);
+ Lck_New(&sc->mtx);
VSL_stats->sm_bfree += sc->filesize;
}
assert(size > 0);
size += (sc->pagesize - 1);
size &= ~(sc->pagesize - 1);
- LOCK(&sc->mtx);
+ Lck_Lock(&sc->mtx);
VSL_stats->sm_nreq++;
smf = alloc_smf(sc, size);
if (smf == NULL) {
- UNLOCK(&sc->mtx);
+ Lck_Unlock(&sc->mtx);
return (NULL);
}
CHECK_OBJ_NOTNULL(smf, SMF_MAGIC);
VSL_stats->sm_nobj++;
VSL_stats->sm_balloc += smf->size;
VSL_stats->sm_bfree -= smf->size;
- UNLOCK(&sc->mtx);
+ Lck_Unlock(&sc->mtx);
CHECK_OBJ_NOTNULL(&smf->s, STORAGE_MAGIC); /*lint !e774 */
XXXAN(smf);
assert(smf->size == size);
size += (sc->pagesize - 1);
size &= ~(sc->pagesize - 1);
if (smf->size > size) {
- LOCK(&sc->mtx);
+ Lck_Lock(&sc->mtx);
VSL_stats->sm_balloc -= (smf->size - size);
VSL_stats->sm_bfree += (smf->size - size);
trim_smf(smf, size);
assert(smf->size == size);
- UNLOCK(&sc->mtx);
+ Lck_Unlock(&sc->mtx);
smf->s.space = size;
}
}
CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
CAST_OBJ_NOTNULL(smf, s->priv, SMF_MAGIC);
sc = smf->sc;
- LOCK(&sc->mtx);
+ Lck_Lock(&sc->mtx);
VSL_stats->sm_nobj--;
VSL_stats->sm_balloc -= smf->size;
VSL_stats->sm_bfree += smf->size;
free_smf(smf);
- UNLOCK(&sc->mtx);
+ Lck_Unlock(&sc->mtx);
}
/*--------------------------------------------------------------------*/
#include "stevedore.h"
static size_t sma_max = SIZE_MAX;
-static MTX sma_mtx;
+static struct lock sma_mtx;
struct sma {
struct storage s;
{
struct sma *sma;
- LOCK(&sma_mtx);
+ Lck_Lock(&sma_mtx);
VSL_stats->sma_nreq++;
if (VSL_stats->sma_nbytes + size > sma_max)
size = 0;
VSL_stats->sma_nbytes += size;
VSL_stats->sma_balloc += size;
}
- UNLOCK(&sma_mtx);
+ Lck_Unlock(&sma_mtx);
if (size == 0)
return (NULL);
CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
sma = s->priv;
assert(sma->sz == sma->s.space);
- LOCK(&sma_mtx);
+ Lck_Lock(&sma_mtx);
VSL_stats->sma_nobj--;
VSL_stats->sma_nbytes -= sma->sz;
VSL_stats->sma_bfree += sma->sz;
- UNLOCK(&sma_mtx);
+ Lck_Unlock(&sma_mtx);
free(sma->s.ptr);
free(sma);
}
sma = s->priv;
assert(sma->sz == sma->s.space);
if ((p = realloc(sma->s.ptr, size)) != NULL) {
- LOCK(&sma_mtx);
+ Lck_Lock(&sma_mtx);
VSL_stats->sma_nbytes -= (sma->sz - size);
VSL_stats->sma_bfree += sma->sz - size;
sma->sz = size;
- UNLOCK(&sma_mtx);
+ Lck_Unlock(&sma_mtx);
sma->s.ptr = p;
sma->s.space = size;
}
sma_open(const struct stevedore *st)
{
(void)st;
- AZ(pthread_mutex_init(&sma_mtx, NULL));
+ Lck_New(&sma_mtx);
}
struct stevedore sma_stevedore = {
#include "vsb.h"
#include "stevedore.h"
-static MTX sms_mtx;
+static struct lock sms_mtx;
static void
sms_free(struct storage *sto)
{
CHECK_OBJ_NOTNULL(sto, STORAGE_MAGIC);
- LOCK(&sms_mtx);
+ Lck_Lock(&sms_mtx);
VSL_stats->sms_nobj--;
VSL_stats->sms_nbytes -= sto->len;
VSL_stats->sms_bfree += sto->len;
- UNLOCK(&sms_mtx);
+ Lck_Unlock(&sms_mtx);
vsb_delete(sto->priv);
free(sto);
}
SMS_Init(void)
{
- AZ(pthread_mutex_init(&sms_mtx, NULL));
+ Lck_New(&sms_mtx);
}
static struct stevedore sms_stevedore = {
HSH_Freestore(obj);
obj->len = 0;
- LOCK(&sms_mtx);
+ Lck_Lock(&sms_mtx);
VSL_stats->sms_nreq++;
VSL_stats->sms_nobj++;
- UNLOCK(&sms_mtx);
+ Lck_Unlock(&sms_mtx);
sto = calloc(sizeof *sto, 1);
XXXAN(sto);
{
struct smu *smu;
- LOCK(&smu_mtx);
+ Lck_Lock(&smu_mtx);
VSL_stats->sma_nreq++;
if (VSL_stats->sma_nbytes + size > smu_max)
size = 0;
VSL_stats->sma_nbytes += size;
VSL_stats->sma_balloc += size;
}
- UNLOCK(&smu_mtx);
+ Lck_Unlock(&smu_mtx);
if (size == 0)
return (NULL);
CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
smu = s->priv;
assert(smu->sz == smu->s.space);
- LOCK(&smu_mtx);
+ Lck_Lock(&smu_mtx);
VSL_stats->sma_nobj--;
VSL_stats->sma_nbytes -= smu->sz;
VSL_stats->sma_bfree += smu->sz;
- UNLOCK(&smu_mtx);
+ Lck_Unlock(&smu_mtx);
umem_free(smu->s.ptr, smu->s.space);
umem_free(smu, sizeof *smu);
}
if ((p = umem_alloc(size, UMEM_DEFAULT)) != NULL) {
memcpy(p, smu->s.ptr, size);
umem_free(smu->s.ptr, smu->s.space);
- LOCK(&smu_mtx);
+ Lck_Lock(&smu_mtx);
VSL_stats->sma_nbytes -= (smu->sz - size);
VSL_stats->sma_bfree += smu->sz - size;
smu->sz = size;
- UNLOCK(&smu_mtx);
+ Lck_Unlock(&smu_mtx);
smu->s.ptr = p;
smu->s.space = size;
}