From a3689f88b536ab4eee2e5902335b084fdc999418 Mon Sep 17 00:00:00 2001
From: des
Date: Tue, 25 Sep 2007 08:48:14 +0000
Subject: [PATCH] The previous commit made things worse rather than better, as some systems have a <sys/queue.h> that differs in small but important details from what we expect. Replace our "queue.h" (which was taken from NetBSD) with a new "vqueue.h" which is based on FreeBSD's with the debugging taken out and a "V" prefix added to everything.

git-svn-id: svn+ssh://projects.linpro.no/svn/varnish/trunk@2033 d4fa192b-c00b-0410-8231-f00ffab90ce4
---
 varnish-cache/bin/varnishd/cache.h | 32 +-
 varnish-cache/bin/varnishd/cache_acceptor.c | 6 +-
 .../bin/varnishd/cache_acceptor_epoll.c | 10 +-
 .../bin/varnishd/cache_acceptor_kqueue.c | 12 +-
 .../bin/varnishd/cache_acceptor_poll.c | 10 +-
 varnish-cache/bin/varnishd/cache_backend.c | 24 +-
 .../bin/varnishd/cache_backend_random.c | 12 +-
 .../bin/varnishd/cache_backend_round_robin.c | 12 +-
 .../bin/varnishd/cache_backend_simple.c | 14 +-
 varnish-cache/bin/varnishd/cache_ban.c | 8 +-
 varnish-cache/bin/varnishd/cache_expire.c | 24 +-
 varnish-cache/bin/varnishd/cache_fetch.c | 18 +-
 varnish-cache/bin/varnishd/cache_hash.c | 24 +-
 varnish-cache/bin/varnishd/cache_pool.c | 24 +-
 varnish-cache/bin/varnishd/cache_response.c | 2 +-
 varnish-cache/bin/varnishd/cache_session.c | 34 +-
 varnish-cache/bin/varnishd/cache_synthetic.c | 2 +-
 varnish-cache/bin/varnishd/cache_vcl.c | 16 +-
 varnish-cache/bin/varnishd/hash_classic.c | 14 +-
 varnish-cache/bin/varnishd/hash_simple_list.c | 12 +-
 varnish-cache/bin/varnishd/heritage.h | 10 +-
 varnish-cache/bin/varnishd/mgt_child.c | 6 +-
 varnish-cache/bin/varnishd/mgt_event.c | 20 +-
 varnish-cache/bin/varnishd/mgt_event.h | 8 +-
 varnish-cache/bin/varnishd/mgt_param.c | 16 +-
 varnish-cache/bin/varnishd/mgt_vcc.c | 26 +-
 varnish-cache/bin/varnishd/stevedore.h | 6 +-
 varnish-cache/bin/varnishd/storage_file.c | 58 +-
 varnish-cache/bin/varnishd/varnishd.c | 2 +-
 .../bin/varnishreplay/varnishreplay.c | 22 +-
 varnish-cache/bin/varnishtop/varnishtop.c | 32 +-
 varnish-cache/configure.ac | 1 -
 varnish-cache/include/Makefile.am | 2 +-
 varnish-cache/include/queue.h | 676 ------------------
 varnish-cache/include/vqueue.h | 495 +++++++++++++
 varnish-cache/lib/libvcl/vcc_backend.c | 2 +-
 varnish-cache/lib/libvcl/vcc_compile.c | 56 +-
 varnish-cache/lib/libvcl/vcc_compile.h | 24 +-
 varnish-cache/lib/libvcl/vcc_token.c | 12 +-
 varnish-cache/lib/libvcl/vcc_xref.c | 40 +-
 40 files changed, 801 insertions(+), 1023 deletions(-)
 delete mode 100644 varnish-cache/include/queue.h
 create mode 100644 varnish-cache/include/vqueue.h
diff --git a/varnish-cache/bin/varnishd/cache.h b/varnish-cache/bin/varnishd/cache.h index bec6b2dc..499c2ef2 100644 --- a/varnish-cache/bin/varnishd/cache.h +++ b/varnish-cache/bin/varnishd/cache.h @@ -36,11 +36,7 @@ #include #include -#ifdef HAVE_SYS_QUEUE_H -#include <sys/queue.h> -#else -#include "queue.h" -#endif +#include "vqueue.h" #include "vsb.h" @@ -159,7 +155,7 @@ struct worker { int pipe[2]; - TAILQ_ENTRY(worker) list; + VTAILQ_ENTRY(worker) list; struct workreq *wrq; int *wfd; @@ -178,7 +174,7 @@ struct worker { }; struct workreq { - TAILQ_ENTRY(workreq) list; + VTAILQ_ENTRY(workreq) list; struct sess *sess; }; @@ -189,7 +185,7 @@ struct workreq { struct bereq { unsigned magic; #define BEREQ_MAGIC 0x3b6d250c - TAILQ_ENTRY(bereq) list; + VTAILQ_ENTRY(bereq) list; void *space; unsigned len; struct http http[1]; @@ -200,7 +196,7 @@ struct bereq { struct storage { unsigned magic; #define STORAGE_MAGIC 0x1a4e51c0 -
TAILQ_ENTRY(storage) list; + VTAILQ_ENTRY(storage) list; struct stevedore *stevedore; void *priv; @@ -243,13 +239,13 @@ struct object { double last_modified; struct http http; - TAILQ_ENTRY(object) list; + VTAILQ_ENTRY(object) list; - TAILQ_ENTRY(object) deathrow; + VTAILQ_ENTRY(object) deathrow; - TAILQ_HEAD(, storage) store; + VTAILQ_HEAD(, storage) store; - TAILQ_HEAD(, sess) waitinglist; + VTAILQ_HEAD(, sess) waitinglist; double lru_stamp; }; @@ -260,7 +256,7 @@ struct objhead { void *hashpriv; pthread_mutex_t mtx; - TAILQ_HEAD(,object) objects; + VTAILQ_HEAD(,object) objects; char *hash; unsigned hashlen; }; @@ -302,7 +298,7 @@ struct sess { int err_code; const char *err_reason; - TAILQ_ENTRY(sess) list; + VTAILQ_ENTRY(sess) list; struct backend *backend; struct bereq *bereq; @@ -328,7 +324,7 @@ struct sess { struct vbe_conn { unsigned magic; #define VBE_CONN_MAGIC 0x0c5e6592 - TAILQ_ENTRY(vbe_conn) list; + VTAILQ_ENTRY(vbe_conn) list; struct backend *backend; int fd; void *priv; @@ -361,7 +357,7 @@ struct backend { #define BACKEND_MAGIC 0x64c4c7c6 char *vcl_name; - TAILQ_ENTRY(backend) list; + VTAILQ_ENTRY(backend) list; int refcount; pthread_mutex_t mtx; @@ -377,7 +373,7 @@ struct backend { * NB: This list is not locked, it is only ever manipulated from the * cachers CLI thread. */ -TAILQ_HEAD(backendlist, backend); +VTAILQ_HEAD(backendlist, backend); /* Prototypes etc ----------------------------------------------------*/ diff --git a/varnish-cache/bin/varnishd/cache_acceptor.c b/varnish-cache/bin/varnishd/cache_acceptor.c index 0bc27882..c24d091a 100644 --- a/varnish-cache/bin/varnishd/cache_acceptor.c +++ b/varnish-cache/bin/varnishd/cache_acceptor.c @@ -145,7 +145,7 @@ vca_acct(void *arg) pfd = calloc(sizeof *pfd, heritage.nsocks); AN(pfd); i = 0; - TAILQ_FOREACH(ls, &heritage.socks, list) { + VTAILQ_FOREACH(ls, &heritage.socks, list) { AZ(setsockopt(ls->sock, SOL_SOCKET, SO_LINGER, &linger, sizeof linger)); pfd[i].events = POLLIN; @@ -157,7 +157,7 @@ vca_acct(void *arg) if (params->send_timeout != tv_sndtimeo.tv_sec) { need_test = 1; tv_sndtimeo.tv_sec = params->send_timeout; - TAILQ_FOREACH(ls, &heritage.socks, list) + VTAILQ_FOREACH(ls, &heritage.socks, list) AZ(setsockopt(ls->sock, SOL_SOCKET, SO_SNDTIMEO, &tv_sndtimeo, sizeof tv_sndtimeo)); @@ -165,7 +165,7 @@ vca_acct(void *arg) if (params->sess_timeout != tv_rcvtimeo.tv_sec) { need_test = 1; tv_rcvtimeo.tv_sec = params->sess_timeout; - TAILQ_FOREACH(ls, &heritage.socks, list) + VTAILQ_FOREACH(ls, &heritage.socks, list) AZ(setsockopt(ls->sock, SOL_SOCKET, SO_RCVTIMEO, &tv_rcvtimeo, sizeof tv_rcvtimeo)); diff --git a/varnish-cache/bin/varnishd/cache_acceptor_epoll.c b/varnish-cache/bin/varnishd/cache_acceptor_epoll.c index ec9fed1d..0b9dd32c 100644 --- a/varnish-cache/bin/varnishd/cache_acceptor_epoll.c +++ b/varnish-cache/bin/varnishd/cache_acceptor_epoll.c @@ -49,7 +49,7 @@ static pthread_t vca_epoll_thread; static int epfd = -1; -static TAILQ_HEAD(,sess) sesshead = TAILQ_HEAD_INITIALIZER(sesshead); +static VTAILQ_HEAD(,sess) sesshead = VTAILQ_HEAD_INITIALIZER(sesshead); static void vca_add(int fd, void *data) @@ -86,13 +86,13 @@ vca_main(void *arg) i = read(vca_pipes[0], &sp, sizeof sp); assert(i == sizeof sp); CHECK_OBJ_NOTNULL(sp, SESS_MAGIC); - TAILQ_INSERT_TAIL(&sesshead, sp, list); + VTAILQ_INSERT_TAIL(&sesshead, sp, list); vca_add(sp->fd, sp); } else { CAST_OBJ_NOTNULL(sp, ev.data.ptr, SESS_MAGIC); i = vca_pollsession(sp); if (i >= 0) { - TAILQ_REMOVE(&sesshead, sp, list); + VTAILQ_REMOVE(&sesshead, sp, list); if 
(sp->fd != -1) vca_del(sp->fd); if (i == 0) @@ -104,11 +104,11 @@ vca_main(void *arg) } /* check for timeouts */ deadline = TIM_real() - params->sess_timeout; - TAILQ_FOREACH_SAFE(sp, &sesshead, list, sp2) { + VTAILQ_FOREACH_SAFE(sp, &sesshead, list, sp2) { CHECK_OBJ_NOTNULL(sp, SESS_MAGIC); if (sp->t_open > deadline) continue; - TAILQ_REMOVE(&sesshead, sp, list); + VTAILQ_REMOVE(&sesshead, sp, list); vca_del(sp->fd); vca_close_session(sp, "timeout"); SES_Delete(sp); diff --git a/varnish-cache/bin/varnishd/cache_acceptor_kqueue.c b/varnish-cache/bin/varnishd/cache_acceptor_kqueue.c index 92edd145..42ac6ba9 100644 --- a/varnish-cache/bin/varnishd/cache_acceptor_kqueue.c +++ b/varnish-cache/bin/varnishd/cache_acceptor_kqueue.c @@ -51,7 +51,7 @@ static pthread_t vca_kqueue_thread; static int kq = -1; -static TAILQ_HEAD(,sess) sesshead = TAILQ_HEAD_INITIALIZER(sesshead); +static VTAILQ_HEAD(,sess) sesshead = VTAILQ_HEAD_INITIALIZER(sesshead); #define NKEV 100 @@ -88,7 +88,7 @@ vca_kev(struct kevent *kp) while (i >= sizeof ss[0]) { CHECK_OBJ_NOTNULL(ss[j], SESS_MAGIC); assert(ss[j]->fd >= 0); - TAILQ_INSERT_TAIL(&sesshead, ss[j], list); + VTAILQ_INSERT_TAIL(&sesshead, ss[j], list); vca_kq_sess(ss[j], EV_ADD); j++; i -= sizeof ss[0]; @@ -101,7 +101,7 @@ vca_kev(struct kevent *kp) i = vca_pollsession(sp); if (i == -1) return; - TAILQ_REMOVE(&sesshead, sp, list); + VTAILQ_REMOVE(&sesshead, sp, list); if (i == 0) { vca_kq_sess(sp, EV_DELETE); vca_handover(sp, i); @@ -110,7 +110,7 @@ vca_kev(struct kevent *kp) } return; } else if (kp->flags == EV_EOF) { - TAILQ_REMOVE(&sesshead, sp, list); + VTAILQ_REMOVE(&sesshead, sp, list); vca_close_session(sp, "EOF"); SES_Delete(sp); return; @@ -157,12 +157,12 @@ vca_kqueue_main(void *arg) continue; deadline = TIM_real() - params->sess_timeout; for (;;) { - sp = TAILQ_FIRST(&sesshead); + sp = VTAILQ_FIRST(&sesshead); if (sp == NULL) break; if (sp->t_open > deadline) break; - TAILQ_REMOVE(&sesshead, sp, list); + VTAILQ_REMOVE(&sesshead, sp, list); vca_close_session(sp, "timeout"); SES_Delete(sp); } diff --git a/varnish-cache/bin/varnishd/cache_acceptor_poll.c b/varnish-cache/bin/varnishd/cache_acceptor_poll.c index 1e2bda21..dc28f427 100644 --- a/varnish-cache/bin/varnishd/cache_acceptor_poll.c +++ b/varnish-cache/bin/varnishd/cache_acceptor_poll.c @@ -51,7 +51,7 @@ static pthread_t vca_poll_thread; static struct pollfd *pollfd; static unsigned npoll; -static TAILQ_HEAD(,sess) sesshead = TAILQ_HEAD_INITIALIZER(sesshead); +static VTAILQ_HEAD(,sess) sesshead = VTAILQ_HEAD_INITIALIZER(sesshead); /*--------------------------------------------------------------------*/ @@ -116,11 +116,11 @@ vca_main(void *arg) i = read(vca_pipes[0], &sp, sizeof sp); assert(i == sizeof sp); CHECK_OBJ_NOTNULL(sp, SESS_MAGIC); - TAILQ_INSERT_TAIL(&sesshead, sp, list); + VTAILQ_INSERT_TAIL(&sesshead, sp, list); vca_poll(sp->fd); } deadline = TIM_real() - params->sess_timeout; - TAILQ_FOREACH_SAFE(sp, &sesshead, list, sp2) { + VTAILQ_FOREACH_SAFE(sp, &sesshead, list, sp2) { if (v == 0) break; CHECK_OBJ_NOTNULL(sp, SESS_MAGIC); @@ -130,7 +130,7 @@ vca_main(void *arg) i = vca_pollsession(sp); if (i < 0) continue; - TAILQ_REMOVE(&sesshead, sp, list); + VTAILQ_REMOVE(&sesshead, sp, list); vca_unpoll(fd); if (i == 0) vca_handover(sp, i); @@ -140,7 +140,7 @@ vca_main(void *arg) } if (sp->t_open > deadline) continue; - TAILQ_REMOVE(&sesshead, sp, list); + VTAILQ_REMOVE(&sesshead, sp, list); vca_unpoll(fd); vca_close_session(sp, "timeout"); SES_Delete(sp); diff --git 
a/varnish-cache/bin/varnishd/cache_backend.c b/varnish-cache/bin/varnishd/cache_backend.c index 48572df8..ec12bf29 100644 --- a/varnish-cache/bin/varnishd/cache_backend.c +++ b/varnish-cache/bin/varnishd/cache_backend.c @@ -44,12 +44,12 @@ #include "shmlog.h" #include "cache.h" -static TAILQ_HEAD(,bereq) bereq_head = TAILQ_HEAD_INITIALIZER(bereq_head); -static TAILQ_HEAD(,vbe_conn) vbe_head = TAILQ_HEAD_INITIALIZER(vbe_head); +static VTAILQ_HEAD(,bereq) bereq_head = VTAILQ_HEAD_INITIALIZER(bereq_head); +static VTAILQ_HEAD(,vbe_conn) vbe_head = VTAILQ_HEAD_INITIALIZER(vbe_head); static MTX VBE_mtx; -struct backendlist backendlist = TAILQ_HEAD_INITIALIZER(backendlist); +struct backendlist backendlist = VTAILQ_HEAD_INITIALIZER(backendlist); /*-------------------------------------------------------------------- @@ -135,9 +135,9 @@ VBE_new_bereq(void) volatile unsigned len; LOCK(&VBE_mtx); - bereq = TAILQ_FIRST(&bereq_head); + bereq = VTAILQ_FIRST(&bereq_head); if (bereq != NULL) - TAILQ_REMOVE(&bereq_head, bereq, list); + VTAILQ_REMOVE(&bereq_head, bereq, list); UNLOCK(&VBE_mtx); if (bereq != NULL) { CHECK_OBJ(bereq, BEREQ_MAGIC); @@ -163,7 +163,7 @@ VBE_free_bereq(struct bereq *bereq) CHECK_OBJ_NOTNULL(bereq, BEREQ_MAGIC); LOCK(&VBE_mtx); - TAILQ_INSERT_HEAD(&bereq_head, bereq, list); + VTAILQ_INSERT_HEAD(&bereq_head, bereq, list); UNLOCK(&VBE_mtx); } @@ -174,13 +174,13 @@ VBE_NewConn(void) { struct vbe_conn *vc; - vc = TAILQ_FIRST(&vbe_head); + vc = VTAILQ_FIRST(&vbe_head); if (vc != NULL) { LOCK(&VBE_mtx); - vc = TAILQ_FIRST(&vbe_head); + vc = VTAILQ_FIRST(&vbe_head); if (vc != NULL) { VSL_stats->backend_unused--; - TAILQ_REMOVE(&vbe_head, vc, list); + VTAILQ_REMOVE(&vbe_head, vc, list); } else { VSL_stats->n_vbe_conn++; } @@ -206,7 +206,7 @@ VBE_ReleaseConn(struct vbe_conn *vc) assert(vc->backend == NULL); assert(vc->fd < 0); LOCK(&VBE_mtx); - TAILQ_INSERT_HEAD(&vbe_head, vc, list); + VTAILQ_INSERT_HEAD(&vbe_head, vc, list); VSL_stats->backend_unused++; UNLOCK(&VBE_mtx); } @@ -229,7 +229,7 @@ VBE_NewBackend(struct backend_method *method) b->last_check = TIM_mono(); b->minute_limit = 1; - TAILQ_INSERT_TAIL(&backendlist, b, list); + VTAILQ_INSERT_TAIL(&backendlist, b, list); return (b); } @@ -244,7 +244,7 @@ VBE_DropRefLocked(struct backend *b) i = --b->refcount; if (i == 0) - TAILQ_REMOVE(&backendlist, b, list); + VTAILQ_REMOVE(&backendlist, b, list); UNLOCK(&b->mtx); if (i) return; diff --git a/varnish-cache/bin/varnishd/cache_backend_random.c b/varnish-cache/bin/varnishd/cache_backend_random.c index edc10d07..8fa0e4f4 100644 --- a/varnish-cache/bin/varnishd/cache_backend_random.c +++ b/varnish-cache/bin/varnishd/cache_backend_random.c @@ -71,7 +71,7 @@ struct brspec { double dnsttl; double dnstime; unsigned dnsseq; - TAILQ_HEAD(, vbe_conn) connlist; + VTAILQ_HEAD(, vbe_conn) connlist; int health; }; @@ -255,12 +255,12 @@ ber_nextfd(struct sess *sp) while (1) { LOCK(&bp->mtx); - vc = TAILQ_FIRST(&bs->connlist); + vc = VTAILQ_FIRST(&bs->connlist); if (vc != NULL) { bp->refcount++; assert(vc->backend == bp); assert(vc->fd >= 0); - TAILQ_REMOVE(&bs->connlist, vc, list); + VTAILQ_REMOVE(&bs->connlist, vc, list); } UNLOCK(&bp->mtx); if (vc == NULL) @@ -343,7 +343,7 @@ ber_RecycleFd(struct worker *w, struct vbe_conn *vc) WSL(w, SLT_BackendReuse, vc->fd, "%s", vc->backend->vcl_name); LOCK(&vc->backend->mtx); VSL_stats->backend_recycle++; - TAILQ_INSERT_HEAD(&bs->connlist, vc, list); + VTAILQ_INSERT_HEAD(&bs->connlist, vc, list); VBE_DropRefLocked(vc->backend); } @@ -366,10 +366,10 @@ 
ber_Cleanup(struct backend *b) free(bs->hostname); freeaddrinfo(bs->addr); while (1) { - vbe = TAILQ_FIRST(&bs->connlist); + vbe = VTAILQ_FIRST(&bs->connlist); if (vbe == NULL) break; - TAILQ_REMOVE(&bs->connlist, vbe, list); + VTAILQ_REMOVE(&bs->connlist, vbe, list); if (vbe->fd >= 0) close(vbe->fd); free(vbe); diff --git a/varnish-cache/bin/varnishd/cache_backend_round_robin.c b/varnish-cache/bin/varnishd/cache_backend_round_robin.c index 36f4f842..001d2650 100644 --- a/varnish-cache/bin/varnishd/cache_backend_round_robin.c +++ b/varnish-cache/bin/varnishd/cache_backend_round_robin.c @@ -70,7 +70,7 @@ struct bspec { double dnsttl; double dnstime; unsigned dnsseq; - TAILQ_HEAD(, vbe_conn) connlist; + VTAILQ_HEAD(, vbe_conn) connlist; int health; }; @@ -241,12 +241,12 @@ brr_nextfd(struct sess *sp) while (1) { LOCK(&bp->mtx); - vc = TAILQ_FIRST(&bs->connlist); + vc = VTAILQ_FIRST(&bs->connlist); if (vc != NULL) { bp->refcount++; assert(vc->backend == bp); assert(vc->fd >= 0); - TAILQ_REMOVE(&bs->connlist, vc, list); + VTAILQ_REMOVE(&bs->connlist, vc, list); } UNLOCK(&bp->mtx); if (vc == NULL) @@ -328,7 +328,7 @@ brr_RecycleFd(struct worker *w, struct vbe_conn *vc) WSL(w, SLT_BackendReuse, vc->fd, "%s", vc->backend->vcl_name); LOCK(&vc->backend->mtx); VSL_stats->backend_recycle++; - TAILQ_INSERT_HEAD(&bs->connlist, vc, list); + VTAILQ_INSERT_HEAD(&bs->connlist, vc, list); VBE_DropRefLocked(vc->backend); } @@ -351,10 +351,10 @@ brr_Cleanup(struct backend *b) free(bs->hostname); freeaddrinfo(bs->addr); while (1) { - vbe = TAILQ_FIRST(&bs->connlist); + vbe = VTAILQ_FIRST(&bs->connlist); if (vbe == NULL) break; - TAILQ_REMOVE(&bs->connlist, vbe, list); + VTAILQ_REMOVE(&bs->connlist, vbe, list); if (vbe->fd >= 0) close(vbe->fd); free(vbe); diff --git a/varnish-cache/bin/varnishd/cache_backend_simple.c b/varnish-cache/bin/varnishd/cache_backend_simple.c index 2eb2ed92..869a741a 100644 --- a/varnish-cache/bin/varnishd/cache_backend_simple.c +++ b/varnish-cache/bin/varnishd/cache_backend_simple.c @@ -55,7 +55,7 @@ struct bes { double dnsttl; double dnstime; unsigned dnsseq; - TAILQ_HEAD(, vbe_conn) connlist; + VTAILQ_HEAD(, vbe_conn) connlist; }; /*-------------------------------------------------------------------- @@ -213,12 +213,12 @@ bes_nextfd(struct sess *sp) CAST_OBJ_NOTNULL(bes, bp->priv, BES_MAGIC); while (1) { LOCK(&bp->mtx); - vc = TAILQ_FIRST(&bes->connlist); + vc = VTAILQ_FIRST(&bes->connlist); if (vc != NULL) { bp->refcount++; assert(vc->backend == bp); assert(vc->fd >= 0); - TAILQ_REMOVE(&bes->connlist, vc, list); + VTAILQ_REMOVE(&bes->connlist, vc, list); } UNLOCK(&bp->mtx); if (vc == NULL) @@ -302,7 +302,7 @@ bes_RecycleFd(struct worker *w, struct vbe_conn *vc) WSL(w, SLT_BackendReuse, vc->fd, "%s", vc->backend->vcl_name); LOCK(&vc->backend->mtx); VSL_stats->backend_recycle++; - TAILQ_INSERT_HEAD(&bes->connlist, vc, list); + VTAILQ_INSERT_HEAD(&bes->connlist, vc, list); VBE_DropRefLocked(vc->backend); } @@ -320,10 +320,10 @@ bes_Cleanup(struct backend *b) free(bes->hostname); freeaddrinfo(bes->addr); while (1) { - vbe = TAILQ_FIRST(&bes->connlist); + vbe = VTAILQ_FIRST(&bes->connlist); if (vbe == NULL) break; - TAILQ_REMOVE(&bes->connlist, vbe, list); + VTAILQ_REMOVE(&bes->connlist, vbe, list); if (vbe->fd >= 0) AZ(close(vbe->fd)); FREE_OBJ(vbe); @@ -366,7 +366,7 @@ VRT_init_simple_backend(struct backend **bp, struct vrt_simple_backend *t) /* * Scan existing backends to see if we can recycle one of them. 
*/ - TAILQ_FOREACH(b, &backendlist, list) { + VTAILQ_FOREACH(b, &backendlist, list) { CHECK_OBJ_NOTNULL(b, BACKEND_MAGIC); if (b->method != &backend_method_simple) continue; diff --git a/varnish-cache/bin/varnishd/cache_ban.c b/varnish-cache/bin/varnishd/cache_ban.c index 7577aaf4..2cd7ffa8 100644 --- a/varnish-cache/bin/varnishd/cache_ban.c +++ b/varnish-cache/bin/varnishd/cache_ban.c @@ -42,14 +42,14 @@ #include "cache.h" struct ban { - TAILQ_ENTRY(ban) list; + VTAILQ_ENTRY(ban) list; unsigned gen; regex_t regexp; char *ban; int hash; }; -static TAILQ_HEAD(,ban) ban_head = TAILQ_HEAD_INITIALIZER(ban_head); +static VTAILQ_HEAD(,ban) ban_head = VTAILQ_HEAD_INITIALIZER(ban_head); static unsigned ban_next; static struct ban *ban_start; @@ -72,7 +72,7 @@ AddBan(const char *regexp, int hash) b->hash = hash; b->gen = ++ban_next; b->ban = strdup(regexp); - TAILQ_INSERT_HEAD(&ban_head, b, list); + VTAILQ_INSERT_HEAD(&ban_head, b, list); ban_start = b; } @@ -92,7 +92,7 @@ BAN_CheckObject(struct object *o, const char *url, const char *hash) b0 = ban_start; for (b = b0; b != NULL && b->gen > o->ban_seq; - b = TAILQ_NEXT(b, list)) { + b = VTAILQ_NEXT(b, list)) { i = regexec(&b->regexp, b->hash ? hash : url, 0, NULL, 0); if (!i) return (1); diff --git a/varnish-cache/bin/varnishd/cache_expire.c b/varnish-cache/bin/varnishd/cache_expire.c index de536256..f2c953e0 100644 --- a/varnish-cache/bin/varnishd/cache_expire.c +++ b/varnish-cache/bin/varnishd/cache_expire.c @@ -51,8 +51,8 @@ static pthread_t exp_thread; static struct binheap *exp_heap; static MTX exp_mtx; static unsigned expearly = 30; -static TAILQ_HEAD(,object) exp_deathrow = TAILQ_HEAD_INITIALIZER(exp_deathrow); -static TAILQ_HEAD(,object) exp_lru = TAILQ_HEAD_INITIALIZER(exp_lru); +static VTAILQ_HEAD(,object) exp_deathrow = VTAILQ_HEAD_INITIALIZER(exp_deathrow); +static VTAILQ_HEAD(,object) exp_lru = VTAILQ_HEAD_INITIALIZER(exp_lru); /* * This is a magic marker for the objects currently on the SIOP [look it up] @@ -71,7 +71,7 @@ EXP_Insert(struct object *o) assert(o->heap_idx == 0); LOCK(&exp_mtx); binheap_insert(exp_heap, o); - TAILQ_INSERT_TAIL(&exp_lru, o, deathrow); + VTAILQ_INSERT_TAIL(&exp_lru, o, deathrow); UNLOCK(&exp_mtx); } @@ -83,8 +83,8 @@ EXP_Touch(struct object *o, double now) if (o->lru_stamp + params->lru_timeout < now) { LOCK(&exp_mtx); /* XXX: should be ..._TRY */ if (o->heap_idx != lru_target && o->heap_idx != 0) { - TAILQ_REMOVE(&exp_lru, o, deathrow); - TAILQ_INSERT_TAIL(&exp_lru, o, deathrow); + VTAILQ_REMOVE(&exp_lru, o, deathrow); + VTAILQ_INSERT_TAIL(&exp_lru, o, deathrow); o->lru_stamp = now; } UNLOCK(&exp_mtx); @@ -119,7 +119,7 @@ exp_hangman(void *arg) t = TIM_real(); while (1) { LOCK(&exp_mtx); - TAILQ_FOREACH(o, &exp_deathrow, deathrow) { + VTAILQ_FOREACH(o, &exp_deathrow, deathrow) { CHECK_OBJ(o, OBJECT_MAGIC); if (o->ttl >= t) { o = NULL; @@ -139,7 +139,7 @@ exp_hangman(void *arg) t = TIM_real(); continue; } - TAILQ_REMOVE(&exp_deathrow, o, deathrow); + VTAILQ_REMOVE(&exp_deathrow, o, deathrow); VSL_stats->n_deathrow--; VSL_stats->n_expired++; UNLOCK(&exp_mtx); @@ -206,8 +206,8 @@ exp_prefetch(void *arg) if (sp->handling == VCL_RET_DISCARD) { LOCK(&exp_mtx); - TAILQ_REMOVE(&exp_lru, o, deathrow); - TAILQ_INSERT_TAIL(&exp_deathrow, o, deathrow); + VTAILQ_REMOVE(&exp_lru, o, deathrow); + VTAILQ_INSERT_TAIL(&exp_deathrow, o, deathrow); VSL_stats->n_deathrow++; UNLOCK(&exp_mtx); continue; @@ -252,7 +252,7 @@ EXP_NukeOne(struct sess *sp) /* Find the first currently unused object on the LRU */ LOCK(&exp_mtx); - 
TAILQ_FOREACH(o, &exp_lru, deathrow) + VTAILQ_FOREACH(o, &exp_lru, deathrow) if (o->refcnt == 1) break; if (o != NULL) { @@ -260,7 +260,7 @@ EXP_NukeOne(struct sess *sp) * Take it off the binheap while we chew. This effectively * means that we own the EXP refcnt on this object. */ - TAILQ_REMOVE(&exp_lru, o, deathrow); + VTAILQ_REMOVE(&exp_lru, o, deathrow); binheap_delete(exp_heap, o->heap_idx); assert(o->heap_idx == 0); o->heap_idx = lru_target; @@ -297,7 +297,7 @@ EXP_NukeOne(struct sess *sp) o->heap_idx = 0; o->lru_stamp = sp->wrk->used; binheap_insert(exp_heap, o); - TAILQ_INSERT_TAIL(&exp_lru, o, deathrow); + VTAILQ_INSERT_TAIL(&exp_lru, o, deathrow); UNLOCK(&exp_mtx); return (0); } diff --git a/varnish-cache/bin/varnishd/cache_fetch.c b/varnish-cache/bin/varnishd/cache_fetch.c index 47a94373..ec54afc6 100644 --- a/varnish-cache/bin/varnishd/cache_fetch.c +++ b/varnish-cache/bin/varnishd/cache_fetch.c @@ -61,7 +61,7 @@ fetch_straight(struct sess *sp, int fd, struct http *hp, const char *b) return (0); st = STV_alloc(sp, cl); - TAILQ_INSERT_TAIL(&sp->obj->store, st, list); + VTAILQ_INSERT_TAIL(&sp->obj->store, st, list); st->len = cl; sp->obj->len = cl; p = st->ptr; @@ -150,7 +150,7 @@ fetch_chunked(struct sess *sp, int fd, struct http *hp) if (u < params->fetch_chunksize * 1024) v = params->fetch_chunksize * 1024; st = STV_alloc(sp, v); - TAILQ_INSERT_TAIL(&sp->obj->store, st, list); + VTAILQ_INSERT_TAIL(&sp->obj->store, st, list); } v = st->space - st->len; if (v > u) @@ -195,7 +195,7 @@ fetch_chunked(struct sess *sp, int fd, struct http *hp) } if (st != NULL && st->len == 0) { - TAILQ_REMOVE(&sp->obj->store, st, list); + VTAILQ_REMOVE(&sp->obj->store, st, list); STV_free(st); } else if (st != NULL) STV_trim(st, st->len); @@ -225,7 +225,7 @@ fetch_eof(struct sess *sp, int fd, struct http *hp) while (1) { if (v == 0) { st = STV_alloc(sp, params->fetch_chunksize * 1024); - TAILQ_INSERT_TAIL(&sp->obj->store, st, list); + VTAILQ_INSERT_TAIL(&sp->obj->store, st, list); p = st->ptr + st->len; v = st->space - st->len; } @@ -243,7 +243,7 @@ fetch_eof(struct sess *sp, int fd, struct http *hp) } if (st->len == 0) { - TAILQ_REMOVE(&sp->obj->store, st, list); + VTAILQ_REMOVE(&sp->obj->store, st, list); STV_free(st); } else STV_trim(st, st->len); @@ -371,9 +371,9 @@ Fetch(struct sess *sp) CHECK_OBJ_NOTNULL(sp->backend, BACKEND_MAGIC); if (cls < 0) { - while (!TAILQ_EMPTY(&sp->obj->store)) { - st = TAILQ_FIRST(&sp->obj->store); - TAILQ_REMOVE(&sp->obj->store, st, list); + while (!VTAILQ_EMPTY(&sp->obj->store)) { + st = VTAILQ_FIRST(&sp->obj->store); + VTAILQ_REMOVE(&sp->obj->store, st, list); STV_free(st); } VBE_ClosedFd(sp->wrk, vc); @@ -386,7 +386,7 @@ Fetch(struct sess *sp) unsigned uu; uu = 0; - TAILQ_FOREACH(st, &sp->obj->store, list) + VTAILQ_FOREACH(st, &sp->obj->store, list) uu += st->len; assert(uu == sp->obj->len); } diff --git a/varnish-cache/bin/varnishd/cache_hash.c b/varnish-cache/bin/varnishd/cache_hash.c index eccb479f..68cff7a1 100644 --- a/varnish-cache/bin/varnishd/cache_hash.c +++ b/varnish-cache/bin/varnishd/cache_hash.c @@ -78,7 +78,7 @@ HSH_Prealloc(const struct sess *sp) w->nobjhead = calloc(sizeof *w->nobjhead, 1); XXXAN(w->nobjhead); w->nobjhead->magic = OBJHEAD_MAGIC; - TAILQ_INIT(&w->nobjhead->objects); + VTAILQ_INIT(&w->nobjhead->objects); MTX_INIT(&w->nobjhead->mtx); VSL_stats->n_objecthead++; } else @@ -90,8 +90,8 @@ HSH_Prealloc(const struct sess *sp) w->nobj->http.magic = HTTP_MAGIC; w->nobj->busy = 1; w->nobj->refcnt = 1; - TAILQ_INIT(&w->nobj->store); - 
TAILQ_INIT(&w->nobj->waitinglist); + VTAILQ_INIT(&w->nobj->store); + VTAILQ_INIT(&w->nobj->waitinglist); VSL_stats->n_object++; } else CHECK_OBJ_NOTNULL(w->nobj, OBJECT_MAGIC); @@ -102,9 +102,9 @@ HSH_Freestore(struct object *o) { struct storage *st, *stn; - TAILQ_FOREACH_SAFE(st, &o->store, list, stn) { + VTAILQ_FOREACH_SAFE(st, &o->store, list, stn) { CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC); - TAILQ_REMOVE(&o->store, st, list); + VTAILQ_REMOVE(&o->store, st, list); STV_free(st); } } @@ -186,10 +186,10 @@ HSH_Lookup(struct sess *sp) if (oh == w->nobjhead) w->nobjhead = NULL; LOCK(&oh->mtx); - TAILQ_FOREACH(o, &oh->objects, list) { + VTAILQ_FOREACH(o, &oh->objects, list) { o->refcnt++; if (o->busy) { - TAILQ_INSERT_TAIL(&o->waitinglist, sp, list); + VTAILQ_INSERT_TAIL(&o->waitinglist, sp, list); sp->obj = o; UNLOCK(&oh->mtx); return (NULL); @@ -221,7 +221,7 @@ HSH_Lookup(struct sess *sp) o = w->nobj; w->nobj = NULL; o->objhead = oh; - TAILQ_INSERT_TAIL(&oh->objects, o, list); + VTAILQ_INSERT_TAIL(&oh->objects, o, list); /* NB: do not deref objhead the new object inherits our reference */ UNLOCK(&oh->mtx); BAN_NewObj(o); @@ -253,10 +253,10 @@ HSH_Unbusy(struct object *o) if (oh != NULL) UNLOCK(&oh->mtx); while (1) { - sp = TAILQ_FIRST(&o->waitinglist); + sp = VTAILQ_FIRST(&o->waitinglist); if (sp == NULL) break; - TAILQ_REMOVE(&o->waitinglist, sp, list); + VTAILQ_REMOVE(&o->waitinglist, sp, list); WRK_QueueSession(sp); } } @@ -296,7 +296,7 @@ HSH_Deref(struct object *o) r = --o->refcnt; if (oh != NULL) { if (!r) - TAILQ_REMOVE(&oh->objects, o, list); + VTAILQ_REMOVE(&oh->objects, o, list); UNLOCK(&oh->mtx); } @@ -319,7 +319,7 @@ HSH_Deref(struct object *o) /* Drop our ref on the objhead */ if (hash->deref(oh)) return; - assert(TAILQ_EMPTY(&oh->objects)); + assert(VTAILQ_EMPTY(&oh->objects)); MTX_DESTROY(&oh->mtx); VSL_stats->n_objecthead--; FREE_OBJ(oh); diff --git a/varnish-cache/bin/varnishd/cache_pool.c b/varnish-cache/bin/varnishd/cache_pool.c index d66ed3c7..2a5de71a 100644 --- a/varnish-cache/bin/varnishd/cache_pool.c +++ b/varnish-cache/bin/varnishd/cache_pool.c @@ -57,7 +57,7 @@ #include "cli_priv.h" #include "cache.h" -TAILQ_HEAD(workerhead, worker); +VTAILQ_HEAD(workerhead, worker); /* Number of work requests queued in excess of worker threads available */ @@ -68,7 +68,7 @@ struct wq { }; static MTX tmtx; -static TAILQ_HEAD(, workreq) overflow = TAILQ_HEAD_INITIALIZER(overflow); +static VTAILQ_HEAD(, workreq) overflow = VTAILQ_HEAD_INITIALIZER(overflow); static struct wq **wq; static unsigned nwq; @@ -221,21 +221,21 @@ wrk_thread(void *priv) CHECK_OBJ_NOTNULL(w, WORKER_MAGIC); assert(!isnan(w->used)); - w->wrq = TAILQ_FIRST(&overflow); + w->wrq = VTAILQ_FIRST(&overflow); if (w->wrq != NULL) { LOCK(&tmtx); /* Process overflow requests, if any */ - w->wrq = TAILQ_FIRST(&overflow); + w->wrq = VTAILQ_FIRST(&overflow); if (w->wrq != NULL) { VSL_stats->n_wrk_queue--; - TAILQ_REMOVE(&overflow, w->wrq, list); + VTAILQ_REMOVE(&overflow, w->wrq, list); } UNLOCK(&tmtx); } if (w->wrq == NULL) { LOCK(&qp->mtx); - TAILQ_INSERT_HEAD(&qp->idle, w, list); + VTAILQ_INSERT_HEAD(&qp->idle, w, list); UNLOCK(&qp->mtx); assert(1 == read(w->pipe[0], &c, 1)); } @@ -286,9 +286,9 @@ WRK_QueueSession(struct sess *sp) LOCK(&qp->mtx); /* If there are idle threads, we tickle the first one into action */ - w = TAILQ_FIRST(&qp->idle); + w = VTAILQ_FIRST(&qp->idle); if (w != NULL) { - TAILQ_REMOVE(&qp->idle, w, list); + VTAILQ_REMOVE(&qp->idle, w, list); UNLOCK(&qp->mtx); w->wrq = &sp->workreq; assert(1 == 
write(w->pipe[1], w, 1)); @@ -321,7 +321,7 @@ WRK_QueueSession(struct sess *sp) * XXX: Not sure how though. Simply closing may be the better * XXX: compromise. */ - TAILQ_INSERT_TAIL(&overflow, &sp->workreq, list); + VTAILQ_INSERT_TAIL(&overflow, &sp->workreq, list); VSL_stats->n_wrk_overflow++; VSL_stats->n_wrk_queue++; /* Can we create more threads ? */ @@ -375,7 +375,7 @@ wrk_addpools(unsigned t) wq[u] = calloc(sizeof *wq[u], 1); XXXAN(wq[u]); MTX_INIT(&wq[u]->mtx); - TAILQ_INIT(&wq[u]->idle); + VTAILQ_INIT(&wq[u]->idle); } free(owq); nwq = t; @@ -401,11 +401,11 @@ wrk_reaperthread(void *priv) for (u = 0; u < nwq; u++) { qp = wq[u]; LOCK(&qp->mtx); - w = TAILQ_LAST(&qp->idle, workerhead); + w = VTAILQ_LAST(&qp->idle, workerhead); if (w != NULL && (w->used + params->wthread_timeout < now || VSL_stats->n_wrk > params->wthread_max)) - TAILQ_REMOVE(&qp->idle, w, list); + VTAILQ_REMOVE(&qp->idle, w, list); else w = NULL; UNLOCK(&qp->mtx); diff --git a/varnish-cache/bin/varnishd/cache_response.c b/varnish-cache/bin/varnishd/cache_response.c index 4b277a5a..8dc4cea5 100644 --- a/varnish-cache/bin/varnishd/cache_response.c +++ b/varnish-cache/bin/varnishd/cache_response.c @@ -158,7 +158,7 @@ RES_WriteObj(struct sess *sp) CHECK_OBJ_NOTNULL(sp, SESS_MAGIC); if (sp->wantbody) { - TAILQ_FOREACH(st, &sp->obj->store, list) { + VTAILQ_FOREACH(st, &sp->obj->store, list) { CHECK_OBJ_NOTNULL(sp, SESS_MAGIC); CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC); u += st->len; diff --git a/varnish-cache/bin/varnishd/cache_session.c b/varnish-cache/bin/varnishd/cache_session.c index 1ce8b736..62331e72 100644 --- a/varnish-cache/bin/varnishd/cache_session.c +++ b/varnish-cache/bin/varnishd/cache_session.c @@ -67,13 +67,13 @@ struct sessmem { struct sess sess; struct http http; unsigned workspace; - TAILQ_ENTRY(sessmem) list; + VTAILQ_ENTRY(sessmem) list; struct sockaddr_storage sockaddr[2]; }; -static TAILQ_HEAD(,sessmem) ses_free_mem[2] = { - TAILQ_HEAD_INITIALIZER(ses_free_mem[0]), - TAILQ_HEAD_INITIALIZER(ses_free_mem[1]), +static VTAILQ_HEAD(,sessmem) ses_free_mem[2] = { + VTAILQ_HEAD_INITIALIZER(ses_free_mem[0]), + VTAILQ_HEAD_INITIALIZER(ses_free_mem[1]), }; static unsigned ses_qp; @@ -86,7 +86,7 @@ struct srcaddr { #define SRCADDR_MAGIC 0x375111db unsigned hash; - TAILQ_ENTRY(srcaddr) list; + VTAILQ_ENTRY(srcaddr) list; struct srcaddrhead *sah; char addr[TCP_ADDRBUFSIZE]; @@ -101,7 +101,7 @@ struct srcaddr { static struct srcaddrhead { unsigned magic; #define SRCADDRHEAD_MAGIC 0x38231a8b - TAILQ_HEAD(,srcaddr) head; + VTAILQ_HEAD(,srcaddr) head; MTX mtx; } *srchash; @@ -141,17 +141,17 @@ SES_RefSrcAddr(struct sess *sp) LOCK(&ch->mtx); c3 = NULL; - TAILQ_FOREACH_SAFE(c, &ch->head, list, c2) { + VTAILQ_FOREACH_SAFE(c, &ch->head, list, c2) { if (c->hash == u && !strcmp(c->addr, sp->addr)) { if (c->nref == 0) VSL_stats->n_srcaddr_act++; c->nref++; c->ttl = now + params->srcaddr_ttl; sp->srcaddr = c; - TAILQ_REMOVE(&ch->head, c, list); - TAILQ_INSERT_TAIL(&ch->head, c, list); + VTAILQ_REMOVE(&ch->head, c, list); + VTAILQ_INSERT_TAIL(&ch->head, c, list); if (c3 != NULL) { - TAILQ_REMOVE(&ch->head, c3, list); + VTAILQ_REMOVE(&ch->head, c3, list); VSL_stats->n_srcaddr--; } UNLOCK(&ch->mtx); @@ -169,7 +169,7 @@ SES_RefSrcAddr(struct sess *sp) sp->wrk->srcaddr = NULL; VSL_stats->n_srcaddr++; } else - TAILQ_REMOVE(&ch->head, c3, list); + VTAILQ_REMOVE(&ch->head, c3, list); AN(c3); memset(c3, 0, sizeof *c3); c3->magic = SRCADDR_MAGIC; @@ -180,7 +180,7 @@ SES_RefSrcAddr(struct sess *sp) c3->nref = 1; c3->sah = ch; 
VSL_stats->n_srcaddr_act++; - TAILQ_INSERT_TAIL(&ch->head, c3, list); + VTAILQ_INSERT_TAIL(&ch->head, c3, list); sp->srcaddr = c3; UNLOCK(&ch->mtx); } @@ -267,7 +267,7 @@ SES_New(struct sockaddr *addr, unsigned len) * thread ever gets here to empty it. */ assert(ses_qp <= 1); - sm = TAILQ_FIRST(&ses_free_mem[ses_qp]); + sm = VTAILQ_FIRST(&ses_free_mem[ses_qp]); if (sm == NULL) { /* * If that queue is empty, flip queues holding the lock @@ -276,10 +276,10 @@ SES_New(struct sockaddr *addr, unsigned len) LOCK(&ses_mem_mtx); ses_qp = 1 - ses_qp; UNLOCK(&ses_mem_mtx); - sm = TAILQ_FIRST(&ses_free_mem[ses_qp]); + sm = VTAILQ_FIRST(&ses_free_mem[ses_qp]); } if (sm != NULL) { - TAILQ_REMOVE(&ses_free_mem[ses_qp], sm, list); + VTAILQ_REMOVE(&ses_free_mem[ses_qp], sm, list); } else { /* * If that fails, alloc new one. @@ -349,7 +349,7 @@ SES_Delete(struct sess *sp) free(sm); } else { LOCK(&ses_mem_mtx); - TAILQ_INSERT_HEAD(&ses_free_mem[1 - ses_qp], sm, list); + VTAILQ_INSERT_HEAD(&ses_free_mem[1 - ses_qp], sm, list); UNLOCK(&ses_mem_mtx); } } @@ -366,7 +366,7 @@ SES_Init() XXXAN(srchash); for (i = 0; i < nsrchash; i++) { srchash[i].magic = SRCADDRHEAD_MAGIC; - TAILQ_INIT(&srchash[i].head); + VTAILQ_INIT(&srchash[i].head); MTX_INIT(&srchash[i].mtx); } MTX_INIT(&stat_mtx); diff --git a/varnish-cache/bin/varnishd/cache_synthetic.c b/varnish-cache/bin/varnishd/cache_synthetic.c index 63e5d068..bf267ed7 100644 --- a/varnish-cache/bin/varnishd/cache_synthetic.c +++ b/varnish-cache/bin/varnishd/cache_synthetic.c @@ -88,7 +88,7 @@ SYN_ErrorPage(struct sess *sp, int status, const char *reason, int ttl) /* allocate space for body */ /* XXX what if the object already has a body? */ st = STV_alloc(sp, 1024); - TAILQ_INSERT_TAIL(&sp->obj->store, st, list); + VTAILQ_INSERT_TAIL(&sp->obj->store, st, list); /* generate body */ AN(vsb_new(&vsb, (char *)st->ptr, st->space, VSB_FIXEDLEN)); diff --git a/varnish-cache/bin/varnishd/cache_vcl.c b/varnish-cache/bin/varnishd/cache_vcl.c index b5c2814d..052224b1 100644 --- a/varnish-cache/bin/varnishd/cache_vcl.c +++ b/varnish-cache/bin/varnishd/cache_vcl.c @@ -46,7 +46,7 @@ #include "cache.h" struct vcls { - TAILQ_ENTRY(vcls) list; + VTAILQ_ENTRY(vcls) list; const char *name; void *dlh; struct VCL_conf *conf; @@ -57,8 +57,8 @@ struct vcls { * XXX: Presently all modifications to this list happen from the * CLI event-engine, so no locking is necessary */ -static TAILQ_HEAD(, vcls) vcl_head = - TAILQ_HEAD_INITIALIZER(vcl_head); +static VTAILQ_HEAD(, vcls) vcl_head = + VTAILQ_HEAD_INITIALIZER(vcl_head); static struct vcls *vcl_active; /* protected by vcl_mtx */ @@ -106,7 +106,7 @@ VCL_Rel(struct VCL_conf **vcc) /* XXX: purge backends */ } if (vc->busy == 0 && vcl->discard) { - TAILQ_REMOVE(&vcl_head, vcl, list); + VTAILQ_REMOVE(&vcl_head, vcl, list); } else { vcl = NULL; } @@ -123,7 +123,7 @@ vcl_find(const char *name) { struct vcls *vcl; - TAILQ_FOREACH(vcl, &vcl_head, list) + VTAILQ_FOREACH(vcl, &vcl_head, list) if (!strcmp(vcl->name, name)) return (vcl); return (NULL); @@ -179,7 +179,7 @@ VCL_Load(const char *fn, const char *name, struct cli *cli) vcl->conf->priv = vcl; vcl->name = strdup(name); XXXAN(vcl->name); - TAILQ_INSERT_TAIL(&vcl_head, vcl, list); + VTAILQ_INSERT_TAIL(&vcl_head, vcl, list); LOCK(&vcl_mtx); if (vcl_active == NULL) vcl_active = vcl; @@ -201,7 +201,7 @@ cli_func_config_list(struct cli *cli, char **av, void *priv) (void)av; (void)priv; - TAILQ_FOREACH(vcl, &vcl_head, list) { + VTAILQ_FOREACH(vcl, &vcl_head, list) { cli_out(cli, "%s %6u %s\n", vcl == 
vcl_active ? "* " : " ", vcl->conf->busy, @@ -247,7 +247,7 @@ cli_func_config_discard(struct cli *cli, char **av, void *priv) } vcl->discard = 1; if (vcl->conf->busy == 0) - TAILQ_REMOVE(&vcl_head, vcl, list); + VTAILQ_REMOVE(&vcl_head, vcl, list); else vcl = NULL; UNLOCK(&vcl_mtx); diff --git a/varnish-cache/bin/varnishd/hash_classic.c b/varnish-cache/bin/varnishd/hash_classic.c index 5ad212ce..449c81d2 100644 --- a/varnish-cache/bin/varnishd/hash_classic.c +++ b/varnish-cache/bin/varnishd/hash_classic.c @@ -44,7 +44,7 @@ struct hcl_entry { unsigned magic; #define HCL_ENTRY_MAGIC 0x0ba707bf - TAILQ_ENTRY(hcl_entry) list; + VTAILQ_ENTRY(hcl_entry) list; struct hcl_hd *head; struct objhead *oh; unsigned refcnt; @@ -55,7 +55,7 @@ struct hcl_entry { struct hcl_hd { unsigned magic; #define HCL_HEAD_MAGIC 0x0f327016 - TAILQ_HEAD(, hcl_entry) head; + VTAILQ_HEAD(, hcl_entry) head; MTX mtx; }; @@ -103,7 +103,7 @@ hcl_start(void) XXXAN(hcl_head); for (u = 0; u < hcl_nhash; u++) { - TAILQ_INIT(&hcl_head[u].head); + VTAILQ_INIT(&hcl_head[u].head); MTX_INIT(&hcl_head[u].mtx); hcl_head[u].magic = HCL_HEAD_MAGIC; } @@ -143,7 +143,7 @@ hcl_lookup(struct sess *sp, struct objhead *noh) for (r = 0; r < 2; r++ ) { LOCK(&hp->mtx); - TAILQ_FOREACH(he, &hp->head, list) { + VTAILQ_FOREACH(he, &hp->head, list) { CHECK_OBJ_NOTNULL(he, HCL_ENTRY_MAGIC); if (sp->lhashptr < he->oh->hashlen) continue; @@ -179,9 +179,9 @@ hcl_lookup(struct sess *sp, struct objhead *noh) } if (he2 != NULL) { if (he != NULL) - TAILQ_INSERT_BEFORE(he, he2, list); + VTAILQ_INSERT_BEFORE(he, he2, list); else - TAILQ_INSERT_TAIL(&hp->head, he2, list); + VTAILQ_INSERT_TAIL(&hp->head, he2, list); he2->refcnt++; noh = he2->oh; UNLOCK(&hp->mtx); @@ -227,7 +227,7 @@ hcl_deref(struct objhead *oh) assert(hp == &hcl_head[he->hash]); LOCK(&hp->mtx); if (--he->refcnt == 0) - TAILQ_REMOVE(&hp->head, he, list); + VTAILQ_REMOVE(&hp->head, he, list); else he = NULL; UNLOCK(&hp->mtx); diff --git a/varnish-cache/bin/varnishd/hash_simple_list.c b/varnish-cache/bin/varnishd/hash_simple_list.c index ba265274..85e91d4d 100644 --- a/varnish-cache/bin/varnishd/hash_simple_list.c +++ b/varnish-cache/bin/varnishd/hash_simple_list.c @@ -43,12 +43,12 @@ /*--------------------------------------------------------------------*/ struct hsl_entry { - TAILQ_ENTRY(hsl_entry) list; + VTAILQ_ENTRY(hsl_entry) list; struct objhead *obj; unsigned refcnt; }; -static TAILQ_HEAD(, hsl_entry) hsl_head = TAILQ_HEAD_INITIALIZER(hsl_head); +static VTAILQ_HEAD(, hsl_entry) hsl_head = VTAILQ_HEAD_INITIALIZER(hsl_head); static MTX hsl_mutex; /*-------------------------------------------------------------------- @@ -77,7 +77,7 @@ hsl_lookup(struct sess *sp, struct objhead *nobj) int i; LOCK(&hsl_mutex); - TAILQ_FOREACH(he, &hsl_head, list) { + VTAILQ_FOREACH(he, &hsl_head, list) { i = HSH_Compare(sp, he->obj); if (i < 0) continue; @@ -104,9 +104,9 @@ hsl_lookup(struct sess *sp, struct objhead *nobj) HSH_Copy(sp, nobj); if (he != NULL) - TAILQ_INSERT_BEFORE(he, he2, list); + VTAILQ_INSERT_BEFORE(he, he2, list); else - TAILQ_INSERT_TAIL(&hsl_head, he2, list); + VTAILQ_INSERT_TAIL(&hsl_head, he2, list); UNLOCK(&hsl_mutex); return (nobj); } @@ -125,7 +125,7 @@ hsl_deref(struct objhead *obj) he = obj->hashpriv; LOCK(&hsl_mutex); if (--he->refcnt == 0) { - TAILQ_REMOVE(&hsl_head, he, list); + VTAILQ_REMOVE(&hsl_head, he, list); free(he); ret = 0; } else diff --git a/varnish-cache/bin/varnishd/heritage.h b/varnish-cache/bin/varnishd/heritage.h index 6c783007..89f24853 100644 --- 
a/varnish-cache/bin/varnishd/heritage.h +++ b/varnish-cache/bin/varnishd/heritage.h @@ -33,19 +33,15 @@ #include -#ifdef HAVE_SYS_QUEUE_H -#include -#else -#include "queue.h" -#endif +#include "vqueue.h" struct listen_sock { - TAILQ_ENTRY(listen_sock) list; + VTAILQ_ENTRY(listen_sock) list; int sock; struct vss_addr *addr; }; -TAILQ_HEAD(listen_sock_head, listen_sock); +VTAILQ_HEAD(listen_sock_head, listen_sock); struct heritage { diff --git a/varnish-cache/bin/varnishd/mgt_child.c b/varnish-cache/bin/varnishd/mgt_child.c index 7b806ac6..2072ec73 100644 --- a/varnish-cache/bin/varnishd/mgt_child.c +++ b/varnish-cache/bin/varnishd/mgt_child.c @@ -127,14 +127,14 @@ open_sockets(void) struct listen_sock *ls, *ls2; int good = 0; - TAILQ_FOREACH_SAFE(ls, &heritage.socks, list, ls2) { + VTAILQ_FOREACH_SAFE(ls, &heritage.socks, list, ls2) { if (ls->sock >= 0) { good++; continue; } ls->sock = VSS_listen(ls->addr, params->listen_depth); if (ls->sock < 0) { - TAILQ_REMOVE(&heritage.socks, ls, list); + VTAILQ_REMOVE(&heritage.socks, ls, list); free(ls); continue; } @@ -153,7 +153,7 @@ close_sockets(void) { struct listen_sock *ls; - TAILQ_FOREACH(ls, &heritage.socks, list) { + VTAILQ_FOREACH(ls, &heritage.socks, list) { if (ls->sock < 0) continue; close(ls->sock); diff --git a/varnish-cache/bin/varnishd/mgt_event.c b/varnish-cache/bin/varnishd/mgt_event.c index 40fe6a1b..a2909904 100644 --- a/varnish-cache/bin/varnishd/mgt_event.c +++ b/varnish-cache/bin/varnishd/mgt_event.c @@ -60,7 +60,7 @@ static unsigned ev_nsig; struct evbase { unsigned magic; #define EVBASE_MAGIC 0x0cfd976f - TAILQ_HEAD(,ev) events; + VTAILQ_HEAD(,ev) events; struct pollfd *pfd; unsigned npfd; unsigned lpfd; @@ -173,7 +173,7 @@ ev_new_base(void) return (NULL); } evb->magic = EVBASE_MAGIC; - TAILQ_INIT(&evb->events); + VTAILQ_INIT(&evb->events); evb->binheap = binheap_new(evb, ev_bh_cmp, ev_bh_update); return (evb); } @@ -259,9 +259,9 @@ ev_add(struct evbase *evb, struct ev *e) e->__evb = evb; e->__privflags = 0; if (e->fd < 0) - TAILQ_INSERT_TAIL(&evb->events, e, __list); + VTAILQ_INSERT_TAIL(&evb->events, e, __list); else - TAILQ_INSERT_HEAD(&evb->events, e, __list); + VTAILQ_INSERT_HEAD(&evb->events, e, __list); if (e->sig > 0) { assert(es != NULL); @@ -307,7 +307,7 @@ ev_del(struct evbase *evb, struct ev *e) es->happened = 0; } - TAILQ_REMOVE(&evb->events, e, __list); + VTAILQ_REMOVE(&evb->events, e, __list); e->magic = 0; e->__evb = NULL; @@ -339,11 +339,11 @@ ev_compact_pfd(struct evbase *evb) struct ev *ep; p = evb->pfd; - ep = TAILQ_FIRST(&evb->events); + ep = VTAILQ_FIRST(&evb->events); for (u = 0; u < evb->lpfd; u++, p++) { if (p->fd >= 0) continue; - for(; ep != NULL; ep = TAILQ_NEXT(ep, __list)) { + for(; ep != NULL; ep = VTAILQ_NEXT(ep, __list)) { if (ep->fd >= 0 && ep->__poll_idx > u) break; } @@ -441,7 +441,7 @@ ev_schedule_one(struct evbase *evb) return (ev_sched_timeout(evb, e, t)); } evb->disturbed = 0; - TAILQ_FOREACH_SAFE(e, &evb->events, __list, e2) { + VTAILQ_FOREACH_SAFE(e, &evb->events, __list, e2) { if (i == 0) break; if (e->fd < 0) @@ -454,9 +454,9 @@ ev_schedule_one(struct evbase *evb) j = e->callback(e, pfd->revents); i--; if (evb->disturbed) { - TAILQ_FOREACH(e3, &evb->events, __list) { + VTAILQ_FOREACH(e3, &evb->events, __list) { if (e3 == e) { - e3 = TAILQ_NEXT(e, __list); + e3 = VTAILQ_NEXT(e, __list); break; } else if (e3 == e2) break; diff --git a/varnish-cache/bin/varnishd/mgt_event.h b/varnish-cache/bin/varnishd/mgt_event.h index c224cdd9..d0846659 100644 --- 
a/varnish-cache/bin/varnishd/mgt_event.h +++ b/varnish-cache/bin/varnishd/mgt_event.h @@ -31,11 +31,7 @@ #include -#ifdef HAVE_SYS_QUEUE_H -#include -#else -#include "queue.h" -#endif +#include "vqueue.h" struct ev; struct evbase; @@ -63,7 +59,7 @@ struct ev { /* priv */ double __when; - TAILQ_ENTRY(ev) __list; + VTAILQ_ENTRY(ev) __list; unsigned __binheap_idx; unsigned __privflags; struct evbase *__evb; diff --git a/varnish-cache/bin/varnishd/mgt_param.c b/varnish-cache/bin/varnishd/mgt_param.c index c0cfeb7c..911380b7 100644 --- a/varnish-cache/bin/varnishd/mgt_param.c +++ b/varnish-cache/bin/varnishd/mgt_param.c @@ -383,8 +383,8 @@ clean_listen_sock_head(struct listen_sock_head *lsh) { struct listen_sock *ls, *ls2; - TAILQ_FOREACH_SAFE(ls, lsh, list, ls2) { - TAILQ_REMOVE(lsh, ls, list); + VTAILQ_FOREACH_SAFE(ls, lsh, list, ls2) { + VTAILQ_REMOVE(lsh, ls, list); free(ls->addr); free(ls); } @@ -421,7 +421,7 @@ tweak_listen_address(struct cli *cli, struct parspec *par, const char *arg) FreeArgv(av); return; } - TAILQ_INIT(&lsh); + VTAILQ_INIT(&lsh); for (i = 1; av[i] != NULL; i++) { struct vss_addr **ta; char *host, *port; @@ -445,7 +445,7 @@ tweak_listen_address(struct cli *cli, struct parspec *par, const char *arg) AN(ls); ls->sock = -1; ls->addr = ta[j]; - TAILQ_INSERT_TAIL(&lsh, ls, list); + VTAILQ_INSERT_TAIL(&lsh, ls, list); } free(ta); } @@ -460,10 +460,10 @@ tweak_listen_address(struct cli *cli, struct parspec *par, const char *arg) clean_listen_sock_head(&heritage.socks); heritage.nsocks = 0; - while (!TAILQ_EMPTY(&lsh)) { - ls = TAILQ_FIRST(&lsh); - TAILQ_REMOVE(&lsh, ls, list); - TAILQ_INSERT_TAIL(&heritage.socks, ls, list); + while (!VTAILQ_EMPTY(&lsh)) { + ls = VTAILQ_FIRST(&lsh); + VTAILQ_REMOVE(&lsh, ls, list); + VTAILQ_INSERT_TAIL(&heritage.socks, ls, list); heritage.nsocks++; } } diff --git a/varnish-cache/bin/varnishd/mgt_vcc.c b/varnish-cache/bin/varnishd/mgt_vcc.c index 36a29ef0..c8d27f37 100644 --- a/varnish-cache/bin/varnishd/mgt_vcc.c +++ b/varnish-cache/bin/varnishd/mgt_vcc.c @@ -45,11 +45,7 @@ #endif #include "vsb.h" -#ifdef HAVE_SYS_QUEUE_H -#include -#else -#include "queue.h" -#endif +#include "vqueue.h" #include "libvcl.h" #include "cli.h" @@ -63,13 +59,13 @@ #include "vss.h" struct vclprog { - TAILQ_ENTRY(vclprog) list; + VTAILQ_ENTRY(vclprog) list; char *name; char *fname; int active; }; -static TAILQ_HEAD(, vclprog) vclhead = TAILQ_HEAD_INITIALIZER(vclhead); +static VTAILQ_HEAD(, vclprog) vclhead = VTAILQ_HEAD_INITIALIZER(vclhead); char *mgt_cc_cmd; @@ -340,14 +336,14 @@ mgt_vcc_add(const char *name, char *file) vp->name = strdup(name); XXXAN(vp->name); vp->fname = file; - TAILQ_INSERT_TAIL(&vclhead, vp, list); + VTAILQ_INSERT_TAIL(&vclhead, vp, list); return (vp); } static void mgt_vcc_del(struct vclprog *vp) { - TAILQ_REMOVE(&vclhead, vp, list); + VTAILQ_REMOVE(&vclhead, vp, list); printf("unlink %s\n", vp->fname); XXXAZ(unlink(vp->fname)); free(vp->fname); @@ -360,7 +356,7 @@ mgt_vcc_delbyname(const char *name) { struct vclprog *vp; - TAILQ_FOREACH(vp, &vclhead, list) { + VTAILQ_FOREACH(vp, &vclhead, list) { if (!strcmp(name, vp->name)) { mgt_vcc_del(vp); return (0); @@ -430,7 +426,7 @@ mgt_push_vcls_and_start(unsigned *status, char **p) { struct vclprog *vp; - TAILQ_FOREACH(vp, &vclhead, list) { + VTAILQ_FOREACH(vp, &vclhead, list) { if (mgt_cli_askchild(status, p, "vcl.load %s %s\n", vp->name, vp->fname)) return (1); @@ -460,7 +456,7 @@ mgt_vcc_atexit(void) if (getpid() != mgt_pid) return; while (1) { - vp = TAILQ_FIRST(&vclhead); + vp = 
VTAILQ_FIRST(&vclhead); if (vp == NULL) break; mgt_vcc_del(vp); @@ -543,7 +539,7 @@ mcf_find_vcl(struct cli *cli, const char *name) { struct vclprog *vp; - TAILQ_FOREACH(vp, &vclhead, list) + VTAILQ_FOREACH(vp, &vclhead, list) if (!strcmp(vp->name, name)) break; if (vp == NULL) { @@ -570,7 +566,7 @@ mcf_config_use(struct cli *cli, char **av, void *priv) free(p); } else { vp->active = 2; - TAILQ_FOREACH(vp, &vclhead, list) { + VTAILQ_FOREACH(vp, &vclhead, list) { if (vp->active == 1) vp->active = 0; else if (vp->active == 2) @@ -620,7 +616,7 @@ mcf_config_list(struct cli *cli, char **av, void *priv) cli_out(cli, "%s", p); free(p); } else { - TAILQ_FOREACH(vp, &vclhead, list) { + VTAILQ_FOREACH(vp, &vclhead, list) { cli_out(cli, "%s %6s %s\n", vp->active ? "*" : " ", "N/A", vp->name); diff --git a/varnish-cache/bin/varnishd/stevedore.h b/varnish-cache/bin/varnishd/stevedore.h index 91d45ba4..321796f9 100644 --- a/varnish-cache/bin/varnishd/stevedore.h +++ b/varnish-cache/bin/varnishd/stevedore.h @@ -29,11 +29,7 @@ * $Id$ */ -#ifdef HAVE_SYS_QUEUE_H -#include -#else -#include "queue.h" -#endif +#include "vqueue.h" struct stevedore; struct sess; diff --git a/varnish-cache/bin/varnishd/storage_file.c b/varnish-cache/bin/varnishd/storage_file.c index 9ade7be0..0aed4ba7 100644 --- a/varnish-cache/bin/varnishd/storage_file.c +++ b/varnish-cache/bin/varnishd/storage_file.c @@ -83,7 +83,7 @@ /*--------------------------------------------------------------------*/ -TAILQ_HEAD(smfhead, smf); +VTAILQ_HEAD(smfhead, smf); struct smf { unsigned magic; @@ -97,8 +97,8 @@ struct smf { off_t offset; unsigned char *ptr; - TAILQ_ENTRY(smf) order; - TAILQ_ENTRY(smf) status; + VTAILQ_ENTRY(smf) order; + VTAILQ_ENTRY(smf) status; struct smfhead *flist; }; @@ -254,10 +254,10 @@ smf_init(struct stevedore *parent, const char *spec) sc = calloc(sizeof *sc, 1); XXXAN(sc); - TAILQ_INIT(&sc->order); + VTAILQ_INIT(&sc->order); for (u = 0; u < NBUCKET; u++) - TAILQ_INIT(&sc->free[u]); - TAILQ_INIT(&sc->used); + VTAILQ_INIT(&sc->free[u]); + VTAILQ_INIT(&sc->used); sc->pagesize = getpagesize(); parent->priv = sc; @@ -359,7 +359,7 @@ insfree(struct smf_sc *sc, struct smf *sp) } sp->flist = &sc->free[b]; ns = b * sc->pagesize; - TAILQ_FOREACH(sp2, sp->flist, status) { + VTAILQ_FOREACH(sp2, sp->flist, status) { assert(sp2->size >= ns); assert(sp2->alloc == 0); assert(sp2->flist == sp->flist); @@ -367,9 +367,9 @@ insfree(struct smf_sc *sc, struct smf *sp) break; } if (sp2 == NULL) - TAILQ_INSERT_TAIL(sp->flist, sp, status); + VTAILQ_INSERT_TAIL(sp->flist, sp, status); else - TAILQ_INSERT_BEFORE(sp2, sp, status); + VTAILQ_INSERT_BEFORE(sp2, sp, status); } static void @@ -387,7 +387,7 @@ remfree(struct smf_sc *sc, struct smf *sp) VSL_stats->n_smf_frag--; } assert(sp->flist == &sc->free[b]); - TAILQ_REMOVE(sp->flist, sp, status); + VTAILQ_REMOVE(sp->flist, sp, status); sp->flist = NULL; } @@ -406,12 +406,12 @@ alloc_smf(struct smf_sc *sc, size_t bytes) if (b >= NBUCKET) b = NBUCKET - 1; for (sp = NULL; b < NBUCKET - 1; b++) { - sp = TAILQ_FIRST(&sc->free[b]); + sp = VTAILQ_FIRST(&sc->free[b]); if (sp != NULL) break; } if (sp == NULL) { - TAILQ_FOREACH(sp, &sc->free[NBUCKET -1], status) + VTAILQ_FOREACH(sp, &sc->free[NBUCKET -1], status) if (sp->size >= bytes) break; } @@ -423,7 +423,7 @@ alloc_smf(struct smf_sc *sc, size_t bytes) if (sp->size == bytes) { sp->alloc = 1; - TAILQ_INSERT_TAIL(&sc->used, sp, status); + VTAILQ_INSERT_TAIL(&sc->used, sp, status); return (sp); } @@ -439,8 +439,8 @@ alloc_smf(struct smf_sc *sc, size_t 
bytes) sp2->size = bytes; sp2->alloc = 1; - TAILQ_INSERT_BEFORE(sp, sp2, order); - TAILQ_INSERT_TAIL(&sc->used, sp2, status); + VTAILQ_INSERT_BEFORE(sp, sp2, order); + VTAILQ_INSERT_TAIL(&sc->used, sp2, status); insfree(sc, sp); return (sp2); } @@ -460,29 +460,29 @@ free_smf(struct smf *sp) assert(sp->alloc != 0); assert(sp->size > 0); assert(!(sp->size % sc->pagesize)); - TAILQ_REMOVE(&sc->used, sp, status); + VTAILQ_REMOVE(&sc->used, sp, status); sp->alloc = 0; - sp2 = TAILQ_NEXT(sp, order); + sp2 = VTAILQ_NEXT(sp, order); if (sp2 != NULL && sp2->alloc == 0 && (sp2->ptr == sp->ptr + sp->size) && (sp2->offset == sp->offset + sp->size)) { sp->size += sp2->size; - TAILQ_REMOVE(&sc->order, sp2, order); + VTAILQ_REMOVE(&sc->order, sp2, order); remfree(sc, sp2); free(sp2); VSL_stats->n_smf--; } - sp2 = TAILQ_PREV(sp, smfhead, order); + sp2 = VTAILQ_PREV(sp, smfhead, order); if (sp2 != NULL && sp2->alloc == 0 && (sp->ptr == sp2->ptr + sp2->size) && (sp->offset == sp2->offset + sp2->size)) { remfree(sc, sp2); sp2->size += sp->size; - TAILQ_REMOVE(&sc->order, sp, order); + VTAILQ_REMOVE(&sc->order, sp, order); free(sp); VSL_stats->n_smf--; sp = sp2; @@ -516,8 +516,8 @@ trim_smf(struct smf *sp, size_t bytes) sp->size = bytes; sp2->ptr += bytes; sp2->offset += bytes; - TAILQ_INSERT_AFTER(&sc->order, sp, sp2, order); - TAILQ_INSERT_TAIL(&sc->used, sp2, status); + VTAILQ_INSERT_AFTER(&sc->order, sp, sp2, order); + VTAILQ_INSERT_TAIL(&sc->used, sp2, status); free_smf(sp2); } @@ -543,16 +543,16 @@ new_smf(struct smf_sc *sc, unsigned char *ptr, off_t off, size_t len) sp->offset = off; sp->alloc = 1; - TAILQ_FOREACH(sp2, &sc->order, order) { + VTAILQ_FOREACH(sp2, &sc->order, order) { if (sp->ptr < sp2->ptr) { - TAILQ_INSERT_BEFORE(sp2, sp, order); + VTAILQ_INSERT_BEFORE(sp2, sp, order); break; } } if (sp2 == NULL) - TAILQ_INSERT_TAIL(&sc->order, sp, order); + VTAILQ_INSERT_TAIL(&sc->order, sp, order); - TAILQ_INSERT_HEAD(&sc->used, sp, status); + VTAILQ_INSERT_HEAD(&sc->used, sp, status); free_smf(sp); } @@ -734,17 +734,17 @@ dumpit(void) return (0); printf("----------------\n"); printf("Order:\n"); - TAILQ_FOREACH(s, &sc->order, order) { + VTAILQ_FOREACH(s, &sc->order, order) { printf("%10p %12ju %12ju %12ju\n", s, s->offset, s->size, s->offset + s->size); } printf("Used:\n"); - TAILQ_FOREACH(s, &sc->used, status) { + VTAILQ_FOREACH(s, &sc->used, status) { printf("%10p %12ju %12ju %12ju\n", s, s->offset, s->size, s->offset + s->size); } printf("Free:\n"); - TAILQ_FOREACH(s, &sc->free, status) { + VTAILQ_FOREACH(s, &sc->free, status) { printf("%10p %12ju %12ju %12ju\n", s, s->offset, s->size, s->offset + s->size); } diff --git a/varnish-cache/bin/varnishd/varnishd.c b/varnish-cache/bin/varnishd/varnishd.c index ab9cff79..0c718b9b 100644 --- a/varnish-cache/bin/varnishd/varnishd.c +++ b/varnish-cache/bin/varnishd/varnishd.c @@ -395,7 +395,7 @@ main(int argc, char *argv[]) XXXAN(cli[0].sb); cli[0].result = CLIS_OK; - TAILQ_INIT(&heritage.socks); + VTAILQ_INIT(&heritage.socks); mgt_vcc_init(); diff --git a/varnish-cache/bin/varnishreplay/varnishreplay.c b/varnish-cache/bin/varnishreplay/varnishreplay.c index f34984dc..09fe4486 100644 --- a/varnish-cache/bin/varnishreplay/varnishreplay.c +++ b/varnish-cache/bin/varnishreplay/varnishreplay.c @@ -38,11 +38,7 @@ #include #include -#ifdef HAVE_SYS_QUEUE_H -#include -#else -#include "queue.h" -#endif +#include "vqueue.h" #include "libvarnish.h" #include "varnishapi.h" @@ -65,21 +61,21 @@ struct message { enum shmlogtag tag; size_t len; char *ptr; - 
STAILQ_ENTRY(message) list; + VSTAILQ_ENTRY(message) list; }; struct mailbox { pthread_mutex_t lock; pthread_cond_t has_mail; int open; - STAILQ_HEAD(msgq_head, message) messages; + VSTAILQ_HEAD(msgq_head, message) messages; }; static void mailbox_create(struct mailbox *mbox) { - STAILQ_INIT(&mbox->messages); + VSTAILQ_INIT(&mbox->messages); pthread_mutex_init(&mbox->lock, NULL); pthread_cond_init(&mbox->has_mail, NULL); mbox->open = 1; @@ -90,8 +86,8 @@ mailbox_destroy(struct mailbox *mbox) { struct message *msg; - while ((msg = STAILQ_FIRST(&mbox->messages))) { - STAILQ_REMOVE_HEAD(&mbox->messages, list); + while ((msg = VSTAILQ_FIRST(&mbox->messages))) { + VSTAILQ_REMOVE_HEAD(&mbox->messages, list); free(msg); } pthread_cond_destroy(&mbox->has_mail); @@ -103,7 +99,7 @@ mailbox_put(struct mailbox *mbox, struct message *msg) { pthread_mutex_lock(&mbox->lock); - STAILQ_INSERT_TAIL(&mbox->messages, msg, list); + VSTAILQ_INSERT_TAIL(&mbox->messages, msg, list); pthread_cond_signal(&mbox->has_mail); pthread_mutex_unlock(&mbox->lock); } @@ -114,10 +110,10 @@ mailbox_get(struct mailbox *mbox) struct message *msg; pthread_mutex_lock(&mbox->lock); - while ((msg = STAILQ_FIRST(&mbox->messages)) == NULL && mbox->open) + while ((msg = VSTAILQ_FIRST(&mbox->messages)) == NULL && mbox->open) pthread_cond_wait(&mbox->has_mail, &mbox->lock); if (msg != NULL) - STAILQ_REMOVE_HEAD(&mbox->messages, list); + VSTAILQ_REMOVE_HEAD(&mbox->messages, list); pthread_mutex_unlock(&mbox->lock); return msg; } diff --git a/varnish-cache/bin/varnishtop/varnishtop.c b/varnish-cache/bin/varnishtop/varnishtop.c index dca9ff67..b6aac8db 100644 --- a/varnish-cache/bin/varnishtop/varnishtop.c +++ b/varnish-cache/bin/varnishtop/varnishtop.c @@ -43,11 +43,7 @@ #include #include -#ifdef HAVE_SYS_QUEUE_H -#include -#else -#include "queue.h" -#endif +#include "vqueue.h" #include "vsb.h" @@ -59,11 +55,11 @@ struct top { unsigned char rec[4 + 255]; unsigned clen; unsigned hash; - TAILQ_ENTRY(top) list; + VTAILQ_ENTRY(top) list; double count; }; -static TAILQ_HEAD(tophead, top) top_head = TAILQ_HEAD_INITIALIZER(top_head); +static VTAILQ_HEAD(tophead, top) top_head = VTAILQ_HEAD_INITIALIZER(top_head); static unsigned ntop; @@ -91,7 +87,7 @@ accumulate(const unsigned char *p) u += *q; } - TAILQ_FOREACH(tp, &top_head, list) { + VTAILQ_FOREACH(tp, &top_head, list) { if (tp->hash != u) continue; if (tp->rec[0] != p[0]) @@ -110,22 +106,22 @@ accumulate(const unsigned char *p) tp->hash = u; tp->count = 1.0; tp->clen = q - p; - TAILQ_INSERT_TAIL(&top_head, tp, list); + VTAILQ_INSERT_TAIL(&top_head, tp, list); } memcpy(tp->rec, p, 4 + p[1]); while (1) { - tp2 = TAILQ_PREV(tp, tophead, list); + tp2 = VTAILQ_PREV(tp, tophead, list); if (tp2 == NULL || tp2->count >= tp->count) break; - TAILQ_REMOVE(&top_head, tp2, list); - TAILQ_INSERT_AFTER(&top_head, tp, tp2, list); + VTAILQ_REMOVE(&top_head, tp2, list); + VTAILQ_INSERT_AFTER(&top_head, tp, tp2, list); } while (1) { - tp2 = TAILQ_NEXT(tp, list); + tp2 = VTAILQ_NEXT(tp, list); if (tp2 == NULL || tp2->count <= tp->count) break; - TAILQ_REMOVE(&top_head, tp2, list); - TAILQ_INSERT_BEFORE(tp, tp2, list); + VTAILQ_REMOVE(&top_head, tp2, list); + VTAILQ_INSERT_BEFORE(tp, tp2, list); } } @@ -147,7 +143,7 @@ update(void) l = 1; mvprintw(0, 0, "%*s", COLS - 1, VSL_Name()); mvprintw(0, 0, "list length %u", ntop); - TAILQ_FOREACH_SAFE(tp, &top_head, list, tp2) { + VTAILQ_FOREACH_SAFE(tp, &top_head, list, tp2) { if (++l < LINES) { int len = tp->rec[1]; if (len > COLS - 20) @@ -159,7 +155,7 @@ update(void) 
} tp->count *= .999; if (tp->count * 10 < t || l > LINES * 10) { - TAILQ_REMOVE(&top_head, tp, list); + VTAILQ_REMOVE(&top_head, tp, list); free(tp); ntop--; } @@ -251,7 +247,7 @@ dump(void) struct top *tp, *tp2; int len; - TAILQ_FOREACH_SAFE(tp, &top_head, list, tp2) { + VTAILQ_FOREACH_SAFE(tp, &top_head, list, tp2) { if (tp->count <= 1.0) break; len = tp->rec[1]; diff --git a/varnish-cache/configure.ac b/varnish-cache/configure.ac index 4540e4e0..0603b567 100644 --- a/varnish-cache/configure.ac +++ b/varnish-cache/configure.ac @@ -65,7 +65,6 @@ AC_SUBST(LIBM) AC_HEADER_STDC AC_HEADER_SYS_WAIT AC_HEADER_TIME -AC_CHECK_HEADERS([sys/queue.h]) AC_CHECK_HEADERS([sys/socket.h]) AC_CHECK_HEADERS([sys/mount.h]) AC_CHECK_HEADERS([sys/vfs.h]) diff --git a/varnish-cache/include/Makefile.am b/varnish-cache/include/Makefile.am index 5f8c6f06..8e37c04d 100644 --- a/varnish-cache/include/Makefile.am +++ b/varnish-cache/include/Makefile.am @@ -25,7 +25,7 @@ noinst_HEADERS = \ libvarnish.h \ libvcl.h \ miniobj.h \ - queue.h \ + vqueue.h \ vpf.h \ vsb.h \ vcl.h \ diff --git a/varnish-cache/include/queue.h b/varnish-cache/include/queue.h deleted file mode 100644 index b87879ea..00000000 --- a/varnish-cache/include/queue.h +++ /dev/null @@ -1,676 +0,0 @@ -/* $Id$ */ -/* $NetBSD: queue.h,v 1.45 2006/03/07 17:56:00 pooka Exp $ */ - -/* - * Copyright (c) 1991, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * @(#)queue.h 8.5 (Berkeley) 8/20/94 - */ - -#ifndef _SYS_QUEUE_H_ -#define _SYS_QUEUE_H_ - -/* - * This file defines five types of data structures: singly-linked lists, - * lists, simple queues, tail queues, and circular queues. - * - * A singly-linked list is headed by a single forward pointer. The - * elements are singly linked for minimum space and pointer manipulation - * overhead at the expense of O(n) removal for arbitrary elements. New - * elements can be added to the list after an existing element or at the - * head of the list. 
Elements being removed from the head of the list - * should use the explicit macro for this purpose for optimum - * efficiency. A singly-linked list may only be traversed in the forward - * direction. Singly-linked lists are ideal for applications with large - * datasets and few or no removals or for implementing a LIFO queue. - * - * A list is headed by a single forward pointer (or an array of forward - * pointers for a hash table header). The elements are doubly linked - * so that an arbitrary element can be removed without a need to - * traverse the list. New elements can be added to the list before - * or after an existing element or at the head of the list. A list - * may only be traversed in the forward direction. - * - * A simple queue is headed by a pair of pointers, one the head of the - * list and the other to the tail of the list. The elements are singly - * linked to save space, so elements can only be removed from the - * head of the list. New elements can be added to the list after - * an existing element, at the head of the list, or at the end of the - * list. A simple queue may only be traversed in the forward direction. - * - * A tail queue is headed by a pair of pointers, one to the head of the - * list and the other to the tail of the list. The elements are doubly - * linked so that an arbitrary element can be removed without a need to - * traverse the list. New elements can be added to the list before or - * after an existing element, at the head of the list, or at the end of - * the list. A tail queue may be traversed in either direction. - * - * A circle queue is headed by a pair of pointers, one to the head of the - * list and the other to the tail of the list. The elements are doubly - * linked so that an arbitrary element can be removed without a need to - * traverse the list. New elements can be added to the list before or after - * an existing element, at the head of the list, or at the end of the list. - * A circle queue may be traversed in either direction, but has a more - * complex end of list detection. - * - * For details on the use of these macros, see the queue(3) manual page. - */ - -/* - * List definitions. - */ -#define LIST_HEAD(name, type) \ -struct name { \ - struct type *lh_first; /* first element */ \ -} - -#define LIST_HEAD_INITIALIZER(head) \ - { NULL } - -#define LIST_ENTRY(type) \ -struct { \ - struct type *le_next; /* next element */ \ - struct type **le_prev; /* address of previous next element */ \ -} - -/* - * List functions. 
- */ -#if defined(_KERNEL) && defined(QUEUEDEBUG) -#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \ - if ((head)->lh_first && \ - (head)->lh_first->field.le_prev != &(head)->lh_first) \ - panic("LIST_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__); -#define QUEUEDEBUG_LIST_OP(elm, field) \ - if ((elm)->field.le_next && \ - (elm)->field.le_next->field.le_prev != \ - &(elm)->field.le_next) \ - panic("LIST_* forw %p %s:%d", (elm), __FILE__, __LINE__);\ - if (*(elm)->field.le_prev != (elm)) \ - panic("LIST_* back %p %s:%d", (elm), __FILE__, __LINE__); -#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \ - (elm)->field.le_next = (void *)1L; \ - (elm)->field.le_prev = (void *)1L; -#else -#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) -#define QUEUEDEBUG_LIST_OP(elm, field) -#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) -#endif - -#define LIST_INIT(head) do { \ - (head)->lh_first = NULL; \ -} while (/*CONSTCOND*/0) - -#define LIST_INSERT_AFTER(listelm, elm, field) do { \ - QUEUEDEBUG_LIST_OP((listelm), field) \ - if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ - (listelm)->field.le_next->field.le_prev = \ - &(elm)->field.le_next; \ - (listelm)->field.le_next = (elm); \ - (elm)->field.le_prev = &(listelm)->field.le_next; \ -} while (/*CONSTCOND*/0) - -#define LIST_INSERT_BEFORE(listelm, elm, field) do { \ - QUEUEDEBUG_LIST_OP((listelm), field) \ - (elm)->field.le_prev = (listelm)->field.le_prev; \ - (elm)->field.le_next = (listelm); \ - *(listelm)->field.le_prev = (elm); \ - (listelm)->field.le_prev = &(elm)->field.le_next; \ -} while (/*CONSTCOND*/0) - -#define LIST_INSERT_HEAD(head, elm, field) do { \ - QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field) \ - if (((elm)->field.le_next = (head)->lh_first) != NULL) \ - (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ - (head)->lh_first = (elm); \ - (elm)->field.le_prev = &(head)->lh_first; \ -} while (/*CONSTCOND*/0) - -#define LIST_REMOVE(elm, field) do { \ - QUEUEDEBUG_LIST_OP((elm), field) \ - if ((elm)->field.le_next != NULL) \ - (elm)->field.le_next->field.le_prev = \ - (elm)->field.le_prev; \ - *(elm)->field.le_prev = (elm)->field.le_next; \ - QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \ -} while (/*CONSTCOND*/0) - -#define LIST_FOREACH(var, head, field) \ - for ((var) = ((head)->lh_first); \ - (var); \ - (var) = ((var)->field.le_next)) - -/* - * List access methods. - */ -#define LIST_EMPTY(head) ((head)->lh_first == NULL) -#define LIST_FIRST(head) ((head)->lh_first) -#define LIST_NEXT(elm, field) ((elm)->field.le_next) - - -/* - * Singly-linked List definitions. - */ -#define SLIST_HEAD(name, type) \ -struct name { \ - struct type *slh_first; /* first element */ \ -} - -#define SLIST_HEAD_INITIALIZER(head) \ - { NULL } - -#define SLIST_ENTRY(type) \ -struct { \ - struct type *sle_next; /* next element */ \ -} - -/* - * Singly-linked List functions. 
- */ -#define SLIST_INIT(head) do { \ - (head)->slh_first = NULL; \ -} while (/*CONSTCOND*/0) - -#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ - (elm)->field.sle_next = (slistelm)->field.sle_next; \ - (slistelm)->field.sle_next = (elm); \ -} while (/*CONSTCOND*/0) - -#define SLIST_INSERT_HEAD(head, elm, field) do { \ - (elm)->field.sle_next = (head)->slh_first; \ - (head)->slh_first = (elm); \ -} while (/*CONSTCOND*/0) - -#define SLIST_REMOVE_HEAD(head, field) do { \ - (head)->slh_first = (head)->slh_first->field.sle_next; \ -} while (/*CONSTCOND*/0) - -#define SLIST_REMOVE(head, elm, type, field) do { \ - if ((head)->slh_first == (elm)) { \ - SLIST_REMOVE_HEAD((head), field); \ - } \ - else { \ - struct type *curelm = (head)->slh_first; \ - while(curelm->field.sle_next != (elm)) \ - curelm = curelm->field.sle_next; \ - curelm->field.sle_next = \ - curelm->field.sle_next->field.sle_next; \ - } \ -} while (/*CONSTCOND*/0) - -#define SLIST_FOREACH(var, head, field) \ - for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next) - -/* - * Singly-linked List access methods. - */ -#define SLIST_EMPTY(head) ((head)->slh_first == NULL) -#define SLIST_FIRST(head) ((head)->slh_first) -#define SLIST_NEXT(elm, field) ((elm)->field.sle_next) - - -/* - * Singly-linked Tail queue declarations. - */ -#define STAILQ_HEAD(name, type) \ -struct name { \ - struct type *stqh_first; /* first element */ \ - struct type **stqh_last; /* addr of last next element */ \ -} - -#define STAILQ_HEAD_INITIALIZER(head) \ - { NULL, &(head).stqh_first } - -#define STAILQ_ENTRY(type) \ -struct { \ - struct type *stqe_next; /* next element */ \ -} - -/* - * Singly-linked Tail queue functions. - */ -#define STAILQ_INIT(head) do { \ - (head)->stqh_first = NULL; \ - (head)->stqh_last = &(head)->stqh_first; \ -} while (/*CONSTCOND*/0) - -#define STAILQ_INSERT_HEAD(head, elm, field) do { \ - if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \ - (head)->stqh_last = &(elm)->field.stqe_next; \ - (head)->stqh_first = (elm); \ -} while (/*CONSTCOND*/0) - -#define STAILQ_INSERT_TAIL(head, elm, field) do { \ - (elm)->field.stqe_next = NULL; \ - *(head)->stqh_last = (elm); \ - (head)->stqh_last = &(elm)->field.stqe_next; \ -} while (/*CONSTCOND*/0) - -#define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ - if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\ - (head)->stqh_last = &(elm)->field.stqe_next; \ - (listelm)->field.stqe_next = (elm); \ -} while (/*CONSTCOND*/0) - -#define STAILQ_REMOVE_HEAD(head, field) do { \ - if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \ - (head)->stqh_last = &(head)->stqh_first; \ -} while (/*CONSTCOND*/0) - -#define STAILQ_REMOVE(head, elm, type, field) do { \ - if ((head)->stqh_first == (elm)) { \ - STAILQ_REMOVE_HEAD((head), field); \ - } else { \ - struct type *curelm = (head)->stqh_first; \ - while (curelm->field.stqe_next != (elm)) \ - curelm = curelm->field.stqe_next; \ - if ((curelm->field.stqe_next = \ - curelm->field.stqe_next->field.stqe_next) == NULL) \ - (head)->stqh_last = &(curelm)->field.stqe_next; \ - } \ -} while (/*CONSTCOND*/0) - -#define STAILQ_FOREACH(var, head, field) \ - for ((var) = ((head)->stqh_first); \ - (var); \ - (var) = ((var)->field.stqe_next)) - -/* - * Singly-linked Tail queue access methods. - */ -#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) -#define STAILQ_FIRST(head) ((head)->stqh_first) -#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) - - -/* - * Simple queue definitions. 
- */ -#define SIMPLEQ_HEAD(name, type) \ -struct name { \ - struct type *sqh_first; /* first element */ \ - struct type **sqh_last; /* addr of last next element */ \ -} - -#define SIMPLEQ_HEAD_INITIALIZER(head) \ - { NULL, &(head).sqh_first } - -#define SIMPLEQ_ENTRY(type) \ -struct { \ - struct type *sqe_next; /* next element */ \ -} - -/* - * Simple queue functions. - */ -#define SIMPLEQ_INIT(head) do { \ - (head)->sqh_first = NULL; \ - (head)->sqh_last = &(head)->sqh_first; \ -} while (/*CONSTCOND*/0) - -#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \ - if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ - (head)->sqh_last = &(elm)->field.sqe_next; \ - (head)->sqh_first = (elm); \ -} while (/*CONSTCOND*/0) - -#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \ - (elm)->field.sqe_next = NULL; \ - *(head)->sqh_last = (elm); \ - (head)->sqh_last = &(elm)->field.sqe_next; \ -} while (/*CONSTCOND*/0) - -#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ - if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\ - (head)->sqh_last = &(elm)->field.sqe_next; \ - (listelm)->field.sqe_next = (elm); \ -} while (/*CONSTCOND*/0) - -#define SIMPLEQ_REMOVE_HEAD(head, field) do { \ - if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \ - (head)->sqh_last = &(head)->sqh_first; \ -} while (/*CONSTCOND*/0) - -#define SIMPLEQ_REMOVE(head, elm, type, field) do { \ - if ((head)->sqh_first == (elm)) { \ - SIMPLEQ_REMOVE_HEAD((head), field); \ - } else { \ - struct type *curelm = (head)->sqh_first; \ - while (curelm->field.sqe_next != (elm)) \ - curelm = curelm->field.sqe_next; \ - if ((curelm->field.sqe_next = \ - curelm->field.sqe_next->field.sqe_next) == NULL) \ - (head)->sqh_last = &(curelm)->field.sqe_next; \ - } \ -} while (/*CONSTCOND*/0) - -#define SIMPLEQ_FOREACH(var, head, field) \ - for ((var) = ((head)->sqh_first); \ - (var); \ - (var) = ((var)->field.sqe_next)) - -/* - * Simple queue access methods. - */ -#define SIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL) -#define SIMPLEQ_FIRST(head) ((head)->sqh_first) -#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) - - -/* - * Tail queue definitions. - */ -#define _TAILQ_HEAD(name, type, qual) \ -struct name { \ - qual type *tqh_first; /* first element */ \ - qual type *qual *tqh_last; /* addr of last next element */ \ -} -#define TAILQ_HEAD(name, type) _TAILQ_HEAD(name, struct type,) - -#define TAILQ_HEAD_INITIALIZER(head) \ - { NULL, &(head).tqh_first } - -#define _TAILQ_ENTRY(type, qual) \ -struct { \ - qual type *tqe_next; /* next element */ \ - qual type *qual *tqe_prev; /* address of previous next element */\ -} -#define TAILQ_ENTRY(type) _TAILQ_ENTRY(struct type,) - -/* - * Tail queue functions. 
- */ -#if defined(_KERNEL) && defined(QUEUEDEBUG) -#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) \ - if ((head)->tqh_first && \ - (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \ - panic("TAILQ_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__); -#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) \ - if (*(head)->tqh_last != NULL) \ - panic("TAILQ_INSERT_TAIL %p %s:%d", (head), __FILE__, __LINE__); -#define QUEUEDEBUG_TAILQ_OP(elm, field) \ - if ((elm)->field.tqe_next && \ - (elm)->field.tqe_next->field.tqe_prev != \ - &(elm)->field.tqe_next) \ - panic("TAILQ_* forw %p %s:%d", (elm), __FILE__, __LINE__);\ - if (*(elm)->field.tqe_prev != (elm)) \ - panic("TAILQ_* back %p %s:%d", (elm), __FILE__, __LINE__); -#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) \ - if ((elm)->field.tqe_next == NULL && \ - (head)->tqh_last != &(elm)->field.tqe_next) \ - panic("TAILQ_PREREMOVE head %p elm %p %s:%d", \ - (head), (elm), __FILE__, __LINE__); -#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) \ - (elm)->field.tqe_next = (void *)1L; \ - (elm)->field.tqe_prev = (void *)1L; -#else -#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) -#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) -#define QUEUEDEBUG_TAILQ_OP(elm, field) -#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) -#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) -#endif - -#define TAILQ_INIT(head) do { \ - (head)->tqh_first = NULL; \ - (head)->tqh_last = &(head)->tqh_first; \ -} while (/*CONSTCOND*/0) - -#define TAILQ_INSERT_HEAD(head, elm, field) do { \ - QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field) \ - if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ - (head)->tqh_first->field.tqe_prev = \ - &(elm)->field.tqe_next; \ - else \ - (head)->tqh_last = &(elm)->field.tqe_next; \ - (head)->tqh_first = (elm); \ - (elm)->field.tqe_prev = &(head)->tqh_first; \ -} while (/*CONSTCOND*/0) - -#define TAILQ_INSERT_TAIL(head, elm, field) do { \ - QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field) \ - (elm)->field.tqe_next = NULL; \ - (elm)->field.tqe_prev = (head)->tqh_last; \ - *(head)->tqh_last = (elm); \ - (head)->tqh_last = &(elm)->field.tqe_next; \ -} while (/*CONSTCOND*/0) - -#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ - QUEUEDEBUG_TAILQ_OP((listelm), field) \ - if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ - (elm)->field.tqe_next->field.tqe_prev = \ - &(elm)->field.tqe_next; \ - else \ - (head)->tqh_last = &(elm)->field.tqe_next; \ - (listelm)->field.tqe_next = (elm); \ - (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ -} while (/*CONSTCOND*/0) - -#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ - QUEUEDEBUG_TAILQ_OP((listelm), field) \ - (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ - (elm)->field.tqe_next = (listelm); \ - *(listelm)->field.tqe_prev = (elm); \ - (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ -} while (/*CONSTCOND*/0) - -#define TAILQ_REMOVE(head, elm, field) do { \ - QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field) \ - QUEUEDEBUG_TAILQ_OP((elm), field) \ - if (((elm)->field.tqe_next) != NULL) \ - (elm)->field.tqe_next->field.tqe_prev = \ - (elm)->field.tqe_prev; \ - else \ - (head)->tqh_last = (elm)->field.tqe_prev; \ - *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ - QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \ -} while (/*CONSTCOND*/0) - -#define TAILQ_FOREACH(var, head, field) \ - for ((var) = ((head)->tqh_first); \ - (var); \ - (var) = ((var)->field.tqe_next)) - -#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ 
- for ((var) = TAILQ_FIRST((head)); \ - (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ - (var) = (tvar)) - -#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ - for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \ - (var); \ - (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) - -/* - * Tail queue access methods. - */ -#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) -#define TAILQ_FIRST(head) ((head)->tqh_first) -#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) - -#define TAILQ_LAST(head, headname) \ - (*(((struct headname *)((head)->tqh_last))->tqh_last)) -#define TAILQ_PREV(elm, headname, field) \ - (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) - - -/* - * Circular queue definitions. - */ -#if defined(_KERNEL) && defined(QUEUEDEBUG) -#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) \ - if ((head)->cqh_first != (void *)(head) && \ - (head)->cqh_first->field.cqe_prev != (void *)(head)) \ - panic("CIRCLEQ head forw %p %s:%d", (head), \ - __FILE__, __LINE__); \ - if ((head)->cqh_last != (void *)(head) && \ - (head)->cqh_last->field.cqe_next != (void *)(head)) \ - panic("CIRCLEQ head back %p %s:%d", (head), \ - __FILE__, __LINE__); -#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) \ - if ((elm)->field.cqe_next == (void *)(head)) { \ - if ((head)->cqh_last != (elm)) \ - panic("CIRCLEQ elm last %p %s:%d", (elm), \ - __FILE__, __LINE__); \ - } else { \ - if ((elm)->field.cqe_next->field.cqe_prev != (elm)) \ - panic("CIRCLEQ elm forw %p %s:%d", (elm), \ - __FILE__, __LINE__); \ - } \ - if ((elm)->field.cqe_prev == (void *)(head)) { \ - if ((head)->cqh_first != (elm)) \ - panic("CIRCLEQ elm first %p %s:%d", (elm), \ - __FILE__, __LINE__); \ - } else { \ - if ((elm)->field.cqe_prev->field.cqe_next != (elm)) \ - panic("CIRCLEQ elm prev %p %s:%d", (elm), \ - __FILE__, __LINE__); \ - } -#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) \ - (elm)->field.cqe_next = (void *)1L; \ - (elm)->field.cqe_prev = (void *)1L; -#else -#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) -#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) -#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) -#endif - -#define CIRCLEQ_HEAD(name, type) \ -struct name { \ - struct type *cqh_first; /* first element */ \ - struct type *cqh_last; /* last element */ \ -} - -#define CIRCLEQ_HEAD_INITIALIZER(head) \ - { (void *)&head, (void *)&head } - -#define CIRCLEQ_ENTRY(type) \ -struct { \ - struct type *cqe_next; /* next element */ \ - struct type *cqe_prev; /* previous element */ \ -} - -/* - * Circular queue functions. 
- */ -#define CIRCLEQ_INIT(head) do { \ - (head)->cqh_first = (void *)(head); \ - (head)->cqh_last = (void *)(head); \ -} while (/*CONSTCOND*/0) - -#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ - QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \ - QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \ - (elm)->field.cqe_next = (listelm)->field.cqe_next; \ - (elm)->field.cqe_prev = (listelm); \ - if ((listelm)->field.cqe_next == (void *)(head)) \ - (head)->cqh_last = (elm); \ - else \ - (listelm)->field.cqe_next->field.cqe_prev = (elm); \ - (listelm)->field.cqe_next = (elm); \ -} while (/*CONSTCOND*/0) - -#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ - QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \ - QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \ - (elm)->field.cqe_next = (listelm); \ - (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \ - if ((listelm)->field.cqe_prev == (void *)(head)) \ - (head)->cqh_first = (elm); \ - else \ - (listelm)->field.cqe_prev->field.cqe_next = (elm); \ - (listelm)->field.cqe_prev = (elm); \ -} while (/*CONSTCOND*/0) - -#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ - QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \ - (elm)->field.cqe_next = (head)->cqh_first; \ - (elm)->field.cqe_prev = (void *)(head); \ - if ((head)->cqh_last == (void *)(head)) \ - (head)->cqh_last = (elm); \ - else \ - (head)->cqh_first->field.cqe_prev = (elm); \ - (head)->cqh_first = (elm); \ -} while (/*CONSTCOND*/0) - -#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ - QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \ - (elm)->field.cqe_next = (void *)(head); \ - (elm)->field.cqe_prev = (head)->cqh_last; \ - if ((head)->cqh_first == (void *)(head)) \ - (head)->cqh_first = (elm); \ - else \ - (head)->cqh_last->field.cqe_next = (elm); \ - (head)->cqh_last = (elm); \ -} while (/*CONSTCOND*/0) - -#define CIRCLEQ_REMOVE(head, elm, field) do { \ - QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \ - QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field) \ - if ((elm)->field.cqe_next == (void *)(head)) \ - (head)->cqh_last = (elm)->field.cqe_prev; \ - else \ - (elm)->field.cqe_next->field.cqe_prev = \ - (elm)->field.cqe_prev; \ - if ((elm)->field.cqe_prev == (void *)(head)) \ - (head)->cqh_first = (elm)->field.cqe_next; \ - else \ - (elm)->field.cqe_prev->field.cqe_next = \ - (elm)->field.cqe_next; \ - QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field) \ -} while (/*CONSTCOND*/0) - -#define CIRCLEQ_FOREACH(var, head, field) \ - for ((var) = ((head)->cqh_first); \ - (var) != (const void *)(head); \ - (var) = ((var)->field.cqe_next)) - -#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \ - for ((var) = ((head)->cqh_last); \ - (var) != (const void *)(head); \ - (var) = ((var)->field.cqe_prev)) - -/* - * Circular queue access methods. - */ -#define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head)) -#define CIRCLEQ_FIRST(head) ((head)->cqh_first) -#define CIRCLEQ_LAST(head) ((head)->cqh_last) -#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next) -#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev) - -#define CIRCLEQ_LOOP_NEXT(head, elm, field) \ - (((elm)->field.cqe_next == (void *)(head)) \ - ? ((head)->cqh_first) \ - : (elm->field.cqe_next)) -#define CIRCLEQ_LOOP_PREV(head, elm, field) \ - (((elm)->field.cqe_prev == (void *)(head)) \ - ? 
((head)->cqh_last) \ - : (elm->field.cqe_prev)) - -#endif /* !_SYS_QUEUE_H_ */ diff --git a/varnish-cache/include/vqueue.h b/varnish-cache/include/vqueue.h new file mode 100644 index 00000000..89f1780f --- /dev/null +++ b/varnish-cache/include/vqueue.h @@ -0,0 +1,495 @@ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)queue.h 8.5 (Berkeley) 8/20/94 + * $FreeBSD: src/sys/sys/queue.h,v 1.68 2006/10/24 11:20:29 ru Exp $ + * $Id$ + */ + +#ifndef VARNISH_QUEUE_H +#define VARNISH_QUEUE_H + +/* + * This file defines four types of data structures: singly-linked lists, + * singly-linked tail queues, lists and tail queues. + * + * A singly-linked list is headed by a single forward pointer. The elements + * are singly linked for minimum space and pointer manipulation overhead at + * the expense of O(n) removal for arbitrary elements. New elements can be + * added to the list after an existing element or at the head of the list. + * Elements being removed from the head of the list should use the explicit + * macro for this purpose for optimum efficiency. A singly-linked list may + * only be traversed in the forward direction. Singly-linked lists are ideal + * for applications with large datasets and few or no removals or for + * implementing a LIFO queue. + * + * A singly-linked tail queue is headed by a pair of pointers, one to the + * head of the list and the other to the tail of the list. The elements are + * singly linked for minimum space and pointer manipulation overhead at the + * expense of O(n) removal for arbitrary elements. New elements can be added + * to the list after an existing element, at the head of the list, or at the + * end of the list. Elements being removed from the head of the tail queue + * should use the explicit macro for this purpose for optimum efficiency. + * A singly-linked tail queue may only be traversed in the forward direction. 
+ * Singly-linked tail queues are ideal for applications with large datasets + * and few or no removals or for implementing a FIFO queue. + * + * A list is headed by a single forward pointer (or an array of forward + * pointers for a hash table header). The elements are doubly linked + * so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before + * or after an existing element or at the head of the list. A list + * may only be traversed in the forward direction. + * + * A tail queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or + * after an existing element, at the head of the list, or at the end of + * the list. A tail queue may be traversed in either direction. + * + * For details on the use of these macros, see the queue(3) manual page. + * + * + * VSLIST VLIST VSTAILQ VTAILQ + * _HEAD + + + + + * _HEAD_INITIALIZER + + + + + * _ENTRY + + + + + * _INIT + + + + + * _EMPTY + + + + + * _FIRST + + + + + * _NEXT + + + + + * _PREV - - - + + * _LAST - - + + + * _FOREACH + + + + + * _FOREACH_SAFE + + + + + * _FOREACH_REVERSE - - - + + * _FOREACH_REVERSE_SAFE - - - + + * _INSERT_HEAD + + + + + * _INSERT_BEFORE - + - + + * _INSERT_AFTER + + + + + * _INSERT_TAIL - - + + + * _CONCAT - - + + + * _REMOVE_HEAD + - + - + * _REMOVE + + + + + * + */ + +/* + * Singly-linked List declarations. + */ +#define VSLIST_HEAD(name, type) \ +struct name { \ + struct type *vslh_first; /* first element */ \ +} + +#define VSLIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define VSLIST_ENTRY(type) \ +struct { \ + struct type *vsle_next; /* next element */ \ +} + +/* + * Singly-linked List functions. + */ +#define VSLIST_EMPTY(head) ((head)->vslh_first == NULL) + +#define VSLIST_FIRST(head) ((head)->vslh_first) + +#define VSLIST_FOREACH(var, head, field) \ + for ((var) = VSLIST_FIRST((head)); \ + (var); \ + (var) = VSLIST_NEXT((var), field)) + +#define VSLIST_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = VSLIST_FIRST((head)); \ + (var) && ((tvar) = VSLIST_NEXT((var), field), 1); \ + (var) = (tvar)) + +#define VSLIST_FOREACH_PREVPTR(var, varp, head, field) \ + for ((varp) = &VSLIST_FIRST((head)); \ + ((var) = *(varp)) != NULL; \ + (varp) = &VSLIST_NEXT((var), field)) + +#define VSLIST_INIT(head) do { \ + VSLIST_FIRST((head)) = NULL; \ +} while (0) + +#define VSLIST_INSERT_AFTER(slistelm, elm, field) do { \ + VSLIST_NEXT((elm), field) = VSLIST_NEXT((slistelm), field); \ + VSLIST_NEXT((slistelm), field) = (elm); \ +} while (0) + +#define VSLIST_INSERT_HEAD(head, elm, field) do { \ + VSLIST_NEXT((elm), field) = VSLIST_FIRST((head)); \ + VSLIST_FIRST((head)) = (elm); \ +} while (0) + +#define VSLIST_NEXT(elm, field) ((elm)->field.vsle_next) + +#define VSLIST_REMOVE(head, elm, type, field) do { \ + if (VSLIST_FIRST((head)) == (elm)) { \ + VSLIST_REMOVE_HEAD((head), field); \ + } \ + else { \ + struct type *curelm = VSLIST_FIRST((head)); \ + while (VSLIST_NEXT(curelm, field) != (elm)) \ + curelm = VSLIST_NEXT(curelm, field); \ + VSLIST_NEXT(curelm, field) = \ + VSLIST_NEXT(VSLIST_NEXT(curelm, field), field); \ + } \ +} while (0) + +#define VSLIST_REMOVE_HEAD(head, field) do { \ + VSLIST_FIRST((head)) = VSLIST_NEXT(VSLIST_FIRST((head)), field);\ +} while (0) + +/* + * Singly-linked Tail queue declarations. 
+ */ +#define VSTAILQ_HEAD(name, type) \ +struct name { \ + struct type *vstqh_first;/* first element */ \ + struct type **vstqh_last;/* addr of last next element */ \ +} + +#define VSTAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).vstqh_first } + +#define VSTAILQ_ENTRY(type) \ +struct { \ + struct type *vstqe_next; /* next element */ \ +} + +/* + * Singly-linked Tail queue functions. + */ +#define VSTAILQ_CONCAT(head1, head2) do { \ + if (!VSTAILQ_EMPTY((head2))) { \ + *(head1)->vstqh_last = (head2)->vstqh_first; \ + (head1)->vstqh_last = (head2)->vstqh_last; \ + VSTAILQ_INIT((head2)); \ + } \ +} while (0) + +#define VSTAILQ_EMPTY(head) ((head)->vstqh_first == NULL) + +#define VSTAILQ_FIRST(head) ((head)->vstqh_first) + +#define VSTAILQ_FOREACH(var, head, field) \ + for((var) = VSTAILQ_FIRST((head)); \ + (var); \ + (var) = VSTAILQ_NEXT((var), field)) + + +#define VSTAILQ_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = VSTAILQ_FIRST((head)); \ + (var) && ((tvar) = VSTAILQ_NEXT((var), field), 1); \ + (var) = (tvar)) + +#define VSTAILQ_INIT(head) do { \ + VSTAILQ_FIRST((head)) = NULL; \ + (head)->vstqh_last = &VSTAILQ_FIRST((head)); \ +} while (0) + +#define VSTAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ + if ((VSTAILQ_NEXT((elm), field) = VSTAILQ_NEXT((tqelm), field)) == NULL)\ + (head)->vstqh_last = &VSTAILQ_NEXT((elm), field); \ + VSTAILQ_NEXT((tqelm), field) = (elm); \ +} while (0) + +#define VSTAILQ_INSERT_HEAD(head, elm, field) do { \ + if ((VSTAILQ_NEXT((elm), field) = VSTAILQ_FIRST((head))) == NULL)\ + (head)->vstqh_last = &VSTAILQ_NEXT((elm), field); \ + VSTAILQ_FIRST((head)) = (elm); \ +} while (0) + +#define VSTAILQ_INSERT_TAIL(head, elm, field) do { \ + VSTAILQ_NEXT((elm), field) = NULL; \ + *(head)->vstqh_last = (elm); \ + (head)->vstqh_last = &VSTAILQ_NEXT((elm), field); \ +} while (0) + +#define VSTAILQ_LAST(head, type, field) \ + (VSTAILQ_EMPTY((head)) ? \ + NULL : \ + ((struct type *)(void *) \ + ((char *)((head)->vstqh_last) - __offsetof(struct type, field)))) + +#define VSTAILQ_NEXT(elm, field) ((elm)->field.vstqe_next) + +#define VSTAILQ_REMOVE(head, elm, type, field) do { \ + if (VSTAILQ_FIRST((head)) == (elm)) { \ + VSTAILQ_REMOVE_HEAD((head), field); \ + } \ + else { \ + struct type *curelm = VSTAILQ_FIRST((head)); \ + while (VSTAILQ_NEXT(curelm, field) != (elm)) \ + curelm = VSTAILQ_NEXT(curelm, field); \ + if ((VSTAILQ_NEXT(curelm, field) = \ + VSTAILQ_NEXT(VSTAILQ_NEXT(curelm, field), field)) == NULL)\ + (head)->vstqh_last = &VSTAILQ_NEXT((curelm), field);\ + } \ +} while (0) + +#define VSTAILQ_REMOVE_HEAD(head, field) do { \ + if ((VSTAILQ_FIRST((head)) = \ + VSTAILQ_NEXT(VSTAILQ_FIRST((head)), field)) == NULL) \ + (head)->vstqh_last = &VSTAILQ_FIRST((head)); \ +} while (0) + +/* + * List declarations. + */ +#define VLIST_HEAD(name, type) \ +struct name { \ + struct type *vlh_first; /* first element */ \ +} + +#define VLIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define VLIST_ENTRY(type) \ +struct { \ + struct type *vle_next; /* next element */ \ + struct type **vle_prev; /* address of previous next element */ \ +} + +/* + * List functions. 
+ */ +#define VLIST_EMPTY(head) ((head)->vlh_first == NULL) + +#define VLIST_FIRST(head) ((head)->vlh_first) + +#define VLIST_FOREACH(var, head, field) \ + for ((var) = VLIST_FIRST((head)); \ + (var); \ + (var) = VLIST_NEXT((var), field)) + +#define VLIST_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = VLIST_FIRST((head)); \ + (var) && ((tvar) = VLIST_NEXT((var), field), 1); \ + (var) = (tvar)) + +#define VLIST_INIT(head) do { \ + VLIST_FIRST((head)) = NULL; \ +} while (0) + +#define VLIST_INSERT_AFTER(listelm, elm, field) do { \ + if ((VLIST_NEXT((elm), field) = VLIST_NEXT((listelm), field)) != NULL)\ + VLIST_NEXT((listelm), field)->field.vle_prev = \ + &VLIST_NEXT((elm), field); \ + VLIST_NEXT((listelm), field) = (elm); \ + (elm)->field.vle_prev = &VLIST_NEXT((listelm), field); \ +} while (0) + +#define VLIST_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.vle_prev = (listelm)->field.vle_prev; \ + VLIST_NEXT((elm), field) = (listelm); \ + *(listelm)->field.vle_prev = (elm); \ + (listelm)->field.vle_prev = &VLIST_NEXT((elm), field); \ +} while (0) + +#define VLIST_INSERT_HEAD(head, elm, field) do { \ + if ((VLIST_NEXT((elm), field) = VLIST_FIRST((head))) != NULL) \ + VLIST_FIRST((head))->field.vle_prev = &VLIST_NEXT((elm), field);\ + VLIST_FIRST((head)) = (elm); \ + (elm)->field.vle_prev = &VLIST_FIRST((head)); \ +} while (0) + +#define VLIST_NEXT(elm, field) ((elm)->field.vle_next) + +#define VLIST_REMOVE(elm, field) do { \ + if (VLIST_NEXT((elm), field) != NULL) \ + VLIST_NEXT((elm), field)->field.vle_prev = \ + (elm)->field.vle_prev; \ + *(elm)->field.vle_prev = VLIST_NEXT((elm), field); \ +} while (0) + +/* + * Tail queue declarations. + */ +#define VTAILQ_HEAD(name, type) \ +struct name { \ + struct type *vtqh_first; /* first element */ \ + struct type **vtqh_last; /* addr of last next element */ \ +} + +#define VTAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).vtqh_first } + +#define VTAILQ_ENTRY(type) \ +struct { \ + struct type *vtqe_next; /* next element */ \ + struct type **vtqe_prev; /* address of previous next element */\ +} + +/* + * Tail queue functions. 
+ */ +#define VTAILQ_CONCAT(head1, head2, field) do { \ + if (!VTAILQ_EMPTY(head2)) { \ + *(head1)->vtqh_last = (head2)->vtqh_first; \ + (head2)->vtqh_first->field.vtqe_prev = (head1)->vtqh_last;\ + (head1)->vtqh_last = (head2)->vtqh_last; \ + VTAILQ_INIT((head2)); \ + } \ +} while (0) + +#define VTAILQ_EMPTY(head) ((head)->vtqh_first == NULL) + +#define VTAILQ_FIRST(head) ((head)->vtqh_first) + +#define VTAILQ_FOREACH(var, head, field) \ + for ((var) = VTAILQ_FIRST((head)); \ + (var); \ + (var) = VTAILQ_NEXT((var), field)) + +#define VTAILQ_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = VTAILQ_FIRST((head)); \ + (var) && ((tvar) = VTAILQ_NEXT((var), field), 1); \ + (var) = (tvar)) + +#define VTAILQ_FOREACH_REVERSE(var, head, headname, field) \ + for ((var) = VTAILQ_LAST((head), headname); \ + (var); \ + (var) = VTAILQ_PREV((var), headname, field)) + +#define VTAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \ + for ((var) = VTAILQ_LAST((head), headname); \ + (var) && ((tvar) = VTAILQ_PREV((var), headname, field), 1); \ + (var) = (tvar)) + +#define VTAILQ_INIT(head) do { \ + VTAILQ_FIRST((head)) = NULL; \ + (head)->vtqh_last = &VTAILQ_FIRST((head)); \ +} while (0) + +#define VTAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if ((VTAILQ_NEXT((elm), field) = VTAILQ_NEXT((listelm), field)) != NULL)\ + VTAILQ_NEXT((elm), field)->field.vtqe_prev = \ + &VTAILQ_NEXT((elm), field); \ + else { \ + (head)->vtqh_last = &VTAILQ_NEXT((elm), field); \ + } \ + VTAILQ_NEXT((listelm), field) = (elm); \ + (elm)->field.vtqe_prev = &VTAILQ_NEXT((listelm), field); \ +} while (0) + +#define VTAILQ_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.vtqe_prev = (listelm)->field.vtqe_prev; \ + VTAILQ_NEXT((elm), field) = (listelm); \ + *(listelm)->field.vtqe_prev = (elm); \ + (listelm)->field.vtqe_prev = &VTAILQ_NEXT((elm), field); \ +} while (0) + +#define VTAILQ_INSERT_HEAD(head, elm, field) do { \ + if ((VTAILQ_NEXT((elm), field) = VTAILQ_FIRST((head))) != NULL) \ + VTAILQ_FIRST((head))->field.vtqe_prev = \ + &VTAILQ_NEXT((elm), field); \ + else \ + (head)->vtqh_last = &VTAILQ_NEXT((elm), field); \ + VTAILQ_FIRST((head)) = (elm); \ + (elm)->field.vtqe_prev = &VTAILQ_FIRST((head)); \ +} while (0) + +#define VTAILQ_INSERT_TAIL(head, elm, field) do { \ + VTAILQ_NEXT((elm), field) = NULL; \ + (elm)->field.vtqe_prev = (head)->vtqh_last; \ + *(head)->vtqh_last = (elm); \ + (head)->vtqh_last = &VTAILQ_NEXT((elm), field); \ +} while (0) + +#define VTAILQ_LAST(head, headname) \ + (*(((struct headname *)((head)->vtqh_last))->vtqh_last)) + +#define VTAILQ_NEXT(elm, field) ((elm)->field.vtqe_next) + +#define VTAILQ_PREV(elm, headname, field) \ + (*(((struct headname *)((elm)->field.vtqe_prev))->vtqh_last)) + +#define VTAILQ_REMOVE(head, elm, field) do { \ + if ((VTAILQ_NEXT((elm), field)) != NULL) \ + VTAILQ_NEXT((elm), field)->field.vtqe_prev = \ + (elm)->field.vtqe_prev; \ + else { \ + (head)->vtqh_last = (elm)->field.vtqe_prev; \ + } \ + *(elm)->field.vtqe_prev = VTAILQ_NEXT((elm), field); \ +} while (0) + + +#ifdef _KERNEL + +/* + * XXX insque() and remque() are an old way of handling certain queues. + * They bogusly assumes that all queue heads look alike. 
+ */ + +struct quehead { + struct quehead *qh_link; + struct quehead *qh_rlink; +}; + +#ifdef __CC_SUPPORTS___INLINE + +static __inline void +insque(void *a, void *b) +{ + struct quehead *element = (struct quehead *)a, + *head = (struct quehead *)b; + + element->qh_link = head->qh_link; + element->qh_rlink = head; + head->qh_link = element; + element->qh_link->qh_rlink = element; +} + +static __inline void +remque(void *a) +{ + struct quehead *element = (struct quehead *)a; + + element->qh_link->qh_rlink = element->qh_rlink; + element->qh_rlink->qh_link = element->qh_link; + element->qh_rlink = 0; +} + +#else /* !__CC_SUPPORTS___INLINE */ + +void insque(void *a, void *b); +void remque(void *a); + +#endif /* __CC_SUPPORTS___INLINE */ + +#endif /* _KERNEL */ + +#endif /* !VARNISH_QUEUE_H */ diff --git a/varnish-cache/lib/libvcl/vcc_backend.c b/varnish-cache/lib/libvcl/vcc_backend.c index fe64cb3c..f0733371 100644 --- a/varnish-cache/lib/libvcl/vcc_backend.c +++ b/varnish-cache/lib/libvcl/vcc_backend.c @@ -79,7 +79,7 @@ vcc_EmitBeIdent(struct tokenlist *tl, struct token *first, struct token *last) PF(first)); else Fc(tl, 0, "\n\t \"%.*s \"", PF(first)); - first = TAILQ_NEXT(first, list); + first = VTAILQ_NEXT(first, list); } Fc(tl, 0, ",\n"); } diff --git a/varnish-cache/lib/libvcl/vcc_compile.c b/varnish-cache/lib/libvcl/vcc_compile.c index f3f83978..7c66800d 100644 --- a/varnish-cache/lib/libvcl/vcc_compile.c +++ b/varnish-cache/lib/libvcl/vcc_compile.c @@ -71,11 +71,7 @@ #include #include -#ifdef HAVE_SYS_QUEUE_H -#include -#else -#include "queue.h" -#endif +#include "vqueue.h" #include "vsb.h" @@ -108,7 +104,7 @@ TlFree(struct tokenlist *tl, void *p) mb = calloc(sizeof *mb, 1); assert(mb != NULL); mb->ptr = p; - TAILQ_INSERT_TAIL(&tl->membits, mb, list); + VTAILQ_INSERT_TAIL(&tl->membits, mb, list); } @@ -264,7 +260,7 @@ LocTable(const struct tokenlist *tl) pos = 0; sp = 0; p = NULL; - TAILQ_FOREACH(t, &tl->tokens, list) { + VTAILQ_FOREACH(t, &tl->tokens, list) { if (t->cnt == 0) continue; assert(t->src != NULL); @@ -325,7 +321,7 @@ EmitStruct(const struct tokenlist *tl) struct source *sp; Fc(tl, 0, "\nconst char *srcname[%u] = {\n", tl->nsources); - TAILQ_FOREACH(sp, &tl->sources, list) { + VTAILQ_FOREACH(sp, &tl->sources, list) { Fc(tl, 0, "\t"); EncString(tl->fc, sp->name, NULL, 0); Fc(tl, 0, ",\n"); @@ -333,7 +329,7 @@ EmitStruct(const struct tokenlist *tl) Fc(tl, 0, "};\n"); Fc(tl, 0, "\nconst char *srcbody[%u] = {\n", tl->nsources); - TAILQ_FOREACH(sp, &tl->sources, list) { + VTAILQ_FOREACH(sp, &tl->sources, list) { Fc(tl, 0, " /* "); EncString(tl->fc, sp->name, NULL, 0); Fc(tl, 0, "*/\n"); @@ -431,11 +427,11 @@ vcc_resolve_includes(struct tokenlist *tl) struct token *t, *t1, *t2; struct source *sp; - TAILQ_FOREACH(t, &tl->tokens, list) { + VTAILQ_FOREACH(t, &tl->tokens, list) { if (t->tok != T_INCLUDE) continue; - t1 = TAILQ_NEXT(t, list); + t1 = VTAILQ_NEXT(t, list); assert(t1 != NULL); /* There's always an EOI */ if (t1->tok != CSTR) { vsb_printf(tl->sb, @@ -443,7 +439,7 @@ vcc_resolve_includes(struct tokenlist *tl) vcc_ErrWhere(tl, t1); return; } - t2 = TAILQ_NEXT(t1, list); + t2 = VTAILQ_NEXT(t1, list); assert(t2 != NULL); /* There's always an EOI */ if (t2->tok != ';') { vsb_printf(tl->sb, @@ -458,14 +454,14 @@ vcc_resolve_includes(struct tokenlist *tl) vcc_ErrWhere(tl, t1); return; } - TAILQ_INSERT_TAIL(&tl->sources, sp, list); + VTAILQ_INSERT_TAIL(&tl->sources, sp, list); sp->idx = tl->nsources++; tl->t = t2; vcc_Lexer(tl, sp); - TAILQ_REMOVE(&tl->tokens, t, list); - 
TAILQ_REMOVE(&tl->tokens, t1, list); - TAILQ_REMOVE(&tl->tokens, t2, list); + VTAILQ_REMOVE(&tl->tokens, t, list); + VTAILQ_REMOVE(&tl->tokens, t1, list); + VTAILQ_REMOVE(&tl->tokens, t2, list); if (!tl->err) vcc_resolve_includes(tl); return; @@ -482,11 +478,11 @@ vcc_NewTokenList(void) tl = calloc(sizeof *tl, 1); assert(tl != NULL); - TAILQ_INIT(&tl->membits); - TAILQ_INIT(&tl->tokens); - TAILQ_INIT(&tl->refs); - TAILQ_INIT(&tl->procs); - TAILQ_INIT(&tl->sources); + VTAILQ_INIT(&tl->membits); + VTAILQ_INIT(&tl->tokens); + VTAILQ_INIT(&tl->refs); + VTAILQ_INIT(&tl->procs); + VTAILQ_INIT(&tl->sources); tl->nsources = 0; @@ -523,15 +519,15 @@ vcc_DestroyTokenList(struct tokenlist *tl, char *ret) struct source *sp; int i; - while (!TAILQ_EMPTY(&tl->membits)) { - mb = TAILQ_FIRST(&tl->membits); - TAILQ_REMOVE(&tl->membits, mb, list); + while (!VTAILQ_EMPTY(&tl->membits)) { + mb = VTAILQ_FIRST(&tl->membits); + VTAILQ_REMOVE(&tl->membits, mb, list); free(mb->ptr); free(mb); } - while (!TAILQ_EMPTY(&tl->sources)) { - sp = TAILQ_FIRST(&tl->sources); - TAILQ_REMOVE(&tl->sources, sp, list); + while (!VTAILQ_EMPTY(&tl->sources)) { + sp = VTAILQ_FIRST(&tl->sources); + VTAILQ_REMOVE(&tl->sources, sp, list); vcc_destroy_source(sp); } @@ -565,7 +561,7 @@ vcc_CompileSource(struct vsb *sb, struct source *sp) Fh(tl, 0, "\nextern struct VCL_conf VCL_conf;\n"); /* Register and lex the main source */ - TAILQ_INSERT_TAIL(&tl->sources, sp, list); + VTAILQ_INSERT_TAIL(&tl->sources, sp, list); sp->idx = tl->nsources++; vcc_Lexer(tl, sp); if (tl->err) @@ -574,7 +570,7 @@ vcc_CompileSource(struct vsb *sb, struct source *sp) /* Register and lex the default VCL */ sp = vcc_new_source(vcc_default_vcl_b, vcc_default_vcl_e, "Default"); assert(sp != NULL); - TAILQ_INSERT_TAIL(&tl->sources, sp, list); + VTAILQ_INSERT_TAIL(&tl->sources, sp, list); sp->idx = tl->nsources++; vcc_Lexer(tl, sp); if (tl->err) @@ -591,7 +587,7 @@ vcc_CompileSource(struct vsb *sb, struct source *sp) return (vcc_DestroyTokenList(tl, NULL)); /* Parse the token string */ - tl->t = TAILQ_FIRST(&tl->tokens); + tl->t = VTAILQ_FIRST(&tl->tokens); vcc_Parse(tl); if (tl->err) return (vcc_DestroyTokenList(tl, NULL)); diff --git a/varnish-cache/lib/libvcl/vcc_compile.h b/varnish-cache/lib/libvcl/vcc_compile.h index 0ae1ec64..56f8407f 100644 --- a/varnish-cache/lib/libvcl/vcc_compile.h +++ b/varnish-cache/lib/libvcl/vcc_compile.h @@ -29,23 +29,19 @@ * $Id$ */ -#ifdef HAVE_SYS_QUEUE_H -#include -#else -#include "queue.h" -#endif +#include "vqueue.h" #include "vcl_returns.h" #define INDENT 2 struct membit { - TAILQ_ENTRY(membit) list; + VTAILQ_ENTRY(membit) list; void *ptr; }; struct source { - TAILQ_ENTRY(source) list; + VTAILQ_ENTRY(source) list; char *name; const char *b; const char *e; @@ -58,17 +54,17 @@ struct token { const char *b; const char *e; struct source *src; - TAILQ_ENTRY(token) list; + VTAILQ_ENTRY(token) list; unsigned cnt; char *dec; }; -TAILQ_HEAD(tokenhead, token); +VTAILQ_HEAD(tokenhead, token); struct tokenlist { struct tokenhead tokens; - TAILQ_HEAD(, source) sources; - TAILQ_HEAD(, membit) membits; + VTAILQ_HEAD(, source) sources; + VTAILQ_HEAD(, membit) membits; unsigned nsources; struct source *src; struct token *t; @@ -79,11 +75,11 @@ struct tokenlist { unsigned cnt; struct vsb *fc, *fh, *fi, *ff, *fb; struct vsb *fm[N_METHODS]; - TAILQ_HEAD(, ref) refs; + VTAILQ_HEAD(, ref) refs; struct vsb *sb; int err; int nbackend; - TAILQ_HEAD(, proc) procs; + VTAILQ_HEAD(, proc) procs; struct proc *curproc; struct proc *mprocs[N_METHODS]; @@ 
-125,7 +121,7 @@ struct ref { struct token *name; unsigned defcnt; unsigned refcnt; - TAILQ_ENTRY(ref) list; + VTAILQ_ENTRY(ref) list; }; struct var { diff --git a/varnish-cache/lib/libvcl/vcc_token.c b/varnish-cache/lib/libvcl/vcc_token.c index d193be5c..4607cdeb 100644 --- a/varnish-cache/lib/libvcl/vcc_token.c +++ b/varnish-cache/lib/libvcl/vcc_token.c @@ -34,11 +34,7 @@ #include #include -#ifdef HAVE_SYS_QUEUE_H -#include -#else -#include "queue.h" -#endif +#include "vqueue.h" #include "vsb.h" @@ -138,7 +134,7 @@ void vcc_NextToken(struct tokenlist *tl) { - tl->t = TAILQ_NEXT(tl->t, list); + tl->t = VTAILQ_NEXT(tl->t, list); if (tl->t == NULL) { vsb_printf(tl->sb, "Ran out of input, something is missing or" @@ -267,9 +263,9 @@ vcc_AddToken(struct tokenlist *tl, unsigned tok, const char *b, const char *e) t->e = e; t->src = tl->src; if (tl->t != NULL) - TAILQ_INSERT_AFTER(&tl->tokens, tl->t, t, list); + VTAILQ_INSERT_AFTER(&tl->tokens, tl->t, t, list); else - TAILQ_INSERT_TAIL(&tl->tokens, t, list); + VTAILQ_INSERT_TAIL(&tl->tokens, t, list); tl->t = t; if (0) { fprintf(stderr, "[%s %.*s] ", diff --git a/varnish-cache/lib/libvcl/vcc_xref.c b/varnish-cache/lib/libvcl/vcc_xref.c index 6755b6e3..034e89a2 100644 --- a/varnish-cache/lib/libvcl/vcc_xref.c +++ b/varnish-cache/lib/libvcl/vcc_xref.c @@ -50,21 +50,21 @@ /*--------------------------------------------------------------------*/ struct proccall { - TAILQ_ENTRY(proccall) list; + VTAILQ_ENTRY(proccall) list; struct proc *p; struct token *t; }; struct procuse { - TAILQ_ENTRY(procuse) list; + VTAILQ_ENTRY(procuse) list; struct token *t; struct var *v; }; struct proc { - TAILQ_ENTRY(proc) list; - TAILQ_HEAD(,proccall) calls; - TAILQ_HEAD(,procuse) uses; + VTAILQ_ENTRY(proc) list; + VTAILQ_HEAD(,proccall) calls; + VTAILQ_HEAD(,procuse) uses; struct token *name; unsigned returns; unsigned exists; @@ -101,7 +101,7 @@ vcc_findref(struct tokenlist *tl, struct token *t, enum ref_type type) { struct ref *r; - TAILQ_FOREACH(r, &tl->refs, list) { + VTAILQ_FOREACH(r, &tl->refs, list) { if (r->type != type) continue; if (vcc_Teq(r->name, t)) @@ -111,7 +111,7 @@ vcc_findref(struct tokenlist *tl, struct token *t, enum ref_type type) assert(r != NULL); r->name = t; r->type = type; - TAILQ_INSERT_TAIL(&tl->refs, r, list); + VTAILQ_INSERT_TAIL(&tl->refs, r, list); return (r); } @@ -154,7 +154,7 @@ vcc_CheckReferences(struct tokenlist *tl) const char *type; int nerr = 0; - TAILQ_FOREACH(r, &tl->refs, list) { + VTAILQ_FOREACH(r, &tl->refs, list) { if (r->defcnt != 0 && r->refcnt != 0) continue; nerr++; @@ -185,14 +185,14 @@ vcc_findproc(struct tokenlist *tl, struct token *t) { struct proc *p; - TAILQ_FOREACH(p, &tl->procs, list) + VTAILQ_FOREACH(p, &tl->procs, list) if (vcc_Teq(p->name, t)) return (p); p = TlAlloc(tl, sizeof *p); assert(p != NULL); - TAILQ_INIT(&p->calls); - TAILQ_INIT(&p->uses); - TAILQ_INSERT_TAIL(&tl->procs, p, list); + VTAILQ_INIT(&p->calls); + VTAILQ_INIT(&p->uses); + VTAILQ_INSERT_TAIL(&tl->procs, p, list); p->name = t; return (p); } @@ -219,7 +219,7 @@ vcc_AddUses(struct tokenlist *tl, struct var *v) assert(pu != NULL); pu->v = v; pu->t = tl->t; - TAILQ_INSERT_TAIL(&tl->curproc->uses, pu, list); + VTAILQ_INSERT_TAIL(&tl->curproc->uses, pu, list); } void @@ -233,7 +233,7 @@ vcc_AddCall(struct tokenlist *tl, struct token *t) assert(pc != NULL); pc->p = p; pc->t = t; - TAILQ_INSERT_TAIL(&tl->curproc->calls, pc, list); + VTAILQ_INSERT_TAIL(&tl->curproc->calls, pc, list); } void @@ -279,7 +279,7 @@ vcc_CheckActionRecurse(struct tokenlist 
*tl, struct proc *p, unsigned returns) return (1); } p->active = 1; - TAILQ_FOREACH(pc, &p->calls, list) { + VTAILQ_FOREACH(pc, &p->calls, list) { if (vcc_CheckActionRecurse(tl, pc->p, returns)) { vsb_printf(tl->sb, "\n...called from \"%.*s\"\n", PF(p->name)); @@ -299,7 +299,7 @@ vcc_CheckAction(struct tokenlist *tl) struct method *m; int i; - TAILQ_FOREACH(p, &tl->procs, list) { + VTAILQ_FOREACH(p, &tl->procs, list) { i = IsMethod(p->name); if (i < 0) continue; @@ -321,7 +321,7 @@ vcc_CheckAction(struct tokenlist *tl) return (1); } } - TAILQ_FOREACH(p, &tl->procs, list) { + VTAILQ_FOREACH(p, &tl->procs, list) { if (p->called) continue; vsb_printf(tl->sb, "Function unused\n"); @@ -336,7 +336,7 @@ vcc_FindIllegalUse(const struct proc *p, const struct method *m) { struct procuse *pu; - TAILQ_FOREACH(pu, &p->uses, list) + VTAILQ_FOREACH(pu, &p->uses, list) if (!(pu->v->methods & m->bitval)) return (pu); return (NULL); @@ -359,7 +359,7 @@ vcc_CheckUseRecurse(struct tokenlist *tl, const struct proc *p, struct method *m vcc_ErrWhere(tl, p->name); return (1); } - TAILQ_FOREACH(pc, &p->calls, list) { + VTAILQ_FOREACH(pc, &p->calls, list) { if (vcc_CheckUseRecurse(tl, pc->p, m)) { vsb_printf(tl->sb, "\n...called from \"%.*s\"\n", PF(p->name)); @@ -378,7 +378,7 @@ vcc_CheckUses(struct tokenlist *tl) struct procuse *pu; int i; - TAILQ_FOREACH(p, &tl->procs, list) { + VTAILQ_FOREACH(p, &tl->procs, list) { i = IsMethod(p->name); if (i < 0) continue; -- 2.39.5
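
Editor's note (not part of the patch): the new "vqueue.h" above defines the VTAILQ macros that this commit substitutes for TAILQ throughout the tree. As an illustrative sketch only, the following standalone program exercises the tail-queue family the same way cache.h and varnishtop.c do; it assumes "vqueue.h" is on the include path, and "struct item" and its fields are hypothetical names, not anything from the Varnish sources.

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#include "vqueue.h"

/* A queue element carries its own linkage, as the structs in cache.h do. */
struct item {
	int			val;
	VTAILQ_ENTRY(item)	list;
};

/* Naming the head type ("itemhead") is required by the _REVERSE/_PREV macros. */
static VTAILQ_HEAD(itemhead, item) head = VTAILQ_HEAD_INITIALIZER(head);

int
main(void)
{
	struct item *ip, *ip2;
	int i;

	/* Insert four elements at the tail. */
	for (i = 0; i < 4; i++) {
		ip = calloc(1, sizeof *ip);
		assert(ip != NULL);
		ip->val = i;
		VTAILQ_INSERT_TAIL(&head, ip, list);
	}

	/* Forward and reverse traversal. */
	VTAILQ_FOREACH(ip, &head, list)
		printf("%d ", ip->val);		/* prints: 0 1 2 3 */
	printf("\n");
	VTAILQ_FOREACH_REVERSE(ip, &head, itemhead, list)
		printf("%d ", ip->val);		/* prints: 3 2 1 0 */
	printf("\n");

	/* Safe traversal: the current element may be removed and freed. */
	VTAILQ_FOREACH_SAFE(ip, &head, list, ip2) {
		VTAILQ_REMOVE(&head, ip, list);
		free(ip);
	}
	assert(VTAILQ_EMPTY(&head));
	return (0);
}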
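
Likewise for the singly-linked tail queue family: the sketch below mirrors the mailbox pattern in varnishreplay.c (tail insert by the producer, head removal by the consumer), which is what makes VSTAILQ a natural FIFO. Again this is illustrative only; "struct msg" is a hypothetical element type and the pthread locking used by the real mailbox code is deliberately omitted.

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#include "vqueue.h"

struct msg {
	int			seq;
	VSTAILQ_ENTRY(msg)	list;
};

static VSTAILQ_HEAD(msgq_head, msg) q = VSTAILQ_HEAD_INITIALIZER(q);

int
main(void)
{
	struct msg *m;
	int i;

	/* Producer side: append at the tail, O(1) via the vstqh_last pointer. */
	for (i = 0; i < 3; i++) {
		m = calloc(1, sizeof *m);
		assert(m != NULL);
		m->seq = i;
		VSTAILQ_INSERT_TAIL(&q, m, list);
	}

	/* Consumer side: always take from the head, preserving FIFO order. */
	while ((m = VSTAILQ_FIRST(&q)) != NULL) {
		VSTAILQ_REMOVE_HEAD(&q, list);
		printf("%d\n", m->seq);		/* prints 0, 1, 2 */
		free(m);
	}
	assert(VSTAILQ_EMPTY(&q));
	return (0);
}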