Index: sys/modules/Makefile
===================================================================
--- sys/modules/Makefile	(revision 195780)
+++ sys/modules/Makefile	(revision 197452)
@@ -22,6 +22,7 @@
 	${_amdtemp} \
 	alc \
 	ale \
+	alq \
 	amr \
 	${_an} \
 	${_aout} \
Index: sys/modules/alq/Makefile
===================================================================
--- sys/modules/alq/Makefile	(revision 0)
+++ sys/modules/alq/Makefile	(revision 197452)
@@ -0,0 +1,9 @@
+# $FreeBSD$
+
+.include <bsd.own.mk>
+
+.PATH: ${.CURDIR}/../../kern
+KMOD=	alq
+SRCS=	opt_mac.h vnode_if.h kern_alq.c
+
+.include <bsd.kmod.mk>
Index: sys/kern/kern_alq.c
===================================================================
--- sys/kern/kern_alq.c	(revision 195780)
+++ sys/kern/kern_alq.c	(revision 197452)
@@ -1,5 +1,6 @@
 /*-
  * Copyright (c) 2002, Jeffrey Roberson
+ * Copyright (c) 2008-2009, Lawrence Stewart
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,6 +28,8 @@
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
+#include "opt_mac.h"
+
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
@@ -49,14 +52,17 @@
 struct alq {
 	int	aq_entmax;		/* Max entries */
 	int	aq_entlen;		/* Entry length */
+	int	aq_freebytes;		/* Bytes available in buffer */
+	int	aq_buflen;		/* Total length of our buffer */
 	char	*aq_entbuf;		/* Buffer for stored entries */
+	int	aq_writehead;
+	int	aq_writetail;
+	int	aq_wrapearly;		/* # bytes left blank at end of buf */
 	int	aq_flags;		/* Queue flags */
+	struct	ale	aq_getpost;	/* ALE for use by get/post */
 	struct	mtx	aq_mtx;		/* Queue lock */
 	struct	vnode	*aq_vp;		/* Open vnode handle */
 	struct	ucred	*aq_cred;	/* Credentials of the opening thread */
-	struct	ale	*aq_first;	/* First ent */
-	struct	ale	*aq_entfree;	/* First free ent */
-	struct	ale	*aq_entvalid;	/* First ent valid for writing */
 	LIST_ENTRY(alq)	aq_act;		/* List of active queues */
 	LIST_ENTRY(alq)	aq_link;	/* List of all queues */
 };
@@ -69,6 +75,8 @@
 #define	ALQ_LOCK(alq)	mtx_lock_spin(&(alq)->aq_mtx)
 #define	ALQ_UNLOCK(alq)	mtx_unlock_spin(&(alq)->aq_mtx)
 
+#define	ALQ_HAS_PENDING_DATA(alq) ((alq)->aq_freebytes != (alq)->aq_buflen)
+
 static MALLOC_DEFINE(M_ALD, "ALD", "ALD");
 
 /*
@@ -78,7 +86,6 @@
 static LIST_HEAD(, alq) ald_queues;
 static LIST_HEAD(, alq) ald_active;
 static int ald_shutingdown = 0;
-struct thread *ald_thread;
 static struct proc *ald_proc;
 
 #define	ALD_LOCK()	mtx_lock(&ald_mtx)
@@ -172,26 +179,33 @@
 	int needwakeup;
 	struct alq *alq;
 
-	ald_thread = FIRST_THREAD_IN_PROC(ald_proc);
-
 	EVENTHANDLER_REGISTER(shutdown_pre_sync, ald_shutdown, NULL,
 	    SHUTDOWN_PRI_FIRST);
 
 	ALD_LOCK();
 
 	for (;;) {
-		while ((alq = LIST_FIRST(&ald_active)) == NULL)
-			msleep(&ald_active, &ald_mtx, PWAIT, "aldslp", 0);
+		while ((alq = LIST_FIRST(&ald_active)) == NULL
+		    && !ald_shutingdown)
+			mtx_sleep(&ald_active, &ald_mtx, PWAIT, "aldslp", 0);
 
+		/* Don't shutdown until all active alq's are flushed */
+		if (ald_shutingdown && alq == NULL) {
+			ALD_UNLOCK();
+			break;
+		}
+
 		ALQ_LOCK(alq);
 		ald_deactivate(alq);
 		ALD_UNLOCK();
 		needwakeup = alq_doio(alq);
 		ALQ_UNLOCK(alq);
 		if (needwakeup)
-			wakeup(alq);
+			wakeup_one(alq);
 		ALD_LOCK();
 	}
+
+	kproc_exit(0);
 }
 
 static void
@@ -200,14 +214,29 @@
 	struct alq *alq;
 
 	ALD_LOCK();
+
+	/* Ensure no new queues can be created */
 	ald_shutingdown = 1;
 
+	/* Shutdown all alqs prior to terminating the ald_daemon */
 	while ((alq = LIST_FIRST(&ald_queues)) != NULL) {
 		LIST_REMOVE(alq, aq_link);
 		ALD_UNLOCK();
 		alq_shutdown(alq);
 		ALD_LOCK();
 	}
+
+	/* At this point, all alqs are flushed and shutdown */
+
+	/*
+	 * Wake ald_daemon so that it exits. It won't be able to do
+	 * anything until we mtx_sleep because we hold the ald_mtx
+	 */
+	wakeup(&ald_active);
+
+	/* Wait for ald_daemon to exit */
+	mtx_sleep(ald_proc, &ald_mtx, PWAIT, "aldslp", 0);
+
 	ALD_UNLOCK();
 }
 
@@ -219,15 +248,30 @@
 	/* Stop any new writers. */
 	alq->aq_flags |= AQ_SHUTDOWN;
 
+	/*
+	 * If the alq isn't active but has unwritten data (possible if
+	 * the ALQ_NOACTIVATE flag has been used), explicitly activate the
+	 * alq here so that the pending data gets flushed by the ald_daemon.
+	 */
+	if (!(alq->aq_flags & AQ_ACTIVE) &&
+	    ALQ_HAS_PENDING_DATA(alq)) {
+		alq->aq_flags |= AQ_ACTIVE;
+		ALQ_UNLOCK(alq);
+		ALD_LOCK();
+		ald_activate(alq);
+		ALD_UNLOCK();
+		ALQ_LOCK(alq);
+	}
+
 	/* Drain IO */
-	while (alq->aq_flags & (AQ_FLUSHING|AQ_ACTIVE)) {
+	while (alq->aq_flags & AQ_ACTIVE) {
 		alq->aq_flags |= AQ_WANTED;
 		msleep_spin(alq, &alq->aq_mtx, "aldclose", 0);
 	}
+
 	ALQ_UNLOCK(alq);
 
-	vn_close(alq->aq_vp, FWRITE, alq->aq_cred,
-	    curthread);
+	vn_close(alq->aq_vp, FWRITE, alq->aq_cred, curthread);
 	crfree(alq->aq_cred);
 }
 
@@ -242,46 +286,55 @@
 	struct vnode *vp;
 	struct uio auio;
 	struct iovec aiov[2];
-	struct ale *ale;
-	struct ale *alstart;
 	int totlen;
 	int iov;
 	int vfslocked;
+	int wrapearly;
 
+	KASSERT((ALQ_HAS_PENDING_DATA(alq)),
+	    ("%s: queue empty!", __func__)
+	);
+
 	vp = alq->aq_vp;
 	td = curthread;
 	totlen = 0;
-	iov = 0;
+	iov = 1;
 
-	alstart = ale = alq->aq_entvalid;
-	alq->aq_entvalid = NULL;
-
 	bzero(&aiov, sizeof(aiov));
 	bzero(&auio, sizeof(auio));
 
-	do {
-		if (aiov[iov].iov_base == NULL)
-			aiov[iov].iov_base = ale->ae_data;
-		aiov[iov].iov_len += alq->aq_entlen;
-		totlen += alq->aq_entlen;
-		/* Check to see if we're wrapping the buffer */
-		if (ale->ae_data + alq->aq_entlen != ale->ae_next->ae_data)
-			iov++;
-		ale->ae_flags &= ~AE_VALID;
-		ale = ale->ae_next;
-	} while (ale->ae_flags & AE_VALID);
+	/* Start the write from the location of our buffer tail pointer. */
+	aiov[0].iov_base = alq->aq_entbuf + alq->aq_writetail;
 
+	if (alq->aq_writetail < alq->aq_writehead) {
+		/* Buffer not wrapped */
+		totlen = aiov[0].iov_len = alq->aq_writehead -
+		    alq->aq_writetail;
+	} else if (alq->aq_writehead == 0) {
+		/* Buffer not wrapped (special case to avoid an empty iov) */
+		totlen = aiov[0].iov_len = alq->aq_buflen - alq->aq_writetail;
+	} else {
+		/*
+		 * Buffer wrapped, requires 2 aiov entries:
+		 * - first is from writetail to end of buffer
+		 * - second is from start of buffer to writehead
+		 */
+		aiov[0].iov_len = alq->aq_buflen - alq->aq_writetail -
+		    alq->aq_wrapearly;
+		iov++;
+		aiov[1].iov_base = alq->aq_entbuf;
+		aiov[1].iov_len = alq->aq_writehead;
+		totlen = aiov[0].iov_len + aiov[1].iov_len;
+	}
+
+	wrapearly = alq->aq_wrapearly;
+
 	alq->aq_flags |= AQ_FLUSHING;
 	ALQ_UNLOCK(alq);
 
-	if (iov == 2 || aiov[iov].iov_base == NULL)
-		iov--;
-
 	auio.uio_iov = &aiov[0];
 	auio.uio_offset = 0;
 	auio.uio_segflg = UIO_SYSSPACE;
 	auio.uio_rw = UIO_WRITE;
-	auio.uio_iovcnt = iov + 1;
+	auio.uio_iovcnt = iov;
 	auio.uio_resid = totlen;
 	auio.uio_td = td;
 
@@ -291,6 +344,7 @@
 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 	vn_start_write(vp, &mp, V_WAIT);
 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+
 	/*
 	 * XXX: VOP_WRITE error checks are ignored.
 	 */
@@ -305,9 +359,30 @@
 	ALQ_LOCK(alq);
 	alq->aq_flags &= ~AQ_FLUSHING;
 
-	if (alq->aq_entfree == NULL)
-		alq->aq_entfree = alstart;
+	/*
+	 * Adjust writetail as required, taking into account wrapping.
+	 */
+	alq->aq_writetail = (alq->aq_writetail + totlen + wrapearly) %
+	    alq->aq_buflen;
+	alq->aq_freebytes += totlen + wrapearly;
 
+	/*
+	 * If we just flushed part of the buffer which wrapped, reset the
+	 * wrapearly indicator.
+	 */
+	if (wrapearly)
+		alq->aq_wrapearly = 0;
+
+	/*
+	 * If we just flushed the buffer completely,
+	 * reset indexes to 0 to minimise buffer wraps.
+	 * This is also required to ensure alq_getn() can't wedge itself.
+	 */
+	if (!ALQ_HAS_PENDING_DATA(alq))
+		alq->aq_writehead = alq->aq_writetail = 0;
+
+	KASSERT((alq->aq_writetail >= 0 && alq->aq_writetail < alq->aq_buflen),
+	    ("%s: aq_writetail < 0 || aq_writetail >= aq_buflen", __func__)
+	);
+
 	if (alq->aq_flags & AQ_WANTED) {
 		alq->aq_flags &= ~AQ_WANTED;
 		return (1);
@@ -337,14 +412,14 @@
 {
 	struct thread *td;
 	struct nameidata nd;
-	struct ale *ale;
-	struct ale *alp;
 	struct alq *alq;
-	char *bufp;
 	int flags;
 	int error;
-	int i, vfslocked;
+	int vfslocked;
 
+	KASSERT((size > 0), ("%s: size <= 0", __func__));
+	KASSERT((count >= 0), ("%s: count < 0", __func__));
+
 	*alqp = NULL;
 	td = curthread;
@@ -357,37 +432,33 @@
 	vfslocked = NDHASGIANT(&nd);
 	NDFREE(&nd, NDF_ONLY_PNBUF);
 
-	/* We just unlock so we hold a reference */
+	/* We just unlock so we hold a reference. */
 	VOP_UNLOCK(nd.ni_vp, 0);
 	VFS_UNLOCK_GIANT(vfslocked);
 
 	alq = malloc(sizeof(*alq), M_ALD, M_WAITOK|M_ZERO);
-	alq->aq_entbuf = malloc(count * size, M_ALD, M_WAITOK|M_ZERO);
-	alq->aq_first = malloc(sizeof(*ale) * count, M_ALD, M_WAITOK|M_ZERO);
 	alq->aq_vp = nd.ni_vp;
 	alq->aq_cred = crhold(cred);
-	alq->aq_entmax = count;
-	alq->aq_entlen = size;
-	alq->aq_entfree = alq->aq_first;
 
 	mtx_init(&alq->aq_mtx, "ALD Queue", NULL, MTX_SPIN|MTX_QUIET);
 
-	bufp = alq->aq_entbuf;
-	ale = alq->aq_first;
-	alp = NULL;
-
-	/* Match up entries with buffers */
-	for (i = 0; i < count; i++) {
-		if (alp)
-			alp->ae_next = ale;
-		ale->ae_data = bufp;
-		alp = ale;
-		ale++;
-		bufp += size;
+	if (count > 0) {
+		/* Fixed length messages. */
+		alq->aq_buflen = size * count;
+		alq->aq_entmax = count;
+		alq->aq_entlen = size;
+	} else {
+		/* Variable length messages. */
+		alq->aq_buflen = size;
+		alq->aq_entmax = 0;
+		alq->aq_entlen = 0;
 	}
 
-	alp->ae_next = alq->aq_first;
+	alq->aq_freebytes = alq->aq_buflen;
+	alq->aq_entbuf = malloc(alq->aq_buflen, M_ALD, M_WAITOK|M_ZERO);
+	alq->aq_writehead = alq->aq_writetail = 0;
+
 	if ((error = ald_add(alq)) != 0)
 		return (error);
 	*alqp = alq;
@@ -400,67 +471,240 @@
  * wait or return an error depending on the value of waitok.
  */
 int
-alq_write(struct alq *alq, void *data, int waitok)
+alq_write(struct alq *alq, void *data, int flags)
 {
-	struct ale *ale;
+	/* Should only be called in fixed length message (legacy) mode. */
+	KASSERT((alq->aq_entmax > 0 && alq->aq_entlen > 0),
+	    ("%s: fixed length write on variable length queue", __func__)
+	);
+	return (alq_writen(alq, data, alq->aq_entlen, flags));
+}
 
-	if ((ale = alq_get(alq, waitok)) == NULL)
+int
+alq_writen(struct alq *alq, void *data, int len, int flags)
+{
+	int activate = 0;
+	int copy = len;
+
+	KASSERT((len > 0 && len <= alq->aq_buflen),
+	    ("%s: len <= 0 || len > aq_buflen", __func__)
+	);
+
+	ALQ_LOCK(alq);
+
+	/*
+	 * If the message is larger than our underlying buffer or
+	 * there is not enough free space in our underlying buffer
+	 * to accept the message and the user can't wait, return.
+	 */
+	if ((len > alq->aq_buflen) ||
+	    ((flags & ALQ_NOWAIT) && (alq->aq_freebytes < len))) {
+		ALQ_UNLOCK(alq);
 		return (EWOULDBLOCK);
+	}
 
-	bcopy(data, ale->ae_data, alq->aq_entlen);
-	alq_post(alq, ale);
+	/*
+	 * ALQ_WAITOK or alq->aq_freebytes >= len, either spin until
+	 * we have enough free bytes (former) or skip (latter).
+	 */
+	while (alq->aq_freebytes < len && (alq->aq_flags & AQ_SHUTDOWN) == 0) {
+		alq->aq_flags |= AQ_WANTED;
+		msleep_spin(alq, &alq->aq_mtx, "alqwriten", 0);
+	}
+
+	/*
+	 * We need to serialise wakeups to ensure records remain in order.
+	 * Therefore, wakeup the next thread in the queue waiting for
+	 * alq resources to be available.
+	 * (technically this is only required if we actually entered the
+	 * above while loop)
+	 */
+	wakeup_one(alq);
+
+	/* Bail if we're shutting down. */
+	if (alq->aq_flags & AQ_SHUTDOWN) {
+		ALQ_UNLOCK(alq);
+		return (EWOULDBLOCK);
+	}
+
+	/*
+	 * If we need to wrap the buffer to accommodate the write,
+	 * we'll need 2 calls to bcopy.
+	 */
+	if ((alq->aq_buflen - alq->aq_writehead) < len)
+		copy = alq->aq_buflen - alq->aq_writehead;
+
+	/* Copy message (or part thereof if wrap required) to the buffer. */
+	bcopy(data, alq->aq_entbuf + alq->aq_writehead, copy);
+	alq->aq_writehead += copy;
+
+	if (alq->aq_writehead >= alq->aq_buflen) {
+		KASSERT((alq->aq_writehead == alq->aq_buflen),
+		    ("alq->aq_writehead (%d) > alq->aq_buflen (%d)",
+		    alq->aq_writehead,
+		    alq->aq_buflen)
+		);
+		alq->aq_writehead = 0;
+	}
+
+	if (copy != len) {
+		/*
+		 * Wrap the buffer by copying the remainder of our message
+		 * to the start of the buffer and resetting aq_writehead.
+		 */
+		bcopy(((uint8_t *)data)+copy, alq->aq_entbuf, len - copy);
+		alq->aq_writehead = len - copy;
+	}
+
+	KASSERT((alq->aq_writehead >= 0 && alq->aq_writehead < alq->aq_buflen),
+	    ("%s: aq_writehead < 0 || aq_writehead >= aq_buflen", __func__)
+	);
+
+	alq->aq_freebytes -= len;
+
+	if (((alq->aq_flags & AQ_ACTIVE) == 0) &&
+	    ((flags & ALQ_NOACTIVATE) == 0)) {
+		alq->aq_flags |= AQ_ACTIVE;
+		activate = 1;
+	}
+
+	ALQ_UNLOCK(alq);
+
+	if (activate) {
+		ALD_LOCK();
+		ald_activate(alq);
+		ALD_UNLOCK();
+	}
 
 	return (0);
 }
 
 struct ale *
-alq_get(struct alq *alq, int waitok)
+alq_get(struct alq *alq, int flags)
 {
-	struct ale *ale;
-	struct ale *aln;
+	/* Should only be called in fixed length message (legacy) mode. */
+	KASSERT((alq->aq_entmax > 0 && alq->aq_entlen > 0),
+	    ("%s: fixed length get on variable length queue", __func__)
+	);
+	return (alq_getn(alq, alq->aq_entlen, flags));
+}
 
-	ale = NULL;
+struct ale *
+alq_getn(struct alq *alq, int len, int flags)
+{
+	int contigbytes;
 
+	KASSERT((len > 0 && len <= alq->aq_buflen),
+	    ("%s: len <= 0 || len > alq->aq_buflen", __func__)
+	);
+
 	ALQ_LOCK(alq);
 
-	/* Loop until we get an entry or we're shutting down */
-	while ((alq->aq_flags & AQ_SHUTDOWN) == 0 &&
-	    (ale = alq->aq_entfree) == NULL &&
-	    (waitok & ALQ_WAITOK)) {
+	/*
+	 * Determine the number of free contiguous bytes.
+	 * We ensure elsewhere that if aq_writehead == aq_writetail because
+	 * the buffer is empty, they will both be set to 0 and therefore
+	 * aq_freebytes == aq_buflen and is fully contiguous.
+	 * If they are equal and the buffer is not empty, aq_freebytes will
+	 * be 0 indicating the buffer is full.
+	 */
+	if (alq->aq_writehead <= alq->aq_writetail)
+		contigbytes = alq->aq_freebytes;
+	else {
+		contigbytes = alq->aq_buflen - alq->aq_writehead;
+
+		if (contigbytes < len) {
+			/*
+			 * Insufficient space at end of buffer to handle a
+			 * contiguous write. Wrap early if there's space at
+			 * the beginning. This will leave a hole at the end
+			 * of the buffer which we will have to skip over when
+			 * flushing the buffer to disk.
+			 */
+			if (alq->aq_writetail >= len || flags & ALQ_WAITOK) {
+				/* Keep track of # bytes left blank. */
+				alq->aq_wrapearly = contigbytes;
+				/* Do the wrap and adjust counters. */
+				contigbytes = alq->aq_freebytes =
+				    alq->aq_writetail;
+				alq->aq_writehead = 0;
+			}
+		}
+	}
+
+	/*
+	 * If the message is larger than our underlying buffer or
+	 * there is not enough free contiguous space in our underlying buffer
+	 * to accept the message and the user can't wait, return.
+	 */
+	if ((len > alq->aq_buflen) ||
+	    ((flags & ALQ_NOWAIT) && (contigbytes < len))) {
+		ALQ_UNLOCK(alq);
+		return (NULL);
+	}
+
+	/*
+	 * ALQ_WAITOK or contigbytes >= len, either spin until we have
+	 * enough free contiguous bytes (former) or skip (latter).
+	 */
+	while (contigbytes < len && (alq->aq_flags & AQ_SHUTDOWN) == 0) {
 		alq->aq_flags |= AQ_WANTED;
-		msleep_spin(alq, &alq->aq_mtx, "alqget", 0);
+		msleep_spin(alq, &alq->aq_mtx, "alqgetn", 0);
+
+		if (alq->aq_writehead <= alq->aq_writetail)
+			contigbytes = alq->aq_freebytes;
+		else
+			contigbytes = alq->aq_buflen - alq->aq_writehead;
 	}
 
-	if (ale != NULL) {
-		aln = ale->ae_next;
-		if ((aln->ae_flags & AE_VALID) == 0)
-			alq->aq_entfree = aln;
-		else
-			alq->aq_entfree = NULL;
-	} else
+	/*
+	 * We need to serialise wakeups to ensure records remain in order.
+	 * Therefore, wakeup the next thread in the queue waiting for
+	 * alq resources to be available.
+	 * (technically this is only required if we actually entered the
+	 * above while loop)
+	 */
+	wakeup_one(alq);
+
+	/* Bail if we're shutting down. */
+	if (alq->aq_flags & AQ_SHUTDOWN) {
 		ALQ_UNLOCK(alq);
+		return (NULL);
+	}
 
+	/*
+	 * If we are here, we have a contiguous number of bytes >= len
+	 * available in our buffer starting at aq_writehead.
+	 */
+	alq->aq_getpost.ae_data = alq->aq_entbuf + alq->aq_writehead;
+	alq->aq_writehead += len;
+	alq->aq_freebytes -= len;
 
-	return (ale);
+	/* Wrap aq_writehead if we've filled to the end of the buffer. */
+	if (alq->aq_writehead == alq->aq_buflen)
+		alq->aq_writehead = 0;
+
+	KASSERT((alq->aq_writehead >= 0 && alq->aq_writehead < alq->aq_buflen),
+	    ("%s: aq_writehead < 0 || aq_writehead >= aq_buflen", __func__)
+	);
+
+	return (&alq->aq_getpost);
 }
 
 void
-alq_post(struct alq *alq, struct ale *ale)
+alq_post(struct alq *alq, struct ale *ale, int flags)
 {
 	int activate;
 
-	ale->ae_flags |= AE_VALID;
-
-	if (alq->aq_entvalid == NULL)
-		alq->aq_entvalid = ale;
-
-	if ((alq->aq_flags & AQ_ACTIVE) == 0) {
+	if (((alq->aq_flags & AQ_ACTIVE) == 0) &&
+	    ((flags & ALQ_NOACTIVATE) == 0)) {
 		alq->aq_flags |= AQ_ACTIVE;
 		activate = 1;
 	} else
 		activate = 0;
 
 	ALQ_UNLOCK(alq);
+
 	if (activate) {
 		ALD_LOCK();
 		ald_activate(alq);
@@ -475,16 +719,23 @@
 
 	ALD_LOCK();
 	ALQ_LOCK(alq);
-	if (alq->aq_flags & AQ_ACTIVE) {
+
+	if (alq->aq_flags & AQ_ACTIVE)
 		ald_deactivate(alq);
-		ALD_UNLOCK();
+
+	ALD_UNLOCK();
+
+	/*
+	 * Pull the lever iff there is data to flush and we're
+	 * not already in the middle of a flush operation.
+ */ + if (ALQ_HAS_PENDING_DATA(alq) && (alq->aq_flags & AQ_FLUSHING) == 0) needwakeup = alq_doio(alq); - } else - ALD_UNLOCK(); + ALQ_UNLOCK(alq); if (needwakeup) - wakeup(alq); + wakeup_one(alq); } /* @@ -506,7 +757,49 @@ alq_shutdown(alq); mtx_destroy(&alq->aq_mtx); - free(alq->aq_first, M_ALD); free(alq->aq_entbuf, M_ALD); free(alq, M_ALD); } + +static int alq_load_handler(module_t mod, int what, void *arg) +{ + int ret = 0; + + switch(what) { + case MOD_LOAD: + case MOD_UNLOAD: + case MOD_SHUTDOWN: + break; + + case MOD_QUIESCE: + ALD_LOCK(); + /* only allow unload if there are no open queues */ + if (LIST_FIRST(&ald_queues) == NULL) { + ald_shutingdown = 1; + ALD_UNLOCK(); + ald_shutdown(NULL, 0); + mtx_destroy(&ald_mtx); + } else { + ALD_UNLOCK(); + ret = EBUSY; + } + break; + + default: + ret = EINVAL; + break; + } + + return (ret); +} + +/* basic module data */ +static moduledata_t alq_mod = +{ + "alq", + alq_load_handler, /* execution entry point for the module */ + NULL +}; + +DECLARE_MODULE(alq, alq_mod, SI_SUB_SMP, SI_ORDER_ANY); +MODULE_VERSION(alq, 1); Index: sys/sys/alq.h =================================================================== --- sys/sys/alq.h (revision 195780) +++ sys/sys/alq.h (revision 197452) @@ -1,5 +1,6 @@ /*- * Copyright (c) 2002, Jeffrey Roberson + * Copyright (c) 2008-2009, Lawrence Stewart * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -26,7 +27,7 @@ * $FreeBSD$ * */ -#ifndef _SYS_ALQ_H_ +#ifndef _SYS_ALQ_H_ #define _SYS_ALQ_H_ /* @@ -41,17 +42,15 @@ * Async. Logging Entry */ struct ale { - struct ale *ae_next; /* Next Entry */ - char *ae_data; /* Entry buffer */ - int ae_flags; /* Entry flags */ + struct ale *ae_next; /* Unused, compat. */ + char *ae_data; /* Write ptr. */ + int ae_flags; /* Unused, compat. */ }; -#define AE_VALID 0x0001 /* Entry has valid data */ - - -/* waitok options */ +/* flags options */ #define ALQ_NOWAIT 0x0001 #define ALQ_WAITOK 0x0002 +#define ALQ_NOACTIVATE 0x0004 /* Suggested mode for file creation. */ #define ALQ_DEFAULT_CMODE 0600 @@ -64,7 +63,8 @@ * file The filename to open for logging. * cred Credential to authorize open and I/O with. * cmode Creation mode for file, if new. - * size The size of each entry in the queue. + * size The size of each entry in the queue, or the size of the queue + * itself in bytes if count=0 (variable length queues). * count The number of items in the buffer, this should be large enough * to store items over the period of a disk write. * Returns: @@ -88,7 +88,8 @@ * The system is shutting down. * 0 on success. */ -int alq_write(struct alq *alq, void *data, int waitok); +int alq_write(struct alq *alq, void *data, int flags); +int alq_writen(struct alq *alq, void *data, int len, int flags); /* * alq_flush: Flush the queue out to disk @@ -115,13 +116,14 @@ * * This leaves the queue locked until a subsequent alq_post. */ -struct ale *alq_get(struct alq *alq, int waitok); +struct ale *alq_get(struct alq *alq, int flags); +struct ale *alq_getn(struct alq *alq, int len, int flags); /* * alq_post: Schedule the ale retrieved by alq_get for writing. * alq The queue to post the entry to. * ale An asynch logging entry returned by alq_get. */ -void alq_post(struct alq *, struct ale *); +void alq_post(struct alq *alq, struct ale *ale, int flags); #endif /* _SYS_ALQ_H_ */