1 /*-
2 * Copyright (c) 2007 Ariff Abdullah <ariff@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD$
27 */
28
29 #include <sys/param.h>
30 #include <sys/types.h>
31 #include <sys/systm.h>
32 #include <sys/conf.h>
33 #include <sys/kernel.h>
34 #include <sys/lock.h>
35 #include <sys/malloc.h>
36 #include <sys/mutex.h>
37 #include <sys/proc.h>
38
39 #if defined(SND_DIAGNOSTIC) || defined(SND_DEBUG)
40 #include <dev/sound/pcm/sound.h>
41 #endif
42
43 #include <dev/sound/clone.h>
44
45 /*
46 * So here we go again, another clonedevs manager. Unlike default clonedevs,
47 * this clone manager is designed to withstand various abusive behavior
48 * (such as 'while : ; do ls /dev/whatever ; done', etc.), reusable object
49 * after reaching certain expiration threshold, aggressive garbage collector,
50 * transparent device allocator and concurrency handling across multiple
51 * thread/proc. Due to limited information given by dev_clone EVENTHANDLER,
 * we don't have many clues whether the caller wants a real open() or simply
53 * making fun of us with things like stat(), mtime() etc. Assuming that:
54 * 1) Time window between dev_clone EH <-> real open() should be small
55 * enough and 2) mtime()/stat() etc. always looks like a half way / stalled
56 * operation, we can decide whether a new cdev must be created, old
57 * (expired) cdev can be reused or an existing cdev can be shared.
58 *
59 * Most of the operations and logics are generic enough and can be applied
60 * on other places (such as if_tap, snp, etc). Perhaps this can be
61 * rearranged to complement clone_*(). However, due to this still being
62 * specific to the sound driver (and as a proof of concept on how it can be
63 * done), si_drv2 is used to keep the pointer of the clone list entry to
64 * avoid expensive lookup.
65 */
66
/*
 * Clone entry: one per cloned cdev.  Linked into the owning manager's
 * tailq and also reachable from the cdev through si_drv2 (set up by
 * snd_clone_register()).
 */
struct snd_clone_entry {
	TAILQ_ENTRY(snd_clone_entry) link;	/* list linkage within parent */
	struct snd_clone *parent;		/* owning clone manager */
	struct cdev *devt;			/* backing cdev; NULL until registered */
	struct timespec tsp;			/* last reference / allocation time */
	uint32_t flags;				/* SND_CLONE_* entry state flags */
	pid_t pid;				/* pid of last allocating process */
	int unit;				/* type bits ORed with allocation unit */
};
77
/* clone manager */
struct snd_clone {
	TAILQ_HEAD(link_head, snd_clone_entry) head;	/* all clone entries */
#ifdef SND_DIAGNOSTIC
	struct mtx *lock;	/* optional mutex, checked by SND_CLONE_LOCKASSERT() */
#endif
	struct timespec tsp;	/* time of last clone handler invocation */
	int refcount;		/* open references (snd_clone_ref/unref) */
	int size;		/* number of entries currently on the list */
	int typemask;		/* unit bits reserved for encoding device type */
	int maxunit;		/* highest allocatable unit number */
	int deadline;		/* expiration threshold, milliseconds */
	uint32_t flags;		/* SND_CLONE_* manager flags (GC policy etc.) */
};
92
#ifdef SND_DIAGNOSTIC
/*
 * Diagnostic build: panic immediately on contract violations so abusive
 * usage is caught at the exact point of failure.  The manager's mutex
 * (when one was supplied) must be held around all manager operations.
 */
#define SND_CLONE_LOCKASSERT(x)		do {			\
	if ((x)->lock == NULL)					\
		panic("%s(): NULL mutex!", __func__);		\
	if (mtx_owned((x)->lock) == 0)				\
		panic("%s(): mutex not owned!", __func__);	\
} while(0)
#define SND_CLONE_ASSERT(x, y)		do {			\
	if (!(x))						\
		panic y;					\
} while(0)
#else
/* Regular build: no lock checking, assertions compile down to KASSERT(9). */
#define SND_CLONE_LOCKASSERT(...)
#define SND_CLONE_ASSERT(x...)		KASSERT(x)
#endif
108
/*
 * Shamelessly borrowed from vfs_subr.c
 * We need at least 1/HZ precision as the default timestamping.
 */
enum { SND_TSP_SEC, SND_TSP_HZ, SND_TSP_USEC, SND_TSP_NSEC };

/* Selected precision; settable via the hw.snd.timestamp_precision tunable. */
static int snd_timestamp_precision = SND_TSP_HZ;
TUNABLE_INT("hw.snd.timestamp_precision", &snd_timestamp_precision);
117
118 void
119 snd_timestamp(struct timespec *tsp)
120 {
121 struct timeval tv;
122
123 switch (snd_timestamp_precision) {
124 case SND_TSP_SEC:
125 tsp->tv_sec = time_second;
126 tsp->tv_nsec = 0;
127 break;
128 case SND_TSP_HZ:
129 getnanouptime(tsp);
130 break;
131 case SND_TSP_USEC:
132 microuptime(&tv);
133 TIMEVAL_TO_TIMESPEC(&tv, tsp);
134 break;
135 case SND_TSP_NSEC:
136 nanouptime(tsp);
137 break;
138 default:
139 snd_timestamp_precision = SND_TSP_HZ;
140 snd_timestamp(tsp);
141 break;
142 }
143 }
144
#if defined(SND_DIAGNOSTIC) || defined(SND_DEBUG)
/*
 * Sysctl handler for hw.snd.timestamp_precision.  Only the four known
 * SND_TSP_* values are accepted; any other value is silently ignored
 * and the previous setting is kept.
 */
static int
sysctl_hw_snd_timestamp_precision(SYSCTL_HANDLER_ARGS)
{
	int err, val;

	val = snd_timestamp_precision;
	err = sysctl_handle_int(oidp, &val, sizeof(val), req);
	if (err == 0 && req->newptr != NULL) {
		switch (val) {
		case SND_TSP_SEC:
		case SND_TSP_HZ:
		case SND_TSP_USEC:
		case SND_TSP_NSEC:
			snd_timestamp_precision = val;
			break;
		default:
			/* Out-of-range: keep the current setting. */
			break;
		}
	}

	return (err);
}
SYSCTL_PROC(_hw_snd, OID_AUTO, timestamp_precision, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof(int), sysctl_hw_snd_timestamp_precision, "I",
    "timestamp precision (0=s 1=hz 2=us 3=ns)");
#endif
172
173 /*
174 * snd_clone_create() : Return opaque allocated clone manager. Mutex is not
175 * a mandatory requirement if the caller can guarantee safety against
176 * concurrent access.
177 */
178 struct snd_clone *
179 snd_clone_create(
180 #ifdef SND_DIAGNOSTIC
181 struct mtx *lock,
182 #endif
183 int typemask, int maxunit, int deadline, uint32_t flags)
184 {
185 struct snd_clone *c;
186
187 SND_CLONE_ASSERT(!(typemask & ~SND_CLONE_MAXUNIT),
188 ("invalid typemask: 0x%08x", typemask));
189 SND_CLONE_ASSERT(maxunit == -1 ||
190 !(maxunit & ~(~typemask & SND_CLONE_MAXUNIT)),
191 ("maxunit overflow: typemask=0x%08x maxunit=%d",
192 typemask, maxunit));
193 SND_CLONE_ASSERT(!(flags & ~SND_CLONE_MASK),
194 ("invalid clone flags=0x%08x", flags));
195
196 c = malloc(sizeof(*c), M_DEVBUF, M_WAITOK | M_ZERO);
197 #ifdef SND_DIAGNOSTIC
198 c->lock = lock;
199 #endif
200 c->refcount = 0;
201 c->size = 0;
202 c->typemask = typemask;
203 c->maxunit = (maxunit == -1) ? (~typemask & SND_CLONE_MAXUNIT) :
204 maxunit;
205 c->deadline = deadline;
206 c->flags = flags;
207 snd_timestamp(&c->tsp);
208 TAILQ_INIT(&c->head);
209
210 return (c);
211 }
212
213 /*
214 * Getters / Setters. Not worth explaining :)
215 */
/* Return the number of clone entries currently being managed. */
int
snd_clone_getsize(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_LOCKASSERT(c);

	return (c->size);
}
224
/*
 * Return the maximum allowable unit.  In unit world, everything is
 * offset by -1 (i.e. maxunit is the highest usable unit number).
 */
int
snd_clone_getmaxunit(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_LOCKASSERT(c);

	return (c->maxunit);
}
234
235 int
236 snd_clone_setmaxunit(struct snd_clone *c, int maxunit)
237 {
238 SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
239 SND_CLONE_LOCKASSERT(c);
240 SND_CLONE_ASSERT(maxunit == -1 ||
241 !(maxunit & ~(~c->typemask & SND_CLONE_MAXUNIT)),
242 ("maxunit overflow: typemask=0x%08x maxunit=%d",
243 c->typemask, maxunit));
244
245 c->maxunit = (maxunit == -1) ? (~c->typemask & SND_CLONE_MAXUNIT) :
246 maxunit;
247
248 return (c->maxunit);
249 }
250
/* Return the expiration deadline, in milliseconds. */
int
snd_clone_getdeadline(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_LOCKASSERT(c);

	return (c->deadline);
}
259
/* Set the expiration deadline (ms) and return the new value. */
int
snd_clone_setdeadline(struct snd_clone *c, int deadline)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_LOCKASSERT(c);

	c->deadline = deadline;

	return (c->deadline);
}
270
/*
 * Copy the manager's last-activity timestamp into *tsp.
 * Always returns 0.
 */
int
snd_clone_gettime(struct snd_clone *c, struct timespec *tsp)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_ASSERT(tsp != NULL, ("NULL timespec"));
	SND_CLONE_LOCKASSERT(c);

	*tsp = c->tsp;

	return (0);
}
282
/* Return the manager's SND_CLONE_* flags. */
uint32_t
snd_clone_getflags(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_LOCKASSERT(c);

	return (c->flags);
}
291
/* Replace the manager's SND_CLONE_* flags and return the new value. */
uint32_t
snd_clone_setflags(struct snd_clone *c, uint32_t flags)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_LOCKASSERT(c);
	SND_CLONE_ASSERT(!(flags & ~SND_CLONE_MASK),
	    ("invalid clone flags=0x%08x", flags));

	c->flags = flags;

	return (c->flags);
}
304
305 int
306 snd_clone_getdevtime(struct cdev *dev, struct timespec *tsp)
307 {
308 struct snd_clone_entry *ce;
309
310 SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
311 SND_CLONE_ASSERT(tsp != NULL, ("NULL timespec"));
312
313 ce = dev->si_drv2;
314 if (ce == NULL)
315 return (ENODEV);
316
317 SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
318 SND_CLONE_LOCKASSERT(ce->parent);
319
320 *tsp = ce->tsp;
321
322 return (0);
323 }
324
/*
 * Return the per-device entry flags, or the sentinel 0xffffffff when
 * the cdev has no clone entry attached.
 */
uint32_t
snd_clone_getdevflags(struct cdev *dev)
{
	struct snd_clone_entry *ce;

	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));

	ce = dev->si_drv2;
	if (ce == NULL)
		return (0xffffffff);

	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
	SND_CLONE_LOCKASSERT(ce->parent);

	return (ce->flags);
}
341
/*
 * Replace the per-device entry flags.  Returns the new flags, or the
 * sentinel 0xffffffff when the cdev has no clone entry attached.
 */
uint32_t
snd_clone_setdevflags(struct cdev *dev, uint32_t flags)
{
	struct snd_clone_entry *ce;

	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
	SND_CLONE_ASSERT(!(flags & ~SND_CLONE_DEVMASK),
	    ("invalid clone dev flags=0x%08x", flags));

	ce = dev->si_drv2;
	if (ce == NULL)
		return (0xffffffff);

	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
	SND_CLONE_LOCKASSERT(ce->parent);

	ce->flags = flags;

	return (ce->flags);
}
362
/*
 * SND_CLONE_ELAPSED(x, y): elapsed time from timespec y to timespec x,
 * converted to milliseconds, with a manual borrow from tv_sec when y's
 * nanoseconds exceed x's.
 */
#define SND_CLONE_ELAPSED(x, y)						\
	((((x)->tv_sec - (y)->tv_sec) * 1000) +				\
	(((y)->tv_nsec > (x)->tv_nsec) ?				\
	(((1000000000L + (x)->tv_nsec -					\
	(y)->tv_nsec) / 1000000) - 1000) :				\
	(((x)->tv_nsec - (y)->tv_nsec) / 1000000)))

/*
 * SND_CLONE_EXPIRED(x, y, z): true when manager x's deadline (ms) has
 * passed between older timestamp z and now-timestamp y.  A deadline < 1
 * expires immediately; the tv_sec comparison is a cheap early-out before
 * the full millisecond computation.
 */
#define SND_CLONE_EXPIRED(x, y, z)					\
	((x)->deadline < 1 ||						\
	((y)->tv_sec - (z)->tv_sec) > ((x)->deadline / 1000) ||		\
	SND_CLONE_ELAPSED(y, z) > (x)->deadline)
375
/*
 * snd_clone_gc() : Garbage collector for stalled, expired objects. Refer to
 * clone.h for explanations on GC settings.
 *
 * Returns the number of entries pruned (or, with SND_CLONE_GC_REVOKE,
 * the number whose clone-invocation status was revoked).
 */
int
snd_clone_gc(struct snd_clone *c)
{
	struct snd_clone_entry *ce, *tce;
	struct timespec now;
	int size, rsize, *counter;

	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_LOCKASSERT(c);

	/* Nothing to do unless GC is enabled and entries exist. */
	if (!(c->flags & SND_CLONE_GC_ENABLE) || c->size == 0)
		return (0);

	snd_timestamp(&now);

	/*
	 * Bail out if the last clone handler was invoked below the deadline
	 * threshold.
	 */
	if ((c->flags & SND_CLONE_GC_EXPIRED) &&
	    !SND_CLONE_EXPIRED(c, &now, &c->tsp))
		return (0);

	size = c->size;
	rsize = c->size;
	/*
	 * In revoke mode entries stay on the list (only their INVOKE flag
	 * is cleared), so count against the local copy instead of c->size.
	 */
	counter = (c->flags & SND_CLONE_GC_REVOKE) ? &rsize : &c->size;

	/*
	 * Visit each object in reverse order. If the object is still being
	 * referenced by a valid open(), skip it. Look for expired objects
	 * and either revoke its clone invocation status or mercilessly
	 * throw it away.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(ce, &c->head, link_head, link, tce) {
		if (!(ce->flags & SND_CLONE_BUSY) &&
		    (!(ce->flags & SND_CLONE_INVOKE) ||
		    SND_CLONE_EXPIRED(c, &now, &ce->tsp))) {
			if (c->flags & SND_CLONE_GC_REVOKE)
				ce->flags &= ~SND_CLONE_INVOKE;
			else {
				/*
				 * NOTE(review): this path assumes ce->devt is
				 * non-NULL, i.e. that a freshly allocated but
				 * not-yet-registered entry cannot reach here.
				 * Presumably SND_CLONE_ALLOC implies BUSY --
				 * confirm against clone.h.
				 */
				TAILQ_REMOVE(&c->head, ce, link);
				destroy_dev(ce->devt);
				free(ce, M_DEVBUF);
			}
			(*counter)--;
		}
	}

	/* return total pruned objects */
	return (size - *counter);
}
431
432 void
433 snd_clone_destroy(struct snd_clone *c)
434 {
435 struct snd_clone_entry *ce;
436
437 SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
438 SND_CLONE_ASSERT(c->refcount == 0, ("refcount > 0"));
439 SND_CLONE_LOCKASSERT(c);
440
441 while (!TAILQ_EMPTY(&c->head)) {
442 ce = TAILQ_FIRST(&c->head);
443 TAILQ_REMOVE(&c->head, ce, link);
444 if (ce->devt != NULL)
445 destroy_dev(ce->devt);
446 free(ce, M_DEVBUF);
447 }
448
449 free(c, M_DEVBUF);
450 }
451
452 /*
453 * snd_clone_acquire() : The vital part of concurrency management. Must be
454 * called somewhere at the beginning of open() handler. ENODEV is not really
455 * fatal since it just tell the caller that this is not cloned stuff.
456 * EBUSY is *real*, don't forget that!
457 */
458 int
459 snd_clone_acquire(struct cdev *dev)
460 {
461 struct snd_clone_entry *ce;
462
463 SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
464
465 ce = dev->si_drv2;
466 if (ce == NULL)
467 return (ENODEV);
468
469 SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
470 SND_CLONE_LOCKASSERT(ce->parent);
471
472 ce->flags &= ~SND_CLONE_INVOKE;
473
474 if (ce->flags & SND_CLONE_BUSY)
475 return (EBUSY);
476
477 ce->flags |= SND_CLONE_BUSY;
478
479 return (0);
480 }
481
482 /*
483 * snd_clone_release() : Release busy status. Must be called somewhere at
484 * the end of close() handler, or somewhere after fail open().
485 */
486 int
487 snd_clone_release(struct cdev *dev)
488 {
489 struct snd_clone_entry *ce;
490
491 SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
492
493 ce = dev->si_drv2;
494 if (ce == NULL)
495 return (ENODEV);
496
497 SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
498 SND_CLONE_LOCKASSERT(ce->parent);
499
500 ce->flags &= ~SND_CLONE_INVOKE;
501
502 if (!(ce->flags & SND_CLONE_BUSY))
503 return (EBADF);
504
505 ce->flags &= ~SND_CLONE_BUSY;
506
507 return (0);
508 }
509
510 /*
511 * snd_clone_ref/unref() : Garbage collector reference counter. To make
512 * garbage collector run automatically, the sequence must be something like
513 * this (both in open() and close() handlers):
514 *
515 * open() - 1) snd_clone_acquire()
516 * 2) .... check check ... if failed, snd_clone_release()
517 * 3) Success. Call snd_clone_ref()
518 *
519 * close() - 1) .... check check check ....
520 * 2) Success. snd_clone_release()
521 * 3) snd_clone_unref() . Garbage collector will run at this point
522 * if this is the last referenced object.
523 */
/*
 * Bump the manager's GC reference count.  Returns the new count, or 0
 * when the cdev has no clone entry attached (nothing to count).
 */
int
snd_clone_ref(struct cdev *dev)
{
	struct snd_clone_entry *ce;
	struct snd_clone *c;

	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));

	ce = dev->si_drv2;
	if (ce == NULL)
		return (0);

	c = ce->parent;
	SND_CLONE_ASSERT(c != NULL, ("NULL parent"));
	SND_CLONE_ASSERT(c->refcount >= 0, ("refcount < 0"));
	SND_CLONE_LOCKASSERT(c);

	return (++c->refcount);
}
543
544 int
545 snd_clone_unref(struct cdev *dev)
546 {
547 struct snd_clone_entry *ce;
548 struct snd_clone *c;
549
550 SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
551
552 ce = dev->si_drv2;
553 if (ce == NULL)
554 return (0);
555
556 c = ce->parent;
557 SND_CLONE_ASSERT(c != NULL, ("NULL parent"));
558 SND_CLONE_ASSERT(c->refcount > 0, ("refcount <= 0"));
559 SND_CLONE_LOCKASSERT(c);
560
561 c->refcount--;
562
563 /*
564 * Run automatic garbage collector, if needed.
565 */
566 if ((c->flags & SND_CLONE_GC_UNREF) &&
567 (!(c->flags & SND_CLONE_GC_LASTREF) ||
568 (c->refcount == 0 && (c->flags & SND_CLONE_GC_LASTREF))))
569 (void)snd_clone_gc(c);
570
571 return (c->refcount);
572 }
573
/*
 * snd_clone_register() : Attach a freshly created cdev to an entry
 * previously handed out by snd_clone_alloc().  Publishes the entry
 * through dev->si_drv2, clears the ALLOC state and marks the entry as
 * being in the middle of a clone invocation.
 */
void
snd_clone_register(struct snd_clone_entry *ce, struct cdev *dev)
{
	SND_CLONE_ASSERT(ce != NULL, ("NULL snd_clone_entry"));
	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
	SND_CLONE_ASSERT(dev->si_drv2 == NULL, ("dev->si_drv2 not NULL"));
	SND_CLONE_ASSERT((ce->flags & SND_CLONE_ALLOC) == SND_CLONE_ALLOC,
	    ("invalid clone alloc flags=0x%08x", ce->flags));
	SND_CLONE_ASSERT(ce->devt == NULL, ("ce->devt not NULL"));
	/* The unit chosen by snd_clone_alloc() must match the cdev's unit. */
	SND_CLONE_ASSERT(ce->unit == dev2unit(dev),
	    ("invalid unit ce->unit=0x%08x dev2unit=0x%08x",
	    ce->unit, dev2unit(dev)));

	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
	SND_CLONE_LOCKASSERT(ce->parent);

	dev->si_drv2 = ce;
	ce->devt = dev;
	ce->flags &= ~SND_CLONE_ALLOC;
	ce->flags |= SND_CLONE_INVOKE;
}
595
/*
 * snd_clone_alloc() : Find or create a clone entry for device type tmask
 * and unit *unit (-1 = pick any).  Three possible outcomes:
 *
 *   1. Returns a newly allocated entry, with *unit set to the chosen
 *      allocation unit -- the caller must create the cdev (make_dev())
 *      and attach it via snd_clone_register().
 *   2. Returns NULL with *dev pointed at an existing (reused or shared)
 *      cdev.
 *   3. Returns NULL without touching *dev on failure (unit out of range,
 *      or allocation failure with no fallback candidate).
 */
struct snd_clone_entry *
snd_clone_alloc(struct snd_clone *c, struct cdev **dev, int *unit, int tmask)
{
	struct snd_clone_entry *ce, *after, *bce, *cce, *nce, *tce;
	struct timespec now;
	int cunit, allocunit;
	pid_t curpid;

	ce = NULL;
	after = NULL;	/* insertion point keeping the list sorted */
	bce = NULL;	/* "b"usy candidate */
	cce = NULL;	/* "c"urthread/proc candidate */
	nce = NULL;	/* "n"ull, totally unbusy candidate */
	tce = NULL;	/* Last "t"ry candidate */
	cunit = 0;	/* number of entries seen with this device type */

	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_LOCKASSERT(c);
	SND_CLONE_ASSERT(dev != NULL, ("NULL dev pointer"));
	SND_CLONE_ASSERT((c->typemask & tmask) == tmask,
	    ("invalid tmask: typemask=0x%08x tmask=0x%08x",
	    c->typemask, tmask));
	SND_CLONE_ASSERT(unit != NULL, ("NULL unit pointer"));
	SND_CLONE_ASSERT(*unit == -1 || !(*unit & (c->typemask | tmask)),
	    ("typemask collision: typemask=0x%08x tmask=0x%08x *unit=%d",
	    c->typemask, tmask, *unit));

	/* A specific unit beyond the allowable range can never succeed. */
	if (*unit != -1 && *unit > c->maxunit)
		return (NULL);

	allocunit = (*unit == -1) ? 0 : *unit;

	snd_timestamp(&now);

	curpid = curthread->td_proc->p_pid;

	TAILQ_FOREACH(ce, &c->head, link) {
		/*
		 * Sort incrementally according to device type.
		 */
		if (tmask > (ce->unit & c->typemask)) {
			if (cunit == 0)
				after = ce;
			continue;
		} else if (tmask < (ce->unit & c->typemask))
			break;

		/*
		 * Shoot.. this is where the grumpiness begins. An exact
		 * unit match -- just return it immediately.
		 */
		if (*unit != -1 && *unit == (ce->unit & ~tmask))
			goto snd_clone_alloc_out;

		cunit++;
		/*
		 * Similar device type. Sort incrementally according
		 * to allocation unit. While here, look for a free slot
		 * and possible collision for new / future allocation.
		 */
		if (*unit == -1 && (ce->unit & ~tmask) == allocunit)
			allocunit++;
		if ((ce->unit & ~tmask) < allocunit)
			after = ce;
		/*
		 * Clone logic:
		 *   1. Look for non busy, but keep track of the best
		 *      possible busy cdev.
		 *   2. Look for the best (oldest referenced) entry that is
		 *      in a same process / thread.
		 *   3. Look for the best (oldest referenced), absolutely
		 *      free entry.
		 *   4. Lastly, look for the best (oldest referenced)
		 *      entry that doesn't fit any category above.
		 */
		if (ce->flags & SND_CLONE_BUSY) {
			if (ce->devt != NULL && (bce == NULL ||
			    timespeccmp(&ce->tsp, &bce->tsp, <)))
				bce = ce;
			continue;
		}
		if (ce->pid == curpid &&
		    (cce == NULL || timespeccmp(&ce->tsp, &cce->tsp, <)))
			cce = ce;
		else if (!(ce->flags & SND_CLONE_INVOKE) &&
		    (nce == NULL || timespeccmp(&ce->tsp, &nce->tsp, <)))
			nce = ce;
		else if (tce == NULL || timespeccmp(&ce->tsp, &tce->tsp, <))
			tce = ce;
	}
	if (*unit != -1)
		goto snd_clone_alloc_new;
	else if (cce != NULL) {
		/* Same proc entry found, go for it */
		ce = cce;
		goto snd_clone_alloc_out;
	} else if (nce != NULL) {
		/* Next, try absolute free entry */
		ce = nce;
		goto snd_clone_alloc_out;
	} else if (allocunit > c->maxunit) {
		/*
		 * Maximum allowable unit reached. Try returning any
		 * available cdev and hope for the best. If the lookup is
		 * done for things like stat(), mtime() etc. , things should
		 * be ok. Otherwise, open() handler should do further checks
		 * and decide whether to return correct error code or not.
		 */
		if (tce != NULL) {
			ce = tce;
			goto snd_clone_alloc_out;
		} else if (bce != NULL) {
			ce = bce;
			goto snd_clone_alloc_out;
		}
		return (NULL);
	}

snd_clone_alloc_new:
	/*
	 * No free entries found, and we still haven't reached maximum
	 * allowable units. Allocate, setup a minimal unique entry with busy
	 * status so nobody will monkey on this new entry since we had to
	 * give up locking for further setup. Unit magic is set right here
	 * to avoid collision with other contesting handler.
	 */
	ce = malloc(sizeof(*ce), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ce == NULL) {
		if (*unit != -1)
			return (NULL);
		/*
		 * We're being dense, ignorance is bliss,
		 * Super Regulatory Measure (TM).. TRY AGAIN!
		 */
		if (tce != NULL) {
			ce = tce;
			goto snd_clone_alloc_out;
		} else if (bce != NULL) {
			ce = bce;
			goto snd_clone_alloc_out;
		}
		return (NULL);
	}
	/* Setup new entry */
	ce->parent = c;
	ce->unit = tmask | allocunit;
	ce->pid = curpid;
	ce->tsp = now;
	ce->flags |= SND_CLONE_ALLOC;
	if (after != NULL) {
		TAILQ_INSERT_AFTER(&c->head, after, ce, link);
	} else {
		TAILQ_INSERT_HEAD(&c->head, ce, link);
	}
	c->size++;
	c->tsp = now;
	/*
	 * Save new allocation unit for caller which will be used
	 * by make_dev().
	 */
	*unit = allocunit;

	return (ce);

snd_clone_alloc_out:
	/*
	 * Set, mark, timestamp the entry if this is a truly free entry.
	 * Leave busy entry alone.
	 */
	if (!(ce->flags & SND_CLONE_BUSY)) {
		ce->pid = curpid;
		ce->tsp = now;
		ce->flags |= SND_CLONE_INVOKE;
	}
	c->tsp = now;
	*dev = ce->devt;

	return (NULL);
}