diff --git a/sys/cddl/contrib/opensolaris/uts/common/dtrace/fasttrap.c b/sys/cddl/contrib/opensolaris/uts/common/dtrace/fasttrap.c
index fc468b0..b031289 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/dtrace/fasttrap.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/dtrace/fasttrap.c
@@ -61,8 +61,13 @@
 #include
 #include
 #if !defined(sun)
-#include
 #include
+#include
+#include
+#include
+#include
+#include
+#include
 #include
 #endif
 
@@ -206,6 +211,10 @@ static void fasttrap_provider_free(fasttrap_provider_t *);
 static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
 static void fasttrap_proc_release(fasttrap_proc_t *);
 
+#if !defined(sun)
+static void fasttrap_thread_dtor(void *, struct thread *);
+#endif
+
 #define	FASTTRAP_PROVS_INDEX(pid, name) \
 	((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)
 
@@ -213,6 +222,7 @@ static void fasttrap_proc_release(fasttrap_proc_t *);
 
 #if !defined(sun)
 static kmutex_t fasttrap_cpuc_pid_lock[MAXCPU];
+static eventhandler_tag fasttrap_thread_dtor_tag;
 #endif
 
 static int
@@ -289,6 +299,123 @@ fasttrap_sigtrap(proc_t *p, kthread_t *t, uintptr_t pc)
 #endif
 }
 
+#if !defined(sun)
+/*
+ * Obtain the userland address of a scratch space chunk.
+ */
+uintptr_t
+fasttrap_scraddr(struct thread *td, fasttrap_proc_t *fprc)
+{
+	fasttrap_scrblock_t *scrblk;
+	fasttrap_scrspace_t *scrspc;
+	struct proc *p;
+	uintptr_t ret;
+	vm_offset_t offset;
+	int error, i;
+
+	if (td->t_dtrace_sscr != NULL) {
+		/* If the thread already has scratch space, we're done. */
+		scrspc = (fasttrap_scrspace_t *)td->t_dtrace_sscr;
+		return (scrspc->ftss_addr);
+	}
+
+	if ((p = pfind(fprc->ftpc_pid)) == NULL)
+		return (0);
+	else
+		PROC_UNLOCK(p);
+
+	mutex_enter(&fprc->ftpc_mtx);
+	if (LIST_EMPTY(&fprc->ftpc_fscr)) {
+		/*
+		 * No scratch space is available, so we'll map a new scratch
+		 * space block into the traced process' address space.
+		 */
+		error = vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &offset,
+		    FASTTRAP_SCRBLOCK_SIZE, 0, VMFS_ANY_SPACE, VM_PROT_ALL,
+		    VM_PROT_ALL, 0);
+		if (error != KERN_SUCCESS) {
+			ret = 0;
+			goto done;
+		}
+
+		scrblk = malloc(sizeof(*scrblk), M_SOLARIS, M_WAITOK);
+		scrblk->ftsb_addr = offset;
+		LIST_INSERT_HEAD(&fprc->ftpc_scrblks, scrblk, ftsb_next);
+
+		/*
+		 * Carve the block up into chunks and put them on the free list.
+		 */
+		for (i = 0;
+		    i < FASTTRAP_SCRBLOCK_SIZE / FASTTRAP_SCRSPACE_SIZE; i++) {
+			scrspc = malloc(sizeof(*scrspc), M_SOLARIS, M_WAITOK);
+			scrspc->ftss_addr = offset +
+			    i * FASTTRAP_SCRSPACE_SIZE;
+			LIST_INSERT_HEAD(&fprc->ftpc_fscr, scrspc,
+			    ftss_next);
+		}
+	}
+
+	/*
+	 * Take the first scratch chunk off the free list, put it on the
+	 * allocated list, and return its address.
+	 */
+	scrspc = LIST_FIRST(&fprc->ftpc_fscr);
+	LIST_REMOVE(scrspc, ftss_next);
+	LIST_INSERT_HEAD(&fprc->ftpc_ascr, scrspc, ftss_next);
+
+	/*
+	 * This scratch space is reserved for use by td until the thread exits.
+	 */
+	td->t_dtrace_sscr = scrspc;
+	ret = scrspc->ftss_addr;
+
+done:
+	mutex_exit(&fprc->ftpc_mtx);
+
+	return (ret);
+}
+
+/*
+ * Return any allocated per-thread scratch space chunks back to the process'
+ * free list.
+ */
+static void
+fasttrap_thread_dtor(void *arg __unused, struct thread *td)
+{
+	fasttrap_bucket_t *bucket;
+	fasttrap_proc_t *fprc;
+	fasttrap_scrspace_t *scrspc;
+	pid_t pid;
+
+	pid = td->td_proc->p_pid;
+	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
+	fprc = NULL;
+
+	if (td->t_dtrace_sscr == NULL)
+		return;
+
+	/* Look up the fasttrap process handle for this process. */
+	mutex_enter(&bucket->ftb_mtx);
+	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
+		if (fprc->ftpc_pid == pid) {
+			mutex_enter(&fprc->ftpc_mtx);
+			mutex_exit(&bucket->ftb_mtx);
+			break;
+		}
+	}
+	if (fprc == NULL) {
+		mutex_exit(&bucket->ftb_mtx);
+		return;
+	}
+
+	scrspc = (fasttrap_scrspace_t *)td->t_dtrace_sscr;
+	LIST_REMOVE(scrspc, ftss_next);
+
+	LIST_INSERT_HEAD(&fprc->ftpc_fscr, scrspc, ftss_next);
+	mutex_exit(&fprc->ftpc_mtx);
+}
+#endif
+
 /*
  * This function ensures that no threads are actively using the memory
  * associated with probes that were formerly live.
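The hunk above is a small two-level allocator: fasttrap_scraddr() lazily maps one page-sized block into the traced process, carves it into FASTTRAP_SCRSPACE_SIZE chunks on the per-process free list (ftpc_fscr), and moves the chunk handed to a thread onto the allocated list (ftpc_ascr), caching its address in t_dtrace_sscr. A minimal userland sketch of the same free-list bookkeeping follows; it is illustrative only, with malloc() standing in for vm_map_find(), and the names carve_block/chunk_alloc and the sizes invented for the example rather than taken from the patch.

#include <sys/queue.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE	4096	/* plays the role of FASTTRAP_SCRBLOCK_SIZE */
#define CHUNK_SIZE	64	/* plays the role of FASTTRAP_SCRSPACE_SIZE */

struct chunk {
	uintptr_t addr;			/* start of this chunk */
	LIST_ENTRY(chunk) next;
};
static LIST_HEAD(, chunk) freelist = LIST_HEAD_INITIALIZER(freelist);
static LIST_HEAD(, chunk) usedlist = LIST_HEAD_INITIALIZER(usedlist);

/* Carve a fresh block into fixed-size chunks and push them on the free list. */
static void
carve_block(void)
{
	char *block = malloc(BLOCK_SIZE);	/* the kernel maps this into the target instead */
	assert(block != NULL);
	for (int i = 0; i < BLOCK_SIZE / CHUNK_SIZE; i++) {
		struct chunk *c = malloc(sizeof(*c));
		assert(c != NULL);
		c->addr = (uintptr_t)block + i * CHUNK_SIZE;
		LIST_INSERT_HEAD(&freelist, c, next);
	}
}

/* Hand out one chunk, refilling the free list first if it is empty. */
static uintptr_t
chunk_alloc(void)
{
	struct chunk *c;

	if (LIST_EMPTY(&freelist))
		carve_block();
	c = LIST_FIRST(&freelist);
	LIST_REMOVE(c, next);
	LIST_INSERT_HEAD(&usedlist, c, next);
	return (c->addr);
}

int
main(void)
{
	uintptr_t a = chunk_alloc();
	uintptr_t b = chunk_alloc();

	printf("chunk 0 at %#lx, chunk 1 at %#lx\n",
	    (unsigned long)a, (unsigned long)b);
	return (0);
}

Releasing a chunk is the mirror image: fasttrap_thread_dtor() moves it from the allocated list back onto ftpc_fscr rather than unmapping anything, so the block itself stays mapped for reuse by later threads of the same process.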
@@ -449,6 +576,10 @@ fasttrap_pid_cleanup(void)
 static void
 fasttrap_fork(proc_t *p, proc_t *cp)
 {
+#if !defined(sun)
+	fasttrap_scrblock_t *scrblk;
+	fasttrap_proc_t *fprc = NULL;
+#endif
 	pid_t ppid = p->p_pid;
 	int i;
 
@@ -534,9 +665,28 @@ fasttrap_fork(proc_t *p, proc_t *cp)
 			 * mid-fork.
 			 */
 			ASSERT(tp->ftt_proc->ftpc_acount != 0);
+#if !defined(sun)
+			fprc = tp->ftt_proc;
+#endif
 		}
 	}
 	mutex_exit(&bucket->ftb_mtx);
+
+#if !defined(sun)
+	/*
+	 * Unmap any scratch space inherited from the parent's address
+	 * space.
+	 */
+	if (fprc != NULL) {
+		mutex_enter(&fprc->ftpc_mtx);
+		LIST_FOREACH(scrblk, &fprc->ftpc_scrblks, ftsb_next) {
+			vm_map_remove(&cp->p_vmspace->vm_map,
+			    scrblk->ftsb_addr,
+			    scrblk->ftsb_addr + FASTTRAP_SCRBLOCK_SIZE);
+		}
+		mutex_exit(&fprc->ftpc_mtx);
+	}
+#endif
 }
 
 #if defined(sun)
@@ -557,12 +707,25 @@ fasttrap_fork(proc_t *p, proc_t *cp)
 static void
 fasttrap_exec_exit(proc_t *p)
 {
+#if !defined(sun)
+	struct thread *td;
+#endif
+
 #if defined(sun)
 	ASSERT(p == curproc);
-#endif
+#else
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	_PHOLD(p);
+	/*
+	 * Since struct threads may be recycled, we cannot rely on the
+	 * t_dtrace_sscr field being zeroed by kdtrace_thread_ctor; we must
+	 * zero it ourselves when a process exits.
+	 */
+	FOREACH_THREAD_IN_PROC(p, td) {
+		td->t_dtrace_sscr = NULL;
+	}
 	PROC_UNLOCK(p);
+#endif
 
 	/*
 	 * We clean up the pid provider for this process here; user-land
@@ -572,9 +735,9 @@ fasttrap_exec_exit(proc_t *p)
 #if !defined(sun)
 	if (p->p_dtrace_helpers)
 		dtrace_helpers_destroy(p);
-#endif
 	PROC_LOCK(p);
 	_PRELE(p);
+#endif
 }
 
 
@@ -1367,6 +1530,12 @@ fasttrap_proc_release(fasttrap_proc_t *proc)
 	fasttrap_bucket_t *bucket;
 	fasttrap_proc_t *fprc, **fprcp;
 	pid_t pid = proc->ftpc_pid;
+#if !defined(sun)
+	fasttrap_scrblock_t *scrblk, *scrblktmp;
+	fasttrap_scrspace_t *scrspc, *scrspctmp;
+	struct proc *p;
+	struct thread *td;
+#endif
 
 	mutex_enter(&proc->ftpc_mtx);
 
@@ -1406,6 +1575,26 @@ fasttrap_proc_release(fasttrap_proc_t *proc)
 	mutex_exit(&bucket->ftb_mtx);
 
+#if !defined(sun)
+	/*
+	 * Free all structures used to manage per-thread scratch space.
+	 */
+	LIST_FOREACH_SAFE(scrblk, &fprc->ftpc_scrblks, ftsb_next,
+	    scrblktmp) {
+		LIST_REMOVE(scrblk, ftsb_next);
+		free(scrblk, M_SOLARIS);
+	}
+	LIST_FOREACH_SAFE(scrspc, &fprc->ftpc_fscr, ftss_next, scrspctmp) {
+		LIST_REMOVE(scrspc, ftss_next);
+		free(scrspc, M_SOLARIS);
+	}
+	LIST_FOREACH_SAFE(scrspc, &fprc->ftpc_ascr, ftss_next, scrspctmp) {
+		LIST_REMOVE(scrspc, ftss_next);
+		free(scrspc, M_SOLARIS);
+	}
+
+#endif
+
 	kmem_free(fprc, sizeof (fasttrap_proc_t));
 }
 
@@ -2363,6 +2552,13 @@ fasttrap_load(void)
 		mutex_init(&fasttrap_cpuc_pid_lock[i], "fasttrap barrier",
 		    MUTEX_DEFAULT, NULL);
 	}
+
+	/*
+	 * This event handler must run before kdtrace_thread_dtor() since it
+	 * accesses the thread's struct kdtrace_thread.
+	 */
+	fasttrap_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
+	    fasttrap_thread_dtor, NULL, EVENTHANDLER_PRI_FIRST);
 #endif
 
 	/*
@@ -2464,6 +2660,8 @@ fasttrap_unload(void)
 #endif
 
 #if !defined(sun)
+	EVENTHANDLER_DEREGISTER(thread_dtor, fasttrap_thread_dtor_tag);
+
 	for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
 		mutex_destroy(&fasttrap_tpoints.fth_table[i].ftb_mtx);
 	for (i = 0; i < fasttrap_provs.fth_nent; i++)
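fasttrap_load() and fasttrap_unload() above tie the per-thread cleanup to the kernel's thread_dtor event. The self-contained module below shows the same EVENTHANDLER_REGISTER()/EVENTHANDLER_DEREGISTER() pattern in isolation; the module name, handler, and counter are hypothetical, and the handler body is a placeholder rather than what fasttrap actually does.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/proc.h>

static eventhandler_tag example_thread_dtor_tag;
static u_long example_thread_exits;	/* racy counter; fine for a toy example */

static void
example_thread_dtor(void *arg __unused, struct thread *td __unused)
{
	/*
	 * Runs for every thread being torn down.  Registering with
	 * EVENTHANDLER_PRI_FIRST is how the patch above orders itself ahead
	 * of kdtrace_thread_dtor().
	 */
	example_thread_exits++;
}

static int
example_modevent(module_t mod __unused, int type, void *data __unused)
{
	switch (type) {
	case MOD_LOAD:
		example_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
		    example_thread_dtor, NULL, EVENTHANDLER_PRI_FIRST);
		return (0);
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(thread_dtor, example_thread_dtor_tag);
		printf("saw %lu thread exits\n", example_thread_exits);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t example_mod = {
	"thread_dtor_example",
	example_modevent,
	NULL
};

DECLARE_MODULE(thread_dtor_example, example_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
MODULE_VERSION(thread_dtor_example, 1);

Deregistering the handler before the module's state goes away matters for the same reason it does in fasttrap_unload(): a thread exiting after unload must not call into freed code or data.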
+ */ + fasttrap_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor, + fasttrap_thread_dtor, NULL, EVENTHANDLER_PRI_FIRST); #endif /* @@ -2464,6 +2660,8 @@ fasttrap_unload(void) #endif #if !defined(sun) + EVENTHANDLER_DEREGISTER(thread_dtor, fasttrap_thread_dtor_tag); + for (i = 0; i < fasttrap_tpoints.fth_nent; i++) mutex_destroy(&fasttrap_tpoints.fth_table[i].ftb_mtx); for (i = 0; i < fasttrap_provs.fth_nent; i++) diff --git a/sys/cddl/contrib/opensolaris/uts/common/sys/fasttrap_impl.h b/sys/cddl/contrib/opensolaris/uts/common/sys/fasttrap_impl.h index 6512da7..aa693a3 100644 --- a/sys/cddl/contrib/opensolaris/uts/common/sys/fasttrap_impl.h +++ b/sys/cddl/contrib/opensolaris/uts/common/sys/fasttrap_impl.h @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -68,14 +69,45 @@ extern "C" { * then disabled, ownership of that tracepoint may be exchanged for an * unused tracepoint belonging to another probe that was attached to the * enabled tracepoint. + * + * On FreeBSD, fasttrap providers also maintain per-thread scratch space for use + * by the ISA-specific fasttrap code. The fasttrap_scrblock_t type stores the + * virtual address of a page-sized memory block that is mapped into a process' + * address space. Each block is carved up into chunks (fasttrap_scrspace_t) for + * use by individual threads, which keep the address of their scratch space + * chunk in their struct kdtrace_thread. A thread's scratch space isn't released + * until it exits. */ +#if !defined(sun) +typedef struct fasttrap_scrblock { + vm_offset_t ftsb_addr; /* address of a scratch block */ + LIST_ENTRY(fasttrap_scrblock) ftsb_next;/* next block in list */ +} fasttrap_scrblock_t; +#define FASTTRAP_SCRBLOCK_SIZE PAGE_SIZE + +typedef struct fasttrap_scrspace { + uintptr_t ftss_addr; /* scratch space address */ + LIST_ENTRY(fasttrap_scrspace) ftss_next;/* next in list */ +} fasttrap_scrspace_t; +#if defined(__amd64__) || defined(__i386__) +#define FASTTRAP_SCRSPACE_SIZE 64 +#else +#define FASTTRAP_SCRSPACE_SIZE 0 +#endif +#endif + typedef struct fasttrap_proc { pid_t ftpc_pid; /* process ID for this proc */ uint64_t ftpc_acount; /* count of active providers */ uint64_t ftpc_rcount; /* count of extant providers */ kmutex_t ftpc_mtx; /* lock on all but acount */ struct fasttrap_proc *ftpc_next; /* next proc in hash chain */ +#if !defined(sun) + LIST_HEAD(, fasttrap_scrblock) ftpc_scrblks; /* mapped scratch blocks */ + LIST_HEAD(, fasttrap_scrspace) ftpc_fscr; /* free scratch space */ + LIST_HEAD(, fasttrap_scrspace) ftpc_ascr; /* used scratch space */ +#endif } fasttrap_proc_t; typedef struct fasttrap_provider { @@ -170,6 +202,9 @@ typedef struct fasttrap_hash { #endif extern void fasttrap_sigtrap(proc_t *, kthread_t *, uintptr_t); +#if !defined(sun) +extern uintptr_t fasttrap_scraddr(struct thread *, fasttrap_proc_t *); +#endif extern dtrace_id_t fasttrap_probe_id; extern fasttrap_hash_t fasttrap_tpoints; diff --git a/sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c b/sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c index 53ad33b..4f34bbc 100644 --- a/sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c +++ b/sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c @@ -1546,7 +1546,6 @@ fasttrap_pid_probe(struct reg *rp) uint_t i = 0; #if defined(sun) klwp_t *lwp = ttolwp(curthread); -#endif /* * Compute the address of the ulwp_t and step over the @@ -1554,7 +1553,6 @@ fasttrap_pid_probe(struct reg *rp) * thread pointer is very different on 32- and 64-bit * kernels. 
diff --git a/sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c b/sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c
index 53ad33b..4f34bbc 100644
--- a/sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c
+++ b/sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c
@@ -1546,7 +1546,6 @@ fasttrap_pid_probe(struct reg *rp)
 		uint_t i = 0;
 #if defined(sun)
 		klwp_t *lwp = ttolwp(curthread);
-#endif
 
 		/*
 		 * Compute the address of the ulwp_t and step over the
@@ -1554,7 +1553,6 @@ fasttrap_pid_probe(struct reg *rp)
 		 * thread pointer is very different on 32- and 64-bit
 		 * kernels.
 		 */
-#if defined(sun)
 #if defined(__amd64)
 		if (p->p_model == DATAMODEL_LP64) {
 			addr = lwp->lwp_pcb.pcb_fsbase;
@@ -1567,13 +1565,21 @@ fasttrap_pid_probe(struct reg *rp)
 		addr = USD_GETBASE(&lwp->lwp_pcb.pcb_gsdesc);
 		addr += sizeof (void *);
 #endif
-#endif /* sun */
-#ifdef __i386__
-		addr = USD_GETBASE(&curthread->td_pcb->pcb_gsd);
 #else
-		addr = curthread->td_pcb->pcb_gsbase;
-#endif
-		addr += sizeof (void *);
+		addr = fasttrap_scraddr(curthread, tp->ftt_proc);
+		if (addr == 0) {
+			/*
+			 * We failed to allocate scratch space for this thread.
+			 * Try to write the original instruction back out and
+			 * reset the pc.
+			 */
+			if (fasttrap_copyout(tp->ftt_instr, (void *)pc,
+			    tp->ftt_size))
+				fasttrap_sigtrap(p, curthread, pc);
+			new_pc = pc;
+			break;
+		}
+#endif /* sun */
 
 		/*
 		 * Generic Instruction Tracing
diff --git a/sys/cddl/dev/dtrace/dtrace_cddl.h b/sys/cddl/dev/dtrace/dtrace_cddl.h
index d2adfbc..b0df2c1 100644
--- a/sys/cddl/dev/dtrace/dtrace_cddl.h
+++ b/sys/cddl/dev/dtrace/dtrace_cddl.h
@@ -84,6 +84,7 @@ typedef struct kdtrace_thread {
 #endif
 	u_int64_t	td_hrtime;	/* Last time on cpu. */
 	int		td_errno;	/* Syscall return value. */
+	void		*td_dtrace_sscr; /* Saved scratch space location. */
 } kdtrace_thread_t;
 
 /*
@@ -108,10 +109,12 @@ typedef struct kdtrace_thread {
 #define	t_dtrace_scrpc	td_dtrace->td_dtrace_scrpc
 #define	t_dtrace_astpc	td_dtrace->td_dtrace_astpc
 #define	t_dtrace_regv	td_dtrace->td_dtrace_regv
+#define	t_dtrace_sscr	td_dtrace->td_dtrace_sscr
 #define	p_dtrace_helpers	p_dtrace->p_dtrace_helpers
 #define	p_dtrace_count	p_dtrace->p_dtrace_count
 #define	p_dtrace_probes	p_dtrace->p_dtrace_probes
 #define	p_model		p_dtrace->p_dtrace_model
+
 #define	DATAMODEL_NATIVE	0
 #ifdef __amd64__
 #define	DATAMODEL_LP64	0
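End to end, the new code runs whenever the pid provider traces an instruction that falls through to the generic (FASTTRAP_T_COMMON) emulation case, which is when fasttrap_scraddr() is consulted for the current thread. A throwaway target such as the program below, traced with something along the lines of dtrace -n 'pid$target:a.out:work:' -c ./looper and compiled without optimization so that work() remains a distinct symbol, is one way to drive that path; the program name and probe specification are examples only, and which instructions take the common path depends on the instruction decoder.

/* looper.c - a hypothetical target for pid-provider testing. */
#include <stdio.h>
#include <unistd.h>

static volatile long sink;

/*
 * A function with plenty of ordinary instructions, giving the pid provider
 * many offset probes to attach tracepoints to.
 */
static void
work(long n)
{
	for (long i = 0; i < n; i++)
		sink += i ^ (i << 3);
}

int
main(void)
{
	for (;;) {
		work(1000000);
		printf("sink = %ld\n", sink);
		sleep(1);
	}
}

Once tracing has touched the common path, the traced process should carry one extra anonymous page-sized mapping (the scratch block), and it is inherited copies of exactly that mapping which the fasttrap_fork() hunk removes from the child.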