Index: libgomp/config/freebsd/affinity.c
===================================================================
--- libgomp/config/freebsd/affinity.c	(revision 0)
+++ libgomp/config/freebsd/affinity.c	(working copy)
@@ -0,0 +1,294 @@
+/* Copyright (C) 2015 Free Software Foundation, Inc.
+   Contributed by Adrian Chadd <adrian@FreeBSD.org>.
+   Based on work by Jakub Jelinek <jakub@redhat.com>.
+
+   This file is part of the GNU OpenMP Library (libgomp).
+
+   Libgomp is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
+   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* This is a FreeBSD specific implementation of a CPU affinity setting.  */
+
+#include "libgomp.h"
+
+/* NOTE(review): header names below were reconstructed from the symbols
+   used (cpuset_t, CPU_*, sysctl, pthread_*affinity_np); the original
+   patch's include list was mangled in transit — confirm against it.  */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/sysctl.h>
+#include <sys/cpuset.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+#include <pthread_np.h>
+
+#include <limits.h>	/* ULONG_MAX */
+
+extern unsigned long gomp_cpuset_size;
+extern cpuset_t *gomp_cpusetp;
+
+/*
+ * The Linux code supports the notion of variable sized
+ * CPU sets.  For now we don't, so just use the Linux
+ * compatibility defines here to bootstrap things.
+ */
+#define CPU_ISSET_S(idx, size, set) CPU_ISSET(idx, set)
+#define CPU_ZERO_S(size, set) CPU_ZERO(set)
+#define CPU_SET_S(idx, size, set) CPU_SET(idx, set)
+#define CPU_CLR_S(idx, size, set) CPU_CLR(idx, set)
+#define CPU_COUNT_S(size, set) CPU_COUNT(set)
+
+void
+gomp_init_affinity (void)
+{
+  if (gomp_places_list == NULL)
+    {
+      if (!gomp_affinity_init_level (1, ULONG_MAX, true))
+	return;
+    }
+
+  struct gomp_thread *thr = gomp_thread ();
+  pthread_setaffinity_np (pthread_self (), gomp_cpuset_size,
+			  (cpuset_t *) gomp_places_list[0]);
+  thr->place = 1;
+  thr->ts.place_partition_off = 0;
+  thr->ts.place_partition_len = gomp_places_list_len;
+}
+
+void
+gomp_init_thread_affinity (pthread_attr_t *attr, unsigned int place)
+{
+  pthread_attr_setaffinity_np (attr, gomp_cpuset_size,
+			       (cpuset_t *) gomp_places_list[place]);
+}
+
+void **
+gomp_affinity_alloc (unsigned long count, bool quiet)
+{
+  unsigned long i;
+  void **ret;
+  char *p;
+
+  if (gomp_cpusetp == NULL)
+    {
+      if (!quiet)
+	gomp_error ("Could not get CPU affinity set");
+      return NULL;
+    }
+
+  ret = malloc (count * sizeof (void *) + count * gomp_cpuset_size);
+  if (ret == NULL)
+    {
+      if (!quiet)
+	gomp_error ("Out of memory trying to allocate places list");
+      return NULL;
+    }
+
+  p = (char *) (ret + count);
+  for (i = 0; i < count; i++, p += gomp_cpuset_size)
+    ret[i] = p;
+  return ret;
+}
+
+void
+gomp_affinity_init_place (void *p)
+{
+  cpuset_t *cpusetp = (cpuset_t *) p;
+  CPU_ZERO_S (gomp_cpuset_size, cpusetp);
+}
+
+bool
+gomp_affinity_add_cpus (void *p, unsigned long num,
+			unsigned long len, long stride, bool quiet)
+{
+  cpuset_t *cpusetp = (cpuset_t *) p;
+  unsigned long max = 8 * gomp_cpuset_size;
+  for (;;)
+    {
+      if (num >= max)
+	{
+	  if (!quiet)
+	    gomp_error ("Logical CPU number %lu out of range", num);
+	  return false;
+	}
+      CPU_SET_S (num, gomp_cpuset_size, cpusetp);
+      if (--len == 0)
+	return true;
+      if ((stride < 0 && num + stride > num)
+	  || (stride > 0 && num + stride < num))
+	{
+	  if (!quiet)
+	    gomp_error ("Logical CPU number %lu+%ld out of range",
+			num, stride);
+	  return false;
+	}
+      num += stride;
+    }
+}
+
+bool
+gomp_affinity_remove_cpu (void *p, unsigned long num)
+{
+  cpuset_t *cpusetp = (cpuset_t *) p;
+  if (num >= 8 * gomp_cpuset_size)
+    {
+      gomp_error ("Logical CPU number %lu out of range", num);
+      return false;
+    }
+  if (!CPU_ISSET_S (num, gomp_cpuset_size, cpusetp))
+    {
+      gomp_error ("Logical CPU %lu to be removed is not in the set", num);
+      return false;
+    }
+  CPU_CLR_S (num, gomp_cpuset_size, cpusetp);
+  return true;
+}
+
+bool
+gomp_affinity_copy_place (void *p, void *q, long stride)
+{
+  unsigned long i, max = 8 * gomp_cpuset_size;
+  cpuset_t *destp = (cpuset_t *) p;
+  cpuset_t *srcp = (cpuset_t *) q;
+
+  CPU_ZERO_S (gomp_cpuset_size, destp);
+  for (i = 0; i < max; i++)
+    if (CPU_ISSET_S (i, gomp_cpuset_size, srcp))
+      {
+	if ((stride < 0 && i + stride > i)
+	    || (stride > 0 && (i + stride < i || i + stride >= max)))
+	  {
+	    gomp_error ("Logical CPU number %lu+%ld out of range", i, stride);
+	    return false;
+	  }
+	CPU_SET_S (i + stride, gomp_cpuset_size, destp);
+      }
+  return true;
+}
+
+bool
+gomp_affinity_same_place (void *p, void *q)
+{
+#ifdef CPU_EQUAL_S
+  return CPU_EQUAL_S (gomp_cpuset_size, (cpuset_t *) p, (cpuset_t *) q);
+#else
+  return memcmp (p, q, gomp_cpuset_size) == 0;
+#endif
+}
+
+bool
+gomp_affinity_finalize_place_list (bool quiet)
+{
+  unsigned long i, j;
+
+  for (i = 0, j = 0; i < gomp_places_list_len; i++)
+    {
+      cpuset_t *cpusetp = (cpuset_t *) gomp_places_list[i];
+      bool nonempty = false;
+#ifdef CPU_AND_S
+      CPU_AND_S (gomp_cpuset_size, cpusetp, cpusetp, gomp_cpusetp);
+      nonempty = gomp_cpuset_popcount (gomp_cpuset_size, cpusetp) != 0;
+#else
+      unsigned long k, max = gomp_cpuset_size / sizeof (cpusetp->__bits[0]);
+      for (k = 0; k < max; k++)
+	if ((cpusetp->__bits[k] &= gomp_cpusetp->__bits[k]) != 0)
+	  nonempty = true;
+#endif
+      if (nonempty)
+	gomp_places_list[j++] = gomp_places_list[i];
+    }
+
+  if (j == 0)
+    {
+      if (!quiet)
+	gomp_error ("None of the places contain usable logical CPUs");
+      return false;
+    }
+  else if (j < gomp_places_list_len)
+    {
+      if (!quiet)
+	gomp_error ("Number of places reduced from %ld to %ld because some "
+		    "places didn't contain any usable logical CPUs",
+		    gomp_places_list_len, j);
+      gomp_places_list_len = j;
+    }
+  return true;
+}
+
+bool
+gomp_affinity_init_level (int level, unsigned long count, bool quiet)
+{
+  unsigned long i, max = 8 * gomp_cpuset_size;
+
+  if (gomp_cpusetp)
+    {
+      unsigned long maxcount
+	= CPU_COUNT_S (gomp_cpuset_size, gomp_cpusetp);
+      if (count > maxcount)
+	count = maxcount;
+    }
+  gomp_places_list = gomp_affinity_alloc (count, quiet);
+  gomp_places_list_len = 0;
+  if (gomp_places_list == NULL)
+    return false;
+  /* SMT (threads).  */
+  if (level == 1)
+    {
+      for (i = 0; i < max && gomp_places_list_len < count; i++)
+	if (CPU_ISSET_S (i, gomp_cpuset_size, gomp_cpusetp))
+	  {
+	    gomp_affinity_init_place (gomp_places_list[gomp_places_list_len]);
+	    gomp_affinity_add_cpus (gomp_places_list[gomp_places_list_len],
+				    i, 1, 0, true);
+	    ++gomp_places_list_len;
+	  }
+      return true;
+    }
+
+  /* For now, there's no other affinity type support just yet */
+  return false;
+}
+
+void
+gomp_affinity_print_place (void *p)
+{
+  unsigned long i, max = 8 * gomp_cpuset_size, len;
+  cpuset_t *cpusetp = (cpuset_t *) p;
+  bool notfirst = false;
+
+  for (i = 0, len = 0; i < max; i++)
+    if (CPU_ISSET_S (i, gomp_cpuset_size, cpusetp))
+      {
+	if (len == 0)
+	  {
+	    if (notfirst)
+	      fputc (',', stderr);
+	    notfirst = true;
+	    fprintf (stderr, "%lu", i);
+	  }
+	++len;
+      }
+    else
+      {
+	if (len > 1)
+	  fprintf (stderr, ":%lu", len);
+	len = 0;
+      }
+  if (len > 1)
+    fprintf (stderr, ":%lu", len);
+}
Index: libgomp/config/freebsd/proc.c
===================================================================
--- libgomp/config/freebsd/proc.c	(revision 0)
+++ libgomp/config/freebsd/proc.c	(working copy)
@@ -0,0 +1,154 @@
+/* Copyright (C) 2015 Free Software Foundation, Inc.
+   Contributed by Adrian Chadd <adrian@FreeBSD.org>.
+   Based on work by Richard Henderson <rth@redhat.com>.
+
+   This file is part of the GNU OpenMP Library (libgomp).
+
+   Libgomp is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
+   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* This file contains system specific routines related to counting
+   online processors and dynamic load balancing.  It is expected that
+   a system may well want to write special versions of each of these.
+
+   The following implementation is FreeBSD-specific.  */
+
+#include "libgomp.h"
+#include <unistd.h>
+#include <stdlib.h>
+#ifdef HAVE_GETLOADAVG
+# ifdef HAVE_SYS_LOADAVG_H
+#  include <sys/loadavg.h>
+# endif
+#endif
+
+/* NOTE(review): reconstructed include names — confirm against the
+   original patch posting.  */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/sysctl.h>
+#include <sys/cpuset.h>
+#include <pthread.h>
+#include <pthread_np.h>
+
+#include <err.h>
+
+unsigned long gomp_cpuset_size;
+
+cpuset_t *gomp_cpusetp;
+
+static int
+get_num_procs (void)
+{
+#ifdef _SC_NPROCESSORS_ONLN
+  return sysconf (_SC_NPROCESSORS_ONLN);
+#elif defined HW_NCPU
+  int ncpus = 1;
+  size_t len = sizeof(ncpus);
+  sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &ncpus, &len, NULL, 0);
+  return ncpus;
+#else
+  return 0;
+#endif
+}
+
+static int
+get_gomp_cpusetsize(void)
+{
+  size_t len = sizeof(int);
+  int val = 0;
+
+  if (sysctlbyname("kern.sched.cpusetsize", &val, &len, NULL, 0) != 0) {
+    warn("%s: sysctlbyname(kern.sched.cpusetsize)", __func__);
+    /* Default to something useful */
+    return (32);
+  }
+  return (val);
+}
+
+/* At startup, determine the default number of threads.  It would seem
+   this should be related to the number of cpus online.  */
+
+void
+gomp_init_num_threads (void)
+{
+  int ncpus = get_num_procs ();
+
+  gomp_cpuset_size = get_gomp_cpusetsize();
+  gomp_cpusetp = (cpuset_t *) gomp_malloc (gomp_cpuset_size);
+
+  /* XXX error checking! */
+
+  if (ncpus > 0)
+    gomp_global_icv.nthreads_var = ncpus;
+
+  /*
+   * look at linux/proc.c - see how things get
+   * auto sized based on the number of cpus the process
+   * is able to run on.
+   */
+  if (pthread_getaffinity_np(pthread_self(), gomp_cpuset_size,
+      gomp_cpusetp) == 0) {
+    gomp_global_icv.nthreads_var = CPU_COUNT(gomp_cpusetp);
+  }
+}
+
+/* When OMP_DYNAMIC is set, at thread launch determine the number of
+   threads we should spawn for this team.  */
+/* ??? I have no idea what best practice for this is.  Surely some
+   function of the number of processors that are *still* online and
+   the load average.  Here I use the number of processors online
+   minus the 15 minute load average.  */
+
+unsigned
+gomp_dynamic_max_threads (void)
+{
+  unsigned n_onln, loadavg;
+  unsigned nthreads_var = gomp_icv (false)->nthreads_var;
+
+  n_onln = get_num_procs ();
+  if (!n_onln || n_onln > nthreads_var)
+    n_onln = nthreads_var;
+
+  loadavg = 0;
+#ifdef HAVE_GETLOADAVG
+  {
+    double dloadavg[3];
+    if (getloadavg (dloadavg, 3) == 3)
+      {
+	/* Add 0.1 to get a kind of biased rounding.  */
+	loadavg = dloadavg[2] + 0.1;
+      }
+  }
+#endif
+
+  if (loadavg >= n_onln)
+    return 1;
+  else
+    return n_onln - loadavg;
+}
+
+int
+omp_get_num_procs (void)
+{
+  int ncpus = get_num_procs ();
+  if (ncpus <= 0)
+    ncpus = gomp_icv (false)->nthreads_var;
+  return ncpus;
+}
+
+ialias (omp_get_num_procs)
Index: libgomp/configure.tgt
===================================================================
--- libgomp/configure.tgt	(revision 222864)
+++ libgomp/configure.tgt	(working copy)
@@ -141,6 +141,7 @@
 	;;
 
   *-*-freebsd*)
+	config_path="freebsd bsd posix"
 	# Need to link with -lpthread so libgomp.so is self-contained.
 	XLDFLAGS="${XLDFLAGS} -lpthread"
 	;;