Index: sys/amd64/amd64/mp_machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/amd64/amd64/mp_machdep.c,v
retrieving revision 1.242.2.10
diff -u -p -r1.242.2.10 mp_machdep.c
--- sys/amd64/amd64/mp_machdep.c	1 May 2005 05:34:45 -0000	1.242.2.10
+++ sys/amd64/amd64/mp_machdep.c	12 May 2005 15:22:54 -0000
@@ -142,6 +142,9 @@ static int start_ap(int apic_id);
 static void release_aps(void *dummy);

 static int hlt_logical_cpus;
+static u_int hyperthreading_cpus;
+static cpumask_t hyperthreading_cpus_mask;
+static int hyperthreading_allowed;
 static struct sysctl_ctx_list logical_cpu_clist;
 static u_int bootMP_size;

@@ -301,6 +304,7 @@ void
 cpu_mp_start(void)
 {
         int i;
+        u_int threads_per_cache, p[4];

         /* Initialize the logical ID to APIC ID table. */
         for (i = 0; i < MAXCPU; i++) {
@@ -340,6 +344,48 @@ cpu_mp_start(void)
         if (cpu_feature & CPUID_HTT)
                 logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

+        /*
+         * Work out if hyperthreading is *really* enabled. This
+         * is made really ugly by the fact that processors lie: Dual
+         * core processors claim to be hyperthreaded even when they're
+         * not, presumably because they want to be treated the same
+         * way as HTT with respect to per-cpu software licensing.
+         * At the time of writing (May 12, 2005) the only hyperthreaded
+         * cpus are from Intel, and Intel's dual-core processors can be
+         * identified via the "deterministic cache parameters" cpuid
+         * calls.
+         */
+        /*
+         * First determine if this is an Intel processor which claims
+         * to have hyperthreading support.
+         */
+        if ((cpu_feature & CPUID_HTT) &&
+            (strcmp(cpu_vendor, "GenuineIntel") == 0)) {
+                /*
+                 * If the "deterministic cache parameters" cpuid calls
+                 * are available, use them.
+                 */
+                if (cpu_high >= 4) {
+                        /* Ask the processor about up to 32 caches. */
+                        for (i = 0; i < 32; i++) {
+                                cpuid_count(4, i, p);
+                                threads_per_cache = ((p[0] & 0x3ffc000) >> 14) + 1;
+                                if (hyperthreading_cpus < threads_per_cache)
+                                        hyperthreading_cpus = threads_per_cache;
+                                if ((p[0] & 0x1f) == 0)
+                                        break;
+                        }
+                }
+
+                /*
+                 * If the deterministic cache parameters are not
+                 * available, or if no caches were reported to exist,
+                 * just accept what the HTT flag indicated.
+                 */
+                if (hyperthreading_cpus == 0)
+                        hyperthreading_cpus = logical_cpus;
+        }
+
         set_logical_apic_ids();
 }

@@ -474,6 +520,11 @@ init_secondary(void)
         if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
                 logical_cpus_mask |= PCPU_GET(cpumask);

+        /* Determine if we are a hyperthread. */
+        if (hyperthreading_cpus > 1 &&
+            PCPU_GET(apic_id) % hyperthreading_cpus != 0)
+                hyperthreading_cpus_mask |= PCPU_GET(cpumask);
+
         /* Build our map of 'other' CPUs. */
         PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

@@ -1148,6 +1199,9 @@ sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
         else
                 hlt_logical_cpus = 0;

+        if (! hyperthreading_allowed)
+                mask |= hyperthreading_cpus_mask;
+
         if ((mask & all_cpus) == all_cpus)
                 mask &= ~(1<<0);
         hlt_cpus_mask = mask;
@@ -1172,6 +1226,9 @@ sysctl_hlt_logical_cpus(SYSCTL_HANDLER_A
         else
                 hlt_cpus_mask &= ~logical_cpus_mask;

+        if (! hyperthreading_allowed)
+                hlt_cpus_mask |= hyperthreading_cpus_mask;
+
         if ((hlt_cpus_mask & all_cpus) == all_cpus)
                 hlt_cpus_mask &= ~(1<<0);

@@ -1179,6 +1236,34 @@ sysctl_hlt_logical_cpus(SYSCTL_HANDLER_A
         return (error);
 }

+static int
+sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
+{
+        int allowed, error;
+
+        allowed = hyperthreading_allowed;
+        error = sysctl_handle_int(oidp, &allowed, 0, req);
+        if (error || !req->newptr)
+                return (error);
+
+        if (allowed)
+                hlt_cpus_mask &= ~hyperthreading_cpus_mask;
+        else
+                hlt_cpus_mask |= hyperthreading_cpus_mask;
+
+        if (logical_cpus_mask != 0 &&
+            (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
+                hlt_logical_cpus = 1;
+        else
+                hlt_logical_cpus = 0;
+
+        if ((hlt_cpus_mask & all_cpus) == all_cpus)
+                hlt_cpus_mask &= ~(1<<0);
+
+        hyperthreading_allowed = allowed;
+        return (error);
+}
+
 static void
 cpu_hlt_setup(void *dummy __unused)
 {
@@ -1198,6 +1283,22 @@ cpu_hlt_setup(void *dummy __unused)

                 if (hlt_logical_cpus)
                         hlt_cpus_mask |= logical_cpus_mask;
+
+                /*
+                 * If necessary for security purposes, force
+                 * hyperthreading off, regardless of the value
+                 * of hlt_logical_cpus.
+                 */
+                if (hyperthreading_cpus_mask) {
+                        TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
+                            &hyperthreading_allowed);
+                        SYSCTL_ADD_PROC(&logical_cpu_clist,
+                            SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
+                            "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
+                            0, 0, sysctl_hyperthreading_allowed, "IU", "");
+                        if (! hyperthreading_allowed)
+                                hlt_cpus_mask |= hyperthreading_cpus_mask;
+                }
         }
 }
 SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);
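Illustrative note (not part of the patch): the heuristic above relies on CPUID leaf 4, the "deterministic cache parameters" leaf. For each cache it reports, EAX bits 25:14 hold the maximum number of logical processors sharing that cache minus one, and EAX bits 4:0 give the cache type, with 0 meaning there are no further caches. Below is a minimal userland sketch of the same loop, assuming an x86 CPU and a GCC-compatible compiler; the cpuid_count() helper simply mirrors the one added to cpufunc.h further down, and the vendor and HTT-flag checks from the patch are omitted for brevity.

/*
 * Hypothetical userland sketch; mirrors the kernel loop above.
 * Assumes an x86 CPU and a GCC-compatible compiler.
 */
#include <stdio.h>

static void
cpuid_count(unsigned int ax, unsigned int cx, unsigned int *p)
{
        /* Same inline asm as the cpuid_count() added to cpufunc.h. */
        __asm __volatile("cpuid"
            : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
            : "0" (ax), "c" (cx));
}

int
main(void)
{
        unsigned int p[4], threads_per_cache, hyperthreading_cpus = 0;
        int i;

        /* Leaf 0: EAX returns the highest supported standard leaf. */
        cpuid_count(0, 0, p);
        if (p[0] < 4) {
                printf("deterministic cache parameters not supported\n");
                return (1);
        }

        /* Ask the processor about up to 32 caches. */
        for (i = 0; i < 32; i++) {
                cpuid_count(4, i, p);
                /* EAX bits 25:14: max threads sharing this cache, minus 1. */
                threads_per_cache = ((p[0] & 0x3ffc000) >> 14) + 1;
                if (hyperthreading_cpus < threads_per_cache)
                        hyperthreading_cpus = threads_per_cache;
                /* EAX bits 4:0: cache type; 0 means no more caches. */
                if ((p[0] & 0x1f) == 0)
                        break;
        }
        printf("max logical cpus sharing a cache: %u\n", hyperthreading_cpus);
        return (0);
}

On an HTT part the L1 cache is shared by two logical CPUs, so this reports 2; on the dual-core parts available at the time, no cache is shared between logical CPUs and it reports 1, which is exactly the distinction the patch uses to decide whether a CPU is really hyperthreaded.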
Index: sys/amd64/include/cpufunc.h
===================================================================
RCS file: /home/ncvs/src/sys/amd64/include/cpufunc.h,v
retrieving revision 1.145
diff -u -p -r1.145 cpufunc.h
--- sys/amd64/include/cpufunc.h	30 Jul 2004 16:44:29 -0000	1.145
+++ sys/amd64/include/cpufunc.h	12 May 2005 15:22:55 -0000
@@ -110,6 +110,14 @@ do_cpuid(u_int ax, u_int *p)
 }

 static __inline void
+cpuid_count(u_int ax, u_int cx, u_int *p)
+{
+        __asm __volatile("cpuid"
+            : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
+            : "0" (ax), "c" (cx));
+}
+
+static __inline void
 enable_intr(void)
 {
         __asm __volatile("sti");
Index: sys/i386/i386/mp_machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/mp_machdep.c,v
retrieving revision 1.235.2.9
diff -u -p -r1.235.2.9 mp_machdep.c
--- sys/i386/i386/mp_machdep.c	1 May 2005 05:34:46 -0000	1.235.2.9
+++ sys/i386/i386/mp_machdep.c	12 May 2005 15:22:55 -0000
@@ -217,6 +217,9 @@ static int start_ap(int apic_id);
 static void release_aps(void *dummy);

 static int hlt_logical_cpus;
+static u_int hyperthreading_cpus;
+static cpumask_t hyperthreading_cpus_mask;
+static int hyperthreading_allowed;
 static struct sysctl_ctx_list logical_cpu_clist;

 static void
@@ -353,6 +356,7 @@ void
 cpu_mp_start(void)
 {
         int i;
+        u_int threads_per_cache, p[4];

         POSTCODE(MP_START_POST);

@@ -404,6 +408,48 @@ cpu_mp_start(void)
         if (cpu_feature & CPUID_HTT)
                 logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

+        /*
+         * Work out if hyperthreading is *really* enabled. This
+         * is made really ugly by the fact that processors lie: Dual
+         * core processors claim to be hyperthreaded even when they're
+         * not, presumably because they want to be treated the same
+         * way as HTT with respect to per-cpu software licensing.
+         * At the time of writing (May 12, 2005) the only hyperthreaded
+         * cpus are from Intel, and Intel's dual-core processors can be
+         * identified via the "deterministic cache parameters" cpuid
+         * calls.
+         */
+        /*
+         * First determine if this is an Intel processor which claims
+         * to have hyperthreading support.
+         */
+        if ((cpu_feature & CPUID_HTT) &&
+            (strcmp(cpu_vendor, "GenuineIntel") == 0)) {
+                /*
+                 * If the "deterministic cache parameters" cpuid calls
+                 * are available, use them.
+                 */
+                if (cpu_high >= 4) {
+                        /* Ask the processor about up to 32 caches. */
+                        for (i = 0; i < 32; i++) {
+                                cpuid_count(4, i, p);
+                                threads_per_cache = ((p[0] & 0x3ffc000) >> 14) + 1;
+                                if (hyperthreading_cpus < threads_per_cache)
+                                        hyperthreading_cpus = threads_per_cache;
+                                if ((p[0] & 0x1f) == 0)
+                                        break;
+                        }
+                }
+
+                /*
+                 * If the deterministic cache parameters are not
+                 * available, or if no caches were reported to exist,
+                 * just accept what the HTT flag indicated.
+                 */
+                if (hyperthreading_cpus == 0)
+                        hyperthreading_cpus = logical_cpus;
+        }
+
         set_logical_apic_ids();
 }

@@ -539,6 +585,11 @@ init_secondary(void)
         if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
                 logical_cpus_mask |= PCPU_GET(cpumask);

+        /* Determine if we are a hyperthread. */
+        if (hyperthreading_cpus > 1 &&
+            PCPU_GET(apic_id) % hyperthreading_cpus != 0)
+                hyperthreading_cpus_mask |= PCPU_GET(cpumask);
+
         /* Build our map of 'other' CPUs. */
         PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

@@ -1368,6 +1419,9 @@ sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
         else
                 hlt_logical_cpus = 0;

+        if (! hyperthreading_allowed)
+                mask |= hyperthreading_cpus_mask;
+
         if ((mask & all_cpus) == all_cpus)
                 mask &= ~(1<<0);
         hlt_cpus_mask = mask;
@@ -1392,6 +1446,9 @@ sysctl_hlt_logical_cpus(SYSCTL_HANDLER_A
         else
                 hlt_cpus_mask &= ~logical_cpus_mask;

+        if (! hyperthreading_allowed)
+                hlt_cpus_mask |= hyperthreading_cpus_mask;
+
         if ((hlt_cpus_mask & all_cpus) == all_cpus)
                 hlt_cpus_mask &= ~(1<<0);

@@ -1399,6 +1456,34 @@ sysctl_hlt_logical_cpus(SYSCTL_HANDLER_A
         return (error);
 }

+static int
+sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
+{
+        int allowed, error;
+
+        allowed = hyperthreading_allowed;
+        error = sysctl_handle_int(oidp, &allowed, 0, req);
+        if (error || !req->newptr)
+                return (error);
+
+        if (allowed)
+                hlt_cpus_mask &= ~hyperthreading_cpus_mask;
+        else
+                hlt_cpus_mask |= hyperthreading_cpus_mask;
+
+        if (logical_cpus_mask != 0 &&
+            (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
+                hlt_logical_cpus = 1;
+        else
+                hlt_logical_cpus = 0;
+
+        if ((hlt_cpus_mask & all_cpus) == all_cpus)
+                hlt_cpus_mask &= ~(1<<0);
+
+        hyperthreading_allowed = allowed;
+        return (error);
+}
+
 static void
 cpu_hlt_setup(void *dummy __unused)
 {
@@ -1418,6 +1503,22 @@ cpu_hlt_setup(void *dummy __unused)

                 if (hlt_logical_cpus)
                         hlt_cpus_mask |= logical_cpus_mask;
+
+                /*
+                 * If necessary for security purposes, force
+                 * hyperthreading off, regardless of the value
+                 * of hlt_logical_cpus.
+                 */
+                if (hyperthreading_cpus_mask) {
+                        TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
+                            &hyperthreading_allowed);
+                        SYSCTL_ADD_PROC(&logical_cpu_clist,
+                            SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
+                            "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
+                            0, 0, sysctl_hyperthreading_allowed, "IU", "");
+                        if (! hyperthreading_allowed)
+                                hlt_cpus_mask |= hyperthreading_cpus_mask;
+                }
         }
 }
 SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);
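Illustrative note (not part of the patch): the machdep.hyperthreading_allowed knob added above is registered both as a loader tunable (fetched via TUNABLE_INT_FETCH before the sysctl is created) and as a read-write sysctl, and it only exists when the kernel actually found hyperthreaded CPUs (hyperthreading_cpus_mask != 0). A minimal sketch of how it could be queried or changed from userland with sysctlbyname(3) follows; the program and its argument handling are made up for the example, and setting the value requires root.

/*
 * Hypothetical usage sketch: read and optionally set
 * machdep.hyperthreading_allowed on a FreeBSD system with the
 * patch applied.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char *argv[])
{
        int allowed, newval;
        size_t len = sizeof(allowed);

        /* Read the current value; fails if the sysctl does not exist. */
        if (sysctlbyname("machdep.hyperthreading_allowed", &allowed, &len,
            NULL, 0) != 0)
                err(1, "machdep.hyperthreading_allowed not available");
        printf("hyperthreading currently %s\n",
            allowed ? "allowed" : "disabled");

        /* Optionally set a new value (0 or 1) from the command line. */
        if (argc > 1) {
                newval = atoi(argv[1]);
                if (sysctlbyname("machdep.hyperthreading_allowed", NULL, NULL,
                    &newval, sizeof(newval)) != 0)
                        err(1, "failed to set machdep.hyperthreading_allowed");
                printf("set to %d\n", newval);
        }
        return (0);
}

Toggling the sysctl at runtime adjusts hlt_cpus_mask so that the hyperthread siblings are either halted or released, mirroring what cpu_hlt_setup() does at boot when the tunable is set.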
Index: sys/i386/include/cpufunc.h
===================================================================
RCS file: /home/ncvs/src/sys/i386/include/cpufunc.h,v
retrieving revision 1.142
diff -u -p -r1.142 cpufunc.h
--- sys/i386/include/cpufunc.h	7 Apr 2004 20:46:05 -0000	1.142
+++ sys/i386/include/cpufunc.h	12 May 2005 15:22:55 -0000
@@ -89,6 +89,14 @@ do_cpuid(u_int ax, u_int *p)
 }

 static __inline void
+cpuid_count(u_int ax, u_int cx, u_int *p)
+{
+        __asm __volatile("cpuid"
+            : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
+            : "0" (ax), "c" (cx));
+}
+
+static __inline void
 enable_intr(void)
 {
         __asm __volatile("sti");