author    | Jake Burkholder <jake@FreeBSD.org> | 2001-07-31 06:05:05 +0000
committer | Jake Burkholder <jake@FreeBSD.org> | 2001-07-31 06:05:05 +0000
commit    | 89bf8575eeadb650aefa7fe48d0d9c9f58cd4d2a (patch)
tree      | 09518e5a462c7f369bd3cbd7d956ad24588b2a9b /sys/sparc64/include
parent    | 98bb5304e12e75a43230c686a5c7cb422d8f780d (diff)
Flesh out the sparc64 port considerably. This contains:
- mostly complete kernel pmap support, plus userland pmap support that is
  tested but currently turned off
- low-level assembly language trap, context switching, and support code
- fully implemented atomic.h and supporting cpufunc.h (see the sketches below)
- some support for kernel debugging with ddb
- various header tweaks and filling out of machine dependent structures
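The new atomic.h below is built around the UltraSPARC cas/casx instructions, and its leading comment explains the reload-free update loop this commit adopts. A minimal sketch of that pattern, assuming a GCC-style inline-assembly wrapper (cas32 and atomic_inc32 are illustrative names, not the kernel's):

#include <stdint.h>

/*
 * Hypothetical wrapper for the SPARC v9 "cas" synthetic instruction:
 * if (*p == expect), *p and new are swapped; either way, new ends up
 * holding the old value of *p.
 */
static inline uint32_t
cas32(volatile uint32_t *p, uint32_t expect, uint32_t new)
{
        __asm__ __volatile__("cas [%1], %2, %0"
            : "+r" (new)
            : "r" (p), "r" (expect)
            : "memory");
        return (new);
}

/*
 * Atomic increment using the loop structure described in the atomic.h
 * comment: the value returned by cas becomes the next expected value,
 * so nothing is reloaded from memory on a failed iteration.
 */
static inline void
atomic_inc32(volatile uint32_t *p)
{
        uint32_t expect, result;

        expect = *p;
        for (;;) {
                result = cas32(p, expect, expect + 1);
                if (result == expect)
                        break;
                expect = result;
        }
}

The expect = result step is the difference from the naive do/while form the comment criticizes, which reloads *p after every failed cas.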
Notes:
svn path=/head/; revision=80709
Diffstat (limited to 'sys/sparc64/include')
25 files changed, 1595 insertions, 361 deletions
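For a consumer's view of why ATOMIC_GEN() in the atomic.h hunk below emits _acq and _rel variants: under relaxed memory ordering, a lock needs an acquire barrier on entry and a release barrier on exit. A rough spin-lock sketch built on the generated interfaces, for illustration only (the kernel itself uses mtx(9); struct spin, spin_lock, and spin_unlock are hypothetical names):

#include <machine/atomic.h>     /* the header rewritten below */

struct spin {
        volatile int    s_lock;         /* 0 == free, 1 == held */
};

static __inline void
spin_lock(struct spin *s)
{

        /*
         * The acquire barrier in atomic_cmpset_acq_int() keeps loads
         * and stores in the critical section from being reordered
         * above the point where the lock is taken.
         */
        while (atomic_cmpset_acq_int(&s->s_lock, 0, 1) == 0)
                ;
}

static __inline void
spin_unlock(struct spin *s)
{

        /*
         * The release barrier in atomic_store_rel_int() keeps the
         * critical section from being reordered below the store that
         * frees the lock.
         */
        atomic_store_rel_int(&s->s_lock, 0);
}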
diff --git a/sys/sparc64/include/asi.h b/sys/sparc64/include/asi.h new file mode 100644 index 000000000000..34f146de1405 --- /dev/null +++ b/sys/sparc64/include/asi.h @@ -0,0 +1,93 @@ +/*- + * Copyright (c) 2001 Jake Burkholder. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _MACHINE_ASI_H_ +#define _MACHINE_ASI_H_ + +/* + * Standard v9 asis + */ +#define ASI_N 0x4 +#define ASI_NL 0xc +#define ASI_AIUP 0x10 +#define ASI_AIUS 0x11 +#define ASI_AIUSL 0x19 +#define ASI_P 0x80 +#define ASI_S 0x81 +#define ASI_PNF 0x82 +#define ASI_SNF 0x83 +#define ASI_PL 0x88 +#define ASI_PNFL 0x8a +#define ASI_SNFL 0x8b + +/* + * UltraSPARC extensions + */ +#define ASI_PHYS_USE_EC 0x14 +#define ASI_PHYS_BYPASS_EC_WITH_EBIT 0x15 +#define ASI_PHYS_USE_EC_L 0x1c +#define ASI_PHYS_BYPASS_EC_WITH_EBIT_L 0x1d + +#define ASI_NUCLEUS_QUAD_LDD 0x24 +#define ASI_NUCLEUS_QUAD_LDD_L 0x2c + +#define ASI_IMMU 0x50 +#define AA_IMMU_TTR 0x0 +#define AA_IMMU_SFSR 0x18 +#define AA_IMMU_TSB 0x28 +#define AA_IMMU_TAR 0x30 + +#define ASI_IMMU_TSB_8KB_PTR_REG 0x51 +#define ASI_IMMU_TSB_64KB_PTR_REG 0x52 +#define ASI_ITLB_DATA_IN_REG 0x54 +#define ASI_ITLB_DATA_ACCESS_REG 0x55 +#define ASI_ITLB_TAG_READ_REG 0x56 +#define ASI_IMMU_DEMAP 0x57 + +#define ASI_DMMU_TAG_TARGET_REG 0x58 + +#define ASI_DMMU 0x58 +#define AA_DMMU_TTR 0x0 +#define AA_DMMU_PCXR 0x8 +#define AA_DMMU_SCXR 0x10 +#define AA_DMMU_SFSR 0x18 +#define AA_DMMU_SFAR 0x20 +#define AA_DMMU_TSB 0x28 +#define AA_DMMU_TAR 0x30 +#define AA_DMMU_VWPR 0x38 +#define AA_DMMU_PWPR 0x40 + +#define ASI_DMMU_TSB_8KB_PTR_REG 0x59 +#define ASI_DMMU_TSB_64KB_PTR_REG 0x5a +#define ASI_DMMU_TSB_DIRECT_PTR_REG 0x5b +#define ASI_DTLB_DATA_IN_REG 0x5c +#define ASI_DTLB_DATA_ACCESS_REG 0x5d +#define ASI_DTLB_TAG_READ_REG 0x5e +#define ASI_DMMU_DEMAP 0x5f + +#endif /* !_MACHINE_ASI_H_ */ diff --git a/sys/sparc64/include/asmacros.h b/sys/sparc64/include/asmacros.h new file mode 100644 index 000000000000..efca538093da --- /dev/null +++ b/sys/sparc64/include/asmacros.h @@ -0,0 +1,64 @@ +/*- + * Copyright (c) 2001 Jake Burkholder. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _MACHINE_ASMACROS_H_ +#define _MACHINE_ASMACROS_H_ + +#ifdef _KERNEL + +#define PCPU(member) %g7 + GD_ ## member +#define DEBUGGER() ta %xcc, 1 +#define PANIC(msg, reg) \ + .sect .rodata ; \ +9: .asciz msg ; \ + .previous ; \ + setx 9b, reg, %o0 ; \ + call panic ; \ + nop + +#endif + +#define DATA(name) \ + .data ; \ + .globl name ; \ + .type name, @object ; \ +name ## : + +#define EMPTY + +#define ENTRY(name) \ + .text ; \ + .align 4 ; \ + .globl name ; \ + .type name, @function ; \ +name ## : + +#define END(name) \ + .size name, . - name + +#endif /* !_MACHINE_ASMACROS_H_ */ diff --git a/sys/sparc64/include/atomic.h b/sys/sparc64/include/atomic.h index b8273e9d3702..3dcb616da6ad 100644 --- a/sys/sparc64/include/atomic.h +++ b/sys/sparc64/include/atomic.h @@ -26,327 +26,241 @@ * $FreeBSD$ */ +#ifndef _MACHINE_ATOMIC_H_ +#define _MACHINE_ATOMIC_H_ + +#include <machine/cpufunc.h> + /* - * This is not atomic. It is just a stub to make things compile. + * Various simple arithmetic on memory which is atomic in the presence + * of interrupts and multiple processors. See atomic(9) for details. + * Note that efficient hardware support exists only for the 32 and 64 + * bit variants; the 8 and 16 bit versions are not provided and should + * not be used in MI code. + * + * This implementation takes advantage of the fact that the sparc64 + * cas instruction is both a load and a store. The loop is often coded + * as follows: + * + * do { + * expect = *p; + * new = expect + 1; + * } while (cas(p, expect, new) != expect); + * + * which performs an unnnecessary load on each iteration that the cas + * operation fails. Modified as follows: + * + * expect = *p; + * for (;;) { + * new = expect + 1; + * result = cas(p, expect, new); + * if (result == expect) + * break; + * expect = result; + * } + * + * the return value of cas is used to avoid the extra reload. At the + * time of writing, with gcc version 2.95.3, the branch for the if + * statement is predicted incorrectly as not taken, rather than taken. + * It is expected that the branch prediction hints available in gcc 3.0, + * __builtin_expect, will allow better code to be generated. 
+ * + * The memory barriers provided by the acq and rel variants are intended + * to be sufficient for use of relaxed memory ordering. Due to the + * suggested assembly syntax of the membar operands containing a # + * character, they cannot be used in macros. The cmask and mmask bits + * are hard coded in machine/cpufunc.h and used here through macros. + * Hopefully sun will choose not to change the bit numbers. */ -#ifndef _MACHINE_ATOMIC_H_ -#define _MACHINE_ATOMIC_H_ +#define itype(sz) u_int ## sz ## _t -#define __atomic_op(p, op, v) ({ \ - __typeof(*p) __v = (__typeof(*p))v; \ - *p op __v; \ -}) +#define atomic_cas_32(p, e, s) casa(p, e, s, ASI_N) +#define atomic_cas_64(p, e, s) casxa(p, e, s, ASI_N) + +#define atomic_cas(p, e, s, sz) \ + atomic_cas_ ## sz(p, e, s) -#define __atomic_load(p) ({ \ - __typeof(*p) __v; \ - __v = *p; \ - __v; \ +#define atomic_cas_acq(p, e, s, sz) ({ \ + itype(sz) v; \ + v = atomic_cas(p, e, s, sz); \ + membar(LoadLoad | LoadStore); \ + v; \ }) -#define __atomic_load_clear(p) ({ \ - __typeof(*p) __v; \ - __v = *p; \ - *p = 0; \ - __v; \ +#define atomic_cas_rel(p, e, s, sz) ({ \ + itype(sz) v; \ + membar(LoadStore | StoreStore); \ + v = atomic_cas(p, e, s, sz); \ + v; \ }) -#define __atomic_cas(p, e, s) ({ \ - u_int __v; \ - if (*p == (__typeof(*p))e) { \ - *p = (__typeof(*p))s; \ - __v = 1; \ - } else { \ - __v = 0; \ +#define atomic_op(p, op, v, sz) do { \ + itype(sz) e, r, s; \ + for (e = *(volatile itype(sz) *)p;; e = r) { \ + s = e op v; \ + r = atomic_cas_ ## sz(p, e, s); \ + if (r == e) \ + break; \ } \ - __v; \ +} while (0) + +#define atomic_op_acq(p, op, v, sz) do { \ + atomic_op(p, op, v, sz); \ + membar(LoadLoad | LoadStore); \ +} while (0) + +#define atomic_op_rel(p, op, v, sz) do { \ + membar(LoadStore | StoreStore); \ + atomic_op(p, op, v, sz); \ +} while (0) + +#define atomic_load_acq(p, sz) ({ \ + itype(sz) v; \ + v = atomic_cas_ ## sz(p, 0, 0); \ + membar(LoadLoad | LoadStore); \ + v; \ }) -#define __atomic_op_8(p, op, v) __atomic_op(p, op, v) -#define __atomic_op_16(p, op, v) __atomic_op(p, op, v) -#define __atomic_op_32(p, op, v) __atomic_op(p, op, v) -#define __atomic_load_32(p) __atomic_load(p) -#define __atomic_load_clear_32(p) __atomic_load_clear(p) -#define __atomic_cas_32(p, e, s) __atomic_cas(p, e, s) -#define __atomic_op_64(p, op, v) __atomic_op(p, op, v) -#define __atomic_load_64(p) __atomic_load(p) -#define __atomic_load_clear_64(p) __atomic_load_clear(p) -#define __atomic_cas_64(p, e, s) __atomic_cas(p, e, s) - -#define atomic_add_8(p, v) __atomic_op_8(p, +=, v) -#define atomic_subtract_8(p, v) __atomic_op_8(p, -=, v) -#define atomic_set_8(p, v) __atomic_op_8(p, |=, v) -#define atomic_clear_8(p, v) __atomic_op_8(p, &=, ~v) -#define atomic_store_8(p, v) __atomic_op_8(p, =, v) - -#define atomic_add_16(p, v) __atomic_op_16(p, +=, v) -#define atomic_subtract_16(p, v) __atomic_op_16(p, -=, v) -#define atomic_set_16(p, v) __atomic_op_16(p, |=, v) -#define atomic_clear_16(p, v) __atomic_op_16(p, &=, ~v) -#define atomic_store_16(p, v) __atomic_op_16(p, =, v) - -#define atomic_add_32(p, v) __atomic_op_32(p, +=, v) -#define atomic_subtract_32(p, v) __atomic_op_32(p, -=, v) -#define atomic_set_32(p, v) __atomic_op_32(p, |=, v) -#define atomic_clear_32(p, v) __atomic_op_32(p, &=, ~v) -#define atomic_store_32(p, v) __atomic_op_32(p, =, v) -#define atomic_load_32(p) __atomic_load_32(p) -#define atomic_readandclear_32(p) __atomic_load_clear_32(p) -#define atomic_cmpset_32(p, e, s) __atomic_cas_32(p, e, s) - -#define atomic_add_64(p, v) 
__atomic_op_64(p, +=, v) -#define atomic_subtract_64(p, v) __atomic_op_64(p, -=, v) -#define atomic_set_64(p, v) __atomic_op_64(p, |=, v) -#define atomic_clear_64(p, v) __atomic_op_64(p, &=, ~v) -#define atomic_store_64(p, v) __atomic_op_64(p, =, v) -#define atomic_load_64(p) __atomic_load_64(p) -#define atomic_readandclear_64(p) __atomic_load_clear_64(p) -#define atomic_cmpset_64(p, e, s) __atomic_cas_64(p, e, s) - -#define atomic_add_acq_8(p, v) __atomic_op_8(p, +=, v) -#define atomic_subtract_acq_8(p, v) __atomic_op_8(p, -=, v) -#define atomic_set_acq_8(p, v) __atomic_op_8(p, |=, v) -#define atomic_clear_acq_8(p, v) __atomic_op_8(p, &=, ~v) -#define atomic_store_acq_8(p, v) __atomic_op_8(p, =, v) - -#define atomic_add_acq_16(p, v) __atomic_op_16(p, +=, v) -#define atomic_subtract_acq_16(p, v) __atomic_op_16(p, -=, v) -#define atomic_set_acq_16(p, v) __atomic_op_16(p, |=, v) -#define atomic_clear_acq_16(p, v) __atomic_op_16(p, &=, ~v) -#define atomic_store_acq_16(p, v) __atomic_op_16(p, =, v) - -#define atomic_add_acq_32(p, v) __atomic_op_32(p, +=, v) -#define atomic_subtract_acq_32(p, v) __atomic_op_32(p, -=, v) -#define atomic_set_acq_32(p, v) __atomic_op_32(p, |=, v) -#define atomic_clear_acq_32(p, v) __atomic_op_32(p, &=, ~v) -#define atomic_store_acq_32(p, v) __atomic_op_32(p, =, v) -#define atomic_load_acq_32(p) __atomic_load_32(p) -#define atomic_cmpset_acq_32(p, e, s) __atomic_cas_32(p, e, s) - -#define atomic_add_acq_64(p, v) __atomic_op_64(p, +=, v) -#define atomic_subtract_acq_64(p, v) __atomic_op_64(p, -=, v) -#define atomic_set_acq_64(p, v) __atomic_op_64(p, |=, v) -#define atomic_clear_acq_64(p, v) __atomic_op_64(p, &=, ~v) -#define atomic_store_acq_64(p, v) __atomic_op_64(p, =, v) -#define atomic_load_acq_64(p) __atomic_load_64(p) -#define atomic_cmpset_acq_64(p, e, s) __atomic_cas_64(p, e, s) - -#define atomic_add_rel_8(p, v) __atomic_op_8(p, +=, v) -#define atomic_subtract_rel_8(p, v) __atomic_op_8(p, -=, v) -#define atomic_set_rel_8(p, v) __atomic_op_8(p, |=, v) -#define atomic_clear_rel_8(p, v) __atomic_op_8(p, &=, ~v) -#define atomic_store_rel_8(p, v) __atomic_op_8(p, =, v) - -#define atomic_add_rel_16(p, v) __atomic_op_16(p, +=, v) -#define atomic_subtract_rel_16(p, v) __atomic_op_16(p, -=, v) -#define atomic_set_rel_16(p, v) __atomic_op_16(p, |=, v) -#define atomic_clear_rel_16(p, v) __atomic_op_16(p, &=, ~v) -#define atomic_store_rel_16(p, v) __atomic_op_16(p, =, v) - -#define atomic_add_rel_32(p, v) __atomic_op_32(p, +=, v) -#define atomic_subtract_rel_32(p, v) __atomic_op_32(p, -=, v) -#define atomic_set_rel_32(p, v) __atomic_op_32(p, |=, v) -#define atomic_clear_rel_32(p, v) __atomic_op_32(p, &=, ~v) -#define atomic_store_rel_32(p, v) __atomic_op_32(p, =, v) -#define atomic_cmpset_rel_32(p, e, s) __atomic_cas_32(p, e, s) - -#define atomic_add_rel_64(p, v) __atomic_op_64(p, +=, v) -#define atomic_subtract_rel_64(p, v) __atomic_op_64(p, -=, v) -#define atomic_set_rel_64(p, v) __atomic_op_64(p, |=, v) -#define atomic_clear_rel_64(p, v) __atomic_op_64(p, &=, ~v) -#define atomic_store_rel_64(p, v) __atomic_op_64(p, =, v) -#define atomic_cmpset_rel_64(p, e, s) __atomic_cas_64(p, e, s) - -#define atomic_add_char(p, v) __atomic_op_8(p, +=, v) -#define atomic_subtract_char(p, v) __atomic_op_8(p, -=, v) -#define atomic_set_char(p, v) __atomic_op_8(p, |=, v) -#define atomic_clear_char(p, v) __atomic_op_8(p, &=, ~v) -#define atomic_store_char(p, v) __atomic_op_8(p, =, v) - -#define atomic_add_short(p, v) __atomic_op_16(p, +=, v) -#define atomic_subtract_short(p, v) 
__atomic_op_16(p, -=, v) -#define atomic_set_short(p, v) __atomic_op_16(p, |=, v) -#define atomic_clear_short(p, v) __atomic_op_16(p, &=, ~v) -#define atomic_store_short(p, v) __atomic_op_16(p, =, v) - -#define atomic_add_int(p, v) __atomic_op_32(p, +=, v) -#define atomic_subtract_int(p, v) __atomic_op_32(p, -=, v) -#define atomic_set_int(p, v) __atomic_op_32(p, |=, v) -#define atomic_clear_int(p, v) __atomic_op_32(p, &=, ~v) -#define atomic_store_int(p, v) __atomic_op_32(p, =, v) -#define atomic_load_int(p) __atomic_load_32(p) -#define atomic_readandclear_int(p) __atomic_load_clear_32(p) -#define atomic_cmpset_int(p, e, s) __atomic_cas_32(p, e, s) - -#define atomic_add_long(p, v) __atomic_op_64(p, +=, v) -#define atomic_subtract_long(p, v) __atomic_op_64(p, -=, v) -#define atomic_set_long(p, v) __atomic_op_64(p, |=, v) -#define atomic_clear_long(p, v) __atomic_op_64(p, &=, ~v) -#define atomic_store_long(p, v) __atomic_op_64(p, =, v) -#define atomic_load_long(p) __atomic_load_64(p) -#define atomic_readandclear_long(p) __atomic_load_clear_64(p) -#define atomic_cmpset_long(p, e, s) __atomic_cas_64(p, e, s) - -#define atomic_add_acq_char(p, v) __atomic_op_8(p, +=, v) -#define atomic_subtract_acq_char(p, v) __atomic_op_8(p, -=, v) -#define atomic_set_acq_char(p, v) __atomic_op_8(p, |=, v) -#define atomic_clear_acq_char(p, v) __atomic_op_8(p, &=, ~v) -#define atomic_store_acq_char(p, v) __atomic_op_8(p, =, v) - -#define atomic_add_acq_short(p, v) __atomic_op_16(p, +=, v) -#define atomic_subtract_acq_short(p, v) __atomic_op_16(p, -=, v) -#define atomic_set_acq_short(p, v) __atomic_op_16(p, |=, v) -#define atomic_clear_acq_short(p, v) __atomic_op_16(p, &=, ~v) -#define atomic_store_acq_short(p, v) __atomic_op_16(p, =, v) - -#define atomic_add_acq_int(p, v) __atomic_op_32(p, +=, v) -#define atomic_subtract_acq_int(p, v) __atomic_op_32(p, -=, v) -#define atomic_set_acq_int(p, v) __atomic_op_32(p, |=, v) -#define atomic_clear_acq_int(p, v) __atomic_op_32(p, &=, ~v) -#define atomic_store_acq_int(p, v) __atomic_op_32(p, =, v) -#define atomic_load_acq_int(p) __atomic_load_32(p) -#define atomic_cmpset_acq_int(p, e, s) __atomic_cas_32(p, e, s) - -#define atomic_add_acq_long(p, v) __atomic_op_64(p, +=, v) -#define atomic_subtract_acq_long(p, v) __atomic_op_64(p, -=, v) -#define atomic_set_acq_long(p, v) __atomic_op_64(p, |=, v) -#define atomic_clear_acq_long(p, v) __atomic_op_64(p, &=, ~v) -#define atomic_store_acq_long(p, v) __atomic_op_64(p, =, v) -#define atomic_load_acq_long(p) __atomic_load_64(p) -#define atomic_cmpset_acq_long(p, e, s) __atomic_cas_64(p, e, s) - -#define atomic_add_rel_char(p, v) __atomic_op_8(p, +=, v) -#define atomic_subtract_rel_char(p, v) __atomic_op_8(p, -=, v) -#define atomic_set_rel_char(p, v) __atomic_op_8(p, |=, v) -#define atomic_clear_rel_char(p, v) __atomic_op_8(p, &=, ~v) -#define atomic_store_rel_char(p, v) __atomic_op_8(p, =, v) - -#define atomic_add_rel_short(p, v) __atomic_op_16(p, +=, v) -#define atomic_subtract_rel_short(p, v) __atomic_op_16(p, -=, v) -#define atomic_set_rel_short(p, v) __atomic_op_16(p, |=, v) -#define atomic_clear_rel_short(p, v) __atomic_op_16(p, &=, ~v) -#define atomic_store_rel_short(p, v) __atomic_op_16(p, =, v) - -#define atomic_add_rel_int(p, v) __atomic_op_32(p, +=, v) -#define atomic_subtract_rel_int(p, v) __atomic_op_32(p, -=, v) -#define atomic_set_rel_int(p, v) __atomic_op_32(p, |=, v) -#define atomic_clear_rel_int(p, v) __atomic_op_32(p, &=, ~v) -#define atomic_store_rel_int(p, v) __atomic_op_32(p, =, v) -#define 
atomic_cmpset_rel_int(p, e, s) __atomic_cas_32(p, e, s) - -#define atomic_add_rel_long(p, v) __atomic_op_64(p, +=, v) -#define atomic_subtract_rel_long(p, v) __atomic_op_64(p, -=, v) -#define atomic_set_rel_long(p, v) __atomic_op_64(p, |=, v) -#define atomic_clear_rel_long(p, v) __atomic_op_64(p, &=, ~v) -#define atomic_store_rel_long(p, v) __atomic_op_64(p, =, v) -#define atomic_cmpset_rel_long(p, e, s) __atomic_cas_64(p, e, s) - -#define atomic_add_char(p, v) __atomic_op_8(p, +=, v) -#define atomic_subtract_char(p, v) __atomic_op_8(p, -=, v) -#define atomic_set_char(p, v) __atomic_op_8(p, |=, v) -#define atomic_clear_char(p, v) __atomic_op_8(p, &=, ~v) -#define atomic_store_char(p, v) __atomic_op_8(p, =, v) - -#define atomic_add_short(p, v) __atomic_op_16(p, +=, v) -#define atomic_subtract_short(p, v) __atomic_op_16(p, -=, v) -#define atomic_set_short(p, v) __atomic_op_16(p, |=, v) -#define atomic_clear_short(p, v) __atomic_op_16(p, &=, ~v) -#define atomic_store_short(p, v) __atomic_op_16(p, =, v) - -#define atomic_add_int(p, v) __atomic_op_32(p, +=, v) -#define atomic_subtract_int(p, v) __atomic_op_32(p, -=, v) -#define atomic_set_int(p, v) __atomic_op_32(p, |=, v) -#define atomic_clear_int(p, v) __atomic_op_32(p, &=, ~v) -#define atomic_store_int(p, v) __atomic_op_32(p, =, v) -#define atomic_load_int(p) __atomic_load_32(p) -#define atomic_readandclear_int(p) __atomic_load_clear_32(p) -#define atomic_cmpset_int(p, e, s) __atomic_cas_32(p, e, s) - -#define atomic_add_long(p, v) __atomic_op_64(p, +=, v) -#define atomic_subtract_long(p, v) __atomic_op_64(p, -=, v) -#define atomic_set_long(p, v) __atomic_op_64(p, |=, v) -#define atomic_clear_long(p, v) __atomic_op_64(p, &=, ~v) -#define atomic_store_long(p, v) __atomic_op_64(p, =, v) -#define atomic_load_long(p) __atomic_load_64(p) -#define atomic_readandclear_long(p) __atomic_load_clear_64(p) -#define atomic_cmpset_long(p, e, s) __atomic_cas_64(p, e, s) - -#define atomic_add_ptr(p, v) __atomic_op_64(p, +=, v) -#define atomic_subtract_ptr(p, v) __atomic_op_64(p, -=, v) -#define atomic_set_ptr(p, v) __atomic_op_64(p, |=, v) -#define atomic_clear_ptr(p, v) __atomic_op_64(p, &=, ~v) -#define atomic_store_ptr(p, v) __atomic_op_64(p, =, v) -#define atomic_load_ptr(p) __atomic_load_64(p) -#define atomic_readandclear_ptr(p) __atomic_load_clear_64(p) -#define atomic_cmpset_ptr(p, e, s) __atomic_cas_64(p, e, s) - -#define atomic_add_acq_char(p, v) __atomic_op_8(p, +=, v) -#define atomic_subtract_acq_char(p, v) __atomic_op_8(p, -=, v) -#define atomic_set_acq_char(p, v) __atomic_op_8(p, |=, v) -#define atomic_clear_acq_char(p, v) __atomic_op_8(p, &=, ~v) -#define atomic_store_acq_char(p, v) __atomic_op_8(p, =, v) - -#define atomic_add_acq_short(p, v) __atomic_op_16(p, +=, v) -#define atomic_subtract_acq_short(p, v) __atomic_op_16(p, -=, v) -#define atomic_set_acq_short(p, v) __atomic_op_16(p, |=, v) -#define atomic_clear_acq_short(p, v) __atomic_op_16(p, &=, ~v) -#define atomic_store_acq_short(p, v) __atomic_op_16(p, =, v) - -#define atomic_add_acq_int(p, v) __atomic_op_32(p, +=, v) -#define atomic_subtract_acq_int(p, v) __atomic_op_32(p, -=, v) -#define atomic_set_acq_int(p, v) __atomic_op_32(p, |=, v) -#define atomic_clear_acq_int(p, v) __atomic_op_32(p, &=, ~v) -#define atomic_store_acq_int(p, v) __atomic_op_32(p, =, v) -#define atomic_load_acq_int(p) __atomic_load_32(p) -#define atomic_cmpset_acq_int(p, e, s) __atomic_cas_32(p, e, s) - -#define atomic_add_acq_long(p, v) __atomic_op_64(p, +=, v) -#define atomic_subtract_acq_long(p, v) 
__atomic_op_64(p, -=, v) -#define atomic_set_acq_long(p, v) __atomic_op_64(p, |=, v) -#define atomic_clear_acq_long(p, v) __atomic_op_64(p, &=, ~v) -#define atomic_store_acq_long(p, v) __atomic_op_64(p, =, v) -#define atomic_load_acq_long(p) __atomic_load_64(p) -#define atomic_cmpset_acq_long(p, e, s) __atomic_cas_64(p, e, s) - -#define atomic_add_acq_ptr(p, v) __atomic_op_64(p, +=, v) -#define atomic_subtract_acq_ptr(p, v) __atomic_op_64(p, -=, v) -#define atomic_set_acq_ptr(p, v) __atomic_op_64(p, |=, v) -#define atomic_clear_acq_ptr(p, v) __atomic_op_64(p, &=, ~v) -#define atomic_store_acq_ptr(p, v) __atomic_op_64(p, =, v) -#define atomic_load_acq_ptr(p) __atomic_load_64(p) -#define atomic_cmpset_acq_ptr(p, e, s) __atomic_cas_64(p, e, s) - -#define atomic_add_rel_char(p, v) __atomic_op_8(p, +=, v) -#define atomic_subtract_rel_char(p, v) __atomic_op_8(p, -=, v) -#define atomic_set_rel_char(p, v) __atomic_op_8(p, |=, v) -#define atomic_clear_rel_char(p, v) __atomic_op_8(p, &=, ~v) -#define atomic_store_rel_char(p, v) __atomic_op_8(p, =, v) - -#define atomic_add_rel_short(p, v) __atomic_op_16(p, +=, v) -#define atomic_subtract_rel_short(p, v) __atomic_op_16(p, -=, v) -#define atomic_set_rel_short(p, v) __atomic_op_16(p, |=, v) -#define atomic_clear_rel_short(p, v) __atomic_op_16(p, &=, ~v) -#define atomic_store_rel_short(p, v) __atomic_op_16(p, =, v) - -#define atomic_add_rel_int(p, v) __atomic_op_32(p, +=, v) -#define atomic_subtract_rel_int(p, v) __atomic_op_32(p, -=, v) -#define atomic_set_rel_int(p, v) __atomic_op_32(p, |=, v) -#define atomic_clear_rel_int(p, v) __atomic_op_32(p, &=, ~v) -#define atomic_store_rel_int(p, v) __atomic_op_32(p, =, v) -#define atomic_cmpset_rel_int(p, e, s) __atomic_cas_32(p, e, s) - -#define atomic_add_rel_long(p, v) __atomic_op_64(p, +=, v) -#define atomic_subtract_rel_long(p, v) __atomic_op_64(p, -=, v) -#define atomic_set_rel_long(p, v) __atomic_op_64(p, |=, v) -#define atomic_clear_rel_long(p, v) __atomic_op_64(p, &=, ~v) -#define atomic_store_rel_long(p, v) __atomic_op_64(p, =, v) -#define atomic_cmpset_rel_long(p, e, s) __atomic_cas_64(p, e, s) +#define atomic_load_clear(p, sz) ({ \ + itype(sz) e, r; \ + for (e = *(volatile itype(sz) *)p;; e = r) { \ + r = atomic_cas_ ## sz(p, e, 0); \ + if (r == e) \ + break; \ + } \ + e; \ +}) -#define atomic_add_rel_ptr(p, v) __atomic_op_64(p, +=, v) -#define atomic_subtract_rel_ptr(p, v) __atomic_op_64(p, -=, v) -#define atomic_set_rel_ptr(p, v) __atomic_op_64(p, |=, v) -#define atomic_clear_rel_ptr(p, v) __atomic_op_64(p, &=, ~v) -#define atomic_store_rel_ptr(p, v) __atomic_op_64(p, =, v) -#define atomic_cmpset_rel_ptr(p, e, s) __atomic_cas_64(p, e, s) +#define atomic_store_rel(p, v, sz) do { \ + itype(sz) e, r; \ + membar(LoadStore | StoreStore); \ + for (e = *(volatile itype(sz) *)p;; e = r) { \ + r = atomic_cas_ ## sz(p, e, v); \ + if (r == e) \ + break; \ + } \ +} while (0) + +#define ATOMIC_GEN(name, ptype, vtype, atype, sz) \ + \ +static __inline void \ +atomic_add_ ## name(volatile ptype p, atype v) \ +{ \ + atomic_op(p, +, v, sz); \ +} \ +static __inline void \ +atomic_add_acq_ ## name(volatile ptype p, atype v) \ +{ \ + atomic_op_acq(p, +, v, sz); \ +} \ +static __inline void \ +atomic_add_rel_ ## name(volatile ptype p, atype v) \ +{ \ + atomic_op_rel(p, +, v, sz); \ +} \ + \ +static __inline void \ +atomic_clear_ ## name(volatile ptype p, atype v) \ +{ \ + atomic_op(p, &, ~v, sz); \ +} \ +static __inline void \ +atomic_clear_acq_ ## name(volatile ptype p, atype v) \ +{ \ + atomic_op_acq(p, &, ~v, sz); 
\ +} \ +static __inline void \ +atomic_clear_rel_ ## name(volatile ptype p, atype v) \ +{ \ + atomic_op_rel(p, &, ~v, sz); \ +} \ + \ +static __inline int \ +atomic_cmpset_ ## name(volatile ptype p, vtype e, vtype s) \ +{ \ + return (((vtype)atomic_cas(p, e, s, sz)) == e); \ +} \ +static __inline int \ +atomic_cmpset_acq_ ## name(volatile ptype p, vtype e, vtype s) \ +{ \ + return (((vtype)atomic_cas_acq(p, e, s, sz)) == e); \ +} \ +static __inline int \ +atomic_cmpset_rel_ ## name(volatile ptype p, vtype e, vtype s) \ +{ \ + return (((vtype)atomic_cas_rel(p, e, s, sz)) == e); \ +} \ + \ +static __inline vtype \ +atomic_load_acq_ ## name(volatile ptype p) \ +{ \ + return ((vtype)atomic_cas_acq(p, 0, 0, sz)); \ +} \ + \ +static __inline vtype \ +atomic_readandclear_ ## name(volatile ptype p) \ +{ \ + return ((vtype)atomic_load_clear(p, sz)); \ +} \ + \ +static __inline void \ +atomic_set_ ## name(volatile ptype p, atype v) \ +{ \ + atomic_op(p, |, v, sz); \ +} \ +static __inline void \ +atomic_set_acq_ ## name(volatile ptype p, atype v) \ +{ \ + atomic_op_acq(p, |, v, sz); \ +} \ +static __inline void \ +atomic_set_rel_ ## name(volatile ptype p, atype v) \ +{ \ + atomic_op_rel(p, |, v, sz); \ +} \ + \ +static __inline void \ +atomic_subtract_ ## name(volatile ptype p, atype v) \ +{ \ + atomic_op(p, -, v, sz); \ +} \ +static __inline void \ +atomic_subtract_acq_ ## name(volatile ptype p, atype v) \ +{ \ + atomic_op_acq(p, -, v, sz); \ +} \ +static __inline void \ +atomic_subtract_rel_ ## name(volatile ptype p, atype v) \ +{ \ + atomic_op_rel(p, -, v, sz); \ +} \ + \ +static __inline void \ +atomic_store_rel_ ## name(volatile ptype p, vtype v) \ +{ \ + atomic_store_rel(p, v, sz); \ +} + +ATOMIC_GEN(int, int *, int, int, 32); +ATOMIC_GEN(32, int *, int, int, 32); + +ATOMIC_GEN(long, long *, long, long, 64); +ATOMIC_GEN(64, long *, long, long, 64); + +ATOMIC_GEN(ptr, void *, void *, uintptr_t, 64); + +#undef ATOMIC_GEN +#undef atomic_cas_32 +#undef atomic_cas_64 +#undef atomic_cas +#undef atomic_cas_acq +#undef atomic_cas_rel +#undef atomic_op +#undef atomic_op_acq +#undef atomic_op_rel +#undef atomic_load_acq +#undef atomic_store_rel +#undef atomic_load_clear #endif /* !_MACHINE_ATOMIC_H_ */ diff --git a/sys/sparc64/include/bootinfo.h b/sys/sparc64/include/bootinfo.h new file mode 100644 index 000000000000..7da2226d23b0 --- /dev/null +++ b/sys/sparc64/include/bootinfo.h @@ -0,0 +1,44 @@ +/*- + * Copyright (c) 2001 Jake Burkholder. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _MACHINE_BOOTINFO_H_ +#define _MACHINE_BOOTINFO_H_ + +/* + * Increment the version number when you break binary compatibiity. + */ +#define BOOTINFO_VERSION 1 + +struct bootinfo { + u_int bi_version; + u_long bi_end; + u_long bi_kpa; + u_long bi_metadata; +}; + +#endif /* !_MACHINE_BOOTINFO_H_ */ diff --git a/sys/sparc64/include/cpu.h b/sys/sparc64/include/cpu.h index a6c206a3c13d..83d34c3dd90d 100644 --- a/sys/sparc64/include/cpu.h +++ b/sys/sparc64/include/cpu.h @@ -32,10 +32,13 @@ #include <machine/frame.h> #define CLKF_USERMODE(cfp) (0) -#define CLKF_PC(cfp) (0) +#define CLKF_PC(cfp) ((cfp)->cf_tf.tf_tpc) -#define cpu_getstack(p) (0) -#define cpu_setstack(p, sp) (0) +#define TRAPF_PC(tfp) ((tfp)->tf_tpc) +#define TRAPF_USERMODE(tfp) (0) + +#define cpu_getstack(p) ((p)->p_frame->tf_sp) +#define cpu_setstack(p, sp) ((p)->p_frame->tf_sp = (sp)) /* * Arrange to handle pending profiling ticks before returning to user mode. @@ -66,11 +69,13 @@ { "wall_cmos_clock", CTLTYPE_INT }, \ } +void fork_trampoline(void); + static __inline u_int64_t get_cyclecount(void) { - static u_long now; - return (++now); + + return (rd(tick)); } #endif /* !_MACHINE_CPU_H_ */ diff --git a/sys/sparc64/include/cpufunc.h b/sys/sparc64/include/cpufunc.h index 57cf2ab2e19c..a3e5a6bb4a69 100644 --- a/sys/sparc64/include/cpufunc.h +++ b/sys/sparc64/include/cpufunc.h @@ -29,15 +29,139 @@ #ifndef _MACHINE_CPUFUNC_H_ #define _MACHINE_CPUFUNC_H_ +#include <machine/asi.h> +#include <machine/pstate.h> + +/* + * membar operand macros for use in other macros when # is a special + * character. Keep these in sync with what the hardware expects. 
+ */ +#define C_Lookaside (0) +#define C_MemIssue (1) +#define C_Sync (2) +#define M_LoadLoad (0) +#define M_StoreLoad (1) +#define M_LoadStore (2) +#define M_StoreStore (3) + +#define CMASK_SHIFT (4) +#define MMASK_SHIFT (0) + +#define CMASK_GEN(bit) ((1 << (bit)) << CMASK_SHIFT) +#define MMASK_GEN(bit) ((1 << (bit)) << MMASK_SHIFT) + +#define Lookaside CMASK_GEN(C_Lookaside) +#define MemIssue CMASK_GEN(C_MemIssue) +#define Sync CMASK_GEN(C_Sync) +#define LoadLoad MMASK_GEN(M_LoadLoad) +#define StoreLoad MMASK_GEN(M_StoreLoad) +#define LoadStore MMASK_GEN(M_LoadStore) +#define StoreStore MMASK_GEN(M_StoreStore) + +#define casa(rs1, rs2, rd, asi) ({ \ + u_int __rd = (u_int32_t)(rd); \ + __asm __volatile("casa [%1] %2, %3, %0" \ + : "+r" (__rd) : "r" (rs1), "n" (asi), "r" (rs2)); \ + __rd; \ +}) + +#define casxa(rs1, rs2, rd, asi) ({ \ + u_long __rd = (u_int64_t)(rd); \ + __asm __volatile("casxa [%1] %2, %3, %0" \ + : "+r" (__rd) : "r" (rs1), "n" (asi), "r" (rs2)); \ + __rd; \ +}) + +#define flush(va) do { \ + __asm __volatile("flush %0" : : "r" (va)); \ +} while (0) + +#define ldxa(va, asi) ({ \ + u_long __r; \ + __asm __volatile("ldxa [%1] %2, %0" \ + : "=r" (__r) : "r" (va), "n" (asi)); \ + __r; \ +}) + +#define stxa(va, asi, val) do { \ + __asm __volatile("stxa %0, [%1] %2" \ + : : "r" (val), "r" (va), "n" (asi)); \ +} while (0) + +#define membar(mask) do { \ + __asm __volatile("membar %0" : : "n" (mask)); \ +} while (0) + +#define rd(name) ({ \ + u_int64_t __sr; \ + __asm __volatile("rd %%" #name ", %0" : "=r" (__sr) :); \ + __sr; \ +}) + +#define wr(name, val, xor) do { \ + __asm __volatile("wr %0, %1, %%" #name \ + : : "r" (val), "rI" (xor)); \ +} while (0) + +#define rdpr(name) ({ \ + u_int64_t __pr; \ + __asm __volatile("rdpr %%" #name", %0" : "=r" (__pr) :); \ + __pr; \ +}) + +#define wrpr(name, val, xor) do { \ + __asm __volatile("wrpr %0, %1, %%" #name \ + : : "r" (val), "rI" (xor)); \ +} while (0) + +static __inline void +breakpoint(void) +{ + __asm __volatile("ta 1"); +} + +/* + * XXX use %pil for these. + */ static __inline critical_t critical_enter(void) { - return (0); + critical_t ie; + + ie = rdpr(pstate); + if (ie & PSTATE_IE) + wrpr(pstate, ie, PSTATE_IE); + return (ie); } static __inline void critical_exit(critical_t ie) { + + if (ie & PSTATE_IE) + wrpr(pstate, ie, 0); +} + +#if 0 +#define HAVE_INLINE_FFS +/* + * See page 202 of the SPARC v9 Architecture Manual. 
+ */ +static __inline int +ffs(int mask) +{ + int result; + int neg; + int tmp; + + __asm __volatile( + " neg %3, %1 ; " + " xnor %3, %1, %2 ; " + " popc %2, %0 ; " + " movrz %3, %%g0, %0 ; " + : "=r" (result), "=r" (neg), "=r" (tmp) : "r" (mask)); + return (result); } +#endif #endif /* !_MACHINE_CPUFUNC_H_ */ diff --git a/sys/sparc64/include/db_machdep.h b/sys/sparc64/include/db_machdep.h index 5d091ee09e48..0e4954e6a3e7 100644 --- a/sys/sparc64/include/db_machdep.h +++ b/sys/sparc64/include/db_machdep.h @@ -29,4 +29,48 @@ #ifndef _MACHINE_DB_MACHDEP_H_ #define _MACHINE_DB_MACHDEP_H_ +#include <machine/frame.h> +#include <machine/trap.h> + +#define BYTE_MSF (1) + +typedef vm_offset_t db_addr_t; +typedef u_long db_expr_t; + +struct db_regs { + u_long dr_global[8]; +}; + +typedef struct trapframe db_regs_t; +extern db_regs_t ddb_regs; +#define DDB_REGS (&ddb_regs) + +#define PC_REGS(regs) ((db_addr_t)(regs)->tf_tpc) + +#define BKPT_INST (0) +#define BKPT_SIZE (4) +#define BKPT_SET(inst) (BKPT_INST) + +#define FIXUP_PC_AFTER_BREAK do { \ + ddb_regs.tf_tpc = ddb_regs.tf_tnpc; \ + ddb_regs.tf_tnpc += BKPT_SIZE; \ +} while (0); + +#define db_clear_single_step(regs) +#define db_set_single_step(regs) + +#define IS_BREAKPOINT_TRAP(type, code) (type == T_BREAKPOINT) +#define IS_WATCHPOINT_TRAP(type, code) (0) + +#define inst_trap_return(ins) (0) +#define inst_return(ins) (0) +#define inst_call(ins) (0) +#define inst_load(ins) (0) +#define inst_store(ins) (0) + +#define DB_SMALL_VALUE_MAX (0x7fffffff) +#define DB_SMALL_VALUE_MIN (-0x40001) + +#define DB_ELFSIZE 64 + #endif /* !_MACHINE_DB_MACHDEP_H_ */ diff --git a/sys/sparc64/include/frame.h b/sys/sparc64/include/frame.h index da06e3c79bff..76c58de290e7 100644 --- a/sys/sparc64/include/frame.h +++ b/sys/sparc64/include/frame.h @@ -29,10 +29,47 @@ #ifndef _MACHINE_FRAME_H_ #define _MACHINE_FRAME_H_ +#define SPOFF 2047 + +struct trapframe { + u_long tf_global[8]; + u_long tf_out[8]; + u_long tf_tnpc; + u_long tf_tpc; + u_long tf_tstate; + u_long tf_type; + void *tf_arg; +}; +#define tf_sp tf_out[6] + +struct mmuframe { + u_long mf_sfar; + u_long mf_sfsr; + u_long mf_tar; +}; + +struct kdbframe { + u_long kf_fp; + u_long kf_cfp; + u_long kf_canrestore; + u_long kf_cansave; + u_long kf_cleanwin; + u_long kf_cwp; + u_long kf_otherwin; +}; + struct clockframe { + struct trapframe cf_tf; }; -struct trapframe { +struct frame { + u_long f_local[8]; + u_long f_in[8]; + u_long f_pad[8]; }; +#define f_fp f_in[6] +#define f_pc f_in[7] + +int kdb_trap(struct trapframe *tf); #endif /* !_MACHINE_FRAME_H_ */ diff --git a/sys/sparc64/include/globaldata.h b/sys/sparc64/include/globaldata.h index 38b351ed90e6..7af33754327b 100644 --- a/sys/sparc64/include/globaldata.h +++ b/sys/sparc64/include/globaldata.h @@ -37,7 +37,7 @@ struct globaldata { SLIST_ENTRY(globaldata) gd_allcpu; struct pcb *gd_curpcb; struct proc *gd_curproc; - struct proc *gd_fpproc; + struct proc *gd_fpcurproc; struct proc *gd_idleproc; u_int gd_cpuid; u_int gd_other_cpus; diff --git a/sys/sparc64/include/mutex.h b/sys/sparc64/include/mutex.h index 9c6bc1a71136..dd337bef8c74 100644 --- a/sys/sparc64/include/mutex.h +++ b/sys/sparc64/include/mutex.h @@ -32,6 +32,8 @@ static __inline void mtx_intr_enable(struct mtx *mtx) { + + mtx->mtx_savecrit |= PSTATE_IE; } #endif /* !_MACHINE_MUTEX_H_ */ diff --git a/sys/sparc64/include/param.h b/sys/sparc64/include/param.h index c1641349d946..a54bb1bb6786 100644 --- a/sys/sparc64/include/param.h +++ b/sys/sparc64/include/param.h @@ -85,15 +85,25 @@ #define 
ALIGNBYTES _ALIGNBYTES #define ALIGN(p) _ALIGN(p) -#define PAGE_SHIFT 13 /* LOG2(PAGE_SIZE) */ -#define PAGE_SIZE (1<<PAGE_SHIFT) /* bytes/page */ -#define PAGE_MASK (PAGE_SIZE-1) -#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t))) +#define PAGE_SHIFT_8K 13 +#define PAGE_SIZE_8K (1<<PAGE_SHIFT_8K) +#define PAGE_MASK_8K (PAGE_SIZE_8K-1) -#define NPDEPG (PAGE_SIZE/(sizeof (pd_entry_t))) -#define PDRSHIFT 22 /* LOG2(NBPDR) */ -#define NBPDR (1<<PDRSHIFT) /* bytes/page dir */ -#define PDRMASK (NBPDR-1) +#define PAGE_SHIFT_64K 16 +#define PAGE_SIZE_64K (1<<PAGE_SHIFT_64K) +#define PAGE_MASK_64K (PAGE_SIZE_64K-1) + +#define PAGE_SHIFT_512K 19 +#define PAGE_SIZE_512K (1<<PAGE_SHIFT_512K) +#define PAGE_MASK_512K (PAGE_SIZE_512K-1) + +#define PAGE_SHIFT_4M 22 +#define PAGE_SIZE_4M (1<<PAGE_SHIFT_4M) +#define PAGE_MASK_4M (PAGE_SIZE_4M-1) + +#define PAGE_SHIFT PAGE_SHIFT_8K /* LOG2(PAGE_SIZE) */ +#define PAGE_SIZE PAGE_SIZE_8K /* bytes/page */ +#define PAGE_MASK PAGE_MASK_8K #define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */ #define DEV_BSIZE (1<<DEV_BSHIFT) @@ -126,32 +136,24 @@ */ /* clicks to bytes */ -#define ctob(x) ((x)<<PAGE_SHIFT) +#define ctob(x) ((unsigned long)(x)<<PAGE_SHIFT) /* bytes to clicks */ -#define btoc(x) (((unsigned)(x)+PAGE_MASK)>>PAGE_SHIFT) +#define btoc(x) (((unsigned long)(x)+PAGE_MASK)>>PAGE_SHIFT) -/* - * btodb() is messy and perhaps slow because `bytes' may be an off_t. We - * want to shift an unsigned type to avoid sign extension and we don't - * want to widen `bytes' unnecessarily. Assume that the result fits in - * a daddr_t. - */ +/* bytes to disk blocks */ #define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \ - (sizeof (bytes) > sizeof(long) \ - ? (daddr_t)((unsigned long long)(bytes) >> DEV_BSHIFT) \ - : (daddr_t)((unsigned long)(bytes) >> DEV_BSHIFT)) + (daddr_t)((unsigned long)(bytes) >> DEV_BSHIFT) +/* disk blocks to bytes */ #define dbtob(db) /* calculates (db * DEV_BSIZE) */ \ - ((off_t)(db) << DEV_BSHIFT) + (off_t)((unsigned long)(db) << DEV_BSHIFT) /* * Mach derived conversion macros */ -#define trunc_page(x) ((unsigned long)(x) & ~PAGE_MASK) #define round_page(x) (((unsigned long)(x) + PAGE_MASK) & ~PAGE_MASK) -#define trunc_4mpage(x) ((unsigned long)(x) & ~PDRMASK) -#define round_4mpage(x) ((((unsigned long)(x)) + PDRMASK) & ~PDRMASK) +#define trunc_page(x) ((unsigned long)(x) & ~PAGE_MASK) #define atop(x) ((unsigned long)(x) >> PAGE_SHIFT) #define ptoa(x) ((unsigned long)(x) << PAGE_SHIFT) diff --git a/sys/sparc64/include/pcb.h b/sys/sparc64/include/pcb.h index 3a14b747018f..d5ad7ebdaeee 100644 --- a/sys/sparc64/include/pcb.h +++ b/sys/sparc64/include/pcb.h @@ -30,6 +30,8 @@ #define _MACHINE_PCB_H_ struct pcb { + u_long pcb_fp; + u_long pcb_pc; caddr_t pcb_onfault; }; @@ -37,7 +39,7 @@ struct md_coredump { }; #ifdef _KERNEL -void savectx(struct pcb *pcb); +int savectx(struct pcb *pcb); #endif #endif /* !_MACHINE_PCB_H_ */ diff --git a/sys/sparc64/include/pcpu.h b/sys/sparc64/include/pcpu.h index 38b351ed90e6..7af33754327b 100644 --- a/sys/sparc64/include/pcpu.h +++ b/sys/sparc64/include/pcpu.h @@ -37,7 +37,7 @@ struct globaldata { SLIST_ENTRY(globaldata) gd_allcpu; struct pcb *gd_curpcb; struct proc *gd_curproc; - struct proc *gd_fpproc; + struct proc *gd_fpcurproc; struct proc *gd_idleproc; u_int gd_cpuid; u_int gd_other_cpus; diff --git a/sys/sparc64/include/pmap.h b/sys/sparc64/include/pmap.h index 87400079c713..1a57d0e14782 100644 --- a/sys/sparc64/include/pmap.h +++ b/sys/sparc64/include/pmap.h @@ -29,30 +29,36 @@ #ifndef _MACHINE_PMAP_H_ #define 
_MACHINE_PMAP_H_ -struct md_page { -}; +#include <sys/kobj.h> +#include <machine/tte.h> -struct pmap { - struct pmap_statistics pm_stats; -}; +#define PMAP_CONTEXT_MAX 8192 -typedef struct pmap *pmap_t; +#define pmap_resident_count(pm) (pm->pm_stats.resident_count) -extern struct pmap __kernel_pmap; -#define kernel_pmap (&__kernel_pmap) +typedef struct pmap *pmap_t; -#define pmap_resident_count(pm) (pm->pm_stats.resident_count) +struct md_page { +}; -#ifdef _KERNEL +struct pmap { + struct stte pm_stte; + u_int pm_active; + u_int pm_context; + u_int pm_count; + struct pmap_statistics pm_stats; +}; +void pmap_bootstrap(vm_offset_t skpa, vm_offset_t ekva); vm_offset_t pmap_kextract(vm_offset_t va); extern vm_offset_t avail_start; extern vm_offset_t avail_end; +extern vm_offset_t clean_eva; +extern vm_offset_t clean_sva; +extern struct pmap *kernel_pmap; extern vm_offset_t phys_avail[]; extern vm_offset_t virtual_avail; extern vm_offset_t virtual_end; -#endif - #endif /* !_MACHINE_PMAP_H_ */ diff --git a/sys/sparc64/include/proc.h b/sys/sparc64/include/proc.h index e3f6e4d85bf0..8537f4909c78 100644 --- a/sys/sparc64/include/proc.h +++ b/sys/sparc64/include/proc.h @@ -30,6 +30,7 @@ #define _MACHINE_PROC_H_ #include <machine/globals.h> +#include <machine/tte.h> struct mdproc { }; diff --git a/sys/sparc64/include/pstate.h b/sys/sparc64/include/pstate.h new file mode 100644 index 000000000000..8ff55f3fd597 --- /dev/null +++ b/sys/sparc64/include/pstate.h @@ -0,0 +1,79 @@ +/*- + * Copyright (c) 2001 Jake Burkholder. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#ifndef _MACHINE_PSTATE_H_ +#define _MACHINE_PSTATE_H_ + +#define PSTATE_AG (1<<0) +#define PSTATE_IE (1<<1) +#define PSTATE_PRIV (1<<2) +#define PSTATE_AM (1<<3) +#define PSTATE_PEF (1<<4) +#define PSTATE_RED (1<<5) + +#define PSTATE_MM_SHIFT (6) +#define PSTATE_MM_MASK ((1<<PSTATE_MM_SHIFT)|(1<<(PSTATE_MM_SHIFT+1))) +#define PSTATE_MM_TSO (0<<PSTATE_MM_SHIFT) +#define PSTATE_MM_PSO (1<<PSTATE_MM_SHIFT) +#define PSTATE_MM_RMO (2<<PSTATE_MM_SHIFT) + +#define PSTATE_TLE (1<<8) +#define PSTATE_CLE (1<<9) +#define PSTATE_MG (1<<10) +#define PSTATE_IG (1<<11) + +#define VER_MANUF_SHIFT (48) +#define VER_IMPL_SHIFT (32) +#define VER_MASK_SHIFT (24) +#define VER_MAXTL_SHIFT (8) +#define VER_MAXWIN_SHIFT (0) + +#define VER_MANUF_SIZE (16) +#define VER_IMPL_SIZE (16) +#define VER_MASK_SIZE (8) +#define VER_MAXTL_SIZE (8) +#define VER_MAXWIN_SIZE (5) + +#define VER_MANUF_MASK (((1L<<VER_MANUF_SIZE)-1)<<VER_MANUF_SHIFT) +#define VER_IMPL_MASK (((1L<<VER_IMPL_SIZE)-1)<<VER_IMPL_SHIFT) +#define VER_MASK_MASK (((1L<<VER_MASK_SIZE)-1)<<VER_MASK_SHIFT) +#define VER_MAXTL_MASK (((1L<<VER_MAXTL_SIZE)-1)<<VER_MAXTL_SHIFT) +#define VER_MAXWIN_MASK (((1L<<VER_MAXWIN_SIZE)-1)<<VER_MAXWIN_SHIFT) + +#define VER_MANUF(ver) \ + (((ver) & VER_MANUF_MASK) >> VER_MANUF_SHIFT) +#define VER_IMPL(ver) \ + (((ver) & VER_IMPL_MASK) >> VER_IMPL_SHIFT) +#define VER_MASK(ver) \ + (((ver) & VER_MASK_MASK) >> VER_MASK_SHIFT) +#define VER_MAXTL(ver) \ + (((ver) & VER_MAXTL_MASK) >> VER_MAXTL_SHIFT) +#define VER_MAXWIN(ver) \ + (((ver) & VER_MAXWIN_MASK) >> VER_MAXWIN_SHIFT) + +#endif /* !_MACHINE_PSTATE_H_ */ diff --git a/sys/sparc64/include/pv.h b/sys/sparc64/include/pv.h new file mode 100644 index 000000000000..e36d3f8d0c4c --- /dev/null +++ b/sys/sparc64/include/pv.h @@ -0,0 +1,175 @@ +/*- + * Copyright (c) 2001 Jake Burkholder. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#ifndef _MACHINE_PV_H_ +#define _MACHINE_PV_H_ + +#define PV_LOCK() +#define PV_UNLOCK() + +#define ST_TTE offsetof(struct stte, st_tte) +#define ST_NEXT offsetof(struct stte, st_next) +#define ST_PREV offsetof(struct stte, st_prev) + +#define TTE_DATA offsetof(struct tte, tte_data) +#define TTE_TAG offsetof(struct tte, tte_tag) + +#define PV_OFF(pa) ((vm_offset_t)(pa) - avail_start) +#define PV_INDEX(pa) (PV_OFF(pa) >> PAGE_SHIFT) +#define PV_SHIFT (3) + +#define casxp(pa, exp, src) \ + casxa((vm_offset_t *)pa, exp, src, ASI_PHYS_USE_EC) +#define ldxp(pa) ldxa(pa, ASI_PHYS_USE_EC) +#define stxp(pa, val) stxa(pa, ASI_PHYS_USE_EC, val) + +extern vm_offset_t pv_table; +extern u_long pv_generation; + +static __inline vm_offset_t +pv_lookup(vm_offset_t pa) +{ + return (pv_table + (PV_INDEX(pa) << PV_SHIFT)); +} + +static __inline vm_offset_t +pv_get_first(vm_offset_t pvh) +{ + return (ldxp(pvh)); +} + +static __inline vm_offset_t +pv_get_next(vm_offset_t pstp) +{ + return (ldxp(pstp + ST_NEXT)); +} + +static __inline vm_offset_t +pv_get_prev(vm_offset_t pstp) +{ + return (ldxp(pstp + ST_PREV)); +} + +static __inline u_long +pv_get_tte_data(vm_offset_t pstp) +{ + return (ldxp(pstp + ST_TTE + TTE_DATA)); +} + +static __inline u_long +pv_get_tte_tag(vm_offset_t pstp) +{ + return (ldxp(pstp + ST_TTE + TTE_TAG)); +} + +#define pv_get_tte(pstp) ({ \ + struct tte __tte; \ + __tte.tte_tag = pv_get_tte_tag(pstp); \ + __tte.tte_data = pv_get_tte_data(pstp); \ + __tte; \ +}) + +static __inline void +pv_set_first(vm_offset_t pvh, vm_offset_t first) +{ + stxp(pvh, first); +} + +static __inline void +pv_set_next(vm_offset_t pstp, vm_offset_t next) +{ + stxp(pstp + ST_NEXT, next); +} + +static __inline void +pv_set_prev(vm_offset_t pstp, vm_offset_t prev) +{ + stxp(pstp + ST_PREV, prev); +} + +static __inline void +pv_remove_phys(vm_offset_t pstp) +{ + vm_offset_t pv_next; + vm_offset_t pv_prev; + + pv_next = pv_get_next(pstp); + pv_prev = pv_get_prev(pstp); + if (pv_next != 0) + pv_set_prev(pv_next, pv_prev); + stxp(pv_prev, pv_next); +} + +static __inline void +pv_bit_clear(vm_offset_t pstp, u_long bits) +{ + vm_offset_t dp; + vm_offset_t d1; + vm_offset_t d2; + vm_offset_t d3; + + dp = pstp + ST_TTE + TTE_DATA; + for (d1 = ldxp(dp);; d1 = d3) { + d2 = d1 & ~bits; + d3 = casxp(dp, d1, d2); + if (d1 == d3) + break; + } +} + +static __inline void +pv_bit_set(vm_offset_t pstp, u_long bits) +{ + vm_offset_t dp; + vm_offset_t d1; + vm_offset_t d2; + vm_offset_t d3; + + dp = pstp + ST_TTE + TTE_DATA; + for (d1 = ldxp(dp);; d1 = d3) { + d2 = d1 | bits; + d3 = casxp(dp, d1, d2); + if (d1 == d3) + break; + } +} + +static __inline int +pv_bit_test(vm_offset_t pstp, u_long bits) +{ + vm_offset_t dp; + + dp = pstp + ST_TTE + TTE_DATA; + return ((casxp(dp, 0, 0) & bits) != 0); +} + +void pv_dump(vm_offset_t pvh); +void pv_insert(pmap_t pm, vm_offset_t pa, vm_offset_t va, struct stte *stp); +void pv_remove_virt(struct stte *stp); + +#endif /* !_MACHINE_PV_H_ */ diff --git a/sys/sparc64/include/resource.h b/sys/sparc64/include/resource.h index 28fcc98ea1be..783a1c468a38 100644 --- a/sys/sparc64/include/resource.h +++ b/sys/sparc64/include/resource.h @@ -1,4 +1,3 @@ -/* $FreeBSD$ */ /* * Copyright 1998 Massachusetts Institute of Technology * @@ -26,6 +25,8 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
+ * + * $FreeBSD$ */ #ifndef _MACHINE_RESOURCE_H_ diff --git a/sys/sparc64/include/setjmp.h b/sys/sparc64/include/setjmp.h new file mode 100644 index 000000000000..a6a7ffb59c0e --- /dev/null +++ b/sys/sparc64/include/setjmp.h @@ -0,0 +1,55 @@ +/*- + * Copyright (c) 2001 Jake Burkholder. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _MACHINE_SETJMP_H_ +#define _MACHINE_SETJMP_H_ + +#define _JBLEN 3 + +#define _JB_FP 0 +#define _JB_PC 1 +#define _JB_SP 2 + +/* + * jmp_buf and sigjmp_buf are encapsulated in different structs to force + * compile-time diagnostics for mismatches. The structs are the same + * internally to avoid some run-time errors for mismatches. + */ +#ifndef _ANSI_SOURCE +struct _sigjmp_buf { + long _sjb[_JBLEN + 1]; +}; +typedef struct _sigjmp_buf sigjmp_buf[1]; +#endif + +struct _jmp_buf { + long _jb[_JBLEN + 1]; +}; +typedef struct _jmp_buf jmp_buf[1]; + +#endif /* !_MACHINE_SETJMP_H_ */ diff --git a/sys/sparc64/include/stdarg.h b/sys/sparc64/include/stdarg.h index a4b2550f85d7..2f0fd8cadfea 100644 --- a/sys/sparc64/include/stdarg.h +++ b/sys/sparc64/include/stdarg.h @@ -33,6 +33,7 @@ * SUCH DAMAGE. * * from: @(#)stdarg.h 8.2 (Berkeley) 9/27/93 + * from: NetBSD: stdarg.h,v 1.11 2000/07/23 21:36:56 mycroft Exp * $FreeBSD$ */ diff --git a/sys/sparc64/include/tlb.h b/sys/sparc64/include/tlb.h new file mode 100644 index 000000000000..d72303285d93 --- /dev/null +++ b/sys/sparc64/include/tlb.h @@ -0,0 +1,149 @@ +/*- + * Copyright (c) 2001 Jake Burkholder. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _MACHINE_TLB_H_ +#define _MACHINE_TLB_H_ + +#define TLB_SLOT_COUNT 64 + +#define TLB_SLOT_TSB_KERNEL_MIN 60 /* XXX */ +#define TLB_SLOT_TSB_USER_PRIMARY 61 +#define TLB_SLOT_TSB_USER_SECONDARY 62 +#define TLB_SLOT_KERNEL 63 + +#define TLB_DAR_SLOT_SHIFT (3) +#define TLB_DAR_SLOT(slot) ((slot) << TLB_DAR_SLOT_SHIFT) + +#define TLB_TAR_VA(va) ((va) & ~PAGE_MASK) +#define TLB_TAR_CTX(ctx) (ctx) + +#define TLB_DEMAP_ID_SHIFT (4) +#define TLB_DEMAP_ID_PRIMARY (0) +#define TLB_DEMAP_ID_SECONDARY (1) +#define TLB_DEMAP_ID_NUCLEUS (2) + +#define TLB_DEMAP_TYPE_SHIFT (6) +#define TLB_DEMAP_TYPE_PAGE (0) +#define TLB_DEMAP_TYPE_CONTEXT (1) + +#define TLB_DEMAP_VA(va) ((va) & ~PAGE_MASK) +#define TLB_DEMAP_ID(id) ((id) << TLB_DEMAP_ID_SHIFT) +#define TLB_DEMAP_TYPE(type) ((type) << TLB_DEMAP_TYPE_SHIFT) + +#define TLB_DEMAP_PAGE (TLB_DEMAP_TYPE(TLB_DEMAP_TYPE_PAGE)) +#define TLB_DEMAP_CONTEXT (TLB_DEMAP_TYPE(TLB_DEMAP_TYPE_CONTEXT)) + +#define TLB_DEMAP_PRIMARY (TLB_DEMAP_ID(TLB_DEMAP_ID_PRIMARY)) +#define TLB_DEMAP_SECONDARY (TLB_DEMAP_ID(TLB_DEMAP_ID_SECONDARY)) +#define TLB_DEMAP_NUCLEUS (TLB_DEMAP_ID(TLB_DEMAP_ID_NUCLEUS)) + +#define TLB_CTX_KERNEL (0) + +#define TLB_DTLB (1 << 0) +#define TLB_ITLB (1 << 1) + +static __inline void +tlb_dtlb_page_demap(u_int ctx, vm_offset_t va) +{ + if (ctx == TLB_CTX_KERNEL) { + stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, + ASI_DMMU_DEMAP, 0); + membar(Sync); + } else + TODO; +} + +static __inline void +tlb_dtlb_store(vm_offset_t va, struct tte tte) +{ + stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(0)); + stxa(0, ASI_DTLB_DATA_IN_REG, tte.tte_data); + membar(Sync); +} + +static __inline void +tlb_dtlb_store_slot(vm_offset_t va, struct tte tte, int slot) +{ + stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(0)); + stxa(TLB_DAR_SLOT(slot), ASI_DTLB_DATA_ACCESS_REG, tte.tte_data); + membar(Sync); +} + +static __inline void +tlb_itlb_page_demap(u_int ctx, vm_offset_t va) +{ + if (ctx == TLB_CTX_KERNEL) { + stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, + ASI_IMMU_DEMAP, 0); + flush(KERNBASE); + } else + TODO; +} + +static __inline void +tlb_itlb_store(vm_offset_t va, struct tte tte) +{ + TODO; +} + +static __inline void +tlb_itlb_store_slot(vm_offset_t va, struct tte tte, int slot) +{ + stxa(AA_IMMU_TAR, ASI_IMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(0)); + stxa(TLB_DAR_SLOT(slot), ASI_ITLB_DATA_ACCESS_REG, tte.tte_data); + flush(va); +} + +static __inline void +tlb_page_demap(u_int tlb, u_int ctx, vm_offset_t va) +{ + if (tlb & TLB_DTLB) + tlb_dtlb_page_demap(ctx, va); + if (tlb & TLB_ITLB) + tlb_itlb_page_demap(ctx, va); +} + +static __inline void +tlb_store(u_int tlb, vm_offset_t va, struct tte tte) +{ + if (tlb & 
diff --git a/sys/sparc64/include/trap.h b/sys/sparc64/include/trap.h
new file mode 100644
index 000000000000..4773592c7794
--- /dev/null
+++ b/sys/sparc64/include/trap.h
@@ -0,0 +1,70 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_TRAP_H_
+#define _MACHINE_TRAP_H_
+
+#define	T_RESERVED	0x0
+#define	T_POWER_ON	0x1
+#define	T_WATCHDOG	0x2
+#define	T_RESET_EXT	0x3
+#define	T_RESET_SOFT	0x4
+#define	T_RED_STATE	0x5
+#define	T_INSN_EXCPTN	0x6
+#define	T_INSN_ERROR	0x7
+#define	T_INSN_ILLEGAL	0x8
+#define	T_PRIV_OPCODE	0x9
+#define	T_FP_DISABLED	0xa
+#define	T_FP_IEEE	0xb
+#define	T_FP_OTHER	0xc
+#define	T_TAG_OVFLW	0xd
+#define	T_DIVIDE	0xe
+#define	T_DATA_EXCPTN	0xf
+#define	T_DATA_ERROR	0x10
+#define	T_ALIGN		0x11
+#define	T_ALIGN_LDDF	0x12
+#define	T_ALIGN_STDF	0x13
+#define	T_PRIV_ACTION	0x14
+#define	T_INTERRUPT	0x15
+#define	T_WATCH_PHYS	0x16
+#define	T_WATCH_VIRT	0x17
+#define	T_ECC		0x18
+#define	T_IMMU_MISS	0x19
+#define	T_DMMU_MISS	0x1a
+#define	T_DMMU_PROT	0x1b
+#define	T_SPILL		0x1c
+#define	T_FILL		0x1d
+#define	T_BREAKPOINT	0x1e
+
+#define	T_KERNEL	0x20
+
+#ifndef LOCORE
+extern const char *trap_msg[];
+#endif
+
+#endif /* !_MACHINE_TRAP_H_ */
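The trap numbers 0x0 through 0x1e follow the SPARC V9 trap-type assignments, and T_KERNEL (0x20) sits just above them, which suggests it is or'ed into the type for traps taken from kernel mode. Assuming that convention, a hypothetical lookup into the trap_msg table declared above might read:

#include <machine/trap.h>

/* Hypothetical: return the message for a trap type, masking off the
 * assumed kernel-mode flag; the caller must pass a valid trap number. */
static const char *
trap_name(u_int type)
{
	return (trap_msg[type & (T_KERNEL - 1)]);
}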
diff --git a/sys/sparc64/include/tsb.h b/sys/sparc64/include/tsb.h
new file mode 100644
index 000000000000..5bc46cf8e020
--- /dev/null
+++ b/sys/sparc64/include/tsb.h
@@ -0,0 +1,220 @@
+/*-
+ * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ *    promote products derived from this software without specific prior
+ *    written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: BSDI: pmap.v9.h,v 1.10.2.6 1999/08/23 22:18:44 cp Exp
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_TSB_H_
+#define _MACHINE_TSB_H_
+
+#define	TSB_KERNEL_MIN_ADDRESS		(0x6e000000000)
+#define	TSB_USER_MIN_ADDRESS		(0x6f000000000)
+
+#define	TSB_MASK_WIDTH			(6)
+
+#define	TSB_PRIMARY_BUCKET_SHIFT	(2)
+#define	TSB_PRIMARY_BUCKET_SIZE		(1 << TSB_PRIMARY_BUCKET_SHIFT)
+#define	TSB_PRIMARY_BUCKET_MASK		(TSB_PRIMARY_BUCKET_SIZE - 1)
+#define	TSB_SECONDARY_BUCKET_SHIFT	(3)
+#define	TSB_SECONDARY_BUCKET_SIZE	(1 << TSB_SECONDARY_BUCKET_SHIFT)
+#define	TSB_SECONDARY_BUCKET_MASK	(TSB_SECONDARY_BUCKET_SIZE - 1)
+
+#define	TSB_SECONDARY_STTE_SHIFT \
+	(STTE_SHIFT + TSB_SECONDARY_BUCKET_SHIFT)
+#define	TSB_SECONDARY_STTE_MASK		(1 << TSB_SECONDARY_STTE_SHIFT)
+
+#define	TSB_LEVEL1_BUCKET_MASK \
+	((TSB_SECONDARY_BUCKET_MASK & ~TSB_PRIMARY_BUCKET_MASK) << \
+	    (PAGE_SHIFT - TSB_PRIMARY_BUCKET_SHIFT))
+#define	TSB_LEVEL1_BUCKET_SHIFT \
+	(TSB_BUCKET_SPREAD_SHIFT + \
+	    (TSB_SECONDARY_BUCKET_SHIFT - TSB_PRIMARY_BUCKET_SHIFT))
+
+#define	TSB_BUCKET_SPREAD_SHIFT		(2)
+
+#define	TSB_DEPTH			(7)
+
+#define	TSB_KERNEL_PAGES		(1)
+#define	TSB_KERNEL_SIZE			(TSB_KERNEL_PAGES * PAGE_SIZE_4M)
+#define	TSB_KERNEL_MB			(512)
+#define	TSB_KERNEL_VM_RANGE		(TSB_KERNEL_MB * (1 << 20))
+#define	TSB_KERNEL_RANGE \
+	((TSB_KERNEL_VM_RANGE / PAGE_SIZE) * sizeof (struct stte))
+#define	TSB_KERNEL_MASK \
+	((TSB_KERNEL_RANGE / sizeof (struct stte)) - 1)
+
+#define	TSB_1M_STTE_SHIFT		(21)
+#define	TSB_1M_STTE_SIZE		(1 << TSB_1M_STTE_SHIFT)
+
+#define	TSB_SIZE_REG			(7)
+
+extern vm_offset_t tsb_kernel_phys;
+
+static __inline struct stte *
+tsb_base(u_int level)
+{
+	vm_offset_t base;
+	size_t len;
+
+	if (level == 0)
+		base = TSB_USER_MIN_ADDRESS;
+	else {
+		len = 1UL << ((level * TSB_BUCKET_SPREAD_SHIFT) +
+		    TSB_MASK_WIDTH + TSB_SECONDARY_BUCKET_SHIFT +
+		    STTE_SHIFT);
+		base = TSB_USER_MIN_ADDRESS + len;
+	}
+	return (struct stte *)base;
+}
+
+static __inline u_long
+tsb_bucket_shift(u_int level)
+{
+	return (level == 0 ?
+	    TSB_PRIMARY_BUCKET_SHIFT : TSB_SECONDARY_BUCKET_SHIFT);
+}
+
+static __inline u_long
+tsb_bucket_size(u_int level)
+{
+	return (1UL << tsb_bucket_shift(level));
+}
+
+static __inline u_long
+tsb_bucket_mask(u_int level)
+{
+	return (tsb_bucket_size(level) - 1);
+}
+
+static __inline u_long
+tsb_mask_width(u_int level)
+{
+	return ((level * TSB_BUCKET_SPREAD_SHIFT) + TSB_MASK_WIDTH);
+}
+
+static __inline u_long
+tsb_mask(u_int level)
+{
+	return ((1UL << tsb_mask_width(level)) - 1);
+}
+
+static __inline u_int
+tsb_tlb_slot(u_int level)
+{
+	return (level == 0 ?
+	    TLB_SLOT_TSB_USER_PRIMARY : TLB_SLOT_TSB_USER_SECONDARY);
+}
+
+static __inline vm_offset_t
+tsb_stte_vtophys(pmap_t pm, struct stte *stp)
+{
+	vm_offset_t va;
+	u_long data;
+
+	va = (vm_offset_t)stp;
+	if (pm == kernel_pmap)
+		return (tsb_kernel_phys +
+		    ((va - TSB_KERNEL_MIN_ADDRESS) << STTE_SHIFT));
+
+	if (trunc_page(va) == TSB_USER_MIN_ADDRESS)
+		data = pm->pm_stte.st_tte.tte_data;
+	else
+		data = ldxa(TLB_DAR_SLOT(tsb_tlb_slot(1)),
+		    ASI_DTLB_DATA_ACCESS_REG);
+	return ((vm_offset_t)((TD_PA(data)) + (va & PAGE_MASK)));
+}
+
+static __inline struct stte *
+tsb_vpntobucket(vm_offset_t vpn, u_int level)
+{
+	return (tsb_base(level) +
+	    ((vpn & tsb_mask(level)) << tsb_bucket_shift(level)));
+}
+
+static __inline struct stte *
+tsb_vtobucket(vm_offset_t va, u_int level)
+{
+	return (tsb_vpntobucket(va >> PAGE_SHIFT, level));
+}
+
+static __inline struct stte *
+tsb_kvpntostte(vm_offset_t vpn)
+{
+	struct stte *stp;
+
+	stp = (struct stte *)(TSB_KERNEL_MIN_ADDRESS +
+	    ((vpn & TSB_KERNEL_MASK) << STTE_SHIFT));
+	return (stp);
+}
+
+static __inline struct stte *
+tsb_kvtostte(vm_offset_t va)
+{
+	return (tsb_kvpntostte(va >> PAGE_SHIFT));
+}
+
+static __inline void
+tsb_tte_enter_kernel(vm_offset_t va, struct tte tte)
+{
+	struct stte *stp;
+
+	stp = tsb_kvtostte(va);
+	stp->st_tte = tte;
+#if 1
+	pv_insert(kernel_pmap, TD_PA(tte.tte_data), va, stp);
+#endif
+}
+
+static __inline void
+tsb_remove_kernel(vm_offset_t va)
+{
+	struct stte *stp;
+
+	stp = tsb_kvtostte(va);
+	tte_invalidate(&stp->st_tte);
+#if 1
+	pv_remove_virt(stp);
+#endif
+}
+
+struct stte *tsb_get_bucket(pmap_t pm, u_int level, vm_offset_t va,
+    int allocate);
+int tsb_miss(pmap_t pm, u_int type, struct mmuframe *mf);
+struct tte tsb_page_alloc(pmap_t pm, vm_offset_t va);
+void tsb_page_fault(pmap_t pm, int level, vm_offset_t va, struct stte *stp);
+void tsb_page_init(void *va, int level);
+struct stte *tsb_stte_lookup(pmap_t pm, vm_offset_t va);
+struct stte *tsb_stte_promote(pmap_t pm, vm_offset_t va, struct stte *stp);
+void tsb_stte_remove(struct stte *stp);
+struct stte *tsb_tte_enter(pmap_t pm, vm_offset_t va, struct tte tte);
+void tsb_tte_local_remove(struct tte *tp);
+
+extern vm_offset_t tsb_bootstrap_pages[];
+extern int tsb_bootstrap_index;
+
+#endif /* !_MACHINE_TSB_H_ */
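The user TSB is a multi-level, software-walked hash: at level 0 a virtual page number selects one of 2^TSB_MASK_WIDTH buckets of TSB_PRIMARY_BUCKET_SIZE sttes, and each deeper level spreads the hash across TSB_BUCKET_SPREAD_SHIFT more index bits with larger buckets. The pointer arithmetic in tsb_vpntobucket reduces to the hypothetical helper below, which returns the bucket's offset in stte entries rather than a pointer:

/* Hypothetical: offset, in struct stte entries, of the bucket that va
 * hashes to at the given level (mirrors tsb_vpntobucket above). */
static __inline u_long
tsb_bucket_offset(vm_offset_t va, u_int level)
{
	return (((va >> PAGE_SHIFT) & tsb_mask(level)) <<
	    tsb_bucket_shift(level));
}

As a worked example, assuming 8K pages (PAGE_SHIFT 13): for va 0x2000 at level 0, vpn is 1, tsb_mask(0) is 0x3f, and the bucket starts (1 & 0x3f) << 2 = 4 sttes past tsb_base(0).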
diff --git a/sys/sparc64/include/tte.h b/sys/sparc64/include/tte.h
new file mode 100644
index 000000000000..f938560881f5
--- /dev/null
+++ b/sys/sparc64/include/tte.h
@@ -0,0 +1,146 @@
+/*-
+ * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ *    promote products derived from this software without specific prior
+ *    written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: BSDI: pmap.v9.h,v 1.10.2.6 1999/08/23 22:18:44 cp Exp
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_TTE_H_
+#define _MACHINE_TTE_H_
+
+#include <machine/atomic.h>
+
+#define	TTE_SHIFT	4
+#define	STTE_SHIFT	5
+
+#define	TT_CTX_SHIFT	(48)
+#define	TT_VA_SHIFT	(22)
+#define	TT_VPN_SHIFT	(9)
+
+#define	TT_CTX_SIZE	(13)
+#define	TT_VA_SIZE	(42)
+
+#define	TT_CTX_MASK	(((1L << TT_CTX_SIZE) - 1) << TT_CTX_SHIFT)
+#define	TT_VA_MASK	((1L << TT_VA_SIZE) - 1)
+
+#define	TT_G		(1L << 63)
+#define	TT_CTX(ctx)	(((u_long)(ctx) << TT_CTX_SHIFT) & TT_CTX_MASK)
+#define	TT_VA(va)	(((u_long)(va) >> TT_VA_SHIFT) & TT_VA_MASK)
+
+#define	TD_SIZE_SHIFT	(61)
+#define	TD_SOFT2_SHIFT	(50)
+#define	TD_DIAG_SHIFT	(41)
+#define	TD_PA_SHIFT	(13)
+#define	TD_SOFT_SHIFT	(7)
+
+#define	TD_SIZE_SIZE	(2)
+#define	TD_SOFT2_SIZE	(9)
+#define	TD_DIAG_SIZE	(9)
+#define	TD_PA_SIZE	(28)
+#define	TD_SOFT_SIZE	(6)
+
+#define	TD_SIZE_MASK	(((1L << TD_SIZE_SIZE) - 1) << TD_SIZE_SHIFT)
+#define	TD_SOFT2_MASK	(((1L << TD_SOFT2_SIZE) - 1) << TD_SOFT2_SHIFT)
+#define	TD_DIAG_MASK	(((1L << TD_DIAG_SIZE) - 1) << TD_DIAG_SHIFT)
+#define	TD_PA_MASK	(((1L << TD_PA_SIZE) - 1) << TD_PA_SHIFT)
+#define	TD_SOFT_MASK	(((1L << TD_SOFT_SIZE) - 1) << TD_SOFT_SHIFT)
+
+#define	TD_VA_LOW_SHIFT	TD_SOFT2_SHIFT
+#define	TD_VA_LOW_MASK	TD_SOFT2_MASK
+
+#define	TS_EXEC		(1L << 3)
+#define	TS_MOD		(1L << 2)
+#define	TS_REF		(1L << 1)
+#define	TS_TSB		(1L << 0)
+
+#define	TD_V		(1L << 63)
+#define	TD_8K		(0L << TD_SIZE_SHIFT)
+#define	TD_64K		(1L << TD_SIZE_SHIFT)
+#define	TD_512K		(2L << TD_SIZE_SHIFT)
+#define	TD_4M		(3L << TD_SIZE_SHIFT)
+#define	TD_NFO		(1L << 60)
+#define	TD_IE		(1L << 59)
+#define	TD_VPN_LOW(vpn)	(((vpn) << TD_SOFT2_SHIFT) & TD_SOFT2_MASK)
+#define	TD_VA_LOW(va)	(TD_VPN_LOW((va) >> PAGE_SHIFT))
+#define	TD_PA(pa)	((pa) & TD_PA_MASK)
+#define	TD_EXEC		(TS_EXEC << TD_SOFT_SHIFT)
+#define	TD_MOD		(TS_MOD << TD_SOFT_SHIFT)
+#define	TD_REF		(TS_REF << TD_SOFT_SHIFT)
+#define	TD_TSB		(TS_TSB << TD_SOFT_SHIFT)
+#define	TD_L		(1L << 6)
+#define	TD_CP		(1L << 5)
+#define	TD_CV		(1L << 4)
+#define	TD_E		(1L << 3)
+#define	TD_P		(1L << 2)
+#define	TD_W		(1L << 1)
+#define	TD_G		(1L << 0)
+
+struct tte {
+	u_long	tte_tag;
+	u_long	tte_data;
+};
+
+struct stte {
+	struct	tte st_tte;
+	vm_offset_t st_next;
+	vm_offset_t st_prev;
+};
+
+static __inline u_int
+tte_get_ctx(struct tte tte)
+{
+	return ((tte.tte_tag & TT_CTX_MASK) >> TT_CTX_SHIFT);
+}
+
+static __inline vm_offset_t
+tte_get_vpn(struct tte tte)
+{
+	return (((tte.tte_tag & TT_VA_MASK) << TT_VPN_SHIFT) |
+	    ((tte.tte_data & TD_VA_LOW_MASK) >> TD_VA_LOW_SHIFT));
+}
+
+static __inline vm_offset_t
+tte_get_va(struct tte tte)
+{
+	return (tte_get_vpn(tte) << PAGE_SHIFT);
+}
+
+static __inline void
+tte_invalidate(struct tte *tp)
+{
+	atomic_clear_long(&tp->tte_data, TD_V);
+}
+
+static __inline int
+tte_match(struct tte tte, vm_offset_t va)
+{
+	return ((tte.tte_data & TD_V) != 0 &&
+	    ((tte.tte_tag ^ TT_VA(va)) & TT_VA_MASK) == 0 &&
+	    ((tte.tte_data ^ TD_VA_LOW(va)) & TD_VA_LOW_MASK) == 0);
+}
+
+#endif /* !_MACHINE_TTE_H_ */
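A tte splits a translation between the tag word (the context number and va<63:22>) and the data word (valid bit, page size, physical address and protection bits), with the low nine VPN bits stashed in the data word's soft2 field so tte_get_vpn can recover the full virtual address. A sketch of composing a valid, cacheable, privileged, writable 8K kernel mapping from these macros; the function is illustrative only, not part of the commit, and assumes machine/tlb.h is also included for TLB_CTX_KERNEL:

#include <machine/tlb.h>
#include <machine/tte.h>

/* Hypothetical: build a tte mapping va to pa in the kernel context. */
static __inline struct tte
tte_make_kernel_8k(vm_offset_t va, vm_offset_t pa)
{
	struct tte tte;

	tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
	tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
	    TD_CP | TD_CV | TD_P | TD_W;
	return (tte);
}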
diff --git a/sys/sparc64/include/vmparam.h b/sys/sparc64/include/vmparam.h
index 81858cc4fdad..677b4f2729fe 100644
--- a/sys/sparc64/include/vmparam.h
+++ b/sys/sparc64/include/vmparam.h
@@ -62,23 +62,23 @@
  */
 #define	MAXSLP		20
 
-#define	VM_MAXUSER_ADDRESS	0
+#define	VM_MAXUSER_ADDRESS	(0x5ffffffffff)
 
 #define	USRSTACK	VM_MAXUSER_ADDRESS
-#define	VM_MIN_ADDRESS	0
+#define	VM_MIN_ADDRESS	(0)
 
 /*
  * Virtual size (bytes) for various kernel submaps.
  */
 #ifndef VM_KMEM_SIZE
-#define VM_KMEM_SIZE	(12*1024*1024)
+#define	VM_KMEM_SIZE	(12*1024*1024)
 #endif
 
-#define	VM_MIN_KERNEL_ADDRESS	(0)
-#define	VM_MAX_KERNEL_ADDRESS	(0)
+#define	VM_MIN_KERNEL_ADDRESS	(0x60000000000)
+#define	VM_MAX_KERNEL_ADDRESS	(0x6e000000000)
 
-#define	KERNBASE	(0)
+#define	KERNBASE	(0x60000000000)
 
 /*
  * Initial pagein size of beginning of executable file.
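With these values the virtual address space is carved up as: user addresses from 0 up to VM_MAXUSER_ADDRESS (just under 0x60000000000), the kernel from KERNBASE (0x60000000000, equal to VM_MIN_KERNEL_ADDRESS) up to 0x6e000000000, and, per tsb.h above, the kernel and user TSB virtual areas at 0x6e000000000 and 0x6f000000000 directly above the kernel range. A hypothetical classifier built on the new constants, for illustration only:

#include <machine/vmparam.h>

/* Hypothetical: nonzero if va falls within the kernel's mapped range. */
static __inline int
va_is_kernel(vm_offset_t va)
{
	return (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS);
}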