author    Jung-uk Kim <jkim@FreeBSD.org>  2014-03-04 20:16:00 +0000
committer Jung-uk Kim <jkim@FreeBSD.org>  2014-03-04 20:16:00 +0000
commit    be2d4fcf68f8340a22b567060fc308d793805edd (patch)
tree      cadc2eacdbbc40f461f8b3ed243fd2114d10e48b /sys/amd64
parent    12a5ad4762969252c0fc10dbe9290d4bea509704 (diff)
Revert accidentally committed changes in 262748.
Notes: svn path=/head/; revision=262750
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/mpboot.S | 40
-rw-r--r--  sys/amd64/amd64/pmap.c   | 38
2 files changed, 6 insertions, 72 deletions
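
The revert covers two unrelated pieces that slipped into r262748: a clang-compatible
real-mode trampoline path in mpboot.S, and early plumbing for AMD's RVI (nested page
tables) in pmap.c, including a pmap_type_guest() helper whose call sites go back to
testing PT_EPT directly. A minimal user-space sketch of that helper, modeling only
what the diff below shows (the types here are simplified stand-ins, not kernel ones):

/*
 * Sketch only, not FreeBSD source: a model of the helper this revert
 * removes.  RVI is AMD's nested-paging scheme; the accidentally
 * committed code grouped it with Intel's EPT as a "guest" page-table
 * type so call sites could test both at once.
 */
#include <stdbool.h>
#include <stdio.h>

enum pmap_type { PT_X86, PT_EPT, PT_RVI };

struct pmap { enum pmap_type pm_type; };

/* Mirrors the removed pmap_type_guest(): true for hypervisor tables. */
static bool
pmap_type_guest(const struct pmap *pmap)
{
	return (pmap->pm_type == PT_EPT || pmap->pm_type == PT_RVI);
}

int
main(void)
{
	struct pmap p = { .pm_type = PT_RVI };

	/* After the revert, call sites test pm_type == PT_EPT alone. */
	printf("guest pmap: %s\n", pmap_type_guest(&p) ? "yes" : "no");
	return (0);
}
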
diff --git a/sys/amd64/amd64/mpboot.S b/sys/amd64/amd64/mpboot.S
index 1fd56e181e9a..ec30c72ecb9b 100644
--- a/sys/amd64/amd64/mpboot.S
+++ b/sys/amd64/amd64/mpboot.S
@@ -36,7 +36,6 @@
.p2align 4,0
.globl mptramp_start
mptramp_start:
-#ifndef __clang__
.code16
/*
* The AP enters here in response to the startup IPI.
@@ -66,43 +65,6 @@ mptramp_start:
/* Enable protected mode */
movl $CR0_PE, %eax
mov %eax, %cr0
-#else
- /*
- * The AP enters here in response to the startup IPI.
- * We are in real mode. %cs is the only segment register set.
- */
- cli /* make sure no interrupts */
- mov %cs, %eax /* copy %cs to %ds. Remember these */
- mov %eax, %ds /* are offsets rather than selectors */
- mov %eax, %ss
-
- /*
- * Find relocation base and patch the gdt descript and ljmp targets
- */
- .byte 0x66
- xorl %ebx, %ebx
- mov %cs, %ebx
- .byte 0x66
- sall $4, %ebx /* %ebx is now our relocation base */
- .byte 0x66, 0x09, 0x1e
- .word lgdt_desc-mptramp_start+2
- .byte 0x66, 0x09, 0x1e
- .word jmp_32-mptramp_start+2
- .byte 0x66, 0x09, 0x1e
- .word jmp_64-mptramp_start+1
-
- /*
- * Load the descriptor table pointer. We'll need it when running
- * in 16 bit protected mode.
- */
- .byte 0x0f, 0x01, 0x16
- .word lgdt_desc-mptramp_start
-
- /* Enable protected mode */
- .byte 0x66
- movl $CR0_PE, %eax
- mov %eax, %cr0
-#endif
/*
* Now execute a far jump to turn on protected mode. This
@@ -126,7 +88,7 @@ jmp_32:
.code32
protmode:
mov $bootdata-gdt, %eax
- mov %eax, %ds
+ mov %ax, %ds
/* Turn on the PAE, PSE and PGE bits for when paging is enabled */
mov %cr4, %eax
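
Everything mpboot.S loses is the #else branch above: because clang's integrated
assembler could not handle .code16 at the time, the trampoline had been rewritten
as 32-bit code with hand-placed 0x66 operand-size prefixes and .byte-encoded
instructions. The final one-line change restores mov %ax, %ds; segment-register
loads take a 16-bit operand, so %ax is the unambiguous spelling (both forms should
encode the same 8E /r instruction). A decoding sketch of the hand-assembled bytes,
written as annotated C arrays; the byte values are my reading of the opcodes, not
taken from a built object:

/*
 * Sketch only: what the hand-assembled .byte sequences above encode.
 * In 16-bit real mode a 0x66 prefix flips one instruction to 32-bit
 * operands, which is how the reverted workaround ran 32-bit-operand
 * code without ".code16".
 */
#include <stdint.h>
#include <stdio.h>

/* 0x66 prefix + "shll $4, %ebx": C1 /4 ib, modrm 0xe3 selects %ebx. */
static const uint8_t sall4_ebx[] = { 0x66, 0xc1, 0xe3, 0x04 };

/*
 * 0x66 prefix + "orl %ebx, disp16": 09 /r, modrm 0x1e = 16-bit
 * absolute address; used to patch the relocation base into the gdt
 * descriptor and the far-jump targets (address bytes follow as .word).
 */
static const uint8_t orl_ebx_mem[] = { 0x66, 0x09, 0x1e /*, lo, hi */ };

/* "lgdtw disp16": 0F 01 /2, modrm 0x16 = 16-bit absolute address. */
static const uint8_t lgdtw_mem[] = { 0x0f, 0x01, 0x16 /*, lo, hi */ };

int
main(void)
{
	/* Sizes only; the arrays exist for their annotated contents. */
	printf("%zu %zu %zu\n", sizeof(sall4_ebx), sizeof(orl_ebx_mem),
	    sizeof(lgdtw_mem));
	return (0);
}
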
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 98fc57fc74bc..10490df6fbd4 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -146,13 +146,6 @@ __FBSDID("$FreeBSD$");
#endif
static __inline boolean_t
-pmap_type_guest(pmap_t pmap)
-{
-
- return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI));
-}
-
-static __inline boolean_t
pmap_emulate_ad_bits(pmap_t pmap)
{
@@ -166,7 +159,6 @@ pmap_valid_bit(pmap_t pmap)
switch (pmap->pm_type) {
case PT_X86:
- case PT_RVI:
mask = X86_PG_V;
break;
case PT_EPT:
@@ -189,7 +181,6 @@ pmap_rw_bit(pmap_t pmap)
switch (pmap->pm_type) {
case PT_X86:
- case PT_RVI:
mask = X86_PG_RW;
break;
case PT_EPT:
@@ -214,7 +205,6 @@ pmap_global_bit(pmap_t pmap)
case PT_X86:
mask = X86_PG_G;
break;
- case PT_RVI:
case PT_EPT:
mask = 0;
break;
@@ -232,7 +222,6 @@ pmap_accessed_bit(pmap_t pmap)
switch (pmap->pm_type) {
case PT_X86:
- case PT_RVI:
mask = X86_PG_A;
break;
case PT_EPT:
@@ -255,7 +244,6 @@ pmap_modified_bit(pmap_t pmap)
switch (pmap->pm_type) {
case PT_X86:
- case PT_RVI:
mask = X86_PG_M;
break;
case PT_EPT:
@@ -1114,9 +1102,6 @@ pmap_swap_pat(pmap_t pmap, pt_entry_t entry)
if ((entry & x86_pat_bits) != 0)
entry ^= x86_pat_bits;
break;
- case PT_RVI:
- /* XXX: PAT support. */
- break;
case PT_EPT:
/*
* Nothing to do - the memory attributes are represented
@@ -1160,11 +1145,6 @@ pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
cache_bits |= PG_NC_PWT;
break;
- case PT_RVI:
- /* XXX: PAT support. */
- cache_bits = 0;
- break;
-
case PT_EPT:
cache_bits = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(mode);
break;
@@ -1185,10 +1165,6 @@ pmap_cache_mask(pmap_t pmap, boolean_t is_pde)
case PT_X86:
mask = is_pde ? X86_PG_PDE_CACHE : X86_PG_PTE_CACHE;
break;
- case PT_RVI:
- /* XXX: PAT support. */
- mask = 0;
- break;
case PT_EPT:
mask = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(0x7);
break;
@@ -1213,7 +1189,6 @@ pmap_update_pde_store(pmap_t pmap, pd_entry_t *pde, pd_entry_t newpde)
switch (pmap->pm_type) {
case PT_X86:
break;
- case PT_RVI:
case PT_EPT:
/*
* XXX
@@ -1249,7 +1224,7 @@ pmap_update_pde_invalidate(pmap_t pmap, vm_offset_t va, pd_entry_t newpde)
{
pt_entry_t PG_G;
- if (pmap_type_guest(pmap))
+ if (pmap->pm_type == PT_EPT)
return;
KASSERT(pmap->pm_type == PT_X86,
@@ -1363,7 +1338,7 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
cpuset_t other_cpus;
u_int cpuid;
- if (pmap_type_guest(pmap)) {
+ if (pmap->pm_type == PT_EPT) {
pmap_invalidate_ept(pmap);
return;
}
@@ -1441,7 +1416,7 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
vm_offset_t addr;
u_int cpuid;
- if (pmap_type_guest(pmap)) {
+ if (pmap->pm_type == PT_EPT) {
pmap_invalidate_ept(pmap);
return;
}
@@ -1500,7 +1475,7 @@ pmap_invalidate_all(pmap_t pmap)
uint64_t cr3;
u_int cpuid;
- if (pmap_type_guest(pmap)) {
+ if (pmap->pm_type == PT_EPT) {
pmap_invalidate_ept(pmap);
return;
}
@@ -1620,7 +1595,7 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
cpuid = PCPU_GET(cpuid);
other_cpus = all_cpus;
CPU_CLR(cpuid, &other_cpus);
- if (pmap == kernel_pmap || pmap_type_guest(pmap))
+ if (pmap == kernel_pmap || pmap->pm_type == PT_EPT)
active = all_cpus;
else {
active = pmap->pm_active;
@@ -1658,7 +1633,6 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
invlpg(va);
break;
- case PT_RVI:
case PT_EPT:
pmap->pm_eptgen++;
break;
@@ -1678,7 +1652,6 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
break;
- case PT_RVI:
case PT_EPT:
pmap->pm_eptgen++;
break;
@@ -1696,7 +1669,6 @@ pmap_invalidate_all(pmap_t pmap)
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
invltlb();
break;
- case PT_RVI:
case PT_EPT:
pmap->pm_eptgen++;
break;
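
The pmap.c hunks all narrow the guest-pmap tests back to Intel EPT: the PT_RVI
cases disappear from the per-type bit selectors, and the invalidation paths once
again compare pm_type against PT_EPT directly. The dispatch they restore defers
guest TLB shootdowns by bumping pm_eptgen rather than touching the hardware TLB.
A stand-alone sketch of that split, with simplified types and printf standing in
for invltlb() and the hypervisor hand-off:

/*
 * Sketch only, not FreeBSD source: ordinary PT_X86 pmaps flush the
 * hardware TLB directly; PT_EPT pmaps owned by bhyve bump a
 * generation counter (modeled on pm_eptgen) and let the hypervisor
 * invalidate the EPT-tagged TLB entries on the next VM entry.
 */
#include <stdint.h>
#include <stdio.h>

enum pmap_type { PT_X86, PT_EPT };

struct pmap {
	enum pmap_type	pm_type;
	uint64_t	pm_eptgen;	/* models the kernel's pm_eptgen */
};

static void
invalidate_all(struct pmap *pmap)
{
	switch (pmap->pm_type) {
	case PT_X86:
		/* The kernel would issue invltlb() here. */
		printf("flush TLB now\n");
		break;
	case PT_EPT:
		/* Deferred: the hypervisor notices the new generation. */
		pmap->pm_eptgen++;
		printf("eptgen -> %llu\n",
		    (unsigned long long)pmap->pm_eptgen);
		break;
	}
}

int
main(void)
{
	struct pmap guest = { .pm_type = PT_EPT, .pm_eptgen = 0 };

	invalidate_all(&guest);
	return (0);
}
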