Commit 71b264f8, authored 17 years ago by Tony Luck
Pull miscellaneous into release branch
Conflicts: arch/ia64/kernel/mca.c
Parents: f4df39cb, 072f042d
Showing 20 changed files with 337 additions and 111 deletions (the commit touches 21 files; the remaining file was on a second page not captured here).
arch/ia64/Kconfig                     +3    -0
arch/ia64/hp/common/sba_iommu.c       +39   -17
arch/ia64/kernel/asm-offsets.c        +7    -0
arch/ia64/kernel/crash.c              +36   -20
arch/ia64/kernel/fsys.S               +30   -4
arch/ia64/kernel/irq_ia64.c           +1    -1
arch/ia64/kernel/kprobes.c            +107  -26
arch/ia64/kernel/mca.c                +5    -6
arch/ia64/kernel/perfmon.c            +2    -2
arch/ia64/kernel/setup.c              +23   -0
arch/ia64/kernel/smpboot.c            +1    -1
arch/ia64/kernel/unaligned.c          +2    -1
arch/ia64/mm/contig.c                 +1    -3
arch/ia64/mm/discontig.c              +1    -3
arch/ia64/mm/init.c                   +3    -11
arch/ia64/sn/kernel/xpc_main.c        +4    -4
arch/ia64/sn/kernel/xpc_partition.c   +1    -1
include/asm-ia64/kprobes.h            +6    -1
include/asm-ia64/meminit.h            +2    -1
include/asm-ia64/pal.h                +63   -9
arch/ia64/Kconfig

@@ -622,6 +622,9 @@ config IRQ_PER_CPU
 	bool
 	default y
 
+config IOMMU_HELPER
+	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC)
+
 source "arch/ia64/hp/sim/Kconfig"
 
 source "arch/ia64/Kconfig.debug"
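For context: the new IOMMU_HELPER symbol builds lib/iommu-helper.c, which provides the iommu_is_span_boundary() helper used by the sba_iommu.c changes below; def_bool turns it on automatically whenever one of the listed HP ZX1 or generic ia64 configurations is selected.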
arch/ia64/hp/common/sba_iommu.c

@@ -35,6 +35,7 @@
 #include <linux/nodemask.h>
 #include <linux/bitops.h>         /* hweight64() */
 #include <linux/crash_dump.h>
+#include <linux/iommu-helper.h>
 
 #include <asm/delay.h>		/* ia64_get_itc() */
 #include <asm/io.h>
@@ -460,6 +461,13 @@ get_iovp_order (unsigned long size)
 	return order;
 }
 
+static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
+				 unsigned int bitshiftcnt)
+{
+	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
+		+ bitshiftcnt;
+}
+
 /**
  * sba_search_bitmap - find free space in IO PDIR resource bitmap
  * @ioc: IO MMU structure which owns the pdir we are interested in.
@@ -471,15 +479,25 @@ get_iovp_order (unsigned long size)
  * Cool perf optimization: search for log2(size) bits at a time.
  */
 static SBA_INLINE unsigned long
-sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
+sba_search_bitmap(struct ioc *ioc, struct device *dev,
+		  unsigned long bits_wanted, int use_hint)
 {
 	unsigned long *res_ptr;
 	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
-	unsigned long flags, pide = ~0UL;
+	unsigned long flags, pide = ~0UL, tpide;
+	unsigned long boundary_size;
+	unsigned long shift;
+	int ret;
 
 	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
 	ASSERT(res_ptr < res_end);
 
+	boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
+	boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;
+
+	BUG_ON(ioc->ibase & ~iovp_mask);
+	shift = ioc->ibase >> iovp_shift;
+
 	spin_lock_irqsave(&ioc->res_lock, flags);
 
 	/* Allow caller to force a search through the entire resource space */
@@ -504,9 +522,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 		if (likely(*res_ptr != ~0UL)) {
 			bitshiftcnt = ffz(*res_ptr);
 			*res_ptr |= (1UL << bitshiftcnt);
-			pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-			pide <<= 3;	/* convert to bit address */
-			pide += bitshiftcnt;
+			pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
 			ioc->res_bitshift = bitshiftcnt + bits_wanted;
 			goto found_it;
 		}
@@ -535,11 +551,13 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 		DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
 		ASSERT(0 != mask);
 		for (; mask ; mask <<= o, bitshiftcnt += o) {
-			if(0 == ((*res_ptr) & mask)) {
+			tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
+			ret = iommu_is_span_boundary(tpide, bits_wanted,
+						     shift,
+						     boundary_size);
+			if ((0 == ((*res_ptr) & mask)) && !ret) {
 				*res_ptr |= mask;     /* mark resources busy! */
-				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-				pide <<= 3;	/* convert to bit address */
-				pide += bitshiftcnt;
+				pide = tpide;
 				ioc->res_bitshift = bitshiftcnt + bits_wanted;
 				goto found_it;
 			}
@@ -560,6 +578,11 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 		end = res_end - qwords;
 
 		for (; res_ptr < end; res_ptr++) {
+			tpide = ptr_to_pide(ioc, res_ptr, 0);
+			ret = iommu_is_span_boundary(tpide, bits_wanted,
+						     shift, boundary_size);
+			if (ret)
+				goto next_ptr;
 			for (i = 0 ; i < qwords ; i++) {
 				if (res_ptr[i] != 0)
 					goto next_ptr;
@@ -572,8 +595,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 			res_ptr[i] = ~0UL;
 			res_ptr[i] |= RESMAP_MASK(bits);
 
-			pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-			pide <<= 3;	/* convert to bit address */
+			pide = tpide;
 			res_ptr += qwords;
 			ioc->res_bitshift = bits;
 			goto found_it;
@@ -605,7 +627,7 @@ found_it:
  * resource bit map.
  */
 static int
-sba_alloc_range(struct ioc *ioc, size_t size)
+sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 {
 	unsigned int pages_needed = size >> iovp_shift;
 #ifdef PDIR_SEARCH_TIMING
@@ -622,9 +644,9 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 	/*
 	** "seek and ye shall find"...praying never hurts either...
 	*/
-	pide = sba_search_bitmap(ioc, pages_needed, 1);
+	pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
 	if (unlikely(pide >= (ioc->res_size << 3))) {
-		pide = sba_search_bitmap(ioc, pages_needed, 0);
+		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
 		if (unlikely(pide >= (ioc->res_size << 3))) {
 #if DELAYED_RESOURCE_CNT > 0
 			unsigned long flags;
@@ -653,7 +675,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 			}
 			spin_unlock_irqrestore(&ioc->saved_lock, flags);
 
-			pide = sba_search_bitmap(ioc, pages_needed, 0);
+			pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
 			if (unlikely(pide >= (ioc->res_size << 3)))
 				panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
 				      ioc->ioc_hpa);
@@ -936,7 +958,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
-	pide = sba_alloc_range(ioc, size);
+	pide = sba_alloc_range(ioc, dev, size);
 
 	iovp = (dma_addr_t) pide << iovp_shift;
@@ -1373,7 +1395,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
 		ASSERT(dma_len <= DMA_CHUNK_SIZE);
 		dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG
-			| (sba_alloc_range(ioc, dma_len) << iovp_shift)
+			| (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
			| dma_offset);
 		n_mappings++;
 	}
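Threading dev down into sba_alloc_range()/sba_search_bitmap() exists so the allocator can honor the device's DMA segment boundary (dma_get_seg_boundary()): an allocated IOVA range must not straddle a boundary_size-aligned segment. A minimal sketch of the test iommu_is_span_boundary() performs (this mirrors the lib/iommu-helper.c logic rather than quoting it; boundary_size is in IOVP pages and a power of two):

	static int is_span_boundary(unsigned int index, unsigned int nr,
				    unsigned long shift,
				    unsigned long boundary_size)
	{
		/* offset of the candidate range inside its boundary segment;
		 * 'shift' maps the page index into the device-visible IOVA
		 * space (ioc->ibase >> iovp_shift in the caller) */
		shift = (shift + index) & (boundary_size - 1);
		/* nonzero (reject) if index..index+nr-1 would cross a segment */
		return shift + nr > boundary_size;
	}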
arch/ia64/kernel/asm-offsets.c

@@ -7,6 +7,7 @@
 #define ASM_OFFSETS_C 1
 
 #include <linux/sched.h>
+#include <linux/pid.h>
 #include <linux/clocksource.h>
 
 #include <asm-ia64/processor.h>
@@ -34,6 +35,9 @@ void foo(void)
 	DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe));
 	DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
 
+	BUILD_BUG_ON(sizeof(struct upid) != 32);
+	DEFINE(IA64_UPID_SHIFT, 5);
+
 	BLANK();
 
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
@@ -51,6 +55,9 @@ void foo(void)
 	DEFINE(IA64_TASK_BLOCKED_OFFSET, offsetof (struct task_struct, blocked));
 	DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET, offsetof (struct task_struct, clear_child_tid));
 	DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
+	DEFINE(IA64_TASK_TGIDLINK_OFFSET, offsetof (struct task_struct, pids[PIDTYPE_PID].pid));
+	DEFINE(IA64_PID_LEVEL_OFFSET, offsetof (struct pid, level));
+	DEFINE(IA64_PID_UPID_OFFSET, offsetof (struct pid, numbers[0]));
 	DEFINE(IA64_TASK_PENDING_OFFSET, offsetof (struct task_struct, pending));
 	DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid));
 	DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent));
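Why IA64_UPID_SHIFT is 5: the BUILD_BUG_ON pins sizeof(struct upid) at 32 bytes, so the assembly in fsys.S can index pid->numbers[] with a shift instead of a multiply. What these constants let the assembly compute, as a C sketch (hypothetical helper name; kernel context assumed):

	static inline pid_t upid_nr_at_level(struct pid *pid)
	{
		/* &pid->numbers[pid->level], built from the exported offsets */
		struct upid *up = (struct upid *)((char *)pid
				+ IA64_PID_UPID_OFFSET
				+ ((unsigned long)pid->level << IA64_UPID_SHIFT));
		return up->nr;	/* the pid number as seen in that namespace */
	}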
arch/ia64/kernel/crash.c

@@ -24,6 +24,7 @@ int kdump_status[NR_CPUS];
 static atomic_t kdump_cpu_frozen;
 atomic_t kdump_in_progress;
 static int kdump_on_init = 1;
+static int kdump_on_fatal_mca = 1;
 
 static inline Elf64_Word
 *append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
@@ -118,6 +119,7 @@ machine_crash_shutdown(struct pt_regs *pt)
 static void
 machine_kdump_on_init(void)
 {
+	crash_save_vmcoreinfo();
 	local_irq_disable();
 	kexec_disable_iosapic();
 	machine_kexec(ia64_kimage);
@@ -148,7 +150,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 	struct ia64_mca_notify_die *nd;
 	struct die_args *args = data;
 
-	if (!kdump_on_init)
+	if (!kdump_on_init && !kdump_on_fatal_mca)
 		return NOTIFY_DONE;
 
 	if (!ia64_kimage) {
@@ -173,32 +175,38 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 		return NOTIFY_DONE;
 
 	switch (val) {
-		case DIE_INIT_MONARCH_PROCESS:
+	case DIE_INIT_MONARCH_PROCESS:
+		if (kdump_on_init) {
 			atomic_set(&kdump_in_progress, 1);
 			*(nd->monarch_cpu) = -1;
-			break;
-		case DIE_INIT_MONARCH_LEAVE:
+		}
+		break;
+	case DIE_INIT_MONARCH_LEAVE:
+		if (kdump_on_init)
 			machine_kdump_on_init();
-			break;
-		case DIE_INIT_SLAVE_LEAVE:
-			if (atomic_read(&kdump_in_progress))
-				unw_init_running(kdump_cpu_freeze, NULL);
-			break;
-		case DIE_MCA_RENDZVOUS_LEAVE:
-			if (atomic_read(&kdump_in_progress))
-				unw_init_running(kdump_cpu_freeze, NULL);
-			break;
-		case DIE_MCA_MONARCH_LEAVE:
-		     /* die_register->signr indicate if MCA is recoverable */
-			if (!args->signr)
-				machine_kdump_on_init();
-			break;
+		break;
+	case DIE_INIT_SLAVE_LEAVE:
+		if (atomic_read(&kdump_in_progress))
+			unw_init_running(kdump_cpu_freeze, NULL);
+		break;
+	case DIE_MCA_RENDZVOUS_LEAVE:
+		if (atomic_read(&kdump_in_progress))
+			unw_init_running(kdump_cpu_freeze, NULL);
+		break;
+	case DIE_MCA_MONARCH_LEAVE:
+		/* die_register->signr indicate if MCA is recoverable */
+		if (kdump_on_fatal_mca && !args->signr) {
+			atomic_set(&kdump_in_progress, 1);
+			*(nd->monarch_cpu) = -1;
+			machine_kdump_on_init();
+		}
+		break;
 	}
 	return NOTIFY_DONE;
 }
 
 #ifdef CONFIG_SYSCTL
-static ctl_table kdump_on_init_table[] = {
+static ctl_table kdump_ctl_table[] = {
 	{
 		.ctl_name = CTL_UNNUMBERED,
 		.procname = "kdump_on_init",
@@ -207,6 +215,14 @@ static ctl_table kdump_on_init_table[] = {
 		.mode = 0644,
 		.proc_handler = &proc_dointvec,
 	},
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "kdump_on_fatal_mca",
+		.data = &kdump_on_fatal_mca,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
 	{ .ctl_name = 0 }
 };
 
@@ -215,7 +231,7 @@ static ctl_table sys_table[] = {
 		.ctl_name = CTL_KERN,
 		.procname = "kernel",
 		.mode = 0555,
-		.child = kdump_on_init_table,
+		.child = kdump_ctl_table,
 	},
 	{ .ctl_name = 0 }
 };
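Since kdump_ctl_table hangs off CTL_KERN, both knobs surface as /proc/sys/kernel/kdump_on_init and /proc/sys/kernel/kdump_on_fatal_mca (integers, default 1). The behavioral change: a fatal, unrecoverable MCA (args->signr == 0) now triggers a kdump only when kdump_on_fatal_mca is set, independently of kdump_on_init, and the kdump_in_progress/monarch bookkeeping that mca.c used to do under #ifdef CONFIG_KEXEC moves here (see the mca.c hunks further below).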
arch/ia64/kernel/fsys.S

@@ -61,13 +61,29 @@ ENTRY(fsys_getpid)
 	.prologue
 	.altrp b6
 	.body
+	add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
+	;;
+	ld8 r17=[r17]				// r17 = current->group_leader
 	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
 	;;
 	ld4 r9=[r9]
-	add r8=IA64_TASK_TGID_OFFSET,r16
+	add r17=IA64_TASK_TGIDLINK_OFFSET,r17
 	;;
 	and r9=TIF_ALLWORK_MASK,r9
-	ld4 r8=[r8]				// r8 = current->tgid
+	ld8 r17=[r17]				// r17 = current->group_leader->pids[PIDTYPE_PID].pid
+	;;
+	add r8=IA64_PID_LEVEL_OFFSET,r17
+	;;
+	ld4 r8=[r8]				// r8 = pid->level
+	add r17=IA64_PID_UPID_OFFSET,r17	// r17 = &pid->numbers[0]
+	;;
+	shl r8=r8,IA64_UPID_SHIFT
+	;;
+	add r17=r17,r8				// r17 = &pid->numbers[pid->level]
+	;;
+	ld4 r8=[r17]				// r8 = pid->numbers[pid->level].nr
+	;;
+	mov r17=0
 	;;
 	cmp.ne p8,p0=0,r9
 (p8)	br.spnt.many fsys_fallback_syscall
@@ -126,15 +142,25 @@ ENTRY(fsys_set_tid_address)
 	.altrp b6
 	.body
 	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
+	add r17=IA64_TASK_TGIDLINK_OFFSET,r16
 	;;
 	ld4 r9=[r9]
 	tnat.z p6,p7=r32		// check argument register for being NaT
+	ld8 r17=[r17]				// r17 = current->pids[PIDTYPE_PID].pid
 	;;
 	and r9=TIF_ALLWORK_MASK,r9
-	add r8=IA64_TASK_PID_OFFSET,r16
+	add r8=IA64_PID_LEVEL_OFFSET,r17
 	add r18=IA64_TASK_CLEAR_CHILD_TID_OFFSET,r16
 	;;
-	ld4 r8=[r8]
+	ld4 r8=[r8]				// r8 = pid->level
+	add r17=IA64_PID_UPID_OFFSET,r17	// r17 = &pid->numbers[0]
+	;;
+	shl r8=r8,IA64_UPID_SHIFT
+	;;
+	add r17=r17,r8				// r17 = &pid->numbers[pid->level]
+	;;
+	ld4 r8=[r17]				// r8 = pid->numbers[pid->level].nr
+	;;
 	cmp.ne p8,p0=0,r9
 	mov r17=-1
 	;;
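Both fast paths now resolve the pid through the namespace-aware struct pid instead of reading the flat task->tgid/task->pid fields, which report the wrong number inside a pid namespace. The new fsys_getpid sequence as a C sketch (hypothetical name; getpid() returns the tgid, hence the group_leader hop):

	static inline pid_t fsys_getpid_in_c(struct task_struct *tsk)
	{
		struct pid *pid = tsk->group_leader->pids[PIDTYPE_PID].pid;
		return pid->numbers[pid->level].nr;  /* tgid in tsk's own ns */
	}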
arch/ia64/kernel/irq_ia64.c

@@ -472,7 +472,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 		static unsigned char count;
 		static long last_time;
 
-		if (jiffies - last_time > 5*HZ)
+		if (time_after(jiffies, last_time + 5 * HZ))
 			count = 0;
 		if (++count < 5) {
 			last_time = jiffies;
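The open-coded jiffies arithmetic (here and in unaligned.c, mca.c and the two xpc files below) can misfire around a jiffies wrap, especially with last_time declared as a plain long. The time_after()/time_before() macros from <linux/jiffies.h> do the comparison wrap-safely; stripped of their typechecking, they reduce to:

	#define time_after(a, b)	((long)((b) - (a)) < 0)
	#define time_before(a, b)	time_after(b, a)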
arch/ia64/kernel/kprobes.c

@@ -78,6 +78,20 @@ static enum instruction_type bundle_encoding[32][3] = {
 	{ u, u, u },			/* 1F */
 };
 
+/* Insert a long branch code */
+static void __kprobes set_brl_inst(void *from, void *to)
+{
+	s64 rel = ((s64) to - (s64) from) >> 4;
+	bundle_t *brl;
+	brl = (bundle_t *) ((u64) from & ~0xf);
+	brl->quad0.template = 0x05;	/* [MLX](stop) */
+	brl->quad0.slot0 = NOP_M_INST;	/* nop.m 0x0 */
+	brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2;
+	brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46);
+	/* brl.cond.sptk.many.clr rel<<4 (qp=0) */
+	brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff);
+}
+
 /*
  * In this function we check to see if the instruction
  * is IP relative instruction and update the kprobe
@@ -496,6 +510,77 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 	regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
 }
 
+/* Check the instruction in the slot is break */
+static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot)
+{
+	unsigned int major_opcode;
+	unsigned int template = bundle->quad0.template;
+	unsigned long kprobe_inst;
+
+	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
+	if (slot == 1 && bundle_encoding[template][1] == L)
+		slot++;
+
+	/* Get Kprobe probe instruction at given slot*/
+	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
+
+	/* For break instruction,
+	 * Bits 37:40 Major opcode to be zero
+	 * Bits 27:32 X6 to be zero
+	 * Bits 32:35 X3 to be zero
+	 */
+	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
+		/* Not a break instruction */
+		return 0;
+	}
+
+	/* Is a break instruction */
+	return 1;
+}
+
+/*
+ * In this function, we check whether the target bundle modifies IP or
+ * it triggers an exception. If so, it cannot be boostable.
+ */
+static int __kprobes can_boost(bundle_t *bundle, uint slot,
+			       unsigned long bundle_addr)
+{
+	unsigned int template = bundle->quad0.template;
+
+	do {
+		if (search_exception_tables(bundle_addr + slot) ||
+		    __is_ia64_break_inst(bundle, slot))
+			return 0;	/* exception may occur in this bundle*/
+	} while ((++slot) < 3);
+	template &= 0x1e;
+	if (template >= 0x10 /* including B unit */ ||
+	    template == 0x04 /* including X unit */ ||
+	    template == 0x06) /* undefined */
+		return 0;
+
+	return 1;
+}
+
+/* Prepare long jump bundle and disables other boosters if need */
+static void __kprobes prepare_booster(struct kprobe *p)
+{
+	unsigned long addr = (unsigned long)p->addr & ~0xFULL;
+	unsigned int slot = (unsigned long)p->addr & 0xf;
+	struct kprobe *other_kp;
+
+	if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) {
+		set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1);
+		p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
+	}
+
+	/* disables boosters in previous slots */
+	for (; addr < (unsigned long)p->addr; addr++) {
+		other_kp = get_kprobe((void *)addr);
+		if (other_kp)
+			other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE;
+	}
+}
+
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	unsigned long addr = (unsigned long) p->addr;
@@ -530,6 +615,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);
 
+	prepare_booster(p);
+
 	return 0;
 }
@@ -543,7 +630,9 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
 	src = &p->opcode.bundle;
 
 	flush_icache_range((unsigned long)p->ainsn.insn,
-			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
+			   (unsigned long)p->ainsn.insn +
+			   sizeof(kprobe_opcode_t) * MAX_INSN_SIZE);
 	switch (p->ainsn.slot) {
 	case 0:
 		dest->quad0.slot0 = src->quad0.slot0;
@@ -584,13 +673,13 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn, 0);
+	free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
 	mutex_unlock(&kprobe_mutex);
 }
 /*
  * We are resuming execution after a single step fault, so the pt_regs
  * structure reflects the register state after we executed the instruction
- * located in the kprobe (p->ainsn.insn.bundle). We still need to adjust
+ * located in the kprobe (p->ainsn.insn->bundle). We still need to adjust
  * the ip to point back to the original stack address. To set the IP address
  * to original stack address, handle the case where we need to fixup the
  * relative IP address and/or fixup branch register.
@@ -607,7 +696,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 	if (slot == 1 && bundle_encoding[template][1] == L)
 		slot = 2;
 
-	if (p->ainsn.inst_flag) {
+	if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) {
 
 		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
 			/* Fix relative IP address */
@@ -686,33 +775,12 @@ static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
 static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
 {
 	unsigned int slot = ia64_psr(regs)->ri;
-	unsigned int template, major_opcode;
-	unsigned long kprobe_inst;
 	unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
 	bundle_t bundle;
 
 	memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
-	template = bundle.quad0.template;
-
-	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
-	if (slot == 1 && bundle_encoding[template][1] == L)
-		slot++;
-
-	/* Get Kprobe probe instruction at given slot*/
-	get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);
-
-	/* For break instruction,
-	 * Bits 37:40 Major opcode to be zero
-	 * Bits 27:32 X6 to be zero
-	 * Bits 32:35 X3 to be zero
-	 */
-	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
-		/* Not a break instruction */
-		return 0;
-	}
-
-	/* Is a break instruction */
-	return 1;
+	return __is_ia64_break_inst(&bundle, slot);
 }
 
 static int __kprobes pre_kprobes_handler(struct die_args *args)
@@ -802,6 +870,19 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 	return 1;
 
 ss_probe:
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
+	if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
+		/* Boost up -- we can execute copied instructions directly */
+		ia64_psr(regs)->ri = p->ainsn.slot;
+		regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL;
+		/* turn single stepping off */
+		ia64_psr(regs)->ss = 0;
+
+		reset_current_kprobe();
+		preempt_enable_no_resched();
+		return 1;
+	}
+#endif
 	prepare_ss(p, regs);
 	kcb->kprobe_status = KPROBE_HIT_SS;
 	return 1;
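How the booster pieces fit together: MAX_INSN_SIZE grows from 1 to 2 bundles (see include/asm-ia64/kprobes.h below), so each probe's instruction slot holds the copied original bundle plus, when can_boost() approves, a long branch back into the instruction stream. On a boosted hit the break handler points cr_iip straight at the copy and the brl returns control, skipping the single-step exception entirely. A sketch of the resulting slot layout under those assumptions (names from the diff):

	/*
	 * p->ainsn.insn[0].bundle - copy of the original bundle at p->addr
	 * p->ainsn.insn[1].bundle - brl to (bundle_t *)(p->addr & ~0xf) + 1,
	 *                           written by set_brl_inst() only when
	 *                           INST_FLAG_BOOSTABLE is set
	 */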
arch/ia64/kernel/mca.c

@@ -69,6 +69,7 @@
  * 2007-04-27 Russ Anderson <rja@sgi.com>
  *	      Support multiple cpus going through OS_MCA in the same event.
  */
+#include <linux/jiffies.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/sched.h>
@@ -295,7 +296,8 @@ static void ia64_mlogbuf_dump_from_init(void)
 	if (mlogbuf_finished)
 		return;
 
-	if (mlogbuf_timestamp && (mlogbuf_timestamp + 30*HZ > jiffies)) {
+	if (mlogbuf_timestamp &&
+	    time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) {
 		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
 			" and the system seems to be messed up.\n");
 		ia64_mlogbuf_finish(0);
@@ -1311,20 +1313,17 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	} else {
 		/* Dump buffered message to console */
 		ia64_mlogbuf_finish(1);
-#ifdef CONFIG_KEXEC
-		atomic_set(&kdump_in_progress, 1);
-		monarch_cpu = -1;
-#endif
 	}
+
 	if (__get_cpu_var(ia64_mca_tr_reload)) {
 		mca_insert_tr(0x1); /*Reload dynamic itrs*/
 		mca_insert_tr(0x2); /*Reload dynamic itrs*/
 	}
+
 	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__func__);
 
-
 	if (atomic_dec_return(&mca_count) > 0) {
 		int i;
arch/ia64/kernel/perfmon.c

@@ -4204,10 +4204,10 @@ pfm_check_task_exist(pfm_context_t *ctx)
 	do_each_thread (g, t) {
 		if (t->thread.pfm_context == ctx) {
 			ret = 0;
-			break;
+			goto out;
 		}
 	} while_each_thread (g, t);
+out:
 	read_unlock(&tasklist_lock);
 
 	DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
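The break -> goto fix matters because do_each_thread()/while_each_thread() hide a nested loop: break only leaves the inner walk over one thread group, after which the outer for() keeps scanning the remaining groups. The 2.6-era expansion from <linux/sched.h> makes this visible:

	#define do_each_thread(g, t) \
		for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
	#define while_each_thread(g, t) \
		while ((t = next_thread(t)) != g)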
arch/ia64/kernel/setup.c

@@ -177,6 +177,29 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
 	return 0;
 }
 
+/*
+ * Similar to "filter_rsvd_memory()", but the reserved memory ranges
+ * are not filtered out.
+ */
+int __init
+filter_memory(unsigned long start, unsigned long end, void *arg)
+{
+	void (*func)(unsigned long, unsigned long, int);
+
+#if IGNORE_PFN0
+	if (start == PAGE_OFFSET) {
+		printk(KERN_WARNING "warning: skipping physical page 0\n");
+		start += PAGE_SIZE;
+		if (start >= end)
+			return 0;
+	}
+#endif
+	func = arg;
+	if (start < end)
+		call_pernode_memory(__pa(start), end - start, func);
+	return 0;
+}
+
 static void __init
 sort_regions (struct rsvd_region *rsvd_region, int max)
 {
arch/ia64/kernel/smpboot.c

@@ -400,9 +400,9 @@ smp_callin (void)
 	/* Setup the per cpu irq handling data structures */
 	__setup_vector_irq(cpuid);
 	cpu_set(cpuid, cpu_online_map);
-	unlock_ipi_calllock();
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 	spin_unlock(&vector_lock);
+	unlock_ipi_calllock();
 
 	smp_setup_percpu_timer();
arch/ia64/kernel/unaligned.c

@@ -13,6 +13,7 @@
  * 2001/08/13	Correct size of extended floats (float_fsz) from 16 to 10 bytes.
  * 2001/01/17	Add support emulation of unaligned kernel accesses.
  */
+#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/tty.h>
@@ -1290,7 +1291,7 @@ within_logging_rate_limit (void)
 {
 	static unsigned long count, last_time;
 
-	if (jiffies - last_time > 5*HZ)
+	if (time_after(jiffies, last_time + 5 * HZ))
 		count = 0;
 	if (count < 5) {
 		last_time = jiffies;
arch/ia64/mm/contig.c

@@ -45,8 +45,6 @@ void show_mem(void)
 	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
-	printk(KERN_INFO "Free swap:       %6ldkB\n",
-	       nr_swap_pages<<(PAGE_SHIFT-10));
 	printk(KERN_INFO "Node memory in pages:\n");
 	for_each_online_pgdat(pgdat) {
 		unsigned long present;
@@ -255,7 +253,7 @@ paging_init (void)
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	efi_memmap_walk(register_active_ranges, NULL);
+	efi_memmap_walk(filter_memory, register_active_ranges);
 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
 	if (max_gap < LARGE_GAP) {
 		vmem_map = (struct page *) 0;
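The two efi_memmap_walk() conversions here and in discontig.c are the consumer side of the setup.c change above: the walker now invokes filter_memory() with register_active_ranges passed through as the opaque arg, and filter_memory() forwards each usable range via call_pernode_memory(), which supplies the (start, len, nid) triple that register_active_ranges() now expects (see the mm/init.c and meminit.h hunks below).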
arch/ia64/mm/discontig.c

@@ -445,7 +445,7 @@ void __init find_memory(void)
 		mem_data[node].min_pfn = ~0UL;
 	}
 
-	efi_memmap_walk(register_active_ranges, NULL);
+	efi_memmap_walk(filter_memory, register_active_ranges);
 
 	/*
 	 * Initialize the boot memory maps in reverse order since that's
@@ -519,8 +519,6 @@ void show_mem(void)
 	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
-	printk(KERN_INFO "Free swap:       %6ldkB\n",
-	       nr_swap_pages<<(PAGE_SHIFT-10));
 	printk(KERN_INFO "Node memory in pages:\n");
 	for_each_online_pgdat(pgdat) {
 		unsigned long present;
arch/ia64/mm/init.c

@@ -58,7 +58,6 @@ __ia64_sync_icache_dcache (pte_t pte)
 {
 	unsigned long addr;
 	struct page *page;
-	unsigned long order;
 
 	page = pte_page(pte);
 	addr = (unsigned long) page_address(page);
@@ -66,12 +65,7 @@ __ia64_sync_icache_dcache (pte_t pte)
 	if (test_bit(PG_arch_1, &page->flags))
 		return;				/* i-cache is already coherent with d-cache */
 
-	if (PageCompound(page)) {
-		order = compound_order(page);
-		flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
-	}
-	else
-		flush_icache_range(addr, addr + PAGE_SIZE);
+	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
@@ -553,12 +547,10 @@ find_largest_hole (u64 start, u64 end, void *arg)
 #endif /* CONFIG_VIRTUAL_MEM_MAP */
 
 int __init
-register_active_ranges(u64 start, u64 end, void *arg)
+register_active_ranges(u64 start, u64 len, int nid)
 {
-	int nid = paddr_to_nid(__pa(start));
+	u64 end = start + len;
 
-	if (nid < 0)
-		nid = 0;
 #ifdef CONFIG_KEXEC
 	if (start > crashk_res.start && start < crashk_res.end)
 		start = crashk_res.end;
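The flush_icache_range() collapse works because compound_order() is 0 for a non-compound page, so PAGE_SIZE << compound_order(page) is exactly PAGE_SIZE in the common case and the PageCompound() branch was never needed.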
arch/ia64/sn/kernel/xpc_main.c

@@ -199,7 +199,7 @@ xpc_timeout_partition_disengage_request(unsigned long data)
 	struct xpc_partition *part = (struct xpc_partition *) data;
 
-	DBUG_ON(jiffies < part->disengage_request_timeout);
+	DBUG_ON(time_before(jiffies, part->disengage_request_timeout));
 
 	(void) xpc_partition_disengaged(part);
@@ -230,7 +230,7 @@ xpc_hb_beater(unsigned long dummy)
 	xpc_vars->heartbeat++;
 
-	if (jiffies >= xpc_hb_check_timeout) {
+	if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
 		wake_up_interruptible(&xpc_act_IRQ_wq);
 	}
@@ -270,7 +270,7 @@ xpc_hb_checker(void *ignore)
 		/* checking of remote heartbeats is skewed by IRQ handling */
-		if (jiffies >= xpc_hb_check_timeout) {
+		if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
 			dev_dbg(xpc_part, "checking remote heartbeats\n");
 			xpc_check_remote_hb();
@@ -305,7 +305,7 @@ xpc_hb_checker(void *ignore)
 		/* wait for IRQ or timeout */
 		(void) wait_event_interruptible(xpc_act_IRQ_wq,
 			    (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) ||
-					jiffies >= xpc_hb_check_timeout ||
+					time_after_eq(jiffies, xpc_hb_check_timeout) ||
 						(volatile int) xpc_exiting));
 	}
arch/ia64/sn/kernel/xpc_partition.c

@@ -877,7 +877,7 @@ xpc_partition_disengaged(struct xpc_partition *part)
 	disengaged = (xpc_partition_engaged(1UL << partid) == 0);
 	if (part->disengage_request_timeout) {
 		if (!disengaged) {
-			if (jiffies < part->disengage_request_timeout) {
+			if (time_before(jiffies, part->disengage_request_timeout)) {
 				/* timelimit hasn't been reached yet */
 				return 0;
 			}
include/asm-ia64/kprobes.h

@@ -30,8 +30,12 @@
 #include <asm/break.h>
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
-#define MAX_INSN_SIZE	1
+#define MAX_INSN_SIZE	2	/* last half is for kprobe-booster */
 #define BREAK_INST	(long)(__IA64_BREAK_KPROBE << 6)
+#define NOP_M_INST	(long)(1<<27)
+#define BRL_INST(i1, i2) ((long)((0xcL << 37) |	/* brl */ \
+				(0x1L << 12) |	/* many */ \
+				(((i1) & 1) << 36) | ((i2) << 13))) /* imm */
 
 typedef union cmp_inst {
 	struct {
@@ -112,6 +116,7 @@ struct arch_specific_insn {
 #define INST_FLAG_FIX_RELATIVE_IP_ADDR		1
 #define INST_FLAG_FIX_BRANCH_REG		2
 #define INST_FLAG_BREAK_INST			4
+#define INST_FLAG_BOOSTABLE			8
 	unsigned long inst_flag;
 	unsigned short target_br_reg;
 	unsigned short slot;
include/asm-ia64/meminit.h

@@ -35,6 +35,7 @@ extern void find_memory (void);
 extern void reserve_memory (void);
 extern void find_initrd (void);
 extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
+extern int filter_memory (unsigned long start, unsigned long end, void *arg);
 extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e);
 extern int find_max_min_low_pfn (unsigned long , unsigned long, void *);
@@ -56,7 +57,7 @@ extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
 #define IGNORE_PFN0	1	/* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
 
-extern int register_active_ranges(u64 start, u64 end, void *arg);
+extern int register_active_ranges(u64 start, u64 len, int nid);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define LARGE_GAP	0x40000000 /* Use virtual mem map if hole is > than this */
include/asm-ia64/pal.h

@@ -13,6 +13,7 @@
  * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
+ * Copyright (C) 2008 Silicon Graphics, Inc. (SGI)
 *
 * 99/10/01	davidm	Make sure we pass zero for reserved parameters.
 * 00/03/07	davidm	Updated pal_cache_flush() to be in sync with PAL v2.6.
@@ -73,6 +74,8 @@
 #define PAL_CACHE_SHARED_INFO	43	/* returns information on caches shared by logical processor */
 #define PAL_GET_HW_POLICY	48	/* Get current hardware resource sharing policy */
 #define PAL_SET_HW_POLICY	49	/* Set current hardware resource sharing policy */
+#define PAL_VP_INFO		50	/* Information about virtual processor features */
+#define PAL_MC_HW_TRACKING	51	/* Hardware tracking status */
 
 #define PAL_COPY_PAL		256	/* relocate PAL procedures and PAL PMI */
 #define PAL_HALT_INFO		257	/* return the low power capabilities of processor */
@@ -504,7 +507,8 @@ typedef struct pal_cache_check_info_s {
 			wiv		: 1,	/* Way field valid */
 			reserved2	: 1,
 			dp		: 1,	/* Data poisoned on MBE */
-			reserved3	: 8,
+			reserved3	: 6,
+			hlth		: 2,	/* Health indicator */
 
 			index		: 20,	/* Cache line index */
 			reserved4	: 2,
@@ -542,7 +546,9 @@ typedef struct pal_tlb_check_info_s {
 			dtc		: 1,	/* Fail in data TC */
 			itc		: 1,	/* Fail in inst. TC */
 			op		: 4,	/* Cache operation */
-			reserved3	: 30,
+			reserved3	: 6,
+			hlth		: 2,	/* Health indicator */
+			reserved4	: 22,
 
 			is		: 1,	/* instruction set (1 == ia32) */
 			iv		: 1,	/* instruction set field valid */
@@ -633,7 +639,8 @@ typedef struct pal_uarch_check_info_s {
 			way		: 6,	/* Way of structure */
 			wv		: 1,	/* way valid */
 			xv		: 1,	/* index valid */
-			reserved1	: 8,
+			reserved1	: 6,
+			hlth		: 2,	/* Health indicator */
 
 			index		: 8,	/* Index or set of the uarch
 						 * structure that failed.
 						 */
@@ -1213,14 +1220,12 @@ ia64_pal_mc_drain (void)
 /* Return the machine check dynamic processor state */
 static inline s64
-ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds)
+ia64_pal_mc_dynamic_state (u64 info_type, u64 dy_buffer, u64 *size)
 {
 	struct ia64_pal_retval iprv;
-	PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0);
+	PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, info_type, dy_buffer, 0);
 	if (size)
 		*size = iprv.v0;
-	if (pds)
-		*pds = iprv.v1;
 	return iprv.status;
 }
@@ -1281,15 +1286,41 @@ ia64_pal_mc_expected (u64 expected, u64 *previous)
 	return iprv.status;
 }
 
+typedef union pal_hw_tracking_u {
+	u64 pht_data;
+	struct {
+		u64 itc	: 4,		/* Instruction cache tracking */
+		    dct	: 4,		/* Date cache tracking */
+		    itt	: 4,		/* Instruction TLB tracking */
+		    ddt	: 4,		/* Data TLB tracking */
+		    reserved: 48;
+	} pal_hw_tracking_s;
+} pal_hw_tracking_u_t;
+
+/*
+ * Hardware tracking status.
+ */
+static inline s64
+ia64_pal_mc_hw_tracking (u64 *status)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_MC_HW_TRACKING, 0, 0, 0);
+	if (status)
+		*status = iprv.v0;
+	return iprv.status;
+}
+
 /* Register a platform dependent location with PAL to which it can save
 * minimal processor state in the event of a machine check or initialization
 * event.
 */
 static inline s64
-ia64_pal_mc_register_mem (u64 physical_addr)
+ia64_pal_mc_register_mem (u64 physical_addr, u64 size, u64 *req_size)
 {
 	struct ia64_pal_retval iprv;
-	PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0);
+	PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, size, 0);
+	if (req_size)
+		*req_size = iprv.v0;
 	return iprv.status;
 }
@@ -1631,6 +1662,29 @@ ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2)
 	return iprv.status;
 }
 
+typedef union pal_vp_info_u {
+	u64 pvi_val;
+	struct {
+		u64 index:		48,	/* virtual feature set info */
+		    vmm_id:		16;	/* feature set id */
+	} pal_vp_info_s;
+} pal_vp_info_u_t;
+
+/*
+ * Returns information about virtual processor features
+ */
+static inline s64
+ia64_pal_vp_info (u64 feature_set, u64 vp_buffer, u64 *vp_info, u64 *vmm_id)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_VP_INFO, feature_set, vp_buffer, 0);
+	if (vp_info)
+		*vp_info = iprv.v0;
+	if (vmm_id)
+		*vmm_id = iprv.v1;
+	return iprv.status;
+}
+
 typedef union pal_itr_valid_u {
 	u64	piv_val;
 	struct {
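ia64_pal_mc_register_mem() now passes the buffer size to PAL and surfaces whatever PAL returns in v0 through req_size; per the diff that is the required size when the area is too small (treat the exact status-code semantics as firmware-defined). A hedged usage sketch with a hypothetical caller:

	static s64 register_mc_save_area(u64 pa, u64 size)
	{
		u64 req = 0;
		s64 status = ia64_pal_mc_register_mem(pa, size, &req);
		if (status != 0)
			/* req (PAL's v0) reports the size PAL wants */
			printk(KERN_INFO "PAL min-state area: need %lu "
			       "bytes, gave %lu\n", req, size);
		return status;
	}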