Fix GCC-dependent code

git-svn-id: https://yap.svn.sf.net/svnroot/yap/trunk@367 b08c6af1-5177-4d33-ba66-4b1c6b8b522a
This commit is contained in:
vsc 2002-02-18 15:26:41 +00:00
parent 2b8c0a2961
commit f3756717fb
2 changed files with 83 additions and 79 deletions

View File

@ -22,7 +22,7 @@ static char SccsId[] = "%W% %G%";
#include "yapio.h"
#define EARLY_RESET 1
#if !defined(TABLING) && HAVE_GCC
#if !defined(TABLING)
#define EASY_SHUNTING 1
#endif
#define HYBRID_SCHEME 1
@ -291,7 +291,8 @@ GC_ALLOC_NEW_MASPACE(void)
if ((char *)gc_ma_h_top > TrailTop-1024)
growtrail(64 * 1024L);
gc_ma_h_top++;
cont_top0 = cont_top = (cont *)gc_ma_h_top;
cont_top = (cont *)gc_ma_h_top;
sTR = (tr_fr_ptr)cont_top;
return(new);
}
@ -342,7 +343,8 @@ GC_NEW_MAHASH(gc_ma_h_inner_struct *top) {
time = ++timestamp;
}
gc_ma_h_top = top;
cont_top0 = cont_top = (cont *)gc_ma_h_top;
cont_top = (cont *)gc_ma_h_top;
sTR = (tr_fr_ptr)cont_top;
live_list = NULL;
}
@ -1160,7 +1162,8 @@ mark_trail(tr_fr_ptr trail_ptr, tr_fr_ptr trail_base, CELL *gc_H, choiceptr gc_B
else
TrailTerm(endsTR) = (CELL)nsTR;
endsTR = nsTR;
cont_top0 = cont_top = (cont *)(nsTR+3);
cont_top = (cont *)(nsTR+3);
sTR = (tr_fr_ptr)cont_top;
gc_ma_h_top = (gc_ma_h_inner_struct *)(nsTR+3);
RESET_VARIABLE(cptr);
MARK(cptr);
@ -1239,7 +1242,7 @@ mark_trail(tr_fr_ptr trail_ptr, tr_fr_ptr trail_base, CELL *gc_H, choiceptr gc_B
live_list = live_list->ma_list;
}
#endif
cont_top0 = old_cont_top0;
sTR = (tr_fr_ptr)old_cont_top0;
#ifdef EASY_SHUNTING
while (begsTR != NULL) {
tr_fr_ptr newsTR = (tr_fr_ptr)TrailTerm(begsTR);
@ -2536,7 +2539,8 @@ marking_phase(tr_fr_ptr old_TR, CELL *current_env, yamop *curp, CELL *max)
#ifdef EASY_SHUNTING
sTR0 = (tr_fr_ptr)db_vec;
#endif
cont_top0 = cont_top = (cont *)db_vec;
cont_top = (cont *)db_vec;
sTR = (tr_fr_ptr)db_vec;
/* These two must be marked first so that our trail optimisation won't lose
values */
mark_regs(old_TR); /* active registers & trail */

View File

@ -116,89 +116,89 @@ void rw_lock_voodoo(void) {
/* code taken from the Linux kernel, it handles shifting between locks */
/* Read/writer locks, as usual this is overly clever to make it as fast as possible. */
/* caches... */
__asm__ __volatile__("
___rw_read_enter_spin_on_wlock:
orcc %g2, 0x0, %g0
be,a ___rw_read_enter
ldstub [%g1 + 3], %g2
b ___rw_read_enter_spin_on_wlock
ldub [%g1 + 3], %g2
___rw_read_exit_spin_on_wlock:
orcc %g2, 0x0, %g0
be,a ___rw_read_exit
ldstub [%g1 + 3], %g2
b ___rw_read_exit_spin_on_wlock
ldub [%g1 + 3], %g2
___rw_write_enter_spin_on_wlock:
orcc %g2, 0x0, %g0
be,a ___rw_write_enter
ldstub [%g1 + 3], %g2
b ___rw_write_enter_spin_on_wlock
ld [%g1], %g2
__asm__ __volatile__(
"___rw_read_enter_spin_on_wlock:"
" orcc %g2, 0x0, %g0"
" be,a ___rw_read_enter"
" ldstub [%g1 + 3], %g2"
" b ___rw_read_enter_spin_on_wlock"
" ldub [%g1 + 3], %g2"
"___rw_read_exit_spin_on_wlock:"
" orcc %g2, 0x0, %g0"
" be,a ___rw_read_exit"
" ldstub [%g1 + 3], %g2"
" b ___rw_read_exit_spin_on_wlock"
" ldub [%g1 + 3], %g2"
"___rw_write_enter_spin_on_wlock:"
" orcc %g2, 0x0, %g0"
" be,a ___rw_write_enter"
" ldstub [%g1 + 3], %g2"
" b ___rw_write_enter_spin_on_wlock"
" ld [%g1], %g2"
""
" .globl ___rw_read_enter"
"___rw_read_enter:"
" orcc %g2, 0x0, %g0"
" bne,a ___rw_read_enter_spin_on_wlock"
" ldub [%g1 + 3], %g2"
" ld [%g1], %g2"
" add %g2, 1, %g2"
" st %g2, [%g1]"
" retl"
" mov %g4, %o7"
.globl ___rw_read_enter
___rw_read_enter:
orcc %g2, 0x0, %g0
bne,a ___rw_read_enter_spin_on_wlock
ldub [%g1 + 3], %g2
ld [%g1], %g2
add %g2, 1, %g2
st %g2, [%g1]
retl
mov %g4, %o7
" .globl ___rw_read_exit"
"___rw_read_exit:"
" orcc %g2, 0x0, %g0"
" bne,a ___rw_read_exit_spin_on_wlock"
" ldub [%g1 + 3], %g2"
" ld [%g1], %g2"
" sub %g2, 0x1ff, %g2"
" st %g2, [%g1]"
" retl"
" mov %g4, %o7"
.globl ___rw_read_exit
___rw_read_exit:
orcc %g2, 0x0, %g0
bne,a ___rw_read_exit_spin_on_wlock
ldub [%g1 + 3], %g2
ld [%g1], %g2
sub %g2, 0x1ff, %g2
st %g2, [%g1]
retl
mov %g4, %o7
.globl ___rw_write_enter
___rw_write_enter:
orcc %g2, 0x0, %g0
bne ___rw_write_enter_spin_on_wlock
ld [%g1], %g2
andncc %g2, 0xff, %g0
bne,a ___rw_write_enter_spin_on_wlock
stb %g0, [%g1 + 3]
retl
mov %g4, %o7
");
" .globl ___rw_write_enter"
"___rw_write_enter:"
" orcc %g2, 0x0, %g0"
" bne ___rw_write_enter_spin_on_wlock"
" ld [%g1], %g2"
" andncc %g2, 0xff, %g0"
" bne,a ___rw_write_enter_spin_on_wlock"
" stb %g0, [%g1 + 3]"
" retl"
" mov %g4, %o7"
);
}
#endif /* sparc */
#ifdef i386
asm(
"
.align 4
.globl __write_lock_failed
__write_lock_failed:
lock; addl $" RW_LOCK_BIAS_STR ",(%eax)
1: cmpl $" RW_LOCK_BIAS_STR ",(%eax)
jne 1b
lock; subl $" RW_LOCK_BIAS_STR ",(%eax)
jnz __write_lock_failed
ret
".align 4"
".globl __write_lock_failed"
"__write_lock_failed:"
" lock; addl $" RW_LOCK_BIAS_STR ",(%eax)"
"1: cmpl $" RW_LOCK_BIAS_STR ",(%eax)"
" jne 1b"
""
" lock; subl $" RW_LOCK_BIAS_STR ",(%eax)"
" jnz __write_lock_failed"
" ret"
""
""
".align 4"
".globl __read_lock_failed"
"__read_lock_failed:"
" lock ; incl (%eax)"
"1: cmpl $1,(%eax)"
" js 1b"
""
" lock ; decl (%eax)"
" js __read_lock_failed"
" ret"
.align 4
.globl __read_lock_failed
__read_lock_failed:
lock ; incl (%eax)
1: cmpl $1,(%eax)
js 1b
lock ; decl (%eax)
js __read_lock_failed
ret
"
);
#endif /* i386 */
#endif /* YAPOR */