Skip to content

Commit

Permalink
Merge pull request #84 from snaury/fix-sparc
Browse files Browse the repository at this point in the history
Fix stack switching on sparc
  • Loading branch information
snaury committed May 16, 2015
2 parents 3fe4ebc + c51f16c commit fb9cf72
Show file tree
Hide file tree
Showing 2 changed files with 70 additions and 71 deletions.
97 changes: 52 additions & 45 deletions greenlet.c
Original file line number Diff line number Diff line change
Expand Up @@ -308,6 +308,50 @@ static int (*g_initialstub)(void*);
} while(0)
#endif

/*
 * the following macros are spliced into the OS/compiler
 * specific code, in order to simplify maintenance.
 */

/* Save the current greenlet's C stack to the heap.
 * NOTE(review): the expansion contains bare "return" statements and
 * must therefore be spliced into a function returning int:
 *   -1  when slp_save_state() fails,
 *    1  when the switch target is not active (nothing to restore).
 * On the fall-through path, stsizediff receives the distance between
 * the target's saved stack start and the current stack reference. */
#define SLP_SAVE_STATE(stackref, stsizediff) \
stackref += STACK_MAGIC; \
if (slp_save_state((char*)stackref)) return -1; \
if (!PyGreenlet_ACTIVE(ts_target)) return 1; \
stsizediff = ts_target->stack_start - (char*)stackref

/* Copy the target greenlet's saved stack back onto the C stack. */
#define SLP_RESTORE_STATE() \
slp_restore_state()


#define SLP_EVAL
#define slp_switch GREENLET_NOINLINE(slp_switch)
#include "slp_platformselect.h"
#undef slp_switch

#ifndef STACK_MAGIC
#error "greenlet needs to be ported to this platform, or taught how to detect your compiler properly."
#endif /* !STACK_MAGIC */

#ifdef EXTERNAL_ASM
/* CCP addition: Make these functions, to be called from assembler.
* The token include file for the given platform should enable the
* EXTERNAL_ASM define so that this is included.
*/

/* Entry point for the external assembler slp_switch: spill the current
 * stack to the heap and report how far the stack pointer must move.
 * Returns the stack-size difference on success; note that the
 * SLP_SAVE_STATE macro expands to code containing its own "return"
 * statements, so this function also returns -1 when saving the stack
 * fails and 1 when the switch target is not active. */
intptr_t slp_save_state_asm(intptr_t *ref) {
/* receives the stack-pointer adjustment computed by the macro */
intptr_t diff;
SLP_SAVE_STATE(ref, diff);
return diff;
}

/* Entry point for the external assembler slp_switch: copy the target
 * greenlet's saved stack from the heap back onto the C stack. */
void slp_restore_state_asm(void) {
SLP_RESTORE_STATE();
}

extern int slp_switch(void);

#endif

/***********************************************************/

static int g_save(PyGreenlet* g, char* stop)
Expand Down Expand Up @@ -345,6 +389,10 @@ static void GREENLET_NOINLINE(slp_restore_state)(void)
PyGreenlet* g = ts_target;
PyGreenlet* owner = ts_current;

#ifdef SLP_BEFORE_RESTORE_STATE
SLP_BEFORE_RESTORE_STATE();
#endif

/* Restore the heap copy back into the C stack */
if (g->stack_saved != 0) {
memcpy(g->stack_start, g->stack_copy, g->stack_saved);
Expand All @@ -370,6 +418,10 @@ static int GREENLET_NOINLINE(slp_save_state)(char* stackref)
else
owner->stack_start = stackref;

#ifdef SLP_BEFORE_SAVE_STATE
SLP_BEFORE_SAVE_STATE();
#endif

while (owner->stack_stop < target_stop) {
/* ts_current is entirely within the area to free */
if (g_save(owner, owner->stack_stop))
Expand All @@ -383,51 +435,6 @@ static int GREENLET_NOINLINE(slp_save_state)(char* stackref)
return 0;
}


/*
* the following macros are spliced into the OS/compiler
* specific code, in order to simplify maintenance.
*/

#define SLP_SAVE_STATE(stackref, stsizediff) \
stackref += STACK_MAGIC; \
if (slp_save_state((char*)stackref)) return -1; \
if (!PyGreenlet_ACTIVE(ts_target)) return 1; \
stsizediff = ts_target->stack_start - (char*)stackref

#define SLP_RESTORE_STATE() \
slp_restore_state()


#define SLP_EVAL
#define slp_switch GREENLET_NOINLINE(slp_switch)
#include "slp_platformselect.h"
#undef slp_switch

#ifndef STACK_MAGIC
#error "greenlet needs to be ported to this platform, or taught how to detect your compiler properly."
#endif /* !STACK_MAGIC */

#ifdef EXTERNAL_ASM
/* CCP addition: Make these functions, to be called from assembler.
* The token include file for the given platform should enable the
* EXTERNAL_ASM define so that this is included.
*/

intptr_t slp_save_state_asm(intptr_t *ref) {
intptr_t diff;
SLP_SAVE_STATE(ref, diff);
return diff;
}

void slp_restore_state_asm(void) {
SLP_RESTORE_STATE();
}

extern int slp_switch(void);

#endif

static int g_switchstack(void)
{
/* Perform a stack switch according to some global variables
Expand Down
44 changes: 18 additions & 26 deletions platform/switch_sparc_sun_gcc.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
* this is the internal transfer function.
*
* HISTORY
* 16-May-15 Alexey Borzenkov <snaury@gmail.com>
* Move stack spilling code inside save/restore functions
* 30-Aug-13 Floris Bruynooghe <flub@devork.be>
Clean the register windows again before returning.
This does not clobber the PIC register as it leaves
Expand Down Expand Up @@ -33,32 +35,29 @@


#define STACK_MAGIC 0
#define ST_FLUSH_WINDOWS 0x03
#define ST_CLEAN_WINDOWS 0x04


#if defined(__sparcv9)
#define SLP_FLUSHW __asm__ volatile ("flushw")
#else
#define SLP_FLUSHW __asm__ volatile ("ta 3") /* ST_FLUSH_WINDOWS */
#endif

/* On sparc we need to spill register windows inside save/restore functions */
#define SLP_BEFORE_SAVE_STATE() SLP_FLUSHW
#define SLP_BEFORE_RESTORE_STATE() SLP_FLUSHW


static int
slp_switch(void)
{
register int err;
register int *stackref, stsizediff;

/* Flush SPARC register windows onto the stack, so they can be used to
* restore the registers after the stack has been switched out and
* restored. This also ensures the current window (pointed at by
* the CWP register) is the only window left in the registers
* (CANSAVE=0, CANRESTORE=0), that means the registers of our
* caller are no longer there and when we return they will always
* be loaded from the stack by a window underflow/fill trap.
*
* On SPARC v9 and above it might be more efficient to use the
* FLUSHW instruction instead of TA ST_FLUSH_WINDOWS. But that
* requires the correct -mcpu flag to gcc.
*
* Then put the stack pointer into stackref. */
__asm__ volatile (
"ta %1\n\t"
"mov %%sp, %0"
: "=r" (stackref) : "i" (ST_FLUSH_WINDOWS));
/* Put current stack pointer into stackref.
* Register spilling is done in save/restore.
*/
__asm__ volatile ("mov %%sp, %0" : "=r" (stackref));

{
/* Thou shalt put SLP_SAVE_STATE into a local block */
Expand All @@ -74,13 +73,6 @@ slp_switch(void)
/* Copy new stack from its save store on the heap */
SLP_RESTORE_STATE();

/* No need to set the return value register, the return
* statement below does this just fine. After returning a restore
* instruction is given and a fill-trap will load all the registers
* from the stack if needed. However in a multi-threaded environment
* we can't guarantee the other register windows are fine to use by
* their threads anymore, so tell the CPU to clean them. */
__asm__ volatile ("ta %0" : : "i" (ST_CLEAN_WINDOWS));
__asm__ volatile ("mov %1, %0" : "=r" (err) : "i" (0));
return err;
}
Expand Down

0 comments on commit fb9cf72

Please sign in to comment.