[Vm-dev] [commit][3210] Add the x86-64 Alien plugin files.
commits at squeakvm.org
commits at squeakvm.org
Tue Jan 6 22:15:43 UTC 2015
Revision: 3210
Author: eliot
Date: 2015-01-06 14:15:43 -0800 (Tue, 06 Jan 2015)
Log Message:
-----------
Add the x86-64 Alien plugin files.
Added Paths:
-----------
trunk/platforms/Cross/plugins/IA32ABI/dax64business.h
trunk/platforms/Cross/plugins/IA32ABI/x64ia32abicc.c
Added: trunk/platforms/Cross/plugins/IA32ABI/dax64business.h
===================================================================
--- trunk/platforms/Cross/plugins/IA32ABI/dax64business.h (rev 0)
+++ trunk/platforms/Cross/plugins/IA32ABI/dax64business.h 2015-01-06 22:15:43 UTC (rev 3210)
@@ -0,0 +1,145 @@
+/*
+ * dax64business.h
+ *
+ * Written by Eliot Miranda 12/14
+ *
+ * Body of the various callIA32XXXReturn functions.
+ * Call a foreign function according to x64-ish ABI rules.
+ * N.B. In Cog Stack and Cogit VMs numArgs is negative to access args from
+ * the downward-growing stack.
+ */
+ long i, size;
+ sqInt funcAlien, resultMaybeAlien;
+ char *argvec;
+ char *argstart;
+
+/* First pass: compute the total byte size of the marshalled argument
+ * vector.  Aliens contribute their data size rounded up to a word,
+ * Floats a double, and everything else a single word.
+ * NOTE(review): sizeField() yields a long but is passed to abs(), which
+ * takes int -- an Alien size wider than 32 bits would be truncated on
+ * LP64; labs() looks safer.  TODO confirm.
+ */
+#if STACKVM /* Need to access args downwards from first arg */
+ if (numArgs < 0)
+ for (i = size = 0; --i >= numArgs;) {
+ sqInt arg = argVector[i+1];
+ if (objIsAlien(arg) && sizeField(arg))
+ size += moduloPOT(sizeof(long),abs(sizeField(arg)));
+ else if (interpreterProxy->isFloatObject(arg))
+ size += sizeof(double);
+ else /* assume an integer or pointer. check below. */
+ size += sizeof(long);
+ }
+ else
+#endif /* STACKVM */
+ for (i = numArgs, size = 0; --i >= 0;) {
+ sqInt arg = argVector[i];
+ if (objIsAlien(arg) && sizeField(arg))
+ size += moduloPOT(sizeof(long),abs(sizeField(arg)));
+ else if (interpreterProxy->isFloatObject(arg))
+ size += sizeof(double);
+ else /* assume an integer or pointer. check below. */
+ size += sizeof(long);
+ }
+
+ /* At point of call stack must be aligned to STACK_ALIGN_BYTES. So alloca
+ * at least enough for this plus the argvector, and start writing argvector
+ * at aligned point. Then just prior to call cut-back stack to aligned.
+ */
+ argvec = alloca(STACK_ALIGN_BYTES + moduloPOT(STACK_ALIGN_BYTES,size));
+ argvec = alignModuloPOT(STACK_ALIGN_BYTES, argvec);
+ argstart = argvec;
+
+/* Second pass: write each argument into the alloca'd argvec in call order.
+ * SmallIntegers are unboxed; Aliens copy their data (or a pointer for
+ * pointer/indirect Aliens); UnsafeAliens pass the address of their bits
+ * object's first indexable field; Floats are written as doubles; anything
+ * else must coerce to a 32-bit integer or the primitive fails.
+ */
+#if STACKVM /* Need to access args downwards from first arg */
+ if (numArgs < 0)
+ for (i = size = 0; --i >= numArgs;) {
+ sqInt arg = argVector[i+1];
+ if (isSmallInt(arg)) {
+ *(long *)argvec = intVal(arg);
+ argvec += sizeof(long);
+ }
+ else if (objIsAlien(arg)) {
+ long argByteSize;
+
+ if (!(size = sizeField(arg)))
+ size = argByteSize = sizeof(void *);
+ else
+ argByteSize = abs(size);
+ memcpy(argvec, startOfDataWithSize(arg,size), argByteSize);
+ argvec += moduloPOT(sizeof(long), argByteSize);
+ }
+ else if (objIsUnsafeAlien(arg)) {
+ sqInt bitsObj = interpreterProxy->fetchPointerofObject(0,arg);
+ void *v = interpreterProxy->firstIndexableField(bitsObj);
+ *(void **)argvec = v;
+ argvec += sizeof(long);
+ }
+ else if (interpreterProxy->isFloatObject(arg)) {
+ double d = interpreterProxy->floatValueOf(arg);
+ *(double *)argvec = d;
+ argvec += sizeof(double);
+ }
+ else {
+ /* Try signed 32-bit first; on failure clear the primitive error
+ * and retry as unsigned 32-bit.  NOTE(review): only 32-bit
+ * integer oops are accepted here even though slots are
+ * sizeof(long) wide -- 64-bit integer objects fail; presumably
+ * intentional parity with the IA32 plugin, verify.
+ */
+ long v = interpreterProxy->signed32BitValueOf(arg);
+ if (interpreterProxy->failed()) {
+ interpreterProxy->primitiveFailFor(0);
+ v = interpreterProxy->positive32BitValueOf(arg);
+ if (interpreterProxy->failed())
+ return PrimErrBadArgument;
+ }
+ *(long *)argvec = v;
+ argvec += sizeof(long);
+ }
+ }
+ else
+#endif /* STACKVM */
+ for (i = 0; i < numArgs; i++) {
+ sqInt arg = argVector[i];
+ if (isSmallInt(arg)) {
+ *(long *)argvec = intVal(arg);
+ argvec += sizeof(long);
+ }
+ else if (objIsAlien(arg)) {
+ long argByteSize;
+
+ if (!(size = sizeField(arg)))
+ size = argByteSize = sizeof(void *);
+ else
+ argByteSize = abs(size);
+ memcpy(argvec, startOfDataWithSize(arg,size), argByteSize);
+ argvec += moduloPOT(sizeof(long), argByteSize);
+ }
+ else if (objIsUnsafeAlien(arg)) {
+ sqInt bitsObj = interpreterProxy->fetchPointerofObject(0,arg);
+ void *v = interpreterProxy->firstIndexableField(bitsObj);
+ *(void **)argvec = v;
+ argvec += sizeof(long);
+ }
+ else if (interpreterProxy->isFloatObject(arg)) {
+ double d = interpreterProxy->floatValueOf(arg);
+ *(double *)argvec = d;
+ argvec += sizeof(double);
+ }
+ else {
+ long v = interpreterProxy->signed32BitValueOf(arg);
+ if (interpreterProxy->failed()) {
+ interpreterProxy->primitiveFailFor(0);
+ v = interpreterProxy->positive32BitValueOf(arg);
+ if (interpreterProxy->failed())
+ return PrimErrBadArgument;
+ }
+ *(long *)argvec = v;
+ argvec += sizeof(long);
+ }
+ }
+
+ /* f and r are declared by the including callIA32XXXReturn function with
+ * the appropriate return type (long/float/double).
+ * NOTE(review): all arguments are written to the stack and f() is called
+ * with no prototyped parameters -- the SysV x64 ABI passes the first six
+ * integer and eight FP arguments in registers, so a callee expecting
+ * register arguments will not see these stack slots.  Presumably the
+ * register loading happens elsewhere or callees are stack-only; verify.
+ */
+ funcAlien = interpreterProxy->stackValue(funcOffset);
+ f = *(void **)startOfParameterData(funcAlien);
+ /* cut stack back to start of aligned args */
+ setsp(argstart);
+ r = f();
+ /* post call need to refresh stack pointer in case of call-back and GC. */
+ resultMaybeAlien = interpreterProxy->stackValue(resultOffset);
+ if (objIsAlien(resultMaybeAlien)) {
+ /* Copy the raw return value into the result Alien, clamped to the
+ * smaller of the Alien's size and sizeof(r). */
+ if (!(size = sizeField(resultMaybeAlien)))
+ size = sizeof(void *);
+ memcpy(startOfDataWithSize(resultMaybeAlien,size),
+ &r,
+ min((unsigned)abs(size), sizeof(r)));
+ }
+
+ return PrimNoErr;
Added: trunk/platforms/Cross/plugins/IA32ABI/x64ia32abicc.c
===================================================================
--- trunk/platforms/Cross/plugins/IA32ABI/x64ia32abicc.c (rev 0)
+++ trunk/platforms/Cross/plugins/IA32ABI/x64ia32abicc.c 2015-01-06 22:15:43 UTC (rev 3210)
@@ -0,0 +1,289 @@
+/*
+ * x64ia32abicc.c
+ *
+ * Support for Call-outs and Call-backs from the Plugin on x86_64.
+ * Written by Eliot Miranda 12/14
+ *
+ * Based on
+ * System V Application Binary Interface
+ * AMD64 Architecture Processor Supplement
+ * Draft Version 0.99.6
+ * here referred to as x64ABI
+ */
+
+#if defined(_MSC_VER) || defined(__MINGW32__)
+# include "windows.h" /* for GetSystemInfo & VirtualAlloc */
+#elif __APPLE__ && __MACH__
+# include <sys/mman.h> /* for mprotect */
+# if OBJC_DEBUG /* define this to get debug info for struct objc_class et al */
+# include <objc/objc.h>
+# include <objc/objc-class.h>
+
+struct objc_class *baz;
+
+void setbaz(void *p) { baz = p; }
+void *getbaz() { return baz; }
+# endif
+# include <stdlib.h> /* for valloc */
+# include <sys/mman.h> /* for mprotect */
+#else
+# include <stdlib.h> /* for valloc */
+# include <sys/mman.h> /* for mprotect */
+#endif
+
+#include <string.h> /* for memcpy et al */
+#include <setjmp.h>
+#include <stdio.h> /* for fprintf(stderr,...) */
+
+#include "vmCallback.h"
+#include "sqAssert.h"
+#include "sqMemoryAccess.h"
+#include "sqVirtualMachine.h"
+#include "ia32abi.h"
+
+#if !defined(min)
+# define min(a,b) ((a) < (b) ? (a) : (b))
+#endif
+
+#ifdef SQUEAK_BUILTIN_PLUGIN
+extern
+#endif
+struct VirtualMachine* interpreterProxy;
+
+/* setsp/getsp manipulate %rsp directly so the call-out code can cut the
+ * stack back to the marshalled argument vector before the call.
+ * NOTE(review): the non-GNUC fallback below makes setsp a no-op, leaving
+ * the alloca'd arguments above the stack pointer at call time -- verify
+ * call-outs actually work on such compilers.
+ */
+#if __GNUC__
+# define setsp(sp) asm volatile ("movq %0,%%rsp" : : "m"(sp))
+# define getsp() ({ void *sp; asm volatile ("movq %%rsp,%0" : "=r"(sp) : ); sp;})
+#endif
+/* NOTE(review): SysV x64 ABI 3.2.2 requires 16-byte alignment at the point
+ * of call (32 only when __m256 arguments are passed); 32 here is
+ * conservative but should be confirmed against the cited draft. */
+#define STACK_ALIGN_BYTES 32 /* x64ABI 3.2.2 */
+
+#if !defined(setsp)
+# define setsp(ignored) 0
+#endif
+
+/* moduloPOT rounds v up to a multiple of m, where m is a power of two;
+ * alignModuloPOT does the same for a pointer value. */
+#define moduloPOT(m,v) (((v)+(m)-1) & ~((m)-1))
+#define alignModuloPOT(m,v) ((void *)moduloPOT(m,(unsigned long)(v)))
+
+#define objIsAlien(anOop) (interpreterProxy->includesBehaviorThatOf(interpreterProxy->fetchClassOf(anOop), interpreterProxy->classAlien()))
+#define objIsUnsafeAlien(anOop) (interpreterProxy->includesBehaviorThatOf(interpreterProxy->fetchClassOf(anOop), interpreterProxy->classUnsafeAlien()))
+
+/* Alien layout: a long size field follows the header; negative size means
+ * the data field holds a pointer to external data (an "indirect" Alien). */
+#define sizeField(alien) (*(long *)pointerForOop((sqInt)(alien) + BaseHeaderSize))
+#define dataPtr(alien) pointerForOop((sqInt)(alien) + BaseHeaderSize + BytesPerOop)
+#define isIndirect(alien) (sizeField(alien) < 0)
+#define startOfParameterData(alien) (isIndirect(alien) \
+ ? *(void **)dataPtr(alien) \
+ : (void *)dataPtr(alien))
+#define isIndirectSize(size) ((size) < 0)
+#define startOfDataWithSize(alien,size) (isIndirectSize(size) \
+ ? *(void **)dataPtr(alien) \
+ : (void *)dataPtr(alien))
+
+/* SmallInteger tag test/untag for a 3-bit tag scheme (oop & 7) == 1. */
+#define isSmallInt(oop) (((oop)&7)==1)
+#define intVal(oop) (((long)(oop))>>3)
+
+/*
+ * Call a foreign function that answers an integral result in %rax (and
+ * possibly %rdx?) according to x64-ish ABI rules.
+ * f and r declared here are referenced by the included dax64business.h body.
+ */
+sqInt
+callIA32IntegralReturn(SIGNATURE) {
+long (*f)(), r;
+#include "dax64business.h"
+}
+
+/*
+ * Call a foreign function that answers a single-precision floating-point
+ * result in %xmm0 according to x64-ish ABI rules.
+ * f and r declared here are referenced by the included dax64business.h body.
+ */
+sqInt
+callIA32FloatReturn(SIGNATURE) { float (*f)(), r;
+#include "dax64business.h"
+}
+
+/*
+ * Call a foreign function that answers a double-precision floating-point
+ * result in %xmm0 according to x64-ish ABI rules.
+ * f and r declared here are referenced by the included dax64business.h body.
+ */
+sqInt
+callIA32DoubleReturn(SIGNATURE) { double (*f)(), r;
+#include "dax64business.h"
+}
+
+/* Queueing order for callback returns. To ensure that callback returns occur
+ * in LIFO order we provide mostRecentCallbackContext which is tested by the
+ * return primitive primReturnFromContextThrough. Note that in the threaded VM
+ * this does not have to be thread-specific or locked since it is within the
+ * bounds of the ownVM/disownVM pair.
+ */
+static VMCallbackContext *mostRecentCallbackContext = 0;
+
+VMCallbackContext *
+getMostRecentCallbackContext() { return mostRecentCallbackContext; }
+
+/* Internal accessors; the t parameter of getRMCC is ignored. */
+#define getRMCC(t) mostRecentCallbackContext
+#define setRMCC(t) (mostRecentCallbackContext = (void *)(t))
+
+/*
+ * Entry-point for call-back thunks. Args are thunk address and stack pointer,
+ * where the stack pointer is pointing one word below the return address of the
+ * thunk's callee, 4 bytes below the thunk's first argument. The stack is:
+ * callback
+ * arguments
+ * retpc (thunk) <--\
+ * address of retpc-/ <--\
+ * address of address of ret pc-/
+ * thunkp
+ * esp->retpc (thunkEntry)
+ *
+ * NOTE(review): the "4 bytes"/esp wording above reads as carried over from
+ * the IA32 version; on x86-64 stack slots are 8 bytes and the register is
+ * %rsp -- confirm and update the diagram.
+ *
+ * This function's roles are to use setjmp/longjmp to save the call point
+ * and return to it, and to return any of the various values from the callback.
+ *
+ * To support x86-64, which has 6 register arguments, the function takes 8
+ * arguments, the 6 register args as longs, followed by the thunkp and stackp
+ * passed on the stack. The register args get copied into a struct on the
+ * stack. A pointer to the struct is then passed as an element of the
+ * VMCallbackContext.
+ */
+#if defined(__GNUC__)
+# define getfparg(n) asm volatile ("movq %%xmm" #n ", %0" : "=m"(fpargs[n]) : )
+#endif
+/* NOTE(review): getfparg has no non-GNUC definition, so the getfparg(0..7)
+ * calls below will not compile under MSVC et al -- a fallback is needed. */
+
+long
+thunkEntry(long a0, long a1, long a2, long a3, long a4, long a5,
+ void *thunkp, long *stackp)
+{
+ VMCallbackContext vmcc;
+ VMCallbackContext *previousCallbackContext;
+ long flags, returnType;
+ long intargs[6];
+ double fpargs[8];
+
+ /* Capture the six integer register arguments. */
+ intargs[0] = a0;
+ intargs[1] = a1;
+ intargs[2] = a2;
+ intargs[3] = a3;
+ intargs[4] = a4;
+ intargs[5] = a5;
+
+ /* Capture the eight FP register arguments from %xmm0..%xmm7.
+ * NOTE(review): this assumes the compiler has not touched the xmm
+ * registers between function entry and these statements -- fragile;
+ * verify with the generated code. */
+ getfparg(0);
+ getfparg(1);
+ getfparg(2);
+ getfparg(3);
+ getfparg(4);
+ getfparg(5);
+ getfparg(6);
+ getfparg(7);
+
+ vmcc.intregargsp = intargs;
+ vmcc.floatregargsp = fpargs;
+
+ if ((flags = interpreterProxy->ownVM(0)) < 0) {
+ fprintf(stderr,"Warning; callback failed to own the VM\n");
+ return -1;
+ }
+
+ if (!(returnType = setjmp(vmcc.trampoline))) {
+ previousCallbackContext = getRMCC();
+ setRMCC(&vmcc);
+ vmcc.thunkp = thunkp;
+ vmcc.stackp = stackp + 2; /* skip address of retpc & retpc (thunk) */
+ /* NOTE(review): these two assignments zero the register-argument
+ * pointers that were just set to intargs/fpargs above, so callbacks
+ * apparently cannot see their register arguments -- looks like a
+ * bug (or a deliberate temporary disable); verify. */
+ vmcc.intregargsp = 0;
+ vmcc.floatregargsp = 0;
+ interpreterProxy->sendInvokeCallbackContext(&vmcc);
+ fprintf(stderr,"Warning; callback failed to invoke\n");
+ setRMCC(previousCallbackContext);
+ interpreterProxy->disownVM(flags);
+ return -1;
+ }
+ setRMCC(previousCallbackContext);
+ interpreterProxy->disownVM(flags);
+
+ switch (returnType) {
+
+ case retword: return vmcc.rvs.valword;
+
+ /* NOTE(review): retword64 loads the high word into %edx and returns the
+ * low word -- the IA32 convention.  On x86-64 a 64-bit result is
+ * returned entirely in %rax; this case looks carried over unchanged
+ * from ia32abicc.c and needs review. */
+ case retword64: {
+ long vhigh = vmcc.rvs.valleint64.high;
+#if _MSC_VER
+ _asm mov edx, dword ptr vhigh;
+#elif __GNUC__
+ asm("mov %0,%%edx" : : "m"(vhigh));
+#else
+# error need to load edx with vmcc.rvs.valleint64.high on this compiler
+#endif
+ return vmcc.rvs.valleint64.low;
+ }
+
+ /* NOTE(review): retdouble returns via the x87 stack (fld), but the SysV
+ * x64 ABI returns doubles in %xmm0 -- callers of the callback would
+ * read garbage; this also looks inherited from the IA32 version. */
+ case retdouble: {
+ double valflt64 = vmcc.rvs.valflt64;
+#if _MSC_VER
+ _asm fld qword ptr valflt64;
+#elif __GNUC__
+ asm("fldl %0" : : "m"(valflt64));
+#else
+# error need to load %f0 with vmcc.rvs.valflt64 on this compiler
+#endif
+ return 0;
+ }
+
+ case retstruct: memcpy( (void *)(stackp[1]),
+ vmcc.rvs.valstruct.addr,
+ vmcc.rvs.valstruct.size);
+ return stackp[1];
+ }
+ fprintf(stderr,"Warning; invalid callback return type\n");
+ return 0;
+}
+
+/*
+ * Thunk allocation support. Since thunks must be executable and some OSs
+ * may not provide default execute permission on memory returned by malloc
+ * we must provide memory that is guaranteed to be executable. The abstraction
+ * is to answer an Alien that references an executable piece of memory that
+ * is some (possibly unitary) multiple of the pagesize.
+ *
+ * We assume the Smalltalk image code will manage subdividing the executable
+ * page amongst thunks so there is no need to free these pages, since the image
+ * will recycle parts of the page for reclaimed thunks.
+ */
+#if defined(_MSC_VER) || defined(__MINGW32__)
+static unsigned long pagesize = 0; /* cached after first GetSystemInfo call */
+#endif
+
+/* Answer one zero-filled, executable page, storing its size through *size.
+ * Answers NULL (and leaves *size untouched) on failure. */
+void *
+allocateExecutablePage(long *size)
+{
+ void *mem;
+
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#if !defined(MEM_TOP_DOWN)
+# define MEM_TOP_DOWN 0x100000
+#endif
+ if (!pagesize) {
+ SYSTEM_INFO sysinf;
+
+ GetSystemInfo(&sysinf);
+
+ pagesize = sysinf.dwPageSize;
+ }
+ /* N.B. VirtualAlloc MEM_COMMIT initializes the memory returned to zero. */
+ mem = VirtualAlloc( 0,
+ pagesize,
+ MEM_COMMIT | MEM_TOP_DOWN,
+ PAGE_EXECUTE_READWRITE);
+ if (mem)
+ *size = pagesize;
+#else
+ /* NOTE(review): getpagesize() needs <unistd.h>, which is not included in
+ * this file -- presumably pulled in transitively; verify.  valloc is
+ * obsolescent; mmap with PROT_EXEC would be the modern route. */
+ long pagesize = getpagesize();
+
+ if (!(mem = valloc(pagesize)))
+ return 0;
+
+ memset(mem, 0, pagesize);
+ if (mprotect(mem, pagesize, PROT_READ | PROT_WRITE | PROT_EXEC) < 0) {
+ free(mem);
+ return 0;
+ }
+ *size = pagesize;
+#endif
+ return mem;
+}
More information about the Vm-dev
mailing list