mirror of
https://github.com/mkxp-z/mkxp-z.git
synced 2025-08-23 15:23:44 +02:00
Apply patch from ruby/ruby#12995 in libretro builds
This commit is contained in:
parent
de5216c3ce
commit
12f12d91a1
2 changed files with 6 additions and 173 deletions
|
@@ -135,12 +135,16 @@ $(DOWNLOADS)/crossruby/configure: $(DOWNLOADS)/crossruby/configure.ac $(RUBY)
|
|||
cd $(DOWNLOADS)/crossruby && $(RUBY) tool/downloader.rb -d tool -e gnu config.guess config.sub
|
||||
cd $(DOWNLOADS)/crossruby && $(AUTORECONF) -i
|
||||
|
||||
$(DOWNLOADS)/crossruby/configure.ac:
|
||||
$(DOWNLOADS)/crossruby/configure.ac: $(DOWNLOADS)/ruby-12995.patch
|
||||
mkdir -p $(DOWNLOADS)
|
||||
$(CLONE) $(GITHUB)/ruby/ruby $(DOWNLOADS)/crossruby -b $(RUBY_VERSION)
|
||||
cd $(DOWNLOADS)/crossruby && $(GIT) apply ${PWD}/ruby-setjmp.patch
|
||||
cd $(DOWNLOADS)/crossruby && $(GIT) apply $(DOWNLOADS)/ruby-12995.patch
|
||||
echo '#include "${PWD}/ruby-bindings.h"' >> $(DOWNLOADS)/crossruby/gc.c
|
||||
|
||||
$(DOWNLOADS)/ruby-12995.patch:
|
||||
mkdir -p $(DOWNLOADS)
|
||||
$(CURL) -Lo $(DOWNLOADS)/ruby-12995.patch $(GITHUB)/ruby/ruby/pull/12995.diff
|
||||
|
||||
# Base Ruby (targets the build machine)
|
||||
|
||||
$(RUBY): $(DOWNLOADS)/baseruby/Makefile
|
||||
|
|
|
@@ -1,171 +0,0 @@
|
|||
# Fixes two memory leaks in the setjmp handler in WASI builds of Ruby.
|
||||
# One of them causes the stack pointer to get corrupted after a longjmp.
|
||||
# The other one causes VM jump buffers to be leaked after a longjmp.
|
||||
|
||||
--- a/eval_intern.h
|
||||
+++ b/eval_intern.h
|
||||
@@ -110,11 +110,11 @@ extern int select_large_fdset(int, fd_set *, fd_set *, fd_set *, struct timeval
|
||||
_tag.tag = Qundef; \
|
||||
_tag.prev = _ec->tag; \
|
||||
_tag.lock_rec = rb_ec_vm_lock_rec(_ec); \
|
||||
- rb_vm_tag_jmpbuf_init(&_tag.buf); \
|
||||
+ rb_vm_tag_jmpbuf_init(&_tag); \
|
||||
|
||||
#define EC_POP_TAG() \
|
||||
_ec->tag = _tag.prev; \
|
||||
- rb_vm_tag_jmpbuf_deinit(&_tag.buf); \
|
||||
+ rb_vm_tag_jmpbuf_deinit(&_tag); \
|
||||
} while (0)
|
||||
|
||||
#define EC_TMPPOP_TAG() \
|
||||
--- a/vm_core.h
|
||||
+++ b/vm_core.h
|
||||
@@ -907,52 +907,79 @@ typedef void *rb_jmpbuf_t[5];
|
||||
Therefore, we allocates the buffer on the heap on such
|
||||
environments.
|
||||
*/
|
||||
-typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
|
||||
+typedef struct _rb_vm_tag_jmpbuf {
|
||||
+ struct _rb_vm_tag_jmpbuf *next;
|
||||
+ rb_jmpbuf_t buf;
|
||||
+} *rb_vm_tag_jmpbuf_t;
|
||||
|
||||
-#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
|
||||
+#define RB_VM_TAG_JMPBUF_GET(jmpbuf) ((jmpbuf)->buf)
|
||||
+#else
|
||||
+typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
|
||||
+
|
||||
+#define RB_VM_TAG_JMPBUF_GET(jmpbuf) (jmpbuf)
|
||||
+#endif
|
||||
+
|
||||
+/*
|
||||
+ the members which are written in EC_PUSH_TAG() should be placed at
|
||||
+ the beginning and the end, so that entire region is accessible.
|
||||
+*/
|
||||
+struct rb_vm_tag {
|
||||
+ VALUE tag;
|
||||
+ VALUE retval;
|
||||
+ rb_vm_tag_jmpbuf_t buf;
|
||||
+ struct rb_vm_tag *prev;
|
||||
+ enum ruby_tag_type state;
|
||||
+ unsigned int lock_rec;
|
||||
+};
|
||||
+
|
||||
+#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
|
||||
+static inline void
|
||||
+_rb_vm_tag_jmpbuf_deinit_internal(rb_vm_tag_jmpbuf_t jmpbuf)
|
||||
+{
|
||||
+ rb_vm_tag_jmpbuf_t buf = jmpbuf;
|
||||
+ while (buf != NULL) {
|
||||
+ rb_vm_tag_jmpbuf_t next = buf->next;
|
||||
+ ruby_xfree(buf);
|
||||
+ buf = next;
|
||||
+ }
|
||||
+}
|
||||
|
||||
static inline void
|
||||
-rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
|
||||
+rb_vm_tag_jmpbuf_init(struct rb_vm_tag *tag)
|
||||
{
|
||||
- *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
|
||||
+ if (tag->prev != NULL && tag->prev->buf->next != NULL) {
|
||||
+ _rb_vm_tag_jmpbuf_deinit_internal(tag->prev->buf->next);
|
||||
+ tag->prev->buf->next = NULL;
|
||||
+ }
|
||||
+ tag->buf = ruby_xmalloc(sizeof *tag->buf);
|
||||
+ tag->buf->next = NULL;
|
||||
+ if (tag->prev != NULL) {
|
||||
+ tag->prev->buf->next = tag->buf;
|
||||
+ }
|
||||
}
|
||||
|
||||
static inline void
|
||||
-rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
|
||||
+rb_vm_tag_jmpbuf_deinit(struct rb_vm_tag *tag)
|
||||
{
|
||||
- ruby_xfree(*jmpbuf);
|
||||
+ if (tag->prev != NULL) {
|
||||
+ tag->prev->buf->next = NULL;
|
||||
+ }
|
||||
+ _rb_vm_tag_jmpbuf_deinit_internal(tag->buf);
|
||||
}
|
||||
#else
|
||||
-typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
|
||||
-
|
||||
-#define RB_VM_TAG_JMPBUF_GET(buf) (buf)
|
||||
-
|
||||
static inline void
|
||||
-rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
|
||||
+rb_vm_tag_jmpbuf_init(struct rb_vm_tag *tag)
|
||||
{
|
||||
// no-op
|
||||
}
|
||||
|
||||
static inline void
|
||||
-rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
|
||||
+rb_vm_tag_jmpbuf_deinit(struct rb_vm_tag *tag)
|
||||
{
|
||||
// no-op
|
||||
}
|
||||
#endif
|
||||
|
||||
-/*
|
||||
- the members which are written in EC_PUSH_TAG() should be placed at
|
||||
- the beginning and the end, so that entire region is accessible.
|
||||
-*/
|
||||
-struct rb_vm_tag {
|
||||
- VALUE tag;
|
||||
- VALUE retval;
|
||||
- rb_vm_tag_jmpbuf_t buf;
|
||||
- struct rb_vm_tag *prev;
|
||||
- enum ruby_tag_type state;
|
||||
- unsigned int lock_rec;
|
||||
-};
|
||||
-
|
||||
STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
|
||||
STATIC_ASSERT(rb_vm_tag_buf_end,
|
||||
offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
|
||||
--- a/wasm/setjmp.c
|
||||
+++ b/wasm/setjmp.c
|
||||
@@ -143,9 +143,11 @@ rb_wasm_try_catch_init(struct rb_wasm_try_catch *try_catch,
|
||||
try_catch->try_f = try_f;
|
||||
try_catch->catch_f = catch_f;
|
||||
try_catch->context = context;
|
||||
+ try_catch->stack_pointer = NULL;
|
||||
}
|
||||
|
||||
// NOTE: This function is not processed by Asyncify due to a call of asyncify_stop_rewind
|
||||
+__attribute__((noinline))
|
||||
void
|
||||
rb_wasm_try_catch_loop_run(struct rb_wasm_try_catch *try_catch, rb_wasm_jmp_buf *target)
|
||||
{
|
||||
@@ -154,6 +156,10 @@ rb_wasm_try_catch_loop_run(struct rb_wasm_try_catch *try_catch, rb_wasm_jmp_buf
|
||||
|
||||
target->state = JMP_BUF_STATE_CAPTURED;
|
||||
|
||||
+ if (try_catch->stack_pointer == NULL) {
|
||||
+ try_catch->stack_pointer = rb_wasm_get_stack_pointer();
|
||||
+ }
|
||||
+
|
||||
switch ((enum try_catch_phase)try_catch->state) {
|
||||
case TRY_CATCH_PHASE_MAIN:
|
||||
// may unwind
|
||||
@@ -175,6 +181,8 @@ rb_wasm_try_catch_loop_run(struct rb_wasm_try_catch *try_catch, rb_wasm_jmp_buf
|
||||
// stop unwinding
|
||||
// (but call stop_rewind to update the asyncify state to "normal" from "unwind")
|
||||
asyncify_stop_rewind();
|
||||
+ // reset the stack pointer to what it was before the most recent call to try_f or catch_f
|
||||
+ rb_wasm_set_stack_pointer(try_catch->stack_pointer);
|
||||
// clear the active jmpbuf because it's already stopped
|
||||
_rb_wasm_active_jmpbuf = NULL;
|
||||
// reset jmpbuf state to be able to unwind again
|
||||
--- a/wasm/setjmp.h
|
||||
+++ b/wasm/setjmp.h
|
||||
@@ -65,6 +65,7 @@ struct rb_wasm_try_catch {
|
||||
rb_wasm_try_catch_func_t try_f;
|
||||
rb_wasm_try_catch_func_t catch_f;
|
||||
void *context;
|
||||
+ void *stack_pointer;
|
||||
int state;
|
||||
};
|
||||
|
Loading…
Add table
Reference in a new issue