Fix a second Ruby memory leak

Okay, I think that fixes all the memory leaks. Finally! I'll submit a
pull request to Ruby to fix both of the memory leaks.
This commit is contained in:
刘皓 2025-03-27 00:28:37 -04:00
parent 82444abed2
commit fac72be163
No known key found for this signature in database
GPG key ID: 7901753DB465B711
2 changed files with 144 additions and 5 deletions

View file

@@ -27,7 +27,7 @@
#define WASM_PAGE_SIZE ((uint64_t)65536U)
#define WASM_MIN_PAGES ((uint32_t)4096U)
#define WASM_MIN_PAGES ((uint32_t)1536U)
extern "C" bool wasm_rt_is_initialized(void) {
return true;

View file

@ -1,8 +1,143 @@
# Fixes a bug in WASI builds of Ruby where the stack pointer gets corrupted after a `longjmp` gets caught by `rb_wasm_try_catch_loop_run`.
# Fixes two memory leaks in the setjmp handler in WASI builds of Ruby.
# One of them causes the stack pointer to get corrupted after a longjmp.
# The other one causes VM jump buffers to be leaked after a longjmp.
--- a/cont.c
+++ b/cont.c
@@ -1360,6 +1360,7 @@ cont_init(rb_context_t *cont, rb_thread_t *th)
/* save thread context */
cont_save_thread(cont, th);
cont->saved_ec.thread_ptr = th;
+ cont->saved_ec.tag = NULL;
cont->saved_ec.local_storage = NULL;
cont->saved_ec.local_storage_recursive_hash = Qnil;
cont->saved_ec.local_storage_recursive_hash_for_trace = Qnil;
--- a/eval_intern.h
+++ b/eval_intern.h
@@ -110,11 +110,11 @@ extern int select_large_fdset(int, fd_set *, fd_set *, fd_set *, struct timeval
_tag.tag = Qundef; \
_tag.prev = _ec->tag; \
_tag.lock_rec = rb_ec_vm_lock_rec(_ec); \
- rb_vm_tag_jmpbuf_init(&_tag.buf); \
+ rb_vm_tag_jmpbuf_init(&_tag); \
#define EC_POP_TAG() \
_ec->tag = _tag.prev; \
- rb_vm_tag_jmpbuf_deinit(&_tag.buf); \
+ rb_vm_tag_jmpbuf_deinit(&_tag); \
} while (0)
#define EC_TMPPOP_TAG() \
--- a/vm_core.h
+++ b/vm_core.h
@@ -907,52 +907,79 @@ typedef void *rb_jmpbuf_t[5];
Therefore, we allocates the buffer on the heap on such
environments.
*/
-typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
+typedef struct _rb_vm_tag_jmpbuf {
+ struct _rb_vm_tag_jmpbuf *next;
+ rb_jmpbuf_t buf;
+} *rb_vm_tag_jmpbuf_t;
-#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
+#define RB_VM_TAG_JMPBUF_GET(jmpbuf) ((jmpbuf)->buf)
+#else
+typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
+
+#define RB_VM_TAG_JMPBUF_GET(jmpbuf) (jmpbuf)
+#endif
+
+/*
+ the members which are written in EC_PUSH_TAG() should be placed at
+ the beginning and the end, so that entire region is accessible.
+*/
+struct rb_vm_tag {
+ VALUE tag;
+ VALUE retval;
+ rb_vm_tag_jmpbuf_t buf;
+ struct rb_vm_tag *prev;
+ enum ruby_tag_type state;
+ unsigned int lock_rec;
+};
+
+#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
+static inline void
+_rb_vm_tag_jmpbuf_deinit_internal(rb_vm_tag_jmpbuf_t jmpbuf)
+{
+ rb_vm_tag_jmpbuf_t buf = jmpbuf;
+ while (buf != NULL) {
+ rb_vm_tag_jmpbuf_t next = buf->next;
+ ruby_xfree(buf);
+ buf = next;
+ }
+}
static inline void
-rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
+rb_vm_tag_jmpbuf_init(struct rb_vm_tag *tag)
{
- *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
+ if (tag->prev != NULL && tag->prev->buf->next != NULL) {
+ _rb_vm_tag_jmpbuf_deinit_internal(tag->prev->buf->next);
+ tag->prev->buf->next = NULL;
+ }
+ tag->buf = ruby_xmalloc(sizeof *tag->buf);
+ tag->buf->next = NULL;
+ if (tag->prev != NULL) {
+ tag->prev->buf->next = tag->buf;
+ }
}
static inline void
-rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
+rb_vm_tag_jmpbuf_deinit(struct rb_vm_tag *tag)
{
- ruby_xfree(*jmpbuf);
+ if (tag->prev != NULL) {
+ tag->prev->buf->next = NULL;
+ }
+ _rb_vm_tag_jmpbuf_deinit_internal(tag->buf);
}
#else
-typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
-
-#define RB_VM_TAG_JMPBUF_GET(buf) (buf)
-
static inline void
-rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
+rb_vm_tag_jmpbuf_init(struct rb_vm_tag *tag)
{
// no-op
}
static inline void
-rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
+rb_vm_tag_jmpbuf_deinit(struct rb_vm_tag *tag)
{
// no-op
}
#endif
-/*
- the members which are written in EC_PUSH_TAG() should be placed at
- the beginning and the end, so that entire region is accessible.
-*/
-struct rb_vm_tag {
- VALUE tag;
- VALUE retval;
- rb_vm_tag_jmpbuf_t buf;
- struct rb_vm_tag *prev;
- enum ruby_tag_type state;
- unsigned int lock_rec;
-};
-
STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
STATIC_ASSERT(rb_vm_tag_buf_end,
offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
--- a/wasm/setjmp.c
+++ b/wasm/setjmp.c
@@ -143,6 +143,7 @@ rb_wasm_try_catch_init(struct rb_wasm_try_catch *try_catch,
@@ -143,9 +143,11 @@ rb_wasm_try_catch_init(struct rb_wasm_try_catch *try_catch,
try_catch->try_f = try_f;
try_catch->catch_f = catch_f;
try_catch->context = context;
@@ -10,7 +145,11 @@
}
// NOTE: This function is not processed by Asyncify due to a call of asyncify_stop_rewind
@@ -154,6 +155,10 @@ rb_wasm_try_catch_loop_run(struct rb_wasm_try_catch *try_catch, rb_wasm_jmp_buf
+__attribute__((noinline))
void
rb_wasm_try_catch_loop_run(struct rb_wasm_try_catch *try_catch, rb_wasm_jmp_buf *target)
{
@@ -154,6 +156,10 @@ rb_wasm_try_catch_loop_run(struct rb_wasm_try_catch *try_catch, rb_wasm_jmp_buf
target->state = JMP_BUF_STATE_CAPTURED;
@@ -21,7 +160,7 @@
switch ((enum try_catch_phase)try_catch->state) {
case TRY_CATCH_PHASE_MAIN:
// may unwind
@@ -175,6 +180,8 @@ rb_wasm_try_catch_loop_run(struct rb_wasm_try_catch *try_catch, rb_wasm_jmp_buf
@@ -175,6 +181,8 @@ rb_wasm_try_catch_loop_run(struct rb_wasm_try_catch *try_catch, rb_wasm_jmp_buf
// stop unwinding
// (but call stop_rewind to update the asyncify state to "normal" from "unwind")
asyncify_stop_rewind();