
Commit 1f8dbca

feat: add ENABLE_MI_MALLOC to toggle mimalloc support on/off
1 parent: 4bed95d · commit: 1f8dbca

35 files changed: +8,585 −7,974 lines

CMakeLists.txt

+2
@@ -4,6 +4,8 @@ set(C_STANDARD 17)
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 set(MI_OVERRIDE OFF)
 
+add_definitions(-DENABLE_MI_MALLOC)
+
 include(CheckIPOSupported)
 check_ipo_supported(RESULT supported OUTPUT error)
 if( supported )
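
The new add_definitions(-DENABLE_MI_MALLOC) line defines the macro globally for every translation unit, so the C sources can choose the allocator at compile time. As a rough sketch of that pattern (the sys_* wrapper names below are hypothetical, not taken from this commit), the flag would typically be consumed like this:

    /* Hypothetical sketch of how a -DENABLE_MI_MALLOC compile-time switch is
     * usually consumed; the sys_* wrapper names are illustrative only. */
    #include <stdlib.h>
    #if ENABLE_MI_MALLOC
    #include "mimalloc.h"
    #define sys_malloc(sz)       mi_malloc(sz)
    #define sys_realloc(ptr, sz) mi_realloc(ptr, sz)
    #define sys_free(ptr)        mi_free(ptr)
    #else
    #define sys_malloc(sz)       malloc(sz)
    #define sys_realloc(ptr, sz) realloc(ptr, sz)
    #define sys_free(ptr)        free(ptr)
    #endif

Note that add_definitions() applies unconditionally; exposing the flag as a CMake option() would let builds opt out without editing CMakeLists.txt.
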

include/quickjs/cutils.h

+65 −8
@@ -1,6 +1,6 @@
 /*
  * C utilities
- * 
+ *
  * Copyright (c) 2017 Fabrice Bellard
  * Copyright (c) 2018 Charlie Gordon
  *
@@ -28,14 +28,31 @@
2828
#include <stdlib.h>
2929
#include <inttypes.h>
3030

31+
#if ENABLE_MI_MALLOC
32+
#include "mimalloc.h"
33+
#endif
34+
35+
#ifdef _MSC_VER
36+
#include <intrin.h>
37+
#endif
38+
3139
/* set if CPU is big endian */
3240
#undef WORDS_BIGENDIAN
3341

42+
#ifdef _MSC_VER
43+
#define likely(x) (x)
44+
#define unlikely(x) (x)
45+
#define force_inline __forceinline
46+
#define no_inline __declspec(noinline)
47+
#define __maybe_unused
48+
#define __attribute__(...)
49+
#else
3450
#define likely(x) __builtin_expect(!!(x), 1)
3551
#define unlikely(x) __builtin_expect(!!(x), 0)
3652
#define force_inline inline __attribute__((always_inline))
3753
#define no_inline __attribute__((noinline))
3854
#define __maybe_unused __attribute__((unused))
55+
#endif
3956

4057
#define xglue(x, y) x ## y
4158
#define glue(x, y) xglue(x, y)
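
Under MSVC, likely()/unlikely() degrade to plain expressions, force_inline/no_inline map to __forceinline/__declspec(noinline), and the variadic #define __attribute__(...) makes GCC-style attribute annotations expand to nothing, so shared headers keep compiling. A minimal sketch of code that builds on either compiler, assuming these cutils.h shims and the project's include paths are in scope:

    /* Minimal sketch, assuming the cutils.h shims above are available. */
    #include <stdio.h>
    #include "quickjs/cutils.h"

    static force_inline int add_one(int x) { return x + 1; }
    static __maybe_unused int unused_helper(void) { return 0; }

    int main(void)
    {
        int v = 41;
        if (likely(v > 0))   /* branch hint on GCC/Clang, plain (v > 0) on MSVC */
            printf("%d\n", add_one(v));
        return 0;
    }
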
@@ -114,27 +131,66 @@ static inline int64_t min_int64(int64_t a, int64_t b)
 /* WARNING: undefined if a = 0 */
 static inline int clz32(unsigned int a)
 {
+#ifdef _MSC_VER
+    unsigned long idx;
+    _BitScanReverse(&idx, a);
+    return 31 ^ idx;
+#else
     return __builtin_clz(a);
+#endif
 }
 
 /* WARNING: undefined if a = 0 */
 static inline int clz64(uint64_t a)
 {
+#ifdef _MSC_VER
+    unsigned long idx;
+    _BitScanReverse64(&idx, a);
+    return 63 ^ idx;
+#else
     return __builtin_clzll(a);
+#endif
 }
 
 /* WARNING: undefined if a = 0 */
 static inline int ctz32(unsigned int a)
 {
+#ifdef _MSC_VER
+    unsigned long idx;
+    _BitScanForward(&idx, a);
+    return 31 ^ idx;
+#else
     return __builtin_ctz(a);
+#endif
 }
 
 /* WARNING: undefined if a = 0 */
 static inline int ctz64(uint64_t a)
 {
+#ifdef _MSC_VER
+    unsigned long idx;
+    _BitScanForward64(&idx, a);
+    return 63 ^ idx;
+#else
     return __builtin_ctzll(a);
+#endif
 }
 
+#ifdef _MSC_VER
+#pragma pack(push, 1)
+struct packed_u64 {
+    uint64_t v;
+};
+
+struct packed_u32 {
+    uint32_t v;
+};
+
+struct packed_u16 {
+    uint16_t v;
+};
+#pragma pack(pop)
+#else
 struct __attribute__((packed)) packed_u64 {
     uint64_t v;
 };
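
_BitScanReverse stores the bit index of the highest set bit, so for a nonzero 32-bit value 31 ^ idx (equivalently 31 - idx, since idx is at most 31) is exactly the leading-zero count and matches __builtin_clz; the 64-bit variant works the same way with 63. _BitScanForward, by contrast, already stores the trailing-zero count, so the MSVC ctz32/ctz64 paths above appear to return the wrong value. A corrected sketch, editorial and not part of the commit, would be:

    /* Editorial sketch only: _BitScanForward's result is already the
     * trailing-zero count, so no XOR against 31/63 is needed. */
    #ifdef _MSC_VER
    #include <stdint.h>
    #include <intrin.h>

    static inline int ctz32_fixed(unsigned int a)   /* hypothetical name */
    {
        unsigned long idx;
        _BitScanForward(&idx, a);    /* idx = index of lowest set bit = ctz */
        return (int)idx;
    }

    static inline int ctz64_fixed(uint64_t a)       /* hypothetical name */
    {
        unsigned long idx;
        _BitScanForward64(&idx, a);
        return (int)idx;
    }
    #endif
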
@@ -146,6 +202,7 @@ struct __attribute__((packed)) packed_u32 {
 struct __attribute__((packed)) packed_u16 {
     uint16_t v;
 };
+#endif
 
 static inline uint64_t get_u64(const uint8_t *tab)
 {
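
Both branches declare byte-packed one-member structs; #pragma pack(push, 1)/pack(pop) is the MSVC spelling of GCC's __attribute__((packed)). cutils.h uses them to read unaligned integers from byte buffers, roughly as in this sketch mirroring the get_u64 pattern above (the helper name is illustrative, and the packed_u32 type from cutils.h is assumed to be in scope):

    /* Sketch of the unaligned-load idiom these packed structs enable. */
    static inline uint32_t load_u32_unaligned(const uint8_t *tab)  /* illustrative name */
    {
        /* The packed struct tells the compiler the field may sit at any byte
         * offset, so a safe unaligned load is emitted on either toolchain. */
        return ((const struct packed_u32 *)tab)->v;
    }
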
@@ -220,13 +277,13 @@ static inline uint32_t bswap32(uint32_t v)
 
 static inline uint64_t bswap64(uint64_t v)
 {
-    return ((v & ((uint64_t)0xff << (7 * 8))) >> (7 * 8)) | 
-           ((v & ((uint64_t)0xff << (6 * 8))) >> (5 * 8)) | 
-           ((v & ((uint64_t)0xff << (5 * 8))) >> (3 * 8)) | 
-           ((v & ((uint64_t)0xff << (4 * 8))) >> (1 * 8)) | 
-           ((v & ((uint64_t)0xff << (3 * 8))) << (1 * 8)) | 
-           ((v & ((uint64_t)0xff << (2 * 8))) << (3 * 8)) | 
-           ((v & ((uint64_t)0xff << (1 * 8))) << (5 * 8)) | 
+    return ((v & ((uint64_t)0xff << (7 * 8))) >> (7 * 8)) |
+           ((v & ((uint64_t)0xff << (6 * 8))) >> (5 * 8)) |
+           ((v & ((uint64_t)0xff << (5 * 8))) >> (3 * 8)) |
+           ((v & ((uint64_t)0xff << (4 * 8))) >> (1 * 8)) |
+           ((v & ((uint64_t)0xff << (3 * 8))) << (1 * 8)) |
+           ((v & ((uint64_t)0xff << (2 * 8))) << (3 * 8)) |
+           ((v & ((uint64_t)0xff << (1 * 8))) << (5 * 8)) |
            ((v & ((uint64_t)0xff << (0 * 8))) << (7 * 8));
 }
 
include/quickjs/libbf.h

+10 −10
@@ -1,6 +1,6 @@
 /*
  * Tiny arbitrary precision floating point library
- * 
+ *
  * Copyright (c) 2017-2021 Fabrice Bellard
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -171,7 +171,7 @@ static inline bf_flags_t bf_set_exp_bits(int n)
 #define BF_ST_UNDERFLOW (1 << 3)
 #define BF_ST_INEXACT (1 << 4)
 /* indicate that a memory allocation error occured. NaN is returned */
-#define BF_ST_MEM_ERROR (1 << 5) 
+#define BF_ST_MEM_ERROR (1 << 5)
 
 #define BF_RADIX_MAX 36 /* maximum radix for bf_atof() and bf_ftoa() */
 
@@ -284,7 +284,7 @@ int bf_sub(bf_t *r, const bf_t *a, const bf_t *b, limb_t prec, bf_flags_t flags)
 int bf_add_si(bf_t *r, const bf_t *a, int64_t b1, limb_t prec, bf_flags_t flags);
 int bf_mul(bf_t *r, const bf_t *a, const bf_t *b, limb_t prec, bf_flags_t flags);
 int bf_mul_ui(bf_t *r, const bf_t *a, uint64_t b1, limb_t prec, bf_flags_t flags);
-int bf_mul_si(bf_t *r, const bf_t *a, int64_t b1, limb_t prec, 
+int bf_mul_si(bf_t *r, const bf_t *a, int64_t b1, limb_t prec,
               bf_flags_t flags);
 int bf_mul_2exp(bf_t *r, slimb_t e, limb_t prec, bf_flags_t flags);
 int bf_div(bf_t *r, const bf_t *a, const bf_t *b, limb_t prec, bf_flags_t flags);
@@ -341,12 +341,12 @@ int bf_mul_pow_radix(bf_t *r, const bf_t *T, limb_t radix,
 /* fractional format: prec digits after the decimal point rounded with
    (flags & BF_RND_MASK) */
 #define BF_FTOA_FORMAT_FRAC (1 << 16)
-/* free format: 
- 
+/* free format:
+
    For binary radices with bf_ftoa() and for bfdec_ftoa(): use the minimum
    number of digits to represent 'a'. The precision and the rounding
    mode are ignored.
- 
+
    For the non binary radices with bf_ftoa(): use as many digits as
    necessary so that bf_atof() return the same number when using
    precision 'prec', rounding to nearest and the subnormal
@@ -373,7 +373,7 @@ char *bf_ftoa(size_t *plen, const bf_t *a, int radix, limb_t prec,
               bf_flags_t flags);
 
 /* modulo 2^n instead of saturation. NaN and infinity return 0 */
-#define BF_GET_INT_MOD (1 << 0) 
+#define BF_GET_INT_MOD (1 << 0)
 int bf_get_int32(int *pres, const bf_t *a, int flags);
 int bf_get_int64(int64_t *pres, const bf_t *a, int flags);
 int bf_get_uint64(uint64_t *pres, const bf_t *a);
@@ -387,10 +387,10 @@ int bf_normalize_and_round(bf_t *r, limb_t prec1, bf_flags_t flags);
 int bf_can_round(const bf_t *a, slimb_t prec, bf_rnd_t rnd_mode, slimb_t k);
 slimb_t bf_mul_log2_radix(slimb_t a1, unsigned int radix, int is_inv,
                           int is_ceil1);
-int mp_mul(bf_context_t *s, limb_t *result, 
-           const limb_t *op1, limb_t op1_size, 
+int mp_mul(bf_context_t *s, limb_t *result,
+           const limb_t *op1, limb_t op1_size,
            const limb_t *op2, limb_t op2_size);
-limb_t mp_add(limb_t *res, const limb_t *op1, const limb_t *op2, 
+limb_t mp_add(limb_t *res, const limb_t *op1, const limb_t *op2,
               limb_t n, limb_t carry);
 limb_t mp_add_ui(limb_t *tab, limb_t b, size_t n);
 int mp_sqrtrem(bf_context_t *s, limb_t *tabs, limb_t *taba, limb_t n);

include/quickjs/libunicode-table.h

+1
@@ -4447,3 +4447,4 @@ static const uint16_t unicode_prop_len_table[] = {
 };
 
 #endif /* CONFIG_ALL_UNICODE */
+

include/quickjs/quickjs.h

+31 −4
@@ -46,7 +46,11 @@ extern "C" {
 #else
 #define js_likely(x) (x)
 #define js_unlikely(x) (x)
+#ifdef _MSC_VER
+#define js_force_inline __forceinline
+#else
 #define js_force_inline inline
+#endif
 #define __js_printf_like(a, b)
 #endif
 
@@ -70,6 +74,10 @@ typedef uint32_t JSAtom;
 #define JS_NAN_BOXING
 #endif
 
+#ifdef _MSC_VER
+typedef size_t ssize_t;
+#endif
+
 enum {
     /* all tags with a reference count are negative */
     JS_TAG_FIRST = -11, /* first negative tag */
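
MSVC's C library does not provide the POSIX ssize_t, hence the fallback typedef. Editorial note, hedged: POSIX defines ssize_t as a signed type, so aliasing it to the unsigned size_t changes the meaning of negative return values and signed comparisons; Windows already ships a signed counterpart that could be used instead, along these lines:

    /* Editorial sketch, not part of the commit: a signed ssize_t on Windows. */
    #ifdef _MSC_VER
    #include <BaseTsd.h>
    typedef SSIZE_T ssize_t;   /* SSIZE_T is the pointer-sized signed integer type */
    #endif
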
@@ -215,8 +223,23 @@ typedef struct JSValue {
 #define JS_VALUE_GET_FLOAT64(v) ((v).u.float64)
 #define JS_VALUE_GET_PTR(v) ((v).u.ptr)
 
+#ifdef _MSC_VER
+static inline JSValue JS_MKVAL(int tag, int32_t val) {
+    JSValue v;
+    v.u.int32 = val;
+    v.tag = tag;
+    return v;
+}
+static inline JSValue JS_MKPTR(int tag, void *val) {
+    JSValue v;
+    v.u.ptr = val;
+    v.tag = tag;
+    return v;
+}
+#else
 #define JS_MKVAL(tag, val) (JSValue){ (JSValueUnion){ .int32 = val }, tag }
 #define JS_MKPTR(tag, p) (JSValue){ (JSValueUnion){ .ptr = p }, tag }
+#endif
 
 #define JS_TAG_IS_FLOAT64(tag) ((unsigned)(tag) == JS_TAG_FLOAT64)
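
The original JS_MKVAL/JS_MKPTR macros build a JSValue with a compound literal and a designated initializer, which MSVC does not reliably accept (and which C++ translation units cannot use at all), so the MSVC branch constructs the value field by field in static inline functions instead. Call sites are unchanged; a usage sketch, assuming quickjs.h is included:

    /* Usage sketch: identical on both compilers once quickjs.h is included. */
    static void mkval_example(void)
    {
        void *obj_ptr = NULL;                              /* placeholder pointer */
        JSValue undef = JS_MKVAL(JS_TAG_UNDEFINED, 0);     /* tagged immediate */
        JSValue boxed = JS_MKPTR(JS_TAG_OBJECT, obj_ptr);  /* tagged pointer */
        (void)undef; (void)boxed;
    }
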

@@ -331,6 +354,8 @@ JSRuntime *JS_NewRuntime(void);
 void JS_SetRuntimeInfo(JSRuntime *rt, const char *info);
 void JS_SetMemoryLimit(JSRuntime *rt, size_t limit);
 void JS_SetGCThreshold(JSRuntime *rt, size_t gc_threshold);
+void JS_TurnOffGC(JSRuntime *rt);
+void JS_TurnOnGC(JSRuntime *rt);
 /* use 0 to disable maximum stack size check */
 void JS_SetMaxStackSize(JSRuntime *rt, size_t stack_size);
 /* should be called when changing thread to update the stack top value
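
JS_TurnOffGC and JS_TurnOnGC are new in this fork; judging only from the declarations, they toggle the runtime's garbage collector, for example to keep an allocation-heavy phase from being interrupted. A hedged usage sketch:

    /* Hedged usage sketch based only on the declarations above. */
    #include "quickjs/quickjs.h"

    static void gc_toggle_example(void)
    {
        JSRuntime *rt = JS_NewRuntime();
        JS_TurnOffGC(rt);
        /* ... allocation-heavy phase runs without GC interruptions ... */
        JS_TurnOnGC(rt);
        JS_FreeRuntime(rt);
    }
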
@@ -433,12 +458,13 @@ static const char js_atom_init[] =
 #define DEF(name, str) str "\0"
 #include "quickjs/quickjs-atom.h"
 #undef DEF
-; 
+;
 
 JSAtom JS_NewAtomLen(JSContext *ctx, const char *str, size_t len);
 JSAtom JS_NewAtom(JSContext *ctx, const char *str);
 JSAtom JS_NewAtomUInt32(JSContext *ctx, uint32_t n);
 JSAtom JS_DupAtom(JSContext *ctx, JSAtom v);
+JSAtom JS_DupAtomRT(JSRuntime *runtime, JSAtom v);
 void JS_FreeAtom(JSContext *ctx, JSAtom v);
 void JS_FreeAtomRT(JSRuntime *rt, JSAtom v);
 JSValue JS_AtomToValue(JSContext *ctx, JSAtom atom);
@@ -526,7 +552,7 @@ static js_force_inline JSValue JS_NewCatchOffset(JSContext *ctx, int32_t val)
 static js_force_inline JSValue JS_NewInt64(JSContext *ctx, int64_t val) {
     JSValue v;
     if (val == (int32_t)val) {
-        v = JS_NewInt32(ctx, val);
+        v = JS_NewInt32(ctx, (int32_t) val);
     } else {
         v = __JS_NewFloat64(ctx, val);
     }
@@ -662,15 +688,15 @@ static inline JSValue JS_DupValue(JSContext *ctx, JSValueConst v) {
         JSRefCountHeader* p = (JSRefCountHeader*)JS_VALUE_GET_PTR(v);
         p->ref_count++;
     }
-    return (JSValue)v;
+    return v;
 }
 
 static inline JSValue JS_DupValueRT(JSRuntime *rt, JSValueConst v) {
     if (JS_VALUE_HAS_REF_COUNT(v)) {
         JSRefCountHeader* p = (JSRefCountHeader*)JS_VALUE_GET_PTR(v);
         p->ref_count++;
     }
-    return (JSValue)v;
+    return v;
 }
 
 int JS_ToBool(JSContext *ctx, JSValueConst val); /* return -1 for JS_EXCEPTION */
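
Dropping the (JSValue) casts matters because, with the default configuration, JSValueConst is simply an alias for JSValue: the cast is then a cast to a struct type, which standard C does not allow; GCC tolerates the identity cast as an extension while MSVC rejects it, so returning v directly is the portable form. A tiny editorial illustration with stand-in types:

    /* Editorial illustration with stand-in types mirroring JSValue/JSValueConst. */
    typedef struct { int tag; } Val;
    typedef Val ValConst;            /* alias, like the default JSValueConst */

    static Val dup_val(ValConst v)
    {
        return v;                    /* portable: same type, no cast needed */
        /* return (Val)v;               a cast to a struct type; MSVC rejects it */
    }
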
@@ -729,6 +755,7 @@ int JS_SetPropertyInternalWithIC(JSContext* ctx, JSValueConst this_obj, JSAtom p
 static inline int JS_SetProperty(JSContext* ctx, JSValueConst this_obj, JSAtom prop, JSValue val) {
     return JS_SetPropertyInternal(ctx, this_obj, prop, val, JS_PROP_THROW, NULL);
 }
+
 int JS_SetPropertyUint32(JSContext* ctx, JSValueConst this_obj, uint32_t idx, JSValue val);
 int JS_SetPropertyInt64(JSContext* ctx, JSValueConst this_obj, int64_t idx, JSValue val);
 int JS_SetPropertyStr(JSContext* ctx, JSValueConst this_obj, const char* prop, JSValue val);
