1 /*
2 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
3 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
4 * Copyright (c) 2008-2022 Ivan Maidanski
5 *
6 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
8 *
9 * Permission is hereby granted to use or copy this program
10 * for any purpose, provided the above notices are retained on all copies.
11 * Permission to modify the code and to distribute modified code is granted,
12 * provided the above notices are retained, and a notice that the code was
13 * modified is included with the above copyright notice.
14 */
15 16 #include "private/gc_pmark.h" /*< includes `gc_priv.h` file */
17 18 #ifdef GC_GCJ_SUPPORT
19 20 /*
21 * This is an allocator interface tuned for `gcj` (the GNU static Java
22 * compiler).
23 *
24 * Each allocated object has a pointer in its beginning to a "vtable",
25 * which for our purposes is simply a structure describing the type of
26 * the object. This descriptor structure contains a GC marking
27 * descriptor at offset `GC_GCJ_MARK_DESCR_OFFSET`.
28 *
29 * It is hoped that this interface may also be useful for other systems,
30 * possibly with some tuning of the constants. But the immediate goal
31 * is to get better `gcj` performance.
32 *
33 * We assume: counting on explicit initialization of this interface is OK.
34 */
35 36 # include "gc/gc_gcj.h"
/* Object kind for objects with descriptors in "vtable".  Set up by
 * GC_init_gcj_malloc_mp(); remains 0 until that call. */
int GC_gcj_kind = 0;

/* The kind of objects that are always marked with a mark procedure call.
 * Also set up by GC_init_gcj_malloc_mp(); equals GC_gcj_kind if gcj-style
 * type information is ignored. */
int GC_gcj_debug_kind = 0;
43 44 STATIC struct GC_ms_entry *GC_CALLBACK
45 GC_gcj_fake_mark_proc(word *addr, struct GC_ms_entry *mark_stack_top,
46 struct GC_ms_entry *mark_stack_limit, word env)
47 {
48 UNUSED_ARG(addr);
49 UNUSED_ARG(mark_stack_limit);
50 UNUSED_ARG(env);
51 # if defined(FUNCPTR_IS_DATAPTR) && defined(CPPCHECK)
52 GC_noop1((word)(GC_funcptr_uint)(&GC_init_gcj_malloc));
53 # endif
54 ABORT_RET("No client gcj mark proc is specified");
55 return mark_stack_top;
56 }
# ifdef FUNCPTR_IS_DATAPTR
    /*
     * Historical entry point taking the mark procedure as a data pointer.
     * Only available where function pointers round-trip through data
     * pointers; forwards to GC_init_gcj_malloc_mp() with the default
     * GC_GCJ_MARK_DESCR_OFFSET.
     */
    GC_API void GC_CALL
    GC_init_gcj_malloc(int mp_index, void *mp)
    {
      /* CAST_THRU_UINTPTR avoids a direct (undefined) data-to-function
       * pointer cast. */
      GC_init_gcj_malloc_mp((unsigned)mp_index,
                            CAST_THRU_UINTPTR(GC_mark_proc, mp),
                            GC_GCJ_MARK_DESCR_OFFSET);
    }
# endif /* FUNCPTR_IS_DATAPTR */
67 68 GC_API void GC_CALL
69 GC_init_gcj_malloc_mp(unsigned mp_index, GC_mark_proc mp, size_t descr_offset)
70 {
71 # ifndef GC_IGNORE_GCJ_INFO
72 GC_bool ignore_gcj_info;
73 # endif
74 75 GC_STATIC_ASSERT(GC_GCJ_MARK_DESCR_OFFSET >= sizeof(ptr_t));
76 if (0 == mp) {
77 /* In case `GC_DS_PROC` is unused. */
78 mp = GC_gcj_fake_mark_proc;
79 }
80 81 /* Initialize the collector just in case it is not done yet. */
82 GC_init();
83 if (descr_offset != GC_GCJ_MARK_DESCR_OFFSET)
84 ABORT("GC_init_gcj_malloc_mp: bad offset");
85 86 LOCK();
87 if (GC_gcjobjfreelist != NULL) {
88 /* Already initialized. */
89 UNLOCK();
90 return;
91 }
92 # ifdef GC_IGNORE_GCJ_INFO
93 /* This is useful for debugging on platforms with missing `getenv()`. */
94 # define ignore_gcj_info TRUE
95 # else
96 ignore_gcj_info = GETENV("GC_IGNORE_GCJ_INFO") != NULL;
97 # endif
98 if (ignore_gcj_info) {
99 GC_COND_LOG_PRINTF("Gcj-style type information is disabled!\n");
100 }
101 GC_ASSERT(GC_mark_procs[mp_index] == (GC_mark_proc)0); /*< unused */
102 GC_mark_procs[mp_index] = mp;
103 if (mp_index >= GC_n_mark_procs)
104 ABORT("GC_init_gcj_malloc_mp: bad index");
105 /* Set up object kind `gcj`-style indirect descriptor. */
106 GC_gcjobjfreelist = (ptr_t *)GC_new_free_list_inner();
107 if (ignore_gcj_info) {
108 /*
109 * Use a simple length-based descriptor, thus forcing a fully
110 * conservative scan.
111 */
112 GC_gcj_kind = (int)GC_new_kind_inner((void **)GC_gcjobjfreelist,
113 /* 0 | */ GC_DS_LENGTH, TRUE, TRUE);
114 GC_gcj_debug_kind = GC_gcj_kind;
115 } else {
116 GC_gcj_kind = (int)GC_new_kind_inner(
117 (void **)GC_gcjobjfreelist,
118 (((word)(-(GC_signed_word)GC_GCJ_MARK_DESCR_OFFSET
119 - GC_INDIR_PER_OBJ_BIAS))
120 | GC_DS_PER_OBJECT),
121 FALSE, TRUE);
122 /* Set up object kind for objects that require mark procedure call. */
123 GC_gcj_debug_kind = (int)GC_new_kind_inner(
124 GC_new_free_list_inner(),
125 GC_MAKE_PROC(mp_index, 1 /* allocated with debug info */), FALSE,
126 TRUE);
127 }
128 UNLOCK();
129 # undef ignore_gcj_info
130 }
# ifdef THREAD_LOCAL_ALLOC
    GC_INNER
# else
    STATIC
# endif
/*
 * Core gcj-style allocation: allocate `lb` bytes of kind GC_gcj_kind and
 * store `vtable_ptr` in the first word of the new object.  Fast path pops
 * from GC_gcjobjfreelist under the allocator lock; slow path goes through
 * GC_generic_malloc_inner.  On allocation failure the current GC_oom_fn
 * is invoked (outside the lock) and its result returned.
 * `flags` is passed through to GC_generic_malloc_inner on the slow path
 * (e.g. IGNORE_OFF_PAGE).
 */
void *
GC_core_gcj_malloc(size_t lb, const void *vtable_ptr, unsigned flags)
{
  ptr_t op;
  size_t lg;

  GC_DBG_COLLECT_AT_MALLOC(lb);
  LOCK();
  /* Fast path: small object with a nonempty free list for its size. */
  if (SMALL_OBJ(lb)
      && (op = GC_gcjobjfreelist[lg = GC_size_map[lb]], LIKELY(op != NULL))) {
    GC_gcjobjfreelist[lg] = (ptr_t)obj_link(op);
    GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
    GC_ASSERT(NULL == ((void **)op)[1]);
  } else {
    /*
     * A mechanism to release the allocator lock and invoke finalizers.
     * We do not really have an opportunity to do this on a rarely
     * executed path on which the allocator lock is not held.  Thus we
     * check at a rarely executed point at which it is safe to release
     * the allocator lock; we do this even where we could just call
     * GC_notify_or_invoke_finalizers(), since it is probably cheaper
     * and certainly more uniform.
     */
    /* TODO: Consider doing the same elsewhere? */
    if (GC_gc_no != GC_last_finalized_no) {
      UNLOCK();
      GC_notify_or_invoke_finalizers();
      LOCK();
      GC_last_finalized_no = GC_gc_no;
    }

    op = (ptr_t)GC_generic_malloc_inner(lb, GC_gcj_kind, flags);
    if (NULL == op) {
      /* Read GC_oom_fn while still holding the lock, call it after. */
      GC_oom_func oom_fn = GC_oom_fn;
      UNLOCK();
      return (*oom_fn)(lb);
    }
  }
  /* Install the vtable pointer at the object start, then record the
   * write for incremental/generational collection. */
  *(const void **)op = vtable_ptr;
  UNLOCK();
  GC_dirty(op);
  REACHABLE_AFTER_DIRTY(vtable_ptr);
  return GC_clear_stack(op);
}
181 182 # ifndef THREAD_LOCAL_ALLOC
183 GC_API GC_ATTR_MALLOC void *GC_CALL
184 GC_gcj_malloc(size_t lb, const void *vtable_ptr)
185 {
186 return GC_core_gcj_malloc(lb, vtable_ptr, 0 /* `flags` */);
187 }
188 # endif /* !THREAD_LOCAL_ALLOC */
189 190 GC_API GC_ATTR_MALLOC void *GC_CALL
191 GC_gcj_malloc_ignore_off_page(size_t lb, const void *vtable_ptr)
192 {
193 return GC_core_gcj_malloc(lb, vtable_ptr, IGNORE_OFF_PAGE);
194 }
195 196 #endif /* GC_GCJ_SUPPORT */
197