194 punwind->FrameRegister = 0;
195 punwind->FrameOffset = 0;
196 punwind->ExceptionHandler = (char *)(&(pDCD->ExceptionHandlerInstr[0])) -
197 (char*)low;
198 punwind->ExceptionData[0] = 0;
199
200 // This structure describes the covered dynamic code area.
201 // Addresses are relative to the beginning on the code cache area
202 prt = &pDCD->rt;
203 prt->BeginAddress = 0;
204 prt->EndAddress = (ULONG)(high - low);
205 prt->UnwindData = ((char *)punwind - low);
206
207 guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low),
208 "Failed to register Dynamic Code Exception Handler with RtlAddFunctionTable");
209
210 #endif // AMD64
211 return true;
212 }
213
// Atomics and Stub Functions

// Signatures of the atomic stub routines produced by the stub generator.
// Each os::atomic_*_bootstrap function below matches one of these
// signatures, so a pointer to it can serve as the initial value of the
// corresponding os::atomic_*_func pointer until the real stub is generated.
typedef int32_t xchg_func_t (int32_t, volatile int32_t*);
typedef int64_t xchg_long_func_t (int64_t, volatile int64_t*);
typedef int32_t cmpxchg_func_t (int32_t, volatile int32_t*, int32_t);
typedef int8_t cmpxchg_byte_func_t (int8_t, volatile int8_t*, int8_t);
typedef int64_t cmpxchg_long_func_t (int64_t, volatile int64_t*, int64_t);
typedef int32_t add_func_t (int32_t, volatile int32_t*);
typedef int64_t add_long_func_t (int64_t, volatile int64_t*);
223
224 #ifdef AMD64
225
226 int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) {
227 // try to use the stub:
228 xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());
229
230 if (func != NULL) {
231 os::atomic_xchg_func = func;
232 return (*func)(exchange_value, dest);
233 }
234 assert(Threads::number_of_threads() == 0, "for bootstrap only");
235
236 int32_t old_value = *dest;
237 *dest = exchange_value;
238 return old_value;
239 }
240
241 int64_t os::atomic_xchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest) {
242 // try to use the stub:
243 xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry());
244
245 if (func != NULL) {
246 os::atomic_xchg_long_func = func;
247 return (*func)(exchange_value, dest);
248 }
249 assert(Threads::number_of_threads() == 0, "for bootstrap only");
250
251 int64_t old_value = *dest;
252 *dest = exchange_value;
253 return old_value;
254 }
255
256
257 int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) {
258 // try to use the stub:
259 cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
260
261 if (func != NULL) {
262 os::atomic_cmpxchg_func = func;
263 return (*func)(exchange_value, dest, compare_value);
264 }
265 assert(Threads::number_of_threads() == 0, "for bootstrap only");
266
267 int32_t old_value = *dest;
268 if (old_value == compare_value)
269 *dest = exchange_value;
270 return old_value;
271 }
272
273 int8_t os::atomic_cmpxchg_byte_bootstrap(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value) {
274 // try to use the stub:
275 cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry());
276
277 if (func != NULL) {
278 os::atomic_cmpxchg_byte_func = func;
279 return (*func)(exchange_value, dest, compare_value);
280 }
281 assert(Threads::number_of_threads() == 0, "for bootstrap only");
282
283 int8_t old_value = *dest;
284 if (old_value == compare_value)
285 *dest = exchange_value;
286 return old_value;
287 }
288
289 #endif // AMD64
290
291 int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) {
292 // try to use the stub:
293 cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
294
295 if (func != NULL) {
296 os::atomic_cmpxchg_long_func = func;
297 return (*func)(exchange_value, dest, compare_value);
298 }
299 assert(Threads::number_of_threads() == 0, "for bootstrap only");
300
301 int64_t old_value = *dest;
302 if (old_value == compare_value)
303 *dest = exchange_value;
304 return old_value;
305 }
306
307 #ifdef AMD64
308
309 int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) {
310 // try to use the stub:
311 add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());
312
313 if (func != NULL) {
314 os::atomic_add_func = func;
315 return (*func)(add_value, dest);
316 }
317 assert(Threads::number_of_threads() == 0, "for bootstrap only");
318
319 return (*dest) += add_value;
320 }
321
322 int64_t os::atomic_add_long_bootstrap(int64_t add_value, volatile int64_t* dest) {
323 // try to use the stub:
324 add_long_func_t* func = CAST_TO_FN_PTR(add_long_func_t*, StubRoutines::atomic_add_long_entry());
325
326 if (func != NULL) {
327 os::atomic_add_long_func = func;
328 return (*func)(add_value, dest);
329 }
330 assert(Threads::number_of_threads() == 0, "for bootstrap only");
331
332 return (*dest) += add_value;
333 }
334
// Until the stub generator has produced the real assembly stubs, the
// atomic entry points dispatch to the plain-C++ bootstrap implementations
// above. Each bootstrap replaces its pointer with the stub on first use.
xchg_func_t* os::atomic_xchg_func = os::atomic_xchg_bootstrap;
xchg_long_func_t* os::atomic_xchg_long_func = os::atomic_xchg_long_bootstrap;
cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;
cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
add_func_t* os::atomic_add_func = os::atomic_add_bootstrap;
add_long_func_t* os::atomic_add_long_func = os::atomic_add_long_bootstrap;
341
342 #endif // AMD64
343
344 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
345
346 #ifdef AMD64
347 /*
348 * Windows/x64 does not use stack frames the way expected by Java:
349 * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
350 * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
351 * not be RBP.
352 * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
353 *
354 * So it's not possible to print the native stack using the
355 * while (...) {... fr = os::get_sender_for_C_frame(&fr); }
356 * loop in vmError.cpp. We need to roll our own loop.
357 */
358 bool os::platform_print_native_stack(outputStream* st, const void* context,
359 char *buf, int buf_size)
360 {
361 CONTEXT ctx;
362 if (context != NULL) {
363 memcpy(&ctx, context, sizeof(ctx));
364 } else {
365 RtlCaptureContext(&ctx);
|
194 punwind->FrameRegister = 0;
195 punwind->FrameOffset = 0;
196 punwind->ExceptionHandler = (char *)(&(pDCD->ExceptionHandlerInstr[0])) -
197 (char*)low;
198 punwind->ExceptionData[0] = 0;
199
200 // This structure describes the covered dynamic code area.
201 // Addresses are relative to the beginning on the code cache area
202 prt = &pDCD->rt;
203 prt->BeginAddress = 0;
204 prt->EndAddress = (ULONG)(high - low);
205 prt->UnwindData = ((char *)punwind - low);
206
207 guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low),
208 "Failed to register Dynamic Code Exception Handler with RtlAddFunctionTable");
209
210 #endif // AMD64
211 return true;
212 }
213
214 #ifdef AMD64
215 /*
216 * Windows/x64 does not use stack frames the way expected by Java:
217 * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
218 * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
219 * not be RBP.
220 * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
221 *
222 * So it's not possible to print the native stack using the
223 * while (...) {... fr = os::get_sender_for_C_frame(&fr); }
224 * loop in vmError.cpp. We need to roll our own loop.
225 */
226 bool os::platform_print_native_stack(outputStream* st, const void* context,
227 char *buf, int buf_size)
228 {
229 CONTEXT ctx;
230 if (context != NULL) {
231 memcpy(&ctx, context, sizeof(ctx));
232 } else {
233 RtlCaptureContext(&ctx);
|