203 prt->BeginAddress = 0;
204 prt->EndAddress = (ULONG)(high - low);
205 prt->UnwindData = ((char *)punwind - low);
206
207 guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low),
208 "Failed to register Dynamic Code Exception Handler with RtlAddFunctionTable");
209
210 #endif // AMD64
211 return true;
212 }
213
// Per-thread OS-level initialization hook. On this platform no extra
// per-thread setup is required, so the body is intentionally empty.
void os::initialize_thread(Thread* thr) {
  // Nothing to do.
}
217
// Atomics and Stub Functions

// Signatures of the atomic-operation entry points. The os::atomic_*_func
// pointers below start out referencing the *_bootstrap fallbacks; each
// bootstrap function installs the generated assembler stub through these
// pointers once StubRoutines provides it.
typedef jint xchg_func_t (jint, volatile jint*);
typedef intptr_t xchg_ptr_func_t (intptr_t, volatile intptr_t*);
typedef jint cmpxchg_func_t (jint, volatile jint*, jint);
typedef jlong cmpxchg_long_func_t (jlong, volatile jlong*, jlong);
typedef jint add_func_t (jint, volatile jint*);
typedef intptr_t add_ptr_func_t (intptr_t, volatile intptr_t*);
226
227 #ifdef AMD64
228
229 jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
230 // try to use the stub:
231 xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());
232
233 if (func != NULL) {
234 os::atomic_xchg_func = func;
235 return (*func)(exchange_value, dest);
236 }
237 assert(Threads::number_of_threads() == 0, "for bootstrap only");
238
239 jint old_value = *dest;
240 *dest = exchange_value;
241 return old_value;
242 }
255 *dest = exchange_value;
256 return old_value;
257 }
258
259
260 jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
261 // try to use the stub:
262 cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
263
264 if (func != NULL) {
265 os::atomic_cmpxchg_func = func;
266 return (*func)(exchange_value, dest, compare_value);
267 }
268 assert(Threads::number_of_threads() == 0, "for bootstrap only");
269
270 jint old_value = *dest;
271 if (old_value == compare_value)
272 *dest = exchange_value;
273 return old_value;
274 }
275 #endif // AMD64
276
// Bootstrap version of the 64-bit compare-and-exchange. Delegates to the
// assembler stub once StubRoutines has generated it (caching the stub in
// os::atomic_cmpxchg_long_func so later calls go there directly); until
// then it falls back to a plain, non-atomic read/compare/store, which is
// safe only while the VM is still single-threaded (enforced by the assert).
jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  // try to use the stub:
  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_long_func = func;  // install the stub permanently
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jlong old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}
292
293 #ifdef AMD64
294
304
305 return (*dest) += add_value;
306 }
307
308 intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) {
309 // try to use the stub:
310 add_ptr_func_t* func = CAST_TO_FN_PTR(add_ptr_func_t*, StubRoutines::atomic_add_ptr_entry());
311
312 if (func != NULL) {
313 os::atomic_add_ptr_func = func;
314 return (*func)(add_value, dest);
315 }
316 assert(Threads::number_of_threads() == 0, "for bootstrap only");
317
318 return (*dest) += add_value;
319 }
320
// The dispatch pointers start out referencing the bootstrap fallbacks
// above; each bootstrap function overwrites its pointer with the generated
// assembler stub on the first call after stub generation.
xchg_func_t* os::atomic_xchg_func = os::atomic_xchg_bootstrap;
xchg_ptr_func_t* os::atomic_xchg_ptr_func = os::atomic_xchg_ptr_bootstrap;
cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;
add_func_t* os::atomic_add_func = os::atomic_add_bootstrap;
add_ptr_func_t* os::atomic_add_ptr_func = os::atomic_add_ptr_bootstrap;
326
327 #endif // AMD64
328
// Unlike the pointers above, this one is defined outside the AMD64-only
// section, so it exists on all x86 variants of this file.
cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
330
331 #ifdef AMD64
332 /*
333 * Windows/x64 does not use stack frames the way expected by Java:
334 * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
335 * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
336 * not be RBP.
337 * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
338 *
339 * So it's not possible to print the native stack using the
340 * while (...) {... fr = os::get_sender_for_C_frame(&fr); }
341 * loop in vmError.cpp. We need to roll our own loop.
342 */
343 bool os::platform_print_native_stack(outputStream* st, void* context,
|
203 prt->BeginAddress = 0;
204 prt->EndAddress = (ULONG)(high - low);
205 prt->UnwindData = ((char *)punwind - low);
206
207 guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low),
208 "Failed to register Dynamic Code Exception Handler with RtlAddFunctionTable");
209
210 #endif // AMD64
211 return true;
212 }
213
// Per-thread OS-level initialization hook. No platform-specific setup is
// needed here, so the body is intentionally empty.
void os::initialize_thread(Thread* thr) {
  // Nothing to do.
}
217
// Atomics and Stub Functions

// Signatures of the atomic-operation entry points. The os::atomic_*_func
// pointers below start out referencing the *_bootstrap fallbacks; each
// bootstrap function installs the generated assembler stub through these
// pointers once StubRoutines provides it.
typedef jint xchg_func_t (jint, volatile jint*);
typedef intptr_t xchg_ptr_func_t (intptr_t, volatile intptr_t*);
typedef jint cmpxchg_func_t (jint, volatile jint*, jint);
typedef jbyte cmpxchg_byte_func_t (jbyte, volatile jbyte*, jbyte);
typedef jlong cmpxchg_long_func_t (jlong, volatile jlong*, jlong);
typedef jint add_func_t (jint, volatile jint*);
typedef intptr_t add_ptr_func_t (intptr_t, volatile intptr_t*);
227
228 #ifdef AMD64
229
230 jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
231 // try to use the stub:
232 xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());
233
234 if (func != NULL) {
235 os::atomic_xchg_func = func;
236 return (*func)(exchange_value, dest);
237 }
238 assert(Threads::number_of_threads() == 0, "for bootstrap only");
239
240 jint old_value = *dest;
241 *dest = exchange_value;
242 return old_value;
243 }
256 *dest = exchange_value;
257 return old_value;
258 }
259
260
261 jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
262 // try to use the stub:
263 cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
264
265 if (func != NULL) {
266 os::atomic_cmpxchg_func = func;
267 return (*func)(exchange_value, dest, compare_value);
268 }
269 assert(Threads::number_of_threads() == 0, "for bootstrap only");
270
271 jint old_value = *dest;
272 if (old_value == compare_value)
273 *dest = exchange_value;
274 return old_value;
275 }
276
277 jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
278 // try to use the stub:
279 cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry());
280
281 if (func != NULL) {
282 os::atomic_cmpxchg_byte_func = func;
283 return (*func)(exchange_value, dest, compare_value);
284 }
285 assert(Threads::number_of_threads() == 0, "for bootstrap only");
286
287 jbyte old_value = *dest;
288 if (old_value == compare_value)
289 *dest = exchange_value;
290 return old_value;
291 }
292
293 #endif // AMD64
294
295 jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
296 // try to use the stub:
297 cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
298
299 if (func != NULL) {
300 os::atomic_cmpxchg_long_func = func;
301 return (*func)(exchange_value, dest, compare_value);
302 }
303 assert(Threads::number_of_threads() == 0, "for bootstrap only");
304
305 jlong old_value = *dest;
306 if (old_value == compare_value)
307 *dest = exchange_value;
308 return old_value;
309 }
310
311 #ifdef AMD64
312
322
323 return (*dest) += add_value;
324 }
325
326 intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) {
327 // try to use the stub:
328 add_ptr_func_t* func = CAST_TO_FN_PTR(add_ptr_func_t*, StubRoutines::atomic_add_ptr_entry());
329
330 if (func != NULL) {
331 os::atomic_add_ptr_func = func;
332 return (*func)(add_value, dest);
333 }
334 assert(Threads::number_of_threads() == 0, "for bootstrap only");
335
336 return (*dest) += add_value;
337 }
338
// The dispatch pointers start out referencing the bootstrap fallbacks
// above; each bootstrap function overwrites its pointer with the generated
// assembler stub on the first call after stub generation.
xchg_func_t* os::atomic_xchg_func = os::atomic_xchg_bootstrap;
xchg_ptr_func_t* os::atomic_xchg_ptr_func = os::atomic_xchg_ptr_bootstrap;
cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;
cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
add_func_t* os::atomic_add_func = os::atomic_add_bootstrap;
add_ptr_func_t* os::atomic_add_ptr_func = os::atomic_add_ptr_bootstrap;
345
346 #endif // AMD64
347
// Unlike the pointers above, this one is defined outside the AMD64-only
// section, so it exists on all x86 variants of this file.
cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
349
350 #ifdef AMD64
351 /*
352 * Windows/x64 does not use stack frames the way expected by Java:
353 * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
354 * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
355 * not be RBP.
356 * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
357 *
358 * So it's not possible to print the native stack using the
359 * while (...) {... fr = os::get_sender_for_C_frame(&fr); }
360 * loop in vmError.cpp. We need to roll our own loop.
361 */
362 bool os::platform_print_native_stack(outputStream* st, void* context,
|