src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp

Old version:

 202   // Addresses are relative to the beginning of the code cache area
 203   prt = &pDCD->rt;
 204   prt->BeginAddress = 0;
 205   prt->EndAddress = (ULONG)(high - low);
 206   prt->UnwindData = ((char *)punwind - low);
 207 
 208   guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low),
 209             "Failed to register Dynamic Code Exception Handler with RtlAddFunctionTable");
 210 
 211 #endif // AMD64
 212   return true;
 213 }
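For readers new to Win64 dynamic unwind registration, here is a minimal, hedged sketch (illustrative only, not code from this patch) of registering a single RUNTIME_FUNCTION entry for a dynamically generated code range [low, high). BeginAddress, EndAddress and UnwindData are RVAs relative to the BaseAddress passed to RtlAddFunctionTable, which is why the code above computes them against low.

    #include <windows.h>

    // The entry must stay alive for as long as the function table is registered.
    static RUNTIME_FUNCTION g_rt;

    static bool register_dynamic_code(char* low, char* high, void* unwind_info) {
      g_rt.BeginAddress = 0;                                  // start of the code range (RVA)
      g_rt.EndAddress   = (ULONG)(high - low);                // end of the code range (RVA)
      g_rt.UnwindData   = (ULONG)((char*)unwind_info - low);  // RVA of the UNWIND_INFO block
      // Register a one-entry table; BaseAddress turns the RVAs back into absolute addresses.
      return RtlAddFunctionTable(&g_rt, 1, (ULONGLONG)low) != FALSE;
    }
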
 214 
 215 void os::initialize_thread(Thread* thr) {
 216 // Nothing to do.
 217 }
 218 
 219 // Atomics and Stub Functions
 220 
 221 typedef jint      xchg_func_t            (jint,     volatile jint*);
 222 typedef intptr_t  xchg_ptr_func_t        (intptr_t, volatile intptr_t*);
 223 typedef jint      cmpxchg_func_t         (jint,     volatile jint*,  jint);
 224 typedef jbyte     cmpxchg_byte_func_t    (jbyte,    volatile jbyte*, jbyte);
 225 typedef jlong     cmpxchg_long_func_t    (jlong,    volatile jlong*, jlong);
 226 typedef jint      add_func_t             (jint,     volatile jint*);
 227 typedef intptr_t  add_ptr_func_t         (intptr_t, volatile intptr_t*);
 228 
 229 #ifdef AMD64
 230 
 231 jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
 232   // try to use the stub:
 233   xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());
 234 
 235   if (func != NULL) {
 236     os::atomic_xchg_func = func;
 237     return (*func)(exchange_value, dest);
 238   }
 239   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 240 
 241   jint old_value = *dest;
 242   *dest = exchange_value;
 243   return old_value;
 244 }
 245 
 246 intptr_t os::atomic_xchg_ptr_bootstrap(intptr_t exchange_value, volatile intptr_t* dest) {
 247   // try to use the stub:
 248   xchg_ptr_func_t* func = CAST_TO_FN_PTR(xchg_ptr_func_t*, StubRoutines::atomic_xchg_ptr_entry());
 249 
 250   if (func != NULL) {
 251     os::atomic_xchg_ptr_func = func;
 252     return (*func)(exchange_value, dest);
 253   }
 254   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 255 
 256   intptr_t old_value = *dest;
 257   *dest = exchange_value;
 258   return old_value;
 259 }
 260 
 261 
 262 jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
 263   // try to use the stub:
 264   cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
 265 
 266   if (func != NULL) {
 267     os::atomic_cmpxchg_func = func;
 268     return (*func)(exchange_value, dest, compare_value);
 269   }
 270   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 271 


 321   }
 322   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 323 
 324   return (*dest) += add_value;
 325 }
 326 
 327 intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) {
 328   // try to use the stub:
 329   add_ptr_func_t* func = CAST_TO_FN_PTR(add_ptr_func_t*, StubRoutines::atomic_add_ptr_entry());
 330 
 331   if (func != NULL) {
 332     os::atomic_add_ptr_func = func;
 333     return (*func)(add_value, dest);
 334   }
 335   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 336 
 337   return (*dest) += add_value;
 338 }
 339 
 340 xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
 341 xchg_ptr_func_t*     os::atomic_xchg_ptr_func     = os::atomic_xchg_ptr_bootstrap;
 342 cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
 343 cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
 344 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
 345 add_ptr_func_t*      os::atomic_add_ptr_func      = os::atomic_add_ptr_bootstrap;
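These pointer definitions are the heart of the bootstrap scheme used by every atomic_*_bootstrap routine above: each os:: pointer starts out at the plain C bootstrap implementation, and the first call that finds a generated stub in StubRoutines caches it in the pointer so that later calls reach the stub directly. A self-contained sketch of that pattern follows; the names get_generated_stub, g_add_func and add_bootstrap are hypothetical, not HotSpot names.

    typedef int add_fn(int add_value, volatile int* dest);

    // Hypothetical stand-in for StubRoutines::atomic_add_entry(); returns NULL
    // until the stub generator has produced the assembly stub.
    static add_fn* get_generated_stub() { return NULL; }

    static int add_bootstrap(int add_value, volatile int* dest);

    // The global pointer starts out at the bootstrap implementation ...
    static add_fn* g_add_func = add_bootstrap;

    static int add_bootstrap(int add_value, volatile int* dest) {
      add_fn* stub = get_generated_stub();
      if (stub != NULL) {
        g_add_func = stub;                // ... and is patched once a stub exists,
        return (*stub)(add_value, dest);  // so subsequent callers reach the stub directly.
      }
      // No stub yet: a plain, non-atomic fallback that is only safe while the VM is
      // still single-threaded (hence the number_of_threads() == 0 assert above).
      return (*dest) += add_value;
    }
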
 346 
 347 #endif // AMD64
 348 
 349 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
 350 
 351 #ifdef AMD64
 352 /*
 353  * Windows/x64 does not use stack frames the way expected by Java:
 354  * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
 355  * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
 356  *     not be RBP.
 357  * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
 358  *
 359  * So it's not possible to print the native stack using the
 360  *     while (...) {...  fr = os::get_sender_for_C_frame(&fr); }
 361  * loop in vmError.cpp. We need to roll our own loop.

New version:

 202   // Addresses are relative to the beginning of the code cache area
 203   prt = &pDCD->rt;
 204   prt->BeginAddress = 0;
 205   prt->EndAddress = (ULONG)(high - low);
 206   prt->UnwindData = ((char *)punwind - low);
 207 
 208   guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low),
 209             "Failed to register Dynamic Code Exception Handler with RtlAddFunctionTable");
 210 
 211 #endif // AMD64
 212   return true;
 213 }
 214 
 215 void os::initialize_thread(Thread* thr) {
 216 // Nothing to do.
 217 }
 218 
 219 // Atomics and Stub Functions
 220 
 221 typedef jint      xchg_func_t            (jint,     volatile jint*);
 222 typedef intptr_t  xchg_long_func_t       (jlong,    volatile jlong*);
 223 typedef jint      cmpxchg_func_t         (jint,     volatile jint*,  jint);
 224 typedef jbyte     cmpxchg_byte_func_t    (jbyte,    volatile jbyte*, jbyte);
 225 typedef jlong     cmpxchg_long_func_t    (jlong,    volatile jlong*, jlong);
 226 typedef jint      add_func_t             (jint,     volatile jint*);
 227 typedef intptr_t  add_ptr_func_t         (intptr_t, volatile intptr_t*);
 228 
 229 #ifdef AMD64
 230 
 231 jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
 232   // try to use the stub:
 233   xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());
 234 
 235   if (func != NULL) {
 236     os::atomic_xchg_func = func;
 237     return (*func)(exchange_value, dest);
 238   }
 239   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 240 
 241   jint old_value = *dest;
 242   *dest = exchange_value;
 243   return old_value;
 244 }
 245 
 246 intptr_t os::atomic_xchg_long_bootstrap(jlong exchange_value, volatile jlong* dest) {
 247   // try to use the stub:
 248   xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry());
 249 
 250   if (func != NULL) {
 251     os::atomic_xchg_long_func = func;
 252     return (*func)(exchange_value, dest);
 253   }
 254   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 255 
 256   intptr_t old_value = *dest;
 257   *dest = exchange_value;
 258   return old_value;
 259 }
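For context, a hedged usage sketch (not part of the patch, and assuming the declarations from the platform os header are in scope): 64-bit exchanges reach this routine through the cached os::atomic_xchg_long_func pointer, which points at the bootstrap version during early single-threaded startup and at the generated stub afterwards. On Win64, intptr_t and jlong are both 64 bits wide, so the intptr_t return type loses nothing.

    // Hedged illustration only; swap_counter is a hypothetical helper.
    jlong swap_counter(volatile jlong* counter, jlong new_value) {
      return (jlong)(*os::atomic_xchg_long_func)(new_value, counter);
    }
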
 260 
 261 
 262 jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
 263   // try to use the stub:
 264   cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
 265 
 266   if (func != NULL) {
 267     os::atomic_cmpxchg_func = func;
 268     return (*func)(exchange_value, dest, compare_value);
 269   }
 270   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 271 


 321   }
 322   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 323 
 324   return (*dest) += add_value;
 325 }
 326 
 327 intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) {
 328   // try to use the stub:
 329   add_ptr_func_t* func = CAST_TO_FN_PTR(add_ptr_func_t*, StubRoutines::atomic_add_ptr_entry());
 330 
 331   if (func != NULL) {
 332     os::atomic_add_ptr_func = func;
 333     return (*func)(add_value, dest);
 334   }
 335   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 336 
 337   return (*dest) += add_value;
 338 }
 339 
 340 xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
 341 xchg_long_func_t*    os::atomic_xchg_long_func    = os::atomic_xchg_long_bootstrap;
 342 cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
 343 cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
 344 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
 345 add_ptr_func_t*      os::atomic_add_ptr_func      = os::atomic_add_ptr_bootstrap;
 346 
 347 #endif // AMD64
 348 
 349 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
 350 
 351 #ifdef AMD64
 352 /*
 353  * Windows/x64 does not use stack frames the way expected by Java:
 354  * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
 355  * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
 356  *     not be RBP.
 357  * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
 358  *
 359  * So it's not possible to print the native stack using the
 360  *     while (...) {...  fr = os::get_sender_for_C_frame(&fr); }
 361  * loop in vmError.cpp. We need to roll our own loop.
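The comment above (cut off in this view) motivates rolling a custom unwind loop. As a hedged sketch of the general technique, and not necessarily the loop this file actually implements, a Win64 native stack can be walked from the OS unwind tables instead of a frame-pointer chain:

    #include <windows.h>
    #include <stdio.h>

    // Walk and print up to 32 frames starting from a captured CONTEXT.
    static void print_native_stack_from(CONTEXT ctx) {
      for (int depth = 0; depth < 32 && ctx.Rip != 0; depth++) {
        printf("frame %2d: pc=0x%016llx sp=0x%016llx\n",
               depth, (unsigned long long)ctx.Rip, (unsigned long long)ctx.Rsp);
        DWORD64 image_base = 0;
        PRUNTIME_FUNCTION rf = RtlLookupFunctionEntry(ctx.Rip, &image_base, NULL);
        if (rf == NULL) {
          // Leaf function with no unwind info: the return address sits at RSP.
          ctx.Rip  = *(DWORD64*)ctx.Rsp;
          ctx.Rsp += 8;
        } else {
          PVOID   handler_data      = NULL;
          DWORD64 establisher_frame = 0;
          RtlVirtualUnwind(UNW_FLAG_NHANDLER, image_base, ctx.Rip, rf,
                           &ctx, &handler_data, &establisher_frame, NULL);
        }
      }
    }

The starting CONTEXT would typically come from RtlCaptureContext or from the exception record in an error handler.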