353 if (ret_fp) *ret_fp = (intptr_t *)NULL;
354 }
355
356 return epc;
357 }
358
359 frame os::fetch_frame_from_context(void* ucVoid) {
360 intptr_t* sp;
361 intptr_t* fp;
362 ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
363 return frame(sp, fp, epc.pc());
364 }
365
366 // VC++ does not save frame pointer on stack in optimized build. It
367 // can be turned off by /Oy-. If we really want to walk C frames,
368 // we can use the StackWalk() API.
369 frame os::get_sender_for_C_frame(frame* fr) {
370 return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
371 }
372
373
#ifndef AMD64
// Returns the saved frame pointer of this function's caller, i.e. the
// value found at [EBP] in this frame. Relies on 32-bit VC++ emitting a
// standard EBP-based prologue for this function; requires frame pointers
// enabled (/Oy-) for the result to be meaningful.
// NOTE(review): assumes the compiler does not inline this function or
// omit its frame — TODO confirm build flags guarantee this.
intptr_t* _get_previous_fp() {
  intptr_t **frameptr;
  __asm {
    mov frameptr, ebp
  };
  // *frameptr is the EBP value pushed by this function's prologue,
  // which is the caller's frame pointer.
  return *frameptr;
}
#endif // !AMD64
383
384 frame os::current_frame() {
385
386 #ifdef AMD64
387 // apparently _asm not supported on windows amd64
388 typedef intptr_t* get_fp_func ();
389 get_fp_func* func = CAST_TO_FN_PTR(get_fp_func*,
390 StubRoutines::x86::get_previous_fp_entry());
391 if (func == NULL) return frame(NULL, NULL, NULL);
392 intptr_t* fp = (*func)();
529 #ifdef AMD64
530 return 0 ;
531 #else
532 // pause == rep:nop
533 // On systems that don't support pause a rep:nop
534 // is executed as a nop. The rep: prefix is ignored.
535 _asm {
536 pause ;
537 };
538 return 1 ;
539 #endif // AMD64
540 }
541
542
// Initialize the x87 FPU for this thread. Loads the VM's standard FPU
// control word (from StubRoutines) via fldcw so floating-point behaves
// as the VM expects. A no-op on win64, where no x87 setup is performed
// here.
void os::setup_fpu() {
#ifndef AMD64
  int fpu_cntrl_word = StubRoutines::fpu_cntrl_wrd_std();
  __asm fldcw fpu_cntrl_word;
#endif // !AMD64
}
|
353 if (ret_fp) *ret_fp = (intptr_t *)NULL;
354 }
355
356 return epc;
357 }
358
359 frame os::fetch_frame_from_context(void* ucVoid) {
360 intptr_t* sp;
361 intptr_t* fp;
362 ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
363 return frame(sp, fp, epc.pc());
364 }
365
366 // VC++ does not save frame pointer on stack in optimized build. It
367 // can be turned off by /Oy-. If we really want to walk C frames,
368 // we can use the StackWalk() API.
369 frame os::get_sender_for_C_frame(frame* fr) {
370 return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
371 }
372
373 #ifndef AMD64
374 // Returns an estimate of the current stack pointer. Result must be guaranteed
375 // to point into the calling threads stack, and be no lower than the current
376 // stack pointer.
377 address os::current_stack_pointer() {
378 int dummy;
379 address sp = (address)&dummy;
380 return sp;
381 }
382 #else
383 // Returns the current stack pointer. Accurate value needed for
384 // os::verify_stack_alignment().
385 address os::current_stack_pointer() {
386 typedef address get_sp_func();
387 get_sp_func* func = CAST_TO_FN_PTR(get_sp_func*,
388 StubRoutines::x86::get_previous_sp_entry());
389 return (*func)();
390 }
391 #endif
392
393
#ifndef AMD64
// Returns the saved frame pointer of this function's caller, i.e. the
// value found at [EBP] in this frame. Relies on 32-bit VC++ emitting a
// standard EBP-based prologue for this function; requires frame pointers
// enabled (/Oy-) for the result to be meaningful.
// NOTE(review): assumes the compiler does not inline this function or
// omit its frame — TODO confirm build flags guarantee this.
intptr_t* _get_previous_fp() {
  intptr_t **frameptr;
  __asm {
    mov frameptr, ebp
  };
  // *frameptr is the EBP value pushed by this function's prologue,
  // which is the caller's frame pointer.
  return *frameptr;
}
#endif // !AMD64
403
404 frame os::current_frame() {
405
406 #ifdef AMD64
407 // apparently _asm not supported on windows amd64
408 typedef intptr_t* get_fp_func ();
409 get_fp_func* func = CAST_TO_FN_PTR(get_fp_func*,
410 StubRoutines::x86::get_previous_fp_entry());
411 if (func == NULL) return frame(NULL, NULL, NULL);
412 intptr_t* fp = (*func)();
549 #ifdef AMD64
550 return 0 ;
551 #else
552 // pause == rep:nop
553 // On systems that don't support pause a rep:nop
554 // is executed as a nop. The rep: prefix is ignored.
555 _asm {
556 pause ;
557 };
558 return 1 ;
559 #endif // AMD64
560 }
561
562
// Initialize the x87 FPU for this thread. Loads the VM's standard FPU
// control word (from StubRoutines) via fldcw so floating-point behaves
// as the VM expects. A no-op on win64, where no x87 setup is performed
// here.
void os::setup_fpu() {
#ifndef AMD64
  int fpu_cntrl_word = StubRoutines::fpu_cntrl_wrd_std();
  __asm fldcw fpu_cntrl_word;
#endif // !AMD64
}
569
570 #ifndef PRODUCT
571 void os::verify_stack_alignment() {
572 #ifdef AMD64
573 assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
574 #endif
575 }
576 #endif
|