src/cpu/x86/vm/macroAssembler_x86.cpp

  Label loop;
  bind(loop);
  movl(Address(tmp, (-os::vm_page_size())), size);
  subptr(tmp, os::vm_page_size());
  subl(size, os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again.  (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.)  Skip this address by starting at i=1, and
  // touch a few more pages below.  N.B.  It is important to touch all
  // the way down to and including i=StackShadowPages.
  for (int i = 1; i < StackShadowPages; i++) {
    // This could be any sized move, but since it can serve as a debugging
    // crumb the bigger the better.
    movptr(Address(tmp, (-i*os::vm_page_size())), size);
  }
}

void MacroAssembler::reserved_stack_check() {
  // Test if the reserved stack zone needs to be enabled.
  Label no_reserved_zone_enabling;
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(get_thread(rsi);)

  cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset()));
  jcc(Assembler::below, no_reserved_zone_enabling);

  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread);
  jump(RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}
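In rough terms, the emitted sequence is equivalent to the sketch below; reserved_stack_check_sketch and the two helpers are hypothetical stand-ins for the call_VM_leaf target and the stub jump above:

static void enable_stack_reserved_zone();         // hypothetical stand-ins for
static void throw_delayed_StackOverflowError();   // the real VM entry points

static void reserved_stack_check_sketch(char* rsp, char* reserved_stack_activation) {
  // Mirrors the cmpptr/jcc pair above: jcc(below, ...) skips enabling, so
  // the enabling path runs only when rsp >= the activation watermark.
  if (rsp >= reserved_stack_activation) {
    enable_stack_reserved_zone();         // call_VM_leaf(...) above
    throw_delayed_StackOverflowError();   // jump(RuntimeAddress(...)) above
    // should_not_reach_here()
  }
  // Fall through: no reserved zone enabling needed.
}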

int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
  assert(tmp_reg != noreg, "tmp_reg must be supplied");
  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);

  if (PrintBiasedLockingStatistics && counters == NULL) {
    counters = BiasedLocking::counters();
  }
  // Biased locking
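  // For reference, a sketch of the mark word bit layout that the age_shift
  // assert above relies on (assumed standard HotSpot values: lock_bits = 2,
  // biased_lock_bits = 1, hence age_shift = 3), written low bits first:
  //
  //   [ lock:2 | biased_lock:1 | age:4 | (epoch / hash / thread bits) ... ]
  //
  // The age field begins immediately above the biased-lock bit, which is
  // exactly what markOopDesc::age_shift == lock_bits + biased_lock_bits
  // states; the mask and shift constants used below are built from this
  // layout.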