
src/cpu/x86/vm/relocInfo_x86.cpp

Old version (file lines 50-97, the tail of Relocation::pd_set_data_value followed by the start of Relocation::pd_call_destination):

    // both compressed oops and compressed classes look the same
    if (Universe::heap()->is_in_reserved((oop)x)) {
      if (verify_only) {
        guarantee(*(uint32_t*) disp == oopDesc::encode_heap_oop((oop)x), "instructions must match");
      } else {
        *(int32_t*) disp = oopDesc::encode_heap_oop((oop)x);
      }
    } else {
      if (verify_only) {
        guarantee(*(uint32_t*) disp == Klass::encode_klass((Klass*)x), "instructions must match");
      } else {
        *(int32_t*) disp = Klass::encode_klass((Klass*)x);
      }
    }
  } else {
    // Note:  Use runtime_call_type relocations for call32_operand.
    address ip = addr();
    address disp = Assembler::locate_operand(ip, which);
    address next_ip = Assembler::locate_next_instruction(ip);
    if (verify_only) {
      assert(*(int32_t*) disp == (x - next_ip), "instructions must match");
    } else {
      *(int32_t*) disp = x - next_ip;
    }
  }
#else
  if (verify_only) {
    assert(*pd_address_in_code() == (x + o), "instructions must match");
  } else {
    *pd_address_in_code() = x + o;
  }
#endif // AMD64
}


address Relocation::pd_call_destination(address orig_addr) {
  intptr_t adj = 0;
  if (orig_addr != NULL) {
    // We just moved this call instruction from orig_addr to addr().
    // This means its target will appear to have grown by addr() - orig_addr.
    adj = -( addr() - orig_addr );
  }
  NativeInstruction* ni = nativeInstruction_at(addr());
  if (ni->is_call()) {
    return nativeCall_at(addr())->destination() + adj;
  } else if (ni->is_jump()) {
    return nativeJump_at(addr())->jump_destination() + adj;
  } else if (ni->is_cond_jump()) {

New version (file lines 50-97): the two remaining asserts in pd_set_data_value, in the disp32 branch and in the non-AMD64 branch, become guarantees, so the checks are also performed in product builds, where asserts compile away.

    // both compressed oops and compressed classes look the same
    if (Universe::heap()->is_in_reserved((oop)x)) {
      if (verify_only) {
        guarantee(*(uint32_t*) disp == oopDesc::encode_heap_oop((oop)x), "instructions must match");
      } else {
        *(int32_t*) disp = oopDesc::encode_heap_oop((oop)x);
      }
    } else {
      if (verify_only) {
        guarantee(*(uint32_t*) disp == Klass::encode_klass((Klass*)x), "instructions must match");
      } else {
        *(int32_t*) disp = Klass::encode_klass((Klass*)x);
      }
    }
  } else {
    // Note:  Use runtime_call_type relocations for call32_operand.
    address ip = addr();
    address disp = Assembler::locate_operand(ip, which);
    address next_ip = Assembler::locate_next_instruction(ip);
    if (verify_only) {
      guarantee(*(int32_t*) disp == (x - next_ip), "instructions must match");
    } else {
      *(int32_t*) disp = x - next_ip;
    }
  }
#else
  if (verify_only) {
    guarantee(*pd_address_in_code() == (x + o), "instructions must match");
  } else {
    *pd_address_in_code() = x + o;
  }
#endif // AMD64
}
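
The narrow-oop branch above works because compressed oops and compressed class pointers share one encoding shape, a 32-bit value of the form (ptr - base) >> shift, so the patcher only needs Universe::heap()->is_in_reserved() to decide which encoder to call. A minimal standalone sketch of that shape; the helper names, base, and shift below are made up for illustration and are not HotSpot's actual CompressedOops/CompressedKlassPointers machinery:

// Standalone sketch, not HotSpot code: both kinds of narrow pointer are
// (ptr - base) >> shift, which is why "both compressed oops and compressed
// classes look the same" to the relocation code.
#include <cassert>
#include <cstdint>

static uint32_t encode_narrow(uintptr_t ptr, uintptr_t base, int shift) {
  return (uint32_t)((ptr - base) >> shift);
}

static uintptr_t decode_narrow(uint32_t narrow, uintptr_t base, int shift) {
  return base + ((uintptr_t)narrow << shift);
}

int main() {
  const uintptr_t base  = 0x100000000ULL;  // hypothetical reserved-region base
  const int       shift = 3;               // 8-byte object alignment
  uintptr_t ptr = base + 0x12345678;       // an 8-byte-aligned address

  uint32_t narrow = encode_narrow(ptr, base, shift);
  assert(decode_narrow(narrow, base, shift) == ptr);  // encoding round-trips
  return 0;
}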
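
In the disp32 branch, the stored value is x - next_ip because x86 call/jump rel32 operands are relative to the address of the instruction that follows them. A standalone sketch of that convention; the buffer layout and helper names are hypothetical, not HotSpot code:

// Standalone sketch: how a rel32 operand encodes its target. The
// displacement is relative to the end of the instruction, which is why
// pd_set_data_value stores x - next_ip.
#include <cassert>
#include <cstdint>

typedef unsigned char* address;

// 'disp' points at the 4-byte displacement field inside the instruction,
// 'next_ip' is the address of the instruction that follows it.
static void patch_rel32(address disp, address next_ip, address target) {
  *(int32_t*) disp = (int32_t)(target - next_ip);
}

static address resolve_rel32(address disp, address next_ip) {
  return next_ip + *(int32_t*) disp;
}

int main() {
  unsigned char code[16] = {0};
  address ip      = code;       // start of a hypothetical call rel32 (0xE8)
  address disp    = ip + 1;     // displacement follows the opcode byte
  address next_ip = ip + 5;     // call rel32 is 5 bytes long
  address target  = code + 11;  // some target in the same buffer

  patch_rel32(disp, next_ip, target);
  assert(resolve_rel32(disp, next_ip) == target);  // patching round-trips
  return 0;
}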

address Relocation::pd_call_destination(address orig_addr) {
  intptr_t adj = 0;
  if (orig_addr != NULL) {
    // We just moved this call instruction from orig_addr to addr().
    // This means its target will appear to have grown by addr() - orig_addr.
    adj = -( addr() - orig_addr );
  }
  NativeInstruction* ni = nativeInstruction_at(addr());
  if (ni->is_call()) {
    return nativeCall_at(addr())->destination() + adj;
  } else if (ni->is_jump()) {
    return nativeJump_at(addr())->jump_destination() + adj;
  } else if (ni->is_cond_jump()) {
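
The adjustment in pd_call_destination is plain pointer arithmetic: a PC-relative operand that was byte-copied from orig_addr to addr() decodes, at the new address, to a destination that appears to have grown by exactly addr() - orig_addr, so adding adj = -(addr() - orig_addr) recovers the original target. A standalone sketch, assuming a 5-byte call and made-up buffer offsets:

// Standalone sketch, not HotSpot code: why pd_call_destination applies
// adj = -(addr() - orig_addr) after code has been copied but not yet fixed up.
#include <cassert>
#include <cstdint>

typedef unsigned char* address;

int main() {
  unsigned char buf[64] = {0};
  address orig_addr = buf;       // where the call originally lived
  address new_addr  = buf + 16;  // where it was copied to
  address target    = buf + 40;  // the real destination

  // Encode the call at its original location: rel32 is relative to the
  // address of the next instruction (the call is 5 bytes long).
  int32_t rel32 = (int32_t)(target - (orig_addr + 5));

  // The raw bytes (including rel32) were copied to new_addr, so decoding at
  // the new location yields a destination grown by (new_addr - orig_addr).
  address decoded = (new_addr + 5) + rel32;
  assert(decoded == target + (new_addr - orig_addr));

  // The adjustment used by pd_call_destination recovers the original target.
  intptr_t adj = -(new_addr - orig_addr);
  assert(decoded + adj == target);
  return 0;
}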