< prev index next >

src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp

Print this page
rev 10997 : 8154957: AArch64: Better byte behavior
Summary: The fix for 8132051 is needed for AArch64.
Reviewed-by: roland


 314   bool needs_range_check = x->compute_needs_range_check();
 315   bool use_length = x->length() != NULL;
 316   bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
 317   bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
 318                                          !get_jobject_constant(x->value())->is_null_object() ||
 319                                          x->should_profile());
 320 
 321   LIRItem array(x->array(), this);
 322   LIRItem index(x->index(), this);
 323   LIRItem value(x->value(), this);
 324   LIRItem length(this);
 325 
 326   array.load_item();
 327   index.load_nonconstant();
 328 
 329   if (use_length && needs_range_check) {
 330     length.set_instruction(x->length());
 331     length.load_item();
 332 
 333   }
 334   if (needs_store_check) {
 335     value.load_item();
 336   } else {
 337     value.load_for_store(x->elt_type());
 338   }
 339 
 340   set_no_result(x);
 341 
 342   // the CodeEmitInfo must be duplicated for each different
 343   // LIR-instruction because spilling can occur anywhere between two
 344   // instructions and so the debug information must be different
 345   CodeEmitInfo* range_check_info = state_for(x);
 346   CodeEmitInfo* null_check_info = NULL;
 347   if (x->needs_null_check()) {
 348     null_check_info = new CodeEmitInfo(range_check_info);
 349   }
 350 
 351   // emit array address setup early so it schedules better
 352   // FIXME?  No harm in this on aarch64, and it might help
 353   LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
 354 


 363     }
 364   }
 365 
 366   if (GenerateArrayStoreCheck && needs_store_check) {
 367     LIR_Opr tmp1 = new_register(objectType);
 368     LIR_Opr tmp2 = new_register(objectType);
 369     LIR_Opr tmp3 = new_register(objectType);
 370 
 371     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
 372     __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
 373   }
 374 
 375   if (obj_store) {
 376     // Needs GC write barriers.
 377     pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
 378                 true /* do_load */, false /* patch */, NULL);
 379     __ move(value.result(), array_addr, null_check_info);
 380     // Seems to be a precise
 381     post_barrier(LIR_OprFact::address(array_addr), value.result());
 382   } else {
 383     __ move(value.result(), array_addr, null_check_info);

 384   }
 385 }
 386 
 387 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 388   assert(x->is_pinned(),"");
 389   LIRItem obj(x->obj(), this);
 390   obj.load_item();
 391 
 392   set_no_result(x);
 393 
 394   // "lock" stores the address of the monitor stack slot, so this is not an oop
 395   LIR_Opr lock = new_register(T_INT);
 396   // Need a scratch register for biased locking
 397   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 398   if (UseBiasedLocking) {
 399     scratch = new_register(T_INT);
 400   }
 401 
 402   CodeEmitInfo* info_for_exception = NULL;
 403   if (x->needs_null_check()) {




 314   bool needs_range_check = x->compute_needs_range_check();
 315   bool use_length = x->length() != NULL;
 316   bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
 317   bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
 318                                          !get_jobject_constant(x->value())->is_null_object() ||
 319                                          x->should_profile());
 320 
 321   LIRItem array(x->array(), this);
 322   LIRItem index(x->index(), this);
 323   LIRItem value(x->value(), this);
 324   LIRItem length(this);
 325 
 326   array.load_item();
 327   index.load_nonconstant();
 328 
 329   if (use_length && needs_range_check) {
 330     length.set_instruction(x->length());
 331     length.load_item();
 332 
 333   }
 334   if (needs_store_check || x->check_boolean()) {
 335     value.load_item();
 336   } else {
 337     value.load_for_store(x->elt_type());
 338   }
 339 
 340   set_no_result(x);
 341 
 342   // the CodeEmitInfo must be duplicated for each different
 343   // LIR-instruction because spilling can occur anywhere between two
 344   // instructions and so the debug information must be different
 345   CodeEmitInfo* range_check_info = state_for(x);
 346   CodeEmitInfo* null_check_info = NULL;
 347   if (x->needs_null_check()) {
 348     null_check_info = new CodeEmitInfo(range_check_info);
 349   }
 350 
 351   // emit array address setup early so it schedules better
 352   // FIXME?  No harm in this on aarch64, and it might help
 353   LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
 354 


 363     }
 364   }
 365 
 366   if (GenerateArrayStoreCheck && needs_store_check) {
 367     LIR_Opr tmp1 = new_register(objectType);
 368     LIR_Opr tmp2 = new_register(objectType);
 369     LIR_Opr tmp3 = new_register(objectType);
 370 
 371     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
 372     __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
 373   }
 374 
 375   if (obj_store) {
 376     // Needs GC write barriers.
 377     pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
 378                 true /* do_load */, false /* patch */, NULL);
 379     __ move(value.result(), array_addr, null_check_info);
 380     // Seems to be a precise
 381     post_barrier(LIR_OprFact::address(array_addr), value.result());
 382   } else {
 383     LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
 384     __ move(result, array_addr, null_check_info);
 385   }
 386 }
 387 
 388 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 389   assert(x->is_pinned(),"");
 390   LIRItem obj(x->obj(), this);
 391   obj.load_item();
 392 
 393   set_no_result(x);
 394 
 395   // "lock" stores the address of the monitor stack slot, so this is not an oop
 396   LIR_Opr lock = new_register(T_INT);
 397   // Need a scratch register for biased locking
 398   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 399   if (UseBiasedLocking) {
 400     scratch = new_register(T_INT);
 401   }
 402 
 403   CodeEmitInfo* info_for_exception = NULL;
 404   if (x->needs_null_check()) {


< prev index next >