< prev index next >

src/share/vm/c1/c1_LIRGenerator.hpp

Print this page




 296 
 297   void trace_block_entry(BlockBegin* block);
 298 
 299   // volatile field operations are never patchable because a klass
 300   // must be loaded to know it's volatile which means that the offset
 301   // is always known as well.
 302   void volatile_field_store(LIR_Opr value, LIR_Address* address, CodeEmitInfo* info);
 303   void volatile_field_load(LIR_Address* address, LIR_Opr result, CodeEmitInfo* info);
 304 
 305   void put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data, BasicType type, bool is_volatile);
 306   void get_Object_unsafe(LIR_Opr dest, LIR_Opr src, LIR_Opr offset, BasicType type, bool is_volatile);
 307 
 308   void arithmetic_call_op (Bytecodes::Code code, LIR_Opr result, LIR_OprList* args);
 309 
 310   void increment_counter(address counter, BasicType type, int step = 1);
 311   void increment_counter(LIR_Address* addr, int step = 1);
 312 
 313   // is_strictfp is only needed for mul and div (and only generates different code on i486)
 314   void arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp, CodeEmitInfo* info = NULL);
 315   // machine dependent.  returns true if it emitted code for the multiply
 316   bool strength_reduce_multiply(LIR_Opr left, int constant, LIR_Opr result, LIR_Opr tmp);
 317 
 318   void store_stack_parameter (LIR_Opr opr, ByteSize offset_from_sp_in_bytes);
 319 
 320   void klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve = false);
 321 
 322   // this loads the length and compares against the index
 323   void array_range_check          (LIR_Opr array, LIR_Opr index, CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info);
 324   // For java.nio.Buffer.checkIndex
 325   void nio_range_check            (LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info);
 326 
 327   void arithmetic_op_int  (Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp);
 328   void arithmetic_op_long (Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL);
 329   void arithmetic_op_fpu  (Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp = LIR_OprFact::illegalOpr);
 330 
 331   void shift_op   (Bytecodes::Code code, LIR_Opr dst_reg, LIR_Opr value, LIR_Opr count, LIR_Opr tmp);
 332 
 333   void logic_op   (Bytecodes::Code code, LIR_Opr dst_reg, LIR_Opr left, LIR_Opr right);
 334 
 335   void monitor_enter (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info);
 336   void monitor_exit  (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no);




 296 
 297   void trace_block_entry(BlockBegin* block);
 298 
 299   // volatile field operations are never patchable because a klass
 300   // must be loaded to know it's volatile which means that the offset
 301   // is always known as well.
 302   void volatile_field_store(LIR_Opr value, LIR_Address* address, CodeEmitInfo* info);
 303   void volatile_field_load(LIR_Address* address, LIR_Opr result, CodeEmitInfo* info);
 304 
 305   void put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data, BasicType type, bool is_volatile);
 306   void get_Object_unsafe(LIR_Opr dest, LIR_Opr src, LIR_Opr offset, BasicType type, bool is_volatile);
 307 
 308   void arithmetic_call_op (Bytecodes::Code code, LIR_Opr result, LIR_OprList* args);
 309 
 310   void increment_counter(address counter, BasicType type, int step = 1);
 311   void increment_counter(LIR_Address* addr, int step = 1);
 312 
 313   // is_strictfp is only needed for mul and div (and only generates different code on i486)
 314   void arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp, CodeEmitInfo* info = NULL);
 315   // machine dependent.  returns true if it emitted code for the multiply
 316   bool strength_reduce_multiply(LIR_Opr left, jint constant, LIR_Opr result, LIR_Opr tmp);
 317 
 318   void store_stack_parameter (LIR_Opr opr, ByteSize offset_from_sp_in_bytes);
 319 
 320   void klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve = false);
 321 
 322   // this loads the length and compares against the index
 323   void array_range_check          (LIR_Opr array, LIR_Opr index, CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info);
 324   // For java.nio.Buffer.checkIndex
 325   void nio_range_check            (LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info);
 326 
 327   void arithmetic_op_int  (Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp);
 328   void arithmetic_op_long (Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL);
 329   void arithmetic_op_fpu  (Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp = LIR_OprFact::illegalOpr);
 330 
 331   void shift_op   (Bytecodes::Code code, LIR_Opr dst_reg, LIR_Opr value, LIR_Opr count, LIR_Opr tmp);
 332 
 333   void logic_op   (Bytecodes::Code code, LIR_Opr dst_reg, LIR_Opr left, LIR_Opr right);
 334 
 335   void monitor_enter (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info);
 336   void monitor_exit  (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no);


< prev index next >