
src/share/vm/c1/c1_LIRGenerator.hpp

rev 12906 : [mq]: gc_interface


   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_C1_C1_LIRGENERATOR_HPP
  26 #define SHARE_VM_C1_C1_LIRGENERATOR_HPP
  27 


  28 #include "c1/c1_Instruction.hpp"
  29 #include "c1/c1_LIR.hpp"
  30 #include "ci/ciMethodData.hpp"
  31 #include "utilities/macros.hpp"
  32 #include "utilities/sizes.hpp"
  33 
  34 // The classes responsible for code emission and register allocation
  35 
  36 
  37 class LIRGenerator;
  38 class LIREmitter;
  39 class Invoke;
  40 class SwitchRange;
  41 class LIRItem;
  42 
  43 typedef GrowableArray<LIRItem*> LIRItemList;
  44 
  45 class SwitchRange: public CompilationResourceObj {
  46  private:
  47   int _low_key;


 133 
 134   void emit_move(LIR_Opr src, LIR_Opr dest);
 135   void move_to_temp(LIR_Opr src);
 136   void move_temp_to(LIR_Opr dest);
 137   void move(ResolveNode* src, ResolveNode* dest);
 138 
 139   LIRGenerator* gen() {
 140     return _gen;
 141   }
 142 
 143  public:
 144   PhiResolver(LIRGenerator* _lir_gen, int max_vregs);
 145   ~PhiResolver();
 146 
 147   void move(LIR_Opr src, LIR_Opr dest);
 148 };
 149 
 150 
 151 // only the classes below belong in the same file
 152 class LIRGenerator: public InstructionVisitor, public BlockClosure {




 153  // LIRGenerator should never get instantiated on the heap.
 154  private:
 155   void* operator new(size_t size) throw();
 156   void* operator new[](size_t size) throw();
 157   void operator delete(void* p) { ShouldNotReachHere(); }
 158   void operator delete[](void* p) { ShouldNotReachHere(); }
 159 
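The private operator new/delete declarations above are the usual HotSpot idiom for forcing stack-only allocation. A minimal standalone sketch of the same idiom follows, assuming nothing about the VM's types; it uses C++11 "= delete" where the header instead declares the operators private and traps in operator delete.

#include <cstddef>

// Standalone sketch of the stack-only allocation idiom used by LIRGenerator:
// with operator new unusable, instances can only live on the stack or be
// embedded in another object, so "new StackOnly()" does not compile.
class StackOnly {
 private:
  void* operator new(std::size_t) = delete;    // the header declares these private
  void* operator new[](std::size_t) = delete;  // and makes operator delete trap
 public:
  int value;
  StackOnly() : value(0) {}
};

void use_stack_only() {
  StackOnly s;              // fine: automatic (stack) storage
  s.value = 42;
  // StackOnly* p = new StackOnly();  // would fail to compile: operator new is deleted
}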
 160   Compilation*  _compilation;
 161   ciMethod*     _method;    // method that we are compiling
 162   PhiResolverState  _resolver_state;
 163   BlockBegin*   _block;
 164   int           _virtual_register_number;
 165   Values        _instruction_for_operand;
 166   BitMap2D      _vreg_flags; // flags which can be set on a per-vreg basis
 167   LIR_List*     _lir;
 168   BarrierSet*   _bs;
 169 
 170   LIRGenerator* gen() {
 171     return this;
 172   }
 173 
 174   void print_if_not_loaded(const NewInstance* new_instance) PRODUCT_RETURN;
 175 
 176 #ifdef ASSERT
 177   LIR_List* lir(const char * file, int line) const {
 178     _lir->set_file_and_line(file, line);
 179     return _lir;
 180   }
 181 #endif
 182   LIR_List* lir() const {
 183     return _lir;
 184   }
 185 
 186   // a simple cache of constants used within a block
 187   GrowableArray<LIR_Const*>       _constants;
 188   LIR_OprList                     _reg_for_constants;


 252   void do_ArrayCopy(Intrinsic* x);
 253   void do_CompareAndSwap(Intrinsic* x, ValueType* type);
 254   void do_NIOCheckIndex(Intrinsic* x);
 255   void do_FPIntrinsics(Intrinsic* x);
 256   void do_Reference_get(Intrinsic* x);
 257   void do_update_CRC32(Intrinsic* x);
 258   void do_update_CRC32C(Intrinsic* x);
 259   void do_vectorizedMismatch(Intrinsic* x);
 260 
 261   LIR_Opr call_runtime(BasicTypeArray* signature, LIRItemList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
 262   LIR_Opr call_runtime(BasicTypeArray* signature, LIR_OprList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
 263 
 264   // convenience functions
 265   LIR_Opr call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info);
 266   LIR_Opr call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info);
 267 
 268   // GC Barriers
 269 
 270   // generic interface
 271 
 272   void pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, bool do_load, bool patch, CodeEmitInfo* info);
 273   void post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);





















 274 
 275   // specific implementations
 276   // pre barriers
 277 
 278   void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
 279                                          bool do_load, bool patch, CodeEmitInfo* info);
 280 
 281   // post barriers
 282 
 283   void G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
 284   void CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
 285 #ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
 286   void CardTableModRef_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base);
 287 #endif
 288 
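CardTableModRef_post_barrier boils down to dirtying the card that covers the updated field address. The standalone sketch below models just that computation; the 512-byte card size matches HotSpot's default, but the CardTableSketch type, its field names, and the zero dirty-card value are simplified stand-ins rather than the VM's own declarations.

#include <cstdint>

// Simplified model of what the card-table post barrier emits: compute the
// card index for the stored-to address and mark that card dirty. Card size
// is 512 bytes (shift 9), HotSpot's default; names and the dirty value here
// are illustrative, not the VM's declarations.
struct CardTableSketch {
  static const int card_shift = 9;        // 2^9 = 512-byte cards
  static const uint8_t dirty_card = 0;    // stand-in for the "dirty" marker value
  uint8_t* byte_map_base;                 // biased base: card = byte_map_base[addr >> shift]

  void post_barrier(void* field_addr) {
    byte_map_base[reinterpret_cast<uintptr_t>(field_addr) >> card_shift] = dirty_card;
  }
};

The G1 variant additionally filters same-region and null stores and enqueues the card into a dirty-card queue rather than writing it directly, which is roughly why it needs its own entry point.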
 289 
 290   static LIR_Opr result_register_for(ValueType* type, bool callee = false);
 291 
 292   ciObject* get_jobject_constant(Value value);
 293 
 294   LIRItemList* invoke_visit_arguments(Invoke* x);
 295   void invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list);
 296 
 297   void trace_block_entry(BlockBegin* block);
 298 
 299   // volatile field operations are never patchable because a klass
 300   // must be loaded to know it is volatile, which means that the offset
 301   // is always known as well.
 302   void volatile_field_store(LIR_Opr value, LIR_Address* address, CodeEmitInfo* info);
 303   void volatile_field_load(LIR_Address* address, LIR_Opr result, CodeEmitInfo* info);
 304 
 305   void put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data, BasicType type, bool is_volatile);
 306   void get_Object_unsafe(LIR_Opr dest, LIR_Opr src, LIR_Opr offset, BasicType type, bool is_volatile);
 307 
 308   void arithmetic_call_op (Bytecodes::Code code, LIR_Opr result, LIR_OprList* args);


 417   static LIR_Condition lir_cond(If::Condition cond) {
 418     LIR_Condition l = lir_cond_unknown;
 419     switch (cond) {
 420     case If::eql: l = lir_cond_equal;        break;
 421     case If::neq: l = lir_cond_notEqual;     break;
 422     case If::lss: l = lir_cond_less;         break;
 423     case If::leq: l = lir_cond_lessEqual;    break;
 424     case If::geq: l = lir_cond_greaterEqual; break;
 425     case If::gtr: l = lir_cond_greater;      break;
 426     case If::aeq: l = lir_cond_aboveEqual;   break;
 427     case If::beq: l = lir_cond_belowEqual;   break;
 428     default: fatal("You must pass valid If::Condition");
 429     };
 430     return l;
 431   }
 432 
 433 #ifdef __SOFTFP__
 434   void do_soft_float_compare(If *x);
 435 #endif // __SOFTFP__
 436 
 437   void init();
 438 
 439   SwitchRangeArray* create_lookup_ranges(TableSwitch* x);
 440   SwitchRangeArray* create_lookup_ranges(LookupSwitch* x);
 441   void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux);
 442 
 443 #ifdef TRACE_HAVE_INTRINSICS
 444   void do_ClassIDIntrinsic(Intrinsic* x);
 445   void do_getBufferWriter(Intrinsic* x);
 446 #endif
 447 
 448   void do_RuntimeCall(address routine, Intrinsic* x);
 449 
 450   ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k,
 451                         Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
 452                         ciKlass* callee_signature_k);
 453   void profile_arguments(ProfileCall* x);
 454   void profile_parameters(Base* x);
 455   void profile_parameters_at_call(ProfileCall* x);

 456   LIR_Opr maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info);
 457 
 458  public:
 459   Compilation*  compilation() const              { return _compilation; }
 460   FrameMap*     frame_map() const                { return _compilation->frame_map(); }
 461   ciMethod*     method() const                   { return _method; }
 462   BlockBegin*   block() const                    { return _block; }
 463   IRScope*      scope() const                    { return block()->scope(); }
 464 
 465   int max_virtual_register_number() const        { return _virtual_register_number; }
 466 
 467   void block_do(BlockBegin* block);
 468 
 469   // Flags that can be set on vregs
 470   enum VregFlag {
 471       must_start_in_memory = 0  // needs to be assigned a memory location at the beginning, but may then be loaded into a register
 472     , callee_saved     = 1    // must be in a callee saved register
 473     , byte_reg         = 2    // must be in a byte register
 474     , num_vreg_flags
 475 
 476   };
 477 
 478   LIRGenerator(Compilation* compilation, ciMethod* method)
 479     : _compilation(compilation)
 480     , _method(method)
 481     , _virtual_register_number(LIR_OprDesc::vreg_base)
 482     , _vreg_flags(num_vreg_flags) {
 483     init();
 484   }
 485 
 486   // for virtual registers, maps them back to Phis or Locals
 487   Instruction* instruction_for_opr(LIR_Opr opr);
 488   Instruction* instruction_for_vreg(int reg_num);
 489 
 490   void set_vreg_flag   (int vreg_num, VregFlag f);
 491   bool is_vreg_flag_set(int vreg_num, VregFlag f);
 492   void set_vreg_flag   (LIR_Opr opr,  VregFlag f) { set_vreg_flag(opr->vreg_number(), f); }
 493   bool is_vreg_flag_set(LIR_Opr opr,  VregFlag f) { return is_vreg_flag_set(opr->vreg_number(), f); }
 494 
 495   // statics
 496   static LIR_Opr exceptionOopOpr();
 497   static LIR_Opr exceptionPcOpr();
 498   static LIR_Opr divInOpr();
 499   static LIR_Opr divOutOpr();
 500   static LIR_Opr remOutOpr();
 501 #ifdef S390
 502   // On S390 we can do ldiv and lrem without a runtime call.
 503   static LIR_Opr ldivInOpr();




   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_C1_C1_LIRGENERATOR_HPP
  26 #define SHARE_VM_C1_C1_LIRGENERATOR_HPP
  27 
  28 #include "gc/shared/c1BarrierSetCodeGen.hpp"
  29 #include "c1/c1_Decorators.hpp"
  30 #include "c1/c1_Instruction.hpp"
  31 #include "c1/c1_LIR.hpp"
  32 #include "ci/ciMethodData.hpp"
  33 #include "utilities/macros.hpp"
  34 #include "utilities/sizes.hpp"
  35 
  36 // The classes responsible for code emission and register allocation
  37 
  38 
  39 class LIRGenerator;
  40 class LIREmitter;
  41 class Invoke;
  42 class SwitchRange;
  43 class LIRItem;
  44 
  45 typedef GrowableArray<LIRItem*> LIRItemList;
  46 
  47 class SwitchRange: public CompilationResourceObj {
  48  private:
  49   int _low_key;


 135 
 136   void emit_move(LIR_Opr src, LIR_Opr dest);
 137   void move_to_temp(LIR_Opr src);
 138   void move_temp_to(LIR_Opr dest);
 139   void move(ResolveNode* src, ResolveNode* dest);
 140 
 141   LIRGenerator* gen() {
 142     return _gen;
 143   }
 144 
 145  public:
 146   PhiResolver(LIRGenerator* _lir_gen, int max_vregs);
 147   ~PhiResolver();
 148 
 149   void move(LIR_Opr src, LIR_Opr dest);
 150 };
 151 
 152 
 153 // only the classes below belong in the same file
 154 class LIRGenerator: public InstructionVisitor, public BlockClosure {
 155   friend class C1BarrierSetCodeGen;
 156   friend class C1ModRefBSCodeGen;
 157   friend class C1CardTableModRefBSCodeGen;
 158   friend class C1G1BSCodeGen;
 159  // LIRGenerator should never get instantiated on the heap.
 160  private:
 161   void* operator new(size_t size) throw();
 162   void* operator new[](size_t size) throw();
 163   void operator delete(void* p) { ShouldNotReachHere(); }
 164   void operator delete[](void* p) { ShouldNotReachHere(); }
 165 
 166   Compilation*  _compilation;
 167   ciMethod*     _method;    // method that we are compiling
 168   PhiResolverState  _resolver_state;
 169   BlockBegin*   _block;
 170   int           _virtual_register_number;
 171   Values        _instruction_for_operand;
 172   BitMap2D      _vreg_flags; // flags which can be set on a per-vreg basis
 173   LIR_List*     _lir;

 174 
 175   LIRGenerator* gen() {
 176     return this;
 177   }
 178 
 179   void print_if_not_loaded(const NewInstance* new_instance) PRODUCT_RETURN;
 180 
 181 #ifdef ASSERT
 182   LIR_List* lir(const char * file, int line) const {
 183     _lir->set_file_and_line(file, line);
 184     return _lir;
 185   }
 186 #endif
 187   LIR_List* lir() const {
 188     return _lir;
 189   }
 190 
 191   // a simple cache of constants used within a block
 192   GrowableArray<LIR_Const*>       _constants;
 193   LIR_OprList                     _reg_for_constants;


 257   void do_ArrayCopy(Intrinsic* x);
 258   void do_CompareAndSwap(Intrinsic* x, ValueType* type);
 259   void do_NIOCheckIndex(Intrinsic* x);
 260   void do_FPIntrinsics(Intrinsic* x);
 261   void do_Reference_get(Intrinsic* x);
 262   void do_update_CRC32(Intrinsic* x);
 263   void do_update_CRC32C(Intrinsic* x);
 264   void do_vectorizedMismatch(Intrinsic* x);
 265 
 266   LIR_Opr call_runtime(BasicTypeArray* signature, LIRItemList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
 267   LIR_Opr call_runtime(BasicTypeArray* signature, LIR_OprList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
 268 
 269   // convenience functions
 270   LIR_Opr call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info);
 271   LIR_Opr call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info);
 272 
 273   // GC Barriers
 274 
 275   // generic interface
 276 
 277   void access_store_at(C1DecoratorSet decorators, BasicType type,
 278                        LIRItem& base, LIR_Opr offset, LIR_Opr value,
 279                        CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info);
 280 
 281   LIR_Opr access_load_at(C1DecoratorSet decorators, BasicType type,
 282                          LIRItem& base, LIR_Opr offset,
 283                          CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info);
 284 
 285   LIR_Opr access_cas_at(C1DecoratorSet decorators, BasicType type,
 286                         LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value);
 287 
 288   LIR_Opr access_swap_at(C1DecoratorSet decorators, BasicType type,
 289                          LIRItem& base, LIRItem& offset, LIRItem& value);
 290 
 291   LIR_Opr access_add_at(C1DecoratorSet decorators, BasicType type,
 292                         LIRItem& base, LIRItem& offset, LIRItem& value);
 293 
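Compared with the old pre_barrier/post_barrier entry points, the access_* calls above hand the whole access to the active GC's C1 code generator (the C1BarrierSetCodeGen friend classes), selected by decorator bits. The sketch below only models that dispatch shape in standalone C++; the decorator name and the BarrierCodeGenSketch class are invented for illustration and do not match the patch's actual c1BarrierSetCodeGen API.

#include <cstdint>
#include <cstdio>

// Illustrative model of decorator-driven barrier dispatch: the generator calls
// a generic store_at(), and the GC-specific code generator decides which
// barriers to wrap around the raw store. Names here are invented for the
// sketch, not taken from the gc_interface patch.
typedef uint64_t DecoratorSetSketch;
const DecoratorSetSketch ACCESS_ON_HEAP = 1u << 0;   // hypothetical decorator bit

struct BarrierCodeGenSketch {
  virtual ~BarrierCodeGenSketch() {}
  void raw_store(void** field, void* value) { *field = value; }   // shared raw access
  virtual void store_at(DecoratorSetSketch, void** field, void* value) {
    raw_store(field, value);                                      // barrier-free default
  }
};

struct CardTableCodeGenSketch : public BarrierCodeGenSketch {
  virtual void store_at(DecoratorSetSketch d, void** field, void* value) {
    raw_store(field, value);
    if (d & ACCESS_ON_HEAP) {
      std::printf("emit card-mark post barrier for %p\n", (void*)field);  // stands in for LIR emission
    }
  }
};

// The generator only ever sees the generic interface.
void access_store_at_sketch(BarrierCodeGenSketch* gc, void** field, void* value) {
  gc->store_at(ACCESS_ON_HEAP, field, value);
}

The point of the indirection is that do_StoreField and the unsafe intrinsics no longer need to know which collector is running; each barrier set plugs in its own code generator.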
 294   // These need to guarantee that JMM volatile semantics are preserved on each platform
 295   // and require one implementation per architecture.
 296   LIR_Opr cas(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value);
 297   LIR_Opr swap(BasicType type, LIR_Opr addr, LIRItem& new_value);
 298   LIR_Opr add(BasicType type, LIR_Opr addr, LIRItem& new_value);
 299   void array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci);
 300 
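The cas/swap/add helpers declared above have to preserve JMM volatile (effectively sequentially consistent) ordering and are implemented once per architecture. The standalone sketch below only illustrates that ordering contract with std::atomic; it is not the LIR the backends emit, and the _sketch names are invented.

#include <atomic>
#include <cstdint>

// Semantic model of the contract the per-architecture cas/swap/add LIR must
// meet: each is a sequentially consistent atomic read-modify-write.
bool cas_sketch(std::atomic<intptr_t>& addr, intptr_t cmp_value, intptr_t new_value) {
  // True if the value at addr was cmp_value and has been replaced by new_value.
  return addr.compare_exchange_strong(cmp_value, new_value, std::memory_order_seq_cst);
}

intptr_t swap_sketch(std::atomic<intptr_t>& addr, intptr_t new_value) {
  return addr.exchange(new_value, std::memory_order_seq_cst);     // returns the previous value
}

intptr_t add_sketch(std::atomic<intptr_t>& addr, intptr_t delta) {
  return addr.fetch_add(delta, std::memory_order_seq_cst);        // returns the value before the add
}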
 301   // specific implementations













 302 
 303   static LIR_Opr result_register_for(ValueType* type, bool callee = false);
 304 
 305   ciObject* get_jobject_constant(Value value);
 306 
 307   LIRItemList* invoke_visit_arguments(Invoke* x);
 308   void invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list);
 309 
 310   void trace_block_entry(BlockBegin* block);
 311 
 312   // volatile field operations are never patchable because a klass
 313   // must be loaded to know it is volatile, which means that the offset
 314   // is always known as well.
 315   void volatile_field_store(LIR_Opr value, LIR_Address* address, CodeEmitInfo* info);
 316   void volatile_field_load(LIR_Address* address, LIR_Opr result, CodeEmitInfo* info);
 317 
 318   void put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data, BasicType type, bool is_volatile);
 319   void get_Object_unsafe(LIR_Opr dest, LIR_Opr src, LIR_Opr offset, BasicType type, bool is_volatile);
 320 
 321   void arithmetic_call_op (Bytecodes::Code code, LIR_Opr result, LIR_OprList* args);


 430   static LIR_Condition lir_cond(If::Condition cond) {
 431     LIR_Condition l = lir_cond_unknown;
 432     switch (cond) {
 433     case If::eql: l = lir_cond_equal;        break;
 434     case If::neq: l = lir_cond_notEqual;     break;
 435     case If::lss: l = lir_cond_less;         break;
 436     case If::leq: l = lir_cond_lessEqual;    break;
 437     case If::geq: l = lir_cond_greaterEqual; break;
 438     case If::gtr: l = lir_cond_greater;      break;
 439     case If::aeq: l = lir_cond_aboveEqual;   break;
 440     case If::beq: l = lir_cond_belowEqual;   break;
 441     default: fatal("You must pass valid If::Condition");
 442     };
 443     return l;
 444   }
 445 
 446 #ifdef __SOFTFP__
 447   void do_soft_float_compare(If *x);
 448 #endif // __SOFTFP__
 449 


 450   SwitchRangeArray* create_lookup_ranges(TableSwitch* x);
 451   SwitchRangeArray* create_lookup_ranges(LookupSwitch* x);
 452   void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux);
 453 
 454 #ifdef TRACE_HAVE_INTRINSICS
 455   void do_ClassIDIntrinsic(Intrinsic* x);
 456   void do_getBufferWriter(Intrinsic* x);
 457 #endif
 458 
 459   void do_RuntimeCall(address routine, Intrinsic* x);
 460 
 461   ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k,
 462                         Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
 463                         ciKlass* callee_signature_k);
 464   void profile_arguments(ProfileCall* x);
 465   void profile_parameters(Base* x);
 466   void profile_parameters_at_call(ProfileCall* x);
 467   LIR_Opr mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info);
 468   LIR_Opr maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info);
 469 
 470  public:
 471   Compilation*  compilation() const              { return _compilation; }
 472   FrameMap*     frame_map() const                { return _compilation->frame_map(); }
 473   ciMethod*     method() const                   { return _method; }
 474   BlockBegin*   block() const                    { return _block; }
 475   IRScope*      scope() const                    { return block()->scope(); }
 476 
 477   int max_virtual_register_number() const        { return _virtual_register_number; }
 478 
 479   void block_do(BlockBegin* block);
 480 
 481   // Flags that can be set on vregs
 482   enum VregFlag {
 483       must_start_in_memory = 0  // needs to be assigned a memory location at the beginning, but may then be loaded into a register
 484     , callee_saved     = 1    // must be in a callee saved register
 485     , byte_reg         = 2    // must be in a byte register
 486     , num_vreg_flags
 487 
 488   };
 489 
 490   LIRGenerator(Compilation* compilation, ciMethod* method)
 491     : _compilation(compilation)
 492     , _method(method)
 493     , _virtual_register_number(LIR_OprDesc::vreg_base)
 494     , _vreg_flags(num_vreg_flags) {

 495   }
 496 
 497   // for virtual registers, maps them back to Phis or Locals
 498   Instruction* instruction_for_opr(LIR_Opr opr);
 499   Instruction* instruction_for_vreg(int reg_num);
 500 
 501   void set_vreg_flag   (int vreg_num, VregFlag f);
 502   bool is_vreg_flag_set(int vreg_num, VregFlag f);
 503   void set_vreg_flag   (LIR_Opr opr,  VregFlag f) { set_vreg_flag(opr->vreg_number(), f); }
 504   bool is_vreg_flag_set(LIR_Opr opr,  VregFlag f) { return is_vreg_flag_set(opr->vreg_number(), f); }
 505 
 506   // statics
 507   static LIR_Opr exceptionOopOpr();
 508   static LIR_Opr exceptionPcOpr();
 509   static LIR_Opr divInOpr();
 510   static LIR_Opr divOutOpr();
 511   static LIR_Opr remOutOpr();
 512 #ifdef S390
 513   // On S390 we can do ldiv and lrem without a runtime call.
 514   static LIR_Opr ldivInOpr();

