src/hotspot/share/c1/c1_LIRGenerator.hpp

   1 /*
   2  * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.

   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 247   // code emission
 248   void do_ArithmeticOp_Long(ArithmeticOp* x);
 249   void do_ArithmeticOp_Int (ArithmeticOp* x);
 250   void do_ArithmeticOp_FPU (ArithmeticOp* x);
 251 
 252   void do_RegisterFinalizer(Intrinsic* x);
 253   void do_isInstance(Intrinsic* x);
 254   void do_isPrimitive(Intrinsic* x);
 255   void do_getClass(Intrinsic* x);
 256   void do_currentThread(Intrinsic* x);
 257   void do_FmaIntrinsic(Intrinsic* x);
 258   void do_MathIntrinsic(Intrinsic* x);
 259   void do_LibmIntrinsic(Intrinsic* x);
 260   void do_ArrayCopy(Intrinsic* x);
 261   void do_CompareAndSwap(Intrinsic* x, ValueType* type);
 262   void do_NIOCheckIndex(Intrinsic* x);
 263   void do_FPIntrinsics(Intrinsic* x);
 264   void do_Reference_get(Intrinsic* x);
 265   void do_update_CRC32(Intrinsic* x);
 266   void do_update_CRC32C(Intrinsic* x);



 267   void do_vectorizedMismatch(Intrinsic* x);






 268 
 269  public:
 270   LIR_Opr call_runtime(BasicTypeArray* signature, LIRItemList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
 271   LIR_Opr call_runtime(BasicTypeArray* signature, LIR_OprList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
 272 
 273   // convenience functions
 274   LIR_Opr call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info);
 275   LIR_Opr call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info);
 276 
 277   // Access API
 278 
 279  private:
 280   BarrierSetC1 *_barrier_set;
 281 
 282  public:
 283   void access_store_at(DecoratorSet decorators, BasicType type,
 284                        LIRItem& base, LIR_Opr offset, LIR_Opr value,
 285                        CodeEmitInfo* patch_info = NULL, CodeEmitInfo* store_emit_info = NULL);
 286 
 287   void access_load_at(DecoratorSet decorators, BasicType type,


 294   LIR_Opr access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
 295                                 LIRItem& base, LIRItem& offset, LIRItem& value);
 296 
 297   LIR_Opr access_atomic_add_at(DecoratorSet decorators, BasicType type,
 298                                LIRItem& base, LIRItem& offset, LIRItem& value);
 299 
 300   // These need to guarantee JMM volatile semantics are preserved on each platform
 301   // and require one implementation per architecture.
 302   LIR_Opr atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value);
 303   LIR_Opr atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& new_value);
 304   LIR_Opr atomic_add(BasicType type, LIR_Opr addr, LIRItem& new_value);
 305 
 306 #ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
 307   virtual void CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base);
 308 #endif
 309 
 310   // specific implementations
 311   void array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci);
 312 
 313   static LIR_Opr result_register_for(ValueType* type, bool callee = false);



 314 
 315   ciObject* get_jobject_constant(Value value);
 316 
 317   LIRItemList* invoke_visit_arguments(Invoke* x);
 318   void invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list);
 319 
 320   void trace_block_entry(BlockBegin* block);
 321 
 322   // volatile field operations are never patchable because a klass
 323   // must be loaded to know it's volatile, which means that the offset
 324   // is always known as well.
 325   void volatile_field_store(LIR_Opr value, LIR_Address* address, CodeEmitInfo* info);
 326   void volatile_field_load(LIR_Address* address, LIR_Opr result, CodeEmitInfo* info);
 327 
 328   void put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data, BasicType type, bool is_volatile);
 329   void get_Object_unsafe(LIR_Opr dest, LIR_Opr src, LIR_Opr offset, BasicType type, bool is_volatile);
 330 
 331   void arithmetic_call_op (Bytecodes::Code code, LIR_Opr result, LIR_OprList* args);
 332 
 333   void increment_counter(address counter, BasicType type, int step = 1);


   1 /*
   2  * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2015-2018, Azul Systems, Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *


 248   // code emission
 249   void do_ArithmeticOp_Long(ArithmeticOp* x);
 250   void do_ArithmeticOp_Int (ArithmeticOp* x);
 251   void do_ArithmeticOp_FPU (ArithmeticOp* x);
 252 
 253   void do_RegisterFinalizer(Intrinsic* x);
 254   void do_isInstance(Intrinsic* x);
 255   void do_isPrimitive(Intrinsic* x);
 256   void do_getClass(Intrinsic* x);
 257   void do_currentThread(Intrinsic* x);
 258   void do_FmaIntrinsic(Intrinsic* x);
 259   void do_MathIntrinsic(Intrinsic* x);
 260   void do_LibmIntrinsic(Intrinsic* x);
 261   void do_ArrayCopy(Intrinsic* x);
 262   void do_CompareAndSwap(Intrinsic* x, ValueType* type);
 263   void do_NIOCheckIndex(Intrinsic* x);
 264   void do_FPIntrinsics(Intrinsic* x);
 265   void do_Reference_get(Intrinsic* x);
 266   void do_update_CRC32(Intrinsic* x);
 267   void do_update_CRC32C(Intrinsic* x);
 268 #ifdef AARCH32
 269   void do_update_CRC32_inner(Intrinsic* x, int is_crc32c);
 270 #endif
 271   void do_vectorizedMismatch(Intrinsic* x);
 272 #ifdef AARCH32
 273   void do_aescrypt_block(Intrinsic* x);
 274   void do_aescrypt_cbc(Intrinsic* x);
 275   void do_sha(Intrinsic* x);
 276   void do_montgomery_intrinsic(Intrinsic *x);
 277 #endif
 278 
 279  public:
 280   LIR_Opr call_runtime(BasicTypeArray* signature, LIRItemList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
 281   LIR_Opr call_runtime(BasicTypeArray* signature, LIR_OprList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
 282 
 283   // convenience functions
 284   LIR_Opr call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info);
 285   LIR_Opr call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info);
 286 
 287   // Access API
 288 
 289  private:
 290   BarrierSetC1 *_barrier_set;
 291 
 292  public:
 293   void access_store_at(DecoratorSet decorators, BasicType type,
 294                        LIRItem& base, LIR_Opr offset, LIR_Opr value,
 295                        CodeEmitInfo* patch_info = NULL, CodeEmitInfo* store_emit_info = NULL);
 296 
 297   void access_load_at(DecoratorSet decorators, BasicType type,


 304   LIR_Opr access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
 305                                 LIRItem& base, LIRItem& offset, LIRItem& value);
 306 
 307   LIR_Opr access_atomic_add_at(DecoratorSet decorators, BasicType type,
 308                                LIRItem& base, LIRItem& offset, LIRItem& value);
 309 
 310   // These need to guarantee JMM volatile semantics are preserved on each platform
 311   // and require one implementation per architecture.
 312   LIR_Opr atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value);
 313   LIR_Opr atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& new_value);
 314   LIR_Opr atomic_add(BasicType type, LIR_Opr addr, LIRItem& new_value);
 315 
 316 #ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
 317   virtual void CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base);
 318 #endif
 319 
 320   // specific implementations
 321   void array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci);
 322 
 323   static LIR_Opr result_register_for(ValueType* type, bool callee = false);
 324 #ifdef AARCH32
 325   static LIR_Opr java_result_register_for(ValueType* type, bool callee = false);
 326 #endif
 327 
 328   ciObject* get_jobject_constant(Value value);
 329 
 330   LIRItemList* invoke_visit_arguments(Invoke* x);
 331   void invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list);
 332 
 333   void trace_block_entry(BlockBegin* block);
 334 
 335   // volatile field operations are never patchable because a klass
 336   // must be loaded to know it's volatile, which means that the offset
 337   // is always known as well.
 338   void volatile_field_store(LIR_Opr value, LIR_Address* address, CodeEmitInfo* info);
 339   void volatile_field_load(LIR_Address* address, LIR_Opr result, CodeEmitInfo* info);
 340 
 341   void put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data, BasicType type, bool is_volatile);
 342   void get_Object_unsafe(LIR_Opr dest, LIR_Opr src, LIR_Opr offset, BasicType type, bool is_volatile);
 343 
 344   void arithmetic_call_op (Bytecodes::Code code, LIR_Opr result, LIR_OprList* args);
 345 
 346   void increment_counter(address counter, BasicType type, int step = 1);
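
On AARCH32 the port adds a shared CRC helper, do_update_CRC32_inner, with an is_crc32c flag selecting between the two checksum variants. A hypothetical dispatch, for illustration only; the helper's actual body lives in the platform LIRGenerator sources and is not shown here:

    #ifdef AARCH32
    // Illustrative sketch: both intrinsics funnel into the shared helper,
    // with the flag distinguishing CRC-32 from CRC-32C.
    void LIRGenerator::do_update_CRC32(Intrinsic* x)  { do_update_CRC32_inner(x, 0); }
    void LIRGenerator::do_update_CRC32C(Intrinsic* x) { do_update_CRC32_inner(x, 1); }
    #endif
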

