323 bool inline_hasNegatives();
324 bool inline_squareToLen();
325 bool inline_mulAdd();
326 bool inline_montgomeryMultiply();
327 bool inline_montgomerySquare();
328 bool inline_vectorizedMismatch();
329 bool inline_fma(vmIntrinsics::ID id);
330
331 bool inline_profileBoolean();
332 bool inline_isCompileConstant();
333
334 // Vector API support
335 bool inline_vector_nary_operation(int n);
336 bool inline_vector_broadcast_coerced();
337 bool inline_vector_mem_operation(bool is_store);
338 bool inline_vector_reduction();
339 bool inline_vector_test();
340 bool inline_vector_blend();
341 bool inline_vector_compare();
342 bool inline_vector_broadcast_int();
343 bool inline_vector_reinterpret();
344 Node* box_vector(Node* in, const TypeInstPtr* vbox_type, BasicType bt, int num_elem);
345 Node* unbox_vector(Node* in, const TypeInstPtr* vbox_type, BasicType bt, int num_elem);
346 Node* shift_count(Node* cnt, int shift_op, BasicType bt, int num_elem);
347 void set_vector_result(Node* result, bool set_res = true);
348
// Request that compiled code clears the upper bits of AVX registers:
// sets a flag on the Compile object when running on x86 with
// UseAVX >= 2. Compiled out entirely on non-X86 builds.
349 void clear_upper_avx() {
350 #ifdef X86
351 if (UseAVX >= 2) {
352 C->set_clear_upper_avx(true);
353 }
354 #endif
355 }
356 };
357
358 //---------------------------make_vm_intrinsic----------------------------
359 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
360 vmIntrinsics::ID id = m->intrinsic_id();
361 assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
362
363 if (!m->is_loaded()) {
893 case vmIntrinsics::_VectorTernaryOp:
894 return inline_vector_nary_operation(3);
895
896 case vmIntrinsics::_VectorBroadcastCoerced:
897 return inline_vector_broadcast_coerced();
898 case vmIntrinsics::_VectorLoadOp:
899 return inline_vector_mem_operation(/*is_store=*/false);
900 case vmIntrinsics::_VectorStoreOp:
901 return inline_vector_mem_operation(/*is_store=*/true);
902 case vmIntrinsics::_VectorReductionCoerced:
903 return inline_vector_reduction();
904 case vmIntrinsics::_VectorTest:
905 return inline_vector_test();
906 case vmIntrinsics::_VectorBlend:
907 return inline_vector_blend();
908 case vmIntrinsics::_VectorCompare:
909 return inline_vector_compare();
910 case vmIntrinsics::_VectorBroadcastInt:
911 return inline_vector_broadcast_int();
912 case vmIntrinsics::_VectorReinterpret:
913 return inline_vector_reinterpret();
914
915 default:
916 // If you get here, it may be that someone has added a new intrinsic
917 // to the list in vmSymbols.hpp without implementing it here.
918 #ifndef PRODUCT
919 if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
920 tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
921 vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
922 }
923 #endif
924 return false;
925 }
926 }
927
928 Node* LibraryCallKit::try_to_predicate(int predicate) {
929 if (!jvms()->has_method()) {
930 // Root JVMState has a null method.
931 assert(map()->memory()->Opcode() == Op_Parm, "");
932 // Insert the memory aliasing node
933 set_all_memory(reset_memory());
7699 int sopc = VectorNode::opcode(opc, elem_bt); // get_node_id(opr->get_con(), elem_bt);
7700 ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
7701 const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
7702
7703 if (!arch_supports_vector(sopc, num_elem, elem_bt, VecMaskNotUsed)) {
7704 return false; // not supported
7705 }
7706 Node* opd1 = unbox_vector(argument(4), vbox_type, elem_bt, num_elem);
7707 Node* opd2 = shift_count(argument(5), opc, elem_bt, num_elem);
7708 if (opd1 == NULL || opd2 == NULL) {
7709 return false;
7710 }
7711 Node* operation = _gvn.transform(VectorNode::make(sopc, opd1, opd2, num_elem, elem_bt));
7712
7713 Node* vbox = box_vector(operation, vbox_type, elem_bt, num_elem);
7714 set_vector_result(vbox);
7715 C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
7716 return true;
7717 }
7718
// Intrinsic for Vector API reinterpret: unboxes the source vector from
// argument(5), emits a VectorReinterpretNode when the source and
// destination vector types differ, and boxes the result into the
// destination vector/mask class.
// Arguments 0-2: source vector class / element class / length;
// arguments 3-4: destination element class / length. All must be
// compile-time constants for intrinsification.
// Returns false (fall back to the Java implementation) on any
// unsupported combination.
7719 bool LibraryCallKit::inline_vector_reinterpret() {
7720 const TypeInstPtr* vector_klass_from = gvn().type(argument(0))->is_instptr();
7721 const TypeInstPtr* elem_klass_from = gvn().type(argument(1))->is_instptr();
7722 const TypeInt* vlen_from = gvn().type(argument(2))->is_int();
7723
7724 const TypeInstPtr* elem_klass_to = gvn().type(argument(3))->is_instptr();
7725 const TypeInt* vlen_to = gvn().type(argument(4))->is_int();
7726
// All shape metadata must be compile-time constants to intrinsify.
7727 if (vector_klass_from->const_oop() == NULL || elem_klass_from->const_oop() == NULL || !vlen_from->is_con() ||
7728 elem_klass_to->const_oop() == NULL || !vlen_to->is_con()) {
7729 return false; // not enough info for intrinsification
7730 }
7731
7732 ciKlass* vbox_klass_from = vector_klass_from->const_oop()->as_instance()->java_lang_Class_klass();
7733 if (!vbox_klass_from->is_vectorapi_vector()) {
7734 return false; // only vector & mask are supported
7735 }
7736 bool is_mask = vbox_klass_from->is_vectormask();
7737
7738 ciType* elem_type_from = elem_klass_from->const_oop()->as_instance()->java_mirror_type();
7739 if (!elem_type_from->is_primitive_type()) {
// NOTE(review): source lines 7740-7742 are elided in this listing
// (presumably the early return and the declaration of elem_bt_from --
// confirm against the full file).
7743 if (is_mask) {
// Masks are carried in their mask representation element type.
7744 elem_bt_from = getMaskBasicType(elem_bt_from);
7745 }
7746 ciType* elem_type_to = elem_klass_to->const_oop()->as_instance()->java_mirror_type();
7747 if (!elem_type_to->is_primitive_type()) {
7748 return false; // should be primitive type
7749 }
7750 BasicType elem_bt_to = elem_type_to->basic_type();
7751 if (is_mask) {
7752 elem_bt_to = getMaskBasicType(elem_bt_to);
7753 }
// Masks may only be reinterpreted to the same element type.
7754 if (is_mask && elem_bt_from != elem_bt_to) {
7755 return false; // type mismatch
7756 }
7757 int num_elem_from = vlen_from->get_con();
7758 int num_elem_to = vlen_to->get_con();
// NOTE(review): elem_bt_to was already converted via getMaskBasicType
// at line 7752; this second application looks like a duplicated block.
// Harmless only if getMaskBasicType is idempotent -- confirm and
// consider removing.
7759 if (is_mask) {
7760 elem_bt_to = getMaskBasicType(elem_bt_to);
7761 }
7762
7763 // Check whether we can unbox to appropriate size.
7764 if (!arch_supports_vector(Op_VectorReinterpret,
7765 num_elem_from,
7766 elem_bt_from,
7767 is_mask ? VecMaskUseAll : VecMaskNotUsed)) {
7768 return false;
7769 }
7770
7771 // Check whether we can support resizing/reinterpreting to the new size.
7772 if (!arch_supports_vector(Op_VectorReinterpret,
7773 num_elem_to,
7774 elem_bt_to,
7775 is_mask ? VecMaskUseAll : VecMaskNotUsed)) {
7776 return false;
7777 }
7778
7779 const TypeInstPtr* vbox_type_from = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass_from);
7780
7781 Node* opd1 = unbox_vector(argument(5), vbox_type_from, elem_bt_from, num_elem_from);
7782 if (opd1 == NULL) {
7783 return false;
7784 }
7785
7786 // Can assert when Phi merges vectors of different types:
7787 // # Internal Error (/Users/vlivanov/ws/jdk/panama-dev/open/src/hotspot/share/opto/type.cpp:2291), pid=67536, tid=14083
7788 // # Error: assert(length() == v->length()) failed
7789 const TypeVect* src_type = TypeVect::make(elem_bt_from, num_elem_from);
7790 const TypeVect* dst_type = TypeVect::make(elem_bt_to, num_elem_to);
7791 Node* op = opd1;
// Only emit a reinterpret node when the vector types actually differ;
// identical types pass the unboxed value straight through.
7792 if (Type::cmp(src_type, dst_type) != 0) {
7793 op = _gvn.transform(new VectorReinterpretNode(op, src_type, dst_type));
7794 }
// Box the result using the exact destination vector/mask klass.
7795 ciKlass* vbox_klass_to = get_exact_klass_for_vector_box(vbox_klass_from, elem_type_to->basic_type(),
7796 num_elem_to, is_mask ? VECAPI_MASK : VECAPI_VECTOR);
7797 const TypeInstPtr* vbox_type_to = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass_to);
7798 Node* vbox = box_vector(op, vbox_type_to, elem_bt_to, num_elem_to);
7799 set_vector_result(vbox);
// NOTE(review): unlike the cast-capable replacement of this function,
// there is no C->set_max_vector_size(...) update before returning --
// confirm whether that omission is intentional.
7800 return true;
7801 }
7802
7803 //------------------------------get_state_from_sha_object-----------------------
// Returns the address of element 0 of the int[] "state" field of a
// sun.security.provider.SHA/SHA2 object, or NULL if the field cannot
// be loaded (e.g. a mismatched JDK class library).
7804 Node * LibraryCallKit::get_state_from_sha_object(Node *sha_object) {
// Load the int[] state array from the object ("[I" field signature).
7805 Node* sha_state = load_field_from_object(sha_object, "state", "[I", /*is_exact*/ false);
// Debug builds assert on a missing field; product builds bail out
// gracefully via the NULL check below.
7806 assert (sha_state != NULL, "wrong version of sun.security.provider.SHA/SHA2");
7807 if (sha_state == NULL) return (Node *) NULL;
7808
7809 // now have the array, need to get the start address of the state array
7810 Node* state = array_element_address(sha_state, intcon(0), T_INT);
7811 return state;
7812 }
7813
7814 //------------------------------get_state_from_sha5_object-----------------------
7815 Node * LibraryCallKit::get_state_from_sha5_object(Node *sha_object) {
7816 Node* sha_state = load_field_from_object(sha_object, "state", "[J", /*is_exact*/ false);
7817 assert (sha_state != NULL, "wrong version of sun.security.provider.SHA5");
7818 if (sha_state == NULL) return (Node *) NULL;
7819
|
323 bool inline_hasNegatives();
324 bool inline_squareToLen();
325 bool inline_mulAdd();
326 bool inline_montgomeryMultiply();
327 bool inline_montgomerySquare();
328 bool inline_vectorizedMismatch();
329 bool inline_fma(vmIntrinsics::ID id);
330
331 bool inline_profileBoolean();
332 bool inline_isCompileConstant();
333
334 // Vector API support
335 bool inline_vector_nary_operation(int n);
336 bool inline_vector_broadcast_coerced();
337 bool inline_vector_mem_operation(bool is_store);
338 bool inline_vector_reduction();
339 bool inline_vector_test();
340 bool inline_vector_blend();
341 bool inline_vector_compare();
342 bool inline_vector_broadcast_int();
343 bool inline_vector_cast_reinterpret(bool is_cast);
344 Node* box_vector(Node* in, const TypeInstPtr* vbox_type, BasicType bt, int num_elem);
345 Node* unbox_vector(Node* in, const TypeInstPtr* vbox_type, BasicType bt, int num_elem);
346 Node* shift_count(Node* cnt, int shift_op, BasicType bt, int num_elem);
347 void set_vector_result(Node* result, bool set_res = true);
348
// Request that compiled code clears the upper bits of AVX registers:
// sets a flag on the Compile object when running on x86 with
// UseAVX >= 2. Compiled out entirely on non-X86 builds.
349 void clear_upper_avx() {
350 #ifdef X86
351 if (UseAVX >= 2) {
352 C->set_clear_upper_avx(true);
353 }
354 #endif
355 }
356 };
357
358 //---------------------------make_vm_intrinsic----------------------------
359 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
360 vmIntrinsics::ID id = m->intrinsic_id();
361 assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
362
363 if (!m->is_loaded()) {
893 case vmIntrinsics::_VectorTernaryOp:
894 return inline_vector_nary_operation(3);
895
896 case vmIntrinsics::_VectorBroadcastCoerced:
897 return inline_vector_broadcast_coerced();
898 case vmIntrinsics::_VectorLoadOp:
899 return inline_vector_mem_operation(/*is_store=*/false);
900 case vmIntrinsics::_VectorStoreOp:
901 return inline_vector_mem_operation(/*is_store=*/true);
902 case vmIntrinsics::_VectorReductionCoerced:
903 return inline_vector_reduction();
904 case vmIntrinsics::_VectorTest:
905 return inline_vector_test();
906 case vmIntrinsics::_VectorBlend:
907 return inline_vector_blend();
908 case vmIntrinsics::_VectorCompare:
909 return inline_vector_compare();
910 case vmIntrinsics::_VectorBroadcastInt:
911 return inline_vector_broadcast_int();
912 case vmIntrinsics::_VectorReinterpret:
913 return inline_vector_cast_reinterpret(/*is_cast*/ false);
914 case vmIntrinsics::_VectorCast:
915 return inline_vector_cast_reinterpret(/*is_cast*/ true);
916
917 default:
918 // If you get here, it may be that someone has added a new intrinsic
919 // to the list in vmSymbols.hpp without implementing it here.
920 #ifndef PRODUCT
921 if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
922 tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
923 vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
924 }
925 #endif
926 return false;
927 }
928 }
929
930 Node* LibraryCallKit::try_to_predicate(int predicate) {
931 if (!jvms()->has_method()) {
932 // Root JVMState has a null method.
933 assert(map()->memory()->Opcode() == Op_Parm, "");
934 // Insert the memory aliasing node
935 set_all_memory(reset_memory());
7701 int sopc = VectorNode::opcode(opc, elem_bt); // get_node_id(opr->get_con(), elem_bt);
7702 ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
7703 const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
7704
7705 if (!arch_supports_vector(sopc, num_elem, elem_bt, VecMaskNotUsed)) {
7706 return false; // not supported
7707 }
7708 Node* opd1 = unbox_vector(argument(4), vbox_type, elem_bt, num_elem);
7709 Node* opd2 = shift_count(argument(5), opc, elem_bt, num_elem);
7710 if (opd1 == NULL || opd2 == NULL) {
7711 return false;
7712 }
7713 Node* operation = _gvn.transform(VectorNode::make(sopc, opd1, opd2, num_elem, elem_bt));
7714
7715 Node* vbox = box_vector(operation, vbox_type, elem_bt, num_elem);
7716 set_vector_result(vbox);
7717 C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
7718 return true;
7719 }
7720
// Combined intrinsic for Vector API cast and reinterpret. Unboxes the
// source vector from argument(5); when is_cast is true (and the element
// types differ) emits a VectorCast*2X node, resizing through a
// VectorReinterpretNode when source and destination lane counts differ;
// otherwise emits a plain VectorReinterpretNode when the vector types
// differ. The result is boxed into the destination vector/mask class.
// Arguments 0-2: source vector class / element class / length;
// arguments 3-4: destination element class / length. All must be
// compile-time constants. Returns false (fall back to Java) on any
// unsupported combination.
7721 bool LibraryCallKit::inline_vector_cast_reinterpret(bool is_cast) {
7722 const TypeInstPtr* vector_klass_from = gvn().type(argument(0))->is_instptr();
7723 const TypeInstPtr* elem_klass_from = gvn().type(argument(1))->is_instptr();
7724 const TypeInt* vlen_from = gvn().type(argument(2))->is_int();
7725
7726 const TypeInstPtr* elem_klass_to = gvn().type(argument(3))->is_instptr();
7727 const TypeInt* vlen_to = gvn().type(argument(4))->is_int();
7728
// All shape metadata must be compile-time constants to intrinsify.
7729 if (vector_klass_from->const_oop() == NULL || elem_klass_from->const_oop() == NULL || !vlen_from->is_con() ||
7730 elem_klass_to->const_oop() == NULL || !vlen_to->is_con()) {
7731 return false; // not enough info for intrinsification
7732 }
7733
7734 ciKlass* vbox_klass_from = vector_klass_from->const_oop()->as_instance()->java_lang_Class_klass();
7735 if (!vbox_klass_from->is_vectorapi_vector()) {
7736 return false; // only vector & mask are supported
7737 }
7738 bool is_mask = vbox_klass_from->is_vectormask();
7739
7740 ciType* elem_type_from = elem_klass_from->const_oop()->as_instance()->java_mirror_type();
7741 if (!elem_type_from->is_primitive_type()) {
// NOTE(review): source lines 7742-7744 are elided in this listing
// (presumably the early return and the declaration of elem_bt_from --
// confirm against the full file).
7745 if (is_mask) {
// Masks are carried in their mask representation element type.
7746 elem_bt_from = getMaskBasicType(elem_bt_from);
7747 }
7748 ciType* elem_type_to = elem_klass_to->const_oop()->as_instance()->java_mirror_type();
7749 if (!elem_type_to->is_primitive_type()) {
7750 return false; // should be primitive type
7751 }
7752 BasicType elem_bt_to = elem_type_to->basic_type();
7753 if (is_mask) {
7754 elem_bt_to = getMaskBasicType(elem_bt_to);
7755 }
// Masks may only change lane count, never element type.
7756 if (is_mask && elem_bt_from != elem_bt_to) {
7757 return false; // type mismatch
7758 }
7759 int num_elem_from = vlen_from->get_con();
7760 int num_elem_to = vlen_to->get_con();
// NOTE(review): elem_bt_to was already converted via getMaskBasicType
// at line 7754; this second application looks like a duplicated block
// carried over from the pre-patch version. Harmless only if
// getMaskBasicType is idempotent -- confirm and consider removing.
7761 if (is_mask) {
7762 elem_bt_to = getMaskBasicType(elem_bt_to);
7763 }
7764
7765 // Check whether we can unbox to appropriate size. Even with casting, checking for reinterpret is needed
7766 // since we may need to change size.
7767 if (!arch_supports_vector(Op_VectorReinterpret,
7768 num_elem_from,
7769 elem_bt_from,
7770 is_mask ? VecMaskUseAll : VecMaskNotUsed)) {
7771 return false;
7772 }
7773
7774 // Check whether we can support resizing/reinterpreting to the new size.
7775 if (!arch_supports_vector(Op_VectorReinterpret,
7776 num_elem_to,
7777 elem_bt_to,
7778 is_mask ? VecMaskUseAll : VecMaskNotUsed)) {
7779 return false;
7780 }
7781
7782 // At this point, we know that both input and output vector registers are supported
7783 // by the architecture. Next check if the casted type is simply to same type - which means
7784 // that it is actually a resize and not a cast.
7785 if (is_cast && elem_bt_from == elem_bt_to) {
7786 is_cast = false;
7787 }
7788
// Select the cast opcode from the SOURCE element type; the destination
// type is carried by the node's vector type.
7789 int cast_vopc = 0;
7790 if (is_cast) {
7791 assert(!is_mask, "masks cannot be casted");
7792 switch (elem_bt_from) {
7793 case T_BYTE:
7794 cast_vopc = Op_VectorCastB2X;
7795 break;
7796 case T_SHORT:
7797 cast_vopc = Op_VectorCastS2X;
7798 break;
7799 case T_INT:
7800 cast_vopc = Op_VectorCastI2X;
7801 break;
7802 case T_LONG:
7803 cast_vopc = Op_VectorCastL2X;
7804 break;
7805 case T_FLOAT:
7806 cast_vopc = Op_VectorCastF2X;
7807 break;
7808 case T_DOUBLE:
7809 cast_vopc = Op_VectorCastD2X;
7810 break;
7811 default:
7812 Unimplemented();
7813 }
7814 assert(cast_vopc != 0, "need to find vector cast operand");
7815
7816 // Make sure that cast is implemented to particular type/size combination.
7817 if (!arch_supports_vector(cast_vopc, num_elem_to, elem_bt_to, VecMaskNotUsed)) {
7818 return false;
7819 }
7820 }
7821
7822 const TypeInstPtr* vbox_type_from = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass_from);
7823
7824 Node* opd1 = unbox_vector(argument(5), vbox_type_from, elem_bt_from, num_elem_from);
7825 if (opd1 == NULL) {
7826 return false;
7827 }
7828
7829 const TypeVect* src_type = TypeVect::make(elem_bt_from, num_elem_from);
7830 const TypeVect* dst_type = TypeVect::make(elem_bt_to, num_elem_to);
7831
7832 Node* op = opd1;
// Three cast shapes: widen lane count (cast then resize), narrow lane
// count (resize then cast), or same lane count (cast only).
7833 if (is_cast) {
7834 if (num_elem_from < num_elem_to) {
7835 // Since input and output number of elements are not consistent, we need to make sure we
7836 // properly size. Thus, first make a cast that retains the number of elements from source.
7837 // In case the size exceeds the arch size, we do the minimum.
7838 int num_elem_for_cast = MIN2(num_elem_from, Matcher::max_vector_size(elem_bt_to));
7839
7840 // It is possible that arch does not support this intermediate vector size
7841 // TODO More complex logic required here to handle this corner case for the sizes.
7842 if (!arch_supports_vector(cast_vopc, num_elem_for_cast, elem_bt_to, VecMaskNotUsed)) {
7843 return false;
7844 }
7845
7846 op = _gvn.transform(VectorNode::make(cast_vopc, op, NULL, num_elem_for_cast, elem_bt_to));
7847 // Now ensure that the destination gets properly resized to needed size.
7848 op = _gvn.transform(new VectorReinterpretNode(op, op->bottom_type()->is_vect(), dst_type));
7849 } else if (num_elem_from > num_elem_to) {
7850 // Since number elements from input is larger than output, simply reduce size of input (we are supposed to
7851 // drop top elements anyway).
7852 int num_elem_for_resize = MAX2(num_elem_to, Matcher::min_vector_size(elem_bt_to));
7853
7854 // It is possible that arch does not support this intermediate vector size
7855 // TODO More complex logic required here to handle this corner case for the sizes.
7856 if (!arch_supports_vector(Op_VectorReinterpret,
7857 num_elem_for_resize,
7858 elem_bt_from,
7859 VecMaskNotUsed)) {
7860 return false;
7861 }
7862
7863 op = _gvn.transform(new VectorReinterpretNode(op,
7864 src_type,
7865 TypeVect::make(elem_bt_from,
7866 num_elem_for_resize)));
7867 op = _gvn.transform(VectorNode::make(cast_vopc, op, NULL, num_elem_to, elem_bt_to));
7868 } else {
7869 // Since input and output number of elements match, and since we know this vector size is
7870 // supported, simply do a cast with no resize needed.
7871 op = _gvn.transform(VectorNode::make(cast_vopc, op, NULL, num_elem_to, elem_bt_to));
7872 }
7873 } else if (Type::cmp(src_type, dst_type) != 0) {
// NOTE(review): this assert is trivially true inside the else-branch of
// "if (is_cast)" -- kept as documentation only.
7874 assert(!is_cast, "must be reinterpret");
7875 op = _gvn.transform(new VectorReinterpretNode(op, src_type, dst_type));
7876 }
7877
// Box the result using the exact destination vector/mask klass.
7878 ciKlass* vbox_klass_to = get_exact_klass_for_vector_box(vbox_klass_from, elem_type_to->basic_type(),
7879 num_elem_to, is_mask ? VECAPI_MASK : VECAPI_VECTOR);
7880 const TypeInstPtr* vbox_type_to = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass_to);
7881 Node* vbox = box_vector(op, vbox_type_to, elem_bt_to, num_elem_to);
7882 set_vector_result(vbox);
// Record the widest vector size produced so the backend can size
// save/restore and vzeroupper decisions accordingly.
7883 C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem_to * type2aelembytes(elem_bt_to))));
7884 return true;
7885 }
7886
7887 //------------------------------get_state_from_sha_object-----------------------
// Returns the address of element 0 of the int[] "state" field of a
// sun.security.provider.SHA/SHA2 object, or NULL if the field cannot
// be loaded (e.g. a mismatched JDK class library).
7888 Node * LibraryCallKit::get_state_from_sha_object(Node *sha_object) {
// Load the int[] state array from the object ("[I" field signature).
7889 Node* sha_state = load_field_from_object(sha_object, "state", "[I", /*is_exact*/ false);
// Debug builds assert on a missing field; product builds bail out
// gracefully via the NULL check below.
7890 assert (sha_state != NULL, "wrong version of sun.security.provider.SHA/SHA2");
7891 if (sha_state == NULL) return (Node *) NULL;
7892
7893 // now have the array, need to get the start address of the state array
7894 Node* state = array_element_address(sha_state, intcon(0), T_INT);
7895 return state;
7896 }
7897
7898 //------------------------------get_state_from_sha5_object-----------------------
7899 Node * LibraryCallKit::get_state_from_sha5_object(Node *sha_object) {
7900 Node* sha_state = load_field_from_object(sha_object, "state", "[J", /*is_exact*/ false);
7901 assert (sha_state != NULL, "wrong version of sun.security.provider.SHA5");
7902 if (sha_state == NULL) return (Node *) NULL;
7903
|