src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
Webrev navigation: Index | Unified diffs | Context diffs | Sdiffs | Patch | New | Old | Previous File | Next File — open Sdiff view for src/hotspot/cpu/ppc

src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp

Print this page




2757   assert(data->is_CounterData(), "need CounterData for calls");
2758   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2759   Register mdo = op->mdo()->as_register();
2760 #ifdef _LP64
2761   assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
2762   Register tmp1 = op->tmp1()->as_register_lo();
2763 #else
2764   assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
2765   Register tmp1 = op->tmp1()->as_register();
2766 #endif
2767   metadata2reg(md->constant_encoding(), mdo);
2768   int mdo_offset_bias = 0;
2769   if (!Assembler::is_simm16(md->byte_offset_of_slot(data, CounterData::count_offset()) +
2770                             data->size_in_bytes())) {
2771     // The offset is large so bias the mdo by the base of the slot so
2772     // that the ld can use simm16s to reference the slots of the data.
2773     mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
2774     __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2775   }
2776 
2777   Bytecodes::Code bc = method->java_code_at_bci(bci);
2778   const bool callee_is_static = callee->is_loaded() && callee->is_static();
2779   // Perform additional virtual call profiling for invokevirtual and
2780   // invokeinterface bytecodes.
2781   if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
2782       !callee_is_static &&  // Required for optimized MH invokes.
2783       C1ProfileVirtualCalls) {
2784     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2785     Register recv = op->recv()->as_register();
2786     assert_different_registers(mdo, tmp1, recv);
2787     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2788     ciKlass* known_klass = op->known_holder();
2789     if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
2790       // We know the type that will be seen at this call site; we can
2791       // statically update the MethodData* rather than needing to do
2792       // dynamic tests on the receiver type.
2793 
2794       // NOTE: we should probably put a lock around this search to
2795       // avoid collisions by concurrent compilations.
2796       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2797       uint i;
2798       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2799         ciKlass* receiver = vc_data->receiver(i);
2800         if (known_klass->equals(receiver)) {
2801           __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2802           __ addi(tmp1, tmp1, DataLayout::counter_increment);
2803           __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);




2757   assert(data->is_CounterData(), "need CounterData for calls");
2758   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2759   Register mdo = op->mdo()->as_register();
2760 #ifdef _LP64
2761   assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
2762   Register tmp1 = op->tmp1()->as_register_lo();
2763 #else
2764   assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
2765   Register tmp1 = op->tmp1()->as_register();
2766 #endif
2767   metadata2reg(md->constant_encoding(), mdo);
2768   int mdo_offset_bias = 0;
2769   if (!Assembler::is_simm16(md->byte_offset_of_slot(data, CounterData::count_offset()) +
2770                             data->size_in_bytes())) {
2771     // The offset is large so bias the mdo by the base of the slot so
2772     // that the ld can use simm16s to reference the slots of the data.
2773     mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
2774     __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2775   }
2776 


2777   // Perform additional virtual call profiling for invokevirtual and
2778   // invokeinterface bytecodes
2779   if (op->should_profile_receiver_type()) {


2780     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2781     Register recv = op->recv()->as_register();
2782     assert_different_registers(mdo, tmp1, recv);
2783     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2784     ciKlass* known_klass = op->known_holder();
2785     if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
2786       // We know the type that will be seen at this call site; we can
2787       // statically update the MethodData* rather than needing to do
2788       // dynamic tests on the receiver type.
2789 
2790       // NOTE: we should probably put a lock around this search to
2791       // avoid collisions by concurrent compilations.
2792       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2793       uint i;
2794       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2795         ciKlass* receiver = vc_data->receiver(i);
2796         if (known_klass->equals(receiver)) {
2797           __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2798           __ addi(tmp1, tmp1, DataLayout::counter_increment);
2799           __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);


src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
Webrev navigation: Index | Unified diffs | Context diffs | Sdiffs | Patch | New | Old | Previous File | Next File