          --- old/src/share/vm/opto/library_call.cpp
          +++ new/src/share/vm/opto/library_call.cpp
   1    1  /*
   2      - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
        2 + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
   3    3   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4    4   *
   5    5   * This code is free software; you can redistribute it and/or modify it
   6    6   * under the terms of the GNU General Public License version 2 only, as
   7    7   * published by the Free Software Foundation.
   8    8   *
   9    9   * This code is distributed in the hope that it will be useful, but WITHOUT
  10   10   * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11   11   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12   12   * version 2 for more details (a copy is included in the LICENSE file that
... 77 lines elided ...
  90   90                                  // resulting CastII of index:
  91   91                                  Node* *pos_index = NULL);
  92   92    Node* generate_nonpositive_guard(Node* index, bool never_negative,
  93   93                                     // resulting CastII of index:
  94   94                                     Node* *pos_index = NULL);
  95   95    Node* generate_limit_guard(Node* offset, Node* subseq_length,
  96   96                               Node* array_length,
  97   97                               RegionNode* region);
  98   98    Node* generate_current_thread(Node* &tls_output);
  99   99    address basictype2arraycopy(BasicType t, Node *src_offset, Node *dest_offset,
 100      -                              bool disjoint_bases, const char* &name);
      100 +                              bool disjoint_bases, const char* &name, bool dest_uninitialized);
 101  101    Node* load_mirror_from_klass(Node* klass);
 102  102    Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
 103  103                                        int nargs,
 104  104                                        RegionNode* region, int null_path,
 105  105                                        int offset);
 106  106    Node* load_klass_from_mirror(Node* mirror, bool never_see_null, int nargs,
 107  107                                 RegionNode* region, int null_path) {
 108  108      int offset = java_lang_Class::klass_offset_in_bytes();
 109  109      return load_klass_from_mirror_common(mirror, never_see_null, nargs,
 110  110                                           region, null_path,
... 94 lines elided ...
 205  205                              Node* dest,
 206  206                              BasicType basic_elem_type,
 207  207                              Node* slice_off,
 208  208                              Node* slice_len,
 209  209                              Node* slice_end);
 210  210    bool generate_block_arraycopy(const TypePtr* adr_type,
 211  211                                  BasicType basic_elem_type,
 212  212                                  AllocateNode* alloc,
 213  213                                  Node* src,  Node* src_offset,
 214  214                                  Node* dest, Node* dest_offset,
 215      -                                Node* dest_size);
      215 +                                Node* dest_size, bool dest_uninitialized);
 216  216    void generate_slow_arraycopy(const TypePtr* adr_type,
 217  217                                 Node* src,  Node* src_offset,
 218  218                                 Node* dest, Node* dest_offset,
 219      -                               Node* copy_length);
      219 +                               Node* copy_length, bool dest_uninitialized);
 220  220    Node* generate_checkcast_arraycopy(const TypePtr* adr_type,
 221  221                                       Node* dest_elem_klass,
 222  222                                       Node* src,  Node* src_offset,
 223  223                                       Node* dest, Node* dest_offset,
 224      -                                     Node* copy_length);
      224 +                                     Node* copy_length, bool dest_uninitialized);
 225  225    Node* generate_generic_arraycopy(const TypePtr* adr_type,
 226  226                                     Node* src,  Node* src_offset,
 227  227                                     Node* dest, Node* dest_offset,
 228      -                                   Node* copy_length);
      228 +                                   Node* copy_length, bool dest_uninitialized);
 229  229    void generate_unchecked_arraycopy(const TypePtr* adr_type,
 230  230                                      BasicType basic_elem_type,
 231  231                                      bool disjoint_bases,
 232  232                                      Node* src,  Node* src_offset,
 233  233                                      Node* dest, Node* dest_offset,
 234      -                                    Node* copy_length);
      234 +                                    Node* copy_length, bool dest_uninitialized);
 235  235    bool inline_unsafe_CAS(BasicType type);
 236  236    bool inline_unsafe_ordered_store(BasicType type);
 237  237    bool inline_fp_conversions(vmIntrinsics::ID id);
 238  238    bool inline_numberOfLeadingZeros(vmIntrinsics::ID id);
 239  239    bool inline_numberOfTrailingZeros(vmIntrinsics::ID id);
 240  240    bool inline_bitCount(vmIntrinsics::ID id);
 241  241    bool inline_reverseBytes(vmIntrinsics::ID id);
 242  242  };
 243  243  
 244  244  
... 3829 lines elided ...
4074 4074    dest = basic_plus_adr(dest, base_off);
4075 4075  
4076 4076    // Compute the length also, if needed:
4077 4077    Node* countx = size;
4078 4078    countx = _gvn.transform( new (C, 3) SubXNode(countx, MakeConX(base_off)) );
4079 4079    countx = _gvn.transform( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong) ));
4080 4080  
4081 4081    const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4082 4082    bool disjoint_bases = true;
4083 4083    generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
4084      -                               src, NULL, dest, NULL, countx);
     4084 +                               src, NULL, dest, NULL, countx,
     4085 +                               /*dest_uninitialized*/false);
4085 4086  
4086 4087    // If necessary, emit some card marks afterwards.  (Non-arrays only.)
4087 4088    if (card_mark) {
4088 4089      assert(!is_array, "");
4089 4090      // Put in store barrier for any and all oops we are sticking
4090 4091      // into this object.  (We could avoid this if we could prove
4091 4092      // that the object type contains no oop fields at all.)
4092 4093      Node* no_particular_value = NULL;
4093 4094      Node* no_particular_field = NULL;
4094 4095      int raw_adr_idx = Compile::AliasIdxRaw;
... 193 lines elided ...
4288 4289  enum {
4289 4290    COPYFUNC_UNALIGNED = 0,
4290 4291    COPYFUNC_ALIGNED = 1,                 // src, dest aligned to HeapWordSize
4291 4292    COPYFUNC_CONJOINT = 0,
4292 4293    COPYFUNC_DISJOINT = 2                 // src != dest, or transfer can descend
4293 4294  };
4294 4295  
4295 4296  // Note:  The condition "disjoint" applies also for overlapping copies
 4296 4297  // where a descending copy is permitted (i.e., dest_offset <= src_offset).
4297 4298  static address
4298      -select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name) {
     4299 +select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized) {
4299 4300    int selector =
4300 4301      (aligned  ? COPYFUNC_ALIGNED  : COPYFUNC_UNALIGNED) +
4301 4302      (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT);
4302 4303  
4303 4304  #define RETURN_STUB(xxx_arraycopy) { \
4304 4305    name = #xxx_arraycopy; \
4305 4306    return StubRoutines::xxx_arraycopy(); }
4306 4307  
     4308 +#define RETURN_STUB_PARM(xxx_arraycopy, parm) {           \
     4309 +  name = #xxx_arraycopy; \
     4310 +  return StubRoutines::xxx_arraycopy(parm); }
     4311 +
4307 4312    switch (t) {
4308 4313    case T_BYTE:
4309 4314    case T_BOOLEAN:
4310 4315      switch (selector) {
4311 4316      case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_arraycopy);
4312 4317      case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_arraycopy);
4313 4318      case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_disjoint_arraycopy);
4314 4319      case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_disjoint_arraycopy);
4315 4320      }
4316 4321    case T_CHAR:
... 16 lines elided ...
4333 4338    case T_LONG:
4334 4339      switch (selector) {
4335 4340      case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_arraycopy);
4336 4341      case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_arraycopy);
4337 4342      case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_disjoint_arraycopy);
4338 4343      case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_disjoint_arraycopy);
4339 4344      }
4340 4345    case T_ARRAY:
4341 4346    case T_OBJECT:
4342 4347      switch (selector) {
4343      -    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(oop_arraycopy);
4344      -    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_oop_arraycopy);
4345      -    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(oop_disjoint_arraycopy);
4346      -    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_oop_disjoint_arraycopy);
     4348 +    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized);
     4349 +    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_arraycopy, dest_uninitialized);
     4350 +    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_disjoint_arraycopy, dest_uninitialized);
     4351 +    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_disjoint_arraycopy, dest_uninitialized);
4347 4352      }
4348 4353    default:
4349 4354      ShouldNotReachHere();
4350 4355      return NULL;
4351 4356    }
4352 4357  
4353 4358  #undef RETURN_STUB
     4359 +#undef RETURN_STUB_PARM
4354 4360  }
4355 4361  
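
Reviewer note: RETURN_STUB_PARM behaves exactly like RETURN_STUB except that it forwards its extra argument to the StubRoutines accessor (these call sites require the oop accessors to accept the flag, which the changeset must provide elsewhere). The T_OBJECT conjoint/unaligned case, for example, expands roughly to this sketch:

    // Illustrative expansion of
    //   RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized)
    // at its use site in select_arraycopy_function():
    {
      name = "oop_arraycopy";
      return StubRoutines::oop_arraycopy(dest_uninitialized);
    }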
4356 4362  //------------------------------basictype2arraycopy----------------------------
4357 4363  address LibraryCallKit::basictype2arraycopy(BasicType t,
4358 4364                                              Node* src_offset,
4359 4365                                              Node* dest_offset,
4360 4366                                              bool disjoint_bases,
4361      -                                            const char* &name) {
     4367 +                                            const char* &name,
     4368 +                                            bool dest_uninitialized) {
 4362 4369    const TypeInt* src_offset_inttype  = gvn().find_int_type(src_offset);
 4363 4370    const TypeInt* dest_offset_inttype = gvn().find_int_type(dest_offset);
4364 4371  
4365 4372    bool aligned = false;
4366 4373    bool disjoint = disjoint_bases;
4367 4374  
4368 4375    // if the offsets are the same, we can treat the memory regions as
4369 4376    // disjoint, because either the memory regions are in different arrays,
4370 4377    // or they are identical (which we can treat as disjoint.)  We can also
 4371 4378    // treat a copy with a destination index less than the source index
... 5 lines elided ...
4377 4384      int d_offs = dest_offset_inttype->get_con();
4378 4385      int element_size = type2aelembytes(t);
4379 4386      aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
4380 4387                ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0);
4381 4388      if (s_offs >= d_offs)  disjoint = true;
4382 4389    } else if (src_offset == dest_offset && src_offset != NULL) {
4383 4390      // This can occur if the offsets are identical non-constants.
4384 4391      disjoint = true;
4385 4392    }
4386 4393  
4387      -  return select_arraycopy_function(t, aligned, disjoint, name);
     4394 +  return select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized);
4388 4395  }
4389 4396  
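
The aligned/disjoint decision above is plain integer arithmetic on the constant offsets. A stand-alone sketch with assumed layout constants (the array base offset and HeapWordSize are platform- and flag-dependent, so the numbers below are examples only):

    // Sketch of the HeapWord-alignment test used in basictype2arraycopy().
    // base_offset, element_size and heap_word_size are example values only.
    #include <cstdio>

    static bool heapword_aligned(int base_offset, int elem_offset,
                                 int element_size, int heap_word_size) {
      return (base_offset + elem_offset * element_size) % heap_word_size == 0;
    }

    int main() {
      const int base = 16, esize = 4, hws = 8;               // e.g. an int[] with a 16-byte header
      printf("%d\n", heapword_aligned(base, 2, esize, hws)); // 16 + 2*4 = 24 -> aligned
      printf("%d\n", heapword_aligned(base, 1, esize, hws)); // 16 + 1*4 = 20 -> not aligned
      return 0;
    }

When both start addresses pass this test the arrayof_ stubs may be used, and, as noted above, s_offs >= d_offs lets the copy run descending, so the disjoint stub is legal even for overlapping arrays.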
4390 4397  
4391 4398  //------------------------------inline_arraycopy-----------------------
4392 4399  bool LibraryCallKit::inline_arraycopy() {
4393 4400    // Restore the stack and pop off the arguments.
4394 4401    int nargs = 5;  // 2 oops, 3 ints, no size_t or long
4395 4402    assert(callee()->signature()->size() == nargs, "copy has 5 arguments");
4396 4403  
4397 4404    Node *src         = argument(0);
... 35 lines elided ...
4433 4440    // Figure out the size and type of the elements we will be copying.
4434 4441    BasicType src_elem  =  top_src->klass()->as_array_klass()->element_type()->basic_type();
4435 4442    BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
4436 4443    if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
4437 4444    if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;
4438 4445  
4439 4446    if (src_elem != dest_elem || dest_elem == T_VOID) {
4440 4447      // The component types are not the same or are not recognized.  Punt.
4441 4448      // (But, avoid the native method wrapper to JVM_ArrayCopy.)
4442 4449      generate_slow_arraycopy(TypePtr::BOTTOM,
4443      -                            src, src_offset, dest, dest_offset, length);
     4450 +                            src, src_offset, dest, dest_offset, length,
      4451 +                            /*dest_uninitialized*/false);
4444 4452      return true;
4445 4453    }
4446 4454  
4447 4455    //---------------------------------------------------------------------------
4448 4456    // We will make a fast path for this call to arraycopy.
4449 4457  
4450 4458    // We have the following tests left to perform:
4451 4459    //
4452 4460    // (3) src and dest must not be null.
4453 4461    // (4) src_offset must not be negative.
... 123 lines elided ...
4577 4585      // From this point on, every exit path is responsible for
4578 4586      // initializing any non-copied parts of the object to zero.
4579 4587      must_clear_dest = true;
4580 4588    } else {
4581 4589      // No zeroing elimination here.
4582 4590      alloc             = NULL;
4583 4591      //original_dest   = dest;
4584 4592      //must_clear_dest = false;
4585 4593    }
4586 4594  
      4595 +  // We must choose different arraycopy stubs if the target array is not initialized,
      4596 +  // so the pre-barriers won't peek into the old values. See CR 6627983.
     4597 +  const bool& dest_uninitialized = must_clear_dest;
     4598 +
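
The new comment above is the heart of the change. As a minimal, self-contained sketch (not HotSpot code; 'Oop' and 'enqueue_for_concurrent_mark' are stand-ins), an SATB-style pre-write barrier loads the previous value of each slot it is about to overwrite, so running it over a freshly allocated, still-unzeroed destination would treat leftover heap garbage as an oop:

    typedef void* Oop;

    static void enqueue_for_concurrent_mark(Oop previous) {
      (void)previous;   // a real collector would push this onto a marking queue
    }

    static void pre_write_barrier(Oop* field) {
      Oop previous = *field;        // reads garbage if *field was never initialized
      if (previous != nullptr) {
        enqueue_for_concurrent_mark(previous);  // garbage would later be scanned as an object
      }
    }

That is why the stubs selected with dest_uninitialized == true must not peek at the old destination values.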
4587 4599    // Results are placed here:
4588 4600    enum { fast_path        = 1,  // normal void-returning assembly stub
4589 4601           checked_path     = 2,  // special assembly stub with cleanup
4590 4602           slow_call_path   = 3,  // something went wrong; call the VM
4591 4603           zero_path        = 4,  // bypass when length of copy is zero
4592 4604           bcopy_path       = 5,  // copy primitive array by 64-bit blocks
4593 4605           PATH_LIMIT       = 6
4594 4606    };
4595 4607    RegionNode* result_region = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
4596 4608    PhiNode*    result_i_o    = new(C, PATH_LIMIT) PhiNode(result_region, Type::ABIO);
... 12 lines elided ...
4609 4621    // Checked control path:
4610 4622    Node* checked_control = top();
4611 4623    Node* checked_mem     = NULL;
4612 4624    Node* checked_i_o     = NULL;
4613 4625    Node* checked_value   = NULL;
4614 4626  
4615 4627    if (basic_elem_type == T_CONFLICT) {
4616 4628      assert(!must_clear_dest, "");
4617 4629      Node* cv = generate_generic_arraycopy(adr_type,
4618 4630                                            src, src_offset, dest, dest_offset,
4619      -                                          copy_length);
     4631 +                                          copy_length, dest_uninitialized);
4620 4632      if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
4621 4633      checked_control = control();
4622 4634      checked_i_o     = i_o();
4623 4635      checked_mem     = memory(adr_type);
4624 4636      checked_value   = cv;
4625 4637      set_control(top());         // no fast path
4626 4638    }
4627 4639  
4628 4640    Node* not_pos = generate_nonpositive_guard(copy_length, length_never_negative);
4629 4641    if (not_pos != NULL) {
... 61 lines elided ...
4691 4703        assert(tail_ctl != NULL || !stopped(), "must be an outcome");
4692 4704      }
4693 4705  
4694 4706      // At this point, let's assume there is no tail.
4695 4707      if (!stopped() && alloc != NULL && basic_elem_type != T_OBJECT) {
4696 4708        // There is no tail.  Try an upgrade to a 64-bit copy.
4697 4709        bool didit = false;
4698 4710        { PreserveJVMState pjvms(this);
4699 4711          didit = generate_block_arraycopy(adr_type, basic_elem_type, alloc,
4700 4712                                           src, src_offset, dest, dest_offset,
4701      -                                         dest_size);
     4713 +                                         dest_size, dest_uninitialized);
4702 4714          if (didit) {
4703 4715            // Present the results of the block-copying fast call.
4704 4716            result_region->init_req(bcopy_path, control());
4705 4717            result_i_o   ->init_req(bcopy_path, i_o());
4706 4718            result_memory->init_req(bcopy_path, memory(adr_type));
4707 4719          }
4708 4720        }
4709 4721        if (didit)
4710 4722          set_control(top());     // no regular fast path
4711 4723      }
... 55 lines elided ...
4767 4779        PreserveJVMState pjvms(this);
4768 4780        set_control(not_subtype_ctrl);
4769 4781        // (At this point we can assume disjoint_bases, since types differ.)
4770 4782        int ek_offset = objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc);
4771 4783        Node* p1 = basic_plus_adr(dest_klass, ek_offset);
4772 4784        Node* n1 = LoadKlassNode::make(_gvn, immutable_memory(), p1, TypeRawPtr::BOTTOM);
4773 4785        Node* dest_elem_klass = _gvn.transform(n1);
4774 4786        Node* cv = generate_checkcast_arraycopy(adr_type,
4775 4787                                                dest_elem_klass,
4776 4788                                                src, src_offset, dest, dest_offset,
4777      -                                              ConvI2X(copy_length));
     4789 +                                              ConvI2X(copy_length), dest_uninitialized);
4778 4790        if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
4779 4791        checked_control = control();
4780 4792        checked_i_o     = i_o();
4781 4793        checked_mem     = memory(adr_type);
4782 4794        checked_value   = cv;
4783 4795      }
4784 4796      // At this point we know we do not need type checks on oop stores.
4785 4797  
4786 4798      // Let's see if we need card marks:
4787 4799      if (alloc != NULL && use_ReduceInitialCardMarks()) {
... 2 lines elided ...
4790 4802        assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
4791 4803               "sizes agree");
4792 4804      }
4793 4805    }
4794 4806  
4795 4807    if (!stopped()) {
4796 4808      // Generate the fast path, if possible.
4797 4809      PreserveJVMState pjvms(this);
4798 4810      generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
4799 4811                                   src, src_offset, dest, dest_offset,
4800      -                                 ConvI2X(copy_length));
     4812 +                                 ConvI2X(copy_length), dest_uninitialized);
4801 4813  
4802 4814      // Present the results of the fast call.
4803 4815      result_region->init_req(fast_path, control());
4804 4816      result_i_o   ->init_req(fast_path, i_o());
4805 4817      result_memory->init_req(fast_path, memory(adr_type));
4806 4818    }
4807 4819  
4808 4820    // Here are all the slow paths up to this point, in one bundle:
4809 4821    slow_control = top();
4810 4822    if (slow_region != NULL)
... 66 lines elided ...
4877 4889      set_i_o(slow_i_o);
4878 4890  
4879 4891      if (must_clear_dest) {
4880 4892        generate_clear_array(adr_type, dest, basic_elem_type,
4881 4893                             intcon(0), NULL,
4882 4894                             alloc->in(AllocateNode::AllocSize));
4883 4895      }
4884 4896  
4885 4897      generate_slow_arraycopy(adr_type,
4886 4898                              src, src_offset, dest, dest_offset,
4887      -                            copy_length);
     4899 +                            copy_length, /*dest_uninitialized*/false);
4888 4900  
4889 4901      result_region->init_req(slow_call_path, control());
4890 4902      result_i_o   ->init_req(slow_call_path, i_o());
4891 4903      result_memory->init_req(slow_call_path, memory(adr_type));
4892 4904    }
4893 4905  
4894 4906    // Remove unused edges.
4895 4907    for (uint i = 1; i < result_region->req(); i++) {
4896 4908      if (result_region->in(i) == NULL)
4897 4909        result_region->init_req(i, top());
... 223 lines elided ...
5121 5133    set_memory(mem, adr_type);
5122 5134  }
5123 5135  
5124 5136  
5125 5137  bool
5126 5138  LibraryCallKit::generate_block_arraycopy(const TypePtr* adr_type,
5127 5139                                           BasicType basic_elem_type,
5128 5140                                           AllocateNode* alloc,
5129 5141                                           Node* src,  Node* src_offset,
5130 5142                                           Node* dest, Node* dest_offset,
5131      -                                         Node* dest_size) {
     5143 +                                         Node* dest_size, bool dest_uninitialized) {
5132 5144    // See if there is an advantage from block transfer.
5133 5145    int scale = exact_log2(type2aelembytes(basic_elem_type));
5134 5146    if (scale >= LogBytesPerLong)
5135 5147      return false;               // it is already a block transfer
5136 5148  
5137 5149    // Look at the alignment of the starting offsets.
5138 5150    int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
5139 5151    const intptr_t BIG_NEG = -128;
5140 5152    assert(BIG_NEG + 2*abase < 0, "neg enough");
5141 5153  
... 24 lines elided ...
5166 5178  
5167 5179    // Do this copy by giant steps.
5168 5180    Node* sptr  = basic_plus_adr(src,  src_off);
5169 5181    Node* dptr  = basic_plus_adr(dest, dest_off);
5170 5182    Node* countx = dest_size;
5171 5183    countx = _gvn.transform( new (C, 3) SubXNode(countx, MakeConX(dest_off)) );
5172 5184    countx = _gvn.transform( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong)) );
5173 5185  
5174 5186    bool disjoint_bases = true;   // since alloc != NULL
5175 5187    generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases,
5176      -                               sptr, NULL, dptr, NULL, countx);
     5188 +                               sptr, NULL, dptr, NULL, countx, dest_uninitialized);
5177 5189  
5178 5190    return true;
5179 5191  }
5180 5192  
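
The 64-bit block count computed above is just the number of jlong-sized words between the HeapWord-aligned start of the copied region and the end of the freshly allocated destination object. With illustrative byte values (LogBytesPerLong == 3):

    // Worked example of the countx computation (example values only).
    #include <cstdio>

    int main() {
      long dest_size = 80;  // total size of the newly allocated destination, in bytes
      long dest_off  = 16;  // HeapWord-aligned byte offset where the copy starts
      long countx    = (dest_size - dest_off) >> 3;   // 64 bytes -> 8 jlong blocks
      printf("countx = %ld\n", countx);               // prints: countx = 8
      return 0;
    }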
5181 5193  
5182 5194  // Helper function; generates code for the slow case.
5183 5195  // We make a call to a runtime method which emulates the native method,
5184 5196  // but without the native wrapper overhead.
5185 5197  void
5186 5198  LibraryCallKit::generate_slow_arraycopy(const TypePtr* adr_type,
5187 5199                                          Node* src,  Node* src_offset,
5188 5200                                          Node* dest, Node* dest_offset,
5189      -                                        Node* copy_length) {
     5201 +                                        Node* copy_length, bool dest_uninitialized) {
     5202 +  assert(!dest_uninitialized, "Invariant");
5190 5203    Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON,
5191 5204                                   OptoRuntime::slow_arraycopy_Type(),
5192 5205                                   OptoRuntime::slow_arraycopy_Java(),
5193 5206                                   "slow_arraycopy", adr_type,
5194 5207                                   src, src_offset, dest, dest_offset,
5195 5208                                   copy_length);
5196 5209  
5197 5210    // Handle exceptions thrown by this fellow:
5198 5211    make_slow_call_ex(call, env()->Throwable_klass(), false);
5199 5212  }
5200 5213  
5201 5214  // Helper function; generates code for cases requiring runtime checks.
5202 5215  Node*
5203 5216  LibraryCallKit::generate_checkcast_arraycopy(const TypePtr* adr_type,
5204 5217                                               Node* dest_elem_klass,
5205 5218                                               Node* src,  Node* src_offset,
5206 5219                                               Node* dest, Node* dest_offset,
5207      -                                             Node* copy_length) {
     5220 +                                             Node* copy_length, bool dest_uninitialized) {
5208 5221    if (stopped())  return NULL;
5209 5222  
5210      -  address copyfunc_addr = StubRoutines::checkcast_arraycopy();
     5223 +  address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized);
5211 5224    if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
5212 5225      return NULL;
5213 5226    }
5214 5227  
5215 5228    // Pick out the parameters required to perform a store-check
5216 5229    // for the target array.  This is an optimistic check.  It will
5217 5230    // look in each non-null element's class, at the desired klass's
5218 5231    // super_check_offset, for the desired klass.
5219 5232    int sco_offset = Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc);
5220 5233    Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
... 17 lines elided ...
5238 5251  
5239 5252    return _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Parms));
5240 5253  }
5241 5254  
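
For reviewers unfamiliar with the store check described above (each non-null element's class is probed at the destination element klass's super_check_offset), here is a self-contained sketch of the per-element test; the types and helper are simplified stand-ins, not HotSpot code:

    #include <cstdint>

    struct Klass;                        // stand-in for HotSpot's Klass
    struct OopDesc { Klass* _klass; };   // stand-in: every object knows its klass

    // dest_elem_klass's super_check_offset names the slot in any klass where
    // dest_elem_klass will appear if it is a fast-path supertype.
    static bool element_storeable(OopDesc* elem, Klass* dest_elem_klass,
                                  int super_check_offset) {
      if (elem == nullptr) return true;  // nulls always store cleanly
      uint8_t* k = reinterpret_cast<uint8_t*>(elem->_klass);
      Klass* probe = *reinterpret_cast<Klass**>(k + super_check_offset);
      // A hit means the element is assignable; on a miss the real stub performs
      // the full subtype check and, failing that, stops the copy at that element.
      return probe == dest_elem_klass;
    }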
5242 5255  
5243 5256  // Helper function; generates code for cases requiring runtime checks.
5244 5257  Node*
5245 5258  LibraryCallKit::generate_generic_arraycopy(const TypePtr* adr_type,
5246 5259                                             Node* src,  Node* src_offset,
5247 5260                                             Node* dest, Node* dest_offset,
5248      -                                           Node* copy_length) {
     5261 +                                           Node* copy_length, bool dest_uninitialized) {
     5262 +  assert(!dest_uninitialized, "Invariant");
5249 5263    if (stopped())  return NULL;
5250      -
5251 5264    address copyfunc_addr = StubRoutines::generic_arraycopy();
5252 5265    if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
5253 5266      return NULL;
5254 5267    }
5255 5268  
5256 5269    Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5257 5270                      OptoRuntime::generic_arraycopy_Type(),
5258 5271                      copyfunc_addr, "generic_arraycopy", adr_type,
5259 5272                      src, src_offset, dest, dest_offset, copy_length);
5260 5273  
5261 5274    return _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Parms));
5262 5275  }
5263 5276  
5264 5277  // Helper function; generates the fast out-of-line call to an arraycopy stub.
5265 5278  void
5266 5279  LibraryCallKit::generate_unchecked_arraycopy(const TypePtr* adr_type,
5267 5280                                               BasicType basic_elem_type,
5268 5281                                               bool disjoint_bases,
5269 5282                                               Node* src,  Node* src_offset,
5270 5283                                               Node* dest, Node* dest_offset,
5271      -                                             Node* copy_length) {
     5284 +                                             Node* copy_length, bool dest_uninitialized) {
5272 5285    if (stopped())  return;               // nothing to do
5273 5286  
5274 5287    Node* src_start  = src;
5275 5288    Node* dest_start = dest;
5276 5289    if (src_offset != NULL || dest_offset != NULL) {
5277 5290      assert(src_offset != NULL && dest_offset != NULL, "");
5278 5291      src_start  = array_element_address(src,  src_offset,  basic_elem_type);
5279 5292      dest_start = array_element_address(dest, dest_offset, basic_elem_type);
5280 5293    }
5281 5294  
5282 5295    // Figure out which arraycopy runtime method to call.
5283 5296    const char* copyfunc_name = "arraycopy";
5284 5297    address     copyfunc_addr =
5285 5298        basictype2arraycopy(basic_elem_type, src_offset, dest_offset,
5286      -                          disjoint_bases, copyfunc_name);
     5299 +                          disjoint_bases, copyfunc_name, dest_uninitialized);
5287 5300  
5288 5301    // Call it.  Note that the count_ix value is not scaled to a byte-size.
5289 5302    make_runtime_call(RC_LEAF|RC_NO_FP,
5290 5303                      OptoRuntime::fast_arraycopy_Type(),
5291 5304                      copyfunc_addr, copyfunc_name, adr_type,
5292 5305                      src_start, dest_start, copy_length XTOP);
5293 5306  }
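
Finally, the fast-path call generated above treats the selected stub as a leaf routine taking raw start addresses plus an element count (hence the note that count_ix is not scaled to a byte size). A self-contained illustration with a trivial stand-in for a real StubRoutines entry:

    #include <cstddef>
    #include <cstring>

    // Shape of a fast arraycopy entry point: (src_start, dest_start, element_count).
    typedef void (*arraycopy_stub)(const void* src_start, void* dest_start, size_t count);

    // Stand-in stub for jbyte elements; real stubs are generated assembly.
    static void jbyte_copy_stub(const void* src, void* dst, size_t count) {
      std::memmove(dst, src, count);   // 1-byte elements, so count == bytes here
    }

    int main() {
      char a[8] = "abcdefg", b[8] = {0};
      arraycopy_stub f = jbyte_copy_stub;
      f(a, b, 7);   // 7 elements; the offsets were already folded into the start addresses
      return 0;
    }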
    