  // Range checks
  generate_string_range_check(src, src_offset, length, compress && src_elem == T_BYTE);
  generate_string_range_check(dst, dst_offset, length, !compress && dst_elem == T_BYTE);
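  // Note (explanatory, not from the original comments): the last argument flags
  // the case where 'offset'/'length' are counted in chars while the array being
  // checked is a byte[] (compact strings), so the range check has to account
  // for the 2x element-size difference before comparing against the array length.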
  if (stopped()) {
    return true;
  }

  // Convert char[] offsets to byte[] offsets
  if (compress && src_elem == T_BYTE) {
    src_offset = _gvn.transform(new LShiftINode(src_offset, intcon(1)));
  } else if (!compress && dst_elem == T_BYTE) {
    dst_offset = _gvn.transform(new LShiftINode(dst_offset, intcon(1)));
  }
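  // For example, a char[] offset of 3 becomes a byte[] offset of 6: each UTF-16
  // char occupies two bytes, so the left shift by 1 (i.e. *2) rescales the index
  // into the byte-addressed array.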

  Node* src_start = array_element_address(src, src_offset, src_elem);
  Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
  // 'src_start' points to src array + scaled offset
  // 'dst_start' points to dst array + scaled offset
  Node* count = NULL;
  if (compress) {
    count = compress_string(src_start, TypeAryPtr::get_array_body_type(src_elem), dst_start, length);
  } else {
    inflate_string(src_start, dst_start, TypeAryPtr::get_array_body_type(dst_elem), length);
  }
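  // compress_string produces an int result node ('count'); inflate_string has
  // no result. Presumably 'count' reports how many chars were actually
  // compressed, so a caller can detect a char that does not fit in one byte.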

  if (alloc != NULL) {
    if (alloc->maybe_set_complete(&_gvn)) {
      // "You break it, you buy it."
      InitializeNode* init = alloc->initialization();
      assert(init->is_complete(), "we just did this");
      init->set_complete_with_arraycopy();
      assert(dst->is_CheckCastPP(), "sanity");
      assert(dst->in(0)->in(0) == init, "dest pinned");
    }
    // Do not let stores that initialize this object be reordered with
    // a subsequent store that would make this object accessible by
    // other threads.
    // Record what AllocateNode this StoreStore protects so that
    // escape analysis can go from the MemBarStoreStoreNode to the
    // AllocateNode and eliminate the MemBarStoreStoreNode if possible
    // based on the escape status of the AllocateNode.
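    // Illustrative ordering the barrier enforces (a sketch, not the actual
    // generated IR; 'str.value = newArray' is a hypothetical publishing store):
    //   <stores that fill in the newly allocated byte[]>   // the copy above
    //   MemBarStoreStore                                    // may not be reordered past...
    //   str.value = newArray;                               // ...the store that publishes the array
    // Without the barrier, a racing reader could observe the published
    // reference before the array contents become visible.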
    insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
  }

// ... (remainder of inline_string_copy and intervening code omitted) ...
// Store/Load char to/from byte[] array.
// static void StringUTF16.putChar(byte[] val, int index, int c)
// static char StringUTF16.getChar(byte[] val, int index)
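// For reference, the Java code being intrinsified is roughly the following
// (a simplified sketch; the real StringUTF16 methods pick the shift amounts
// from the platform byte order and assert the index is in range):
//   static void putChar(byte[] val, int index, int c) {
//     val[index << 1]       = (byte)(c >> 8);   // high byte (big-endian layout shown)
//     val[(index << 1) + 1] = (byte)(c);        // low byte
//   }
//   static char getChar(byte[] val, int index) {
//     return (char)(((val[index << 1] & 0xff) << 8) | (val[(index << 1) + 1] & 0xff));
//   }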
bool LibraryCallKit::inline_string_char_access(bool is_store) {
  Node* value = argument(0);
  Node* index = argument(1);
  Node* ch = is_store ? argument(2) : NULL;

  // This intrinsic accesses byte[] array as char[] array. Computing the offsets
  // correctly requires matched array shapes.
  assert (arrayOopDesc::base_offset_in_bytes(T_CHAR) == arrayOopDesc::base_offset_in_bytes(T_BYTE),
          "sanity: byte[] and char[] bases agree");
  assert (type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE)*2,
          "sanity: byte[] and char[] scales agree");
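  // With matched shapes, the address computed below,
  //   adr = value + base_offset_in_bytes(T_CHAR) + index * type2aelembytes(T_CHAR),
  // lands exactly on bytes [2*index, 2*index + 1] of the byte[] payload, which
  // is what makes the char-typed access into a byte[] well defined.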

  Node* adr = array_element_address(value, index, T_CHAR);
  if (is_store) {
    (void) store_to_memory(control(), adr, ch, T_CHAR, TypeAryPtr::BYTES, MemNode::unordered,
                           false, false, true /* mismatched */);
  } else {
    ch = make_load(control(), adr, TypeInt::CHAR, T_CHAR, TypeAryPtr::BYTES, MemNode::unordered,
                   LoadNode::DependsOnlyOnTest, false, false, true /* mismatched */);
    set_result(ch);
  }
  return true;
}

//--------------------------round_double_node--------------------------------
// Round a double node if necessary.
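// Background note (not from the original comment): on platforms that require
// explicit strictfp rounding (e.g. 32-bit x86), the x87 FPU used when
// UseSSE <= 1 keeps intermediates in 80-bit extended precision, so a
// RoundDouble node is inserted to force the value back to 64-bit double.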
Node* LibraryCallKit::round_double_node(Node* n) {
  if (Matcher::strict_fp_requires_explicit_rounding && UseSSE <= 1)
    n = _gvn.transform(new RoundDoubleNode(0, n));
  return n;
}

//------------------------------inline_math-----------------------------------
// public static double Math.abs(double)
// public static double Math.sqrt(double)
// public static double Math.log(double)
// public static double Math.log10(double)
bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {