1 /*
2 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
1670 if (n != NULL) { // Call, AddP, LoadP, StoreP
1671 build_connection_graph(n, igvn);
1672 if (ptn->node_type() != PointsToNode::UnknownType)
1673 cg_worklist.append(n->_idx); // Collect CG nodes
1674 if (!_processed.test(n->_idx))
1675 worklist.append(n->_idx); // Collect C/A/L/S nodes
1676 }
1677 }
1678
1679 // After IGVN user nodes may have smaller _idx than
1680 // their inputs so they will be processed first in
1681 // previous loop. Because of that not all Graph
1682 // edges will be created. Walk over interesting
1683 // nodes again until no new edges are created.
1684 //
1685 // Normally only 1-3 passes needed to build
1686 // Connection Graph depending on graph complexity.
1687 // Observed 8 passes in jvm2008 compiler.compiler.
1688 // Set limit to 20 to catch situation when something
1689 // did go wrong and recompile the method without EA.
1690
1691 #define CG_BUILD_ITER_LIMIT 20
1692
1693 uint length = worklist.length();
1694 int iterations = 0;
1695 while(_progress && (iterations++ < CG_BUILD_ITER_LIMIT)) {
1696 _progress = false;
1697 for( uint next = 0; next < length; ++next ) {
1698 int ni = worklist.at(next);
1699 PointsToNode* ptn = ptnode_adr(ni);
1700 Node* n = ptn->_node;
1701 assert(n != NULL, "should be known node");
1702 build_connection_graph(n, igvn);
1703 }
1704 }
1705 if (iterations >= CG_BUILD_ITER_LIMIT) {
1706 assert(iterations < CG_BUILD_ITER_LIMIT,
1707 err_msg("infinite EA connection graph build with %d nodes and worklist size %d",
1708 nodes_size(), length));
1709 // Possible infinite build_connection_graph loop,
1710 // retry compilation without escape analysis.
1711 C->record_failure(C2Compiler::retry_no_escape_analysis());
1712 _collecting = false;
1713 return false;
1714 }
1715 #undef CG_BUILD_ITER_LIMIT
1716
1717 // 5. Propagate escaped states.
1718 worklist.clear();
1719
1720 // mark all nodes reachable from GlobalEscape nodes
1721 (void)propagate_escape_state(&cg_worklist, &worklist, PointsToNode::GlobalEscape);
1722
1723 // mark all nodes reachable from ArgEscape nodes
1724 bool has_non_escaping_obj = propagate_escape_state(&cg_worklist, &worklist, PointsToNode::ArgEscape);
1725
1726 Arena* arena = Thread::current()->resource_area();
1727 VectorSet visited(arena);
1728
1729 // 6. Find fields initializing values for not escaped allocations
1730 uint alloc_length = alloc_worklist.length();
1731 for (uint next = 0; next < alloc_length; ++next) {
1732 Node* n = alloc_worklist.at(next);
1733 PointsToNode::EscapeState es = ptnode_adr(n->_idx)->escape_state();
1734 if (es == PointsToNode::NoEscape) {
1735 has_non_escaping_obj = true;
2275 assert(false, "should be done already");
2276 break;
2277 #endif
2278 case Op_CallLeafNoFP:
2279 is_arraycopy = (call->as_CallLeaf()->_name != NULL &&
2280 strstr(call->as_CallLeaf()->_name, "arraycopy") != 0);
2281 // fall through
2282 case Op_CallLeaf:
2283 {
2284 // Stub calls, objects do not escape but they are not scalar replaceable.
2285 // Adjust escape state for outgoing arguments.
2286 const TypeTuple * d = call->tf()->domain();
2287 bool src_has_oops = false;
2288 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2289 const Type* at = d->field_at(i);
2290 Node *arg = call->in(i)->uncast();
2291 const Type *aat = phase->type(arg);
2292 PointsToNode::EscapeState arg_esc = ptnode_adr(arg->_idx)->escape_state();
2293 if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
2294 (is_arraycopy || arg_esc < PointsToNode::ArgEscape)) {
2295
2296 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2297 aat->isa_ptr() != NULL, "expecting an Ptr");
2298 bool arg_has_oops = aat->isa_oopptr() &&
2299 (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
2300 (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
2301 if (i == TypeFunc::Parms) {
2302 src_has_oops = arg_has_oops;
2303 }
2304 //
2305 // src or dst could be j.l.Object when the other is a basic type array:
2306 //
2307 // arraycopy(char[],0,Object*,0,size);
2308 // arraycopy(Object*,0,char[],0,size);
2309 //
2310 // Don't add edges from dst's fields in such cases.
2311 //
2312 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2313 arg_has_oops && (i > TypeFunc::Parms);
2314 #ifdef ASSERT
2315 if (!(is_arraycopy ||
2316 call->as_CallLeaf()->_name != NULL &&
2317 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 ||
2318 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
2319 ) {
2320 call->dump();
2321 assert(false, "EA: unexpected CallLeaf");
2322 }
2323 #endif
2324 // Always process arraycopy's destination object since
2325 // we need to add all possible edges to references in
2326 // source object.
2327 if (arg_esc >= PointsToNode::ArgEscape &&
2328 !arg_is_arraycopy_dest) {
2329 continue;
2330 }
2331 set_escape_state(arg->_idx, PointsToNode::ArgEscape);
2332 Node* arg_base = arg;
2333 if (arg->is_AddP()) {
2334 //
2335 // The inline_native_clone() case when the arraycopy stub is called
2336 // after the allocation before Initialize and CheckCastPP nodes.
2337 // Or normal arraycopy for object arrays case.
2338 //
2339 // Set AddP's base (Allocate) as not scalar replaceable since
2340 // pointer to the base (with offset) is passed as argument.
2341 //
2342 arg_base = get_addp_base(arg);
2343 }
2344 VectorSet argset = *PointsTo(arg_base); // Clone set
2345 for( VectorSetI j(&argset); j.test(); ++j ) {
2346 uint pd = j.elem; // Destination object
2347 set_escape_state(pd, PointsToNode::ArgEscape);
2348
2349 if (arg_is_arraycopy_dest) {
2350 PointsToNode* ptd = ptnode_adr(pd);
2351 // Conservatively reference an unknown object since
2352 // not all source's fields/elements may be known.
2353 add_edge_from_fields(pd, _phantom_object, Type::OffsetBot);
2354
2355 Node *src = call->in(TypeFunc::Parms)->uncast();
2356 Node* src_base = src;
2357 if (src->is_AddP()) {
2358 src_base = get_addp_base(src);
2359 }
2360 // Create edges from destination's fields to
2361 // everything known source's fields could point to.
2362 for( VectorSetI s(PointsTo(src_base)); s.test(); ++s ) {
2363 uint ps = s.elem;
2364 bool has_bottom_offset = false;
2365 for (uint fd = 0; fd < ptd->edge_count(); fd++) {
2366 assert(ptd->edge_type(fd) == PointsToNode::FieldEdge, "expecting a field edge");
2367 int fdi = ptd->edge_target(fd);
2368 PointsToNode* pfd = ptnode_adr(fdi);
2369 int offset = pfd->offset();
2370 if (offset == Type::OffsetBot)
2371 has_bottom_offset = true;
2372 assert(offset != -1, "offset should be set");
2373 add_deferred_edge_to_fields(fdi, ps, offset);
2374 }
2375 // Destination object may not have access (no field edge)
2376 // to fields which are accessed in source object.
2377 // As result no edges will be created to those source's
2378 // fields and escape state of destination object will
2379 // not be propagated to those fields.
2380 //
2381 // Mark source object as global escape except in
2382 // the case with Type::OffsetBot field (which is
2383 // common case for array elements access) when
2384 // edges are created to all source's fields.
2385 if (!has_bottom_offset) {
2386 set_escape_state(ps, PointsToNode::GlobalEscape);
2387 }
2388 }
2389 }
2390 }
2391 }
2392 }
2393 break;
2394 }
2395
2396 case Op_CallStaticJava:
2397 // For a static call, we know exactly what method is being called.
2398 // Use bytecode estimator to record the call's escape effects
2399 {
2400 ciMethod *meth = call->as_CallJava()->method();
2401 BCEscapeAnalyzer *call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
2402 // fall-through if not a Java method or no analyzer information
2403 if (call_analyzer != NULL) {
2404 const TypeTuple * d = call->tf()->domain();
2405 bool copy_dependencies = false;
2406 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2407 const Type* at = d->field_at(i);
2408 int k = i - TypeFunc::Parms;
2409 Node *arg = call->in(i)->uncast();
2410
|
1 /*
2 * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
1670 if (n != NULL) { // Call, AddP, LoadP, StoreP
1671 build_connection_graph(n, igvn);
1672 if (ptn->node_type() != PointsToNode::UnknownType)
1673 cg_worklist.append(n->_idx); // Collect CG nodes
1674 if (!_processed.test(n->_idx))
1675 worklist.append(n->_idx); // Collect C/A/L/S nodes
1676 }
1677 }
1678
1679 // After IGVN user nodes may have smaller _idx than
1680 // their inputs so they will be processed first in
1681 // previous loop. Because of that not all Graph
1682 // edges will be created. Walk over interesting
1683 // nodes again until no new edges are created.
1684 //
1685 // Normally only 1-3 passes needed to build
1686 // Connection Graph depending on graph complexity.
1687 // Observed 8 passes in jvm2008 compiler.compiler.
1688 // Set limit to 20 to catch situation when something
1689 // did go wrong and recompile the method without EA.
1690 // Also limit build time to 30 sec.
1691
1692 #define CG_BUILD_ITER_LIMIT 20
1693 #define CG_BUILD_TIME_LIMIT 30.0
1694
1695 uint length = worklist.length();
1696 int iterations = 0;
1697 elapsedTimer time;
1698 while(_progress &&
1699 (iterations++ < CG_BUILD_ITER_LIMIT) &&
1700 (time.seconds() < CG_BUILD_TIME_LIMIT)) {
1701 time.start();
1702 _progress = false;
1703 for( uint next = 0; next < length; ++next ) {
1704 int ni = worklist.at(next);
1705 PointsToNode* ptn = ptnode_adr(ni);
1706 Node* n = ptn->_node;
1707 assert(n != NULL, "should be known node");
1708 build_connection_graph(n, igvn);
1709 }
1710 time.stop();
1711 }
1712 if ((iterations >= CG_BUILD_ITER_LIMIT) ||
1713 (time.seconds() >= CG_BUILD_TIME_LIMIT)) {
1714 assert(false, err_msg("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
1715 time.seconds(), iterations, nodes_size(), length));
1716 // Possible infinite build_connection_graph loop,
1717 // retry compilation without escape analysis.
1718 C->record_failure(C2Compiler::retry_no_escape_analysis());
1719 _collecting = false;
1720 return false;
1721 }
1722 #undef CG_BUILD_ITER_LIMIT
1723 #undef CG_BUILD_TIME_LIMIT
1724
1725 // 5. Propagate escaped states.
1726 worklist.clear();
1727
1728 // mark all nodes reachable from GlobalEscape nodes
1729 (void)propagate_escape_state(&cg_worklist, &worklist, PointsToNode::GlobalEscape);
1730
1731 // mark all nodes reachable from ArgEscape nodes
1732 bool has_non_escaping_obj = propagate_escape_state(&cg_worklist, &worklist, PointsToNode::ArgEscape);
1733
1734 Arena* arena = Thread::current()->resource_area();
1735 VectorSet visited(arena);
1736
1737 // 6. Find fields initializing values for not escaped allocations
1738 uint alloc_length = alloc_worklist.length();
1739 for (uint next = 0; next < alloc_length; ++next) {
1740 Node* n = alloc_worklist.at(next);
1741 PointsToNode::EscapeState es = ptnode_adr(n->_idx)->escape_state();
1742 if (es == PointsToNode::NoEscape) {
1743 has_non_escaping_obj = true;
2283 assert(false, "should be done already");
2284 break;
2285 #endif
2286 case Op_CallLeafNoFP:
2287 is_arraycopy = (call->as_CallLeaf()->_name != NULL &&
2288 strstr(call->as_CallLeaf()->_name, "arraycopy") != 0);
2289 // fall through
2290 case Op_CallLeaf:
2291 {
2292 // Stub calls, objects do not escape but they are not scalar replaceable.
2293 // Adjust escape state for outgoing arguments.
2294 const TypeTuple * d = call->tf()->domain();
2295 bool src_has_oops = false;
2296 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2297 const Type* at = d->field_at(i);
2298 Node *arg = call->in(i)->uncast();
2299 const Type *aat = phase->type(arg);
2300 PointsToNode::EscapeState arg_esc = ptnode_adr(arg->_idx)->escape_state();
2301 if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
2302 (is_arraycopy || arg_esc < PointsToNode::ArgEscape)) {
2303 #ifdef ASSERT
2304 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2305 aat->isa_ptr() != NULL, "expecting an Ptr");
2306 if (!(is_arraycopy ||
2307 call->as_CallLeaf()->_name != NULL &&
2308 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 ||
2309 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
2310 ) {
2311 call->dump();
2312 assert(false, "EA: unexpected CallLeaf");
2313 }
2314 #endif
2315 if (arg_esc < PointsToNode::ArgEscape) {
2316 set_escape_state(arg->_idx, PointsToNode::ArgEscape);
2317 Node* arg_base = arg;
2318 if (arg->is_AddP()) {
2319 //
2320 // The inline_native_clone() case when the arraycopy stub is called
2321 // after the allocation before Initialize and CheckCastPP nodes.
2322 // Or normal arraycopy for object arrays case.
2323 //
2324 // Set AddP's base (Allocate) as not scalar replaceable since
2325 // pointer to the base (with offset) is passed as argument.
2326 //
2327 arg_base = get_addp_base(arg);
2328 set_escape_state(arg_base->_idx, PointsToNode::ArgEscape);
2329 }
2330 }
2331
2332 bool arg_has_oops = aat->isa_oopptr() &&
2333 (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
2334 (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
2335 if (i == TypeFunc::Parms) {
2336 src_has_oops = arg_has_oops;
2337 }
2338 //
2339 // src or dst could be j.l.Object when the other is a basic type array:
2340 //
2341 // arraycopy(char[],0,Object*,0,size);
2342 // arraycopy(Object*,0,char[],0,size);
2343 //
2344 // Do nothing special in such cases.
2345 //
2346 if (is_arraycopy && (i > TypeFunc::Parms) &&
2347 src_has_oops && arg_has_oops) {
2348 // Destination object's fields reference an unknown object.
2349 Node* arg_base = arg;
2350 if (arg->is_AddP()) {
2351 arg_base = get_addp_base(arg);
2352 }
2353 for (VectorSetI s(PointsTo(arg_base)); s.test(); ++s) {
2354 uint ps = s.elem;
2355 set_escape_state(ps, PointsToNode::ArgEscape);
2356 add_edge_from_fields(ps, _phantom_object, Type::OffsetBot);
2357 }
2358 // Conservatively all values in source object fields globally escape
2359 // since we don't know if values in destination object fields
2360 // escape (it could be traced but it is too expensive).
2361 Node* src = call->in(TypeFunc::Parms)->uncast();
2362 Node* src_base = src;
2363 if (src->is_AddP()) {
2364 src_base = get_addp_base(src);
2365 }
2366 for (VectorSetI s(PointsTo(src_base)); s.test(); ++s) {
2367 uint ps = s.elem;
2368 set_escape_state(ps, PointsToNode::ArgEscape);
2369 // Use OffsetTop to indicate fields global escape.
2370 add_edge_from_fields(ps, _phantom_object, Type::OffsetTop);
2371 }
2372 }
2373 }
2374 }
2375 break;
2376 }
2377
2378 case Op_CallStaticJava:
2379 // For a static call, we know exactly what method is being called.
2380 // Use bytecode estimator to record the call's escape effects
2381 {
2382 ciMethod *meth = call->as_CallJava()->method();
2383 BCEscapeAnalyzer *call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
2384 // fall-through if not a Java method or no analyzer information
2385 if (call_analyzer != NULL) {
2386 const TypeTuple * d = call->tf()->domain();
2387 bool copy_dependencies = false;
2388 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2389 const Type* at = d->field_at(i);
2390 int k = i - TypeFunc::Parms;
2391 Node *arg = call->in(i)->uncast();
2392
|