--- old/src/share/vm/opto/escape.cpp	Tue Nov 17 16:51:05 2009
+++ new/src/share/vm/opto/escape.cpp	Tue Nov 17 16:51:05 2009
@@ -779,6 +779,20 @@
       } else {
         break;
       }
+    } else if (result->Opcode() == Op_ClearArray) {
+      // This node initializes new object storage to zero.
+      intptr_t offset;
+      AllocateNode* alloc = AllocateNode::Ideal_allocation(result->in(3), phase, offset);
+      // Can not bypass initialization of the instance
+      // we are looking for or when something is wrong.
+      if (alloc == NULL || alloc->_idx == (uint)tinst->instance_id())
+        break;
+      // Otherwise skip it.
+      InitializeNode* init = alloc->initialization();
+      if (init != NULL)
+        result = init->in(TypeFunc::Memory);
+      else
+        result = alloc->in(TypeFunc::Memory);
     } else if (result->Opcode() == Op_SCMemProj) {
       assert(result->in(0)->is_LoadStore(), "sanity");
       const Type *at = phase->type(result->in(0)->in(MemNode::Address));
@@ -808,7 +822,6 @@
   }
   return result;
 }
 
-//
 // Convert the types of unescaped object to instance types where possible,
 // propagate the new type information through the graph, and update memory
@@ -900,7 +913,6 @@
 //
 void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist) {
   GrowableArray<Node *>  memnode_worklist;
-  GrowableArray<MergeMemNode *>  mergemem_worklist;
   GrowableArray<PhiNode *>  orig_phis;
   PhaseGVN *igvn = _compile->initial_gvn();
   uint new_index_start = (uint) _compile->num_alias_types();
@@ -1025,7 +1037,7 @@
           alloc_worklist.append_if_missing(addp2);
         }
         alloc_worklist.append_if_missing(use);
-      } else if (use->is_Initialize()) {
+      } else if (use->is_MemBar()) {
         memnode_worklist.append_if_missing(use);
       }
     }
@@ -1088,15 +1100,17 @@
     } else {
       continue;
     }
-    // push users on appropriate worklist
+    // push allocation's users on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if(use->is_Mem() && use->in(MemNode::Address) == n) {
        memnode_worklist.append_if_missing(use);
-      } else if (use->is_Initialize()) {
+      } else if (use->is_MemBar()) {
        memnode_worklist.append_if_missing(use);
+#ifdef ASSERT
      } else if (use->is_MergeMem()) {
-        mergemem_worklist.append_if_missing(use);
+        assert(_mergemem_worklist.contains(use->as_MergeMem()), "missing MergeMem node in the worklist");
+#endif
      } else if (use->is_SafePoint() && tinst != NULL) {
        // Look for MergeMem nodes for calls which reference unique allocation
        // (through CheckCastPP nodes) even for debug info.
@@ -1106,9 +1120,11 @@
               m->in(0) != use && !m->in(0)->_idx != iid) {
          m = m->in(0)->in(TypeFunc::Memory);
        }
+#ifdef ASSERT
        if (m->is_MergeMem()) {
-          mergemem_worklist.append_if_missing(m);
+          assert(_mergemem_worklist.contains(m->as_MergeMem()), "missing MergeMem node in the worklist");
        }
+#endif
      } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != NULL) {
@@ -1138,13 +1154,12 @@
    Node *n = memnode_worklist.pop();
    if (visited.test_set(n->_idx))
      continue;
-    if (n->is_Phi()) {
-      assert(n->as_Phi()->adr_type() != TypePtr::BOTTOM, "narrow memory slice required");
+    if (n->is_Phi() || n->Opcode() == Op_ClearArray) {
      // we don't need to do anything, but the users must be pushed if we haven't processed
-      // this Phi before
-    } else if (n->is_Initialize()) {
+    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
      // we don't need to do anything, but the users of the memory projection must be pushed
-      n = n->as_Initialize()->proj_out(TypeFunc::Memory);
+      // especially MergeMem nodes to populate the instance memory slices
+      n = n->as_MemBar()->proj_out(TypeFunc::Memory);
      if (n == NULL)
        continue;
    } else {
@@ -1182,31 +1197,41 @@
    // push user on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
-      if (use->is_Phi()) {
+      if (use->is_Phi() || use->Opcode() == Op_ClearArray) {
        memnode_worklist.append_if_missing(use);
      } else if(use->is_Mem() && use->in(MemNode::Memory) == n) {
        memnode_worklist.append_if_missing(use);
-      } else if (use->is_Initialize()) {
+      } else if (use->is_MemBar()) {
        memnode_worklist.append_if_missing(use);
+#ifdef ASSERT
      } else if (use->is_MergeMem()) {
-        mergemem_worklist.append_if_missing(use);
+        assert(_mergemem_worklist.contains(use->as_MergeMem()), "missing MergeMem node in the worklist");
+      } else {
+        uint op = use->Opcode();
+        if (!(op == Op_StoreCM || op == Op_AryEq || op == Op_StrComp ||
+              op == Op_StrEquals || op == Op_StrIndexOf)) {
+          n->dump();
+          use->dump();
+          assert(false, "EA missing memory path");
+        }
+#endif
      }
    }
  }
 
  // Phase 3: Process MergeMem nodes from mergemem_worklist.
-  // Walk each memory moving the first node encountered of each
+  // Walk each memory slice moving the first node encountered of each
  // instance type to the the input corresponding to its alias index.
-  while (mergemem_worklist.length() != 0) {
-    Node *n = mergemem_worklist.pop();
-    assert(n->is_MergeMem(), "MergeMem node required.");
-    if (visited.test_set(n->_idx))
-      continue;
-    MergeMemNode *nmm = n->as_MergeMem();
+  uint length = _mergemem_worklist.length();
+  for( uint next = 0; next < length; ++next ) {
+    MergeMemNode* nmm = _mergemem_worklist.at(next);
+    assert(!visited.test_set(nmm->_idx), "should not be visited before");
    // Note: we don't want to use MergeMemStream here because we only want to
-    // scan inputs which exist at the start, not ones we add during processing.
-    uint nslices = nmm->req();
+    // scan inputs which exist at the start, not ones we add during processing.
+    // Note 2: MergeMem may already contain instance memory slices added
+    // during find_inst_mem() call when memory nodes were processed above.
    igvn->hash_delete(nmm);
+    uint nslices = nmm->req();
    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
      Node* mem = nmm->in(i);
      Node* cur = NULL;
@@ -1260,41 +1285,6 @@
    }
    igvn->hash_insert(nmm);
    record_for_optimizer(nmm);
-
-    // Propagate new memory slices to following MergeMem nodes.
-    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
-      Node *use = n->fast_out(i);
-      if (use->is_Call()) {
-        CallNode* in = use->as_Call();
-        if (in->proj_out(TypeFunc::Memory) != NULL) {
-          Node* m = in->proj_out(TypeFunc::Memory);
-          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
-            Node* mm = m->fast_out(j);
-            if (mm->is_MergeMem()) {
-              mergemem_worklist.append_if_missing(mm);
-            }
-          }
-        }
-        if (use->is_Allocate()) {
-          use = use->as_Allocate()->initialization();
-          if (use == NULL) {
-            continue;
-          }
-        }
-      }
-      if (use->is_Initialize()) {
-        InitializeNode* in = use->as_Initialize();
-        if (in->proj_out(TypeFunc::Memory) != NULL) {
-          Node* m = in->proj_out(TypeFunc::Memory);
-          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
-            Node* mm = m->fast_out(j);
-            if (mm->is_MergeMem()) {
-              mergemem_worklist.append_if_missing(mm);
-            }
-          }
-        }
-      }
-    }
  }
 
  // Phase 4: Update the inputs of non-instance memory Phis and
@@ -1382,8 +1372,20 @@
        ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject) {
      has_allocations = true;
    }
-    if(n->is_AddP())
-      cg_worklist.append(n->_idx);
+    if(n->is_AddP()) {
+      // Collect address nodes which directly reference an allocation.
+      // Use them during stage 3 below to build initial connection graph
+      // field edges. Other field edges could be added after StoreP/LoadP
+      // nodes are processed during stage 4 below.
+      Node* base = get_addp_base(n);
+      if(base->is_Proj() && base->in(0)->is_Allocate()) {
+        cg_worklist.append(n->_idx);
+      }
+    } else if (n->is_MergeMem()) {
+      // Collect all MergeMem nodes to add memory slices for
+      // scalar replaceable objects in split_unique_types().
+      _mergemem_worklist.append(n->as_MergeMem());
+    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      worklist_init.push(m);
@@ -1424,12 +1426,13 @@
    }
  }
 
-  VectorSet ptset(Thread::current()->resource_area());
+  Arena* arena = Thread::current()->resource_area();
+  VectorSet ptset(arena);
  GrowableArray<uint>  deferred_edges;
-  VectorSet visited(Thread::current()->resource_area());
+  VectorSet visited(arena);
 
-  // 5. Remove deferred edges from the graph and collect
-  //    information needed for type splitting.
+  // 5. Remove deferred edges from the graph and adjust
+  //    escape state of nonescaping objects.
  cg_length = cg_worklist.length();
  for( uint next = 0; next < cg_length; ++next ) {
    int ni = cg_worklist.at(next);
@@ -1439,98 +1442,9 @@
      remove_deferred(ni, &deferred_edges, &visited);
      Node *n = ptn->_node;
      if (n->is_AddP()) {
-        // Search for objects which are not scalar replaceable.
-        // Mark their escape state as ArgEscape to propagate the state
-        // to referenced objects.
-        // Note: currently there are no difference in compiler optimizations
-        // for ArgEscape objects and NoEscape objects which are not
-        // scalar replaceable.
-
-        int offset = ptn->offset();
-        Node *base = get_addp_base(n);
-        ptset.Clear();
-        PointsTo(ptset, base, igvn);
-        int ptset_size = ptset.Size();
-
-        // Check if a field's initializing value is recorded and add
-        // a corresponding NULL field's value if it is not recorded.
-        // Connection Graph does not record a default initialization by NULL
-        // captured by Initialize node.
-        //
-        // Note: it will disable scalar replacement in some cases:
-        //
-        //    Point p[] = new Point[1];
-        //    p[0] = new Point(); // Will be not scalar replaced
-        //
-        // but it will save us from incorrect optimizations in next cases:
-        //
-        //    Point p[] = new Point[1];
-        //    if ( x ) p[0] = new Point(); // Will be not scalar replaced
-        //
-        // Without a control flow analysis we can't distinguish above cases.
-        //
-        if (offset != Type::OffsetBot && ptset_size == 1) {
-          uint elem = ptset.getelem(); // Allocation node's index
-          // It does not matter if it is not Allocation node since
-          // only non-escaping allocations are scalar replaced.
-          if (ptnode_adr(elem)->_node->is_Allocate() &&
-              ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
-            AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
-            InitializeNode* ini = alloc->initialization();
-            Node* value = NULL;
-            if (ini != NULL) {
-              BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
-              Node* store = ini->find_captured_store(offset, type2aelembytes(ft), igvn);
-              if (store != NULL && store->is_Store())
-                value = store->in(MemNode::ValueIn);
-            }
-            if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
-              // A field's initializing value was not recorded. Add NULL.
-              uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
-              add_pointsto_edge(ni, null_idx);
-            }
-          }
-        }
-
-        // An object is not scalar replaceable if the field which may point
-        // to it has unknown offset (unknown element of an array of objects).
-        //
-        if (offset == Type::OffsetBot) {
-          uint e_cnt = ptn->edge_count();
-          for (uint ei = 0; ei < e_cnt; ei++) {
-            uint npi = ptn->edge_target(ei);
-            set_escape_state(npi, PointsToNode::ArgEscape);
-            ptnode_adr(npi)->_scalar_replaceable = false;
-          }
-        }
-
-        // Currently an object is not scalar replaceable if a LoadStore node
-        // access its field since the field value is unknown after it.
-        //
-        bool has_LoadStore = false;
-        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
-          Node *use = n->fast_out(i);
-          if (use->is_LoadStore()) {
-            has_LoadStore = true;
-            break;
-          }
-        }
-        // An object is not scalar replaceable if the address points
-        // to unknown field (unknown element for arrays, offset is OffsetBot).
-        //
-        // Or the address may point to more then one object. This may produce
-        // the false positive result (set scalar_replaceable to false)
-        // since the flow-insensitive escape analysis can't separate
-        // the case when stores overwrite the field's value from the case
-        // when stores happened on different control branches.
-        //
-        if (ptset_size > 1 || ptset_size != 0 &&
-            (has_LoadStore || offset == Type::OffsetBot)) {
-          for( VectorSetI j(&ptset); j.test(); ++j ) {
-            set_escape_state(j.elem, PointsToNode::ArgEscape);
-            ptnode_adr(j.elem)->_scalar_replaceable = false;
-          }
-        }
+        // Search for objects which are not scalar replaceable
+        // and adjust their escape state.
+        verify_escape_state(ni, ptset, igvn);
      }
    }
  }
@@ -1647,6 +1561,150 @@
  return has_non_escaping_obj;
 }
 
+// Search for objects which are not scalar replaceable.
+void ConnectionGraph::verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase) {
+  PointsToNode* ptn = ptnode_adr(nidx);
+  Node* n = ptn->_node;
+  assert(n->is_AddP(), "Should be called for AddP nodes only");
+  // Search for objects which are not scalar replaceable.
+  // Mark their escape state as ArgEscape to propagate the state
+  // to referenced objects.
+  // Note: currently there is no difference in compiler optimizations
+  // for ArgEscape objects and NoEscape objects which are not
+  // scalar replaceable.
+
+  Compile* C = _compile;
+
+  int offset = ptn->offset();
+  Node* base = get_addp_base(n);
+  ptset.Clear();
+  PointsTo(ptset, base, phase);
+  int ptset_size = ptset.Size();
+
+  // Check if an oop field's initializing value is recorded and add
+  // a corresponding NULL field's value if it is not recorded.
+  // Connection Graph does not record a default initialization by NULL
+  // captured by Initialize node.
+  //
+  // Note: it will disable scalar replacement in some cases:
+  //
+  //    Point p[] = new Point[1];
+  //    p[0] = new Point(); // Will be not scalar replaced
+  //
+  // but it will save us from incorrect optimizations in next cases:
+  //
+  //    Point p[] = new Point[1];
+  //    if ( x ) p[0] = new Point(); // Will be not scalar replaced
+  //
+  // Do a simple control flow analysis to distinguish above cases.
+  //
+  if (offset != Type::OffsetBot && ptset_size == 1) {
+    uint elem = ptset.getelem(); // Allocation node's index
+    // It does not matter if it is not Allocation node since
+    // only non-escaping allocations are scalar replaced.
+    if (ptnode_adr(elem)->_node->is_Allocate() &&
+        ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
+      AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
+      InitializeNode* ini = alloc->initialization();
+
+      // Check only oop fields.
+      const Type* adr_type = n->as_AddP()->bottom_type();
+      BasicType basic_field_type = T_INT;
+      if (adr_type->isa_instptr()) {
+        ciField* field = C->alias_type(adr_type->isa_instptr())->field();
+        if (field != NULL) {
+          basic_field_type = field->layout_type();
+        } else {
+          // Ignore non field load (for example, klass load)
+        }
+      } else if (adr_type->isa_aryptr()) {
+        const Type* elemtype = adr_type->isa_aryptr()->elem();
+        basic_field_type = elemtype->array_element_basic_type();
+      } else {
+        // Raw pointers are used for initializing stores so skip it.
+        assert(adr_type->isa_rawptr() && base->is_Proj() &&
+               (base->in(0) == alloc), "unexpected pointer type");
+      }
+      if (basic_field_type == T_OBJECT ||
+          basic_field_type == T_NARROWOOP ||
+          basic_field_type == T_ARRAY) {
+        Node* value = NULL;
+        if (ini != NULL) {
+          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
+          Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
+          if (store != NULL && store->is_Store()) {
+            value = store->in(MemNode::ValueIn);
+          } else if (ptn->edge_count() > 0) { // Are there oop stores?
+            // Check for a store which follows allocation without branches.
+            // For example, a volatile field store is not collected
+            // by Initialize node. TODO: it would be nice to use idom() here.
+            for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+              store = n->fast_out(i);
+              if (store->is_Store() && store->in(0) != NULL) {
+                Node* ctrl = store->in(0);
+                while(!(ctrl == ini || ctrl == alloc || ctrl == NULL ||
+                        ctrl == C->root() || ctrl == C->top() || ctrl->is_Region() ||
+                        ctrl->is_IfTrue() || ctrl->is_IfFalse())) {
+                  ctrl = ctrl->in(0);
+                }
+                if (ctrl == ini || ctrl == alloc) {
+                  value = store->in(MemNode::ValueIn);
+                  break;
+                }
+              }
+            }
+          }
+        }
+        if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
+          // A field's initializing value was not recorded. Add NULL.
+          uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
+          add_pointsto_edge(nidx, null_idx);
+        }
+      }
+    }
+  }
+
+  // An object is not scalar replaceable if the field which may point
+  // to it has unknown offset (unknown element of an array of objects).
+  //
+  if (offset == Type::OffsetBot) {
+    uint e_cnt = ptn->edge_count();
+    for (uint ei = 0; ei < e_cnt; ei++) {
+      uint npi = ptn->edge_target(ei);
+      set_escape_state(npi, PointsToNode::ArgEscape);
+      ptnode_adr(npi)->_scalar_replaceable = false;
+    }
+  }
+
+  // Currently an object is not scalar replaceable if a LoadStore node
+  // accesses its field since the field value is unknown after it.
+  //
+  bool has_LoadStore = false;
+  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+    Node *use = n->fast_out(i);
+    if (use->is_LoadStore()) {
+      has_LoadStore = true;
+      break;
+    }
+  }
+  // An object is not scalar replaceable if the address points
+  // to unknown field (unknown element for arrays, offset is OffsetBot).
+  //
+  // Or the address may point to more than one object. This may produce
+  // the false positive result (set scalar_replaceable to false)
+  // since the flow-insensitive escape analysis can't separate
+  // the case when stores overwrite the field's value from the case
+  // when stores happened on different control branches.
+  //
+  if (ptset_size > 1 || ptset_size != 0 &&
+      (has_LoadStore || offset == Type::OffsetBot)) {
+    for( VectorSetI j(&ptset); j.test(); ++j ) {
+      set_escape_state(j.elem, PointsToNode::ArgEscape);
+      ptnode_adr(j.elem)->_scalar_replaceable = false;
+    }
+  }
+}
+
 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
 
  switch (call->Opcode()) {
@@ -1943,20 +2001,23 @@
      record_for_optimizer(n);
      _processed.set(n->_idx);
    } else {
-      // Have to process call's arguments first.
+      // Don't mark as processed since call's arguments have to be processed.
      PointsToNode::NodeType nt = PointsToNode::UnknownType;
+      PointsToNode::EscapeState es = PointsToNode::UnknownEscape;
 
      // Check if a call returns an object.
      const TypeTuple *r = n->as_Call()->tf()->range();
-      if (n->is_CallStaticJava() && r->cnt() > TypeFunc::Parms &&
+      if (r->cnt() > TypeFunc::Parms &&
+          r->field_at(TypeFunc::Parms)->isa_ptr() &&
          n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
-        // Note:  use isa_ptr() instead of isa_oopptr() here because
-        //        the _multianewarray functions return a TypeRawPtr.
-        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
-          nt = PointsToNode::JavaObject;
+        nt = PointsToNode::JavaObject;
+        if (!n->is_CallStaticJava()) {
+          // Since the called method is statically unknown, assume
+          // the worst case that the returned value globally escapes.
+          es = PointsToNode::GlobalEscape;
        }
      }
-      add_node(n, nt, PointsToNode::UnknownEscape, false);
+      add_node(n, nt, es, false);
    }
    return;
  }
@@ -2089,18 +2150,27 @@
    }
    case Op_Proj:
    {
-      // we are only interested in the result projection from a call
+      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
-        add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
-        process_call_result(n->as_Proj(), phase);
-        if (!_processed.test(n->_idx)) {
-          // The call's result may need to be processed later if the call
-          // returns it's argument and the argument is not processed yet.
-          _delayed_worklist.push(n);
+        const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
+        assert(r->cnt() > TypeFunc::Parms, "sanity");
+        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
+          add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
+          int ti = n->in(0)->_idx;
+          // The call may not be registered yet (since not all its inputs are registered)
+          // if this is the projection from backbranch edge of Phi.
+          if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) {
+            process_call_result(n->as_Proj(), phase);
+          }
+          if (!_processed.test(n->_idx)) {
+            // The call's result may need to be processed later if the call
+            // returns its argument and the argument is not processed yet.
+            _delayed_worklist.push(n);
+          }
+          break;
        }
-      } else {
-        _processed.set(n->_idx);
      }
+      _processed.set(n->_idx);
      break;
    }
    case Op_Return:
@@ -2161,6 +2231,15 @@
      }
      break;
    }
+    case Op_AryEq:
+    case Op_StrComp:
+    case Op_StrEquals:
+    case Op_StrIndexOf:
+    {
+      // char[] arrays passed to string intrinsics are not scalar replaceable.
+      add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
+      break;
+    }
    case Op_ThreadLocal:
    {
      add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
@@ -2175,6 +2254,7 @@
 
 void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
  uint n_idx = n->_idx;
+  assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered");
 
  // Don't set processed bit for AddP, LoadP, StoreP since
  // they may need more then one pass to process.
@@ -2212,6 +2292,7 @@
    case Op_DecodeN:
    {
      int ti = n->in(1)->_idx;
+      assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered");
      if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
        add_pointsto_edge(n_idx, ti);
      } else {
@@ -2251,7 +2332,6 @@
 #endif
 
      Node* adr = n->in(MemNode::Address)->uncast();
-      const Type *adr_type = phase->type(adr);
      Node* adr_base;
      if (adr->is_AddP()) {
        adr_base = get_addp_base(adr);
@@ -2303,13 +2383,19 @@
    }
    case Op_Proj:
    {
-      // we are only interested in the result projection from a call
+      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
-        process_call_result(n->as_Proj(), phase);
-        assert(_processed.test(n_idx), "all call results should be processed");
-      } else {
-        assert(false, "Op_Proj");
+        assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType,
+               "all nodes should be registered");
+        const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
+        assert(r->cnt() > TypeFunc::Parms, "sanity");
+        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
+          process_call_result(n->as_Proj(), phase);
+          assert(_processed.test(n_idx), "all call results should be processed");
+          break;
+        }
      }
+      assert(false, "Op_Proj");
      break;
    }
    case Op_Return:
@@ -2321,6 +2407,7 @@
      }
 #endif
      int ti = n->in(TypeFunc::Parms)->_idx;
+      assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered");
      if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
        add_pointsto_edge(n_idx, ti);
      } else {
@@ -2355,6 +2442,30 @@
      }
      break;
    }
+    case Op_AryEq:
+    case Op_StrComp:
+    case Op_StrEquals:
+    case Op_StrIndexOf:
+    {
+      // char[] arrays passed to string intrinsics do not escape but
+      // they are not scalar replaceable. Adjust escape state for them.
+      // Start from in(2) edge since in(1) is memory edge.
+      for (uint i = 2; i < n->req(); i++) {
+        Node* adr = n->in(i)->uncast();
+        const Type *at = phase->type(adr);
+        if (!adr->is_top() && at->isa_ptr()) {
+          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
+                 at->isa_ptr() != NULL, "expecting an Ptr");
+          if (adr->is_AddP()) {
+            adr = get_addp_base(adr);
+          }
+          // Mark as ArgEscape everything "adr" could point to.
+          set_escape_state(adr->_idx, PointsToNode::ArgEscape);
+        }
+      }
+      _processed.set(n_idx);
+      break;
+    }
    case Op_ThreadLocal:
    {
      assert(false, "Op_ThreadLocal");
@@ -2361,8 +2472,8 @@
      break;
    }
    default:
-      ;
-      // nothing to do
+      // This method should be called only for EA specific nodes.
+      ShouldNotReachHere();
  }
 }
 
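For context (this note and the sketch below are not part of the patch): the verify_escape_state() comments above motivate adding a NULL points-to edge when an oop field's initializing value is not captured, using two Java allocation shapes — an unconditional array-element store and one guarded by a branch. A minimal standalone Java sketch of those shapes follows; the class and method names are illustrative only, and the "will not be scalar replaced" remarks simply restate the patch's own comments.

// Illustrative reproducer for the allocation shapes cited in the patch comments.
public class PointArrayExample {
    static class Point {
        int x;
    }

    // Unconditional element store (first shape in the comment).
    static int unconditionalStore() {
        Point[] p = new Point[1];
        p[0] = new Point();               // per the comment: will not be scalar replaced
        return p[0].x;
    }

    // Store guarded by a branch (second shape); p[0] may keep its default null,
    // which is the case the added NULL points-to edge guards against.
    static int conditionalStore(boolean x) {
        Point[] p = new Point[1];
        if (x) {
            p[0] = new Point();           // per the comment: will not be scalar replaced
        }
        return (p[0] != null) ? p[0].x : -1;
    }

    public static void main(String[] args) {
        System.out.println(unconditionalStore() + conditionalStore(false));
    }
}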