--- old/make/linux/makefiles/gcc.make	2015-10-09 14:50:44.876113000 +0200
+++ new/make/linux/makefiles/gcc.make	2015-10-09 14:50:44.733092000 +0200
@@ -223,6 +223,8 @@
       WARNING_FLAGS += -Wtype-limits
       # GCC < 4.8 don't accept this flag for C++.
       WARNING_FLAGS += -Wno-format-zero-length
+      # GCC 4.8 reports fewer false positives than the older compilers.
+      WARNING_FLAGS += -Wuninitialized
     endif
   endif
--- old/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	2015-10-09 14:50:45.782165000 +0200
+++ new/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	2015-10-09 14:50:45.622151000 +0200
@@ -1667,8 +1667,8 @@
   Register Rtmp1 = noreg;
 
   // check if it needs to be profiled
-  ciMethodData* md;
-  ciProfileData* data;
+  ciMethodData* md = NULL;
+  ciProfileData* data = NULL;
 
   if (op->should_profile()) {
     ciMethod* method = op->profiled_method();
@@ -1827,8 +1827,8 @@
   CodeStub* stub = op->stub();
 
   // check if it needs to be profiled
-  ciMethodData* md;
-  ciProfileData* data;
+  ciMethodData* md = NULL;
+  ciProfileData* data = NULL;
 
   if (op->should_profile()) {
     ciMethod* method = op->profiled_method();
@@ -3186,7 +3186,7 @@
   int elem_size = type2aelembytes(basic_type);
   int shift_amount;
-  Address::ScaleFactor scale;
+  Address::ScaleFactor scale = Address::no_scale;
 
   switch (elem_size) {
     case 1 :
--- old/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	2015-10-09 14:50:46.757244000 +0200
+++ new/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	2015-10-09 14:50:46.611252000 +0200
@@ -484,7 +484,7 @@
     __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
     __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
 
-    address entry;
+    address entry = NULL;
     switch (x->op()) {
     case Bytecodes::_lrem:
       entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
--- old/src/cpu/x86/vm/jniFastGetField_x86_32.cpp	2015-10-09 14:50:47.617288000 +0200
+++ new/src/cpu/x86/vm/jniFastGetField_x86_32.cpp	2015-10-09 14:50:47.481321000 +0200
@@ -48,7 +48,7 @@
 // between loads, which is much more efficient than lfence.
 address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
-  const char *name;
+  const char *name = NULL;
   switch (type) {
     case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
     case T_BYTE:    name = "jni_fast_GetByteField";    break;
@@ -122,7 +122,7 @@
   slowcase_entry_pclist[count++] = __ pc();
   __ bind (slow);
-  address slow_case_addr;
+  address slow_case_addr = NULL;
   switch (type) {
     case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
     case T_BYTE:    slow_case_addr = jni_GetByteField_addr();    break;
@@ -256,7 +256,7 @@
 }
 
 address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
-  const char *name;
+  const char *name = NULL;
   switch (type) {
     case T_FLOAT:  name = "jni_fast_GetFloatField";  break;
     case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
@@ -337,7 +337,7 @@
   slowcase_entry_pclist[count++] = __ pc();
   __ bind (slow);
-  address slow_case_addr;
+  address slow_case_addr = NULL;
   switch (type) {
     case T_FLOAT:  slow_case_addr = jni_GetFloatField_addr();  break;
     case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;
--- old/src/cpu/x86/vm/jniFastGetField_x86_64.cpp	2015-10-09 14:50:48.440527000 +0200
+++ new/src/cpu/x86/vm/jniFastGetField_x86_64.cpp	2015-10-09 14:50:48.300335000 +0200
@@ -111,7 +111,7 @@
   slowcase_entry_pclist[count++] = __ pc();
   __ bind (slow);
-  address slow_case_addr;
+  address slow_case_addr = NULL;
   switch (type) {
     case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
     case T_BYTE:    slow_case_addr = jni_GetByteField_addr();    break;
@@ -206,7 +206,7 @@
   slowcase_entry_pclist[count++] = __ pc();
   __ bind (slow);
-  address slow_case_addr;
+  address slow_case_addr = NULL;
   switch (type) {
     case T_FLOAT:  slow_case_addr = jni_GetFloatField_addr();  break;
     case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr();
--- old/src/os/linux/vm/os_linux.cpp	2015-10-09 14:50:49.328414000 +0200
+++ new/src/os/linux/vm/os_linux.cpp	2015-10-09 14:50:49.160411000 +0200
@@ -2795,7 +2795,7 @@
 
 int os::Linux::sched_getcpu_syscall(void) {
-  unsigned int cpu;
+  unsigned int cpu = 0;
   int retval = -1;
 
 #if defined(IA32)
@@ -4188,8 +4188,8 @@
     sigaddset(&(actp->sa_mask), sig);
   }
 
-  sa_handler_t hand;
-  sa_sigaction_t sa;
+  sa_handler_t hand = NULL;
+  sa_sigaction_t sa = NULL;
   bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
   // retrieve the chained handler
   if (siginfo_flag_set) {
@@ -4394,7 +4394,7 @@
 
 static const char* get_signal_handler_name(address handler,
                                            char* buf, int buflen) {
-  int offset;
+  int offset = 0;
   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
   if (found) {
     // skip directory names
--- old/src/os_cpu/linux_x86/vm/copy_linux_x86.inline.hpp	2015-10-09 14:50:50.365499000 +0200
+++ new/src/os_cpu/linux_x86/vm/copy_linux_x86.inline.hpp	2015-10-09 14:50:50.227496000 +0200
@@ -30,7 +30,7 @@
   (void)memmove(to, from, count * HeapWordSize);
 #else
   // Includes a zero-count check.
-  intx temp;
+  intx temp = 0;
   __asm__ volatile("  testl   %6,%6  ;"
                    "  jz      7f     ;"
                    "  cmpl    %4,%5  ;"
@@ -88,7 +88,7 @@
   }
 #else
   // Includes a zero-count check.
-  intx temp;
+  intx temp = 0;
   __asm__ volatile("  testl   %6,%6    ;"
                    "  jz      3f       ;"
                    "  cmpl    $32,%6   ;"
@@ -145,7 +145,7 @@
   (void)memmove(to, from, count);
 #else
   // Includes a zero-count check.
-  intx temp;
+  intx temp = 0;
   __asm__ volatile("  testl   %6,%6  ;"
                    "  jz      13f    ;"
                    "  cmpl    %4,%5  ;"
--- old/src/share/vm/c1/c1_GraphBuilder.cpp	2015-10-09 14:50:51.287568000 +0200
+++ new/src/share/vm/c1/c1_GraphBuilder.cpp	2015-10-09 14:50:51.118548000 +0200
@@ -3823,8 +3823,8 @@
   caller_state->truncate_stack(args_base);
   assert(callee_state->stack_size() == 0, "callee stack must be empty");
 
-  Value lock;
-  BlockBegin* sync_handler;
+  Value lock = NULL;
+  BlockBegin* sync_handler = NULL;
 
   // Inline the locking of the receiver if the callee is synchronized
   if (callee->is_synchronized()) {
--- old/src/share/vm/c1/c1_LIRGenerator.hpp	2015-10-09 14:50:52.259640000 +0200
+++ new/src/share/vm/c1/c1_LIRGenerator.hpp	2015-10-09 14:50:52.117646000 +0200
@@ -408,7 +408,7 @@
   }
 
   static LIR_Condition lir_cond(If::Condition cond) {
-    LIR_Condition l;
+    LIR_Condition l = lir_cond_unknown;
     switch (cond) {
     case If::eql: l = lir_cond_equal;    break;
     case If::neq: l = lir_cond_notEqual; break;
--- old/src/share/vm/classfile/classFileParser.cpp	2015-10-09 14:50:53.150698000 +0200
+++ new/src/share/vm/classfile/classFileParser.cpp	2015-10-09 14:50:52.981705000 +0200
@@ -3201,19 +3201,19 @@
   // Field size and offset computation
   int nonstatic_field_size = _super_klass() == NULL ? 0 : _super_klass()->nonstatic_field_size();
 
-  int next_static_oop_offset;
-  int next_static_double_offset;
-  int next_static_word_offset;
-  int next_static_short_offset;
-  int next_static_byte_offset;
-  int next_nonstatic_oop_offset;
-  int next_nonstatic_double_offset;
-  int next_nonstatic_word_offset;
-  int next_nonstatic_short_offset;
-  int next_nonstatic_byte_offset;
-  int first_nonstatic_oop_offset;
-  int next_nonstatic_field_offset;
-  int next_nonstatic_padded_offset;
+  int next_static_oop_offset = 0;
+  int next_static_double_offset = 0;
+  int next_static_word_offset = 0;
+  int next_static_short_offset = 0;
+  int next_static_byte_offset = 0;
+  int next_nonstatic_oop_offset = 0;
+  int next_nonstatic_double_offset = 0;
+  int next_nonstatic_word_offset = 0;
+  int next_nonstatic_short_offset = 0;
+  int next_nonstatic_byte_offset = 0;
+  int first_nonstatic_oop_offset = 0;
+  int next_nonstatic_field_offset = 0;
+  int next_nonstatic_padded_offset = 0;
 
   // Count the contended fields by type.
   //
@@ -3366,14 +3366,14 @@
     ShouldNotReachHere();
   }
 
-  int nonstatic_oop_space_count   = 0;
-  int nonstatic_word_space_count  = 0;
-  int nonstatic_short_space_count = 0;
-  int nonstatic_byte_space_count  = 0;
-  int nonstatic_oop_space_offset;
-  int nonstatic_word_space_offset;
-  int nonstatic_short_space_offset;
-  int nonstatic_byte_space_offset;
+  int nonstatic_oop_space_count   = 0;
+  int nonstatic_word_space_count  = 0;
+  int nonstatic_short_space_count = 0;
+  int nonstatic_byte_space_count  = 0;
+  int nonstatic_oop_space_offset  = 0;
+  int nonstatic_word_space_offset = 0;
+  int nonstatic_short_space_offset = 0;
+  int nonstatic_byte_space_offset = 0;
 
   // Try to squeeze some of the fields into the gaps due to
   // long/double alignment.
@@ -3445,7 +3445,7 @@
     // contended instance fields are handled below
     if (fs.is_contended() && !fs.access_flags().is_static()) continue;
 
-    int real_offset;
+    int real_offset = 0;
     FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
 
     // pack the rest of the fields
--- old/src/share/vm/classfile/compactHashtable.cpp	2015-10-09 14:50:54.203802000 +0200
+++ new/src/share/vm/classfile/compactHashtable.cpp	2015-10-09 14:50:54.053784000 +0200
@@ -422,7 +422,7 @@
 
 int HashtableTextDump::scan_string_prefix() {
   // Expect /[0-9]+: /
-  int utf8_length;
+  int utf8_length = 0;
   get_num(':', &utf8_length);
   if (*_p != ' ') {
     corrupted(_p, "Wrong prefix format for string");
@@ -433,7 +433,7 @@
 
 int HashtableTextDump::scan_symbol_prefix() {
   // Expect /[0-9]+ (-|)[0-9]+: /
-  int utf8_length;
+  int utf8_length = 0;
   get_num(' ', &utf8_length);
   if (*_p == '-') {
     _p++;
--- old/src/share/vm/classfile/placeholders.hpp	2015-10-09 14:50:55.169850000 +0200
+++ new/src/share/vm/classfile/placeholders.hpp	2015-10-09 14:50:55.001856000 +0200
@@ -220,7 +220,7 @@
   }
 
   SeenThread* actionToQueue(PlaceholderTable::classloadAction action) {
-    SeenThread* queuehead;
+    SeenThread* queuehead = NULL;
     switch (action) {
       case PlaceholderTable::LOAD_INSTANCE:
         queuehead = _loadInstanceThreadQ;
--- old/src/share/vm/compiler/oopMap.hpp	2015-10-09 14:50:56.159943000 +0200
+++ new/src/share/vm/compiler/oopMap.hpp	2015-10-09 14:50:56.009911000 +0200
@@ -73,8 +73,8 @@
 
   // Constructors
   OopMapValue () { set_value(0); set_content_reg(VMRegImpl::Bad()); }
-  OopMapValue (VMReg reg, oop_types t) { set_reg_type(reg,t); }
-  OopMapValue (VMReg reg, oop_types t, VMReg reg2) { set_reg_type(reg,t); set_content_reg(reg2); }
+  OopMapValue (VMReg reg, oop_types t) { set_reg_type(reg, t); set_content_reg(VMRegImpl::Bad()); }
+  OopMapValue (VMReg reg, oop_types t, VMReg reg2) { set_reg_type(reg, t); set_content_reg(reg2); }
   OopMapValue (CompressedReadStream* stream) { read_from(stream); }
 
   // Archiving
@@ -87,7 +87,7 @@
 
   void read_from(CompressedReadStream* stream) {
     set_value(stream->read_int());
-    if(is_callee_saved() || is_derived_oop()) {
+    if (is_callee_saved() || is_derived_oop()) {
       set_content_reg(VMRegImpl::as_VMReg(stream->read_int(), true));
     }
   }
--- old/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	2015-10-09 14:50:57.105024000 +0200
+++ new/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	2015-10-09 14:50:56.929005000 +0200
@@ -1795,7 +1795,7 @@
   }
 
   // Used for PrintGC
-  size_t prev_used;
+  size_t prev_used = 0;
   if (PrintGC && Verbose) {
     prev_used = _cmsGen->used();
   }
--- old/src/share/vm/gc/g1/heapRegionRemSet.cpp	2015-10-09 14:50:58.291097000 +0200
+++ new/src/share/vm/gc/g1/heapRegionRemSet.cpp	2015-10-09 14:50:58.142099000 +0200
@@ -563,7 +563,7 @@
   assert(_n_fine_entries == _max_fine_entries, "Precondition");
   PerRegionTable* max = NULL;
   jint max_occ = 0;
-  PerRegionTable** max_prev;
+  PerRegionTable** max_prev = NULL;
   size_t max_ind;
 
   size_t i = _fine_eviction_start;
@@ -1138,7 +1138,7 @@
 
 void HeapRegionRemSet::print_recorded() {
   int cur_evnt = 0;
-  Event cur_evnt_kind;
+  Event cur_evnt_kind = Event_illegal;
   int cur_evnt_ind = 0;
   if (_n_recorded_events > 0) {
     cur_evnt_kind = _recorded_events[cur_evnt];
--- old/src/share/vm/gc/g1/heapRegionRemSet.hpp	2015-10-09 14:50:59.162164000 +0200
+++ new/src/share/vm/gc/g1/heapRegionRemSet.hpp	2015-10-09 14:50:59.021139000 +0200
@@ -222,7 +222,7 @@
 
 public:
   enum Event {
-    Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd
+    Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd, Event_illegal
   };
 
 private:
--- old/src/share/vm/interpreter/templateInterpreter.cpp	2015-10-09 14:50:59.991260000 +0200
+++ new/src/share/vm/interpreter/templateInterpreter.cpp	2015-10-09 14:50:59.849298000 +0200
@@ -562,7 +562,7 @@
   if (StopInterpreterAt > 0) stop_interpreter_at();
   __ verify_FPU(1, t->tos_in());
 #endif // !PRODUCT
-  int step;
+  int step = 0;
   if (!t->does_dispatch()) {
     step = t->is_wide() ? Bytecodes::wide_length_for(t->bytecode()) : Bytecodes::length_for(t->bytecode());
     if (tos_out == ilgl) tos_out = t->tos_out();
--- old/src/share/vm/opto/callGenerator.cpp	2015-10-09 14:51:00.866297000 +0200
+++ new/src/share/vm/opto/callGenerator.cpp	2015-10-09 14:51:00.715291000 +0200
@@ -671,7 +671,7 @@
                       &exact_receiver);
 
   SafePointNode* slow_map = NULL;
-  JVMState* slow_jvms;
+  JVMState* slow_jvms = NULL;
   { PreserveJVMState pjvms(&kit);
     kit.set_control(slow_ctl);
     if (!kit.stopped()) {
--- old/src/share/vm/opto/compile.hpp	2015-10-09 14:51:01.837389000 +0200
+++ new/src/share/vm/opto/compile.hpp	2015-10-09 14:51:01.684363000 +0200
@@ -89,7 +89,7 @@
 typedef unsigned int node_idx_t;
 class NodeCloneInfo {
  private:
-  uint64_t _idx_clone_orig;
+  uint64_t _idx_clone_orig;
  public:
 
   void set_idx(node_idx_t idx) {
@@ -98,17 +98,17 @@
   node_idx_t idx() const { return (node_idx_t)(_idx_clone_orig & 0xFFFFFFFF); }
 
   void set_gen(int generation) {
-    uint64_t g = (uint64_t)generation << 32;
+    uint64_t g = (uint64_t)generation << 32;
     _idx_clone_orig = _idx_clone_orig & 0xFFFFFFFF | g;
   }
   int gen() const { return (int)(_idx_clone_orig >> 32); }
 
-  void set(uint64_t x) { _idx_clone_orig = x; }
-  void set(node_idx_t x, int g) { set_idx(x); set_gen(g); }
+  void set(uint64_t x) { _idx_clone_orig = x; }
+  void set(node_idx_t x, int g) { set_idx(x); set_gen(g); }
   uint64_t get() const { return _idx_clone_orig; }
 
   NodeCloneInfo(uint64_t idx_clone_orig) : _idx_clone_orig(idx_clone_orig) {}
-  NodeCloneInfo(node_idx_t x, int g) {set(x, g);}
+  NodeCloneInfo(node_idx_t x, int g) : _idx_clone_orig(0) { set(x, g); }
 
   void dump() const;
 };
--- old/src/share/vm/opto/generateOptoStub.cpp	2015-10-09 14:51:02.774462000 +0200
+++ new/src/share/vm/opto/generateOptoStub.cpp	2015-10-09 14:51:02.630463000 +0200
@@ -261,7 +261,7 @@
 
   //-----------------------------
   // If this is a normal subroutine return, issue the return and be done.
-  Node *ret;
+  Node *ret = NULL;
   switch( is_fancy_jump ) {
   case 0:                       // Make a return instruction
     // Return to caller, free any space for return address
--- old/src/share/vm/opto/lcm.cpp	2015-10-09 14:51:03.685510000 +0200
+++ new/src/share/vm/opto/lcm.cpp	2015-10-09 14:51:03.527483000 +0200
@@ -806,7 +806,7 @@
   block->insert_node(proj, node_cnt++);
 
   // Select the right register save policy.
-  const char * save_policy;
+  const char *save_policy = NULL;
   switch (op) {
     case Op_CallRuntime:
     case Op_CallLeaf:
--- old/src/share/vm/opto/library_call.cpp	2015-10-09 14:51:04.811584000 +0200
+++ new/src/share/vm/opto/library_call.cpp	2015-10-09 14:51:04.638577000 +0200
@@ -2661,7 +2661,7 @@
 
   // For now, we handle only those cases that actually exist: ints,
   // longs, and Object. Adding others should be straightforward.
-  Node* load_store;
+  Node* load_store = NULL;
   switch(type) {
   case T_INT:
     if (kind == LS_xadd) {
@@ -3667,7 +3667,7 @@
   Node* end               = is_copyOfRange? argument(2): argument(1);
   Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
-  Node* newcopy;
+  Node* newcopy = NULL;
 
   // Set the original stack and the reexecute bit for the interpreter to reexecute
   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
--- old/src/share/vm/opto/macro.cpp	2015-10-09 14:51:05.941656000 +0200
+++ new/src/share/vm/opto/macro.cpp	2015-10-09 14:51:05.783678000 +0200
@@ -779,10 +779,10 @@
   ciKlass* klass = NULL;
   ciInstanceKlass* iklass = NULL;
   int nfields = 0;
-  int array_base;
-  int element_size;
-  BasicType basic_elem_type;
-  ciType* elem_type;
+  int array_base = 0;
+  int element_size = 0;
+  BasicType basic_elem_type = T_ILLEGAL;
+  ciType* elem_type = NULL;
 
   Node* res = alloc->result_cast();
   assert(res == NULL || res->is_CheckCastPP(), "unexpected AllocateNode result");
@@ -1305,10 +1305,10 @@
   // We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
   // they will not be used if "always_slow" is set
   enum { slow_result_path = 1, fast_result_path = 2 };
-  Node *result_region;
-  Node *result_phi_rawmem;
-  Node *result_phi_rawoop;
-  Node *result_phi_i_o;
+  Node *result_region = NULL;
+  Node *result_phi_rawmem = NULL;
+  Node *result_phi_rawoop = NULL;
+  Node *result_phi_i_o = NULL;
 
   // The initial slow comparison is a size check, the comparison
   // we want to do is a BoolTest::gt
--- old/src/share/vm/opto/split_if.cpp	2015-10-09 14:51:06.912732000 +0200
+++ new/src/share/vm/opto/split_if.cpp	2015-10-09 14:51:06.765767000 +0200
@@ -451,8 +451,8 @@
 
   // Replace both uses of 'new_iff' with Regions merging True/False
   // paths. This makes 'new_iff' go dead.
-  Node *old_false, *old_true;
-  Node *new_false, *new_true;
+  Node *old_false = NULL, *old_true = NULL;
+  Node *new_false = NULL, *new_true = NULL;
   for (DUIterator_Last j2min, j2 = iff->last_outs(j2min); j2 >= j2min; --j2) {
     Node *ifp = iff->last_out(j2);
     assert( ifp->Opcode() == Op_IfFalse || ifp->Opcode() == Op_IfTrue, "" );
--- old/src/share/vm/prims/jvmtiEnter.xsl	2015-10-09 14:51:07.851827000 +0200
+++ new/src/share/vm/prims/jvmtiEnter.xsl	2015-10-09 14:51:07.698803000 +0200
@@ -632,8 +632,8 @@
     jint trace_flags = JvmtiTrace::trace_flags(
     );
-    const char *func_name;
-    const char *curr_thread_name;
+    const char *func_name = NULL;
+    const char *curr_thread_name = NULL;
     if (trace_flags) {
       func_name = JvmtiTrace::function_name(
--- old/src/share/vm/prims/jvmtiEnvBase.cpp	2015-10-09 14:51:08.823890000 +0200
+++ new/src/share/vm/prims/jvmtiEnvBase.cpp	2015-10-09 14:51:08.653879000 +0200
@@ -512,7 +512,7 @@
 // mean much better out of memory handling
 unsigned char *
 JvmtiEnvBase::jvmtiMalloc(jlong size) {
-  unsigned char* mem;
+  unsigned char* mem = NULL;
   jvmtiError result = allocate(size, &mem);
   assert(result == JVMTI_ERROR_NONE, "Allocate failed");
   return mem;
@@ -1032,7 +1032,7 @@
       // implied else: entry_count == 0
     }
 
-    jint nWant, nWait;
+    jint nWant = 0, nWait = 0;
     if (mon != NULL) {
       // this object has a heavyweight monitor
       nWant = mon->contentions(); // # of threads contending for monitor
--- old/src/share/vm/prims/unsafe.cpp	2015-10-09 14:51:09.770988000 +0200
+++ new/src/share/vm/prims/unsafe.cpp	2015-10-09 14:51:09.615966000 +0200
@@ -785,7 +785,7 @@
 
 UNSAFE_ENTRY(jint, Unsafe_ArrayBaseOffset(JNIEnv *env, jobject unsafe, jclass acls))
   UnsafeWrapper("Unsafe_ArrayBaseOffset");
-  int base, scale;
+  int base = 0, scale = 0;
   getBaseAndScale(base, scale, acls, CHECK_0);
   return field_offset_from_byte_offset(base);
 UNSAFE_END
@@ -793,7 +793,7 @@
 
 UNSAFE_ENTRY(jint, Unsafe_ArrayIndexScale(JNIEnv *env, jobject unsafe, jclass acls))
UnsafeWrapper("Unsafe_ArrayIndexScale"); - int base, scale; + int base = 0, scale = 0; getBaseAndScale(base, scale, acls, CHECK_0); // This VM packs both fields and array elements down to the byte. // But watch out: If this changes, so that array references for --- old/src/share/vm/runtime/safepoint.cpp 2015-10-09 14:51:10.724020000 +0200 +++ new/src/share/vm/runtime/safepoint.cpp 2015-10-09 14:51:10.558011000 +0200 @@ -124,7 +124,7 @@ // Save the starting time, so that it can be compared to see if this has taken // too long to complete. - jlong safepoint_limit_time; + jlong safepoint_limit_time = 0; timeout_error_printed = false; // PrintSafepointStatisticsTimeout can be specified separately. When @@ -903,7 +903,7 @@ void ThreadSafepointState::print_on(outputStream *st) const { - const char *s; + const char *s = NULL; switch(_type) { case _running : s = "_running"; break; --- old/src/share/vm/services/threadService.hpp 2015-10-09 14:51:11.701115000 +0200 +++ new/src/share/vm/services/threadService.hpp 2015-10-09 14:51:11.537075000 +0200 @@ -430,7 +430,7 @@ set_thread_status(state); } - JavaThreadStatusChanger(JavaThread* java_thread) { + JavaThreadStatusChanger(JavaThread* java_thread) : _old_state(java_lang_Thread::NEW) { save_old_state(java_thread); } @@ -527,7 +527,7 @@ // Current thread is the notifying thread which holds the monitor. static bool wait_reenter_begin(JavaThread *java_thread, ObjectMonitor *obj_m) { assert((java_thread != NULL), "Java thread should not be null here"); - bool active = false; + bool active = false; if (is_alive(java_thread) && ServiceUtil::visible_oop((oop)obj_m->object())) { active = contended_enter_begin(java_thread); } @@ -542,7 +542,7 @@ } JavaThreadBlockedOnMonitorEnterState(JavaThread *java_thread, ObjectMonitor *obj_m) : - JavaThreadStatusChanger(java_thread) { + _stat(NULL), _active(false), JavaThreadStatusChanger(java_thread) { assert((java_thread != NULL), "Java thread should not be null here"); // Change thread status and collect contended enter stats for monitor contended // enter done for external java world objects and it is contended. All other cases